id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
163,729 | from asdl.hypothesis import Hypothesis
from asdl.transition_system import ApplyRuleAction, GenTokenAction
from asdl.sql.sql_transition_system import SelectColumnAction, SelectTableAction
class ActionInfo(object):
    """Sufficient statistics for predicting one action at a single decoding time step."""

    def __init__(self, action=None):
        self.t = 0                      # decoding time step of this action
        self.parent_t = -1              # time step that created the frontier node, -1 for the root
        self.action = action
        self.frontier_prod = None       # production rule of the frontier node
        self.frontier_field = None      # field being expanded when this action fires
        # for GenToken actions only
        self.copy_from_src = False      # whether the token was copied from the source query
        self.src_token_position = -1    # index of the copied token in the source, -1 if not copied

    def __repr__(self, verbose=False):
        # NOTE(review): in verbose mode this reads attributes (action_prob,
        # in_vocab, ...) that are attached externally after scoring — confirm
        # callers set them before requesting a verbose repr.
        field_repr = self.frontier_field.__repr__(True) if self.frontier_field else 'None'
        repr_str = '%s (t=%d, p_t=%d, frontier_field=%s)' % (repr(self.action),
                                                             self.t,
                                                             self.parent_t,
                                                             field_repr)
        if verbose:
            verbose_repr = 'action_prob=%.4f, ' % self.action_prob
            if isinstance(self.action, GenTokenAction):
                extra = (self.in_vocab, self.gen_copy_switch,
                         self.gen_token_prob, self.copy_token_prob,
                         self.copy_from_src, self.src_token_position)
                verbose_repr += 'in_vocab=%s, ' \
                                'gen_copy_switch=%s, ' \
                                'p(gen)=%s, p(copy)=%s, ' \
                                'has_copy=%s, copy_pos=%s' % extra
            repr_str = repr_str + '\n' + verbose_repr
        return repr_str
class Hypothesis(object):
    """A partially constructed AST built up during decoding, plus the bookkeeping
    (frontier node/field, score, time step) needed to apply further actions."""

    def __init__(self):
        self.tree = None              # root AbstractSyntaxTree, None until the first action
        self.actions = []             # actions applied so far, in order
        self.score = 0.               # cumulative score of this hypothesis
        self.frontier_node = None     # AST node owning the next unrealized field
        self.frontier_field = None    # next unrealized field to fill
        self._value_buffer = []       # accumulates tokens for a multi-token string field
        # record the current time step
        self.t = 0

    def apply_action(self, action):
        """Apply one decoding action (ApplyRule / GenToken / Reduce) to this hypothesis.

        Raises ValueError for actions that are invalid on the current frontier field.
        """
        if self.tree is None:  # the first action must create the root node
            # FIX: the original left the '[%s]' placeholder unformatted (no `% action`)
            assert isinstance(action, ApplyRuleAction), \
                'Invalid action [%s], only ApplyRule action is valid ' \
                'at the beginning of decoding' % action
            self.tree = AbstractSyntaxTree(action.production)
            self.update_frontier_info()
        elif self.frontier_node:
            if isinstance(self.frontier_field.type, ASDLCompositeType):
                if isinstance(action, ApplyRuleAction):
                    field_value = AbstractSyntaxTree(action.production)
                    field_value.created_time = self.t
                    self.frontier_field.add_value(field_value)
                    self.update_frontier_info()
                elif isinstance(action, ReduceAction):
                    # FIX: message previously claimed 'multiple' only, though optional is allowed too
                    assert self.frontier_field.cardinality in ('optional', 'multiple'), \
                        'Reduce action can only be applied on field with ' \
                        'optional or multiple cardinality'
                    self.frontier_field.set_finish()
                    self.update_frontier_info()
                else:
                    raise ValueError('Invalid action [%s] on field [%s]' % (action, self.frontier_field))
            else:  # fill in a primitive field
                if isinstance(action, GenTokenAction):
                    # only field of type string requires termination signal </primitive>
                    end_primitive = False
                    if self.frontier_field.type.name == 'string':
                        if action.is_stop_signal():
                            # flush the buffered tokens as one space-joined value
                            self.frontier_field.add_value(' '.join(self._value_buffer))
                            self._value_buffer = []
                            end_primitive = True
                        else:
                            self._value_buffer.append(action.token)
                    else:
                        self.frontier_field.add_value(action.token)
                        end_primitive = True
                    if end_primitive and self.frontier_field.cardinality in ('single', 'optional'):
                        self.frontier_field.set_finish()
                        self.update_frontier_info()
                elif isinstance(action, ReduceAction):
                    assert self.frontier_field.cardinality in ('optional', 'multiple'), \
                        'Reduce action can only be applied on field with ' \
                        'optional or multiple cardinality'
                    self.frontier_field.set_finish()
                    self.update_frontier_info()
                else:
                    raise ValueError('Can only invoke GenToken or Reduce actions on primitive fields')
        self.t += 1
        self.actions.append(action)

    def update_frontier_info(self):
        """Recompute (frontier_node, frontier_field) by a depth-first scan of the tree."""
        def _find_frontier_node_and_field(tree_node):
            # return None if each field of this ast node is realized else unfinished ast node, unrealized field
            if tree_node:
                for field in tree_node.fields:
                    # if it's an intermediate node, check its children
                    if isinstance(field.type, ASDLCompositeType) and field.value:
                        if field.cardinality in ('single', 'optional'): iter_values = [field.value]
                        else: iter_values = field.value
                        for child_node in iter_values:
                            result = _find_frontier_node_and_field(child_node)
                            if result: return result
                    # now all its possible children are checked
                    if not field.finished:
                        return tree_node, field
                return None
            else: return None

        frontier_info = _find_frontier_node_and_field(self.tree)
        if frontier_info:
            self.frontier_node, self.frontier_field = frontier_info
        else:
            self.frontier_node, self.frontier_field = None, None

    def clone_and_apply_action(self, action):
        """Return a copy of this hypothesis with `action` applied (self is untouched)."""
        new_hyp = self.copy()
        new_hyp.apply_action(action)
        return new_hyp

    def copy(self):
        """Deep-copy the tree and shallow-copy the bookkeeping into a new Hypothesis."""
        new_hyp = Hypothesis()
        if self.tree:
            new_hyp.tree = self.tree.copy()
        new_hyp.actions = list(self.actions)
        new_hyp.score = self.score
        new_hyp._value_buffer = list(self._value_buffer)
        new_hyp.t = self.t
        new_hyp.update_frontier_info()
        return new_hyp

    def completed(self):
        """Truthy iff a tree exists and every field has been realized."""
        return self.tree and self.frontier_field is None
class GenTokenAction(Action):
    """Action that emits one primitive token; the special token '</primitive>'
    terminates a multi-token string value."""

    def __init__(self, token):
        self.token = token

    def is_stop_signal(self):
        # true when this token closes the current string field
        return self.token == '</primitive>'

    def __repr__(self):
        return 'GenToken[%s]' % self.token
class SelectColumnAction(GenTokenAction):
    """GenToken action that selects a database column by its id."""

    def __init__(self, column_id):
        super(SelectColumnAction, self).__init__(column_id)

    # FIX: restored @property — __repr__ interpolates `self.column_id`, which
    # without the decorator is a bound method and rendered as '<bound method ...>'
    @property
    def column_id(self):
        # the selected column id is stored as the generic token payload
        return self.token

    def __repr__(self):
        return 'SelectColumnAction[id=%s]' % self.column_id
class SelectTableAction(GenTokenAction):
    """GenToken action that selects a database table by its id."""

    def __init__(self, table_id):
        super(SelectTableAction, self).__init__(table_id)

    # FIX: restored @property — __repr__ interpolates `self.table_id`, which
    # without the decorator is a bound method and rendered as '<bound method ...>'
    @property
    def table_id(self):
        # the selected table id is stored as the generic token payload
        return self.token

    def __repr__(self):
        return 'SelectTableAction[id=%s]' % self.table_id
def get_action_infos(src_query: list = None, tgt_actions: list = None, force_copy=False):
    """Replay `tgt_actions` on a fresh Hypothesis and collect one ActionInfo per step.

    @args:
        src_query: tokenized source question, or None when copy positions are not needed
        tgt_actions: gold action sequence to replay (None is treated as empty)
        force_copy: if True, raise when a GenToken token cannot be found in src_query
    @return:
        list of ActionInfo, aligned with tgt_actions
    """
    # FIX: avoid the mutable-default-argument pitfall (was `tgt_actions=[]`)
    tgt_actions = [] if tgt_actions is None else tgt_actions
    action_infos = []
    hyp = Hypothesis()
    for t, action in enumerate(tgt_actions):
        action_info = ActionInfo(action)
        action_info.t = t
        if hyp.frontier_node:
            action_info.parent_t = hyp.frontier_node.created_time
            action_info.frontier_prod = hyp.frontier_node.production
            action_info.frontier_field = hyp.frontier_field.field
        if isinstance(action, (SelectColumnAction, SelectTableAction)):
            # schema items are selected from the database, never copied from the question
            pass
        elif isinstance(action, GenTokenAction):  # GenToken
            # FIX: guard src_query — the original crashed with AttributeError
            # (`None.index`) when called without a source query, its default
            if src_query is not None:
                try:
                    tok_src_idx = src_query.index(str(action.token))
                    action_info.copy_from_src = True
                    action_info.src_token_position = tok_src_idx
                except ValueError:
                    if force_copy:
                        raise ValueError('cannot copy primitive token %s from source' % action.token)
            elif force_copy:
                raise ValueError('cannot copy primitive token %s from source' % action.token)
        hyp.apply_action(action)
        action_infos.append(action_info)
    return action_infos
163,753 | import os, json, pickle, argparse, sys, time
from asdl.asdl import ASDLGrammar
from asdl.transition_system import TransitionSystem
from asdl.action_info import get_action_infos
from preprocess.common_utils import Preprocessor
def process_tables(processor, tables_list, output_path=None, verbose=False):
    """Preprocess every database schema in `tables_list`.

    @args:
        processor: object exposing preprocess_database(db, verbose=...)
        tables_list: list of raw table dicts, each with a 'db_id' key
        output_path: optional pickle path for the processed result
        verbose: print a banner per database
    @return:
        dict mapping db_id -> processed database
    """
    tables = {}
    for each in tables_list:
        if verbose:
            print('*************** Processing database %s **************' % (each['db_id'])) #
        tables[each['db_id']] = processor.preprocess_database(each, verbose=verbose)
    print('In total, process %d databases .' % (len(tables)))
    if output_path is not None:
        # FIX: use a context manager; the original `pickle.dump(..., open(...))`
        # leaked the file handle
        with open(output_path, 'wb') as of:
            pickle.dump(tables, of)
    return tables
import os, json, pickle, argparse, sys, time
from collections import OrderedDict
from itertools import chain

from asdl.asdl import ASDLGrammar
from asdl.transition_system import TransitionSystem
from asdl.action_info import get_action_infos
from preprocess.common_utils import Preprocessor
def process_example(processor, entry, db, trans, verbose=False):
    """Preprocess one dataset example and attach its gold AST and action sequence.

    Returns the (mutated) entry with 'ast' and 'actions' fields populated.
    """
    # preprocess raw tokens, schema linking and subgraph extraction
    entry = processor.pipeline(entry, db, verbose=verbose)
    # convert the gold SQL into an AST, then linearize it into decoder actions
    sql_ast = trans.surface_code_to_ast(entry['sql'])
    gold_actions = trans.get_actions(sql_ast)
    entry['ast'] = sql_ast
    entry['actions'] = get_action_infos(tgt_actions=gold_actions)
    return entry
class ASDLGrammar(object):
    """
    Collection of types, constructors and productions
    """
    def __init__(self, productions, file_path):
        """
        @args:
            productions: non-empty list of productions; the first one provides the root type
            file_path: grammar file the productions came from (basename becomes the grammar name)
        """
        # productions are indexed by their head types
        file_name = os.path.basename(file_path)
        grammar_name = file_name[:file_name.index('.txt')] if '.txt' in file_name else file_name
        self._grammar_name = grammar_name
        self._productions = OrderedDict()
        self._constructor_production_map = dict()
        for prod in productions:
            if prod.type not in self._productions:
                self._productions[prod.type] = list()
            self._productions[prod.type].append(prod)
            self._constructor_production_map[prod.constructor.name] = prod
        self.root_type = productions[0].type
        # number of constructors
        self.size = sum(len(head) for head in self._productions.values())
        # get entities to their ids map
        self.prod2id = {prod: i for i, prod in enumerate(self.productions)}
        self.type2id = {tp: i for i, tp in enumerate(self.types)}
        self.field2id = {field: i for i, field in enumerate(self.fields)}
        self.id2prod = {i: prod for i, prod in enumerate(self.productions)}
        self.id2type = {i: tp for i, tp in enumerate(self.types)}
        self.id2field = {i: field for i, field in enumerate(self.fields)}

    def __len__(self):
        return self.size

    # FIX: restored @property — __init__ iterates `self.productions` as an
    # attribute (enumerate(self.productions)), which fails on a bound method
    @property
    def productions(self):
        """All productions in a deterministic order (sorted by repr)."""
        return sorted(chain.from_iterable(self._productions.values()), key=lambda x: repr(x))

    def __getitem__(self, datum):
        """Look up the productions headed by a type (given as name or ASDLType)."""
        if isinstance(datum, str):
            return self._productions[ASDLType(datum)]
        elif isinstance(datum, ASDLType):
            return self._productions[datum]

    def get_prod_by_ctr_name(self, name):
        """Return the unique production whose constructor is named `name`."""
        return self._constructor_production_map[name]

    # FIX: restored @property — consumed as `for prod in self.productions` style attributes
    @property
    def types(self):
        """All head and field types, sorted by name (computed once, then cached)."""
        if not hasattr(self, '_types'):
            all_types = set()
            for prod in self.productions:
                all_types.add(prod.type)
                all_types.update(map(lambda x: x.type, prod.constructor.fields))
            self._types = sorted(all_types, key=lambda x: x.name)
        return self._types

    @property
    def fields(self):
        """All distinct constructor fields, sorted deterministically (cached)."""
        if not hasattr(self, '_fields'):
            all_fields = set()
            for prod in self.productions:
                all_fields.update(prod.constructor.fields)
            self._fields = sorted(all_fields, key=lambda x: (x.name, x.type.name, x.cardinality))
        return self._fields

    @property
    def primitive_types(self):
        return filter(lambda x: isinstance(x, ASDLPrimitiveType), self.types)

    @property
    def composite_types(self):
        return filter(lambda x: isinstance(x, ASDLCompositeType), self.types)

    def is_composite_type(self, asdl_type):
        return asdl_type in self.composite_types

    def is_primitive_type(self, asdl_type):
        return asdl_type in self.primitive_types

    # FIX: restored @staticmethod — invoked as ASDLGrammar.from_filepath(path)
    @staticmethod
    def from_filepath(file_path):
        """Parse a textual ASDL grammar file and build an ASDLGrammar.

        The first (non-comment) line lists the primitive type names; each
        following block is `type = Ctor(field, ...) | Ctor2(...) | ...`,
        possibly continued on lines starting with '|'.
        """
        def _parse_field_from_text(_text):
            # a field is '<type><card-marker> <name>' where the marker is '', '*' or '?'
            d = _text.strip().split(' ')
            name = d[1].strip()
            type_str = d[0].strip()
            cardinality = 'single'
            if type_str[-1] == '*':
                type_str = type_str[:-1]
                cardinality = 'multiple'
            elif type_str[-1] == '?':
                type_str = type_str[:-1]
                cardinality = 'optional'
            if type_str in primitive_type_names:
                return Field(name, ASDLPrimitiveType(type_str), cardinality=cardinality)
            else:
                return Field(name, ASDLCompositeType(type_str), cardinality=cardinality)

        def _parse_constructor_from_text(_text):
            # a constructor is 'Name(field, field, ...)' or a bare (possibly empty) name
            _text = _text.strip()
            fields = None
            if '(' in _text:
                name = _text[:_text.find('(')]
                field_blocks = _text[_text.find('(') + 1:_text.find(')')].split(',')
                fields = map(_parse_field_from_text, field_blocks)
            else:
                name = _text
            if name == '': name = None
            return ASDLConstructor(name, fields)

        with open(file_path, 'r') as inf:
            text = inf.read()
        lines = remove_comment(text).split('\n')
        lines = list(map(lambda l: l.strip(), lines))
        lines = list(filter(lambda l: l, lines))
        line_no = 0
        # first line is always the primitive types
        primitive_type_names = list(map(lambda x: x.strip(), lines[line_no].split(',')))
        line_no += 1
        all_productions = list()
        while True:
            type_block = lines[line_no]
            type_name = type_block[:type_block.find('=')].strip()
            constructors_blocks = type_block[type_block.find('=') + 1:].split('|')
            # absorb continuation lines beginning with '|'
            i = line_no + 1
            while i < len(lines) and lines[i].strip().startswith('|'):
                t = lines[i].strip()
                cont_constructors_blocks = t[1:].split('|')
                constructors_blocks.extend(cont_constructors_blocks)
                i += 1
            constructors_blocks = filter(lambda x: x and x.strip(), constructors_blocks)
            # parse type name
            new_type = ASDLPrimitiveType(type_name) if type_name in primitive_type_names else ASDLCompositeType(type_name)
            constructors = map(_parse_constructor_from_text, constructors_blocks)
            productions = list(map(lambda c: ASDLProduction(new_type, c), constructors))
            all_productions.extend(productions)
            line_no = i
            if line_no == len(lines):
                break
        grammar = ASDLGrammar(all_productions, file_path)
        return grammar
class TransitionSystem(object):
    """Maps between ASDL syntax trees and linearized decoder action sequences.

    Language-specific subclasses implement the surface-code conversions and
    primitive-field handling; this base class provides the generic AST-to-action
    linearization and the legality checks used during decoding.
    """

    def __init__(self, grammar):
        # grammar: ASDLGrammar describing types, fields and productions
        self.grammar = grammar

    def get_actions(self, asdl_ast):
        """
        generate action sequence given the ASDL Syntax Tree
        """
        actions = []
        # the node itself is produced by applying its production rule
        parent_action = ApplyRuleAction(asdl_ast.production)
        actions.append(parent_action)
        for field in asdl_ast.fields:
            # is a composite field
            if self.grammar.is_composite_type(field.type):
                if field.cardinality == 'single':
                    field_actions = self.get_actions(field.value)
                else:
                    field_actions = []
                    if field.value is not None:
                        if field.cardinality == 'multiple':
                            # one recursive expansion per realized child
                            for val in field.value:
                                cur_child_actions = self.get_actions(val)
                                field_actions.extend(cur_child_actions)
                        elif field.cardinality == 'optional':
                            field_actions = self.get_actions(field.value)
                    # if an optional field is filled, then do not need Reduce action
                    # NOTE: Python precedence makes this `multiple or (optional and empty)`;
                    # 'multiple' fields therefore always terminate with a Reduce
                    if field.cardinality == 'multiple' or field.cardinality == 'optional' and not field_actions:
                        field_actions.append(ReduceAction())
            else:  # is a primitive field
                field_actions = self.get_primitive_field_actions(field)
                # if an optional field is filled, then do not need Reduce action
                # NOTE: same precedence as above — `multiple or (optional and empty)`
                if field.cardinality == 'multiple' or field.cardinality == 'optional' and not field_actions:
                    # reduce action
                    field_actions.append(ReduceAction())
            actions.extend(field_actions)
        return actions

    def tokenize_code(self, code, mode):
        # subclass hook: tokenize a piece of surface code
        raise NotImplementedError

    def compare_ast(self, hyp_ast, ref_ast):
        # subclass hook: semantic equality of two ASTs
        raise NotImplementedError

    def ast_to_surface_code(self, asdl_ast):
        # subclass hook: render an AST back to surface code
        raise NotImplementedError

    def surface_code_to_ast(self, code):
        # subclass hook: parse surface code into an AST
        raise NotImplementedError

    def get_primitive_field_actions(self, realized_field):
        # subclass hook: actions that realize one primitive field
        raise NotImplementedError

    def get_valid_continuation_types(self, hyp):
        """Return the tuple of action classes that may legally follow on `hyp`."""
        if hyp.tree:
            if self.grammar.is_composite_type(hyp.frontier_field.type):
                if hyp.frontier_field.cardinality == 'single':
                    return ApplyRuleAction,
                else: # optional, multiple
                    return ApplyRuleAction, ReduceAction
            else:
                if hyp.frontier_field.cardinality == 'single':
                    return GenTokenAction,
                elif hyp.frontier_field.cardinality == 'optional':
                    # a partially generated string value must be finished before Reduce
                    if hyp._value_buffer:
                        return GenTokenAction,
                    else:
                        return GenTokenAction, ReduceAction
                else:
                    return GenTokenAction, ReduceAction
        else:
            # empty hypothesis: only the root production may be applied
            return ApplyRuleAction,

    def get_valid_continuating_productions(self, hyp):
        """Return the productions applicable to the current frontier field."""
        if hyp.tree:
            if self.grammar.is_composite_type(hyp.frontier_field.type):
                return self.grammar[hyp.frontier_field.type]
            else:
                # primitive frontier fields take GenToken actions, not productions
                raise ValueError
        else:
            return self.grammar[self.grammar.root_type]

    # NOTE(review): defined without @staticmethod but always invoked through the
    # class as TransitionSystem.get_class_by_lang(lang), which works in Python 3
    def get_class_by_lang(lang):
        # factory: resolve the concrete transition-system class for a language
        if lang == 'sql':
            from asdl.sql.sql_transition_system import SQLTransitionSystem
        else:
            raise ValueError('unknown language %s' % lang)
        return SQLTransitionSystem
GRAMMAR_FILEPATH = 'asdl/sql/grammar/sql_asdl_v2.txt'

def process_dataset(processor, dataset, tables, output_path=None, skip_large=False, verbose=False):
    """Preprocess every example in `dataset` against its (already processed) database.

    @args:
        processor: Preprocessor instance
        dataset: list of raw example dicts, each with a 'db_id' key
        tables: db_id -> processed database dict
        output_path: optional pickle path for the processed examples
        skip_large: drop examples whose database has more than 100 columns
        verbose: print a banner per example
    @return:
        list of processed examples
    """
    # note: the module-level constant above is shadowed by the project-wide value
    from utils.constants import GRAMMAR_FILEPATH
    grammar = ASDLGrammar.from_filepath(GRAMMAR_FILEPATH)
    trans = TransitionSystem.get_class_by_lang('sql')(grammar)
    processed_dataset = []
    for idx, entry in enumerate(dataset):
        # optionally skip very wide databases to bound memory/time
        if skip_large and len(tables[entry['db_id']]['column_names']) > 100: continue
        if verbose:
            print('*************** Processing %d-th sample **************' % (idx))
        entry = process_example(processor, entry, tables[entry['db_id']], trans, verbose=verbose)
        processed_dataset.append(entry)
    print('In total, process %d samples , skip %d extremely large databases.' % (len(processed_dataset), len(dataset) - len(processed_dataset)))
    if output_path is not None:
        # serialize preprocessed dataset
        # FIX: context manager closes the handle (the original leaked it)
        with open(output_path, 'wb') as of:
            pickle.dump(processed_dataset, of)
    return processed_dataset
163,755 | import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
from utils.constants import MAX_RELATIVE_DIST
def is_number(s):
    """Return True iff `s` can be parsed by float() (including 'inf'/'nan').

    FIX: also catches TypeError so non-string, non-numeric inputs such as None
    return False instead of raising.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        return False
163,756 | import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
from utils.constants import MAX_RELATIVE_DIST
The provided code snippet includes the necessary dependencies for implementing the `quote_normalization` function. Write a Python function `def quote_normalization(question)` to solve the following problem:
Normalize all usage of quotation marks into a separate \" token.
Here is the function:
def quote_normalization(question):
    """ Normalize all usage of quotation marks into a separate \" """
    marks = ["'", '"', '`', '‘', '’', '“', '”', '``', "''", "‘‘", "’’"]
    normalized = []
    for pos, token in enumerate(question):
        long_enough = len(token) > 2
        opens = long_enough and token[0] in marks
        closes = long_enough and token[-1] in marks
        if opens and closes:
            # quoted value: split into open quote, inner text, close quote
            normalized.extend(["\"", token[1:-1], "\""])
        elif opens:
            normalized.extend(["\"", token[1:]])
        elif closes:
            normalized.extend([token[:-1], "\""])
        elif token in marks:
            # a standalone quotation mark of any flavour
            normalized.append("\"")
        elif len(token) == 2 and token[0] in marks \
                and pos + 1 < len(question) and question[pos + 1] in marks:
            # special case: the length of entity value is 1
            normalized.extend(["\"", token[1]])
        else:
            normalized.append(token)
    return normalized
163,761 | import os, json, pickle, argparse, sys, time
from preprocess.graph_utils import GraphProcessor
def process_dataset_graph(processor, dataset, tables, method, output_path=None, skip_large=False):
    """Attach graph structures (e.g. rgatsql/lgesql relations) to each example.

    @args:
        processor: GraphProcessor instance
        dataset: list of processed example dicts, each with a 'db_id' key
        tables: db_id -> processed database dict
        method: graph construction method, forwarded to process_graph_utils
        output_path: optional pickle path for the result
        skip_large: drop examples whose database has more than 100 columns
    @return:
        list of examples with graph information attached
    """
    processed_dataset = []
    for idx, entry in enumerate(dataset):
        db = tables[entry['db_id']]
        if skip_large and len(db['column_names']) > 100:
            continue
        if (idx + 1) % 500 == 0:
            print('Processing the %d-th example ...' % (idx + 1))
        entry = processor.process_graph_utils(entry, db, method=method)
        processed_dataset.append(entry)
    print('In total, process %d samples, skip %d samples .' % (len(processed_dataset), len(dataset) - len(processed_dataset)))
    if output_path is not None:
        # serialize preprocessed dataset
        # FIX: context manager closes the handle (the original leaked it)
        with open(output_path, 'wb') as of:
            pickle.dump(processed_dataset, of)
    return processed_dataset
163,762 | import sys, os, json, pickle, argparse, time, torch
from argparse import Namespace
from preprocess.process_dataset import process_tables, process_dataset
from preprocess.process_graphs import process_dataset_graph
from preprocess.common_utils import Preprocessor
from preprocess.graph_utils import GraphProcessor
from utils.example import Example
from utils.batch import Batch
from model.model_utils import Registrable
from model.model_constructor import *
dataset, tables = preprocess_database_and_dataset(db_dir=args.db_dir, table_path=args.table_path, dataset_path=args.dataset_path, method=params.model)
dataset = load_examples(dataset, tables)
with open(args.output_path, 'w', encoding='utf8') as of:
evaluator = Example.evaluator
for idx, hyp in enumerate(all_hyps):
pred_sql = evaluator.obtain_sql(hyp, dataset[idx].db)
# best_ast = hyp[0].tree # by default, the top beam prediction
# pred_sql = Example.trans.ast_to_surface_code(best_ast, dataset[idx].db)
of.write(pred_sql + '\n')
def process_tables(processor, tables_list, output_path=None, verbose=False):
def process_dataset(processor, dataset, tables, output_path=None, skip_large=False, verbose=False):
def process_dataset_graph(processor, dataset, tables, method, output_path=None, skip_large=False):
class Preprocessor():
def __init__(self, db_dir='data/database', db_content=True):
def pipeline(self, entry: dict, db: dict, verbose: bool = False):
def preprocess_database(self, db: dict, verbose: bool = False):
def preprocess_question(self, entry: dict, db: dict, verbose: bool = False):
def extract_subgraph(self, entry: dict, db: dict, verbose: bool = False):
def extract_subgraph_from_sql(self, sql: dict, used_schema: dict):
def extract_subgraph_from_conds(self, conds: list, used_schema: dict):
def schema_linking(self, entry: dict, db: dict, verbose: bool = False):
class GraphProcessor():
def process_rgatsql(self, ex: dict, db: dict, relation: list):
def check_node(i):
def process_lgesql(self, ex: dict, db: dict, relation: list):
def process_graph_utils(self, ex: dict, db: dict, method: str = 'rgatsql'):
def preprocess_database_and_dataset(db_dir='database/', table_path='data/tables.json', dataset_path='data/dev.json', method='lgesql'):
    """Load raw tables/examples from JSON and run the full preprocessing pipeline.

    @return:
        (processed dataset with graph info, processed tables dict)
    """
    # FIX: context managers close the JSON files (the originals leaked the handles)
    with open(table_path, 'r') as tf:
        tables = json.load(tf)
    with open(dataset_path, 'r') as df:
        dataset = json.load(df)
    processor = Preprocessor(db_dir=db_dir, db_content=True)
    output_tables = process_tables(processor, tables)
    output_dataset = process_dataset(processor, dataset, output_tables)
    graph_processor = GraphProcessor()
    output_dataset = process_dataset_graph(graph_processor, output_dataset, output_tables, method=method)
    return output_dataset, output_tables
163,764 | import sys, os, time, json, gc
from argparse import Namespace
from utils.args import init_args
from utils.hyperparams import hyperparam_path
from utils.initialization import *
from utils.example import Example
from utils.batch import Batch
from utils.optimization import set_optimizer
from model.model_utils import Registrable
from model.model_constructor import *
# ----------------------------------------------------------------------------
# Experiment entry: parse arguments, build the model, then train or evaluate.
# NOTE(review): `exp_path`, `logger` and `sql_trans` are expected to be defined
# earlier in the full script (not visible in this chunk) — confirm.
# ----------------------------------------------------------------------------
args = init_args(sys.argv[1:])
device = set_torch_device(args.device)
# make cuDNN deterministic for reproducibility
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if args.read_model_path:
    # resuming/evaluating: reload the hyper-parameters the model was saved with
    # FIX: context manager closes the file (the original `json.load(open(...))` leaked it)
    with open(os.path.join(args.read_model_path, 'params.json')) as pf:
        params = json.load(pf, object_hook=lambda d: Namespace(**d))
    params.lazy_load = True
else:
    params = args
train_dataset, dev_dataset, rd_dataset = Example.load_dataset('train'), Example.load_dataset('dev'), Example.load_dataset('rd')
args.word_vocab, args.relation_num = len(Example.word_vocab), len(Example.relation_vocab)
model = Registrable.by_name('text2sql')(params, sql_trans).to(device)
if args.read_model_path:
    with open(os.path.join(args.read_model_path, 'model.bin'), 'rb') as mf:
        check_point = torch.load(mf, map_location=device)
    model.load_state_dict(check_point['model'])
    logger.info("Load saved model from path: %s" % (args.read_model_path))
else:
    with open(os.path.join(exp_path, 'params.json'), 'w') as pf:
        json.dump(vars(params), pf, indent=4)
if params.plm is None:
    # no pretrained language model: initialize word embeddings from word2vec
    ratio = Example.word2vec.load_embeddings(model.encoder.input_layer.word_embed, Example.word_vocab, device=device)
    logger.info("Init model and word embedding layer with a coverage %.2f" % (ratio))
if not args.testing:
    num_training_steps = ((len(train_dataset) + args.batch_size - 1) // args.batch_size) * args.max_epoch
    num_warmup_steps = int(num_training_steps * args.warmup_ratio)
    logger.info('Total training steps: %d;\t Warmup steps: %d' % (num_training_steps, num_warmup_steps))
    optimizer, scheduler = set_optimizer(model, args, num_warmup_steps, num_training_steps)
    start_epoch, nsamples, best_result = 0, len(train_dataset), {'dev_acc': 0.}
    train_index, step_size = np.arange(nsamples), args.batch_size // args.grad_accumulate
    rs = len(rd_dataset)
    r_idx = np.arange(rs)
    if args.read_model_path and args.load_optimizer:
        optimizer.load_state_dict(check_point['optim'])
        scheduler.load_state_dict(check_point['scheduler'])
        start_epoch = check_point['epoch'] + 1
    logger.info('Start training ......')
    for i in range(start_epoch, args.max_epoch):
        start_time = time.time()
        epoch_loss, epoch_gp_loss, count = 0, 0, 0
        # shuffle the training set in adjacent pairs: samples 2k and 2k+1 must
        # stay together (they share the same gold query, asserted below)
        sub_train_index = np.arange(0, nsamples, 2)
        np.random.shuffle(sub_train_index)
        train_index = []
        for ti in sub_train_index:
            train_index.append(ti)
            train_index.append(ti + 1)
        model.train()
        for j in range(0, nsamples, step_size):
            count += 1
            cur_dataset = [train_dataset[k] for k in train_index[j: j + step_size]]
            # sanity check: paired examples must share the same SQL query
            for wl in range(0, len(cur_dataset), 2):
                now = cur_dataset[wl: wl + 2]
                assert now[0].query == now[1].query
            current_batch = Batch.from_example_list(cur_dataset, device, train=True, smoothing=args.smoothing)
            loss, gp_loss = model(current_batch)  # see utils/batch.py for batch elements
            epoch_loss += loss.item()
            epoch_gp_loss += gp_loss.item()
            loss += gp_loss
            loss.backward()
            if count == args.grad_accumulate or j + step_size >= nsamples:
                count = 0
                model.pad_embedding_grad_zero()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
        # second training pass over the auxiliary `rd` dataset
        np.random.shuffle(r_idx)
        for j in range(0, rs, step_size):
            count += 1
            cur_dataset = [rd_dataset[k] for k in r_idx[j: j + step_size]]
            # NOTE(review): the flush conditions below compare against `nsamples`
            # (train-set size) rather than `rs` — preserved as-is, but confirm.
            if len(cur_dataset) >= 4:
                # build one batch per example: the example duplicated, followed
                # by every other example in the mini-batch
                for ii in range(len(cur_dataset)):
                    c_dataset = [cur_dataset[ii]] * 2
                    if ii == 0:
                        c_dataset.extend(cur_dataset[1:])
                    else:
                        c_dataset.extend(cur_dataset[0:ii])
                        c_dataset.extend(cur_dataset[ii + 1:])
                    current_batch = Batch.from_example_list_drop(c_dataset, device, train=True, smoothing=args.smoothing)
                    loss, gp_loss = model(current_batch, flag=False)  # see utils/batch.py for batch elements
                    epoch_loss += loss.item()
                    epoch_gp_loss += gp_loss.item()
                    loss += gp_loss
                    loss.backward()
                    if count == args.grad_accumulate or j + step_size >= nsamples:
                        count = 0
                        model.pad_embedding_grad_zero()
                        optimizer.step()
                        scheduler.step()
                        optimizer.zero_grad()
            else:
                current_batch = Batch.from_example_list(cur_dataset, device, train=True, smoothing=args.smoothing)
                loss, gp_loss = model(current_batch, flag=False)  # see utils/batch.py for batch elements
                epoch_loss += loss.item()
                epoch_gp_loss += gp_loss.item()
                loss += gp_loss
                loss.backward()
                if count == args.grad_accumulate or j + step_size >= nsamples:
                    count = 0
                    model.pad_embedding_grad_zero()
                    optimizer.step()
                    scheduler.step()
                    optimizer.zero_grad()
        logger.info('Training: \tEpoch: %d\tTime: %.4f\tTraining loss: %.4f/%.4f' % (i, time.time() - start_time, epoch_loss, epoch_gp_loss))
        torch.cuda.empty_cache()
        gc.collect()
        if i < args.eval_after_epoch:  # avoid unnecessary evaluation
            continue
        start_time = time.time()
        dev_acc = decode('dev', os.path.join(exp_path, 'dev.iter' + str(i)), acc_type='sql')
        logger.info('Evaluation: \tEpoch: %d\tTime: %.4f\tDev acc: %.4f' % (i, time.time() - start_time, dev_acc))
        if dev_acc > best_result['dev_acc']:
            best_result['dev_acc'], best_result['iter'] = dev_acc, i
            with open(os.path.join(exp_path, 'model.bin'), 'wb') as mf:
                torch.save({
                    'epoch': i, 'model': model.state_dict(),
                    'optim': optimizer.state_dict(),
                    'scheduler': scheduler.state_dict()
                }, mf)
            logger.info('NEW BEST MODEL: \tEpoch: %d\tDev acc: %.4f' % (i, dev_acc))
    logger.info('FINAL BEST RESULT: \tEpoch: %d\tDev acc: %.4f' % (best_result['iter'], best_result['dev_acc']))
else:
    start_time = time.time()
    dev_acc = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval'), acc_type='sql')
    dev_acc_checker = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval.checker'), acc_type='sql', use_checker=True)
    dev_acc_beam = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval.beam' + str(args.beam_size)), acc_type='beam')
    # FIX: the original format string had only two %.4f slots for the three
    # accuracy values, raising TypeError at runtime
    logger.info("Evaluation costs %.2fs ; Dev dataset exact match/checker/beam acc is %.4f/%.4f/%.4f ." % (time.time() - start_time, dev_acc, dev_acc_checker, dev_acc_beam))
class Batch():
def __init__(self, examples, device='cpu'):
def from_example_list(cls, ex_list, device='cpu', train=True, method='text2sql', **kwargs):
def __len__(self):
def __getitem__(self, idx):
def max_question_len(self):
def max_table_len(self):
def max_column_len(self):
def max_table_word_len(self):
def max_column_word_len(self):
def max_question_subword_len(self):
def max_table_subword_len(self):
def max_column_subword_len(self):
def mask(self):
def question_mask(self):
def table_mask(self):
def column_mask(self):
def table_word_mask(self):
def column_word_mask(self):
def question_subword_mask(self):
def table_subword_mask(self):
def column_subword_mask(self):
def get_frontier_field_idx(self, t):
def get_frontier_prod_idx(self, t):
def get_frontier_field_type_idx(self, t):
def decode(choice, output_path, acc_type='sql', use_checker=False):
    """Decode the chosen split with beam search and return its accuracy.

    Relies on module-level globals: model, train_dataset, dev_dataset, args,
    device and evaluator.
    """
    assert acc_type in ['beam', 'ast', 'sql'] and choice in ['train', 'dev']
    model.eval()
    eval_set = train_dataset if choice == 'train' else dev_dataset
    hypotheses = []
    with torch.no_grad():
        for start in range(0, len(eval_set), args.batch_size):
            batch = Batch.from_example_list(eval_set[start: start + args.batch_size], device, train=False)
            hypotheses.extend(model.parse(batch, args.beam_size))
    accuracy = evaluator.acc(hypotheses, eval_set, output_path, acc_type=acc_type, etype='match', use_checker=use_checker)
    # free cached GPU memory between evaluation rounds
    torch.cuda.empty_cache()
    gc.collect()
    return accuracy
163,771 | import copy, math
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
from torch.nn import functional as F
The provided code snippet includes necessary dependencies for implementing the `clones` function. Write a Python function `def clones(module, N)` to solve the following problem:
Produce N identical layers.
Here is the function:
def clones(module, N):
    "Produce N identical layers."
    layers = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(layers)
163,772 | import copy, math
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
from torch.nn import functional as F
def lens2mask(lens):
    """Turn a 1-D tensor of sequence lengths into a (bsize, max_len) boolean
    padding mask where position j of row i is True iff j < lens[i]."""
    num_seqs = lens.numel()
    longest = lens.max()
    positions = torch.arange(0, longest).type_as(lens).to(lens.device)
    masks = positions.repeat(num_seqs, 1).lt(lens.unsqueeze(1))
    # the mask is pure bookkeeping, never differentiated through
    masks.requires_grad = False
    return masks
163,773 | import copy, math
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
from torch.nn import functional as F
def mask2matrix(mask):
    """Expand a (..., L) boolean mask into a pairwise (..., L, L) matrix whose
    entry (i, j) is True iff both positions i and j are unmasked."""
    return mask.unsqueeze(-1) & mask.unsqueeze(-2)
163,774 | import copy, math
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
from torch.nn import functional as F
The provided code snippet includes necessary dependencies for implementing the `tile` function. Write a Python function `def tile(x, count, dim=0)` to solve the following problem:
Tiles x on dimension dim count times. E.g. [1, 2, 3], count=2 ==> [1, 1, 2, 2, 3, 3] [[1, 2], [3, 4]], count=3, dim=1 ==> [[1, 1, 1, 2, 2, 2], [3, 3, 3, 4, 4, 4]] Different from torch.repeat
Here is the function:
def tile(x, count, dim=0):
    """
    Tiles x on dimension dim count times.
    E.g. [1, 2, 3], count=2 ==> [1, 1, 2, 2, 3, 3]
    [[1, 2], [3, 4]], count=3, dim=1 ==> [[1, 1, 1, 2, 2, 2], [3, 3, 3, 4, 4, 4]]
    Different from torch.repeat
    """
    if x is None:
        return x
    if isinstance(x, (list, tuple)):
        # tile each element recursively, preserving the container type
        return type(x)([tile(item, count, dim) for item in x])
    # bring the target dimension to the front so we can tile along dim 0
    perm = list(range(len(x.size())))
    if dim != 0:
        perm[0], perm[dim] = perm[dim], perm[0]
        x = x.permute(perm).contiguous()
    tiled_shape = list(x.size())
    tiled_shape[0] *= count
    rows = x.size(0)
    x = (x.contiguous()
          .view(rows, -1)
          .transpose(0, 1)
          .repeat(count, 1)
          .transpose(0, 1)
          .contiguous()
          .view(*tiled_shape))
    if dim != 0:
        # undo the dimension swap (perm is its own inverse for a single swap)
        x = x.permute(perm).contiguous()
    return x
163,775 | import copy, math
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
from torch.nn import functional as F
# The imports above provide the dependencies needed by the `rnn_wrapper` function.
# `def rnn_wrapper(encoder, inputs, lens, cell='lstm')` solves the following problem:
# run a bidirectional batch-first RNN encoder over padded inputs, allowing
# zero-length samples, and return outputs/final hidden states re-ordered to the
# original batch order. Here is the function:
def rnn_wrapper(encoder, inputs, lens, cell='lstm'):
    """
    Run a (bidirectional, batch_first) RNN over padded inputs, tolerating
    zero-length samples, and restore the original batch order afterwards.

    @args:
        encoder(nn.Module): rnn series bidirectional encoder, batch_first=True
        inputs(torch.FloatTensor): rnn inputs, [bsize x max_seq_len x in_dim]
        lens(torch.LongTensor): seq len for each sample, allow length=0, padding with 0-vector, [bsize]
    @return:
        out(torch.FloatTensor): output of encoder, bsize x max_seq_len x hidden_dim*2
        hidden_states([tuple of ]torch.FloatTensor): final hidden states, num_layers*2 x bsize x hidden_dim
    """
    # rerank according to lens and remove empty inputs
    # NOTE(review): assumes at least one sample has length > 0 — confirm callers guarantee this
    sorted_lens, sort_key = torch.sort(lens, descending=True)
    nonzero_num, total_num = torch.sum(sorted_lens > 0).item(), sorted_lens.size(0)
    sort_key = sort_key[:nonzero_num]  # original positions of the non-empty sequences, longest first
    sorted_inputs = torch.index_select(inputs, dim=0, index=sort_key)
    # forward non empty inputs
    packed_inputs = rnn_utils.pack_padded_sequence(sorted_inputs, sorted_lens[:nonzero_num].tolist(), batch_first=True)
    packed_out, sorted_h = encoder(packed_inputs) # bsize x srclen x dim
    sorted_out, _ = rnn_utils.pad_packed_sequence(packed_out, batch_first=True)
    if cell.upper() == 'LSTM':
        sorted_h, sorted_c = sorted_h
    # rerank according to sort_key
    # scatter_ writes each processed row back to its original batch slot;
    # rows belonging to zero-length inputs remain all-zero
    out_shape = list(sorted_out.size())
    out_shape[0] = total_num
    out = sorted_out.new_zeros(*out_shape).scatter_(0, sort_key.unsqueeze(-1).unsqueeze(-1).repeat(1, *out_shape[1:]), sorted_out)
    h_shape = list(sorted_h.size())
    h_shape[1] = total_num  # hidden states index the batch on dim 1
    h = sorted_h.new_zeros(*h_shape).scatter_(1, sort_key.unsqueeze(0).unsqueeze(-1).repeat(h_shape[0], 1, h_shape[-1]), sorted_h)
    if cell.upper() == 'LSTM':
        c = sorted_c.new_zeros(*h_shape).scatter_(1, sort_key.unsqueeze(0).unsqueeze(-1).repeat(h_shape[0], 1, h_shape[-1]), sorted_c)
        return out, (h.contiguous(), c.contiguous())
    return out, h.contiguous()
import torch
import numpy as np
from utils.example import Example, get_position_ids, get_position_ids_drop
from utils.constants import PAD, UNK
from model.model_utils import lens2mask, cached_property
import torch.nn.functional as F
def from_example_list_base(ex_list, device='cpu', train=True):
    """
    Tensorize the encoder-side fields shared by all batching variants.

    question_lens: torch.long, bsize
    questions: torch.long, bsize x max_question_len, include [CLS] if add_cls
    table_lens: torch.long, bsize, number of tables for each example
    table_word_lens: torch.long, number of words for each table name
    tables: torch.long, sum_of_tables x max_table_word_len
    column_lens: torch.long, bsize, number of columns for each example
    column_word_lens: torch.long, number of words for each column name
    columns: torch.long, sum_of_columns x max_column_word_len
    """
    batch = Batch(ex_list, device)  # NOTE(review): Batch is declared elsewhere in this module
    plm = Example.plm
    # padding index differs between the GloVe vocabulary and the PLM tokenizer
    pad_idx = Example.word_vocab[PAD] if plm is None else Example.tokenizer.pad_token_id
    question_lens = [len(ex.question) for ex in ex_list]
    batch.question_lens = torch.tensor(question_lens, dtype=torch.long, device=device)
    batch.table_lens = torch.tensor([len(ex.table) for ex in ex_list], dtype=torch.long, device=device)
    table_word_lens = [len(t) for ex in ex_list for t in ex.table]
    batch.table_word_lens = torch.tensor(table_word_lens, dtype=torch.long, device=device)
    batch.column_lens = torch.tensor([len(ex.column) for ex in ex_list], dtype=torch.long, device=device)
    column_word_lens = [len(c) for ex in ex_list for c in ex.column]
    batch.column_word_lens = torch.tensor(column_word_lens, dtype=torch.long, device=device)
    if plm is None: # glove.42B.300d
        # pad each id sequence to the batch-wide maximum length
        questions = [ex.question_id + [pad_idx] * (batch.max_question_len - len(ex.question_id)) for ex in ex_list]
        batch.questions = torch.tensor(questions, dtype=torch.long, device=device)
        tables = [t + [pad_idx] * (batch.max_table_word_len - len(t)) for ex in ex_list for t in ex.table_id]
        batch.tables = torch.tensor(tables, dtype=torch.long, device=device)
        columns = [c + [pad_idx] * (batch.max_column_word_len - len(c)) for ex in ex_list for c in ex.column_id]
        batch.columns = torch.tensor(columns, dtype=torch.long, device=device)
    else:
        # prepare inputs for pretrained models
        batch.inputs = {"input_ids": None, "attention_mask": None, "token_type_ids": None, "position_ids": None}
        input_lens = [len(ex.input_id) for ex in ex_list]
        max_len = max(input_lens)
        input_ids = [ex.input_id + [pad_idx] * (max_len - len(ex.input_id)) for ex in ex_list]
        batch.inputs["input_ids"] = torch.tensor(input_ids, dtype=torch.long, device=device)
        attention_mask = [[1] * l + [0] * (max_len - l) for l in input_lens]
        batch.inputs["attention_mask"] = torch.tensor(attention_mask, dtype=torch.float, device=device)
        token_type_ids = [ex.segment_id + [0] * (max_len - len(ex.segment_id)) for ex in ex_list]
        batch.inputs["token_type_ids"] = torch.tensor(token_type_ids, dtype=torch.long, device=device)
        # position ids are shuffled only during training (see get_position_ids)
        position_ids = [get_position_ids(ex, shuffle=train) + [0] * (max_len - len(ex.input_id)) for ex in ex_list]
        batch.inputs["position_ids"] = torch.tensor(position_ids, dtype=torch.long, device=device)
        # extract representations after plm, remove [SEP]
        question_mask_plm = [ex.question_mask_plm + [0] * (max_len - len(ex.question_mask_plm)) for ex in ex_list]
        batch.question_mask_plm = torch.tensor(question_mask_plm, dtype=torch.bool, device=device)
        table_mask_plm = [ex.table_mask_plm + [0] * (max_len - len(ex.table_mask_plm)) for ex in ex_list]
        batch.table_mask_plm = torch.tensor(table_mask_plm, dtype=torch.bool, device=device)
        column_mask_plm = [ex.column_mask_plm + [0] * (max_len - len(ex.column_mask_plm)) for ex in ex_list]
        batch.column_mask_plm = torch.tensor(column_mask_plm, dtype=torch.bool, device=device)
        # subword aggregation
        question_subword_lens = [l for ex in ex_list for l in ex.question_subword_len]
        batch.question_subword_lens = torch.tensor(question_subword_lens, dtype=torch.long, device=device)
        table_subword_lens = [l for ex in ex_list for l in ex.table_subword_len]
        batch.table_subword_lens = torch.tensor(table_subword_lens, dtype=torch.long, device=device)
        column_subword_lens = [l for ex in ex_list for l in ex.column_subword_len]
        batch.column_subword_lens = torch.tensor(column_subword_lens, dtype=torch.long, device=device)
    batch.question_unk_mask, batch.table_unk_mask, batch.column_unk_mask = None, None, None
    if not train and plm is None:
        # during evaluation, for words not in vocab but in glove vocab, extract its correpsonding embedding
        word2vec, unk_idx = Example.word2vec, Example.word_vocab[UNK]
        question_unk_mask = (batch.questions == unk_idx).cpu()
        if question_unk_mask.any().item():
            # dtype '<U100' caps each raw word at 100 unicode characters
            raw_questions = np.array([ex.question + [PAD] * (batch.max_question_len - len(ex.question)) for ex in ex_list], dtype='<U100')
            unk_words = raw_questions[question_unk_mask.numpy()].tolist()
            unk_word_embeddings = [word2vec.emb(w) for w in unk_words]
            oov_flag = torch.tensor([True if e is not None else False for e in unk_word_embeddings], dtype=torch.bool)
            if oov_flag.any().item():
                # keep only the UNK positions that actually have a glove embedding
                batch.question_unk_mask = question_unk_mask.masked_scatter_(torch.clone(question_unk_mask), oov_flag).to(device)
                batch.question_unk_embeddings = torch.tensor([e for e in unk_word_embeddings if e is not None], dtype=torch.float, device=device)
        table_unk_mask = (batch.tables == unk_idx).cpu()
        if table_unk_mask.any().item():
            raw_tables = np.array([t + [PAD] * (batch.max_table_word_len - len(t)) for ex in ex_list for t in ex.table], dtype='<U100')
            unk_words = raw_tables[table_unk_mask.numpy()].tolist()
            unk_word_embeddings = [word2vec.emb(w) for w in unk_words]
            oov_flag = torch.tensor([True if e is not None else False for e in unk_word_embeddings], dtype=torch.bool)
            if oov_flag.any().item():
                batch.table_unk_mask = table_unk_mask.masked_scatter_(torch.clone(table_unk_mask), oov_flag).to(device)
                batch.table_unk_embeddings = torch.tensor([e for e in unk_word_embeddings if e is not None], dtype=torch.float, device=device)
        column_unk_mask = (batch.columns == unk_idx).cpu()
        if column_unk_mask.any().item():
            raw_columns = np.array([c + [PAD] * (batch.max_column_word_len - len(c)) for ex in ex_list for c in ex.column], dtype='<U100')
            unk_words = raw_columns[column_unk_mask.numpy()].tolist()
            unk_word_embeddings = [word2vec.emb(w) for w in unk_words]
            oov_flag = torch.tensor([True if e is not None else False for e in unk_word_embeddings], dtype=torch.bool)
            if oov_flag.any().item():
                batch.column_unk_mask = column_unk_mask.masked_scatter_(torch.clone(column_unk_mask), oov_flag).to(device)
                batch.column_unk_embeddings = torch.tensor([e for e in unk_word_embeddings if e is not None], dtype=torch.float, device=device)
    return batch
class Example():
    """One text2sql sample tied to its database schema.

    Class-level resources (grammar, tokenizer, vocab, graph factory, ...) are
    installed once via `configuration` before any instance is created.
    """
    def configuration(cls, plm=None, method='lgesql', table_path='data/tables.json', tables='data/tables.bin', db_dir='data/database'):
        # Install shared resources on the class itself.
        # NOTE(review): first parameter is `cls` — presumably decorated with
        # @classmethod in the original source; confirm the decorator was not
        # lost during extraction.
        cls.plm, cls.method = plm, method
        cls.grammar = ASDLGrammar.from_filepath(GRAMMAR_FILEPATH)
        cls.trans = TransitionSystem.get_class_by_lang('sql')(cls.grammar)
        cls.tables = pickle.load(open(tables, 'rb')) if type(tables) == str else tables
        cls.evaluator = Evaluator(cls.trans, table_path, db_dir)
        if plm is None:
            cls.word2vec = Word2vecUtils()
            cls.tokenizer = lambda x: x
            cls.word_vocab = Vocab(padding=True, unk=True, boundary=True, default=UNK,
                filepath='./pretrained_models/glove.42b.300d/vocab.txt', specials=SCHEMA_TYPES) # word vocab for glove.42B.300d
        else:
            cls.tokenizer = AutoTokenizer.from_pretrained(os.path.join('./pretrained_models', plm))
            cls.word_vocab = cls.tokenizer.get_vocab()
        cls.relation_vocab = Vocab(padding=False, unk=False, boundary=False, iterable=RELATIONS, default=None)
        cls.graph_factory = GraphFactory(cls.method, cls.relation_vocab)
    def load_dataset(cls, choice, debug=False):
        """Load the pickled train/dev split; during training, skip examples
        whose schema has more than 100 columns.

        NOTE(review): also takes `cls` — likely a @classmethod (see above).
        """
        assert choice in ['train', 'dev']
        fp = os.path.join('data', choice + '.' + cls.method + '.bin')
        datasets = pickle.load(open(fp, 'rb'))
        # question_lens = [len(ex['processed_question_toks']) for ex in datasets]
        # print('Max/Min/Avg question length in %s dataset is: %d/%d/%.2f' % (choice, max(question_lens), min(question_lens), float(sum(question_lens))/len(question_lens)))
        # action_lens = [len(ex['actions']) for ex in datasets]
        # print('Max/Min/Avg action length in %s dataset is: %d/%d/%.2f' % (choice, max(action_lens), min(action_lens), float(sum(action_lens))/len(action_lens)))
        examples, outliers = [], 0
        for ex in datasets:
            if choice == 'train' and len(cls.tables[ex['db_id']]['column_names']) > 100:
                outliers += 1
                continue
            examples.append(cls(ex, cls.tables[ex['db_id']]))
            if debug and len(examples) >= 100:
                # debug mode: early-return with the first 100 examples only
                return examples
        if choice == 'train':
            print("Skip %d extremely large samples in training dataset ..." % (outliers))
        return examples
    def __init__(self, ex: dict, db: dict):
        super(Example, self).__init__()
        self.ex = ex
        self.db = db
        """ Mapping word to corresponding index """
        if Example.plm is None:
            # GloVe path: tokens were pre-processed offline
            self.question = ex['processed_question_toks']
            self.question_id = [Example.word_vocab[w] for w in self.question]
            self.column = [[db['column_types'][idx].lower()] + c for idx, c in enumerate(db['processed_column_toks'])]
            self.column_id = [[Example.word_vocab[w] for w in c] for c in self.column]
            self.table = [['table'] + t for t in db['processed_table_toks']]
            self.table_id = [[Example.word_vocab[w] for w in t] for t in self.table]
        else:
            t = Example.tokenizer
            self.question = [q.lower() for q in ex['raw_question_toks']]
            self.question_id = [t.cls_token_id] # map token to id
            self.question_mask_plm = [] # remove SEP token in our case
            self.question_subword_len = [] # subword len for each word, exclude SEP token
            for w in self.question:
                toks = t.convert_tokens_to_ids(t.tokenize(w))
                self.question_id.extend(toks)
                self.question_subword_len.append(len(toks))
            self.question_mask_plm = [0] + [1] * (len(self.question_id) - 1) + [0]
            self.question_id.append(t.sep_token_id)
            # note: the comprehension variable `t` below does NOT clobber the
            # tokenizer binding — py3 comprehensions have their own scope
            self.table = [['table'] + t.lower().split() for t in db['table_names']]
            self.table_id, self.table_mask_plm, self.table_subword_len = [], [], []
            self.table_word_len = []
            for s in self.table:
                l = 0
                for w in s:
                    toks = t.convert_tokens_to_ids(t.tokenize(w))
                    self.table_id.extend(toks)
                    self.table_subword_len.append(len(toks))
                    l += len(toks)
                self.table_word_len.append(l)
            self.table_mask_plm = [1] * len(self.table_id)
            self.column = [[db['column_types'][idx].lower()] + c.lower().split() for idx, (_, c) in enumerate(db['column_names'])]
            self.column_id, self.column_mask_plm, self.column_subword_len = [], [], []
            self.column_word_len = []
            for s in self.column:
                l = 0
                for w in s:
                    toks = t.convert_tokens_to_ids(t.tokenize(w))
                    self.column_id.extend(toks)
                    self.column_subword_len.append(len(toks))
                    l += len(toks)
                self.column_word_len.append(l)
            self.column_mask_plm = [1] * len(self.column_id) + [0]
            self.column_id.append(t.sep_token_id)
            # full PLM input: [CLS] question [SEP] tables columns [SEP]
            self.input_id = self.question_id + self.table_id + self.column_id
            self.segment_id = [0] * len(self.question_id) + [1] * (len(self.table_id) + len(self.column_id)) \
                if Example.plm != 'grappa_large_jnt' and not Example.plm.startswith('roberta') \
                else [0] * (len(self.question_id) + len(self.table_id) + len(self.column_id))
            self.question_mask_plm = self.question_mask_plm + [0] * (len(self.table_id) + len(self.column_id))
            self.table_mask_plm = [0] * len(self.question_id) + self.table_mask_plm + [0] * len(self.column_id)
            self.column_mask_plm = [0] * (len(self.question_id) + len(self.table_id)) + self.column_mask_plm
        self.graph = Example.graph_factory.graph_construction(ex, db)
        # outputs
        self.query = ' '.join(ex['query'].split('\t'))
        self.ast = ex['ast']
        self.tgt_action = ex['actions']
        self.used_tables, self.used_columns = ex['used_tables'], ex['used_columns']
# The code above provides the dependencies needed by `from_example_list_text2sql`.
# `def from_example_list_text2sql(ex_list, device='cpu', train=True, **kwargs)` adds:
# New fields: batch.lens, batch.max_len, batch.relations, batch.relations_mask.
# Here is the function:
def from_example_list_text2sql(ex_list, device='cpu', train=True, **kwargs):
    """Build a full text2sql batch: base tensor fields plus the batched graph.

    New fields: batch.lens, batch.max_len, batch.relations, batch.relations_mask
    """
    batch = from_example_list_base(ex_list, device, train)
    graphs = Example.graph_factory.batch_graphs(ex_list, device, train=train, **kwargs)
    batch.graph = graphs
    if train:
        # longest decoder action sequence in this batch
        batch.max_action_num = max(len(ex.tgt_action) for ex in ex_list)
    return batch
import torch
import numpy as np
from utils.example import Example, get_position_ids, get_position_ids_drop
from utils.constants import PAD, UNK
from model.model_utils import lens2mask, cached_property
import torch.nn.functional as F
def from_example_list_base_drop(ex_list, device='cpu', train=True):
    """
    Tensorize the encoder-side fields; variant of `from_example_list_base` that
    computes position ids with `get_position_ids_drop` (which also receives the
    example's index within the batch).

    question_lens: torch.long, bsize
    questions: torch.long, bsize x max_question_len, include [CLS] if add_cls
    table_lens: torch.long, bsize, number of tables for each example
    table_word_lens: torch.long, number of words for each table name
    tables: torch.long, sum_of_tables x max_table_word_len
    column_lens: torch.long, bsize, number of columns for each example
    column_word_lens: torch.long, number of words for each column name
    columns: torch.long, sum_of_columns x max_column_word_len
    """
    batch = Batch(ex_list, device)  # NOTE(review): Batch is declared elsewhere in this module
    plm = Example.plm
    # padding index differs between the GloVe vocabulary and the PLM tokenizer
    pad_idx = Example.word_vocab[PAD] if plm is None else Example.tokenizer.pad_token_id
    question_lens = [len(ex.question) for ex in ex_list]
    batch.question_lens = torch.tensor(question_lens, dtype=torch.long, device=device)
    batch.table_lens = torch.tensor([len(ex.table) for ex in ex_list], dtype=torch.long, device=device)
    table_word_lens = [len(t) for ex in ex_list for t in ex.table]
    batch.table_word_lens = torch.tensor(table_word_lens, dtype=torch.long, device=device)
    batch.column_lens = torch.tensor([len(ex.column) for ex in ex_list], dtype=torch.long, device=device)
    column_word_lens = [len(c) for ex in ex_list for c in ex.column]
    batch.column_word_lens = torch.tensor(column_word_lens, dtype=torch.long, device=device)
    if plm is None: # glove.42B.300d
        # pad each id sequence to the batch-wide maximum length
        questions = [ex.question_id + [pad_idx] * (batch.max_question_len - len(ex.question_id)) for ex in ex_list]
        batch.questions = torch.tensor(questions, dtype=torch.long, device=device)
        tables = [t + [pad_idx] * (batch.max_table_word_len - len(t)) for ex in ex_list for t in ex.table_id]
        batch.tables = torch.tensor(tables, dtype=torch.long, device=device)
        columns = [c + [pad_idx] * (batch.max_column_word_len - len(c)) for ex in ex_list for c in ex.column_id]
        batch.columns = torch.tensor(columns, dtype=torch.long, device=device)
    else:
        # prepare inputs for pretrained models
        batch.inputs = {"input_ids": None, "attention_mask": None, "token_type_ids": None, "position_ids": None}
        input_lens = [len(ex.input_id) for ex in ex_list]
        max_len = max(input_lens)
        input_ids = [ex.input_id + [pad_idx] * (max_len - len(ex.input_id)) for ex in ex_list]
        batch.inputs["input_ids"] = torch.tensor(input_ids, dtype=torch.long, device=device)
        attention_mask = [[1] * l + [0] * (max_len - l) for l in input_lens]
        batch.inputs["attention_mask"] = torch.tensor(attention_mask, dtype=torch.float, device=device)
        token_type_ids = [ex.segment_id + [0] * (max_len - len(ex.segment_id)) for ex in ex_list]
        batch.inputs["token_type_ids"] = torch.tensor(token_type_ids, dtype=torch.long, device=device)
        # difference vs the base variant: the drop helper also gets the batch index `f`
        position_ids = [get_position_ids_drop(ex,f, shuffle=train) + [0] * (max_len - len(ex.input_id)) for f,ex in enumerate(ex_list)]
        batch.inputs["position_ids"] = torch.tensor(position_ids, dtype=torch.long, device=device)
        # extract representations after plm, remove [SEP]
        question_mask_plm = [ex.question_mask_plm + [0] * (max_len - len(ex.question_mask_plm)) for ex in ex_list]
        batch.question_mask_plm = torch.tensor(question_mask_plm, dtype=torch.bool, device=device)
        table_mask_plm = [ex.table_mask_plm + [0] * (max_len - len(ex.table_mask_plm)) for ex in ex_list]
        batch.table_mask_plm = torch.tensor(table_mask_plm, dtype=torch.bool, device=device)
        column_mask_plm = [ex.column_mask_plm + [0] * (max_len - len(ex.column_mask_plm)) for ex in ex_list]
        batch.column_mask_plm = torch.tensor(column_mask_plm, dtype=torch.bool, device=device)
        # subword aggregation
        question_subword_lens = [l for ex in ex_list for l in ex.question_subword_len]
        batch.question_subword_lens = torch.tensor(question_subword_lens, dtype=torch.long, device=device)
        table_subword_lens = [l for ex in ex_list for l in ex.table_subword_len]
        batch.table_subword_lens = torch.tensor(table_subword_lens, dtype=torch.long, device=device)
        column_subword_lens = [l for ex in ex_list for l in ex.column_subword_len]
        batch.column_subword_lens = torch.tensor(column_subword_lens, dtype=torch.long, device=device)
    batch.question_unk_mask, batch.table_unk_mask, batch.column_unk_mask = None, None, None
    if not train and plm is None:
        # during evaluation, for words not in vocab but in glove vocab, extract its correpsonding embedding
        word2vec, unk_idx = Example.word2vec, Example.word_vocab[UNK]
        question_unk_mask = (batch.questions == unk_idx).cpu()
        if question_unk_mask.any().item():
            # dtype '<U100' caps each raw word at 100 unicode characters
            raw_questions = np.array([ex.question + [PAD] * (batch.max_question_len - len(ex.question)) for ex in ex_list], dtype='<U100')
            unk_words = raw_questions[question_unk_mask.numpy()].tolist()
            unk_word_embeddings = [word2vec.emb(w) for w in unk_words]
            oov_flag = torch.tensor([True if e is not None else False for e in unk_word_embeddings], dtype=torch.bool)
            if oov_flag.any().item():
                # keep only the UNK positions that actually have a glove embedding
                batch.question_unk_mask = question_unk_mask.masked_scatter_(torch.clone(question_unk_mask), oov_flag).to(device)
                batch.question_unk_embeddings = torch.tensor([e for e in unk_word_embeddings if e is not None], dtype=torch.float, device=device)
        table_unk_mask = (batch.tables == unk_idx).cpu()
        if table_unk_mask.any().item():
            raw_tables = np.array([t + [PAD] * (batch.max_table_word_len - len(t)) for ex in ex_list for t in ex.table], dtype='<U100')
            unk_words = raw_tables[table_unk_mask.numpy()].tolist()
            unk_word_embeddings = [word2vec.emb(w) for w in unk_words]
            oov_flag = torch.tensor([True if e is not None else False for e in unk_word_embeddings], dtype=torch.bool)
            if oov_flag.any().item():
                batch.table_unk_mask = table_unk_mask.masked_scatter_(torch.clone(table_unk_mask), oov_flag).to(device)
                batch.table_unk_embeddings = torch.tensor([e for e in unk_word_embeddings if e is not None], dtype=torch.float, device=device)
        column_unk_mask = (batch.columns == unk_idx).cpu()
        if column_unk_mask.any().item():
            raw_columns = np.array([c + [PAD] * (batch.max_column_word_len - len(c)) for ex in ex_list for c in ex.column], dtype='<U100')
            unk_words = raw_columns[column_unk_mask.numpy()].tolist()
            unk_word_embeddings = [word2vec.emb(w) for w in unk_words]
            oov_flag = torch.tensor([True if e is not None else False for e in unk_word_embeddings], dtype=torch.bool)
            if oov_flag.any().item():
                batch.column_unk_mask = column_unk_mask.masked_scatter_(torch.clone(column_unk_mask), oov_flag).to(device)
                batch.column_unk_embeddings = torch.tensor([e for e in unk_word_embeddings if e is not None], dtype=torch.float, device=device)
    return batch
class Example():
    """One text2sql sample tied to its database schema (duplicate declaration
    within this chunk; identical to the earlier `Example`).

    Class-level resources (grammar, tokenizer, vocab, graph factory, ...) are
    installed once via `configuration` before any instance is created.
    """
    def configuration(cls, plm=None, method='lgesql', table_path='data/tables.json', tables='data/tables.bin', db_dir='data/database'):
        # Install shared resources on the class itself.
        # NOTE(review): first parameter is `cls` — presumably decorated with
        # @classmethod in the original source; confirm the decorator was not
        # lost during extraction.
        cls.plm, cls.method = plm, method
        cls.grammar = ASDLGrammar.from_filepath(GRAMMAR_FILEPATH)
        cls.trans = TransitionSystem.get_class_by_lang('sql')(cls.grammar)
        cls.tables = pickle.load(open(tables, 'rb')) if type(tables) == str else tables
        cls.evaluator = Evaluator(cls.trans, table_path, db_dir)
        if plm is None:
            cls.word2vec = Word2vecUtils()
            cls.tokenizer = lambda x: x
            cls.word_vocab = Vocab(padding=True, unk=True, boundary=True, default=UNK,
                filepath='./pretrained_models/glove.42b.300d/vocab.txt', specials=SCHEMA_TYPES) # word vocab for glove.42B.300d
        else:
            cls.tokenizer = AutoTokenizer.from_pretrained(os.path.join('./pretrained_models', plm))
            cls.word_vocab = cls.tokenizer.get_vocab()
        cls.relation_vocab = Vocab(padding=False, unk=False, boundary=False, iterable=RELATIONS, default=None)
        cls.graph_factory = GraphFactory(cls.method, cls.relation_vocab)
    def load_dataset(cls, choice, debug=False):
        """Load the pickled train/dev split; during training, skip examples
        whose schema has more than 100 columns.

        NOTE(review): also takes `cls` — likely a @classmethod (see above).
        """
        assert choice in ['train', 'dev']
        fp = os.path.join('data', choice + '.' + cls.method + '.bin')
        datasets = pickle.load(open(fp, 'rb'))
        # question_lens = [len(ex['processed_question_toks']) for ex in datasets]
        # print('Max/Min/Avg question length in %s dataset is: %d/%d/%.2f' % (choice, max(question_lens), min(question_lens), float(sum(question_lens))/len(question_lens)))
        # action_lens = [len(ex['actions']) for ex in datasets]
        # print('Max/Min/Avg action length in %s dataset is: %d/%d/%.2f' % (choice, max(action_lens), min(action_lens), float(sum(action_lens))/len(action_lens)))
        examples, outliers = [], 0
        for ex in datasets:
            if choice == 'train' and len(cls.tables[ex['db_id']]['column_names']) > 100:
                outliers += 1
                continue
            examples.append(cls(ex, cls.tables[ex['db_id']]))
            if debug and len(examples) >= 100:
                # debug mode: early-return with the first 100 examples only
                return examples
        if choice == 'train':
            print("Skip %d extremely large samples in training dataset ..." % (outliers))
        return examples
    def __init__(self, ex: dict, db: dict):
        super(Example, self).__init__()
        self.ex = ex
        self.db = db
        """ Mapping word to corresponding index """
        if Example.plm is None:
            # GloVe path: tokens were pre-processed offline
            self.question = ex['processed_question_toks']
            self.question_id = [Example.word_vocab[w] for w in self.question]
            self.column = [[db['column_types'][idx].lower()] + c for idx, c in enumerate(db['processed_column_toks'])]
            self.column_id = [[Example.word_vocab[w] for w in c] for c in self.column]
            self.table = [['table'] + t for t in db['processed_table_toks']]
            self.table_id = [[Example.word_vocab[w] for w in t] for t in self.table]
        else:
            t = Example.tokenizer
            self.question = [q.lower() for q in ex['raw_question_toks']]
            self.question_id = [t.cls_token_id] # map token to id
            self.question_mask_plm = [] # remove SEP token in our case
            self.question_subword_len = [] # subword len for each word, exclude SEP token
            for w in self.question:
                toks = t.convert_tokens_to_ids(t.tokenize(w))
                self.question_id.extend(toks)
                self.question_subword_len.append(len(toks))
            self.question_mask_plm = [0] + [1] * (len(self.question_id) - 1) + [0]
            self.question_id.append(t.sep_token_id)
            # note: the comprehension variable `t` below does NOT clobber the
            # tokenizer binding — py3 comprehensions have their own scope
            self.table = [['table'] + t.lower().split() for t in db['table_names']]
            self.table_id, self.table_mask_plm, self.table_subword_len = [], [], []
            self.table_word_len = []
            for s in self.table:
                l = 0
                for w in s:
                    toks = t.convert_tokens_to_ids(t.tokenize(w))
                    self.table_id.extend(toks)
                    self.table_subword_len.append(len(toks))
                    l += len(toks)
                self.table_word_len.append(l)
            self.table_mask_plm = [1] * len(self.table_id)
            self.column = [[db['column_types'][idx].lower()] + c.lower().split() for idx, (_, c) in enumerate(db['column_names'])]
            self.column_id, self.column_mask_plm, self.column_subword_len = [], [], []
            self.column_word_len = []
            for s in self.column:
                l = 0
                for w in s:
                    toks = t.convert_tokens_to_ids(t.tokenize(w))
                    self.column_id.extend(toks)
                    self.column_subword_len.append(len(toks))
                    l += len(toks)
                self.column_word_len.append(l)
            self.column_mask_plm = [1] * len(self.column_id) + [0]
            self.column_id.append(t.sep_token_id)
            # full PLM input: [CLS] question [SEP] tables columns [SEP]
            self.input_id = self.question_id + self.table_id + self.column_id
            self.segment_id = [0] * len(self.question_id) + [1] * (len(self.table_id) + len(self.column_id)) \
                if Example.plm != 'grappa_large_jnt' and not Example.plm.startswith('roberta') \
                else [0] * (len(self.question_id) + len(self.table_id) + len(self.column_id))
            self.question_mask_plm = self.question_mask_plm + [0] * (len(self.table_id) + len(self.column_id))
            self.table_mask_plm = [0] * len(self.question_id) + self.table_mask_plm + [0] * len(self.column_id)
            self.column_mask_plm = [0] * (len(self.question_id) + len(self.table_id)) + self.column_mask_plm
        self.graph = Example.graph_factory.graph_construction(ex, db)
        # outputs
        self.query = ' '.join(ex['query'].split('\t'))
        self.ast = ex['ast']
        self.tgt_action = ex['actions']
        self.used_tables, self.used_columns = ex['used_tables'], ex['used_columns']
# The code above provides the dependencies needed by `from_example_list_text2sql_drop`.
# `def from_example_list_text2sql_drop(ex_list, device='cpu', train=True, **kwargs)` adds:
# New fields: batch.lens, batch.max_len, batch.relations, batch.relations_mask.
# Here is the function:
def from_example_list_text2sql_drop(ex_list, device='cpu', train=True, **kwargs):
    """Build a full text2sql batch using the `drop` base variant, plus the graph.

    New fields: batch.lens, batch.max_len, batch.relations, batch.relations_mask
    """
    batch = from_example_list_base_drop(ex_list, device, train)
    graphs = Example.graph_factory.batch_graphs(ex_list, device, train=train, **kwargs)
    batch.graph = graphs
    if train:
        # longest decoder action sequence in this batch
        batch.max_action_num = max(len(ex.tgt_action) for ex in ex_list)
    return batch
import os
import re
import string
from collections import Counter
import json
import sacrebleu
import torch
import tqdm
from rouge import Rouge
from torch.utils.data import DataLoader
from transformers import AdamW, get_scheduler
import transformers
from modelscope.hub.snapshot_download import snapshot_download
from modelscope.msdatasets import MsDataset
from modelscope.trainers.nlp.document_grounded_dialog_generate_trainer import \
DocumentGroundedDialogGenerateTrainer
from modelscope.utils.constant import DownloadMode
from modelscope.utils.logger import get_logger
logger = get_logger()
def collate(batch):
    """Split a list of dataset samples into (queries, reranked passages, responses).

    Each sample's 'rerank' field holds a JSON-encoded list of passages.
    """
    queries, contexts, responses = [], [], []
    for sample in batch:
        queries.append(sample['query'])
        contexts.append(json.loads(sample['rerank']))
        responses.append(sample['response'])
    return queries, contexts, responses
def prepare_optimizer(model, lr, weight_decay, eps):
    """Create an AdamW optimizer; biases and LayerNorm weights get no weight decay."""
    no_decay = ['bias', 'LayerNorm.weight']
    decay_params, exempt_params = [], []
    for param_name, param in model.named_parameters():
        if any(marker in param_name for marker in no_decay):
            exempt_params.append(param)
        else:
            decay_params.append(param)
    # group order matches the original: decayed parameters first
    grouped_parameters = [
        {'params': decay_params, 'weight_decay': weight_decay},
        {'params': exempt_params, 'weight_decay': 0.0},
    ]
    return AdamW(grouped_parameters, lr=lr, eps=eps)
def prepare_scheduler(optimizer, epochs, steps_per_epoch, warmup_rate):
    """Build a linear warmup/decay LR scheduler spanning the whole training run."""
    total_steps = epochs * steps_per_epoch
    num_warmup = int(total_steps * warmup_rate)
    return get_scheduler(
        name='linear',
        optimizer=optimizer,
        num_warmup_steps=num_warmup,
        num_training_steps=total_steps)
def evaluate(trainer, batch_size=16, checkpoint_path=None):
    """Run beam-search generation over the eval set and score the outputs.

    Optionally restores model weights from `checkpoint_path` first; writes the
    raw outputs/targets to evaluate_result.json and returns the metric dict.
    """
    model = trainer.model.model.generator.generator
    tokenizer = trainer.preprocessor.generation_tokenizer
    device = trainer.preprocessor.device
    if checkpoint_path is not None:
        state_dict = torch.load(checkpoint_path)
        trainer.model.model.load_state_dict(state_dict)
    valid_loader = DataLoader(
        dataset=trainer.eval_dataset,
        batch_size=batch_size,
        collate_fn=collate)
    trainer.model.model.eval()
    with torch.no_grad():
        results = {'outputs': [], 'targets': []}
        for index, payload in enumerate(tqdm.tqdm(valid_loader)):
            query, context, label = payload
            # truncate each query to its first 128 subword ids
            query = [
                tokenizer.decode(
                    tokenizer([x], add_special_tokens=False, return_tensors='pt')['input_ids'][0][:128])
                for x in query
            ]
            # concatenate the query with only the top reranked passage
            generator_inputs = [
                ' '.join([query[i], '<passage>', context[i][0]])
                for i in range(len(query))
            ]
            input_ids = tokenizer.batch_encode_plus(
                list(generator_inputs), padding=True, return_tensors='pt').input_ids.to(device)
            outputs = model.generate(input_ids, num_beams=3, max_length=128, early_stopping=True,
                                     no_repeat_ngram_size=3)
            predictions = tokenizer.batch_decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
            # round-trip labels through the tokenizer so they are normalized the same way
            label = trainer.preprocessor.generation_tokenizer.batch_decode(
                trainer.preprocessor.generation_tokenizer.batch_encode_plus(
                    label, add_special_tokens=False).input_ids,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=False)
            results['outputs'] += predictions
            results['targets'] += label
    # NOTE(review): measure_result is not defined in this chunk — presumably a
    # BLEU/ROUGE aggregator defined elsewhere in this script; confirm.
    meters = measure_result(results)
    result_path = os.path.join(trainer.model.model_dir,
                               'evaluate_result.json')
    with open(result_path, 'w') as f:
        json.dump(results, f, ensure_ascii=False, indent=4)
    logger.info(meters)
    return meters
def train(trainer,
          total_epoches=10,
          batch_size=16,
          accumulation_steps=1,
          learning_rate=1e-4,
          warmup_ratio=0.1,
          weight_decay=0.1,
          eps=1e-06,
          loss_log_freq=40,
          clip_grad_norm=1.0):
    """Fine-tune the response generator with gradient accumulation.

    After every epoch, runs `evaluate` and checkpoints the model whenever the
    summed metric score reaches a new maximum.
    """
    model = trainer.model.model.generator.generator
    tokenizer = trainer.preprocessor.generation_tokenizer
    device = trainer.preprocessor.device
    train_loader = DataLoader(
        dataset=trainer.train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=collate)
    optimizer = prepare_optimizer(trainer.model.model, learning_rate,
                                  weight_decay, eps)
    steps_per_epoch = len(train_loader) // accumulation_steps
    scheduler = prepare_scheduler(optimizer, total_epoches,
                                  steps_per_epoch, warmup_ratio)
    best_score = 0.0
    for epoch in range(total_epoches):
        trainer.model.model.train()
        losses = []
        for index, payload in enumerate(tqdm.tqdm(train_loader)):
            query, context, label = payload
            # truncate each query to its first 128 subword ids
            query = [
                tokenizer.decode(
                    tokenizer([x], add_special_tokens=False, return_tensors='pt')['input_ids'][0][:128])
                for x in query
            ]
            # concatenate the query with only the top reranked passage
            generator_inputs = [
                ' '.join([query[i], '<passage>', context[i][0]])
                for i in range(len(query))
            ]
            input_ids = tokenizer.batch_encode_plus(
                list(generator_inputs), padding=True, return_tensors='pt').input_ids.to(device)
            label_ids = tokenizer.batch_encode_plus(
                list(label), padding=True, return_tensors='pt').input_ids.to(device)
            loss = model(input_ids=input_ids, labels=label_ids)[0]
            if accumulation_steps > 1:
                # scale so the accumulated gradient matches a full-batch step
                loss = loss / accumulation_steps
            loss.backward()
            if (index + 1) % accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad_norm)
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
            losses.append(loss.item())
            if (index + 1) % loss_log_freq == 0:
                logger.info(
                    f'epoch: {epoch} \t batch: {batch_size * index} \t loss: {sum(losses) / len(losses)}'
                )
                losses = []
        if losses:
            # flush the remaining (unlogged) running loss at epoch end
            logger.info(
                f'epoch: {epoch} \t batch: last \t loss: {sum(losses) / len(losses)}'
            )
        meters = evaluate(trainer, batch_size=batch_size)
        total_score = sum([x for x in meters.values()])
        if total_score >= best_score:
            # checkpoint on ties as well (>=), keeping the most recent best
            best_score = total_score
            model_path = os.path.join(trainer.model.model_dir,
                                      'finetuned_model.bin')
            state_dict = trainer.model.model.state_dict()
            torch.save(state_dict, model_path)
            logger.info(
                'epoch %d obtain max score: %.4f, saving model to %s' %
                (epoch, total_score, model_path))
import torch
import copy
import tqdm
import json
import os
import copy
import re
import pytorch_lightning as pl
from PIL import Image
from einops import rearrange
from pace.config import ex
from pace.modules import TransformerSS , TransformerSSDecode
from pace.transforms import pixelbert_transform
from pace.utils.format_simmc_generation import main
from pace.datamodules.multitask_datamodule import MTDataModule
from pace.modules.dist_utils import all_gather
from torch.utils.data.distributed import DistributedSampler
from pace.gadgets.my_metrics import Accuracy, VQAScore, Scalar, NDCG, BLEUScorer
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
from pace.utils.format_simmc_dst_generation import format_for_dst
from pace.utils.eval_simmc2_dst import evaluate_from_flat_list
import functools
def compute_tr_recall(model,dm_module,type,get_relevance_tensor=False):
    """Score 100 response candidates per example for text-retrieval recall.

    For every example the dataset yields the ground-truth text plus 99
    drawn negatives ("false texts"); each candidate is paired with the
    same image and scored by the model's rank head.  Per-worker results
    are merged with all_gather.

    Args:
        model: module exposing .infer, .rank_output, .device and hparams.
        dm_module: datamodule wrapper; dm_module.dms[0] holds the datasets.
        type: split to score, 'val' or 'test'.
        get_relevance_tensor: also collect per-candidate relevance labels.

    Returns:
        dict with "scores" of shape [num_examples, 100] and, when
        requested, "relevance_scores" of the same shape (otherwise None).
    """
    assert type in ['val','test']
    dms = dm_module.dms
    if type == 'val':
        text_dset = dms[0].val_dataset
    elif type == 'test':
        text_dset = dms[0].test_dataset
    text_dset.tokenizer = dms[0].tokenizer
    # dist_sampler = DistributedSampler(text_dset, shuffle=False)
    # 99 negatives + 1 ground truth = 100 options per example
    text_dset.draw_false_text = 99
    option_len = 100
    text_loader = torch.utils.data.DataLoader(
        text_dset,
        batch_size = 1 ,#model.hparams.config["batch_size"],
        num_workers=model.hparams.config["num_workers"],
        # sampler=dist_sampler,
        pin_memory=True,
        collate_fn=functools.partial(
            text_dset.collate,
            mlm_collator=dms[0].mlm_collator,
        ),
    )
    rank_scores = list()
    rank_iids = list()
    relevance_scores = list()
    ret = {
        "scores":None,
        "relevance_scores":None
    }
    with torch.no_grad():
        for dict_batch in tqdm.tqdm(text_loader, desc=f"{type}:rank loop"):
            _bs,_c,_h,_w = dict_batch["image"][0].shape
            # stack the 99 negative candidates along a new "option" axis
            text_ids = torch.stack(
                [dict_batch[f"false_text_{i}_ids"] for i in range(option_len-1)], dim=1
            )
            text_masks = torch.stack(
                [dict_batch[f"false_text_{i}_masks"] for i in range(option_len-1)], dim=1
            )
            text_labels = torch.stack(
                [dict_batch[f"false_text_{i}_labels"] for i in range(option_len-1)], dim=1
            )
            # prepend the ground-truth text so it always sits at option index 0
            text_ids = torch.cat([dict_batch["text_ids"].unsqueeze(1), text_ids], dim=1).to(model.device)
            text_masks = torch.cat([dict_batch["text_masks"].unsqueeze(1), text_masks], dim=1).to(model.device)
            text_labels = torch.cat([dict_batch["text_labels"].unsqueeze(1), text_labels], dim=1).to(model.device)
            # replicate the single image across all 100 text options
            images = dict_batch["image"][0].unsqueeze(1).expand(_bs,option_len,_c,_h,_w).to(model.device)
            # flatten (batch, option) so the model sees one large batch
            infer_input = {
                "image":[rearrange(images , "bs ol c h w -> (bs ol) c h w")],
                "text_ids":rearrange(text_ids,"bs ol tl -> (bs ol) tl"),
                "text_masks":rearrange(text_masks,"bs ol tl -> (bs ol) tl"),
                "text_labels":rearrange(text_labels,"bs ol tl -> (bs ol) tl")
            }
            if "false_text_0_segment_ids" in dict_batch:
                text_segment_ids = torch.stack(
                    [dict_batch[f"false_text_{i}_segment_ids"] for i in range(option_len-1)], dim=1
                )
                text_segment_ids = torch.cat([dict_batch["text_segment_ids"].unsqueeze(1) , text_segment_ids], dim=1).to(model.device)
                infer_input["text_segment_ids"] = rearrange(text_segment_ids , "bs fs tl -> (bs fs) tl")
            infer = model.infer(infer_input)
            # column 0 of the rank head's output is used as the match score
            score = model.rank_output(infer["cls_feats"])[:, 0]
            score = rearrange(score , "(bs ol) -> bs ol", bs=_bs, ol=option_len)
            rank_scores.extend(score.cpu().tolist())
            rank_iids.extend(dict_batch["raw_index"])
            if get_relevance_tensor:
                curr_relevance_scores = torch.stack(
                    [dict_batch[f"false_text_{i}_relevance"] for i in range(option_len-1)], dim=1
                )
                curr_relevance_scores = torch.cat([dict_batch["text_relevance"].unsqueeze(1) , curr_relevance_scores], dim=1)
                relevance_scores.extend(curr_relevance_scores.tolist())
    # torch.distributed.barrier()
    # merge per-worker result lists (no-op in single-process runs)
    gather_rank_scores = all_gather(rank_scores)
    gather_relevance_scores = all_gather(relevance_scores)
    gather_rank_iids = all_gather(rank_iids)
    iids = torch.tensor(gather_rank_iids)
    iids = iids.view(-1)
    scores = torch.tensor(gather_rank_scores)
    relevance_scores = torch.tensor(gather_relevance_scores)
    scores = scores.view(len(iids), -1)
    # NOTE(review): when get_relevance_tensor is False this reshapes an
    # empty tensor with an ambiguous -1 and may raise — confirm callers
    # always request the relevance tensor or never hit this path.
    relevance_scores = relevance_scores.view(len(iids), -1)
    ret["scores"] = scores
    if get_relevance_tensor:
        ret["relevance_scores"] = relevance_scores
    return ret
def calculate_imagechat_test_rank(model,dm,tot_size):
    """Compute text-retrieval recall@1/5/10 on the ImageChat test split.

    During preprocessing the ground-truth response is placed at index 0
    of the candidate list, so recall@k reduces to checking whether
    candidate 0 appears among the top-k ranked options.

    Returns:
        (recall@1, recall@5, recall@10) as plain floats.
    """
    scores = compute_tr_recall(model, dm, 'test')["scores"]
    # the gold candidate index is always 0 for every example
    target = torch.zeros(len(scores)).unsqueeze(1)

    def hits_at(k):
        # count examples whose top-k predictions contain candidate 0
        top = scores.topk(k, dim=1)
        return (target == top.indices).float().max(dim=1)[0].sum().item()

    return (hits_at(1) / tot_size, hits_at(5) / tot_size, hits_at(10) / tot_size)
import torch
import copy
import tqdm
import json
import os
import copy
import re
import pytorch_lightning as pl
from PIL import Image
from einops import rearrange
from pace.config import ex
from pace.modules import TransformerSS , TransformerSSDecode
from pace.transforms import pixelbert_transform
from pace.utils.format_simmc_generation import main
from pace.datamodules.multitask_datamodule import MTDataModule
from pace.modules.dist_utils import all_gather
from torch.utils.data.distributed import DistributedSampler
from pace.gadgets.my_metrics import Accuracy, VQAScore, Scalar, NDCG, BLEUScorer
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
from pace.utils.format_simmc_dst_generation import format_for_dst
from pace.utils.eval_simmc2_dst import evaluate_from_flat_list
import functools
def detokenize(tk_list):
def generation(model,dm_module,type,decode_prompt_text=None):
    """Autoregressively decode a response for every example in a split.

    Args:
        model: decoding-capable module; calling model(batch,
            decode_prompt=...) must return a dict with a 'pred_seq'
            entry of generated token ids.
        dm_module: datamodule wrapper; dm_module.dms[0] holds the datasets.
        type: 'val' (uses a no-false-text val dataset) or 'test'.
        decode_prompt_text: optional text used as a decoding prompt.

    Returns:
        List of generated strings, one per example, in loader order.
    """
    dms = dm_module.dms
    if type == 'val':
        text_dset = dms[0].make_no_false_val_dset()
    elif type == 'test':
        text_dset = dms[0].test_dataset
    tokenizer = text_dset.tokenizer = dms[0].tokenizer
    text_loader = torch.utils.data.DataLoader(
        text_dset,
        batch_size=16,
        num_workers=model.hparams.config["num_workers"],
        pin_memory=True,
        shuffle=False,
        collate_fn=functools.partial(
            text_dset.collate,
            mlm_collator=dms[0].mlm_collator,
        ),
    )
    outputs = list()
    # tokenize the optional prompt once, outside the batch loop
    decode_prompt = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(decode_prompt_text)) if decode_prompt_text != None else None
    print("decode_prompt: " , decode_prompt , decode_prompt_text)
    with torch.no_grad():
        for _b in tqdm.tqdm(text_loader, desc="generation loop"):
            output_ids = model(_b , decode_prompt=decode_prompt)['pred_seq']
            texts = _b["text"]
            text_ids = _b["text_ids"]
            for i in range(len(texts)):
                # sent = text_dset.tokenizer.decode(output_ids[i])
                # detokenize() merges WordPiece pieces back into words
                output_tokens = text_dset.tokenizer.convert_ids_to_tokens(output_ids[i])
                sent = ' '.join(detokenize(output_tokens))
                # keep the first non-empty [SEP]-delimited segment,
                # with [PAD] tokens stripped out
                splits = sent.split("[SEP]")
                result = ""
                for split in splits:
                    split_sent = split.replace("[PAD]", "").strip()
                    if len(split_sent)>0:
                        result = split_sent
                        break
                outputs.append(result)
    return outputs
import re
def detokenize(tk_list):
    """Reassemble a list of WordPiece tokens into whole words.

    Merging happens in two passes:
      1. '##' continuation pieces are glued onto the preceding token.
      2. Joiner punctuation ("'", "-", "/", "&", "_") is fused with its
         neighbours (e.g. ["i", "'", "ll"] -> ["i'll"]); decimal numbers
         split around "." are re-joined (["3", ".", "5"] -> ["3.5"],
         absorbing a directly preceding "$"); opening brackets attach to
         the following token and closing brackets to the preceding one.
    Leading sentence punctuation is stripped from the result.

    Args:
        tk_list: list of token strings (WordPiece style).

    Returns:
        A new list of merged word strings.
    """
    # Pass 1: merge '##' subword continuations into the previous token.
    merged = []
    for tk in tk_list:
        if tk.startswith('##'):
            if len(merged) > 0:
                merged[-1] = merged[-1] + tk[2:]
            else:
                merged.append(tk[2:])
        else:
            merged.append(tk)

    # Pass 2: fuse punctuation with neighbouring tokens.
    tk_list = merged
    r_list = []
    for tk in tk_list:
        flag = False
        if len(r_list) > 0 and r_list[-1] in ["'", "-", "/", "&", "_"]:
            # fuse joiner punctuation with both neighbours: x ' y -> x'y
            x = r_list[-1]
            if len(r_list) > 1:
                y = r_list[-2]
                r_list = r_list[:-2]
                x = y + x + tk
            else:
                r_list = r_list[:-1]
                x = x + tk
            r_list.append(x)
            flag = True
        elif (len(r_list) > 1 and r_list[-1] == "."
              and re.match(r"\d+", tk) and re.match(r"\d+", r_list[-2])):
            # re-join decimals: 3 . 5 -> 3.5, absorbing a preceding "$".
            # BUGFIX: the original branch fired whenever the previous
            # token was ".", even for non-numeric neighbours, which
            # duplicated the "." and silently dropped the current token;
            # it now applies only to digit "." digit sequences.
            y = r_list[-2]
            r_list = r_list[:-2]
            x = y + "." + tk
            if len(r_list) > 0 and r_list[-1] == '$':
                x = r_list.pop() + x
            r_list.append(x)
            flag = True
        elif len(r_list) > 0 and (r_list[-1] in ["$", "#", "(", "<", "["] or tk in [")", ">", "]"]):
            # attach opening brackets forward / closing brackets backward
            r_list[-1] += tk
            flag = True
        if not flag:
            r_list.append(tk)

    # drop leading sentence punctuation
    while len(r_list) > 0 and r_list[0] in [".", "?", "!", ","]:
        r_list.pop(0)
    return r_list
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def cost_matrix_cosine(x, y, eps=1e-5):
    """Pairwise cosine distance between batched sequences.

    Args:
        x: tensor of shape [B, L_x, D].
        y: tensor of shape [B, L_y, D].
        eps: numerical floor used when normalizing.

    Returns:
        Tensor of shape [B, L_x, L_y] with entries 1 - cos(x_i, y_j).
    """
    assert x.dim() == y.dim()
    assert x.size(0) == y.size(0)
    assert x.size(2) == y.size(2)
    # unit-normalize along the feature axis; a batched matmul of unit
    # vectors then yields every pairwise cosine similarity at once
    sim = F.normalize(x, p=2, dim=-1, eps=eps) @ F.normalize(y, p=2, dim=-1, eps=eps).transpose(1, 2)
    return 1 - sim
def trace(x):
    """Batched matrix trace: sum of diagonal entries per batch element.

    Args:
        x: tensor of shape [B, N, N].

    Returns:
        Tensor of shape [B].
    """
    b, m, n = x.size()
    assert m == n
    # select the diagonal of every batch matrix via a boolean eye mask
    eye = torch.eye(n, dtype=torch.bool, device=x.device)
    diag = x.masked_select(eye.unsqueeze(0).expand_as(x))
    return diag.contiguous().view(b, n).sum(dim=-1, keepdim=False)
def ipot(C, x_len, x_pad, y_len, y_pad, joint_pad, beta, iteration, k):
    """IPOT solver: iteratively estimate a transport plan for cost C.

    Args:
        C: cost matrix, [B, M, N].
        x_len / y_len: effective (unpadded) lengths, [B].
        x_pad / y_pad: boolean padding masks, [B, M] / [B, N] (True = pad).
        joint_pad: combined padding mask, [B, M, N].
        beta: step size of the proximal kernel exp(-C / beta).
        iteration: number of outer proximal iterations.
        k: number of inner marginal-scaling steps per iteration.

    Returns:
        Transport plan T of shape [B, N, M] (transposed w.r.t. C).
    """
    b, m, n = C.size()
    # uniform marginal over the x side, normalized by true length
    sigma = torch.ones(b, m, dtype=C.dtype, device=C.device) / x_len.unsqueeze(1)
    T = torch.ones(b, n, m, dtype=C.dtype, device=C.device)
    A = torch.exp(-C.transpose(1, 2) / beta)
    # mask padded positions
    sigma.masked_fill_(x_pad, 0)
    joint_pad = joint_pad.transpose(1, 2)
    T.masked_fill_(joint_pad, 0)
    A.masked_fill_(joint_pad, 0)
    # broadcastable lengths
    x_len = x_len.unsqueeze(1).unsqueeze(2)
    y_len = y_len.unsqueeze(1).unsqueeze(2)
    # mask to zero out padding in delta and sigma
    # (adding 1e4 at padded slots drives the subsequent 1/(...) towards 0)
    x_mask = (x_pad.to(C.dtype) * 1e4).unsqueeze(1)
    y_mask = (y_pad.to(C.dtype) * 1e4).unsqueeze(1)
    for _ in range(iteration):
        Q = A * T  # bs * n * m
        sigma = sigma.view(b, m, 1)
        for _ in range(k):
            # alternating Sinkhorn-style marginal scaling updates
            delta = 1 / (y_len * Q.matmul(sigma).view(b, 1, n) + y_mask)
            sigma = 1 / (x_len * delta.matmul(Q) + x_mask)
        T = delta.view(b, n, 1) * Q * sigma
    T.masked_fill_(joint_pad, 0)
    return T
The code above supplies the dependencies needed to implement the `optimal_transport_dist` function. Write a Python function `def optimal_transport_dist(txt_emb, img_emb, txt_pad, img_pad, beta=0.5, iteration=50, k=1)` that computes the optimal-transport distance between text and image embeddings, where the arguments have shapes [B, M, D], [B, N, D], [B, M], and [B, N] respectively.
Here is the function:
def optimal_transport_dist(
    txt_emb, img_emb, txt_pad, img_pad, beta=0.5, iteration=50, k=1
):
    """Optimal-transport (IPOT) distance between text and image embeddings.

    Shapes: txt_emb [B, M, D], img_emb [B, N, D], txt_pad [B, M] and
    img_pad [B, N] (boolean, True at padded positions).

    Returns:
        [B] tensor of per-example transport distances.
    """
    cost = cost_matrix_cosine(txt_emb, img_emb)
    # zero the cost wherever either side is padding
    joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)
    cost.masked_fill_(joint_pad, 0)
    # effective (non-padded) lengths per example
    txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)
    img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)
    # the plan is solved on a detached cost so gradients flow only
    # through the final <cost, plan> inner product
    plan = ipot(
        cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, beta, iteration, k
    )
    return trace(cost.matmul(plan.detach()))
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def compute_mlm(pl_module, batch):
    """Masked-language-modeling objective.

    Runs the backbone with text masking enabled, scores every text
    position with the MLM head, takes cross-entropy over the masked
    labels, and logs loss/accuracy under the current phase.
    """
    infer = pl_module.infer(batch, mask_text=True, mask_image=False)
    logits = pl_module.mlm_score(infer["text_feats"])
    labels = infer["text_labels"]
    vocab_size = pl_module.hparams.config["vocab_size"]
    # positions labelled -100 are excluded from the loss
    loss = F.cross_entropy(
        logits.view(-1, vocab_size),
        labels.view(-1),
        ignore_index=-100,
    )
    ret = {
        "mlm_loss": loss,
        "mlm_logits": logits,
        "mlm_labels": labels,
        "mlm_ids": infer["text_ids"],
    }
    phase = "train" if pl_module.training else "val"
    tracked_loss = getattr(pl_module, f"{phase}_mlm_loss")(ret["mlm_loss"])
    tracked_acc = getattr(pl_module, f"{phase}_mlm_accuracy")(
        ret["mlm_logits"], ret["mlm_labels"]
    )
    pl_module.log(f"mlm/{phase}/loss", tracked_loss)
    pl_module.log(f"mlm/{phase}/accuracy", tracked_acc)
    return ret
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def generation_test_wrapup(pl_module):
    """Decode the whole test set under DDP and dump predictions to JSON.

    Each rank decodes its DistributedSampler shard, results are gathered
    from all ranks, and rank 0 writes "<task>_<model_path>.json" mapping
    raw example index to {"pred": generated text, "source": input text}.
    """
    detokenize = pl_module.hparams.config["detokenize"]
    dms = pl_module.trainer.datamodule.dms
    dataset = dms[0].test_dataset
    tokenizer = dataset.tokenizer
    prompt_text = pl_module.hparams.config["decode_prompt"]
    decode_prompt = tokenizer(prompt_text , add_special_tokens=False).input_ids
    dist_sampler = DistributedSampler(dataset, shuffle=False)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=pl_module.hparams.config["per_gpu_batchsize"],
        num_workers=pl_module.hparams.config["num_workers"],
        pin_memory=True,
        sampler=dist_sampler,
        collate_fn=functools.partial(
            dataset.collate,
            mlm_collator=dms[0].mlm_collator,
        ),
    )
    outputs = []
    iids = []
    source_texts = []
    for _b in tqdm.tqdm(dataloader, desc="generation loop"):
        iids.extend(_b["raw_index"])
        source_texts.extend(_b["text"])
        output = pl_module(_b , decode_prompt=decode_prompt)['pred_seq']
        outputs.extend(output)
    # wait for every rank to finish decoding before gathering
    torch.distributed.barrier()
    gather_iids = all_gather(iids)
    gather_outputs = all_gather(outputs)
    gather_sources = all_gather(source_texts)
    print('rank num:',len(gather_iids))
    output_sequences = {}
    # gather_* are nested lists indexed [rank][example]
    for i in range(len(gather_outputs)):
        for j in range(len(gather_outputs[i])):
            # sent = text_dset.tokenizer.decode(output_ids[i])
            output_tokens = tokenizer.convert_ids_to_tokens(gather_outputs[i][j])
            sent = ' '.join(detokenize(output_tokens))
            # keep the first non-empty [SEP]-delimited segment,
            # with [PAD] tokens stripped out
            splits = sent.split("[SEP]")
            result = ""
            for split in splits:
                split_sent = split.replace("[PAD]","").strip()
                if len(split_sent)>0:
                    result = split_sent
                    break
            output_sequences[gather_iids[i][j]] = {
                'pred':result,
                'source':gather_sources[i][j]
            }
    task = pl_module.hparams.config["datasets"][0]
    model_path = pl_module.hparams.config["load_path"].split(".")[0]
    output_file_name = task + "_" + model_path
    output_file_name = output_file_name.replace("/","_") + '.json'
    # only rank 0 writes to disk
    if 0 == torch.distributed.get_rank():
        with open(output_file_name , "w") as output_file:
            json.dump(output_sequences , output_file)
        print(f"output file has been saved to {output_file_name}")
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def compute_seq2seq(pl_module, batch):
    """Seq2seq objective: cross-entropy on the target's masked tokens.

    Gathers the hidden states at every masked target position, scores
    them with the shared MLM head, and logs loss/accuracy per phase.
    """
    infer = pl_module.infer(batch, mask_text=True, mask_image=False)
    masked_pos = batch["target_masked_pos"]
    masked_labels = batch["target_masked_ids"]
    masked_weights = batch["target_masked_weights"]  # fetched for parity; not used below
    # pick out the hidden state at each masked target position
    feat_dim = infer["text_feats"].size(-1)
    gather_index = masked_pos.unsqueeze(2).expand(-1, -1, feat_dim)
    masked_feats = torch.gather(infer["text_feats"], 1, gather_index)
    masked_logits = pl_module.mlm_score(masked_feats)
    masked_loss = F.cross_entropy(
        masked_logits.view(-1, pl_module.hparams.config["vocab_size"]),
        masked_labels.view(-1),
        ignore_index=-100,
    )
    ret = {
        "seq2seq_loss": masked_loss,
        "seq2seq_logits": masked_logits,
        "seq2seq_labels": masked_labels,
        "seq2seq_ids": masked_labels,
    }
    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_seq2seq_loss")(ret["seq2seq_loss"])
    acc = getattr(pl_module, f"{phase}_seq2seq_accuracy")(
        ret["seq2seq_logits"], ret["seq2seq_labels"]
    )
    pl_module.log(f"seq2seq/{phase}/loss", loss)
    pl_module.log(f"seq2seq/{phase}/accuracy", acc)
    return ret
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def compute_mpp(pl_module, batch):
    """Masked-patch prediction: per-channel 256-way classification loss.

    The mpp head emits 768 logits per patch, split here into three
    independent 256-way distributions (presumably one per colour
    channel — confirm against the mpp label construction).
    """
    infer = pl_module.infer(batch, mask_text=False, mask_image=True)
    mpp_logits = pl_module.mpp_score(infer["image_feats"])
    # reshape [.., 768] -> [.., 3, 256]: three independent classifiers
    mpp_logits = torch.stack(
        [
            mpp_logits[:, :, 0:256],
            mpp_logits[:, :, 256:512],
            mpp_logits[:, :, 512:768],
        ],
        dim=2,
    )
    mpp_labels = infer["image_labels"]
    mpp_loss = F.cross_entropy(
        mpp_logits.view(-1, 256),
        mpp_labels.view(-1),
        ignore_index=-100,  # -100-labelled positions do not contribute
    )
    ret = {
        "mpp_loss": mpp_loss,
        "mpp_logits": mpp_logits,
        "mpp_labels": mpp_labels,
    }
    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_mpp_loss")(ret["mpp_loss"])
    acc = getattr(pl_module, f"{phase}_mpp_accuracy")(
        ret["mpp_logits"], ret["mpp_labels"]
    )
    pl_module.log(f"mpp/{phase}/loss", loss)
    pl_module.log(f"mpp/{phase}/accuracy", acc)
    return ret
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def compute_mppd(pl_module, batch):
    """Masked-patch prediction (dense regression): MSE on masked patches."""
    infer = pl_module.infer(batch, mask_text=False, mask_image=True)
    preds = pl_module.mppd_score(infer["image_feats"])
    targets = infer["image_labels_mppd"]
    # restrict the loss to samples whose patch labels are not all ignored
    # (a row averaging exactly -100 is treated as fully ignored)
    trainable = infer["image_labels"].float().mean(dim=-1) != -100
    mppd_loss = F.mse_loss(preds[trainable], targets[trainable])
    ret = {
        "mppd_loss": mppd_loss,
        "mppd_logits": preds,
        "mppd_labels": targets,
    }
    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_mppd_loss")(ret["mppd_loss"])
    pl_module.log(f"mppd/{phase}/loss", loss)
    return ret
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def compute_mpfr(pl_module, batch):
    """Masked-patch feature regression: MSE on masked patch features."""
    infer = pl_module.infer(batch, mask_text=False, mask_image=True)
    preds = pl_module.mpfr_score(infer["image_feats"])
    targets = infer["image_labels_mpfr"]
    # restrict the loss to samples whose patch labels are not all ignored
    # (a row averaging exactly -100 is treated as fully ignored)
    trainable = infer["image_labels"].float().mean(dim=-1) != -100
    mpfr_loss = F.mse_loss(preds[trainable], targets[trainable])
    ret = {
        "mpfr_loss": mpfr_loss,
        "mpfr_logits": preds,
        "mpfr_labels": targets,
    }
    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_mpfr_loss")(ret["mpfr_loss"])
    pl_module.log(f"mpfr/{phase}/loss", loss)
    return ret
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def cost_matrix_cosine(x, y, eps=1e-5):
    """Batched pairwise cosine distance.

    Given x [B, L_x, D] and y [B, L_y, D], returns a [B, L_x, L_y]
    matrix whose (i, j) entry is 1 - cos(x_i, y_j).
    """
    assert x.dim() == y.dim()
    assert x.size(0) == y.size(0)
    assert x.size(2) == y.size(2)
    xn = F.normalize(x, p=2, dim=-1, eps=eps)
    yn = F.normalize(y, p=2, dim=-1, eps=eps)
    # inner products of unit vectors are cosine similarities
    return 1 - torch.bmm(xn, yn.transpose(1, 2))
def trace(x):
    """Per-batch matrix trace of a [B, N, N] tensor; returns shape [B]."""
    b, m, n = x.size()
    assert m == n
    # boolean identity mask picks out each batch matrix's diagonal
    diag_mask = torch.eye(n, dtype=torch.bool, device=x.device)
    selected = x.masked_select(diag_mask.unsqueeze(0).expand_as(x))
    return selected.contiguous().view(b, n).sum(dim=-1, keepdim=False)
def ipot(C, x_len, x_pad, y_len, y_pad, joint_pad, beta, iteration, k):
    """IPOT solver: iteratively estimate a transport plan for cost C.

    Args:
        C: cost matrix, [B, M, N].
        x_len / y_len: effective (unpadded) lengths, [B].
        x_pad / y_pad: boolean padding masks, [B, M] / [B, N] (True = pad).
        joint_pad: combined padding mask, [B, M, N].
        beta: step size of the proximal kernel exp(-C / beta).
        iteration: number of outer proximal iterations.
        k: number of inner marginal-scaling steps per iteration.

    Returns:
        Transport plan T of shape [B, N, M] (transposed w.r.t. C).
    """
    b, m, n = C.size()
    # uniform marginal over the x side, normalized by true length
    sigma = torch.ones(b, m, dtype=C.dtype, device=C.device) / x_len.unsqueeze(1)
    T = torch.ones(b, n, m, dtype=C.dtype, device=C.device)
    A = torch.exp(-C.transpose(1, 2) / beta)
    # mask padded positions
    sigma.masked_fill_(x_pad, 0)
    joint_pad = joint_pad.transpose(1, 2)
    T.masked_fill_(joint_pad, 0)
    A.masked_fill_(joint_pad, 0)
    # broadcastable lengths
    x_len = x_len.unsqueeze(1).unsqueeze(2)
    y_len = y_len.unsqueeze(1).unsqueeze(2)
    # mask to zero out padding in delta and sigma
    # (adding 1e4 at padded slots drives the subsequent 1/(...) towards 0)
    x_mask = (x_pad.to(C.dtype) * 1e4).unsqueeze(1)
    y_mask = (y_pad.to(C.dtype) * 1e4).unsqueeze(1)
    for _ in range(iteration):
        Q = A * T  # bs * n * m
        sigma = sigma.view(b, m, 1)
        for _ in range(k):
            # alternating Sinkhorn-style marginal scaling updates
            delta = 1 / (y_len * Q.matmul(sigma).view(b, 1, n) + y_mask)
            sigma = 1 / (x_len * delta.matmul(Q) + x_mask)
        T = delta.view(b, n, 1) * Q * sigma
    T.masked_fill_(joint_pad, 0)
    return T
def compute_itm_wpa(pl_module, batch):
    """Image-text matching with word-patch alignment (ITM + WPA).

    Half of the batch keeps its true image, the other half is swapped
    for a drawn false image; the ITM head classifies matched versus
    mismatched pairs.  Additionally, an optimal-transport distance
    between word and patch features (via ipot) serves as an alignment
    loss, pushed down for matched pairs and up for mismatched ones.
    """
    pos_len = len(batch["text"]) // 2
    # pos_len = 1
    neg_len = len(batch["text"]) - pos_len
    # 1 = matched pair, 0 = mismatched; order randomized per batch
    itm_labels = torch.cat([torch.ones(pos_len), torch.zeros(neg_len)]).to(
        pl_module.device
    )
    itm_labels = itm_labels[torch.randperm(itm_labels.size(0))]
    # for label-0 samples substitute the drawn false image
    itm_images = [
        torch.stack(
            [
                ti if itm_labels[i] == 1 else fi
                for i, (ti, fi) in enumerate(zip(bti, bfi))
            ]
        )
        for bti, bfi in zip(batch["image"], batch["false_image_0"])
    ]
    # shallow-copy the batch so the caller's dict is not mutated
    batch = {k: v for k, v in batch.items()}
    batch["image"] = itm_images
    infer = pl_module.infer(batch, mask_text=False, mask_image=False)
    # run the OT computation in fp32 (autocast disabled)
    with torch.cuda.amp.autocast(enabled=False):
        txt_emb, img_emb = infer["text_feats"], infer["image_feats"]
        txt_mask, img_mask = infer["text_masks"].bool(), infer["image_masks"].bool()
        # exclude each text's final valid token from alignment
        # (presumably the [SEP] token — confirm)
        for i, _len in enumerate(txt_mask.sum(dim=1)):
            txt_mask[i, _len - 1] = False
        # exclude the leading token of both modalities
        txt_mask[:, 0] = False
        img_mask[:, 0] = False
        if "deit" in pl_module.hparams.config["vit"]:
            # DeiT backbones carry an extra leading token — exclude it too
            img_mask[:, 1] = False
        txt_pad, img_pad = ~txt_mask, ~img_mask
        cost = cost_matrix_cosine(txt_emb.float(), img_emb.float())
        joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)
        cost.masked_fill_(joint_pad, 0)
        txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to(
            dtype=cost.dtype
        )
        img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to(
            dtype=cost.dtype
        )
        T = ipot(
            cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, 0.5, 50, 1
        )
        distance = trace(cost.matmul(T.detach()))
    # pull matched pairs together, push mismatched pairs apart
    dist_pos = distance.masked_select(itm_labels == 1)
    dist_neg = distance.masked_select(itm_labels == 0)
    ot_loss = (dist_pos.sum() - dist_neg.sum()) / (dist_pos.size(0) + dist_neg.size(0))
    itm_logits = pl_module.itm_score(infer["cls_feats"])
    itm_loss = F.cross_entropy(itm_logits, itm_labels.long())
    ret = {
        "itm_loss": itm_loss,
        "itm_wpa_loss": 0.1 * ot_loss,  # WPA weighted down relative to ITM
        "itm_logits": itm_logits,
        "itm_labels": itm_labels,
    }
    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_itm_loss")(ret["itm_loss"])
    wpa_loss = getattr(pl_module, f"{phase}_itm_wpa_loss")(ret["itm_wpa_loss"])
    acc = getattr(pl_module, f"{phase}_itm_accuracy")(
        ret["itm_logits"], ret["itm_labels"]
    )
    pl_module.log(f"itm/{phase}/loss", loss)
    pl_module.log(f"itm/{phase}/wpa_loss", wpa_loss)
    pl_module.log(f"itm/{phase}/accuracy", acc)
    return ret
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def compute_imgcls(pl_module, batch):
    """Image classification head: cross-entropy over the fused CLS feature."""
    infer = pl_module.infer(batch, mask_text=False, mask_image=False)
    logits = pl_module.img_classifier(infer["cls_feats"])
    labels = torch.tensor(batch["label"]).to(pl_module.device).long()
    ret = {
        "imgcls_loss": F.cross_entropy(logits, labels),
        "imgcls_logits": logits,
        "imgcls_labels": labels,
    }
    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_imgcls_loss")(ret["imgcls_loss"])
    acc = getattr(pl_module, f"{phase}_imgcls_accuracy")(
        ret["imgcls_logits"], ret["imgcls_labels"]
    )
    pl_module.log(f"imgcls/{phase}/loss", loss)
    pl_module.log(f"imgcls/{phase}/accuracy", acc)
    return ret
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def set_slot_tokens(pl_module):
    """Pre-tokenize candidate values for every closed (non-open) slot.

    Populates the module-level ``slot_tokens`` mapping (imported from
    ``pace.utils.glossary``) with tokenized candidate-value tensors so
    DST value matching can reuse them.
    """
    tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    for slot in slot_values_keys:
        # only closed slots that have not been tokenized yet
        # NOTE(review): appending to slot_tokens[slot] right after checking
        # the key is absent only works if slot_tokens is a defaultdict(list)
        # — confirm its type in pace.utils.glossary.
        if slot not in slot_tokens and slot not in open_slots:
            for candidate in slot_values[slot]:
                slot_tokens[slot].append((tokenizer(candidate, return_tensors='pt')['input_ids']))
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def compute_mmconvdst_loss(pl_module, outputs, extras):
    """Combine the MMConv DST losses: gate, span, action, slot-value margin.

    Gate/span/action use cross-entropy against the ground truth in
    `extras`; slot values use a hinge loss (margin 0.2) between the gold
    candidate's score and the strongest competing candidate's score.
    Weighted total: 2*gate + 10*span + action (+ averaged slot loss).
    """
    span_gt = extras['span'].to(pl_module.device)
    gate_gt = extras['gate'].to(pl_module.device)
    action_gt = extras['action'].to(pl_module.device)
    slot_gt = extras['slot_value']
    # span predictions may extend past this batch's max span length
    curr_maxlen = span_gt.shape[1]
    span_pred, gate_pred = outputs['span'][:, :curr_maxlen, :], outputs['gate']
    action_pred, slot_pred = outputs['action'], outputs['slot']
    batch_loss_ga = pl_module.cross_entropy(gate_pred, gate_gt)
    batch_loss_os = pl_module.cross_entropy(span_pred.reshape(-1, span_pred.shape[-1]), span_gt.view(-1))
    batch_loss_ac = pl_module.cross_entropy(action_pred, action_gt)
    batch_loss_sl = 0
    fixed_slot_sample_count = 0
    for i, slot_pd in enumerate(slot_pred):
        value_idx = slot_gt[i].item()
        if value_idx != -1:  # -1 marks "no gold slot value" for this sample
            # find the strongest *wrong* candidate: temporarily suppress
            # the gold index on a detached copy before taking the argmax
            fixed_slot = slot_pd.detach().clone()
            fixed_slot[value_idx] = -1e7
            # hinge: gold must beat the runner-up by a 0.2 margin
            loss_fixed_slot = 0.2 - slot_pd[value_idx] + slot_pd[fixed_slot.argmax()]
            if loss_fixed_slot.item() > 0:
                batch_loss_sl += loss_fixed_slot
                fixed_slot_sample_count += 1
    loss = 2*batch_loss_ga + 10*batch_loss_os + batch_loss_ac
    # only add the slot loss when at least one sample violated the margin
    if fixed_slot_sample_count and bool(batch_loss_sl != 0):
        batch_loss_sl /= fixed_slot_sample_count
        loss += batch_loss_sl
    return {"mmconv_dst_loss" : loss}
def compute_dst(pl_module, batch):
    """MMConv dialogue state tracking: span, gate, action and slot value.

    Runs the backbone once per batch, then predicts (a) answer spans over
    the text, (b) a gate class and (c) an action class from the pooled
    CLS feature, and (d) matches the pooled feature against each closed
    slot's candidate values by cosine similarity (candidate encodings are
    memoized in pl_module.candidate_value_cache).
    """
    # loss_fn = nn.CrossEntropyLoss()
    ignore_index = pl_module.cross_entropy.ignore_index
    ## ====== prepare ====== ##
    # remap the dataset's -1 "no label" convention to the CE ignore index
    batch['span'][batch['span'] == -1] = ignore_index
    for i, s in enumerate(batch['span']):
        # NOTE(review): this overwrites the *entire* row with ignore_index
        # whenever its non-ignored entries sum to 0 — confirm intended.
        if s[s != ignore_index].sum() == 0:
            batch['span'][i] = ignore_index
    batch['action'][batch['action'] == -1] = ignore_index
    extras = {'id': batch['id'], 'input_ids_len': batch['input_ids_len'], 'span': batch['span'],
              'gate': batch['gate'], 'action': batch['action'], 'slot_value': batch['slot value']}
    ## ====== infer ====== ##
    infer = pl_module.infer(batch, mask_text=False, mask_image=False)
    hidden_states = pl_module.dropout(infer["text_feats"])
    pooled_output = pl_module.dropout(infer["cls_feats"])
    span = pl_module.classifier_span(hidden_states)
    gate = pl_module.classifier_gate(pooled_output)
    action = pl_module.classifier_action(pooled_output)
    cosine_matching = []
    cache = True  # memoize candidate-value encodings across batches
    slot = batch["slot"]
    for i in range(len(slot)):
        candidate_tokens = slot_tokens[slot_values_keys[slot[i]]]
        cosine_matching.append(torch.zeros((len(candidate_tokens),)).to(pl_module.device))
        for j, candidate_token in enumerate(candidate_tokens):
            # token-id tuple serves as the cache key
            tuple_token = tuple(candidate_token.squeeze(0).numpy())
            if cache and tuple_token in pl_module.candidate_value_cache:
                candidate_output = pl_module.candidate_value_cache[tuple_token]
            else:
                candidate_token = candidate_token.to(pl_module.device)
                # NOTE(review): token_type_ids_curr is built but never
                # passed to pure_text_infer — confirm whether it is needed.
                token_type_ids_curr = torch.ones_like(candidate_token)
                token_type_ids_curr[..., 0] = 0
                # encode the candidate value text alone (no gradients)
                with torch.no_grad():
                    candidate_output = pl_module.pure_text_infer(candidate_token)["cls_feats"]
                if cache:
                    pl_module.candidate_value_cache[tuple_token] = candidate_output
            # cosine similarity between the dialogue CLS and the candidate
            cosine_matching[i][j] = pooled_output[i].unsqueeze(0).mm(candidate_output.t()) / (pooled_output[i].norm() * candidate_output.norm())
    outputs = {'span': span, 'gate': gate, 'action': action, 'slot': cosine_matching}
    results = make_results(ignore_index, outputs, extras)
    ret = compute_mmconvdst_loss(pl_module, outputs, extras)
    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_dst_loss")(ret["mmconv_dst_loss"])
    score = getattr(pl_module, f"{phase}_dst_DSTScore")(results)
    pl_module.log(f"mmconv_dst/{phase}/loss", loss)
    for m in ["ac", "os", "sl", "joint", "ga"]:
        pl_module.log(f"dst/{phase}/score_{m}", score[m])
    return ret
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def compute_itm_intent(pl_module, batch):
    """Image-text matching objective with one positive and ``draw_false_text``
    negative text candidates per image.

    Label layout per sample (row of ``intent_labels``): column 0 is the
    positive pair (label 1), columns 1..false_len are negatives (label 0),
    and columns past the sample's ``ignore_idx`` are set to -100 so
    cross-entropy skips them.

    Returns a dict with the loss, the flattened logits, and the flattened
    labels; also logs loss/accuracy under the current train/val phase.
    """
    is_training_phase = pl_module.training
    _bs, _c, _h, _w = batch["image"][0].shape
    false_len = pl_module.hparams.config["draw_false_text"]
    intent_labels = torch.zeros(_bs, false_len+1).to(pl_module.device)
    for i, ignore_idx in enumerate(batch["ignore_idx"]):
        # -100 marks padding candidates that F.cross_entropy must ignore
        intent_labels[i, ignore_idx+1:] = -100
    intent_labels[:, 0] = 1
    # Stack the negative candidates along a new "candidate" dim ...
    text_ids = torch.stack(
        [batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1
    )
    text_masks = torch.stack(
        [batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1
    )
    text_labels = torch.stack(
        [batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1
    )
    # ... then prepend the positive text at candidate slot 0 (matches label layout).
    text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1)
    text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1)
    text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1)
    # text_ids = torch.cat([text_ids[:, :pos_index], batch["text_ids"].unsqueeze(1), text_ids[:, pos_index:]], dim=1)
    # text_masks = torch.cat([text_masks[:, :pos_index], batch["text_masks"].unsqueeze(1), text_masks[:, pos_index:]], dim=1)
    # text_labels = torch.cat([text_labels[:,:pos_index], batch["text_labels"].unsqueeze(1), text_labels[:,pos_index:]], dim=1)
    # Each image is repeated once per text candidate, then (bs, fs) is
    # flattened so all pairs go through the encoder as one big batch.
    images = batch["image"][0].unsqueeze(1).expand(_bs, false_len + 1, _c, _h, _w)
    infer = pl_module.infer(
        {
            "image": [rearrange(images, "bs fs c h w -> (bs fs) c h w")],
            "text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
            "text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
            "text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl")
        }
    )
    intent_logits = pl_module.itm_score(infer["cls_feats"])
    # Class weights up-weight the (rarer) positive class 5x.
    weight = torch.tensor([1.0, 5.0]).to(pl_module.device)
    intent_labels = intent_labels.reshape(-1, 1).squeeze(1)
    intent_loss = F.cross_entropy(intent_logits, intent_labels.long(), weight)
    ret = {
        "intent_loss": intent_loss,
        "intent_logits": intent_logits,
        "intent_labels": intent_labels,
    }
    phase = "train" if pl_module.training else "val"
    loss = getattr(pl_module, f"{phase}_intent_loss")(ret["intent_loss"])
    acc = getattr(pl_module, f"{phase}_intent_accuracy")(
        ret["intent_logits"], ret["intent_labels"]
    )
    pl_module.log(f"intent/{phase}/loss", loss)
    pl_module.log(f"intent/{phase}/accuracy", acc)
    # intent_test_wrapup([ret])
    return ret
163,813 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def intent_test_wrapup(outs):
    """Aggregate intent predictions across batches and print macro P/R/F1.

    Entries whose label is -100 (masked/padding candidates) are dropped
    before scoring. Assumes ``torch.distributed`` is initialized: barriers
    bracket the metric computation so ranks stay in lockstep.
    """
    predictions, labels = [], []
    for batch_out in outs:
        preds = batch_out["intent_logits"].argmax(dim=-1)
        gold = batch_out["intent_labels"]
        keep = gold != -100
        preds, gold = preds[keep], gold[keep]
        assert len(preds) == len(gold)
        predictions.extend(preds.tolist())
        labels.extend(gold.tolist())
    torch.distributed.barrier()
    precision = precision_score(labels, predictions, average="macro")
    recall = recall_score(labels, predictions, average="macro")
    f1 = f1_score(labels, predictions, average="macro")
    print("PRECISION : ", round(precision, 4), " RECALL : ", round(recall, 4), " F1-SCORE : ", round(f1, 4))
    torch.distributed.barrier()
163,814 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def compute_irtr(pl_module, batch):
    """Image-text retrieval loss: rank the positive caption above
    ``draw_false_text`` negatives for each image.

    All (false_len + 1) text candidates are paired with the same image and
    scored in one encoder pass; the ground truth is at candidate index 0,
    so the cross-entropy target is the all-zeros vector.

    Returns ``{"irtr_loss": ...}`` and logs the loss for the current phase.
    """
    is_training_phase = pl_module.training
    _bs, _c, _h, _w = batch["image"][0].shape
    false_len = pl_module.hparams.config["draw_false_text"]
    # Stack negatives along a new candidate dim, then prepend the positive
    # text at slot 0 (slot 0 == correct answer for the CE target below).
    text_ids = torch.stack(
        [batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1
    )
    text_masks = torch.stack(
        [batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1
    )
    text_labels = torch.stack(
        [batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1
    )
    text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1)
    text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1)
    text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1)
    # Repeat each image once per candidate and flatten (bs, fs) for the encoder.
    images = batch["image"][0].unsqueeze(1).expand(_bs, false_len + 1, _c, _h, _w)
    infer_input = {
        "image": [rearrange(images, "bs fs c h w -> (bs fs) c h w")],
        "text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
        "text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
        "text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
    }
    if pl_module.hparams.config["use_segment_ids"] and false_len > 0:
        # Segment ids follow the same (positive first, then negatives) layout.
        text_segment_ids = torch.stack(
            [batch[f"false_text_{i}_segment_ids"] for i in range(false_len)], dim=1
        )
        text_segment_ids = torch.cat([batch["text_segment_ids"].unsqueeze(1) , text_segment_ids], dim=1)
        infer_input["text_segment_ids"] = rearrange(text_segment_ids , "bs fs tl -> (bs fs) tl")
    infer = pl_module.infer(
        infer_input
    )
    # One matching score per (image, candidate) pair, reshaped back to (bs, fs).
    score = pl_module.rank_output(infer["cls_feats"])[:, 0]
    score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
    # Ground truth is always candidate 0.
    answer = torch.zeros(_bs).to(score).long()
    irtr_loss = F.cross_entropy(score, answer)
    ret = {
        "irtr_loss": irtr_loss,
    }
    phase = "train" if pl_module.training else "val"
    irtr_loss = getattr(pl_module, f"{phase}_irtr_loss")(ret["irtr_loss"])
    pl_module.log(f"irtr/{phase}/irtr_loss", irtr_loss)
    return ret
163,815 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def compute_mmdial_irtr_recall(pl_module):
    """Image-retrieval recall (R@1/5/10) for MMDial-style evaluation.

    Each text query is scored against its ground-truth image plus the
    per-query negative images listed in ``negs_imgs``. The positive score
    is concatenated at index 0 of each score row, so recall@k reduces to
    checking whether index 0 appears in the top-k.

    Returns ``(ir_r1, ir_r5, ir_r10, -1, -1, -1)``; the text-retrieval
    slots are unused for this task.
    """
    text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset()
    image_mapper = text_dset.load_evalset()
    text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    text_loader = torch.utils.data.DataLoader(
        text_dset,
        batch_size=64,
        num_workers=pl_module.hparams.config["num_workers"],
        pin_memory=True,
        collate_fn=functools.partial(
            text_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )
    rank_scores = list()
    text_preload = list()
    # Move all text batches (and their GT images / negative-image ids) to
    # the device once, up front.
    for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"):
        text_preload.append(
            {
                "text_ids": _b["text_ids"].to(pl_module.device),
                "text_masks": _b["text_masks"].to(pl_module.device),
                "text_labels": _b["text_labels"].to(pl_module.device),
                "img_index": _b["img_index"],
                "image": _b["image"][0].to(pl_module.device),
                "negs_imgs": _b["negs_imgs"]
            }
        )
    for tbatch in tqdm.tqdm(text_preload, desc="text batch loop"):
        for i in range(len(tbatch["img_index"])):
            # Re-batch a single query (batch dim of 1) out of the prefetched batch.
            txt_batch = {
                "text_ids": tbatch["text_ids"][i].unsqueeze(0),
                "text_masks": tbatch["text_masks"][i].unsqueeze(0),
                "text_labels": tbatch["text_labels"][i].unsqueeze(0),
                "img_index": [tbatch["img_index"][i]],
                "image": tbatch["image"][i].unsqueeze(0),
                "negs_imgs": tbatch["negs_imgs"][i]
            }
            # Ground Truth Image
            (pie, pim, _, _) = pl_module.transformer.visual_embed(
                txt_batch["image"],
                max_image_len=pl_module.hparams.config["max_image_len"],
                mask_it=False,
            )
            with torch.cuda.amp.autocast():
                pos_score = pl_module.rank_output(
                    pl_module.infer(
                        {
                            "text_ids": txt_batch["text_ids"],
                            "text_masks": txt_batch["text_masks"],
                            "text_labels": txt_batch["text_labels"],
                        },
                        image_embeds=pie,
                        image_masks=pim,
                    )["cls_feats"]
                )[:, 0]
            # Negetive Images
            negs_images = txt_batch["negs_imgs"]
            negs_images_list = list()
            # NOTE(review): this inner `i` shadows the outer sample index `i`.
            # Harmless here because the outer `i` is not read again below,
            # but worth renaming to avoid confusion.
            for i in negs_images:
                if i in image_mapper:
                    negs_images_list.append(image_mapper[i])
            if len(negs_images_list) < len(negs_images):
                # Some negatives are missing from the eval set: pad by
                # repeating entries from the head so the count matches.
                # print("negs_images_list : ", len(negs_images_list))
                negs_images_list = negs_images_list + negs_images_list[0:(len(negs_images)-len(negs_images_list))]
            image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
                image_only=True,
                image_list=negs_images_list,
            )
            image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
            dist_sampler = DistributedSampler(image_dset, shuffle=False)
            image_loader = torch.utils.data.DataLoader(
                image_dset,
                batch_size=64,
                num_workers=pl_module.hparams.config["num_workers"],
                sampler=dist_sampler,
                pin_memory=True,
                collate_fn=functools.partial(
                    image_dset.collate,
                    mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
                ),
            )
            # Precompute visual embeddings for every negative image.
            image_preload = list()
            for _b in image_loader:
                (ie, im, _, _) = pl_module.transformer.visual_embed(
                    _b["image"][0].to(pl_module.device),
                    max_image_len=pl_module.hparams.config["max_image_len"],
                    mask_it=False,
                )
                image_preload.append((ie, im, _b["raw_index"][0]))
            neg_batch_score = list()
            for img_batch in image_preload:
                ie, im, _iid = img_batch
                b, _, _ = ie.shape
                _, l = txt_batch["text_ids"].shape
                # Broadcast the single query text across the image batch.
                text_ids = txt_batch["text_ids"].expand(b, l)
                text_masks = txt_batch["text_masks"].expand(b, l)
                text_labels = txt_batch["text_labels"].expand(b, l)
                with torch.cuda.amp.autocast():
                    score = pl_module.rank_output(
                        pl_module.infer(
                            {
                                "text_ids": text_ids,
                                "text_masks": text_masks,
                                "text_labels": text_labels,
                            },
                            image_embeds=ie,
                            image_masks=im,
                        )["cls_feats"]
                    )[:, 0]
                neg_batch_score.append(score)
            neg_batch_score = torch.cat(neg_batch_score)
            # Positive score first -> ground truth lives at column 0.
            img_batch_score = torch.cat([pos_score, neg_batch_score])
            rank_scores.append(img_batch_score.cpu().tolist())
    torch.distributed.barrier()
    gather_rank_scores = all_gather(rank_scores)
    scores = torch.tensor(gather_rank_scores)
    scores = scores.view(scores.shape[1], -1)
    topk10 = scores.topk(10, dim=1)
    topk5 = scores.topk(5, dim=1)
    topk1 = scores.topk(1, dim=1)
    topk10_iids = topk10.indices
    topk5_iids = topk5.indices
    topk1_iids = topk1.indices
    # Recall@k: does column 0 (the positive) appear among the top-k indices?
    ir_r1 = (0==topk1_iids).float().max(dim=1)[0].mean()
    ir_r5 = (0==topk5_iids).float().max(dim=1)[0].mean()
    ir_r10 = (0==topk10_iids).float().max(dim=1)[0].mean()
    return (ir_r1, ir_r5, ir_r10, -1,-1,-1)
def compute_old_irtr_recall(pl_module):
    """Full-corpus bidirectional retrieval recall (R@1/5/10 both ways).

    Every image in the validation set is scored against every text; the
    resulting (num_images x num_texts) score matrix yields text retrieval
    (top-k over dim=1) and image retrieval (top-k over dim=0), matched by
    image id.

    Returns ``(ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)``.
    """
    text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset()
    text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    text_loader = torch.utils.data.DataLoader(
        text_dset,
        batch_size=64,
        num_workers=pl_module.hparams.config["num_workers"],
        pin_memory=True,
        collate_fn=functools.partial(
            text_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )
    image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
        image_only=True
    )
    image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    # Images are sharded across ranks; scores are all_gather-ed at the end.
    dist_sampler = DistributedSampler(image_dset, shuffle=False)
    image_loader = torch.utils.data.DataLoader(
        image_dset,
        batch_size=1,
        num_workers=pl_module.hparams.config["num_workers"],
        sampler=dist_sampler,
        pin_memory=True,
        collate_fn=functools.partial(
            image_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )
    # Prefetch all text batches to the device once.
    text_preload = list()
    for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"):
        text_preload.append(
            {
                "text_ids": _b["text_ids"].to(pl_module.device),
                "text_masks": _b["text_masks"].to(pl_module.device),
                "text_labels": _b["text_labels"].to(pl_module.device),
                "img_index": _b["img_index"],
            }
        )
    # tiids: the ground-truth image id of every text, in text order.
    tiids = list()
    for pre in text_preload:
        tiids += pre["img_index"]
    tiids = torch.tensor(tiids)
    # Precompute visual embeddings for each (rank-local) image.
    image_preload = list()
    for _b in tqdm.tqdm(image_loader, desc="image prefetch loop"):
        (ie, im, _, _) = pl_module.transformer.visual_embed(
            _b["image"][0].to(pl_module.device),
            max_image_len=pl_module.hparams.config["max_image_len"],
            mask_it=False,
        )
        image_preload.append((ie, im, _b["img_index"][0]))
    rank_scores = list()
    rank_iids = list()
    # Score each image against every text batch (image embedding is
    # broadcast over the text batch).
    for img_batch in tqdm.tqdm(image_preload, desc="rank loop"):
        _ie, _im, _iid = img_batch
        _, l, c = _ie.shape
        img_batch_score = list()
        for txt_batch in text_preload:
            fblen = len(txt_batch["text_ids"])
            ie = _ie.expand(fblen, l, c)
            im = _im.expand(fblen, l)
            with torch.cuda.amp.autocast():
                score = pl_module.rank_output(
                    pl_module.infer(
                        {
                            "text_ids": txt_batch["text_ids"],
                            "text_masks": txt_batch["text_masks"],
                            "text_labels": txt_batch["text_labels"],
                        },
                        image_embeds=ie,
                        image_masks=im,
                    )["cls_feats"]
                )[:, 0]
            img_batch_score.append(score)
        img_batch_score = torch.cat(img_batch_score)
        rank_scores.append(img_batch_score.cpu().tolist())
        rank_iids.append(_iid)
    torch.distributed.barrier()
    # Collect the full (num_images x num_texts) score matrix across ranks.
    gather_rank_scores = all_gather(rank_scores)
    gather_rank_iids = all_gather(rank_iids)
    iids = torch.tensor(gather_rank_iids)
    iids = iids.view(-1)
    scores = torch.tensor(gather_rank_scores)
    scores = scores.view(len(iids), -1)
    # Text retrieval: for each image row, are any of the top-k texts
    # associated with this image's id?
    topk10 = scores.topk(10, dim=1)
    topk5 = scores.topk(5, dim=1)
    topk1 = scores.topk(1, dim=1)
    topk10_iids = tiids[topk10.indices]
    topk5_iids = tiids[topk5.indices]
    topk1_iids = tiids[topk1.indices]
    tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
    tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
    tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
    # Image retrieval: for each text column, is the ground-truth image id
    # among the top-k images?
    topk10 = scores.topk(10, dim=0)
    topk5 = scores.topk(5, dim=0)
    topk1 = scores.topk(1, dim=0)
    topk10_iids = iids[topk10.indices]
    topk5_iids = iids[topk5.indices]
    topk1_iids = iids[topk1.indices]
    ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
    ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
    ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()
    return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
def compute_tr_recall_for_target_answer_set(pl_module):
    """Text-retrieval recall over a fixed candidate answer set (100 options
    per dialogue: 1 ground truth + 99 distractors), as used by VisDial /
    ImageChat style evaluation.

    The ground truth is placed at option index 0 by preprocessing, so
    recall@k checks whether index 0 appears in the top-k scores.

    Returns ``(0, 0, 0, tr_r1, tr_r5, tr_r10)`` — image-retrieval slots
    are not applicable here.
    """
    text_dset = pl_module.trainer.datamodule.dms[0].val_dataset
    is_test = pl_module.hparams.config["test_only"]
    if is_test: text_dset = pl_module.trainer.datamodule.dms[0].test_dataset
    text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    # 99 distractors + 1 ground truth = 100 candidate answers per sample.
    text_dset.draw_false_text = 99
    mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator
    # image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
    #     image_only=True
    # )
    # image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
    dist_sampler = DistributedSampler(text_dset, shuffle=False)
    option_len = text_dset.draw_false_text + 1
    text_loader = torch.utils.data.DataLoader(
        text_dset,
        batch_size = 1 ,#pl_module.hparams.config["batch_size"],
        num_workers=pl_module.hparams.config["num_workers"],
        sampler=dist_sampler,
        pin_memory=True,
        collate_fn=functools.partial(
            text_dset.collate,
            mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
        ),
    )
    rank_scores = list()
    rank_iids = list()
    for dict_batch in tqdm.tqdm(text_loader, desc="rank loop"):
        _bs,_c,_h,_w = dict_batch["image"][0].shape
        # Stack distractors along a candidate dim, then prepend the ground
        # truth at option slot 0.
        text_ids = torch.stack(
            [dict_batch[f"false_text_{i}_ids"] for i in range(option_len-1)], dim=1
        )
        text_masks = torch.stack(
            [dict_batch[f"false_text_{i}_masks"] for i in range(option_len-1)], dim=1
        )
        text_labels = torch.stack(
            [dict_batch[f"false_text_{i}_labels"] for i in range(option_len-1)], dim=1
        )
        text_ids = torch.cat([dict_batch["text_ids"].unsqueeze(1), text_ids], dim=1).to(pl_module.device)
        text_masks = torch.cat([dict_batch["text_masks"].unsqueeze(1), text_masks], dim=1).to(pl_module.device)
        text_labels = torch.cat([dict_batch["text_labels"].unsqueeze(1), text_labels], dim=1).to(pl_module.device)
        # Repeat the image once per option and flatten for a single encoder pass.
        images = dict_batch["image"][0].unsqueeze(1).expand(_bs,option_len,_c,_h,_w).to(pl_module.device)
        infer_input = {
            "image":[rearrange(images , "bs ol c h w -> (bs ol) c h w")],
            "text_ids":rearrange(text_ids,"bs ol tl -> (bs ol) tl"),
            "text_masks":rearrange(text_masks,"bs ol tl -> (bs ol) tl"),
            "text_labels":rearrange(text_labels,"bs ol tl -> (bs ol) tl")
        }
        if pl_module.hparams.config["use_segment_ids"] and option_len > 1:
            text_segment_ids = torch.stack(
                [dict_batch[f"false_text_{i}_segment_ids"] for i in range(option_len-1)], dim=1
            )
            text_segment_ids = torch.cat([dict_batch["text_segment_ids"].unsqueeze(1) , text_segment_ids], dim=1).to(pl_module.device)
            infer_input["text_segment_ids"] = rearrange(text_segment_ids , "bs fs tl -> (bs fs) tl")
        infer = pl_module.infer(infer_input)
        score = pl_module.rank_output(infer["cls_feats"])[:, 0]
        score = rearrange(score , "(bs ol) -> bs ol", bs=_bs, ol=option_len)
        rank_scores.extend(score.cpu().tolist())
        rank_iids.extend(dict_batch["raw_index"])
    torch.distributed.barrier()
    gather_rank_scores = all_gather(rank_scores)
    gather_rank_iids = all_gather(rank_iids)
    iids = torch.tensor(gather_rank_iids)
    iids = iids.view(-1)
    scores = torch.tensor(gather_rank_scores)
    scores = scores.view(len(iids), -1)
    topk10 = scores.topk(10, dim=1)
    topk5 = scores.topk(5, dim=1)
    topk1 = scores.topk(1, dim=1)
    gt_ids = torch.zeros(len(scores))
    # Preprocessing already placed the ground truth at the head of the
    # options, so recall@k only needs to check whether 0 appears in the top-k.
    tr_r10 = (gt_ids.unsqueeze(1) == topk10.indices).float().max(dim=1)[0].mean()
    tr_r5 = (gt_ids.unsqueeze(1) == topk5.indices).float().max(dim=1)[0].mean()
    tr_r1 = (gt_ids.unsqueeze(1) == topk1.indices).float().max(dim=1)[0].mean()
    rank = torch.distributed.get_rank()
    if is_test and 0 == rank:
        # A few ImageChat images are missing; for a fair comparison with
        # prior work, divide by the original total number of dialogues.
        task = pl_module.hparams.config["datasets"][0]
        if task == "imagechat":
            tr_r10 = (gt_ids.unsqueeze(1) == topk10.indices).float().max(dim=1)[0].sum()/29991
            tr_r5 = (gt_ids.unsqueeze(1) == topk5.indices).float().max(dim=1)[0].sum()/29991
            tr_r1 = (gt_ids.unsqueeze(1) == topk1.indices).float().max(dim=1)[0].sum()/29991
    # no need for ir metrics for text retrieval
    return (torch.tensor(0), torch.tensor(0), torch.tensor(0) , tr_r1, tr_r5, tr_r10)
def compute_irtr_recall(pl_module):
    """Dispatch to the recall routine matching the configured dataset.

    visdial / imagechat score only a fixed candidate answer set (instead
    of all answers), and mmdial_caps has its own image-retrieval protocol;
    everything else uses the generic full-corpus recall computation.
    """
    active = pl_module.hparams.config['datasets']
    if 'visdial' in active or 'imagechat' in active:
        return compute_tr_recall_for_target_answer_set(pl_module)
    if 'mmdial_caps' in active:
        return compute_mmdial_irtr_recall(pl_module)
    return compute_old_irtr_recall(pl_module)
163,816 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def init_weights(module):
    """BERT-style weight initialization.

    Linear/Embedding weights ~ N(0, 0.02); LayerNorm gets unit weight and
    zero bias; any Linear bias is zeroed. Intended for ``model.apply(...)``.
    """
    is_linear = isinstance(module, nn.Linear)
    if is_linear or isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=0.02)
    elif isinstance(module, nn.LayerNorm):
        module.weight.data.fill_(1.0)
        module.bias.data.zero_()
    if is_linear and module.bias is not None:
        module.bias.data.zero_()
163,817 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def arc_test_step(pl_module, batch, output):
    """Identity hook for the ARC test step: pass the per-batch output
    through unchanged so ``arc_test_wrapup`` can aggregate it later."""
    return output
163,818 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def arc_test_wrapup(outs, caplen, model_name):
    """Gather generated captions from all ranks into one sorted JSON file.

    Each rank writes its (image_id, caption) pairs to a temporary
    per-rank JSON; after a barrier, rank 0 merges every rank's file into
    ``result/arc/coco_cap_{model_name}_len{caplen}.json`` sorted by
    image_id. Each rank removes its own temp file at the end.
    """
    rank = torch.distributed.get_rank()
    iids, captions = list(), list()
    for out in outs:
        iids += out["iid"]
        captions += out["captions"]
    rets = list()
    for iid, caption in zip(iids, captions):
        rets.append({"image_id": iid, "caption": caption})
    # Per-rank temp file; merged by rank 0 below.
    with open(f"coco_cap_len{caplen}_{rank}.json", "w") as fp:
        json.dump(rets, fp, indent=4)
    torch.distributed.barrier()
    if rank == 0:
        jsons = list()
        paths = list(glob.glob(f"coco_cap_len{caplen}_*.json"))
        for path in paths:
            with open(path, "r") as fp:
                jsons += json.load(fp)
        os.makedirs("result/arc", exist_ok=True)
        jsons = sorted(jsons, key=lambda x: x["image_id"])
        with open(f"result/arc/coco_cap_{model_name}_len{caplen}.json", "w") as fp:
            json.dump(jsons, fp, indent=4)
    # Barrier before cleanup so rank 0 has finished reading every temp file.
    torch.distributed.barrier()
    os.remove(f"coco_cap_len{caplen}_{rank}.json")
163,819 | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import random
from collections import defaultdict
from torch.utils.data.distributed import DistributedSampler
from torch.nn.modules.loss import _Loss
from einops import rearrange
from sklearn.metrics import precision_score, recall_score, f1_score
from pace.modules.dist_utils import all_gather
from pace.utils.glossary import slot_tokens, slot_values_keys, open_slots, slot_values
from pace.utils.write_mmconv_dst import make_results
from pace.utils.eval_mmconv_rg import evaluate_mmconvrg
def rg_test_step(pl_module, batch):
    """Response-generation test step: sample continuations for each prompt.

    Left-pads the batch of prompts, generates with sampling (temperature /
    top-k / top-p from the config), truncates each decoded text just after
    the first ``stop_token``, and splices the generated suffix back onto
    the original prompt string.

    Returns ``{"preds": ..., "labels": batch["text"], "ids": batch["ids"]}``.
    """
    stop_token = pl_module.hparams.config["stop_token"]
    # GPT-2-style tokenizers have no pad token; reuse BOS and left-pad so
    # generation continues from the end of each prompt.
    pl_module.tokenizer.pad_token = pl_module.tokenizer.bos_token
    pl_module.tokenizer.padding_side='left'
    preds = []
    batch_prompt = pl_module.tokenizer(batch["prompt"], add_special_tokens=True, padding=True, return_tensors="pt")["input_ids"].to(pl_module.device)
    # NOTE(review): ``pl_module.transforms`` — elsewhere in this file the
    # backbone is accessed as ``pl_module.transformer``; confirm that
    # ``transforms`` is the intended attribute holding the generator model.
    output_sequences = pl_module.transforms.generate(
        input_ids=batch_prompt,
        max_length=800,
        pad_token_id=50256,
        temperature=pl_module.hparams.config["temperature"],
        top_k=pl_module.hparams.config["top_k"],
        top_p=pl_module.hparams.config["top_p"],
        repetition_penalty=1.0,
        do_sample=True,
        num_return_sequences=1,
    )
    if len(output_sequences.shape) > 2:
        output_sequences.squeeze_()
    for gen_idx, generated_sequence in enumerate(output_sequences):
        generated_sequence = generated_sequence.tolist()
        # Decode text
        text = pl_module.tokenizer.decode(
            generated_sequence, clean_up_tokenization_spaces=True
        )
        # Remove all text after the stop token
        stop_idx = text.find(stop_token)+len(stop_token) if stop_token else None
        text = text[:stop_idx]
        # Keep the original (un-tokenized) prompt and append only the part
        # of the decoded text that comes after the decoded prompt.
        total_sequence = (
            batch["prompt"][gen_idx]
            + text[
                len(
                    pl_module.tokenizer.decode(
                        batch_prompt[gen_idx], clean_up_tokenization_spaces=True
                    )
                ) :
            ]
        )
        preds.append(total_sequence)
    return {"preds" : preds, "labels" : batch["text"], "ids": batch["ids"]}
163,820 | import torch
import random
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from pace.modules.dist_utils import all_gather
from pace.modules.objectives import compute_irtr_recall
from pace.gadgets.my_metrics import Accuracy, VQAScore, Scalar, MMConvDSTScore
def set_metrics(pl_module):
    """Attach train/val metric trackers for every active task.

    A task is active when its weight in ``loss_names`` is at least 1.
    Every active task gets a loss Scalar; most also get an Accuracy,
    "dst" gets an MMConvDSTScore, and "itm" additionally tracks a WPA loss.
    """
    for split in ["train", "val"]:
        for task, weight in pl_module.hparams.config["loss_names"].items():
            if weight < 1:
                continue
            if task == "irtr":
                setattr(pl_module, f"{split}_irtr_loss", Scalar())
                continue
            if task in ("mppd", "mpfr", "rg"):
                # Loss-only tasks: no accuracy metric.
                setattr(pl_module, f"{split}_{task}_loss", Scalar())
                continue
            if task == "dst":
                setattr(pl_module, f"{split}_{task}_DSTScore", MMConvDSTScore())
                setattr(pl_module, f"{split}_{task}_loss", Scalar())
                continue
            # itm / intent / any other task: accuracy + loss.
            setattr(pl_module, f"{split}_{task}_accuracy", Accuracy())
            setattr(pl_module, f"{split}_{task}_loss", Scalar())
            if task == "itm":
                setattr(pl_module, f"{split}_{task}_wpa_loss", Scalar())
163,821 | import torch
import random
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from pace.modules.dist_utils import all_gather
from pace.modules.objectives import compute_irtr_recall
from pace.gadgets.my_metrics import Accuracy, VQAScore, Scalar, MMConvDSTScore
def epoch_wrapup(pl_module):
    """End-of-epoch hook: optionally run retrieval recall, then log and
    reset every active task's epoch metrics.

    Accumulates ``the_metric`` — the scalar used for checkpoint selection —
    from recall sums (when ``get_recall_metric`` is set) plus each task's
    epoch value (accuracy, DST joint score, or negated loss for "rg").
    """
    phase = "train" if pl_module.training else "val"
    the_metric = 0
    if pl_module.hparams.config["get_recall_metric"] and not pl_module.training:
        (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) = compute_irtr_recall(pl_module)
        print((ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10), pl_module.global_step)
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r1", ir_r1, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r5", ir_r5, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/ir_r10", ir_r10, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r1", tr_r1, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r5", tr_r5, pl_module.global_step
        )
        pl_module.logger.experiment.add_scalar(
            "recalls/tr_r10", tr_r10, pl_module.global_step
        )
        datasets = pl_module.hparams.config['datasets']
        #only calculate matching scores for targeted answer set, instead for all answers
        if 'imagechat' in datasets:
            # imagechat is text retrieval only (see compute_tr_recall_for_target_answer_set)
            the_metric += tr_r1.item() + tr_r5.item() + tr_r10.item()
        else:
            the_metric += ir_r1.item()+ir_r5.item()+ir_r10.item()
    for loss_name, v in pl_module.hparams.config["loss_names"].items():
        if v < 1:
            # Inactive task: no metrics were registered for it.
            continue
        value = 0
        if loss_name == "irtr":
            if not pl_module.hparams.config["get_recall_metric"]:
                value = getattr(pl_module, f"{phase}_irtr_loss").compute()
            pl_module.log(
                f"{loss_name}/{phase}/irtr_loss_epoch",
                getattr(pl_module, f"{phase}_irtr_loss").compute(),
            )
            getattr(pl_module, f"{phase}_irtr_loss").reset()
        elif loss_name == "mppd" or loss_name == "mpfr":
            # Loss-only tasks: contribute nothing to the_metric (value stays 0).
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        elif loss_name == "itm":
            value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
            pl_module.log(
                f"{loss_name}/{phase}/wpa_loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_wpa_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_wpa_loss").reset()
        elif loss_name == "dst":
            # DST score is a dict of sub-metrics; "joint" is the headline number.
            value = getattr(pl_module, f"{phase}_dst_DSTScore").compute()
            print(phase)
            print(value)
            for m in ["ac", "os", "sl", "joint", "ga"]:
                pl_module.log(f"{loss_name}/{phase}/dstscore_epoch_{m}", value[m])
            value = value["joint"]
            getattr(pl_module, f"{phase}_dst_DSTScore").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        elif loss_name == "rg":
            # Lower loss is better, so negate it for the maximized metric.
            value = - (getattr(pl_module, f"{phase}_{loss_name}_loss").compute())
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        else:
            value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
            pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
            getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
            pl_module.log(
                f"{loss_name}/{phase}/loss_epoch",
                getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
            )
            getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
        # TODO
        the_metric += value
    # print(the_metric)
    pl_module.log(f"{phase}/the_metric", the_metric)
163,822 | import torch
import random
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from pace.modules.dist_utils import all_gather
from pace.modules.objectives import compute_irtr_recall
from pace.gadgets.my_metrics import Accuracy, VQAScore, Scalar, MMConvDSTScore
def check_non_acc_grad(pl_module):
    """Return True when the token-type embedding has accumulated no
    gradient: either ``.grad`` is still None or it sums to zero."""
    grad = pl_module.token_type_embeddings.weight.grad
    if grad is None:
        return True
    return (grad.sum() == 0).item()
163,823 | import torch
import random
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from pace.modules.dist_utils import all_gather
from pace.modules.objectives import compute_irtr_recall
from pace.gadgets.my_metrics import Accuracy, VQAScore, Scalar, MMConvDSTScore
def set_task(pl_module):
    """Record the active tasks on the module: every entry of
    ``loss_names`` whose weight is at least 1."""
    loss_names = pl_module.hparams.config["loss_names"]
    pl_module.current_tasks = [
        name for name, weight in loss_names.items() if weight >= 1
    ]
    return
163,824 | import torch
import random
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from pace.modules.dist_utils import all_gather
from pace.modules.objectives import compute_irtr_recall
from pace.gadgets.my_metrics import Accuracy, VQAScore, Scalar, MMConvDSTScore
def set_schedule(pl_module):
    """Build the optimizer and LR scheduler for Lightning's
    ``configure_optimizers``.

    Parameters are split into four groups — (decay vs no-decay) x
    (base lr vs head lr) — where the task heads in ``head_names`` use
    ``lr * lr_mult`` and norm/bias parameters get zero weight decay.
    Supports adamw/adam/sgd and cosine/polynomial warmup schedules.

    Returns ``([optimizer], [{"scheduler": ..., "interval": "step"}])``.
    """
    lr = pl_module.hparams.config["learning_rate"]
    wd = pl_module.hparams.config["weight_decay"]
    # Parameter-name substrings excluded from weight decay (biases and all norms).
    no_decay = [
        "bias",
        "LayerNorm.bias",
        "LayerNorm.weight",
        "norm.bias",
        "norm.weight",
        "norm1.bias",
        "norm1.weight",
        "norm2.bias",
        "norm2.weight",
        "image_norm.bias",
        "image_norm.weight",
        "sentence_norm.bias",
        "sentence_norm.weight",
        "caps_norm.bias",
        "caps_norm.weight",
        "generation_norm.bias",
        "generation_norm.weight"
    ]
    # Task-head modules trained with a multiplied learning rate.
    head_names = ["classifier_gate", "classifier_span", "classifier_action"]
    lr_mult = pl_module.hparams.config["lr_mult"]
    end_lr = pl_module.hparams.config["end_lr"]
    decay_power = pl_module.hparams.config["decay_power"]
    optim_type = pl_module.hparams.config["optim_type"]
    names = [n for n, p in pl_module.named_parameters()]
    # TODO
    optimizer_grouped_parameters = [
        # Backbone, with weight decay.
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if not any(nd in n for nd in no_decay)
                and not any(bb in n for bb in head_names)
            ],
            "weight_decay": wd,
            "lr": lr,
        },
        # Backbone norms/biases, no weight decay.
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if any(nd in n for nd in no_decay)
                and not any(bb in n for bb in head_names)
            ],
            "weight_decay": 0.0,
            "lr": lr,
        },
        # Heads, with weight decay, multiplied lr.
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if not any(nd in n for nd in no_decay)
                and any(bb in n for bb in head_names)
            ],
            "weight_decay": wd,
            "lr": lr * lr_mult,
        },
        # Head norms/biases, no weight decay, multiplied lr.
        {
            "params": [
                p
                for n, p in pl_module.named_parameters()
                if any(nd in n for nd in no_decay) and any(bb in n for bb in head_names)
            ],
            "weight_decay": 0.0,
            "lr": lr * lr_mult,
        },
    ]
    if optim_type == "adamw":
        optimizer = AdamW(
            optimizer_grouped_parameters, lr=lr, eps=1e-8, betas=(0.9, 0.98)
        )
    elif optim_type == "adam":
        optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=lr)
    elif optim_type == "sgd":
        optimizer = torch.optim.SGD(optimizer_grouped_parameters, lr=lr, momentum=0.9)
    # NOTE(review): newer PyTorch Lightning defaults max_steps to -1 rather
    # than None — confirm this `is None` check matches the Lightning
    # version pinned by the project.
    if pl_module.trainer.max_steps is None:
        max_steps = (
            len(pl_module.trainer.datamodule.train_dataloader())
            * pl_module.trainer.max_epochs
            // pl_module.trainer.accumulate_grad_batches
        )
    else:
        max_steps = pl_module.trainer.max_steps
    warmup_steps = pl_module.hparams.config["warmup_steps"]
    # A float warmup is interpreted as a fraction of total training steps.
    if isinstance(pl_module.hparams.config["warmup_steps"], float):
        warmup_steps = int(max_steps * warmup_steps)
    if decay_power == "cosine":
        scheduler = get_cosine_schedule_with_warmup(
            optimizer,
            num_warmup_steps=warmup_steps,
            num_training_steps=max_steps,
        )
    else:
        scheduler = get_polynomial_decay_schedule_with_warmup(
            optimizer,
            num_warmup_steps=warmup_steps,
            num_training_steps=max_steps,
            lr_end=end_lr,
            power=decay_power,
        )
    sched = {"scheduler": scheduler, "interval": "step"}
    return (
        [optimizer],
        [sched],
    )
163,827 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Instantiate an (optionally distilled) Vision Transformer for *variant*.

    ``num_classes``, ``img_size`` and ``representation_size`` are popped from
    **kwargs; missing values fall back to the variant's default config. When
    ``pretrained`` is True, weights are loaded non-strictly via
    ``load_pretrained`` with ``checkpoint_filter_fn`` applied.
    """
    cfg = default_cfgs[variant]
    default_classes = cfg["num_classes"]
    num_classes = kwargs.pop("num_classes", default_classes)
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)

    # A different head size implies fine-tuning, so the pre-logits
    # representation layer is dropped (may not always be desired).
    if repr_size is not None and num_classes != default_classes:
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None

    model_cls = DistilledVisionTransformer if distilled else VisionTransformer
    model = model_cls(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model.default_cfg = cfg
    # model.pretrained_cfg = cfg  # This is made by water

    if pretrained:
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=partial(checkpoint_filter_fn, model=model),
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_small_patch16_224` function. Write a Python function `def vit_small_patch16_224(pretrained=False, **kwargs)` to solve the following problem:
My custom 'small' ViT model. Depth=8, heads=8, mlp_ratio=3.
Here is the function:
def vit_small_patch16_224(pretrained=False, **kwargs):
    """Custom 'small' ViT: patch 16, depth=8, heads=8, mlp_ratio=3."""
    params = dict(
        patch_size=16,
        embed_dim=768,
        depth=8,
        num_heads=8,
        mlp_ratio=3.0,
        qkv_bias=False,
        norm_layer=nn.LayerNorm,
        **kwargs,
    )
    if pretrained:
        # The originally released weights were trained with this (incorrect)
        # attention scale; keep it so those checkpoints still load correctly.
        params.setdefault("qk_scale", 768 ** -0.5)
    return _create_vision_transformer(
        "vit_small_patch16_224", pretrained=pretrained, **params
    )
163,828 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Instantiate an (optionally distilled) Vision Transformer for *variant*.

    ``num_classes``, ``img_size`` and ``representation_size`` are popped from
    **kwargs; missing values fall back to the variant's default config. When
    ``pretrained`` is True, weights are loaded non-strictly via
    ``load_pretrained`` with ``checkpoint_filter_fn`` applied.
    """
    cfg = default_cfgs[variant]
    default_classes = cfg["num_classes"]
    num_classes = kwargs.pop("num_classes", default_classes)
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)

    # A different head size implies fine-tuning, so the pre-logits
    # representation layer is dropped (may not always be desired).
    if repr_size is not None and num_classes != default_classes:
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None

    model_cls = DistilledVisionTransformer if distilled else VisionTransformer
    model = model_cls(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model.default_cfg = cfg
    # model.pretrained_cfg = cfg  # This is made by water

    if pretrained:
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=partial(checkpoint_filter_fn, model=model),
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_base_patch16_224` function. Write a Python function `def vit_base_patch16_224(pretrained=False, **kwargs)` to solve the following problem:
ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
Here is the function:
def vit_base_patch16_224(pretrained=False, **kwargs):
    """ViT-Base (ViT-B/16), https://arxiv.org/abs/2010.11929.

    ImageNet-1k weights fine-tuned from in21k @ 224x224 (source:
    https://github.com/google-research/vision_transformer).
    """
    params = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer(
        "vit_base_patch16_224", pretrained=pretrained, **params
    )
163,829 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Instantiate an (optionally distilled) Vision Transformer for *variant*.

    ``num_classes``, ``img_size`` and ``representation_size`` are popped from
    **kwargs; missing values fall back to the variant's default config. When
    ``pretrained`` is True, weights are loaded non-strictly via
    ``load_pretrained`` with ``checkpoint_filter_fn`` applied.
    """
    cfg = default_cfgs[variant]
    default_classes = cfg["num_classes"]
    num_classes = kwargs.pop("num_classes", default_classes)
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)

    # A different head size implies fine-tuning, so the pre-logits
    # representation layer is dropped (may not always be desired).
    if repr_size is not None and num_classes != default_classes:
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None

    model_cls = DistilledVisionTransformer if distilled else VisionTransformer
    model = model_cls(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model.default_cfg = cfg
    # model.pretrained_cfg = cfg  # This is made by water

    if pretrained:
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=partial(checkpoint_filter_fn, model=model),
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_base_patch32_224` function. Write a Python function `def vit_base_patch32_224(pretrained=False, **kwargs)` to solve the following problem:
ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
Here is the function:
def vit_base_patch32_224(pretrained=False, **kwargs):
    """ViT-Base (ViT-B/32), https://arxiv.org/abs/2010.11929.

    No pretrained weights are available for this variant.
    """
    params = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer(
        "vit_base_patch32_224", pretrained=pretrained, **params
    )
163,830 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Instantiate an (optionally distilled) Vision Transformer for *variant*.

    ``num_classes``, ``img_size`` and ``representation_size`` are popped from
    **kwargs; missing values fall back to the variant's default config. When
    ``pretrained`` is True, weights are loaded non-strictly via
    ``load_pretrained`` with ``checkpoint_filter_fn`` applied.
    """
    cfg = default_cfgs[variant]
    default_classes = cfg["num_classes"]
    num_classes = kwargs.pop("num_classes", default_classes)
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)

    # A different head size implies fine-tuning, so the pre-logits
    # representation layer is dropped (may not always be desired).
    if repr_size is not None and num_classes != default_classes:
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None

    model_cls = DistilledVisionTransformer if distilled else VisionTransformer
    model = model_cls(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model.default_cfg = cfg
    # model.pretrained_cfg = cfg  # This is made by water

    if pretrained:
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=partial(checkpoint_filter_fn, model=model),
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_base_patch16_384` function. Write a Python function `def vit_base_patch16_384(pretrained=False, **kwargs)` to solve the following problem:
ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
Here is the function:
def vit_base_patch16_384(pretrained=False, **kwargs):
    """ViT-Base (ViT-B/16), https://arxiv.org/abs/2010.11929.

    ImageNet-1k weights fine-tuned from in21k @ 384x384 (source:
    https://github.com/google-research/vision_transformer).
    """
    params = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer(
        "vit_base_patch16_384", pretrained=pretrained, **params
    )
163,831 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Instantiate an (optionally distilled) Vision Transformer for *variant*.

    ``num_classes``, ``img_size`` and ``representation_size`` are popped from
    **kwargs; missing values fall back to the variant's default config. When
    ``pretrained`` is True, weights are loaded non-strictly via
    ``load_pretrained`` with ``checkpoint_filter_fn`` applied.
    """
    cfg = default_cfgs[variant]
    default_classes = cfg["num_classes"]
    num_classes = kwargs.pop("num_classes", default_classes)
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)

    # A different head size implies fine-tuning, so the pre-logits
    # representation layer is dropped (may not always be desired).
    if repr_size is not None and num_classes != default_classes:
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None

    model_cls = DistilledVisionTransformer if distilled else VisionTransformer
    model = model_cls(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model.default_cfg = cfg
    # model.pretrained_cfg = cfg  # This is made by water

    if pretrained:
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=partial(checkpoint_filter_fn, model=model),
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_base_patch32_384` function. Write a Python function `def vit_base_patch32_384(pretrained=False, **kwargs)` to solve the following problem:
ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
Here is the function:
def vit_base_patch32_384(pretrained=False, **kwargs):
    """ViT-Base (ViT-B/32), https://arxiv.org/abs/2010.11929.

    ImageNet-1k weights fine-tuned from in21k @ 384x384 (source:
    https://github.com/google-research/vision_transformer).
    """
    params = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer(
        "vit_base_patch32_384", pretrained=pretrained, **params
    )
163,832 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Instantiate an (optionally distilled) Vision Transformer for *variant*.

    ``num_classes``, ``img_size`` and ``representation_size`` are popped from
    **kwargs; missing values fall back to the variant's default config. When
    ``pretrained`` is True, weights are loaded non-strictly via
    ``load_pretrained`` with ``checkpoint_filter_fn`` applied.
    """
    cfg = default_cfgs[variant]
    default_classes = cfg["num_classes"]
    num_classes = kwargs.pop("num_classes", default_classes)
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)

    # A different head size implies fine-tuning, so the pre-logits
    # representation layer is dropped (may not always be desired).
    if repr_size is not None and num_classes != default_classes:
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None

    model_cls = DistilledVisionTransformer if distilled else VisionTransformer
    model = model_cls(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model.default_cfg = cfg
    # model.pretrained_cfg = cfg  # This is made by water

    if pretrained:
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=partial(checkpoint_filter_fn, model=model),
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_large_patch16_224` function. Write a Python function `def vit_large_patch16_224(pretrained=False, **kwargs)` to solve the following problem:
ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
Here is the function:
def vit_large_patch16_224(pretrained=False, **kwargs):
    """ViT-Large (ViT-L/16), https://arxiv.org/abs/2010.11929.

    ImageNet-1k weights fine-tuned from in21k @ 224x224 (source:
    https://github.com/google-research/vision_transformer).
    """
    params = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
    return _create_vision_transformer(
        "vit_large_patch16_224", pretrained=pretrained, **params
    )
163,833 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Instantiate an (optionally distilled) Vision Transformer for *variant*.

    ``num_classes``, ``img_size`` and ``representation_size`` are popped from
    **kwargs; missing values fall back to the variant's default config. When
    ``pretrained`` is True, weights are loaded non-strictly via
    ``load_pretrained`` with ``checkpoint_filter_fn`` applied.
    """
    cfg = default_cfgs[variant]
    default_classes = cfg["num_classes"]
    num_classes = kwargs.pop("num_classes", default_classes)
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)

    # A different head size implies fine-tuning, so the pre-logits
    # representation layer is dropped (may not always be desired).
    if repr_size is not None and num_classes != default_classes:
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None

    model_cls = DistilledVisionTransformer if distilled else VisionTransformer
    model = model_cls(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model.default_cfg = cfg
    # model.pretrained_cfg = cfg  # This is made by water

    if pretrained:
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=partial(checkpoint_filter_fn, model=model),
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_large_patch32_224` function. Write a Python function `def vit_large_patch32_224(pretrained=False, **kwargs)` to solve the following problem:
ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
Here is the function:
def vit_large_patch32_224(pretrained=False, **kwargs):
    """ViT-Large (ViT-L/32), https://arxiv.org/abs/2010.11929.

    No pretrained weights are available for this variant.
    """
    params = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
    return _create_vision_transformer(
        "vit_large_patch32_224", pretrained=pretrained, **params
    )
163,834 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Instantiate an (optionally distilled) Vision Transformer for *variant*.

    ``num_classes``, ``img_size`` and ``representation_size`` are popped from
    **kwargs; missing values fall back to the variant's default config. When
    ``pretrained`` is True, weights are loaded non-strictly via
    ``load_pretrained`` with ``checkpoint_filter_fn`` applied.
    """
    cfg = default_cfgs[variant]
    default_classes = cfg["num_classes"]
    num_classes = kwargs.pop("num_classes", default_classes)
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)

    # A different head size implies fine-tuning, so the pre-logits
    # representation layer is dropped (may not always be desired).
    if repr_size is not None and num_classes != default_classes:
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None

    model_cls = DistilledVisionTransformer if distilled else VisionTransformer
    model = model_cls(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model.default_cfg = cfg
    # model.pretrained_cfg = cfg  # This is made by water

    if pretrained:
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=partial(checkpoint_filter_fn, model=model),
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_large_patch16_384` function. Write a Python function `def vit_large_patch16_384(pretrained=False, **kwargs)` to solve the following problem:
ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
Here is the function:
def vit_large_patch16_384(pretrained=False, **kwargs):
    """ViT-Large (ViT-L/16), https://arxiv.org/abs/2010.11929.

    ImageNet-1k weights fine-tuned from in21k @ 384x384 (source:
    https://github.com/google-research/vision_transformer).
    """
    params = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
    return _create_vision_transformer(
        "vit_large_patch16_384", pretrained=pretrained, **params
    )
163,835 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Instantiate an (optionally distilled) Vision Transformer for *variant*.

    ``num_classes``, ``img_size`` and ``representation_size`` are popped from
    **kwargs; missing values fall back to the variant's default config. When
    ``pretrained`` is True, weights are loaded non-strictly via
    ``load_pretrained`` with ``checkpoint_filter_fn`` applied.
    """
    cfg = default_cfgs[variant]
    default_classes = cfg["num_classes"]
    num_classes = kwargs.pop("num_classes", default_classes)
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)

    # A different head size implies fine-tuning, so the pre-logits
    # representation layer is dropped (may not always be desired).
    if repr_size is not None and num_classes != default_classes:
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None

    model_cls = DistilledVisionTransformer if distilled else VisionTransformer
    model = model_cls(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model.default_cfg = cfg
    # model.pretrained_cfg = cfg  # This is made by water

    if pretrained:
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=partial(checkpoint_filter_fn, model=model),
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_large_patch32_384` function. Write a Python function `def vit_large_patch32_384(pretrained=False, **kwargs)` to solve the following problem:
ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
Here is the function:
def vit_large_patch32_384(pretrained=False, **kwargs):
    """ViT-Large (ViT-L/32), https://arxiv.org/abs/2010.11929.

    ImageNet-1k weights fine-tuned from in21k @ 384x384 (source:
    https://github.com/google-research/vision_transformer).
    """
    params = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
    return _create_vision_transformer(
        "vit_large_patch32_384", pretrained=pretrained, **params
    )
163,836 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Instantiate an (optionally distilled) Vision Transformer for *variant*.

    ``num_classes``, ``img_size`` and ``representation_size`` are popped from
    **kwargs; missing values fall back to the variant's default config. When
    ``pretrained`` is True, weights are loaded non-strictly via
    ``load_pretrained`` with ``checkpoint_filter_fn`` applied.
    """
    cfg = default_cfgs[variant]
    default_classes = cfg["num_classes"]
    num_classes = kwargs.pop("num_classes", default_classes)
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)

    # A different head size implies fine-tuning, so the pre-logits
    # representation layer is dropped (may not always be desired).
    if repr_size is not None and num_classes != default_classes:
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None

    model_cls = DistilledVisionTransformer if distilled else VisionTransformer
    model = model_cls(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model.default_cfg = cfg
    # model.pretrained_cfg = cfg  # This is made by water

    if pretrained:
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=partial(checkpoint_filter_fn, model=model),
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_base_patch16_224_in21k` function. Write a Python function `def vit_base_patch16_224_in21k(pretrained=False, **kwargs)` to solve the following problem:
ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
Here is the function:
def vit_base_patch16_224_in21k(pretrained=False, **kwargs):
    """ViT-Base (ViT-B/16), https://arxiv.org/abs/2010.11929.

    ImageNet-21k weights @ 224x224 (source:
    https://github.com/google-research/vision_transformer).
    """
    params = dict(
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        representation_size=768,
        **kwargs,
    )
    return _create_vision_transformer(
        "vit_base_patch16_224_in21k", pretrained=pretrained, **params
    )
163,837 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Instantiate an (optionally distilled) Vision Transformer for *variant*.

    ``num_classes``, ``img_size`` and ``representation_size`` are popped from
    **kwargs; missing values fall back to the variant's default config. When
    ``pretrained`` is True, weights are loaded non-strictly via
    ``load_pretrained`` with ``checkpoint_filter_fn`` applied.
    """
    cfg = default_cfgs[variant]
    default_classes = cfg["num_classes"]
    num_classes = kwargs.pop("num_classes", default_classes)
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)

    # A different head size implies fine-tuning, so the pre-logits
    # representation layer is dropped (may not always be desired).
    if repr_size is not None and num_classes != default_classes:
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None

    model_cls = DistilledVisionTransformer if distilled else VisionTransformer
    model = model_cls(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model.default_cfg = cfg
    # model.pretrained_cfg = cfg  # This is made by water

    if pretrained:
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=partial(checkpoint_filter_fn, model=model),
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_base_patch32_224_in21k` function. Write a Python function `def vit_base_patch32_224_in21k(pretrained=False, **kwargs)` to solve the following problem:
ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
Here is the function:
def vit_base_patch32_224_in21k(pretrained=False, **kwargs):
    """ViT-Base (ViT-B/32), https://arxiv.org/abs/2010.11929.

    ImageNet-21k weights @ 224x224 (source:
    https://github.com/google-research/vision_transformer).
    """
    params = dict(
        patch_size=32,
        embed_dim=768,
        depth=12,
        num_heads=12,
        representation_size=768,
        **kwargs,
    )
    return _create_vision_transformer(
        "vit_base_patch32_224_in21k", pretrained=pretrained, **params
    )
163,838 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Construct a VisionTransformer (or its distilled variant) named ``variant``.

    ``num_classes``, ``img_size`` and ``representation_size`` may be overridden
    through ``kwargs``; anything unspecified falls back to the variant's entry
    in ``default_cfgs``. When ``pretrained`` is set, converted weights are
    loaded non-strictly through ``checkpoint_filter_fn``.
    """
    cfg = default_cfgs[variant]
    num_classes = kwargs.pop("num_classes", cfg["num_classes"])
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)
    if repr_size is not None and num_classes != cfg["num_classes"]:
        # A different head size implies fine-tuning; the pretrained pre-logits
        # (representation) layer is dropped instead of being kept mismatched.
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None
    build_args = dict(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model = (DistilledVisionTransformer if distilled else VisionTransformer)(**build_args)
    model.default_cfg = cfg
    if pretrained:
        filter_fn = partial(checkpoint_filter_fn, model=model)
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=filter_fn,
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_large_patch16_224_in21k` function. Write a Python function `def vit_large_patch16_224_in21k(pretrained=False, **kwargs)` to solve the following problem:
ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
Here is the function:
def vit_large_patch16_224_in21k(pretrained=False, **kwargs):
    """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    """
    # ViT-L/16 hyper-parameters; representation_size=1024 keeps the pre-logits
    # layer present in the in21k checkpoint.
    model_kwargs = dict(
        patch_size=16,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        representation_size=1024,
        **kwargs,
    )
    model = _create_vision_transformer(
        "vit_large_patch16_224_in21k", pretrained=pretrained, **model_kwargs
    )
    return model | ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. |
163,839 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Construct a VisionTransformer (or its distilled variant) named ``variant``.

    ``num_classes``, ``img_size`` and ``representation_size`` may be overridden
    through ``kwargs``; anything unspecified falls back to the variant's entry
    in ``default_cfgs``. When ``pretrained`` is set, converted weights are
    loaded non-strictly through ``checkpoint_filter_fn``.
    """
    cfg = default_cfgs[variant]
    num_classes = kwargs.pop("num_classes", cfg["num_classes"])
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)
    if repr_size is not None and num_classes != cfg["num_classes"]:
        # A different head size implies fine-tuning; the pretrained pre-logits
        # (representation) layer is dropped instead of being kept mismatched.
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None
    build_args = dict(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model = (DistilledVisionTransformer if distilled else VisionTransformer)(**build_args)
    model.default_cfg = cfg
    if pretrained:
        filter_fn = partial(checkpoint_filter_fn, model=model)
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=filter_fn,
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_large_patch32_224_in21k` function. Write a Python function `def vit_large_patch32_224_in21k(pretrained=False, **kwargs)` to solve the following problem:
ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
Here is the function:
def vit_large_patch32_224_in21k(pretrained=False, **kwargs):
    """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    """
    # ViT-L/32 hyper-parameters; representation_size=1024 keeps the pre-logits
    # layer present in the in21k checkpoint.
    model_kwargs = dict(
        patch_size=32,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        representation_size=1024,
        **kwargs,
    )
    model = _create_vision_transformer(
        "vit_large_patch32_224_in21k", pretrained=pretrained, **model_kwargs
    )
    return model | ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. |
163,840 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Construct a VisionTransformer (or its distilled variant) named ``variant``.

    ``num_classes``, ``img_size`` and ``representation_size`` may be overridden
    through ``kwargs``; anything unspecified falls back to the variant's entry
    in ``default_cfgs``. When ``pretrained`` is set, converted weights are
    loaded non-strictly through ``checkpoint_filter_fn``.
    """
    cfg = default_cfgs[variant]
    num_classes = kwargs.pop("num_classes", cfg["num_classes"])
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)
    if repr_size is not None and num_classes != cfg["num_classes"]:
        # A different head size implies fine-tuning; the pretrained pre-logits
        # (representation) layer is dropped instead of being kept mismatched.
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None
    build_args = dict(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model = (DistilledVisionTransformer if distilled else VisionTransformer)(**build_args)
    model.default_cfg = cfg
    if pretrained:
        filter_fn = partial(checkpoint_filter_fn, model=model)
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=filter_fn,
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_huge_patch14_224_in21k` function. Write a Python function `def vit_huge_patch14_224_in21k(pretrained=False, **kwargs)` to solve the following problem:
ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. NOTE: converted weights not currently available, too large for github release hosting.
Here is the function:
def vit_huge_patch14_224_in21k(pretrained=False, **kwargs):
    """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    NOTE: converted weights not currently available, too large for github release hosting.
    """
    # ViT-H/14 hyper-parameters; representation_size=1280 keeps the pre-logits
    # layer present in the in21k checkpoint.
    model_kwargs = dict(
        patch_size=14,
        embed_dim=1280,
        depth=32,
        num_heads=16,
        representation_size=1280,
        **kwargs,
    )
    model = _create_vision_transformer(
        "vit_huge_patch14_224_in21k", pretrained=pretrained, **model_kwargs
    )
    return model | ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. NOTE: converted weights not currently available, too large for github release hosting. |
163,841 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Construct a VisionTransformer (or its distilled variant) named ``variant``.

    ``num_classes``, ``img_size`` and ``representation_size`` may be overridden
    through ``kwargs``; anything unspecified falls back to the variant's entry
    in ``default_cfgs``. When ``pretrained`` is set, converted weights are
    loaded non-strictly through ``checkpoint_filter_fn``.
    """
    cfg = default_cfgs[variant]
    num_classes = kwargs.pop("num_classes", cfg["num_classes"])
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)
    if repr_size is not None and num_classes != cfg["num_classes"]:
        # A different head size implies fine-tuning; the pretrained pre-logits
        # (representation) layer is dropped instead of being kept mismatched.
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None
    build_args = dict(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model = (DistilledVisionTransformer if distilled else VisionTransformer)(**build_args)
    model.default_cfg = cfg
    if pretrained:
        filter_fn = partial(checkpoint_filter_fn, model=model)
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=filter_fn,
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_base_resnet50_224_in21k` function. Write a Python function `def vit_base_resnet50_224_in21k(pretrained=False, **kwargs)` to solve the following problem:
R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
Here is the function:
def vit_base_resnet50_224_in21k(pretrained=False, **kwargs):
    """ R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    """
    # create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head
    backbone = ResNetV2(
        layers=(3, 4, 9),
        num_classes=0,
        global_pool="",
        in_chans=kwargs.get("in_chans", 3),
        preact=False,
        stem_type="same",
        conv_layer=StdConv2dSame,
    )
    # ViT-Base transformer on top of the ResNet feature extractor;
    # representation_size=768 keeps the pre-logits layer of the in21k checkpoint.
    model_kwargs = dict(
        embed_dim=768,
        depth=12,
        num_heads=12,
        hybrid_backbone=backbone,
        representation_size=768,
        **kwargs,
    )
    model = _create_vision_transformer(
        "vit_base_resnet50_224_in21k", pretrained=pretrained, **model_kwargs
    )
    return model | R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. |
163,842 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Construct a VisionTransformer (or its distilled variant) named ``variant``.

    ``num_classes``, ``img_size`` and ``representation_size`` may be overridden
    through ``kwargs``; anything unspecified falls back to the variant's entry
    in ``default_cfgs``. When ``pretrained`` is set, converted weights are
    loaded non-strictly through ``checkpoint_filter_fn``.
    """
    cfg = default_cfgs[variant]
    num_classes = kwargs.pop("num_classes", cfg["num_classes"])
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)
    if repr_size is not None and num_classes != cfg["num_classes"]:
        # A different head size implies fine-tuning; the pretrained pre-logits
        # (representation) layer is dropped instead of being kept mismatched.
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None
    build_args = dict(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model = (DistilledVisionTransformer if distilled else VisionTransformer)(**build_args)
    model.default_cfg = cfg
    if pretrained:
        filter_fn = partial(checkpoint_filter_fn, model=model)
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=filter_fn,
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_base_resnet50_384` function. Write a Python function `def vit_base_resnet50_384(pretrained=False, **kwargs)` to solve the following problem:
R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
Here is the function:
def vit_base_resnet50_384(pretrained=False, **kwargs):
    """ R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
    """
    # create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head
    backbone = ResNetV2(
        layers=(3, 4, 9),
        num_classes=0,
        global_pool="",
        in_chans=kwargs.get("in_chans", 3),
        preact=False,
        stem_type="same",
        conv_layer=StdConv2dSame,
    )
    # No representation_size here: the 1k fine-tuned checkpoint has no pre-logits layer.
    model_kwargs = dict(
        embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs
    )
    model = _create_vision_transformer(
        "vit_base_resnet50_384", pretrained=pretrained, **model_kwargs
    )
    return model | R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. |
163,843 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Construct a VisionTransformer (or its distilled variant) named ``variant``.

    ``num_classes``, ``img_size`` and ``representation_size`` may be overridden
    through ``kwargs``; anything unspecified falls back to the variant's entry
    in ``default_cfgs``. When ``pretrained`` is set, converted weights are
    loaded non-strictly through ``checkpoint_filter_fn``.
    """
    cfg = default_cfgs[variant]
    num_classes = kwargs.pop("num_classes", cfg["num_classes"])
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)
    if repr_size is not None and num_classes != cfg["num_classes"]:
        # A different head size implies fine-tuning; the pretrained pre-logits
        # (representation) layer is dropped instead of being kept mismatched.
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None
    build_args = dict(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model = (DistilledVisionTransformer if distilled else VisionTransformer)(**build_args)
    model.default_cfg = cfg
    if pretrained:
        filter_fn = partial(checkpoint_filter_fn, model=model)
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=filter_fn,
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_small_resnet26d_224` function. Write a Python function `def vit_small_resnet26d_224(pretrained=False, **kwargs)` to solve the following problem:
Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights.
Here is the function:
def vit_small_resnet26d_224(pretrained=False, **kwargs):
    """ Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights.
    """
    # ResNet26D feature extractor, last stage only (out_indices=[4]).
    backbone = resnet26d(
        pretrained=pretrained,
        in_chans=kwargs.get("in_chans", 3),
        features_only=True,
        out_indices=[4],
    )
    model_kwargs = dict(
        embed_dim=768,
        depth=8,
        num_heads=8,
        mlp_ratio=3,
        hybrid_backbone=backbone,
        **kwargs,
    )
    model = _create_vision_transformer(
        "vit_small_resnet26d_224", pretrained=pretrained, **model_kwargs
    )
    return model | Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights. |
163,844 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Construct a VisionTransformer (or its distilled variant) named ``variant``.

    ``num_classes``, ``img_size`` and ``representation_size`` may be overridden
    through ``kwargs``; anything unspecified falls back to the variant's entry
    in ``default_cfgs``. When ``pretrained`` is set, converted weights are
    loaded non-strictly through ``checkpoint_filter_fn``.
    """
    cfg = default_cfgs[variant]
    num_classes = kwargs.pop("num_classes", cfg["num_classes"])
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)
    if repr_size is not None and num_classes != cfg["num_classes"]:
        # A different head size implies fine-tuning; the pretrained pre-logits
        # (representation) layer is dropped instead of being kept mismatched.
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None
    build_args = dict(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model = (DistilledVisionTransformer if distilled else VisionTransformer)(**build_args)
    model.default_cfg = cfg
    if pretrained:
        filter_fn = partial(checkpoint_filter_fn, model=model)
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=filter_fn,
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_small_resnet50d_s3_224` function. Write a Python function `def vit_small_resnet50d_s3_224(pretrained=False, **kwargs)` to solve the following problem:
Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights.
Here is the function:
def vit_small_resnet50d_s3_224(pretrained=False, **kwargs):
    """ Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights.
    """
    # ResNet50D feature extractor truncated to 3 stages (out_indices=[3]).
    backbone = resnet50d(
        pretrained=pretrained,
        in_chans=kwargs.get("in_chans", 3),
        features_only=True,
        out_indices=[3],
    )
    model_kwargs = dict(
        embed_dim=768,
        depth=8,
        num_heads=8,
        mlp_ratio=3,
        hybrid_backbone=backbone,
        **kwargs,
    )
    model = _create_vision_transformer(
        "vit_small_resnet50d_s3_224", pretrained=pretrained, **model_kwargs
    )
    return model | Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights. |
163,845 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Construct a VisionTransformer (or its distilled variant) named ``variant``.

    ``num_classes``, ``img_size`` and ``representation_size`` may be overridden
    through ``kwargs``; anything unspecified falls back to the variant's entry
    in ``default_cfgs``. When ``pretrained`` is set, converted weights are
    loaded non-strictly through ``checkpoint_filter_fn``.
    """
    cfg = default_cfgs[variant]
    num_classes = kwargs.pop("num_classes", cfg["num_classes"])
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)
    if repr_size is not None and num_classes != cfg["num_classes"]:
        # A different head size implies fine-tuning; the pretrained pre-logits
        # (representation) layer is dropped instead of being kept mismatched.
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None
    build_args = dict(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model = (DistilledVisionTransformer if distilled else VisionTransformer)(**build_args)
    model.default_cfg = cfg
    if pretrained:
        filter_fn = partial(checkpoint_filter_fn, model=model)
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=filter_fn,
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_base_resnet26d_224` function. Write a Python function `def vit_base_resnet26d_224(pretrained=False, **kwargs)` to solve the following problem:
Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights.
Here is the function:
def vit_base_resnet26d_224(pretrained=False, **kwargs):
    """ Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights.
    """
    # ResNet26D feature extractor, last stage only (out_indices=[4]).
    backbone = resnet26d(
        pretrained=pretrained,
        in_chans=kwargs.get("in_chans", 3),
        features_only=True,
        out_indices=[4],
    )
    model_kwargs = dict(
        embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs
    )
    model = _create_vision_transformer(
        "vit_base_resnet26d_224", pretrained=pretrained, **model_kwargs
    )
    return model | Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights. |
163,846 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Construct a VisionTransformer (or its distilled variant) named ``variant``.

    ``num_classes``, ``img_size`` and ``representation_size`` may be overridden
    through ``kwargs``; anything unspecified falls back to the variant's entry
    in ``default_cfgs``. When ``pretrained`` is set, converted weights are
    loaded non-strictly through ``checkpoint_filter_fn``.
    """
    cfg = default_cfgs[variant]
    num_classes = kwargs.pop("num_classes", cfg["num_classes"])
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)
    if repr_size is not None and num_classes != cfg["num_classes"]:
        # A different head size implies fine-tuning; the pretrained pre-logits
        # (representation) layer is dropped instead of being kept mismatched.
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None
    build_args = dict(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model = (DistilledVisionTransformer if distilled else VisionTransformer)(**build_args)
    model.default_cfg = cfg
    if pretrained:
        filter_fn = partial(checkpoint_filter_fn, model=model)
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=filter_fn,
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_base_resnet50d_224` function. Write a Python function `def vit_base_resnet50d_224(pretrained=False, **kwargs)` to solve the following problem:
Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights.
Here is the function:
def vit_base_resnet50d_224(pretrained=False, **kwargs):
    """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights.
    """
    # ResNet50D feature extractor, last stage only (out_indices=[4]).
    backbone = resnet50d(
        pretrained=pretrained,
        in_chans=kwargs.get("in_chans", 3),
        features_only=True,
        out_indices=[4],
    )
    model_kwargs = dict(
        embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs
    )
    model = _create_vision_transformer(
        "vit_base_resnet50d_224", pretrained=pretrained, **model_kwargs
    )
    return model | Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. |
163,847 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Construct a VisionTransformer (or its distilled variant) named ``variant``.

    ``num_classes``, ``img_size`` and ``representation_size`` may be overridden
    through ``kwargs``; anything unspecified falls back to the variant's entry
    in ``default_cfgs``. When ``pretrained`` is set, converted weights are
    loaded non-strictly through ``checkpoint_filter_fn``.
    """
    cfg = default_cfgs[variant]
    num_classes = kwargs.pop("num_classes", cfg["num_classes"])
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)
    if repr_size is not None and num_classes != cfg["num_classes"]:
        # A different head size implies fine-tuning; the pretrained pre-logits
        # (representation) layer is dropped instead of being kept mismatched.
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None
    build_args = dict(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model = (DistilledVisionTransformer if distilled else VisionTransformer)(**build_args)
    model.default_cfg = cfg
    if pretrained:
        filter_fn = partial(checkpoint_filter_fn, model=model)
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=filter_fn,
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_deit_tiny_patch16_224` function. Write a Python function `def vit_deit_tiny_patch16_224(pretrained=False, **kwargs)` to solve the following problem:
DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
Here is the function:
def vit_deit_tiny_patch16_224(pretrained=False, **kwargs):
    """ DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    # DeiT-Ti: 16x16 patches, 192-dim embeddings, 12 layers, 3 heads.
    model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
    model = _create_vision_transformer(
        "vit_deit_tiny_patch16_224", pretrained=pretrained, **model_kwargs
    )
    return model | DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. |
163,848 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
    """Construct a VisionTransformer (or its distilled variant) named ``variant``.

    ``num_classes``, ``img_size`` and ``representation_size`` may be overridden
    through ``kwargs``; anything unspecified falls back to the variant's entry
    in ``default_cfgs``. When ``pretrained`` is set, converted weights are
    loaded non-strictly through ``checkpoint_filter_fn``.
    """
    cfg = default_cfgs[variant]
    num_classes = kwargs.pop("num_classes", cfg["num_classes"])
    img_size = kwargs.pop("img_size", cfg["input_size"][-1])
    repr_size = kwargs.pop("representation_size", None)
    if repr_size is not None and num_classes != cfg["num_classes"]:
        # A different head size implies fine-tuning; the pretrained pre-logits
        # (representation) layer is dropped instead of being kept mismatched.
        _logger.warning("Removing representation layer for fine-tuning.")
        repr_size = None
    build_args = dict(
        img_size=img_size,
        num_classes=num_classes,
        representation_size=repr_size,
        **kwargs,
    )
    model = (DistilledVisionTransformer if distilled else VisionTransformer)(**build_args)
    model.default_cfg = cfg
    if pretrained:
        filter_fn = partial(checkpoint_filter_fn, model=model)
        load_pretrained(
            model,
            num_classes=num_classes,
            in_chans=kwargs.get("in_chans", 3),
            filter_fn=filter_fn,
            strict=False,
        )
    return model
The provided code snippet includes necessary dependencies for implementing the `vit_deit_small_patch16_224` function. Write a Python function `def vit_deit_small_patch16_224(pretrained=False, **kwargs)` to solve the following problem:
DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
Here is the function:
def vit_deit_small_patch16_224(pretrained=False, **kwargs):
    """ DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    # DeiT-S: 16x16 patches, 384-dim embeddings, 12 layers, 6 heads.
    model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
    model = _create_vision_transformer(
        "vit_deit_small_patch16_224", pretrained=pretrained, **model_kwargs
    )
    return model | DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. |
163,849 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
default_cfg = default_cfgs[variant]
default_num_classes = default_cfg["num_classes"]
default_img_size = default_cfg["input_size"][-1]
num_classes = kwargs.pop("num_classes", default_num_classes)
img_size = kwargs.pop("img_size", default_img_size)
repr_size = kwargs.pop("representation_size", None)
if repr_size is not None and num_classes != default_num_classes:
# Remove representation layer if fine-tuning. This may not always be the desired action,
# but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
_logger.warning("Removing representation layer for fine-tuning.")
repr_size = None
model_cls = DistilledVisionTransformer if distilled else VisionTransformer
model = model_cls(
img_size=img_size,
num_classes=num_classes,
representation_size=repr_size,
**kwargs,
)
model.default_cfg = default_cfg
# model.pretrained_cfg = default_cfg # This is made by water
if pretrained:
load_pretrained(
model,
num_classes=num_classes,
in_chans=kwargs.get("in_chans", 3),
filter_fn=partial(checkpoint_filter_fn, model=model),
strict=False,
)
return model
The provided code snippet includes necessary dependencies for implementing the `vit_deit_base_patch16_224` function. Write a Python function `def vit_deit_base_patch16_224(pretrained=False, **kwargs)` to solve the following problem:
DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
Here is the function:
def vit_deit_base_patch16_224(pretrained=False, **kwargs):
""" DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_patch16_224", pretrained=pretrained, **model_kwargs
)
return model | DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. |
163,850 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
default_cfg = default_cfgs[variant]
default_num_classes = default_cfg["num_classes"]
default_img_size = default_cfg["input_size"][-1]
num_classes = kwargs.pop("num_classes", default_num_classes)
img_size = kwargs.pop("img_size", default_img_size)
repr_size = kwargs.pop("representation_size", None)
if repr_size is not None and num_classes != default_num_classes:
# Remove representation layer if fine-tuning. This may not always be the desired action,
# but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
_logger.warning("Removing representation layer for fine-tuning.")
repr_size = None
model_cls = DistilledVisionTransformer if distilled else VisionTransformer
model = model_cls(
img_size=img_size,
num_classes=num_classes,
representation_size=repr_size,
**kwargs,
)
model.default_cfg = default_cfg
# model.pretrained_cfg = default_cfg # This is made by water
if pretrained:
load_pretrained(
model,
num_classes=num_classes,
in_chans=kwargs.get("in_chans", 3),
filter_fn=partial(checkpoint_filter_fn, model=model),
strict=False,
)
return model
The provided code snippet includes necessary dependencies for implementing the `vit_deit_base_patch16_384` function. Write a Python function `def vit_deit_base_patch16_384(pretrained=False, **kwargs)` to solve the following problem:
DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
Here is the function:
def vit_deit_base_patch16_384(pretrained=False, **kwargs):
""" DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_patch16_384", pretrained=pretrained, **model_kwargs
)
return model | DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. |
163,851 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
default_cfg = default_cfgs[variant]
default_num_classes = default_cfg["num_classes"]
default_img_size = default_cfg["input_size"][-1]
num_classes = kwargs.pop("num_classes", default_num_classes)
img_size = kwargs.pop("img_size", default_img_size)
repr_size = kwargs.pop("representation_size", None)
if repr_size is not None and num_classes != default_num_classes:
# Remove representation layer if fine-tuning. This may not always be the desired action,
# but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
_logger.warning("Removing representation layer for fine-tuning.")
repr_size = None
model_cls = DistilledVisionTransformer if distilled else VisionTransformer
model = model_cls(
img_size=img_size,
num_classes=num_classes,
representation_size=repr_size,
**kwargs,
)
model.default_cfg = default_cfg
# model.pretrained_cfg = default_cfg # This is made by water
if pretrained:
load_pretrained(
model,
num_classes=num_classes,
in_chans=kwargs.get("in_chans", 3),
filter_fn=partial(checkpoint_filter_fn, model=model),
strict=False,
)
return model
The provided code snippet includes necessary dependencies for implementing the `vit_deit_tiny_distilled_patch16_224` function. Write a Python function `def vit_deit_tiny_distilled_patch16_224(pretrained=False, **kwargs)` to solve the following problem:
DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
Here is the function:
def vit_deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
""" DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = _create_vision_transformer(
"vit_deit_tiny_distilled_patch16_224",
pretrained=pretrained,
distilled=True,
**model_kwargs,
)
return model | DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. |
163,852 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
default_cfg = default_cfgs[variant]
default_num_classes = default_cfg["num_classes"]
default_img_size = default_cfg["input_size"][-1]
num_classes = kwargs.pop("num_classes", default_num_classes)
img_size = kwargs.pop("img_size", default_img_size)
repr_size = kwargs.pop("representation_size", None)
if repr_size is not None and num_classes != default_num_classes:
# Remove representation layer if fine-tuning. This may not always be the desired action,
# but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
_logger.warning("Removing representation layer for fine-tuning.")
repr_size = None
model_cls = DistilledVisionTransformer if distilled else VisionTransformer
model = model_cls(
img_size=img_size,
num_classes=num_classes,
representation_size=repr_size,
**kwargs,
)
model.default_cfg = default_cfg
# model.pretrained_cfg = default_cfg # This is made by water
if pretrained:
load_pretrained(
model,
num_classes=num_classes,
in_chans=kwargs.get("in_chans", 3),
filter_fn=partial(checkpoint_filter_fn, model=model),
strict=False,
)
return model
The provided code snippet includes necessary dependencies for implementing the `vit_deit_small_distilled_patch16_224` function. Write a Python function `def vit_deit_small_distilled_patch16_224(pretrained=False, **kwargs)` to solve the following problem:
DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
Here is the function:
def vit_deit_small_distilled_patch16_224(pretrained=False, **kwargs):
""" DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer(
"vit_deit_small_distilled_patch16_224",
pretrained=pretrained,
distilled=True,
**model_kwargs,
)
return model | DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. |
163,853 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
default_cfg = default_cfgs[variant]
default_num_classes = default_cfg["num_classes"]
default_img_size = default_cfg["input_size"][-1]
num_classes = kwargs.pop("num_classes", default_num_classes)
img_size = kwargs.pop("img_size", default_img_size)
repr_size = kwargs.pop("representation_size", None)
if repr_size is not None and num_classes != default_num_classes:
# Remove representation layer if fine-tuning. This may not always be the desired action,
# but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
_logger.warning("Removing representation layer for fine-tuning.")
repr_size = None
model_cls = DistilledVisionTransformer if distilled else VisionTransformer
model = model_cls(
img_size=img_size,
num_classes=num_classes,
representation_size=repr_size,
**kwargs,
)
model.default_cfg = default_cfg
# model.pretrained_cfg = default_cfg # This is made by water
if pretrained:
load_pretrained(
model,
num_classes=num_classes,
in_chans=kwargs.get("in_chans", 3),
filter_fn=partial(checkpoint_filter_fn, model=model),
strict=False,
)
return model
The provided code snippet includes necessary dependencies for implementing the `vit_deit_base_distilled_patch16_224` function. Write a Python function `def vit_deit_base_distilled_patch16_224(pretrained=False, **kwargs)` to solve the following problem:
DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
Here is the function:
def vit_deit_base_distilled_patch16_224(pretrained=False, **kwargs):
""" DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_distilled_patch16_224",
pretrained=pretrained,
distilled=True,
**model_kwargs,
)
return model | DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. |
163,854 | import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
default_cfg = default_cfgs[variant]
default_num_classes = default_cfg["num_classes"]
default_img_size = default_cfg["input_size"][-1]
num_classes = kwargs.pop("num_classes", default_num_classes)
img_size = kwargs.pop("img_size", default_img_size)
repr_size = kwargs.pop("representation_size", None)
if repr_size is not None and num_classes != default_num_classes:
# Remove representation layer if fine-tuning. This may not always be the desired action,
# but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
_logger.warning("Removing representation layer for fine-tuning.")
repr_size = None
model_cls = DistilledVisionTransformer if distilled else VisionTransformer
model = model_cls(
img_size=img_size,
num_classes=num_classes,
representation_size=repr_size,
**kwargs,
)
model.default_cfg = default_cfg
# model.pretrained_cfg = default_cfg # This is made by water
if pretrained:
load_pretrained(
model,
num_classes=num_classes,
in_chans=kwargs.get("in_chans", 3),
filter_fn=partial(checkpoint_filter_fn, model=model),
strict=False,
)
return model
The provided code snippet includes necessary dependencies for implementing the `vit_deit_base_distilled_patch16_384` function. Write a Python function `def vit_deit_base_distilled_patch16_384(pretrained=False, **kwargs)` to solve the following problem:
DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
Here is the function:
def vit_deit_base_distilled_patch16_384(pretrained=False, **kwargs):
""" DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_distilled_patch16_384",
pretrained=pretrained,
distilled=True,
**model_kwargs,
)
return model | DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. |
163,855 | import torch
import os
import json
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
BertTokenizer,
AutoTokenizer
)
def get_pretrained_tokenizer(from_pretrained, special_tokens_path=None, replace_unused_tokens:bool=False):
if special_tokens_path != None:
with open(special_tokens_path) as f:
special_tokens = json.load(f)
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
if special_tokens_path!=None:
if replace_unused_tokens:
BertTokenizer.from_pretrained(
from_pretrained , truncation_side="left" , do_lower_case="uncased" in from_pretrained , never_split=special_tokens
)
else:
BertTokenizer.from_pretrained(
from_pretrained , truncation_side="left" , do_lower_case="uncased" in from_pretrained
)
torch.distributed.barrier()
if special_tokens_path!=None:
if replace_unused_tokens:
tokenizer = BertTokenizer.from_pretrained(
from_pretrained, truncation_side="left" ,do_lower_case="uncased" in from_pretrained , never_split=special_tokens
)
else:
tokenizer = BertTokenizer.from_pretrained(
from_pretrained, truncation_side="left" ,do_lower_case="uncased" in from_pretrained
)
tokenizer.add_special_tokens(special_tokens)
else:
tokenizer = BertTokenizer.from_pretrained(
from_pretrained, truncation_side="left" ,do_lower_case="uncased" in from_pretrained
)
return tokenizer | null |
163,856 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
def config():
exp_name = "pace"
seed = 0
datasets = ["photochat"] #,"f30k","coco"] # ["coco", "vg", "sbu", "gcc"]
loss_names = _loss_names({"itm": 1, "mlm": 1})
batch_size = 4096 # this is a desired batch size; pl trainer will accumulate gradients when per step batch is smaller.
# Image setting
train_transform_keys = ["pixelbert"]
val_transform_keys = ["pixelbert"]
image_size = 384
max_image_len = -1
patch_size = 32
draw_false_image = 1
image_only = False
# Text Setting
vqav2_label_size = 3129
max_text_len = 40
tokenizer = "bert-base-uncased"
vocab_size = 30522
whole_word_masking = False
mlm_prob = 0.15
draw_false_text = 0
# Transformer Setting
vit = "vit_base_patch32_384"
need_expert_load = False
hidden_size = 768
num_heads = 12
num_layers = 12
mlp_ratio = 4
drop_rate = 0.1
# Optimizer Setting
optim_type = "adamw"
learning_rate = 1e-4
weight_decay = 0.01
decay_power = 1
max_epoch = 100
max_steps = 25000
warmup_steps = 2500
end_lr = 0
lr_mult = 1 # multiply lr for downstream heads
# Downstream Setting
get_recall_metric = False
# PL Trainer Setting
resume_from = None
fast_dev_run = False
val_check_interval = 1.0
test_only = False
# below params varies with the environment
data_root = ""
log_dir = "result"
per_gpu_batchsize = 0 # you should define this manually with per_gpu_batch_size=#
num_gpus = 1
num_nodes = 1
load_path = ""
num_workers = 8
precision = 16
# for generative model
model_config = None
cache_dir = None
add_special_tokens = None
gradient_clip_val = 0
stop_token = None
temperature = 1.0
top_k = 1
top_p = None
use_segment_ids = False
discard_image = False
label_smoothing = 0.
mask_source_words = False
max_pred_len = 20
max_source_len = 412
special_tokens_file = None
replace_unused_tokens = False
record_generated_sequence = False
task_type = ""
decode_prompt = ""
detokenize = None | null |
163,857 | from sacred import Experiment
from pace.modules import decode_utils
def env_water():
data_root = "/data/dataset"
log_dir = "/result"
# max_text_len = 120
num_gpus = 7
num_nodes = 1 | null |
163,858 | from sacred import Experiment
from pace.modules import decode_utils
def env_8():
data_root = "/data/dataset"
log_dir = "/result"
# max_text_len = 120
num_gpus = 8
num_nodes = 1 | null |
163,859 | from sacred import Experiment
from pace.modules import decode_utils
def env_debug():
data_root = "/data/dataset"
log_dir = "/result"
# max_text_len = 120
num_gpus = 1
num_nodes = 1 | null |
163,860 | from sacred import Experiment
from pace.modules import decode_utils
def env_yzc():
data_root = "/data/datasets/"
log_dir = "/result"
max_image_len = 200
max_text_len = 80
num_gpus = 1
num_nodes = 1
max_epoch = 1000 | null |
163,861 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_mlm_itm():
exp_name = "mlm_itm"
datasets = ["mmdial_caps"]#["photochat"] # ["coco", "vg", "sbu", "gcc"]
loss_names = _loss_names({"itm": 1, "mlm": 1})
batch_size = 4096
max_epoch = 10
max_image_len = 200
max_text_len = 360 | null |
163,862 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_mlm_itm_randaug():
exp_name = "mlm_itm_randaug"
datasets = ["coco", "vg", "sbu", "gcc"]
train_transform_keys = ["pixelbert_randaug"]
loss_names = _loss_names({"itm": 1, "mlm": 1})
batch_size = 4096
max_epoch = 10
max_image_len = 200 | null |
163,863 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
def task_mlm_itm_mpp():
exp_name = "mlm_itm_mpp"
datasets = ["coco", "vg", "sbu", "gcc"]
loss_names = _loss_names({"itm": 1, "mlm": 1, "mpp": 1})
batch_size = 4096
max_epoch = 10
max_image_len = 200 | null |
163,864 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_photochat_intent():
exp_name = "finetune_photochat_intent"
datasets = ["photochat_intent"]
train_transform_keys = ["pixelbert_randaug"]
loss_names = _loss_names({"intent": 1, "itm": 1})
batch_size = 256
max_text_len = 360
max_epoch = 30
max_steps = None
warmup_steps = 0.1
draw_false_text = 15
learning_rate = 2e-4 | null |
163,865 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_rg_mmconv():
model_config = "generation"
record_generated_sequence = True
task_type = "generation"
decode_prompt = "<|belief|>"
detokenize = decode_utils.detokenize
tokenizer = "bert-base-uncased"
replace_unused_tokens = False
exp_name = "finetune_rg_mmconv"
datasets = ["mmconvrg"]
loss_names = _loss_names({"seq2seq":1})
batch_size = 256
mlm_prob = 0.25
max_epoch = 10
max_steps = None
max_text_len= 512
max_source_len=412
max_pred_len = 100
warmup_steps = 0.1
get_recall_metric = False
discard_image = True
draw_false_text = 0
learning_rate = 1e-4
special_tokens_file = "../pace/datamodules/vocabs/mmconv_special_tokens3.json" | null |
163,866 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_rg_simmc2():
model_config = "generation"
record_generated_sequence = True
exp_name = "finetune_rg_simmc2"
datasets = ["simmc2rg"]
detokenize = decode_utils.detokenize
loss_names = _loss_names({"seq2seq":1})
batch_size = 256
mlm_prob = 0.25
max_epoch = 10
max_steps = None
max_text_len= 512
max_source_len=412
max_pred_len = 100
warmup_steps = 0.1
get_recall_metric = False
discard_image = True
draw_false_text = 0
learning_rate = 1e-4 | null |
163,867 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_dst_simmc2():
model_config = "generation"
task_type = "generation"
record_generated_sequence = True
decode_prompt = "belief state : "
detokenize = decode_utils.detokenize
exp_name = "finetune_dst_simmc2"
datasets = ["simmc2dst"]
loss_names = _loss_names({"seq2seq":1})
batch_size = 256
mlm_prob = 0.25
max_epoch = 10
max_steps = None
max_text_len= 512
max_source_len=412
max_pred_len = 100
warmup_steps = 0.1
get_recall_metric = False
discard_image = True
draw_false_text = 0
learning_rate = 1e-4 | null |
163,868 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
def task_finetune_mmconvdst_randaug():
exp_name = "finetune_mmconvdst_randaug"
datasets = ["mmconvdst"]
train_transform_keys = ["pixelbert_randaug"]
loss_names = _loss_names({"dst": 1})
batch_size = 256
max_epoch = 20
max_steps = None
max_text_len = 520
warmup_steps = 0.1
draw_false_image = 0
learning_rate = 1e-4
# val_check_interval = 0.1
lr_mult = 10 | null |
163,869 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
def task_finetune_irtr_photochat():
exp_name = "finetune_irtr_photochat"
datasets = ["photochat"]
loss_names = _loss_names({"itm": 0.5, "irtr": 1})
batch_size = 256
# max_text_len = 80
max_epoch = 10
max_steps = None
warmup_steps = 0.1
get_recall_metric = True
draw_false_text = 15
learning_rate = 1e-4 | null |
163,870 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_irtr_photochat_randaug():
exp_name = "finetune_irtr_photochat_randaug"
datasets = ["photochat"]
train_transform_keys = ["pixelbert_randaug"]
loss_names = _loss_names({"itm": 0.5, "irtr": 1})
batch_size = 256
max_text_len = 360
max_epoch = 10
max_steps = None
warmup_steps = 0.1
get_recall_metric = True
draw_false_text = 15
learning_rate = 1e-4 | null |
163,871 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_irtr_mmdial_randaug():
exp_name = "task_finetune_irtr_mmdial_randaug"
datasets = ["mmdial_caps"]
train_transform_keys = ["pixelbert_randaug"]
loss_names = _loss_names({"itm": 0.5, "irtr": 1})
batch_size = 256
max_text_len = 360
max_epoch = 10
max_steps = None
warmup_steps = 0.1
# get_recall_metric = True
draw_false_text = 15
learning_rate = 2e-5 | null |
163,872 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_mmdial_intent():
exp_name = "finetune_mmdial_intent"
datasets = ["mmdial_intent"]
train_transform_keys = ["pixelbert_randaug"]
loss_names = _loss_names({"intent": 1, "itm": 1})
batch_size = 256
max_text_len = 360
max_epoch = 20
max_steps = None
warmup_steps = 0.1
draw_false_text = 10
learning_rate = 1e-4 | null |
163,873 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
def task_finetune_nlvr2():
exp_name = "finetune_nlvr2"
datasets = ["nlvr2"]
loss_names = _loss_names({"nlvr2": 1})
batch_size = 128
max_epoch = 10
max_steps = None
warmup_steps = 0.1
draw_false_image = 0
learning_rate = 1e-4 | null |
163,874 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_nlvr2_randaug():
exp_name = "finetune_nlvr2_randaug"
datasets = ["nlvr2"]
train_transform_keys = ["pixelbert_randaug"]
loss_names = _loss_names({"nlvr2": 1})
batch_size = 128
max_epoch = 10
max_steps = None
warmup_steps = 0.1
draw_false_image = 0
learning_rate = 1e-4 | null |
163,875 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_vqa():
exp_name = "finetune_vqa"
datasets = ["vqa"]
loss_names = _loss_names({"vqa": 1})
batch_size = 256
max_epoch = 10
max_steps = None
warmup_steps = 0.1
draw_false_image = 0
learning_rate = 1e-4
val_check_interval = 0.1
lr_mult = 10 | null |
163,876 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_vqa_randaug():
exp_name = "finetune_vqa_randaug"
datasets = ["vqa"]
train_transform_keys = ["pixelbert_randaug"]
loss_names = _loss_names({"vqa": 1})
batch_size = 640
max_epoch = 10
max_steps = None
warmup_steps = 0.1
draw_false_image = 0
learning_rate = 1e-4
val_check_interval = 0.1
lr_mult = 10 | null |
163,877 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_irtr_coco():
exp_name = "finetune_irtr_coco"
datasets = ["coco"]
loss_names = _loss_names({"itm": 0.5, "irtr": 1})
batch_size = 256
max_epoch = 10
max_steps = None
warmup_steps = 0.1
get_recall_metric = True
draw_false_text = 15
learning_rate = 1e-4 | null |
163,878 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_irtr_coco_randaug():
exp_name = "finetune_irtr_coco_randaug"
datasets = ["coco"]
train_transform_keys = ["pixelbert_randaug"]
loss_names = _loss_names({"itm": 0.5, "irtr": 1})
batch_size = 256
max_epoch = 10
max_steps = None
warmup_steps = 0.1
get_recall_metric = True
draw_false_text = 15
learning_rate = 1e-4 | null |
163,879 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_irtr_f30k():
exp_name = "finetune_irtr_f30k"
datasets = ["f30k"]
loss_names = _loss_names({"itm": 0.5, "irtr": 1})
batch_size = 256
max_epoch = 10
max_steps = None
warmup_steps = 0.1
get_recall_metric = True
draw_false_text = 15
learning_rate = 1e-4 | null |
163,880 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_irtr_f30k_randaug():
exp_name = "finetune_irtr_f30k_randaug"
datasets = ["f30k"]
train_transform_keys = ["pixelbert_randaug"]
loss_names = _loss_names({"itm": 0.5, "irtr": 1})
batch_size = 256
max_epoch = 10
max_steps = None
warmup_steps = 0.1
get_recall_metric = True
draw_false_text = 15
learning_rate = 1e-4 | null |
163,881 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_tr_imagechat():
exp_name = "finetune_tr_imagechat"
datasets = ["imagechat"]
loss_names = _loss_names({"itm": 0.5, "irtr": 1})
batch_size = 256
max_epoch = 15
max_steps = None
max_text_len= 100
warmup_steps = 0.2
get_recall_metric = True
draw_false_text = 15
learning_rate = 1e-4
end_lr = 1e-6 | null |
163,882 | from sacred import Experiment
from pace.modules import decode_utils
def _loss_names(d):
ret = {
"itm": 0,
"mlm": 0,
"mpp": 0,
"vqa": 0,
"nlvr2": 0,
"irtr": 0,
"dst": 0,
"rg": 0,
"intent":0,
"dense":0,
"seq2seq":0
}
ret.update(d)
return ret
def task_finetune_tr_imagechat_randaug():
exp_name = "finetune_tr_imagechat_randaug"
datasets = ["imagechat"]
loss_names = _loss_names({"itm": 0.5, "irtr": 1})
batch_size = 256
max_epoch = 15
max_steps = None
train_transform_keys = ["pixelbert_randaug"]
max_text_len= 80
warmup_steps = 0.2
get_recall_metric = True
draw_false_text = 15
learning_rate = 1e-4#1e-4
end_lr = 1e-6 | null |
163,883 | from sacred import Experiment
from pace.modules import decode_utils
def step25k():
max_epoch = 100
max_steps = 25000 | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.