code stringlengths 101 5.91M |
|---|
def skipCUDANonDefaultStreamIf(condition):
    """Decorator factory: disable CUDA non-default-stream testing on a test
    function when *condition* holds, unless the function already opted out."""
    def decorator(test_fn):
        # Only touch functions whose flag has not already been cleared.
        currently_enabled = getattr(test_fn, '_do_cuda_non_default_stream', True)
        if currently_enabled:
            test_fn._do_cuda_non_default_stream = not condition
        return test_fn
    return decorator
def simpleCNN2(num_classes=10, norm_layer_type='bn', conv_layer_type='conv2d', linear_layer_type='linear', activation_layer_type='relu'):
    """Factory for a small circular-padding CNN, forwarding every layer-type
    choice straight through to Net_circular_CNN."""
    layer_choices = dict(
        num_classes=num_classes,
        norm_layer_type=norm_layer_type,
        conv_layer_type=conv_layer_type,
        linear_layer_type=linear_layer_type,
        activation_layer_type=activation_layer_type,
    )
    return Net_circular_CNN(**layer_choices)
def raise_isinstance_error(variable_name, possible_type, variable):
    """Raise a ValueError reporting that *variable* does not have one of the
    expected types in *possible_type*."""
    message = f'{variable_name} has to be one of {possible_type}. Got {type(variable)} instead.'
    raise ValueError(message)
@pytest.mark.parametrize('use_global_model', [True, False])
@pytest.mark.parametrize('use_global_init_dataset', [True, False])
@pytest.mark.parametrize('num_query_points_per_batch', [1, 2])
def test_bayesian_optimizer_creates_correct_datasets_for_rank3_points(use_global_model: bool, use_global_init_dataset: bool, num_query_points_per_batch: int) -> None:
    """Check that rank-3 query points yield correctly tagged datasets.

    Fix: the `@pytest.mark.parametrize` decorators were garbled into bare
    `.parametrize(...)` lines (invalid syntax) in this copy; restored here.
    """
    batch_size = 4
    # Either one global dataset, or one localized dataset per batch element
    # plus the global one (which some code paths still read).
    if use_global_init_dataset:
        init_data = {OBJECTIVE: mk_dataset([[0.5], [1.5]], [[0.25], [0.35]])}
    else:
        init_data = {LocalizedTag(OBJECTIVE, i): mk_dataset([[0.5 + i], [1.5 + i]], [[0.25], [0.35]]) for i in range(batch_size)}
        init_data[OBJECTIVE] = mk_dataset([[0.5], [1.5]], [[0.25], [0.35]])
    query_points = tf.reshape(tf.constant(range(batch_size * num_query_points_per_batch), tf.float64), (num_query_points_per_batch, batch_size, 1))
    search_space = Box([-1], [1])
    model = DatasetChecker(use_global_model, use_global_init_dataset, init_data, query_points)
    if use_global_model:
        models = {OBJECTIVE: model}
    else:
        models = copy_to_local_models(model, batch_size)
    for (tag, model) in models.items():
        model._tag = tag
    # DatasetChecker asserts dataset correctness during the optimization step.
    optimizer = BayesianOptimizer(lambda x: Dataset(x, x), search_space)
    rule = LocalDatasetsFixedAcquisitionRule(query_points, batch_size)
    optimizer.optimize(1, init_data, models, rule).final_result.unwrap()
def register_Ns3EpcTftPacketFilter_methods(root_module, cls):
    """Register constructors, the two Matches overloads, and all public data
    members of ns3::EpcTft::PacketFilter on the binding class *cls*."""
    cls.add_constructor([param('ns3::EpcTft::PacketFilter const &', 'arg0')])
    cls.add_constructor([])
    # Matches() has an IPv4 and an IPv6 overload with the same trailing args.
    for addr_type in ('ns3::Ipv4Address', 'ns3::Ipv6Address'):
        cls.add_method('Matches', 'bool', [param('ns3::EpcTft::Direction', 'd'), param(addr_type, 'ra'), param(addr_type, 'la'), param('uint16_t', 'rp'), param('uint16_t', 'lp'), param('uint8_t', 'tos')])
    # All public fields are exposed as mutable instance attributes.
    instance_attributes = [
        ('direction', 'ns3::EpcTft::Direction'),
        ('localAddress', 'ns3::Ipv4Address'),
        ('localIpv6Address', 'ns3::Ipv6Address'),
        ('localIpv6Prefix', 'ns3::Ipv6Prefix'),
        ('localMask', 'ns3::Ipv4Mask'),
        ('localPortEnd', 'uint16_t'),
        ('localPortStart', 'uint16_t'),
        ('precedence', 'uint8_t'),
        ('remoteAddress', 'ns3::Ipv4Address'),
        ('remoteIpv6Address', 'ns3::Ipv6Address'),
        ('remoteIpv6Prefix', 'ns3::Ipv6Prefix'),
        ('remoteMask', 'ns3::Ipv4Mask'),
        ('remotePortEnd', 'uint16_t'),
        ('remotePortStart', 'uint16_t'),
        ('typeOfService', 'uint8_t'),
        ('typeOfServiceMask', 'uint8_t'),
    ]
    for attr_name, attr_type in instance_attributes:
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def dla169(pretrained=None, **kwargs):
    """Build a DLA-169 network (Bottleneck blocks with expansion 2, residual
    root) and optionally load ImageNet weights when *pretrained* is set."""
    Bottleneck.expansion = 2
    level_depths = [1, 1, 2, 3, 5, 1]
    level_channels = [16, 32, 128, 256, 512, 1024]
    model = DLA(level_depths, level_channels, block=Bottleneck, residual_root=True, **kwargs)
    if pretrained is not None:
        model.load_pretrained_model(data='imagenet', name='dla169', hash='0914e092')
    return model
class LiSHT_VGG(nn.Module):
    """VGG-style network using LiSHT activations, ending in a 100-way
    linear classifier over 512 flattened features."""

    def __init__(self, vgg_name):
        super(LiSHT_VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 100)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        """Translate a VGG config list ('M' = max-pool, int = conv width)
        into a Sequential feature extractor."""
        layers = []
        in_channels = 3
        for spec in cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(in_channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    LiSHT(),
                ])
                in_channels = spec
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
class SENet(ImageNetBase):
    """SENet-154 ImageNet model loaded from a bundled NNP network file."""
    # Maps `use_up_to` keywords to internal network variable names.
    _KEY_VARIABLE = {'classifier': 'Affine', 'pool': 'AveragePooling', 'lastconv': 'Add2_7_RepeatStart_4[1]', 'lastconv+relu': 'ReLU_25_RepeatStart_4[1]'}
    def __init__(self):
        # NOTE(review): no super().__init__() call here -- presumably
        # ImageNetBase needs none; confirm against the base class.
        self._load_nnp('SENet-154.nnp', 'SENet-154/SENet-154.nnp')
    def _input_shape(self):
        # Expected input layout: (channels, height, width).
        # NOTE(review): this looks like it was a @property in the original
        # (decorators appear stripped in this copy) -- confirm upstream.
        return (3, 224, 224)
    def __call__(self, input_var=None, use_from=None, use_up_to='classifier', training=False, force_global_pooling=False, check_global_pooling=True, returns_net=False, verbose=0):
        """Build the SENet-154 graph up to `use_up_to`.

        Returns the network object when `returns_net` is True, otherwise the
        single output variable of the constructed network.
        """
        input_var = self.get_input_var(input_var)
        callback = NnpNetworkPass(verbose)
        # Strip the training-time augmentation node and feed our input.
        callback.remove_and_rewire('ImageAugmentationX')
        callback.set_variable('InputX', input_var)
        self.configure_global_average_pooling(callback, force_global_pooling, check_global_pooling, 'AveragePooling', by_type=True)
        # Use batch statistics only while training.
        callback.set_batch_normalization_batch_stat_all(training)
        self.use_up_to(use_up_to, callback)
        if (not training):
            # Inference: drop Dropout and freeze parameters.
            callback.remove_and_rewire('Dropout')
            callback.fix_parameters()
        batch_size = input_var.shape[0]
        net = self.nnp.get_network('Training', batch_size=batch_size, callback=callback)
        if returns_net:
            return net
        return list(net.outputs.values())[0]
class XLNetConfig(PretrainedConfig):
    """Configuration class for XLNet models.

    Fix: the `@property` and `@n_token.setter` decorators were stripped in
    this copy (the dangling `_token.setter` line is invalid syntax); they are
    restored so the legacy accessors behave as attributes again.
    """
    model_type = 'xlnet'

    def __init__(self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation='gelu', untie_r=True, attn_type='bi', initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, bi_data=False, clamp_len=-1, same_length=False, summary_type='last', summary_use_proj=True, summary_activation='tanh', summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs):
        """Store model hyper-parameters; `d_model` must divide evenly by `n_head`."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        assert ((d_model % n_head) == 0)
        if ('d_head' in kwargs):
            # A caller-supplied d_head must agree with d_model // n_head.
            assert (kwargs['d_head'] == (d_model // n_head)), f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({(d_model // n_head)})"
        self.d_head = (d_model // n_head)
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

    @property
    def max_position_embeddings(self):
        # XLNet has no fixed sequence-length limit.
        return -1

    @property
    def n_token(self):
        # Legacy alias for vocab_size.
        return self.vocab_size

    @n_token.setter
    def n_token(self, value):
        self.vocab_size = value

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer
def quote_xml(inStr):
    """Return *inStr* (coerced to str) with XML special characters escaped.

    Fix: the replacement targets had been HTML-unescaped into no-ops
    (`'&' -> '&'` etc.); restored to the XML predefined entities.
    """
    s1 = inStr if (isinstance(inStr, str) and inStr) else ('%s' % inStr)
    # '&' must be escaped first so the entities we add are not re-escaped.
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def check_build_wheel(hooks, build_sys_requires):
    """Exercise the PEP 517 wheel-build path of *hooks*.

    Installs the static then dynamic build dependencies, builds a wheel into
    a temporary directory and sanity-checks the result (.whl suffix, file
    exists, is a zip). Returns True on success, False on any failure.
    """
    with BuildEnvironment() as env:
        try:
            env.pip_install(build_sys_requires)
            log.info('Installed static build dependencies')
        except CalledProcessError:
            log.error('Failed to install static build dependencies')
            return False
        try:
            reqs = hooks.get_requires_for_build_wheel({})
            log.info('Got build requires: %s', reqs)
        except Exception:
            # Fix: message previously said 'get_requires_for_build_sdist',
            # but the hook invoked above is get_requires_for_build_wheel.
            log.error('Failure in get_requires_for_build_wheel', exc_info=True)
            return False
        try:
            env.pip_install(reqs)
            log.info('Installed dynamic build dependencies')
        except CalledProcessError:
            log.error('Failed to install dynamic build dependencies')
            return False
        td = mkdtemp()
        log.info('Trying to build wheel in %s', td)
        try:
            try:
                filename = hooks.build_wheel(td, {})
                log.info('build_wheel returned %r', filename)
            except Exception:
                log.info('Failure in build_wheel', exc_info=True)
                return False
            if (not filename.endswith('.whl')):
                log.error("Filename %s doesn't have .whl extension", filename)
                return False
            path = pjoin(td, filename)
            if isfile(path):
                log.info('Output file %s exists', path)
            else:
                log.error('Output file %s does not exist', path)
                return False
            if zipfile.is_zipfile(path):
                log.info('Output file is a zip file')
            else:
                log.error('Output file is not a zip file')
                return False
        finally:
            # Always clean up the temporary build directory.
            shutil.rmtree(td)
        return True
def PrintUsage(message):
    """Write the usage text to stderr, then exit: with a FATAL ERROR line
    when *message* is given, otherwise with status 1."""
    sys.stderr.write(_USAGE)
    if not message:
        sys.exit(1)
    sys.exit('\nFATAL ERROR: ' + message)
def rename(node: goos.ProblemGraphNode, name: str) -> goos.ProblemGraphNode:
    """Cast *node* to its own concrete type while assigning it *name*."""
    node_type = type(node)
    return cast(node, node_type, name=name)
class ExponentialDelaySampler():
    """Samples per-round reward-observation delays from exponential distributions.

    NOTE(review): the annotated class-level defaults plus `__post_init__`
    strongly suggest this was declared with `@dataclass`, stripped in this
    copy -- without it `__post_init__` never runs; confirm upstream.
    """
    # Scale of the exponential used by exponential_delay_function.
    max_scale: float = 100.0
    # Lower bound of the scale in the reward-weighted variant.
    min_scale: float = 10.0
    # RNG seed; must be supplied explicitly (validated in __post_init__).
    random_state: int = None
    def __post_init__(self) -> None:
        """Validate the seed and construct the RNG."""
        if (self.random_state is None):
            raise ValueError('`random_state` must be given')
        self.random_ = check_random_state(self.random_state)
    def exponential_delay_function(self, n_rounds: int, n_actions: int, **kwargs) -> np.ndarray:
        """Draw one integer delay per round (ceil of an exponential sample)
        and tile it across actions, returning shape (n_rounds, n_actions)."""
        delays_per_round = np.ceil(self.random_.exponential(scale=self.max_scale, size=n_rounds))
        return np.tile(delays_per_round, (n_actions, 1)).T
    def exponential_delay_function_expected_reward_weighted(self, expected_rewards: np.ndarray, **kwargs) -> np.ndarray:
        """Delays whose scale shrinks as expected reward grows, so highly
        rewarding actions are observed sooner."""
        scale = (self.min_scale + ((1 - expected_rewards) * (self.max_scale - self.min_scale)))
        delays_per_round = np.ceil(self.random_.exponential(scale=scale, size=expected_rewards.shape))
        return delays_per_round
class Hypothesis(BaseHypothesis):
    """Decoding hypothesis that carries partial-SQL state plus decoder tensors."""

    def __init__(self, dec_prefix, decoder_states, decoder_input):
        BaseHypothesis.__init__(self, dec_prefix)
        self.sql = []                 # SQL tokens generated so far
        self.keyword = None           # SQL keyword currently being expanded
        self.nested_keywords = []     # keywords of enclosing nested queries
        (self.avoid_items, self.confirmed_items) = ([], [])
        self.decoder_states = decoder_states
        self.decoder_input = decoder_input

    @staticmethod
    def print_hypotheses(hypotheses):
        """Debug-print score, SQL and tag/dec sequences of each hypothesis.

        Fix: the function takes no `self`; restored the missing
        `@staticmethod` so it also works when called on an instance.
        """
        for hyp in hypotheses:
            print('logprob: {}, sql: {}\ntag_seq: {}\ndec_seq: {}'.format(hyp.logprob, hyp.sql, hyp.tag_seq, hyp.dec_seq))
def _dict2sarray(sorts, ctx):
    """Convert a {name: Z3 sort} dict into parallel ctypes arrays.

    Returns (count, Symbol array, Sort array) for the C API.
    """
    count = len(sorts)
    _names = (Symbol * count)()
    _sorts = (Sort * count)()
    for idx, (name, sort) in enumerate(sorts.items()):
        if z3_debug():
            _z3_assert(isinstance(name, str), 'String expected')
            _z3_assert(is_sort(sort), 'Z3 sort expected')
        _names[idx] = to_symbol(name, ctx)
        _sorts[idx] = sort.ast
    return (count, _names, _sorts)
class AbstractEntityDisambiguator(object):
    """Base class for named-entity disambiguation: loads type/QID vocabularies
    from the database directory and attaches entity features to examples."""
    def __init__(self, args):
        self.args = args
        self.max_features_size = self.args.max_features_size
        # Wikidata type-vocab <-> type-QID mappings bundled with the database.
        with open(f'{self.args.database_dir}/wiki_entity_data/type_mappings/wiki/type_vocab_to_wikidataqid.json') as fin:
            self.type_vocab_to_typeqid = ujson.load(fin)
        self.typeqid_to_type_vocab = {v: k for (k, v) in self.type_vocab_to_typeqid.items()}
        # type-QID <-> integer-id mappings used to encode feature vectors.
        with open(f'{self.args.database_dir}/es_material/typeqid2id.json') as fin:
            self.typeqid2id = ujson.load(fin)
        self.id2typeqid = {v: k for (k, v) in self.typeqid2id.items()}
        self.all_schema_types = set()
        self.almond_type_mapping = dict()
        self.wiki2normalized_type = list()
        if self.args.almond_type_mapping_path:
            # A user-provided mapping file overrides the bundled defaults.
            with open(os.path.join(self.args.root, self.args.almond_type_mapping_path)) as fin:
                self.almond_type_mapping = ujson.load(fin)
            self.update_wiki2normalized_type()
        else:
            # Merge the bundled per-domain mappings for the requested domains.
            with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'database_files/almond_type_mapping.json')) as fin:
                almond_type_mapping_all_domains = ujson.load(fin)
            for domain in self.args.ned_domains:
                self.almond_type_mapping.update(almond_type_mapping_all_domains[domain])
            self.update_wiki2normalized_type()
        # Id 0 is reserved for the unknown type.
        self.unk_id = 0
        self.unk_type = self.id2typeqid[self.unk_id]
    def update_wiki2normalized_type(self):
        """Compile almond_type_mapping into (pattern, normalized_type) pairs;
        exact titles are appended before wildcard patterns so they win."""
        (matches, inclusions) = ([], [])
        for (normalized_type, titles) in self.almond_type_mapping.items():
            for title in titles:
                # Titles containing glob metacharacters become wildcard rules.
                if re.search('[.?*]', title):
                    inclusions.append((re.compile(fnmatch.translate(title)), normalized_type))
                else:
                    matches.append((re.compile(fnmatch.translate(title)), normalized_type))
        self.wiki2normalized_type.extend(matches)
        self.wiki2normalized_type.extend(inclusions)
    def normalize_types(self, type):
        """Return the normalized type of the first pattern fully matching
        *type* (lowercased), or None when nothing matches."""
        norm_type = None
        type = type.lower()
        for pair in self.wiki2normalized_type:
            if pair[0].fullmatch(type):
                norm_type = pair[1]
                break
        return norm_type
    def process_examples(self, examples, split_path, utterance_field):
        # Subclasses implement the actual disambiguation pass.
        raise NotImplementedError()
    def pad_features(self, features, max_size, pad_id):
        """Truncate or right-pad *features* (in place for the pad case) to
        exactly *max_size* entries."""
        if (len(features) > max_size):
            features = features[:max_size]
        else:
            features += ([pad_id] * (max_size - len(features)))
        return features
    def convert_entities_to_strings(self, feat):
        """Render a feature's type ids as '( t1 | t2 )' and QIDs as
        '[ Q1 | Q2 ]', honoring args.entity_attributes; returns both strings
        ('' when the corresponding attribute is disabled)."""
        final_types = ''
        if ('type_id' in self.args.entity_attributes):
            # Id 0 is the unknown type and is skipped.
            all_types = ' | '.join(sorted((self.typeqid_to_type_vocab[self.id2typeqid[id]] for id in feat.type_id if (id != 0))))
            final_types = (('( ' + all_types) + ' )')
        final_qids = ''
        if ('qid' in self.args.entity_attributes):
            # -1 marks a missing QID and is skipped.
            all_qids = ' | '.join(sorted((('Q' + str(id)) for id in feat.qid if (id != (- 1)))))
            final_qids = (('[ ' + all_qids) + ' ]')
        return (final_types, final_qids)
    def add_entities_to_text(self, sentence, features):
        """Annotate *sentence* with its per-token entity features: 'insert'
        wraps each entity span inline in <e>...</e>; 'append' lists all
        entity spans after the sentence inside one <e>...</e> block."""
        sentence_tokens = sentence.split(' ')
        assert (len(sentence_tokens) == len(features))
        sentence_plus_types_tokens = []
        i = 0
        if (self.args.add_entities_to_text == 'insert'):
            while (i < len(sentence_tokens)):
                token = sentence_tokens[i]
                feat = features[i]
                if any([(val != 0) for val in feat.type_id]):
                    final_token = '<e> '
                    (final_types, final_qids) = self.convert_entities_to_strings(feat)
                    final_token += ((final_types + final_qids) + token)
                    i += 1
                    # Consecutive tokens sharing the same feature belong to
                    # the same entity mention; consume the whole span.
                    while ((i < len(sentence_tokens)) and (features[i] == feat)):
                        final_token += (' ' + sentence_tokens[i])
                        i += 1
                    final_token += ' </e>'
                    sentence_plus_types_tokens.append(final_token)
                else:
                    sentence_plus_types_tokens.append(token)
                    i += 1
        elif (self.args.add_entities_to_text == 'append'):
            sentence_plus_types_tokens.extend(sentence_tokens)
            sentence_plus_types_tokens.append('<e>')
            while (i < len(sentence_tokens)):
                feat = features[i]
                if any([(val != 0) for val in feat.type_id]):
                    (final_types, final_qids) = self.convert_entities_to_strings(feat)
                    all_tokens = []
                    while ((i < len(sentence_tokens)) and (features[i] == feat)):
                        all_tokens.append(sentence_tokens[i])
                        i += 1
                    final_token = ' '.join(filter((lambda token: (token != '')), [*all_tokens, final_types, final_qids, ';']))
                    sentence_plus_types_tokens.append(final_token)
                else:
                    i += 1
            sentence_plus_types_tokens.append('</e>')
        # Any other add_entities_to_text value leaves the list empty and the
        # sentence is returned unchanged.
        if (not sentence_plus_types_tokens):
            return sentence
        else:
            return ' '.join(sentence_plus_types_tokens)
    def replace_features_inplace(self, examples, all_token_type_ids, all_token_type_probs, all_token_qids, utterance_field):
        """Attach per-token Entity features to the chosen field (question or
        context) of each example, pad the other field with pad entities, and
        rewrite the field's text via add_entities_to_text. Mutates *examples*."""
        assert (len(examples) == len(all_token_type_ids) == len(all_token_type_probs) == len(all_token_qids))
        for (n, (ex, tokens_type_ids, tokens_type_probs, tokens_qids)) in enumerate(zip(examples, all_token_type_ids, all_token_type_probs, all_token_qids)):
            features = [Entity(*tup) for tup in zip(tokens_type_ids, tokens_type_probs, tokens_qids)]
            if (utterance_field == 'question'):
                # One feature per whitespace token of the question.
                assert (len(tokens_type_ids) == len(tokens_type_probs) == len(tokens_qids) == len(ex.question.split(' ')))
                examples[n].question_feature = features
                examples[n].context_feature = ([Entity.get_pad_entity(self.max_features_size)] * len(ex.context.split(' ')))
                examples[n].question = self.add_entities_to_text(ex.question, features)
            else:
                assert (len(tokens_type_ids) == len(tokens_type_probs) == len(tokens_qids) == len(ex.context.split(' ')))
                examples[n].context_feature = features
                examples[n].question_feature = ([Entity.get_pad_entity(self.max_features_size)] * len(ex.question.split(' ')))
                examples[n].context = self.add_entities_to_text(ex.context, features)
def dataset_dest_prefix(args, output_prefix, lang):
    """Build the dataset path prefix '<destdir>/<output_prefix>' followed by
    the '.src-tgt[.lang]' suffix (no suffix for source-only data)."""
    base = f'{args.destdir}/{output_prefix}'
    if lang is None and args.only_source:
        return base
    pair = f'{args.source_lang}-{args.target_lang}'
    if lang is None:
        return f'{base}.{pair}'
    return f'{base}.{pair}.{lang}'
class ToWeak(object):
    """Expand annotations from a file into per-token 'weak' annotations.

    NOTE(review): this is Python 2 code (`unicode`, `.decode` on the str
    read from `open`); it would fail under Python 3.
    """
    def __init__(self, fname):
        self.fname = fname
    def __call__(self):
        # Serialize all generated annotations, one per line, encoded as ENC.
        return u'\n'.join((unicode(a) for a in self.annotations())).encode(ENC)
    def annotations(self):
        """Yield one single-token annotation for every token position in the
        inclusive span [start, end] of each annotation read from fname."""
        for line in open(self.fname):
            a = Annotation.from_string(line.rstrip('\n').decode(ENC))
            for i in range(a.start, (a.end + 1)):
                (yield Annotation(a.docid, i, (i + 1), a.candidates))
    def add_arguments(cls, p):
        # NOTE(review): takes `cls` but has no @classmethod decorator --
        # possibly stripped in this copy; confirm upstream.
        p.add_argument('fname', metavar='FILE')
        p.set_defaults(cls=cls)
        return p
class ResNetBottleneck(nn.Module):
    """ResNet bottleneck (1x1 -> 3x3 -> 1x1 ConvBNReLU stack) with an
    optional downsampling shortcut on the residual branch."""

    expansion = 4
    num_conv = 3

    def __init__(self, inplanes, planes, stride):
        super(ResNetBottleneck, self).__init__()
        assert stride in (1, 2), 'invalid stride {:}'.format(stride)
        width = planes * self.expansion
        self.conv_1x1 = ConvBNReLU(inplanes, planes, 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_3x3 = ConvBNReLU(planes, planes, 3, stride, 1, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_1x4 = ConvBNReLU(planes, width, 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
        if stride == 2:
            # Spatial downsampling: average-pool then 1x1 projection.
            self.downsample = ConvBNReLU(inplanes, width, 1, 1, 0, False, has_avg=True, has_bn=False, has_relu=False)
        elif inplanes != width:
            # Channel mismatch only: 1x1 projection with BN.
            self.downsample = ConvBNReLU(inplanes, width, 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
        else:
            self.downsample = None
        self.out_dim = width
        self.search_mode = 'basic'

    def get_range(self):
        """Concatenate the search ranges of the three conv blocks."""
        ranges = self.conv_1x1.get_range() + self.conv_3x3.get_range()
        return ranges + self.conv_1x4.get_range()

    def get_flops(self, divide):
        """Total FLOPs of the three convs plus the downsample path (if any)."""
        total = self.conv_1x1.get_flops(divide)
        total += self.conv_3x3.get_flops(divide)
        total += self.conv_1x4.get_flops(divide)
        if hasattr(self.downsample, 'get_flops'):
            total += self.downsample.get_flops(divide)
        return total

    def forward(self, inputs):
        out = self.conv_1x1(inputs)
        out = self.conv_3x3(out)
        out = self.conv_1x4(out)
        shortcut = self.downsample(inputs) if self.downsample is not None else inputs
        merged = additive_func(shortcut, out)
        return nn.functional.relu(merged, inplace=True)
@register_task('multilingual_translation')
class MultilingualTranslationTask(LegacyFairseqTask):
    """Translate between multiple language pairs, training round-robin with
    one model component per pair.

    Fix: decorators were stripped in this copy (the leading line read
    `_task('multilingual_translation')` and `cls`-first / property-style
    methods had no decorators); `@register_task`, `@staticmethod`,
    `@classmethod` and `@property` are restored to match the call patterns.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific command-line arguments."""
        parser.add_argument('data', metavar='DIR', help='path to data directory')
        parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language (only needed for inference)')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language (only needed for inference)')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left (default: True)')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left (default: False)')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')
        parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset')
        parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'], metavar='SRCTGT', help='replace beginning-of-sentence in source sentence with source or target language token. (src/tgt)')
        parser.add_argument('--decoder-langtok', action='store_true', help='replace beginning-of-sentence in target sentence with target language token')

    def __init__(self, args, dicts, training):
        super().__init__(args)
        self.dicts = dicts
        self.training = training
        if training:
            self.lang_pairs = args.lang_pairs
        else:
            # Inference: a single pair from --source-lang/--target-lang.
            self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
        self.eval_lang_pairs = self.lang_pairs
        self.model_lang_pairs = self.lang_pairs
        self.langs = list(dicts.keys())

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Entry point used by fairseq to construct the task."""
        (dicts, training) = cls.prepare(args, **kwargs)
        return cls(args, dicts, training)

    @classmethod
    def prepare(cls, args, **kargs):
        """Load per-language dictionaries and decide train vs inference mode."""
        args.left_pad_source = utils.eval_bool(args.left_pad_source)
        args.left_pad_target = utils.eval_bool(args.left_pad_target)
        if (args.lang_pairs is None):
            raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.')
        if isinstance(args.lang_pairs, str):
            args.lang_pairs = args.lang_pairs.split(',')
        sorted_langs = sorted(list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')}))
        # Explicit source/target languages signal inference mode.
        if ((args.source_lang is not None) or (args.target_lang is not None)):
            training = False
        else:
            training = True
        dicts = OrderedDict()
        for lang in sorted_langs:
            paths = utils.split_paths(args.data)
            assert (len(paths) > 0)
            dicts[lang] = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(lang)))
            if (len(dicts) > 0):
                # All dictionaries must agree on the special symbol indices.
                assert (dicts[lang].pad() == dicts[sorted_langs[0]].pad())
                assert (dicts[lang].eos() == dicts[sorted_langs[0]].eos())
                assert (dicts[lang].unk() == dicts[sorted_langs[0]].unk())
            if ((args.encoder_langtok is not None) or args.decoder_langtok):
                for lang_to_add in sorted_langs:
                    dicts[lang].add_symbol(_lang_token(lang_to_add))
            logger.info('[{}] dictionary: {} types'.format(lang, len(dicts[lang])))
        return (dicts, training)

    def get_encoder_langtok(self, src_lang, tgt_lang):
        """Token the encoder should see in place of EOS (or EOS if disabled)."""
        if (self.args.encoder_langtok is None):
            return self.dicts[src_lang].eos()
        if (self.args.encoder_langtok == 'src'):
            return _lang_token_index(self.dicts[src_lang], src_lang)
        else:
            return _lang_token_index(self.dicts[src_lang], tgt_lang)

    def get_decoder_langtok(self, tgt_lang):
        """Token the decoder should start with (or EOS if disabled)."""
        if (not self.args.decoder_langtok):
            return self.dicts[tgt_lang].eos()
        return _lang_token_index(self.dicts[tgt_lang], tgt_lang)

    def alter_dataset_langtok(self, lang_pair_dataset, src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None):
        """Wrap a pair dataset so language tokens replace EOS/BOS as configured."""
        if ((self.args.encoder_langtok is None) and (not self.args.decoder_langtok)):
            return lang_pair_dataset
        new_src_eos = None
        if ((self.args.encoder_langtok is not None) and (src_eos is not None) and (src_lang is not None) and (tgt_lang is not None)):
            new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang)
        else:
            src_eos = None
        new_tgt_bos = None
        if (self.args.decoder_langtok and (tgt_eos is not None) and (tgt_lang is not None)):
            new_tgt_bos = self.get_decoder_langtok(tgt_lang)
        else:
            tgt_eos = None
        return TransformEosLangPairDataset(lang_pair_dataset, src_eos=src_eos, new_src_eos=new_src_eos, tgt_bos=tgt_eos, new_tgt_bos=new_tgt_bos)

    def load_dataset(self, split, epoch=1, **kwargs):
        """Load one round-robin dataset over all language pairs for *split*."""
        paths = utils.split_paths(self.args.data)
        assert (len(paths) > 0)
        data_path = paths[((epoch - 1) % len(paths))]

        def language_pair_dataset(lang_pair):
            (src, tgt) = lang_pair.split('-')
            langpair_dataset = load_langpair_dataset(data_path, split, src, self.dicts[src], tgt, self.dicts[tgt], combine=True, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions)
            return self.alter_dataset_langtok(langpair_dataset, src_eos=self.dicts[src].eos(), src_lang=src, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt)

        self.datasets[split] = RoundRobinZipDatasets(OrderedDict([(lang_pair, language_pair_dataset(lang_pair)) for lang_pair in self.lang_pairs]), eval_key=(None if self.training else ('%s-%s' % (self.args.source_lang, self.args.target_lang))))

    def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
        if (constraints is not None):
            raise NotImplementedError('Constrained decoding with the multilingual_translation task is not supported')
        lang_pair = ('%s-%s' % (self.args.source_lang, self.args.target_lang))
        return RoundRobinZipDatasets(OrderedDict([(lang_pair, self.alter_dataset_langtok(LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary), src_eos=self.source_dictionary.eos(), src_lang=self.args.source_lang, tgt_eos=self.target_dictionary.eos(), tgt_lang=self.args.target_lang))]), eval_key=lang_pair)

    def build_model(self, args):
        """Build and validate a FairseqMultiModel for this task."""
        def check_args():
            # The model's langtok settings must match the task's.
            messages = []
            if (len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs)) != 0):
                messages.append('--lang-pairs should include all the language pairs {}.'.format(args.lang_pairs))
            if (self.args.encoder_langtok != args.encoder_langtok):
                messages.append('--encoder-langtok should be {}.'.format(args.encoder_langtok))
            if (self.args.decoder_langtok != args.decoder_langtok):
                messages.append('--decoder-langtok should {} be set.'.format(('' if args.decoder_langtok else 'not')))
            if (len(messages) > 0):
                raise ValueError(' '.join(messages))

        check_args()
        from fairseq import models
        model = models.build_model(args, self)
        if (not isinstance(model, FairseqMultiModel)):
            raise ValueError('MultilingualTranslationTask requires a FairseqMultiModel architecture')
        return model

    def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
        """One training step: accumulate loss over all non-empty lang pairs."""
        model.train()
        from collections import defaultdict
        (agg_loss, agg_sample_size, agg_logging_output) = (0.0, 0.0, defaultdict(float))
        curr_lang_pairs = [lang_pair for lang_pair in self.model_lang_pairs if ((sample[lang_pair] is not None) and (len(sample[lang_pair]) != 0))]
        for (idx, lang_pair) in enumerate(curr_lang_pairs):

            def maybe_no_sync():
                # Skip gradient all-reduce on all but the last pair to batch
                # communication across pairs.
                if ((self.args.distributed_world_size > 1) and hasattr(model, 'no_sync') and (idx < (len(curr_lang_pairs) - 1))):
                    return model.no_sync()
                else:
                    return contextlib.ExitStack()

            with maybe_no_sync():
                (loss, sample_size, logging_output) = criterion(model.models[lang_pair], sample[lang_pair])
                if ignore_grad:
                    loss *= 0
                optimizer.backward(loss)
            agg_loss += loss.detach().item()
            agg_sample_size += sample_size
            for k in logging_output:
                agg_logging_output[k] += logging_output[k]
                agg_logging_output[f'{lang_pair}:{k}'] += logging_output[k]
        return (agg_loss, agg_sample_size, agg_logging_output)

    def valid_step(self, sample, model, criterion):
        """One validation step over all evaluation language pairs."""
        model.eval()
        with torch.no_grad():
            from collections import defaultdict
            (agg_loss, agg_sample_size, agg_logging_output) = (0.0, 0.0, defaultdict(float))
            for lang_pair in self.eval_lang_pairs:
                if ((lang_pair not in sample) or (sample[lang_pair] is None) or (len(sample[lang_pair]) == 0)):
                    continue
                (loss, sample_size, logging_output) = criterion(model.models[lang_pair], sample[lang_pair])
                agg_loss += loss.data.item()
                agg_sample_size += sample_size
                for k in logging_output:
                    agg_logging_output[k] += logging_output[k]
                    agg_logging_output[f'{lang_pair}:{k}'] += logging_output[k]
        return (agg_loss, agg_sample_size, agg_logging_output)

    def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
        with torch.no_grad():
            if self.args.decoder_langtok:
                bos_token = _lang_token_index(self.target_dictionary, self.args.target_lang)
            else:
                bos_token = self.target_dictionary.eos()
            return generator.generate(models, sample, prefix_tokens=prefix_tokens, constraints=constraints, bos_token=bos_token)

    def reduce_metrics(self, logging_outputs, criterion):
        with metrics.aggregate():
            super().reduce_metrics(logging_outputs, criterion)
            for k in ['sample_size', 'nsentences', 'ntokens']:
                metrics.log_scalar(k, sum((l[k] for l in logging_outputs)))

    @property
    def source_dictionary(self):
        if self.training:
            return next(iter(self.dicts.values()))
        else:
            return self.dicts[self.args.source_lang]

    @property
    def target_dictionary(self):
        if self.training:
            return next(iter(self.dicts.values()))
        else:
            return self.dicts[self.args.target_lang]

    def max_positions(self):
        """Per-dataset-key (max_source_positions, max_target_positions)."""
        if (len(self.datasets.values()) == 0):
            return {('%s-%s' % (self.args.source_lang, self.args.target_lang)): (self.args.max_source_positions, self.args.max_target_positions)}
        return OrderedDict([(key, (self.args.max_source_positions, self.args.max_target_positions)) for split in self.datasets.keys() for key in self.datasets[split].datasets.keys()])
def save_graph(net, file_name, graph_name='net', op_only=True):
    """Render a caffe2 net as a PNG via pydot.

    With op_only True (default) a minimal operator-only graph is drawn;
    rendering errors are reported but not raised.
    """
    from caffe2.python import net_drawer
    ops = net.op
    if op_only:
        graph = net_drawer.GetPydotGraphMinimal(ops, graph_name, rankdir='TB', minimal_dependency=True)
    else:
        graph = net_drawer.GetPydotGraph(ops, graph_name, rankdir='TB')
    try:
        graph.write_png(file_name)
    except Exception as e:
        print('Error when writing graph to image {}'.format(e))
def check_oth(distfn, arg, supp, msg):
    """Cross-check sf/cdf and isf/ppf consistency of *distfn* at args *arg*:
    sf == 1 - cdf on the support, isf(q) == ppf(1-q) on a quantile grid,
    and the sf-median sits between the 0.5 crossings."""
    tol = dict(atol=1e-10, rtol=1e-10)
    npt.assert_allclose(distfn.sf(supp, *arg), 1.0 - distfn.cdf(supp, *arg), **tol)
    probs = np.linspace(0.01, 0.99, 20)
    npt.assert_allclose(distfn.isf(probs, *arg), distfn.ppf(1.0 - probs, *arg), **tol)
    median_sf = distfn.isf(0.5, *arg)
    npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5)
    npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5)
def get_likelihood_grad_BO(likelihood, mz_hat, tz0_hat):
    """Numerically differentiate the BO potential w.r.t. mz_hat and gather
    the matching backward moments from *likelihood*."""
    def potential_at(m):
        # Potential evaluated at shifted mean az = m + tz0_hat.
        return likelihood.compute_potential_BO(az=(m + tz0_hat), tz0_hat=tz0_hat)
    grad_mz_hat_A = numerical_1st_derivative(mz_hat, potential_at, EPSILON)
    az = mz_hat + tz0_hat
    vz = likelihood.compute_backward_v_BO(az=az, tz0_hat=tz0_hat)
    tz = likelihood.backward_second_moment_FG(tz_hat=tz0_hat)
    return {'grad_mz_hat_A': grad_mz_hat_A, 'mz': tz - vz, 'tz': tz, 'vz': vz}
class ReformerModelWithLMHead(metaclass=DummyObject):
    """Dummy placeholder exposed when the torch backend is unavailable."""
    # Backends required by the real implementation.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError telling the user to install torch.
        requires_backends(self, ['torch'])
class HalfCheetahDirEnv(HalfCheetahEnv):
    """Half-cheetah with a per-task running direction (+1 forward, -1 back);
    reward is directed forward velocity minus a control cost."""

    def __init__(self, task={}):
        # NOTE: the mutable default dict is kept for interface compatibility;
        # it is only read (never mutated) here.
        self._task = task
        self._goal_dir = task.get('direction', 1)
        super(HalfCheetahDirEnv, self).__init__()

    def step(self, action):
        x_before = self.sim.data.qpos[0]
        self.do_simulation(action, self.frame_skip)
        x_after = self.sim.data.qpos[0]
        forward_vel = (x_after - x_before) / self.dt
        forward_reward = self._goal_dir * forward_vel
        ctrl_cost = 0.5 * 0.1 * np.sum(np.square(action))
        observation = self._get_obs()
        infos = dict(reward_forward=forward_reward, reward_ctrl=-ctrl_cost, task=self._task)
        return (observation, forward_reward - ctrl_cost, False, infos)

    def sample_tasks(self, num_tasks):
        """Draw num_tasks random directions in {-1, +1} (fair coin)."""
        coin_flips = self.np_random.binomial(1, p=0.5, size=(num_tasks,))
        return [{'direction': d} for d in (2 * coin_flips - 1)]

    def reset_task(self, task):
        self._task = task
        self._goal_dir = task['direction']
@pytest.mark.parametrize('is_mat', [(True, True), (True, False), (False, True)])
@test_utils.test()
def test_binary_i(is_mat):
    """Exercise every integer binary operator between matrix/scalar fields.

    Fix: the `@pytest.mark.parametrize` / `@test_utils.test()` decorators were
    garbled into bare `.parametrize(...)` / `_utils.test()` lines, and the
    kernel's `@ti.kernel` decorator was stripped; all restored here.
    """
    (lhs_is_mat, rhs_is_mat) = is_mat
    # x collects one result slot per operator under test.
    x = ti.Matrix.field(3, 2, ti.i32, 20)
    if lhs_is_mat:
        y = ti.Matrix.field(3, 2, ti.i32, ())
    else:
        y = ti.field(ti.i32, ())
    if rhs_is_mat:
        z = ti.Matrix.field(3, 2, ti.i32, ())
    else:
        z = ti.field(ti.i32, ())
    if lhs_is_mat:
        y.from_numpy(np.array([[0, 2], [9, 3], [7, 4]], np.int32))
    else:
        y[None] = 6
    if rhs_is_mat:
        z.from_numpy(np.array([[4, 5], [6, 3], [9, 2]], np.int32))
    else:
        z[None] = 5

    @ti.kernel
    def func():
        x[0] = y[None] + z[None]
        x[1] = y[None] - z[None]
        x[2] = y[None] * z[None]
        x[3] = y[None] // z[None]
        x[4] = ti.raw_div(y[None], z[None])
        x[5] = y[None] % z[None]
        x[6] = ti.raw_mod(y[None], z[None])
        x[7] = y[None] ** z[None]
        x[8] = y[None] == z[None]
        x[9] = y[None] != z[None]
        x[10] = y[None] > z[None]
        x[11] = y[None] >= z[None]
        x[12] = y[None] < z[None]
        x[13] = y[None] <= z[None]
        x[14] = y[None] & z[None]
        x[15] = y[None] ^ z[None]
        x[16] = y[None] | z[None]
        x[17] = ti.min(y[None], z[None])
        x[18] = ti.max(y[None], z[None])
        x[19] = y[None] << z[None]

    func()
    x = x.to_numpy()
    y = y.to_numpy()
    z = z.to_numpy()
    # Compare every slot against the corresponding NumPy result.
    assert test_utils.allclose(x[0], y + z)
    assert test_utils.allclose(x[1], y - z)
    assert test_utils.allclose(x[2], y * z)
    assert test_utils.allclose(x[3], y // z)
    assert test_utils.allclose(x[4], y // z)
    assert test_utils.allclose(x[5], y % z)
    assert test_utils.allclose(x[6], y % z)
    assert test_utils.allclose(x[7], y ** z, rel=1e-05)
    assert test_utils.allclose(x[8], y == z)
    assert test_utils.allclose(x[9], y != z)
    assert test_utils.allclose(x[10], y > z)
    assert test_utils.allclose(x[11], y >= z)
    assert test_utils.allclose(x[12], y < z)
    assert test_utils.allclose(x[13], y <= z)
    assert test_utils.allclose(x[14], y & z)
    assert test_utils.allclose(x[15], y ^ z)
    assert test_utils.allclose(x[16], y | z)
    assert test_utils.allclose(x[17], np.minimum(y, z))
    assert test_utils.allclose(x[18], np.maximum(y, z))
    assert test_utils.allclose(x[19], y << z)
def create_master(config):
    """Build a master appropriate for the configured run mode.

    Local masters are for debugging runs; remote masters for real runs.
    """
    factory = create_master_local if config['debug_run_local'] else create_master_remote
    return factory(config)
class MultiHeadAttention(nn.Module):
    """Multi-head attention block.

    Projects query/key/value with one Linear per head, runs the Attention core,
    then applies a final Linear, in-place dropout, and a residual LayerNorm.
    """
    def init_weights(layer):
        # Xavier-initialize Linear submodules; used as the callback for .apply().
        if (type(layer) == nn.Linear):
            nn.init.xavier_normal_(layer.weight)
    def __init__(self, config, d_model, n_head, attention_mask=None):
        super(MultiHeadAttention, self).__init__()
        self.config = config
        self.d_model = d_model
        self.n_head = n_head
        # d_model must split evenly across heads.
        assert ((self.d_model % self.n_head) == 0), print('Word dim cannot be split into {} heads equally'.format(self.n_head))
        self.d_k = (self.d_model // self.n_head)
        self.d_v = self.d_k
        # One projection Linear per head for each of query/key/value.
        self.proj_layer_query = nn.ModuleList([nn.Linear(self.config.d_model, self.d_v) for _ in range(self.config.n_head)])
        self.proj_layer_key = nn.ModuleList([nn.Linear(self.config.d_model, self.d_v) for _ in range(self.config.n_head)])
        self.proj_layer_val = nn.ModuleList([nn.Linear(self.config.d_model, self.d_v) for _ in range(self.config.n_head)])
        self.attention = Attention.Attention(self.config, self.d_model, self.n_head)
        self.layer_norm = nn.LayerNorm(self.d_model)
        self.fc = nn.Linear(self.d_model, self.d_model)
        # NOTE: dropout is in-place — forward() relies on the mutation happening
        # without reassigning the result.
        self.dropout = nn.Dropout(p=self.config.dropout_rate, inplace=True)
        nn.init.xavier_normal_(self.fc.weight)
        self.proj_layer_query.apply(MultiHeadAttention.init_weights)
        self.proj_layer_key.apply(MultiHeadAttention.init_weights)
        self.proj_layer_val.apply(MultiHeadAttention.init_weights)
    def forward(self, query, key, val, key_structure=None, val_structure=None, attention_mask=None):
        """Run attention; returns (attended features, attention values)."""
        residual = query
        # Pre-allocate (n_head, *query.shape[:-1], d_k) buffers for the per-head
        # projections, on GPU when configured.
        if (self.config.gpu == True):
            query_head = Variable(torch.zeros((self.n_head, *query.shape[:(- 1)], self.d_k), device=torch.device('cuda')))
            key_head = Variable(torch.zeros((self.n_head, *query.shape[:(- 1)], self.d_k), device=torch.device('cuda')))
            val_head = Variable(torch.zeros((self.n_head, *query.shape[:(- 1)], self.d_k), device=torch.device('cuda')))
        else:
            query_head = Variable(torch.zeros((self.n_head, *query.shape[:(- 1)], self.d_k)))
            key_head = Variable(torch.zeros((self.n_head, *query.shape[:(- 1)], self.d_k)))
            val_head = Variable(torch.zeros((self.n_head, *query.shape[:(- 1)], self.d_k)))
        for i in range(self.n_head):
            query_head[i] = self.proj_layer_query[i](query).unsqueeze(0)
            key_head[i] = self.proj_layer_key[i](key).unsqueeze(0)
            val_head[i] = self.proj_layer_val[i](val).unsqueeze(0)
        # Free the originals before the attention pass to reduce peak memory.
        del query
        del key
        del val
        torch.cuda.empty_cache()
        # Move the head axis after the batch axis: (batch, n_head, ..., d_k).
        query_head = query_head.permute(1, 0, *np.arange(2, len(query_head.shape))).contiguous()
        key_head = key_head.permute(1, 0, *np.arange(2, len(query_head.shape))).contiguous()
        val_head = val_head.permute(1, 0, *np.arange(2, len(query_head.shape))).contiguous()
        if ((key_structure is not None) and (val_structure is not None)):
            (self_atten_features, atten_values) = self.attention(query_head, key_head, val_head, key_structure=key_structure, val_structure=val_structure, attention_mask=attention_mask)
        else:
            (self_atten_features, atten_values) = self.attention(query_head, key_head, val_head, attention_mask=attention_mask)
        del query_head
        del key_head
        del val_head
        torch.cuda.empty_cache()
        # Fold the head axis back into the feature axis before the final Linear.
        num_dim = len(self_atten_features.shape)
        self_atten_features = self_atten_features.permute(0, *np.arange(2, (num_dim - 1)), 1, (num_dim - 1)).contiguous()
        self_atten_features = self_atten_features.view(*self_atten_features.shape[:(- 2)], (- 1))
        self_atten_features = self.fc(self_atten_features)
        # In-place dropout: mutates self_atten_features directly (inplace=True).
        self.dropout(self_atten_features)
        self_atten_features = self.layer_norm((self_atten_features + residual))
        return (self_atten_features, atten_values)
def test_record_fields_empty_parameters():
    """A parameterized empty RecordType must round-trip through its datashape string."""
    record = RecordType([], [], parameters={'p': [123]})
    rendered = str(record)
    parsed = ak.types.from_datashape(rendered, highlevel=False)
    assert str(parsed) == rendered
class OptConfig():
    """Optimizer and learning-rate-schedule configuration (dataclass-style fields)."""
    # NOTE(review): field(default_factory=...) requires a @dataclass decorator,
    # which is not visible here — presumably applied upstream; confirm.
    opt_type: str = 'adamW'  # optimizer family
    base_lr: float = 0.0001  # initial learning rate
    weight_decay: float = 0.0001
    betas: List[float] = field(default_factory=(lambda : [0.9, 0.99]))  # Adam betas
    grad_clip_norm: float = 1.0  # gradient-norm clipping threshold
    sched_type: str = 'cosine'  # LR schedule type
    max_steps: int = 0
    min_lr: float = 0.0  # floor for the decayed learning rate
.parametrize('round_number', range(ROUNDS_TO_TRAIN))
def test_get_tasks_for_collaborator(assigner, task_groups, authorized_cols, round_number):
tasks = assigner.get_tasks_for_collaborator(authorized_cols[0], round_number)
assert (tasks == task_groups[0]['tasks']) |
def get_labeled_episodic_dataloader(dataset_name: str, n_way: int, n_shot: int, support: bool, n_episodes=600, n_query_shot=15, n_epochs=1, augmentation: str=None, image_size: int=None, unlabeled_ratio: int=20, num_workers=2, split_seed=1, episode_seed=0):
    """Build a DataLoader over the labeled split that yields episodic batches.

    The unlabeled half of the split is discarded; batching is driven entirely
    by an EpisodicBatchSampler configured for n-way / n-shot episodes.
    """
    _, labeled_split = get_split_dataset(
        dataset_name, augmentation, image_size=image_size, siamese=False,
        unlabeled_ratio=unlabeled_ratio, seed=split_seed)
    batch_sampler = EpisodicBatchSampler(
        labeled_split, n_way=n_way, n_shot=n_shot, n_query_shot=n_query_shot,
        n_episodes=n_episodes, support=support, n_epochs=n_epochs, seed=episode_seed)
    return torch.utils.data.DataLoader(labeled_split, num_workers=num_workers,
                                       batch_sampler=batch_sampler)
def test_readable_file_size():
    """3.5 MiB must be rendered as '3.50 MB'."""
    three_and_a_half_mb = 3.5 * 1024 * 1024
    assert readable_file_size(three_and_a_half_mb) == '3.50 MB'
def grep_full_py_identifiers(tokens):
    """Yield dotted identifier chains (e.g. ``a.b.c``) from a (type, text) token stream.

    Consecutive ``id '.' id`` triples are merged into one dotted name; Python
    keywords, empty names, and names starting with '.' are skipped.
    """
    token_list = list(tokens)
    idx = 0
    while idx < len(token_list):
        kind, name = token_list[idx]
        idx += 1
        if kind != 'id':
            continue
        # Greedily absorb trailing ".identifier" pairs into the current name.
        while (idx + 1) < len(token_list) and token_list[idx] == ('op', '.') and token_list[idx + 1][0] == 'id':
            name += '.' + token_list[idx + 1][1]
            idx += 2
        if name == '':
            continue
        if name in py_keywords:
            continue
        if name[0] in '.':
            continue
        yield name
def find_valid_answer_spans(passage_tokens: List[Token], answer_texts: List[str]) -> List[Tuple[(int, int)]]:
    """Locate every (start, end) token span in the passage matching an answer text.

    Matching is case-insensitive, strips STRIPPED_CHARACTERS from each token,
    and tolerates IGNORED_TOKENS appearing inside a span.
    """
    normalized = [tok.text.lower().strip(STRIPPED_CHARACTERS) for tok in passage_tokens]
    # Map each normalized token to every position where it occurs.
    positions: Dict[(str, List[int])] = defaultdict(list)
    for idx, word in enumerate(normalized):
        positions[word].append(idx)
    spans = []
    for answer_text in answer_texts:
        answer_words = answer_text.lower().strip(STRIPPED_CHARACTERS).split()
        n_words = len(answer_words)
        # Try to grow a span from every occurrence of the first answer word.
        for start in positions[answer_words[0]]:
            end = start
            matched = 1
            while matched < n_words and (end + 1) < len(normalized):
                nxt = normalized[end + 1]
                if answer_words[matched] == nxt:
                    matched += 1
                    end += 1
                elif nxt in IGNORED_TOKENS:
                    # Skip ignorable filler tokens inside the span.
                    end += 1
                else:
                    break
            if matched == n_words:
                spans.append((start, end))
    return spans
def image_from_paths(paths, shape, is_grayscale=True, seed=None):
    """Create a TF1 queue-based pipeline node reading PNG files from ``paths`` in order.

    Returns (filename, float_image); the image is optionally converted to
    grayscale and its static shape is set to ``shape``.
    """
    queue = tf.train.string_input_producer(list(paths), shuffle=False, seed=seed)
    reader = tf.WholeFileReader()
    filename, raw_bytes = reader.read(queue)
    decoded = tf.image.decode_png(raw_bytes, channels=3, dtype=tf.uint8)
    if is_grayscale:
        decoded = tf.image.rgb_to_grayscale(decoded)
    decoded.set_shape(shape)
    return (filename, tf.to_float(decoded))
class MemoryElements():
    """A set of activation tensors together with their aggregate size.

    Invariant: ``total_size`` equals the sum of ``e.total_size`` over ``elements``.
    (Annotations are quoted so this module imports without the project types.)
    """

    def __init__(self, elements: 'Set[ActivationMemoryTensor]', total_size: float):
        self.elements = elements      # the tracked tensors (a set)
        self.total_size = total_size  # cached sum of their sizes

    def add_element(self, new_element: 'ActivationMemoryTensor'):
        """Add one tensor; duplicates are ignored so the size is not double-counted.

        BUGFIX: previously the size was added even when the element was already
        present, silently inflating total_size past the sum of the set.
        """
        if new_element not in self.elements:
            self.elements.add(new_element)
            self.total_size += new_element.total_size

    def add_elements_set(self, new_elements_set: 'Set[ActivationMemoryTensor]'):
        """Add a set of tensors, counting only the ones not already tracked."""
        fresh = set(new_elements_set) - self.elements
        self.elements.update(fresh)
        self.total_size += sum(e.total_size for e in fresh)

    def __eq__(self, other) -> bool:
        # Equality is by element set only; total_size follows from the invariant.
        if isinstance(other, MemoryElements):
            return self.elements == other.elements
        return False

    def __hash__(self):
        # Consistent with __eq__: both depend only on the element set.
        return hash(frozenset(self.elements))

    def __copy__(self):
        # Shallow copy: a new set object holding the same element instances.
        return MemoryElements(set(self.elements), self.total_size)
class convolution_bilstm(nn.Module):
    """CNN + BiLSTM sentence classifier.

    Word embeddings are fed both through a bank of parallel Conv2d filters and
    a bidirectional LSTM; the pooled features are concatenated per example and
    classified by a two-layer MLP.
    """

    def __init__(self, args):
        # BUGFIX: the original called super(CNN_BiLSTM, self) with a class name
        # that does not exist here, raising NameError at construction time.
        super(convolution_bilstm, self).__init__()
        self.args = args
        self.hidden_dim = args.lstm_hidden_dim
        self.num_layers = args.lstm_num_layers
        V = args.embed_num      # vocabulary size
        D = args.embed_dim      # embedding dimension
        C = args.class_num      # number of output classes
        self.C = C
        Ci = 1                  # conv input channels
        Co = args.kernel_num    # filters per kernel size
        Ks = args.kernel_sizes  # list of kernel heights
        self.embed = nn.Embedding(V, D, padding_idx=args.paddingId)
        if args.word_Embedding:
            self.embed.weight.data.copy_(args.pretrained_weight)
        # BUGFIX: use nn.ModuleList so the conv parameters are registered with
        # the module (a plain list is invisible to .parameters()/.state_dict()).
        self.convs1 = nn.ModuleList([nn.Conv2d(Ci, Co, (K, D), padding=((K // 2), 0), stride=1) for K in Ks])
        print(self.convs1)
        if (self.args.cuda is True):
            for conv in self.convs1:
                conv.cuda()  # Module.cuda() moves parameters in place
        self.bilstm = nn.LSTM(D, self.hidden_dim, num_layers=self.num_layers, dropout=args.dropout, bidirectional=True, bias=True)
        # Classifier input: pooled conv features + bidirectional LSTM features.
        L = ((len(Ks) * Co) + (self.hidden_dim * 2))
        self.hidden2label1 = nn.Linear(L, (L // 2))
        self.hidden2label2 = nn.Linear((L // 2), C)
        self.dropout = nn.Dropout(args.dropout)

    def forward(self, x):
        """x: (seq_len, batch) LongTensor of token ids -> (batch, class_num) logits."""
        embed = self.embed(x)  # (seq_len, batch, D)
        # CNN branch: (batch, 1, seq_len, D) -> pooled (batch, len(Ks)*Co).
        cnn_x = torch.transpose(embed, 0, 1).unsqueeze(1)
        cnn_x = [conv(cnn_x).squeeze(3) for conv in self.convs1]
        cnn_x = [F.tanh(F.max_pool1d(i, i.size(2)).squeeze(2)) for i in cnn_x]
        cnn_x = torch.cat(cnn_x, 1)
        cnn_x = self.dropout(cnn_x)
        # BiLSTM branch: max-pool hidden states over time -> (batch, 2*hidden).
        bilstm_x = embed.view(len(x), embed.size(1), (- 1))
        (bilstm_out, _) = self.bilstm(bilstm_x)
        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 1, 2)
        bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
        bilstm_out = F.tanh(bilstm_out)
        # Concatenate both branches per example, then classify.
        cnn_x = torch.transpose(cnn_x, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        cnn_bilstm_out = torch.cat((cnn_x, bilstm_out), 0)
        cnn_bilstm_out = torch.transpose(cnn_bilstm_out, 0, 1)
        cnn_bilstm_out = self.hidden2label1(F.tanh(cnn_bilstm_out))
        cnn_bilstm_out = self.hidden2label2(F.tanh(cnn_bilstm_out))
        return cnn_bilstm_out
def main(config):
    """Entry point: build per-dataset loaders, construct the model, then train/test.

    Relies on several names not defined in this function (K, best_model_dir,
    train_val, test, structure_loss) — presumably module-level; see notes below.
    """
    device_ids = range(torch.cuda.device_count())
    train_loaders = {}
    val_loaders = {}
    test_loaders = {}
    # Build one train/val/test loader triple per configured dataset.
    for dataset_name in config.data.name:
        datas = Dataset_wrap_csv(k_fold=config.data.k_fold, use_old_split=True, img_size=config.data.img_size, dataset_name=dataset_name, split_ratio=config.data.split_ratio, train_aug=config.data.train_aug, data_folder=config.data.data_folder)
        # NOTE(review): val_data is wired to the 'test' split — confirm intentional.
        (train_data, val_data, test_data) = (datas['train'], datas['test'], datas['test'])
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.train.batch_size, shuffle=True, num_workers=config.train.num_workers, pin_memory=True, drop_last=True)
        val_loader = torch.utils.data.DataLoader(val_data, batch_size=config.test.batch_size, shuffle=False, num_workers=config.test.num_workers, pin_memory=True, drop_last=False)
        test_loader = torch.utils.data.DataLoader(test_data, batch_size=config.test.batch_size, shuffle=False, num_workers=config.test.num_workers, pin_memory=True, drop_last=False)
        train_loaders[dataset_name] = train_loader
        val_loaders[dataset_name] = val_loader
        test_loaders[dataset_name] = test_loader
        print('{} has {} training samples'.format(dataset_name, len(train_loader.dataset)))
    print('{} k_folder, {} val'.format(config.data.k_fold, config.data.use_val))
    # NOTE(review): if config.model matches neither branch, `model` stays
    # unbound and the code below raises NameError — confirm allowed values.
    if (config.model == 'TransFuse'):
        from Models.Hybrid_models.TransFuseFolder.TransFuse import TransFuse_L
        model = TransFuse_L(pretrained=True, pretrained_folder=config.pretrained_folder)
    elif (config.model == 'TransFuse_adapt'):
        from Models.Hybrid_models.TransFuseFolder.TransFuse import TransFuse_S_adapt
        # NOTE(review): K is not defined in this function — presumably a
        # module-level constant (number of domains); verify.
        model = TransFuse_S_adapt(pretrained=False, pretrained_folder=config.pretrained_folder, num_domains=K)
    total_trainable_params = sum((p.numel() for p in model.parameters() if p.requires_grad))
    total_params = sum((p.numel() for p in model.parameters()))
    print('{}M total parameters'.format((total_params / 1000000.0)))
    print('{}M total trainable parameters'.format((total_trainable_params / 1000000.0)))
    model = model.cuda()
    if (len(device_ids) > 1):
        model = torch.nn.DataParallel(model).cuda()
    criterion = structure_loss
    if (config.test.only_test == True):
        test(config, model, config.test.test_model_dir, test_loaders, criterion)
    else:
        train_val(config, model, train_loaders, val_loaders, criterion)
        # NOTE(review): best_model_dir is not defined in this function —
        # presumably set as a module global by train_val; verify.
        test(config, model, best_model_dir, test_loaders, criterion)
def downward_closure(cliques):
    """Return the downward closure of ``cliques`` under the subset relation.

    Every subset of every clique is collected once, then returned as a list
    sorted by subset size (sorted() already returns a list, so the original
    ``list(sorted(...))`` wrapper was redundant).
    """
    closed = set()
    for clique in cliques:
        closed.update(powerset(clique))
    return sorted(closed, key=len)
def get_args():
    """Parse command-line arguments for hard-negative mining.

    Returns the argparse Namespace built from sys.argv.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name_or_path', default='BAAI/bge-large-zh-noinstruct', type=str)
    parser.add_argument('--input_file', default='nli-zh-bge/nli_zh-train.jsonl', type=str)
    parser.add_argument('--candidate_pool', default='STS-B/STS-B.train.data', type=str)
    parser.add_argument('--output_file', default='bge_finetune_data.jsonl', type=str)
    parser.add_argument('--batch_size', default=128, type=int)
    parser.add_argument('--range_for_sampling', default='2-20', type=str, help='range to sample negatives')
    parser.add_argument('--use_gpu_for_searching', action='store_true', help='use faiss-gpu')
    # BUGFIX: without type=int a value given on the command line stayed a str;
    # the help text was also a copy-paste of the flag above.
    parser.add_argument('--negative_number', default=10, type=int, help='number of negatives to sample per example')
    return parser.parse_args()
class CommandLineParser():
    """Abstract interface for joining and splitting command lines.

    Note: the methods take no ``self`` — subclasses are expected to provide
    static-style implementations with the same signatures.
    """
    def join(argv):
        # Build a single command string from an argv list.
        raise NotImplementedError
    def split(cmd):
        # Split a command string into an argv list.
        raise NotImplementedError
class TransformerLayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable scale and shift."""

    def __init__(self, hidden_size, eps=1e-12):
        super(TransformerLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))  # gain (gamma)
        self.bias = nn.Parameter(torch.zeros(hidden_size))   # offset (beta)
        self.variance_epsilon = eps  # guards the sqrt against zero variance

    def forward(self, x):
        """Normalize x over its last dim, then apply the affine transform."""
        mean = x.mean((- 1), keepdim=True)
        variance = (x - mean).pow(2).mean((- 1), keepdim=True)
        normalized = (x - mean) / torch.sqrt(variance + self.variance_epsilon)
        return self.weight * normalized + self.bias
def _dataset_mean_logp(model, loader, preprocess, device):
    """Average unnormalized log-probability of ``model`` over all batches in ``loader``."""
    logps = []
    for (x, _) in loader:
        x = preprocess(x.to(device))
        logps.append(model(x).squeeze().detach())
    return torch.cat(logps).mean()


def evaluate(model, init_dist, sampler, train_loader, val_loader, test_loader, preprocess, device, n_iters, n_samples, steps_per_iter=1, viz_every=100):
    """Estimate log(Z) with annealed importance sampling, then report likelihoods.

    Returns (logZ, train_ll, val_ll, test_ll, generated_sample_snapshots).
    The three per-split likelihood loops were identical and are now factored
    into the _dataset_mean_logp helper above.
    """
    model = AISModel(model, init_dist)
    model.to(device)
    # Annealing schedule from the proposal (beta=0) to the target (beta=1).
    betas = np.linspace(0.0, 1.0, n_iters)
    samples = init_dist.sample((n_samples,))
    log_w = torch.zeros((n_samples,)).to(device)
    gen_samples = []
    for (itr, beta_k) in tqdm(enumerate(betas)):
        if (itr == 0):
            continue
        beta_km1 = betas[(itr - 1)]
        # Importance-weight update: log w += log p_k(x) - log p_{k-1}(x).
        with torch.no_grad():
            log_w = ((log_w + model(samples, beta_k)) - model(samples, beta_km1))
        model_k = (lambda x: model(x, beta=beta_k))
        for _ in range(steps_per_iter):
            samples = sampler.step(samples.detach(), model_k).detach()
        if (((itr + 1) % viz_every) == 0):
            gen_samples.append(samples.cpu().detach())
    logZ_final = (log_w.logsumexp(0) - np.log(n_samples))
    print('Final log(Z) = {:.4f}'.format(logZ_final))
    model = model.model  # unwrap the raw model for likelihood evaluation
    train_ll = _dataset_mean_logp(model, train_loader, preprocess, device) - logZ_final
    val_ll = _dataset_mean_logp(model, val_loader, preprocess, device) - logZ_final
    test_ll = _dataset_mean_logp(model, test_loader, preprocess, device) - logZ_final
    return (logZ_final, train_ll, val_ll, test_ll, gen_samples)
class RNNTTrainConfig(TrainConfig):
    """Training hyperparameters for an RNN-Transducer model."""
    optimizer: str = 'adam'  # optimizer name
    init_lr: float = 1e-06  # LR at the start of warmup
    final_lr: float = 1e-06  # LR at the end of decay
    peak_lr: float = 0.0001  # LR reached after warmup
    warmup_steps: int = 400  # number of warmup steps
    num_epochs: int = 20
    reduction: str = 'mean'  # loss reduction mode
    label_smoothing: float = 0.1
    lr_scheduler: str = 'tri_stage_lr_scheduler'  # warmup / hold / decay schedule
class BigMlpNet(nn.Module):
    """Five-layer fully-connected classifier for MNIST (784-d) or CIFAR10 (3072-d) inputs."""

    def __init__(self, args):
        super(BigMlpNet, self).__init__()
        if (args.dataset == 'mnist'):
            input_dim = 784
        elif (args.dataset.lower() == 'cifar10'):
            input_dim = 3072
        else:
            # BUGFIX: previously fell through with input_dim unbound (NameError).
            raise ValueError('Unsupported dataset: {}'.format(args.dataset))
        bias = (not args.disable_bias)
        self.fc1 = nn.Linear(input_dim, args.num_hidden_nodes1, bias=bias)
        self.fc2 = nn.Linear(args.num_hidden_nodes1, args.num_hidden_nodes2, bias=bias)
        self.fc3 = nn.Linear(args.num_hidden_nodes2, args.num_hidden_nodes3, bias=bias)
        self.fc4 = nn.Linear(args.num_hidden_nodes3, args.num_hidden_nodes4, bias=bias)
        self.fc5 = nn.Linear(args.num_hidden_nodes4, 10, bias=bias)
        self.enable_dropout = args.enable_dropout

    def _maybe_dropout(self, x):
        """Apply dropout only when enabled via the constructor args."""
        if self.enable_dropout:
            x = F.dropout(x, training=self.training)
        return x

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, 10)."""
        x = x.view(x.shape[0], (- 1))
        x = self._maybe_dropout(F.relu(self.fc1(x)))
        x = self._maybe_dropout(F.relu(self.fc2(x)))
        x = self._maybe_dropout(F.relu(self.fc3(x)))
        x = self._maybe_dropout(F.relu(self.fc4(x)))
        x = self.fc5(x)
        # BUGFIX: log_softmax without an explicit dim is deprecated/ambiguous;
        # dim=1 matches the implicit behavior for 2-D input.
        return F.log_softmax(x, dim=1)
def sample_paths(policy_params, max_samples, max_path_length=np.inf, scope=None):
    """Broadcast ``policy_params`` to every worker, then collect rollouts until
    at least ``max_samples`` samples have been gathered."""
    broadcast_args = [(policy_params, scope)] * singleton_pool.n_parallel
    singleton_pool.run_each(_worker_set_policy_params, broadcast_args)
    return singleton_pool.run_collect(
        _worker_collect_one_path,
        threshold=max_samples,
        args=(max_path_length, scope),
        show_prog_bar=True,
    )
def choose_agent(agent_type=RANDOM):
    """Map an agent-type constant to its agent class.

    Raises:
        ValueError: for unknown agent types (previously the function silently
        returned None, deferring the failure to the call site).
    """
    if (agent_type == RANDOM):
        return RandomAgent
    elif (agent_type == HUMAN):
        return HumanAgent
    elif (agent_type == REINFORCE):
        return ReinforceAgent
    raise ValueError('Unknown agent type: {!r}'.format(agent_type))
def _write_yaml_to_memory(yaml: str, path: str='memory://test.yaml'):
    """Write ``yaml`` text to an fsspec in-memory path and return that path."""
    with fsspec.open(path, 'w') as handle:
        handle.write(yaml)
    return path
def qlCreateCollider(cloth, target):
    """Run Qualoth's qlCreateCollider MEL command on (cloth, target).

    Returns the newly created collider nodes, each renamed to
    '<cloth>_<target>_<original name>'.
    """
    before = cmds.ls(assemblies=True)
    cmds.select([cloth, target])
    mel.eval('qlCreateCollider()')
    after = cmds.ls(assemblies=True)
    # Whatever top-level assemblies appeared are the collider nodes.
    created = list(set(after) - set(before))
    return [cmds.rename(node, ((((cloth + '_') + target) + '_') + node)) for node in created]
def replace_method(klass, method_name, func):
    """Attach ``func`` to ``klass`` as ``method_name`` on both Python 2 and 3.

    Python 2 needs an explicit unbound MethodType; on Python 3 a plain function
    descriptor suffices (wrapped in a forwarding lambda, as before).
    """
    if sys.version_info[0] < 3:
        bound = types.MethodType(func, None, klass)
    else:
        bound = lambda self, *args, **kw: func(self, *args, **kw)
    setattr(klass, method_name, bound)
def package_configurations(target):
    """Copy built kernels and their support files for ``target`` into a package folder.

    Scans PROJECT_CONFIG['build_dir'] for configurations matching ``target``,
    locates each compiled .xclbin kernel, and copies the files reported by
    files_to_copy() into '<target>/<configuration name>/'.
    """
    kernelsPackaged = 0
    for fileName in os.listdir(PROJECT_CONFIG['build_dir']):
        try:
            conf = Configuration.get_conf(fileName)
        except ValueError:
            continue  # not a configuration directory
        if (conf.target != target):
            continue
        sourceDir = os.path.join(PROJECT_CONFIG['build_dir'], fileName)
        packageFolder = os.path.join(target, fileName)
        # Locate the compiled kernel binary (.xclbin) in the build directory.
        # (The unused kernelName variable from the original was dropped.)
        kernelPath = None
        for subFile in os.listdir(sourceDir):
            if subFile.endswith('.xclbin'):
                kernelPath = os.path.join(sourceDir, subFile)
                break
        # BUGFIX: compare with 'is None' instead of '== None'.
        if (kernelPath is None) or (not os.path.exists(kernelPath)):
            continue
        print('Packaging {}...'.format(fileName))
        (folders, filesToCopy) = files_to_copy(conf, target)
        for folder in folders:
            try:
                os.makedirs(os.path.join(packageFolder, folder))
            except FileExistsError:
                pass  # already packaged before; reuse the folder
        filesCopied = 0
        filesMissing = 0
        for path in filesToCopy:
            try:
                shutil.copy(os.path.join(sourceDir, path), os.path.join(packageFolder, path))
                filesCopied += 1
            except FileNotFoundError:
                filesMissing += 1
        if (filesCopied == 0):
            raise FileNotFoundError('Files not found!')
        if (filesMissing > 0):
            print('WARNING: only {} / {} files copied ({} files missing).'.format(filesCopied, (filesCopied + filesMissing), filesMissing))
        kernelsPackaged += 1
    if (kernelsPackaged > 0):
        print((('Successfully packaged ' + str(kernelsPackaged)) + ' kernels and configuration files into "{}".').format(target))
    else:
        print('No kernels for target "{}" found in "{}".'.format(target, PROJECT_CONFIG['build_dir']))
class NNDataflow():
    """Top-level search engine for NN dataflow schedules.

    Given a network, batch size, hardware resource, and cost model, it builds
    per-layer Scheduling instances (shared between identical layers so their
    schedule caches are shared), enumerates inter-layer pipeline segments, and
    searches for the top-N overall NNDataflowScheme instances.
    """
    def __init__(self, network, batch_size, resource, cost, map_strategy):
        # Validate required interfaces up front.
        if (not isinstance(network, Network)):
            raise TypeError('NNDataflow: network must be a Network instance.')
        if (not isinstance(resource, Resource)):
            raise TypeError('NNDataflow: resource must be a Resource instance.')
        if (not isinstance(cost, Cost)):
            raise TypeError('NNDataflow: cost must be a Cost instance.')
        if (not issubclass(map_strategy, MapStrategy)):
            raise TypeError('NNDataflow: map_strategy must be a subclass of MapStrategy.')
        self.network = network
        self.batch_size = batch_size
        self.resource = resource
        self.cost = cost
        self.map_strategy = map_strategy
        # Map layer NAME -> Scheduling; identical layer OBJECTS share one
        # Scheduling instance so results are cached across duplicates.
        self.layer_sched_dict = {}
        layer2sched = {}
        for layer_name in self.network:
            layer = self.network[layer_name]
            sched = layer2sched.get(layer, None)
            if (sched is None):
                sched = Scheduling(layer, self.batch_size, self.cost, self.map_strategy)
                layer2sched[layer] = sched
            self.layer_sched_dict[layer_name] = sched
        self.ilp = InterLayerPipeline(self.network, self.batch_size, self.resource)
        self.ordered_layer_list = self.ilp.ordered_layer_list()
        # Top schemes found so far, keyed by the last layer scheduled.
        self.nndf_tops = {}
        # Default optimization goal: energy first, then time.
        self.cmp_key = (lambda nndf: (nndf.total_cost, nndf.total_time))
    def schedule_search(self, options):
        """Search for the top-N dataflow schemes for the whole network.

        Returns (top schemes list, (cache hits, cache misses)).
        """
        # Pick the comparison key for the requested optimization goal:
        # 'ed' = energy-delay product, 'd' = delay first, 'e' = energy first.
        if (options.opt_goal == 'ed'):
            self.cmp_key = (lambda nndf: (nndf.total_cost * nndf.total_time))
        elif (options.opt_goal == 'd'):
            self.cmp_key = (lambda nndf: (nndf.total_time, nndf.total_cost))
        else:
            assert (options.opt_goal == 'e')
        # Group candidate pipeline segments by the name of their LAST layer.
        segments = defaultdict(list)
        for seg in self.ilp.gen_segment(options):
            if (seg not in segments[seg[(- 1)][(- 1)]]):
                segments[seg[(- 1)][(- 1)]].append(seg)
        # Seed the search with schemes that only contain the input layout.
        self.nndf_tops = {}
        self.nndf_tops[None] = []
        for (input_layout, ext_layout_dict) in self._gen_input_layout(options):
            nndf = NNDataflowScheme(self.network, input_layout, ext_layout_dict)
            self.nndf_tops[None].append(nndf)
        # Schedule layer by layer in topological order, extending the best
        # partial schemes with each segment ending at the current layer.
        for layer_name in self.ordered_layer_list:
            if options.verbose:
                sys.stderr.write('-> {}\n'.format(layer_name))
                sys.stderr.flush()
            tops = []
            for seg in segments[layer_name]:
                if options.verbose:
                    sys.stderr.write('  - {}\n'.format(seg.seg))
                    sys.stderr.flush()
                tops += self._segment_schedule_search(seg, options)
            # Keep only the ntops best partial schemes for this layer.
            tops = sorted(tops, key=self.cmp_key)[:options.ntops]
            assert (layer_name not in self.nndf_tops)
            self.nndf_tops[layer_name] = tops
        # The final answers are the tops registered at the last layer.
        nndf_tops = self.nndf_tops.get(self.ordered_layer_list[(- 1)], [])
        if (not nndf_tops):
            sys.stderr.write('No valid schedule found for {}.\n'.format(self.network.net_name))
        for nndf in nndf_tops:
            assert (len(nndf) == len(self.network))
        # Aggregate cache statistics across the distinct Scheduling instances.
        cache_hits = 0
        cache_misses = 0
        seen_scheds = set()
        for sched in self.layer_sched_dict.values():
            if (sched in seen_scheds):
                continue
            seen_scheds.add(sched)
            (h, m) = sched.cache_stats()
            cache_hits += h
            cache_misses += m
        return (nndf_tops, (cache_hits, cache_misses))
    def _segment_schedule_search(self, segment, options):
        """Schedule all layers of one pipeline segment; return the top schemes."""
        # Partial schemes to extend come from the layer just before the segment.
        first_layer_idx = self.ordered_layer_list.index(segment[0][0])
        if (first_layer_idx == 0):
            prev_nndf_tops = self.nndf_tops[None]
        else:
            prev_nndf_tops = self.nndf_tops.get(self.ordered_layer_list[(first_layer_idx - 1)], [])
        if (not prev_nndf_tops):
            return []
        nndf_tops = []
        allocation = segment.allocation()
        # For fmaps forwarded between spatial stages, record which processing
        # region the data should be fetched from.
        fwd_data_region_dict = {}
        for sh_list in segment.ifm_fwd_dict.values():
            r = allocation[sh_list[0].sp_idx][sh_list[0].tm_idx].proc_region
            for idx in sh_list[1:]:
                fwd_data_region_dict[idx] = r
        for (fwd_src, fwd_dst_list) in segment.ofm_fwd_dict.items():
            r = allocation[fwd_src.sp_idx][fwd_src.tm_idx].proc_region
            for idx in fwd_dst_list:
                fwd_data_region_dict[idx] = r
        max_time_ovhd = options.layer_pipeline_time_ovhd
        # Frontier of constraint hint tuples already known to succeed; any
        # candidate dominated by a frontier member is skipped.
        frontier = set()
        for (constraint, hints) in segment.gen_constraint(max_time_ovhd):
            if any((all(((h >= fh) for (h, fh) in zip(hints, fhints))) for fhints in frontier)):
                continue
            # Extend the previous tops through every layer in the segment.
            curr_nndf_tops = prev_nndf_tops
            for (sp_idx, (ltpl, rtpl, ctpl)) in enumerate(zip(segment, allocation, constraint)):
                for (tm_idx, (layer, resource, cstr)) in enumerate(zip(ltpl, rtpl, ctpl)):
                    curr_nndf_tops = self._layer_schedule_search(layer, resource, cstr, sp_idx, tm_idx, fwd_data_region_dict.get((sp_idx, tm_idx)), curr_nndf_tops, options)
            # Only keep schemes within the allowed pipeline time overhead.
            seg_nndf_tops = [nndf for nndf in curr_nndf_tops if all(((timing.time_overhead <= max_time_ovhd) for timing in nndf.segment_timing_list))]
            if seg_nndf_tops:
                frontier.add(hints)
            nndf_tops += seg_nndf_tops
        return sorted(nndf_tops, key=self.cmp_key)[:options.ntops]
    def _layer_schedule_search(self, layer_name, resource, constraint, spatial_idx, temporal_idx, fwd_data_region, prev_nndf_tops, options):
        """Schedule one layer on top of each previous partial scheme."""
        nndf_tops = []
        layer_sched = self.layer_sched_dict[layer_name]
        for prev_nndf in prev_nndf_tops:
            ifmap_layout = prev_nndf.fmap_layout(self.network.prevs(layer_name))
            if (fwd_data_region is not None):
                # Remap the input layout onto the forwarding source's region.
                ifmap_layout = DataLayout(frngs=ifmap_layout.frngs, regions=((fwd_data_region,) * len(ifmap_layout.frngs)), parts=tuple((p.projection(fwd_data_region, appl2frng=True) for p in ifmap_layout.parts)))
            # A new segment begins at the first spatial/temporal slot.
            segment_idx = prev_nndf.last_seg_idx
            if ((spatial_idx == 0) and (temporal_idx == 0)):
                segment_idx += 1
            sched_seq = (segment_idx, spatial_idx, temporal_idx)
            constraint.update_by_prev(prev_nndf)
            condition = SchedulingCondition(resource=resource, constraint=constraint, ifmap_layout=ifmap_layout, sched_seq=sched_seq)
            try:
                sched_tops = layer_sched.schedule_search(condition, options)
            except Exception:
                sys.stderr.write('Failed when scheduling layer {}.\n'.format(layer_name))
                raise
            for t in sched_tops:
                nndf = prev_nndf.copy()
                nndf[layer_name] = t
                nndf_tops.append(nndf)
        return sorted(nndf_tops, key=self.cmp_key)[:options.ntops]
    def _gen_input_layout(self, options):
        """Yield (input layout, external-layer layout dict) candidates."""
        input_layer = self.network.input_layer()
        input_frng = FmapRange(FmapPosition(b=0, n=0, h=0, w=0), FmapPosition(b=self.batch_size, n=input_layer.nofm, h=input_layer.hofm, w=input_layer.wofm))
        ext_layer_names = self.network.ext_layers()
        ext_layers = [self.network[l] for l in ext_layer_names]
        ext_frngs = [FmapRange(FmapPosition(b=0, n=0, h=0, w=0), FmapPosition(b=self.batch_size, n=ext_layer.nofm, h=ext_layer.hofm, w=ext_layer.wofm)) for ext_layer in ext_layers]
        # Both the input and external data start in the source data region.
        input_region = ext_region = self.resource.src_data_region
        for part in partition.gen_partition(input_layer, self.batch_size, input_region.dim, options, guaranteed=True):
            input_layout = DataLayout(frngs=(input_frng,), regions=(input_region,), parts=(part.projection(input_region, appl2frng=True),))
            ext_layout_dict = (dict(zip(ext_layer_names, [DataLayout(frngs=(ext_frng,), regions=(ext_region,), parts=(part.projection(ext_region, appl2frng=True),)) for ext_frng in ext_frngs])) if ext_layers else None)
            (yield (input_layout, ext_layout_dict))
class LocalFSAdapter(BaseAdapter):
    """A requests transport adapter that serves file:// URLs from the local disk."""

    def send(self, request, stream=None, timeout=None, verify=None, cert=None, proxies=None):
        """Build a Response for a local path: 404 on stat failure, 200 otherwise."""
        pathname = url_to_path(request.url)
        resp = Response()
        resp.status_code = 200
        resp.url = request.url
        try:
            stats = os.stat(pathname)
        except OSError as exc:
            # Missing/unreadable file: report 404 and expose the error as the body.
            resp.status_code = 404
            resp.raw = exc
        else:
            modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
            content_type = mimetypes.guess_type(pathname)[0] or 'text/plain'
            resp.headers = CaseInsensitiveDict({
                'Content-Type': content_type,
                'Content-Length': stats.st_size,
                'Last-Modified': modified,
            })
            resp.raw = open(pathname, 'rb')
            resp.close = resp.raw.close
        return resp

    def close(self):
        """Nothing to release: files are opened per request."""
        pass
def test_warnings():
    """Evaluating these orthogonal polynomials must not trigger floating-point
    warnings, even with all numpy FP warnings promoted to errors."""
    previous_state = np.seterr(all='raise')
    try:
        orth.eval_legendre(1, 0)
        orth.eval_laguerre(1, 1)
        orth.eval_gegenbauer(1, 1, 0)
    finally:
        # Always restore the caller's FP-error configuration.
        np.seterr(**previous_state)
.parametrize('seed', [412])
.parametrize('batch_size', [2, 16])
.parametrize('grid_size', [2, 8])
.parametrize('feature_size', [4])
.parametrize('m, M', [((- 1), 1)])
def test_query_on_triplane_forward_backward(seed, batch_size, grid_size, feature_size, m, M):
    """Check that the composite tri-plane query matches F.lanczos_query_on_triplane
    in both forward outputs and feature/query gradients (cudnn context).

    The same random query points and feature initializations feed both
    implementations; outputs must agree to 1e-6 and feature grads to 5e-6.
    """
    nn.clear_parameters()
    ctx = get_extension_context('cudnn', device_id='0')
    nn.set_default_context(ctx)
    B = batch_size
    G = grid_size
    D = feature_size
    rng = np.random.RandomState(seed)
    # Queries uniform in [m, M]^3; per-plane features small-normal initialized.
    query_data = (m + (rng.rand(batch_size, 3) * (M - m)))
    initializer_data = (rng.randn(3, G, G, D) * 0.01)
    # Reference path: composite implementation with its own parameter 'F0'.
    query_data0 = query_data.astype(np.float32)
    initializer_data0 = initializer_data.astype(np.float32)
    query0 = nn.Variable.from_numpy_array(query_data0).apply(need_grad=True)
    feature0 = nn.parameter.get_parameter_or_create('F0', (3, G, G, D), initializer_data0)
    output0 = query_on_triplane_composite(query0, feature0, m, M)
    # Candidate path: fused lanczos implementation with parameter 'F1'.
    query_data1 = query_data.astype(np.float32)
    initializer_data1 = initializer_data.astype(np.float32)
    query1 = nn.Variable.from_numpy_array(query_data1).apply(need_grad=True)
    feature1 = nn.parameter.get_parameter_or_create('F1', (3, G, G, D), initializer_data1)
    output1 = F.lanczos_query_on_triplane(query1, feature1, ([m] * 3), ([M] * 3))
    # Forward pass of both implementations must agree.
    output0.forward(clear_no_need_grad=True)
    output1.forward(clear_no_need_grad=True)
    np.testing.assert_allclose(output0.d, output1.d, atol=1e-06)
    # Zero all grads, then backprop the SAME output gradient through both.
    query0.grad.fill(0)
    query1.grad.fill(0)
    feature0.grad.fill(0)
    feature1.grad.fill(0)
    ograd = rng.randn(*output0.shape).astype(np.float32)
    output0.backward(ograd, clear_buffer=True)
    output1.backward(ograd, clear_buffer=True)
    np.testing.assert_allclose(feature0.g, feature1.g, atol=5e-06)
class WindowedMetric(BaseMetric):
    """Evaluates a wrapped metric over fixed-size step windows and reports the
    running average of the per-window scores.
    """

    def __init__(self, metric_cls, window_size, ignore_nonempty_last=True, **kwargs):
        super().__init__()
        self.ignore_nonempty_last = ignore_nonempty_last  # drop a partial final window?
        self.window_size = window_size
        self.metric_cls = metric_cls
        # BUGFIX: remember the construction kwargs — previously each window
        # reset re-created the metric WITHOUT them, silently changing its config.
        self._metric_kwargs = kwargs
        self.metric = self._init_metric(**kwargs)
        self.score_meter = AverageMeter()
        self.step = 0
        self.num_windows = 1

    def _init_metric(self, **kwargs):
        """Create a fresh metric instance, defaulting to the stored kwargs."""
        return self.metric_cls(**(kwargs if kwargs else self._metric_kwargs))

    def update(self, y_true, y_pred):
        """Feed one batch; on a window boundary, bank the score and reset the metric."""
        self.step += 1
        self.metric.update(y_true, y_pred)
        if ((self.step % self.window_size) == 0):
            self.num_windows += 1
            score = self.metric.get()
            self.score_meter.update(score)
            self.metric = self._init_metric()
        return self

    def get(self):
        """Current score: window-average, optionally blending a partial last window."""
        if (self.num_windows == 1):
            return self.metric.get()
        elif ((not self.ignore_nonempty_last) and ((self.step % self.window_size) != 0)):
            # The partial window is weighted as one full window among num_windows.
            return ((self.metric.get() + (self.score_meter.get() * (self.num_windows - 1))) / self.num_windows)
        else:
            return self.score_meter.get()
def train(epoch):
    """Run one training epoch over paired (normal, anomaly) batches.

    Relies on module-level model/criterion/optimizer/scheduler and the two
    train loaders; prints the running mean loss after each batch.
    """
    print(('\nEpoch: %d' % epoch))
    model.train()
    train_loss = 0
    # (Unused `correct`/`total` counters from the original were removed.)
    for (batch_idx, (normal_inputs, anomaly_inputs)) in enumerate(zip(normal_train_loader, anomaly_train_loader)):
        # Stack anomaly and normal clips along dim 1, then flatten into one
        # batch of feature vectors.
        inputs = torch.cat([anomaly_inputs, normal_inputs], dim=1)
        batch_size = inputs.shape[0]
        inputs = inputs.view((- 1), inputs.size((- 1))).to(device)
        outputs = model(inputs)
        loss = criterion(outputs, batch_size)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        # BUGFIX: the original passed the format string and the value as two
        # print args ("loss = {} 0.123") without ever formatting it.
        print('loss = {}'.format(train_loss / len(normal_train_loader)))
    scheduler.step()
class GenericSymbolicSubring(SymbolicRing):
    """Abstract base for subrings of the symbolic ring restricted to a variable set."""
    def __init__(self, vars):
        """Initialize with an iterable of symbolic variables; all must be symbols."""
        super().__init__()
        self._vars_ = set(vars)
        if (not all((v.is_symbol() for v in self._vars_))):
            raise ValueError('Invalid variables: {}'.format(', '.join((str(v) for v in sorted(self._vars_, key=str) if (not v.is_symbol())))))
    def _repr_variables_(self):
        """Return a human-readable description of the variable set (sorted by name)."""
        if (not self._vars_):
            s = 'no variable'
        elif (len(self._vars_) == 1):
            s = 'the variable '
        else:
            s = 'the variables '
        return (s + ', '.join((str(v) for v in sorted(self._vars_, key=str))))
    def has_valid_variable(self, variable):
        """Return whether ``variable`` is admissible in this subring (abstract)."""
        raise NotImplementedError('Not implemented in this abstract base class')
    def _element_constructor_(self, x):
        """Construct an element of this subring, rejecting foreign variables."""
        expression = super()._element_constructor_(x)
        assert (expression.parent() is self)
        if (not all((self.has_valid_variable(var) for var in expression.variables()))):
            raise TypeError(('%s is not contained in %s' % (x, self)))
        return expression
    def _coerce_map_from_(self, P):
        """Decide coercion from parent ``P``: numerical/interval rings coerce in."""
        from sage.rings.infinity import InfinityRing
        from sage.rings.qqbar import AA, QQbar
        from sage.rings.real_lazy import RLF, CLF
        if isinstance(P, type):
            # Plain Python types defer to the full symbolic ring's rules.
            return SR._coerce_map_from_(P)
        if (RLF.has_coerce_map_from(P) or CLF.has_coerce_map_from(P) or AA.has_coerce_map_from(P) or QQbar.has_coerce_map_from(P)):
            return True
        if ((P is InfinityRing) or isinstance(P, (sage.rings.abc.RealIntervalField, sage.rings.abc.ComplexIntervalField))):
            return True
        if P._is_numerical():
            # Exclude the lazy/algebraic fields already handled above.
            return (P not in (RLF, CLF, AA, QQbar))
    def __eq__(self, other):
        # Two subrings are equal iff they restrict to the same variable set.
        if (not isinstance(other, GenericSymbolicSubring)):
            return False
        return (self._vars_ == other._vars_)
    def __ne__(self, other):
        return (not (self == other))
    def __hash__(self):
        # Consistent with __eq__: depends only on the (sorted) variable set.
        return hash(tuple(sorted(self._vars_)))
def DeepResNext101V3PlusD_OS4(args, num_classes, criterion, criterion_aux):
    """Factory for a DeepLabv3+ model on a ResNeXt-101 trunk (variant D4, skip m1)."""
    print('Model : DeepLabv3+, Backbone : resnext-101')
    model = DeepV3Plus(num_classes, trunk='resnext-101', criterion=criterion, criterion_aux=criterion_aux, variant='D4', skip='m1', args=args)
    return model
def test_function_that_needs_replacement():
    """Check that np.allclose is rejected under dace's typed-callbacks-only mode.

    NOTE(review): ``notworking`` carries a dace type annotation but no visible
    ``@dace.program`` decorator -- it may have been stripped upstream; confirm
    against the original test file.
    """
    def notworking(a: dace.float64[20]):
        return np.allclose(a, a)
    A = np.random.rand(20)
    # With only typed callbacks allowed, parsing the unreplaceable call must fail.
    with dace.config.set_temporary('frontend', 'typed_callbacks_only', value=True):
        with pytest.raises(DaceSyntaxError):
            notworking(A)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    """Build a ResNeXt-101 32x8d; cardinality and width are forced regardless
    of what the caller passes in ``kwargs``."""
    kwargs.update(groups=32, width_per_group=8)
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
class AssertionMinimization(cv.ChromosomeVisitor):
    """Visitor that deletes assertions which do not increase checked line coverage.

    An assertion is kept if it is an exception assertion, checks no lines at
    all, or checks at least one line not already covered by a previously kept
    assertion; otherwise it is removed from its statement.
    """
    _logger = logging.getLogger(__name__)
    def __init__(self):
        # Assertions kept after minimization.
        self._remaining_assertions: OrderedSet[Assertion] = OrderedSet()
        # Assertions removed because they added no new checked lines.
        self._deleted_assertions: OrderedSet[Assertion] = OrderedSet()
        # Union of line numbers already covered by kept assertions.
        self._checked_line_numbers: OrderedSet[int] = OrderedSet()
    def remaining_assertions(self) -> OrderedSet[Assertion]:
        """Return the assertions that survived minimization."""
        return self._remaining_assertions
    def deleted_assertions(self) -> OrderedSet[Assertion]:
        """Return the assertions that were removed."""
        return self._deleted_assertions
    def visit_test_suite_chromosome(self, chromosome: tsc.TestSuiteChromosome) -> None:
        """Minimize every test case in the suite, then log the total removed."""
        for test_case_chromosome in chromosome.test_case_chromosomes:
            test_case_chromosome.accept(self)
        self._logger.debug(f'Removed {len(self._deleted_assertions)} assertion(s) from test suite that do not increase checked coverage')
    def visit_test_case_chromosome(self, chromosome: tcc.TestCaseChromosome) -> None:
        """Walk each statement's assertions, keeping only coverage-increasing ones."""
        for stmt in chromosome.test_case.statements:
            to_remove: OrderedSet[Assertion] = OrderedSet()
            for assertion in stmt.assertions:
                new_checked_lines: OrderedSet[int] = OrderedSet()
                for instr in assertion.checked_instructions:
                    new_checked_lines.add(instr.lineno)
                # Keep exception assertions unconditionally; keep others only
                # if they check nothing (conservative) or cover a new line.
                if (isinstance(assertion, ExceptionAssertion) or (not new_checked_lines) or (not new_checked_lines.issubset(self._checked_line_numbers))):
                    self._checked_line_numbers.update(new_checked_lines)
                    self._remaining_assertions.add(assertion)
                else:
                    to_remove.add(assertion)
            # Deferred removal: do not mutate stmt.assertions while iterating.
            for assertion in to_remove:
                stmt.assertions.remove(assertion)
                self._deleted_assertions.add(assertion)
class GraphDataset(torch_geometric.data.Dataset):
    """Dataset pairing pickled bipartite-graph encodings with solution pools.

    Each entry of ``sample_files`` is a ``(bipartite_graph_path,
    solution_path)`` tuple of pickle files.
    """
    def __init__(self, sample_files):
        super().__init__(root=None, transform=None, pre_transform=None)
        self.sample_files = sample_files
    def len(self):
        """Number of samples (torch_geometric Dataset API)."""
        return len(self.sample_files)
    def process_sample(self, filepath):
        """Load one (graph, solutions) pair from its two pickle files.

        Returns:
            (BG, sols, objs, varNames) -- solutions/objectives capped at the
            first 50 entries, solution values rounded to integers.
        """
        (BGFilepath, solFilePath) = filepath
        with open(BGFilepath, 'rb') as f:
            bgData = pickle.load(f)
        with open(solFilePath, 'rb') as f:
            solData = pickle.load(f)
        BG = bgData
        varNames = solData['var_names']
        sols = solData['sols'][:50]
        objs = solData['objs'][:50]
        sols = np.round(sols, 0)
        return (BG, sols, objs, varNames)
    def get(self, index):
        """Assemble the torch_geometric data object for sample ``index``."""
        (BG, sols, objs, varNames) = self.process_sample(self.sample_files[index])
        (A, v_map, v_nodes, c_nodes, b_vars) = BG
        constraint_features = c_nodes
        edge_indices = A._indices()
        variable_features = v_nodes
        edge_features = A._values().unsqueeze(1)
        # Edge weights are deliberately discarded: every edge gets weight 1.
        edge_features = torch.ones(edge_features.shape)
        # Neutralize NaNs in the constraint features.
        constraint_features[np.isnan(constraint_features)] = 1
        graph = BipartiteNodeData(torch.FloatTensor(constraint_features), torch.LongTensor(edge_indices), torch.FloatTensor(edge_features), torch.FloatTensor(variable_features))
        graph.num_nodes = (constraint_features.shape[0] + variable_features.shape[0])
        graph.solutions = torch.FloatTensor(sols).reshape((- 1))
        graph.objVals = torch.FloatTensor(objs)
        graph.nsols = sols.shape[0]
        graph.ntvars = variable_features.shape[0]
        graph.varNames = varNames
        # Map solver variable names to their positions, then express v_map's
        # order in those positions.  (Original looped with a manual counter
        # and shadowed the builtin ``iter`` as a loop variable.)
        varname_dict = {name: position for (position, name) in enumerate(varNames)}
        varname_map = torch.tensor([varname_dict[name] for name in v_map])
        graph.varInds = [[varname_map], [b_vars]]
        return graph
def __getattr__(name):
    """Module-level attribute hook: route lookups of deprecated names through
    scipy's submodule-deprecation shim (emits a warning, then forwards to the
    private ``_special_matrices`` module)."""
    return _sub_module_deprecation(sub_package='linalg', module='special_matrices', private_modules=['_special_matrices'], all=__all__, attribute=name)
def get_context(dial, turn_id):
    """Concatenate the system/user transcripts of ``dial`` up to and
    including turn ``turn_id``.

    Returns a single string of the form
    `` <system>: ... <user>: ...`` repeated per turn.
    """
    parts = []
    for (idx, turn) in enumerate(dial['dialogue']):
        if (idx > turn_id):
            break
        parts.append(' <system>: ' + turn['system_transcript'] + ' <user>: ' + turn['transcript'])
    return ''.join(parts)
def scheduler_from_config(scheduler_config, optimizer, epoch_length):
    """Build an LR scheduler from a config section.

    Supported types: 'linear', 'step', 'poly', 'multistep'.  For 'linear' and
    'poly', ``update_mode == 'batch'`` scales the horizon by ``epoch_length``
    (steps per epoch); otherwise the horizon is counted in epochs.  A nonzero
    ``burn_in_steps`` wraps the scheduler in a burn-in phase.

    Args:
        scheduler_config: config-section object (supports [], getstruct,
            getint, getfloat).
        optimizer: the optimizer whose LR is scheduled.
        epoch_length: number of batches per epoch.

    Raises:
        ValueError: on an unrecognized scheduler type.
    """
    assert (scheduler_config['type'] in ('linear', 'step', 'poly', 'multistep'))
    params = scheduler_config.getstruct('params')
    if (scheduler_config['type'] == 'linear'):
        if (scheduler_config['update_mode'] == 'batch'):
            count = (epoch_length * scheduler_config.getint('epochs'))
        else:
            count = scheduler_config.getint('epochs')
        # LR factor goes linearly from params['from'] to params['to'] over `count` steps.
        beta = float(params['from'])
        alpha = (float((params['to'] - beta)) / count)
        scheduler = lr_scheduler.LambdaLR(optimizer, (lambda it: ((it * alpha) + beta)))
    elif (scheduler_config['type'] == 'step'):
        scheduler = lr_scheduler.StepLR(optimizer, params['step_size'], params['gamma'])
    elif (scheduler_config['type'] == 'poly'):
        if (scheduler_config['update_mode'] == 'batch'):
            count = (epoch_length * scheduler_config.getint('epochs'))
        else:
            count = scheduler_config.getint('epochs')
        # Polynomial decay: (1 - it/count) ** gamma.
        scheduler = lr_scheduler.LambdaLR(optimizer, (lambda it: ((1 - (float(it) / count)) ** params['gamma'])))
    elif (scheduler_config['type'] == 'multistep'):
        scheduler = lr_scheduler.MultiStepLR(optimizer, params['milestones'], params['gamma'])
    else:
        raise ValueError("Unrecognized scheduler type {}, valid options: 'linear', 'step', 'poly', 'multistep'".format(scheduler_config['type']))
    if (scheduler_config.getint('burn_in_steps') != 0):
        # Optional warm-up wrapper (project-local BurnInLR).
        scheduler = lr_scheduler.BurnInLR(scheduler, scheduler_config.getint('burn_in_steps'), scheduler_config.getfloat('burn_in_start'))
    return scheduler
class AnotherMixin():
    """Mixin that requires a ``custom_parameter`` keyword at subclass
    definition time and records it as a class attribute on the subclass."""

    def __init_subclass__(cls, custom_parameter, **kwargs):
        super().__init_subclass__(**kwargs)
        setattr(cls, 'custom_parameter', custom_parameter)
class RandomCurriculum(TrainingCurriculum):
    """Training curriculum that ignores the epoch and always samples randomly."""
    def get_action_flag_and_dataloader_for_epoch(self, dataset, epoch):
        """Return (action_flag=False, randomly-sampled DataLoader); the epoch
        has no influence on the sampling."""
        return (False, DataLoader(dataset, sampler=self.random_sampler, batch_size=self.train_batch_size, collate_fn=self.random_collate_fn))
    def summary(self):
        """One-line description used for logging."""
        return 'completely random curriculum'
def preparse_calculus(code):
    """Rewrite Sage shorthand function definitions like ``f(x) = expr`` into
    explicit ``var(...)``/``symbolic_expression(...).function(...)`` calls.

    Scans ``code`` for statements of the form ``; name(args) = expr`` and
    replaces each with the equivalent symbolic-function construction,
    leaving all other text untouched.  Returns ``code`` unchanged when no
    match is found.
    """
    new_code = []
    last_end = 0
    # Groups: (leading whitespace after ';', function name, arg list, RHS).
    for m in re.finditer(';(\\s*)([^\\W\\d]\\w*) *\\(([^()]+)\\) *= *([^;#=][^;]*)', code):
        (ident, func, vars, expr) = m.groups()
        stripped_vars = [v.replace(';', '').strip() for v in vars.split(',')]
        # Numeric-literal-prefixed names indicate args that start with digits.
        if any((n.startswith(numeric_literal_prefix) for n in stripped_vars)):
            raise ValueError('argument names should be valid python identifiers')
        vars = ','.join(stripped_vars)
        # Copy the untouched span, then emit the rewritten definition.
        new_code.append(code[last_end:m.start()])
        new_code.append((';%s__tmp__=var("%s"); %s = symbolic_expression(%s).function(%s)' % (ident, vars, func, expr, vars)))
        last_end = m.end()
    if (last_end == 0):
        # No shorthand definitions found: return the input unchanged.
        return code
    new_code.append(code[m.end():])
    return ''.join(new_code)
class NSEM_DerivTests(unittest.TestCase):
    """Derivative (Jvec) correctness tests for the NSEM problem."""
    def test_derivJvec_Z1dr(self):
        # Tolerance 0.01 for the derivative check of the Z1d real component.
        self.assertTrue(DerivJvecTest(0.01))
    def test_derivJvec_Z1d_e(self):
        # Same tolerance for the 1D electric-field formulation.
        self.assertTrue(DerivJvecTest_1D(0.01))
def roberts_pos_diag(image, mask=None):
    """Apply the Roberts cross operator along the positive diagonal.

    Args:
        image: 2-D input image.
        mask: optional boolean mask limiting where the filter result is kept.

    Returns:
        The convolution of the float-converted image with the
        positive-diagonal Roberts kernel, masked via ``_mask_filter_result``.
    """
    check_nD(image, 2)
    if (image.dtype.kind == 'f'):
        # Already floating point: only promote to a supported float dtype.
        float_dtype = _supported_float_type(image.dtype)
        image = image.astype(float_dtype, copy=False)
    else:
        # Integer/bool input: rescale into the canonical float range.
        image = img_as_float(image)
    result = convolve(image, ROBERTS_PD_WEIGHTS)
    return _mask_filter_result(result, mask)
.skip(reason='Need to wait for changes on scikit-learn (see issue #89)')
def test_grid_search():
    """Smoke-test KNORAE inside sklearn's GridSearchCV over the `k` parameter.

    Currently skipped (see the decorator above / issue #89): requires
    scikit-learn changes before KNORAE is fully grid-search compatible.
    """
    (pool_classifiers, X_dsel, y_dsel, X_test, y_test) = setup_classifiers()
    kne = KNORAE(pool_classifiers)
    params = {'k': [1, 3, 5, 7]}
    grid = GridSearchCV(kne, params)
    grid.fit(X_dsel, y_dsel)
    # Only verifies the search completes and the best estimator can score.
    grid.best_estimator_.score(X_test, y_test)
class VGG16_FPN(nn.Module):
    """VGG16-BN backbone split into three stages, feeding an FPN neck and a
    localization head that upsamples back to a single-channel map.

    NOTE(review): ``self.neck_seg`` is constructed but never used in
    ``forward`` (only ``neck_reg`` is) -- confirm whether it is consumed by a
    subclass/caller or is dead weight.
    """
    def __init__(self, pretrained=True):
        super(VGG16_FPN, self).__init__()
        vgg = models.vgg16_bn(pretrained=pretrained)
        features = list(vgg.features.children())
        # Split the VGG feature stack into three resolution stages.
        self.layer1 = nn.Sequential(*features[0:23])
        self.layer2 = nn.Sequential(*features[23:33])
        self.layer3 = nn.Sequential(*features[33:43])
        in_channels = [256, 512, 512]
        self.neck_seg = FPN(in_channels, 128, len(in_channels))
        self.neck_reg = FPN(in_channels, 128, len(in_channels))
        # Head: dropout -> residual block -> two 2x deconv upsampling steps
        # -> 1-channel prediction map (ReLU keeps it non-negative).
        self.loc_head = nn.Sequential(nn.Dropout2d(0.1), ResBlock(in_dim=384, out_dim=128, dilation=0, norm='bn'), nn.ConvTranspose2d(128, 64, 2, stride=2, padding=0, output_padding=0, bias=False), nn.BatchNorm2d(64, momentum=BN_MOMENTUM), nn.ReLU(inplace=True), nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(32, momentum=BN_MOMENTUM), nn.ReLU(inplace=True), nn.ConvTranspose2d(32, 16, 2, stride=2, padding=0, output_padding=0, bias=False), nn.BatchNorm2d(16, momentum=BN_MOMENTUM), nn.ReLU(inplace=True), nn.Conv2d(16, 1, kernel_size=1, stride=1, padding=0), nn.ReLU(inplace=True))
    def forward(self, x):
        """Run the three backbone stages, fuse them with the regression FPN,
        and return the localization map."""
        fea = []
        x = self.layer1(x)
        fea.append(x)
        x = self.layer2(x)
        fea.append(x)
        x = self.layer3(x)
        fea.append(x)
        x = self.neck_reg(fea)
        all_pre_map = self.loc_head(x)
        return all_pre_map
class OddManOutEval(PROBINGEval):
    """Probing evaluation for the 'odd man out' task.

    Loads ``odd_man_out.txt`` from the given task directory and delegates to
    the generic probing-evaluation base class.
    """

    def __init__(self, task_path, seed=1111):
        data_file = os.path.join(task_path, 'odd_man_out.txt')
        super().__init__('OddManOut', data_file, seed)
def _create_mask(lengths, stride, like=None, use_gpu=True):
if use_gpu:
mask = (torch.arange(stride).cuda() + 1)
mask = (mask.unsqueeze(0) <= lengths.cuda().unsqueeze((- 1)))
else:
mask = (torch.arange(stride) + 1)
mask = (mask.unsqueeze(0) <= lengths.unsqueeze((- 1)))
if (like is not None):
for _ in range((like.dim() - mask.dim())):
mask = mask.unsqueeze((- 1))
return mask |
def runeval(args):
    """Drain the JSON task queue, running and evaluating one experiment per task.

    Re-reads ``args.queuefile`` each iteration, picks the first task with
    ``ready == False``, runs it, and marks it ready (recording the run id)
    only when it finished cleanly.  A task is retried up to 3 times before
    the loop aborts.  Connections are always torn down on exit.

    NOTE(review): ``run_id`` (used below) is not defined in this function --
    presumably a module-level global set by run_experiment; confirm.
    """
    global ed
    try:
        prepare_connections()
        tasks_left = True
        stop_requested = False
        task_id = None
        retries = 0
        while ((not stop_requested) and tasks_left):
            task = None
            taskq = None
            previous_task_id = task_id
            # Reload the queue every iteration so external edits are picked up.
            with open(args.queuefile, 'r') as qfile:
                taskq = json.load(qfile)
            # First pending task, together with its index in the queue.
            (task, task_id) = next((e for e in zip(taskq, range(len(taskq))) if (e[0]['ready'] == False)), (None, None))
            if (task is None):
                tasks_left = False
                continue
            # Retry counter resets whenever we move on to a different task.
            retries = (0 if (task_id != previous_task_id) else (retries + 1))
            if (retries > 3):
                print('Too many retries for the same task, aborting')
                stop_requested = True
                continue
            if (retries > 0):
                print(('Retrying (%d of 3), waiting 60 seconds...' % retries))
                time.sleep(60)
            try:
                exp_meta = prepare_experiment(task)
                create_logs(exp_meta, args.logprefix)
                try:
                    exp_meta_lock = threading.RLock()
                    run_experiment(exp_meta, exp_meta_lock, args.localtime)
                except KeyboardInterrupt:
                    # User abort: finish cleanup, then stop the outer loop.
                    stop_requested = True
                    running = False
                    exp_meta['reason_stop'] = 'KeyboardInterrupt'
                finally:
                    # Best-effort: detach the end-device log before evaluation.
                    try:
                        ed.logfile = None
                    except:
                        print('Closing the end device log failed:')
                        traceback.print_exc()
                    try:
                        exp_meta['ed_updown_map'] = create_updown_map(logfile(LOG_PREFIX_DUT, args.logprefix))
                        evaluate_experiment(exp_meta, args.localtime)
                    except:
                        # Evaluation failure stops the run but preserves the
                        # original stop reason inside the message.
                        stop_requested = True
                        running = False
                        exp_meta['reason_stop'] = ('Evaluation failure (was %s)' % exp_meta['reason_stop'])
                        print('Could not process experiment results')
                        traceback.print_exc()
                        print(exp_meta)
                    close_logs()
            finally:
                teardown_experiment()
            # Persist success: only a cleanly finished task is marked ready.
            if (('reason_stop' in exp_meta) and (exp_meta['reason_stop'] == 'finished')):
                with open(args.queuefile, 'w') as qfile:
                    taskq[task_id]['ready'] = True
                    taskq[task_id]['runid'] = run_id
                    json.dump(taskq, qfile)
    finally:
        teardown_connections()
def load_mnist():
    """Load MNIST, flatten images to (N, 784) floats in [0, 1], and one-hot
    encode the labels.

    Returns:
        Tuple ``(x_train, x_test, y_train, y_test)``.
    """
    (train_split, test_split) = keras.datasets.mnist.load_data()
    (x_train, y_train) = train_split
    (x_test, y_test) = test_split
    x_train = x_train.reshape(x_train.shape[0], (- 1)) / 255
    x_test = x_test.reshape(x_test.shape[0], (- 1)) / 255
    y_train = keras.utils.to_categorical(y_train, num_classes=10)
    y_test = keras.utils.to_categorical(y_test, num_classes=10)
    return (x_train, x_test, y_train, y_test)
class RetriBertTokenizer(BertTokenizer):
    """RetriBERT tokenizer: identical to BertTokenizer except for its
    pretrained-resource tables and the reduced set of model inputs
    (attention_mask only)."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['attention_mask']
def _calculate_integral(inp, baseline, gradients):
gradients = ((gradients[:(- 1)] + gradients[1:]) / 2.0)
avg_grads = np.average(gradients, axis=0)
integrated_grads = ((inp - baseline) * avg_grads)
integrated_grads = np.sum(integrated_grads, axis=(- 1))
return integrated_grads |
def test_different_shape():
    """dace test: reshaped-view SDFG must simplify away all nested SDFGs and
    still produce the expected element values."""
    A = np.random.rand(20, 3).astype(np.float32)
    B = np.random.rand(20, 3).astype(np.float32)
    # (20, 3) array viewed as a flat 60-element array with the given strides.
    sdfg = make_sdfg([20, 3], [60], '1, 0', '3')
    sdfg.simplify()
    sdfg(A=A, B=B)
    # Simplification must have inlined every nested SDFG.
    assert all(((not isinstance(node, dace.nodes.NestedSDFG)) for node in sdfg.node(0).nodes()))
    expected = np.array([(2 ** 2), ((2 ** 2) + (2 ** 6))], dtype=np.float32)
    result = np.array([A[(1, 0)], B[(1, 0)]], dtype=np.float32)
    diff = np.linalg.norm((expected - result))
    print('Difference:', diff)
    assert (diff <= 1e-06)
def test_reassignment_while():
    """dace test: a variable reassigned inside a while loop must match the
    pure-Python reference, both via an unsimplified SDFG and the decorated call.

    NOTE(review): ``reassignment_while.f`` / ``.to_sdfg`` imply the inner
    function carries a ``@dace.program`` decorator not visible here -- it may
    have been stripped upstream.
    """
    def reassignment_while(a: dace.float64[(3, 3)], b: dace.float64[(3, 3)]) -> dace.float64[(3, 3)]:
        out = np.copy(a)
        i = 0
        while (i < 10):
            out = (out - b)
            i += 1
        return out
    A = rng.random((3, 3))
    B = rng.random((3, 3))
    # Reference result from the undecorated Python function.
    ref = reassignment_while.f(A, B)
    sdfg = reassignment_while.to_sdfg(simplify=False)
    func = sdfg.compile()
    val = func(a=A, b=B)
    assert np.allclose(val, ref)
    # Also check the JIT-style direct call path.
    val = reassignment_while(A, B)
    assert np.allclose(val, ref)
.parametrize('dtype', [ti.i64, ti.u64, ti.f64])
_utils.test(arch=supported_archs_taichi_ndarray, require=ti.extension.data64)
def test_ndarray_python_scope_read_64bit(dtype):
    """taichi test: values above 2**32 written in a kernel must survive a
    Python-scope read of a 64-bit ndarray.

    NOTE(review): ``run`` is called like a taichi kernel; its ``@ti.kernel``
    decorator appears to have been stripped upstream -- confirm.
    """
    def run(x: ti.types.ndarray()):
        for i in x:
            # 2**40 exceeds 32-bit range, exercising true 64-bit storage.
            x[i] = (i + ti.i64((2 ** 40)))
    n = 4
    a = ti.ndarray(dtype, shape=(n,))
    run(a)
    for i in range(n):
        assert (a[i] == (i + (2 ** 40)))
class Softshrink(Module):
    """Applies the soft-shrinkage function elementwise:
    ``x - lambd`` for ``x > lambd``, ``x + lambd`` for ``x < -lambd``,
    and ``0`` otherwise.

    Args:
        lambd: the shrinkage threshold (default 0.5).
    """
    __constants__ = ['lambd']
    lambd: float

    def __init__(self, lambd: float=0.5) -> None:
        super().__init__()
        self.lambd = lambd

    def forward(self, input: Tensor) -> Tensor:
        # Delegate the elementwise math to the functional implementation.
        return F.softshrink(input, self.lambd)

    def extra_repr(self) -> str:
        # Shown inside the module's repr, e.g. Softshrink(0.5).
        return str(self.lambd)
class Xor(Benchmark):
    """XOR neural-network training objective as a 9-dimensional benchmark.

    The nine parameters encode the weights/biases of a tiny 2-2-1 sigmoid
    network; the objective is the squared error of the network on the four
    XOR patterns.  ``fglob`` is the known best value at ``global_optimum``.
    """
    def __init__(self, dimensions=9):
        Benchmark.__init__(self, dimensions)
        # Search box: [-1, 1] in every dimension.
        self._bounds = list(zip(([(- 1.0)] * self.N), ([1.0] * self.N)))
        self.global_optimum = [[1.0, (- 1.0), 1.0, (- 1.0), (- 1.0), 1.0, 1.0, (- 1.0), 0.421134]]
        self.fglob = 0.9597588
    def fun(self, x, *args):
        """Evaluate the XOR network error at parameter vector ``x``."""
        self.nfev += 1
        # Pattern (1,1) -> target 0: hidden activations F11, F12.
        F11 = (x[6] / (1.0 + exp((((- x[0]) - x[1]) - x[4]))))
        F12 = (x[7] / (1.0 + exp((((- x[2]) - x[3]) - x[5]))))
        F1 = ((1.0 + exp((((- F11) - F12) - x[8]))) ** (- 2))
        # Pattern (0,0) -> target 0.
        F21 = (x[6] / (1.0 + exp((- x[4]))))
        F22 = (x[7] / (1.0 + exp((- x[5]))))
        F2 = ((1.0 + exp((((- F21) - F22) - x[8]))) ** (- 2))
        # Pattern (1,0) -> target 1.
        F31 = (x[6] / (1.0 + exp(((- x[0]) - x[4]))))
        F32 = (x[7] / (1.0 + exp(((- x[2]) - x[5]))))
        F3 = ((1.0 - ((1.0 + exp((((- F31) - F32) - x[8]))) ** (- 1))) ** 2)
        # Pattern (0,1) -> target 1.
        F41 = (x[6] / (1.0 + exp(((- x[1]) - x[4]))))
        F42 = (x[7] / (1.0 + exp(((- x[3]) - x[5]))))
        F4 = ((1.0 - ((1.0 + exp((((- F41) - F42) - x[8]))) ** (- 1))) ** 2)
        return (((F1 + F2) + F3) + F4)
def gen_autograd_functions_lib(out, autograd_functions, template_path):
    """Generate the library-side autograd Functions sources: a thin wrapper
    over ``gen_autograd_functions`` fixed to the 'Functions' file basename."""
    gen_autograd_functions(out, autograd_functions, template_path, 'Functions')
def test_bare_reraise_single_exception():
    """A bare ``raise`` inside an except block must be attributed to the
    exception type named by the handler (ZeroDivisionError)."""
    program = 'def f(x):\n    try:\n        return 1 / x\n    except ZeroDivisionError:\n        raise\n'
    __assert_found(program, 'ZeroDivisionError')
class create_model_2(torch.nn.Module):
    """Test fixture: one conv feeding two independent BatchNorm branches whose
    outputs are summed with the input (exercises shared-input BN fusion)."""
    def __init__(self):
        super(create_model_2, self).__init__()
        self.conv1 = Conv2d(3, 3, kernel_size=1, stride=1)
        self.bn = BatchNorm2d(3)
        # Re-initialize BN parameters to non-default values (test helper).
        self.bn = bn_weight_change(self.bn)
        self.bn2 = BatchNorm2d(3)
        self.bn2 = bn_weight_change(self.bn2)
    def forward(self, inp):
        x = self.conv1(inp)
        # Both BN branches consume the same conv output.
        x2 = self.bn(x)
        y = self.bn2(x)
        return ((x2 + y) + inp)
def _environ_cols_tput(*_):
try:
import shlex
cols = int(subprocess.check_call(shlex.split('tput cols')))
return cols
except:
pass
return None |
def RenderRegion(points, lines, region, filename):
    """Render points and line segments into a 1000x1000 SVG file.

    Args:
        points: iterable of (x, y) coordinate pairs, drawn as red circles.
        lines: iterable of (x1, y1, x2, y2) segments, drawn in blue.
        region: (xmin, ymin, xmax, ymax) bounding box mapped onto the canvas.
        filename: output SVG path.

    The duplicated inline projection arithmetic of the original is factored
    into a single helper; behavior is unchanged.
    """
    def to_canvas(x, y):
        # Normalize into the region and scale to 1000 px; the x axis is
        # flipped (1000 - ...), and the drawing swaps the axes on output.
        cx = (1000 - int((((x - region[0]) / (region[2] - region[0])) * 1000)))
        cy = int((((y - region[1]) / (region[3] - region[1])) * 1000))
        return (cx, cy)
    dwg = svgwrite.Drawing(filename, profile='tiny')
    for line in lines:
        (x1, y1) = to_canvas(line[0], line[1])
        (x2, y2) = to_canvas(line[2], line[3])
        dwg.add(dwg.line((y1, x1), (y2, x2), stroke='blue'))
    for p in points:
        (x, y) = to_canvas(p[0], p[1])
        dwg.add(dwg.circle(center=(y, x), r=1, stroke='red'))
    dwg.save()
class FullHessianUpdateStrategy(HessianUpdateStrategy):
    """Hessian update strategy storing the full (dense, symmetric) matrix.

    Subclasses implement ``_update_implementation`` (e.g. BFGS/SR1).  Only
    one of ``B`` (Hessian) or ``H`` (inverse Hessian) is maintained,
    selected by ``approx_type`` at :meth:`initialize` time.
    """
    # Double-precision symmetric BLAS kernels, resolved once per class.
    _syr = get_blas_funcs('syr', dtype='d')
    _syr2 = get_blas_funcs('syr2', dtype='d')
    _symv = get_blas_funcs('symv', dtype='d')
    def __init__(self, init_scale='auto'):
        # 'auto' scales the initial identity on the first update; otherwise
        # a fixed float scale is applied.
        self.init_scale = init_scale
        self.first_iteration = None
        self.approx_type = None
        self.B = None
        self.H = None
    def initialize(self, n, approx_type):
        """Allocate the n x n identity as the starting approximation.

        Raises:
            ValueError: if approx_type is not 'hess' or 'inv_hess'.
        """
        self.first_iteration = True
        self.n = n
        self.approx_type = approx_type
        if (approx_type not in ('hess', 'inv_hess')):
            raise ValueError("`approx_type` must be 'hess' or 'inv_hess'.")
        if (self.approx_type == 'hess'):
            self.B = np.eye(n, dtype=float)
        else:
            self.H = np.eye(n, dtype=float)
    def _auto_scale(self, delta_x, delta_grad):
        """Heuristic first-iteration scaling (Nocedal & Wright style):
        y'y / y's for the Hessian, its reciprocal for the inverse."""
        s_norm2 = np.dot(delta_x, delta_x)
        y_norm2 = np.dot(delta_grad, delta_grad)
        ys = np.abs(np.dot(delta_grad, delta_x))
        if ((ys == 0.0) or (y_norm2 == 0) or (s_norm2 == 0)):
            # Degenerate step/gradient: fall back to no scaling.
            return 1
        if (self.approx_type == 'hess'):
            return (y_norm2 / ys)
        else:
            return (ys / y_norm2)
    def _update_implementation(self, delta_x, delta_grad):
        """Apply the concrete quasi-Newton update (subclass responsibility)."""
        raise NotImplementedError('The method ``_update_implementation`` is not implemented.')
    def update(self, delta_x, delta_grad):
        """Incorporate one (step, gradient-change) pair into the approximation.

        No-ops on a zero step; warns and no-ops on a zero gradient change
        (which indicates a linear function).
        """
        if np.all((delta_x == 0.0)):
            return
        if np.all((delta_grad == 0.0)):
            warn('delta_grad == 0.0. Check if the approximated function is linear. If the function is linear better results can be obtained by defining the Hessian as zero instead of using quasi-Newton approximations.', UserWarning, stacklevel=2)
            return
        if self.first_iteration:
            # Rescale the identity before the very first update.
            if (self.init_scale == 'auto'):
                scale = self._auto_scale(delta_x, delta_grad)
            else:
                scale = float(self.init_scale)
            if (self.approx_type == 'hess'):
                self.B *= scale
            else:
                self.H *= scale
            self.first_iteration = False
        self._update_implementation(delta_x, delta_grad)
    def dot(self, p):
        """Return the product of the stored matrix with vector ``p``."""
        if (self.approx_type == 'hess'):
            return self._symv(1, self.B, p)
        else:
            return self._symv(1, self.H, p)
    def get_matrix(self):
        """Return a dense copy of the stored matrix with both triangles filled.

        The BLAS symmetric routines only maintain one triangle, so the lower
        triangle is mirrored from the upper before returning.
        """
        if (self.approx_type == 'hess'):
            M = np.copy(self.B)
        else:
            M = np.copy(self.H)
        li = np.tril_indices_from(M, k=(- 1))
        M[li] = M.T[li]
        return M
def remove_punctuation(strs):
    """Strip the input and delete whitespace plus a fixed set of punctuation
    characters.

    NOTE(review): the second character class appears to target fullwidth/CJK
    punctuation; some of its characters may have been mangled by encoding --
    verify the pattern against the original source before relying on it.
    """
    return re.sub('[\\s+\\.\\!\\/<>,$%^*(+"\']+|[+!,?~#%......&*()]+', '', strs.strip())
.expensive
def test_gcsl_run():
    """End-to-end smoke test: launch the GCSL lunar-lander training command
    as a subprocess (marked expensive; wandb forced offline so no network
    access is needed)."""
    os.environ['WANDB_MODE'] = 'offline'
    # check=True turns a non-zero exit code into a test failure.
    subprocess.run(lunar_command, check=True)
def check_positive(input_matrix: Union[(sparse.csr_matrix, np.ndarray)]):
    """Validate that ``input_matrix`` has positive entries.

    Raises:
        ValueError: when ``has_positive_entries`` reports no positive values.
    """
    if has_positive_entries(input_matrix):
        return
    raise ValueError('Only positive values are expected.')
def tetrahedralize_vtk_mesh(vtkdata):
    """Convert an arbitrary VTK dataset into tetrahedra (and triangles)
    using vtkDataSetTriangleFilter.

    Handles the VTK<6 vs VTK>=6 pipeline API split (SetInput vs SetInputData).
    """
    tetra = vtk.vtkDataSetTriangleFilter()
    if (vtk_version < 6):
        tetra.SetInput(vtkdata)
    else:
        tetra.SetInputData(vtkdata)
    tetra.Update()
    return tetra.GetOutput()
def make_list_of_t(ts, check_graph=True, allow_graph=True, ignore_ops=False):
    """Normalize ``ts`` into a list of tf Tensors.

    A tf.Graph yields all of its tensors (if ``allow_graph``); a single
    tensor is wrapped in a list; non-Tensor entries are filtered out.  With
    ``check_graph`` set, all entries must belong to one graph
    (``ignore_ops`` relaxes the per-entry type check during that pass).

    Raises:
        TypeError: if ``ts`` is a tf.Graph and ``allow_graph`` is False.
    """
    if isinstance(ts, tf_ops.Graph):
        if not allow_graph:
            raise TypeError('allow_graph is False: cannot convert a tf.Graph.')
        return get_tensors(ts)
    if not is_iterable(ts):
        ts = [ts]
    if not ts:
        return []
    if check_graph:
        check_types = None if ignore_ops else tf_ops.Tensor
        get_unique_graph(ts, check_types=check_types)
    return [t for t in ts if isinstance(t, tf_ops.Tensor)]
def train_model(model, dataset, params, ckpt, ckpt_manager, out_file):
    """Train a pointer-generator seq2seq model, logging per-step loss to
    ``out_file`` and checkpointing every ``checkpoints_save_steps`` steps
    (and at ``max_steps`` or on Ctrl-C)."""
    optimizer = tf.keras.optimizers.Adagrad(params['learning_rate'], initial_accumulator_value=params['adagrad_init_acc'], clipnorm=params['max_grad_norm'])
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False, reduction='none')
    def loss_function(real, pred):
        """Masked sequence loss: positions equal to token id 1 (padding) are
        excluded, and each sequence's loss is normalized by its true length."""
        mask = tf.math.logical_not(tf.math.equal(real, 1))
        dec_lens = tf.reduce_sum(tf.cast(mask, dtype=tf.float32), axis=(- 1))
        loss_ = loss_object(real, pred)
        mask = tf.cast(mask, dtype=loss_.dtype)
        loss_ *= mask
        loss_ = (tf.reduce_sum(loss_, axis=(- 1)) / dec_lens)
        return tf.reduce_mean(loss_)
    # NOTE(review): the next line looks like a '@tf.function(...)' decorator
    # whose '@tf.function' prefix was stripped during extraction; as written
    # it is not valid Python -- restore from the original source.
    (input_signature=(tf.TensorSpec(shape=[params['batch_size'], None], dtype=tf.int32), tf.TensorSpec(shape=[params['batch_size'], None], dtype=tf.int32), tf.TensorSpec(shape=[params['batch_size'], params['max_dec_len']], dtype=tf.int32), tf.TensorSpec(shape=[params['batch_size'], params['max_dec_len']], dtype=tf.int32), tf.TensorSpec(shape=[], dtype=tf.int32)))
    def train_step(enc_inp, enc_extended_inp, dec_inp, dec_tar, batch_oov_len):
        """One optimization step over a batch (teacher forcing)."""
        loss = 0
        with tf.GradientTape() as tape:
            (enc_hidden, enc_output) = model.call_encoder(enc_inp)
            (predictions, _) = model(enc_output, enc_hidden, enc_inp, enc_extended_inp, dec_inp, batch_oov_len)
            loss = loss_function(dec_tar, predictions)
        # Gradients over all four submodules of the pointer-generator.
        variables = (((model.encoder.trainable_variables + model.attention.trainable_variables) + model.decoder.trainable_variables) + model.pointer.trainable_variables)
        gradients = tape.gradient(loss, variables)
        optimizer.apply_gradients(zip(gradients, variables))
        return loss
    try:
        f = open(out_file, 'w+')
        for batch in dataset:
            t0 = time.time()
            loss = train_step(batch[0]['enc_input'], batch[0]['extended_enc_input'], batch[1]['dec_input'], batch[1]['dec_target'], batch[0]['max_oov_len'])
            print('Step {}, time {:.4f}, Loss {:.4f}'.format(int(ckpt.step), (time.time() - t0), loss.numpy()))
            f.write('Step {}, time {:.4f}, Loss {:.4f}\n'.format(int(ckpt.step), (time.time() - t0), loss.numpy()))
            if (int(ckpt.step) == params['max_steps']):
                # Final checkpoint, then stop training.
                ckpt_manager.save(checkpoint_number=int(ckpt.step))
                print('Saved checkpoint for step {}'.format(int(ckpt.step)))
                f.close()
                break
            if ((int(ckpt.step) % params['checkpoints_save_steps']) == 0):
                ckpt_manager.save(checkpoint_number=int(ckpt.step))
                print('Saved checkpoint for step {}'.format(int(ckpt.step)))
            ckpt.step.assign_add(1)
        f.close()
    except KeyboardInterrupt:
        # Save progress before surfacing the interrupt to the user.
        ckpt_manager.save(int(ckpt.step))
        print('Saved checkpoint for step {}'.format(int(ckpt.step)))
        f.close()
()
('data-path', default='data/ner_conll/en/test.txt')
('checkpoint-model-name', type=str, default='studio-ousia/luke-large-finetuned-conll-2003')
('--model-config-path', type=click.Path(exists=True))
('--checkpoint-tokenizer-name', type=str)
('--batch-size', type=int, default=32)
('--cuda-device', type=int, default=0)
('--result-save-path', type=click.Path(exists=False), default=None)
('--prediction-save-path', type=click.Path(exists=False), default=None)
('--iob-scheme', type=str, default='iob1')
('--file-encoding', type=str, default='utf-8')
def evaluate_transformers_checkpoint(data_path: str, checkpoint_model_name: str, checkpoint_tokenizer_name: Optional[str], model_config_path: Optional[str], batch_size: int, cuda_device: int, result_save_path: str, prediction_save_path: str, iob_scheme: str, file_encoding: str):
    """Evaluate a HuggingFace LUKE entity-span checkpoint on a CoNLL NER file
    via the AllenNLP wrapper model.

    Builds the span reader and vocabulary from the checkpoint's tokenizer and
    label set, transplants the checkpoint's classifier head into the AllenNLP
    model, runs batched inference, prints the metrics, and optionally dumps
    them to ``result_save_path`` as JSON.

    Raises:
        ValueError: when neither an explicit nor a mapped tokenizer name /
            model config path is available for the checkpoint.
    """
    import_module_and_submodules('examples')
    # Fall back to the known tokenizer for well-known checkpoints.
    checkpoint_tokenizer_name = (checkpoint_tokenizer_name or tokenizer_name_mapping.get(checkpoint_model_name))
    if (checkpoint_tokenizer_name is None):
        raise ValueError('You need to specify which tokenizer to use with a new checkpoint.')
    print(f'Use the tokenizer: {checkpoint_tokenizer_name}')
    reader = ConllSpanReader(tokenizer=PretrainedTransformerTokenizer(model_name=checkpoint_tokenizer_name, add_special_tokens=False, tokenizer_kwargs={'add_prefix_space': True}), token_indexers={'tokens': PretrainedTransformerIndexer(model_name=checkpoint_tokenizer_name)}, use_entity_feature=True, iob_scheme=iob_scheme, encoding=file_encoding)
    transformers_tokenizer = AutoTokenizer.from_pretrained(checkpoint_model_name)
    transformers_model = LukeForEntitySpanClassification.from_pretrained(checkpoint_model_name)
    vocab = Vocabulary()
    vocab.add_transformer_vocab(transformers_tokenizer, 'tokens')
    num_labels = len(transformers_model.config.id2label)
    labels = [transformers_model.config.id2label[i] for i in range(num_labels)]
    # The checkpoint uses 'NIL' for the null class; AllenNLP expects 'O'.
    labels = [('O' if (l == 'NIL') else l) for l in labels]
    vocab.add_tokens_to_namespace(labels, namespace='labels')
    model_config_path = (model_config_path or model_config_mapping.get(checkpoint_model_name))
    if (model_config_path is None):
        raise ValueError('You need to specify which model config file to use with a new checkpoint.')
    print(f'Use the model config: {model_config_path}')
    params = Params.from_file(model_config_path, ext_vars={'TRANSFORMERS_MODEL_NAME': checkpoint_model_name})
    if (prediction_save_path is not None):
        params['prediction_save_path'] = prediction_save_path
    model = Model.from_params(params, vocab=vocab)
    # Reuse the pretrained classification head rather than a fresh one.
    model.classifier = transformers_model.classifier
    model.eval()
    if (cuda_device < 0):
        device = torch.device('cpu')
    else:
        device = torch.device(f'cuda:{cuda_device}')
    model = model.to(device)
    loader = MultiProcessDataLoader(reader, data_path, batch_size=batch_size, shuffle=False)
    loader.index_with(model.vocab)
    with torch.no_grad():
        # Forward passes accumulate metric state inside the model.
        for batch in tqdm.tqdm(loader):
            batch = nn_util.move_to_device(batch, device)
            output_dict = model(**batch)
    metrics = model.get_metrics(reset=True)
    print(metrics)
    if (result_save_path is not None):
        with open(result_save_path, 'w') as f:
            json.dump(metrics, f)
def test_beeswarm_input_is_explanation():
    """Passing a raw ndarray (not a shap.Explanation) to the beeswarm plot
    must raise a TypeError with the documented message."""
    arr = np.random.randn(20, 5)
    with pytest.raises(TypeError, match='beeswarm plot requires an `Explanation` object'):
        shap.plots.beeswarm(arr, show=False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.