code stringlengths 101 5.91M |
|---|
@_utils.test(arch=supported_archs_cgraph)
def test_repeated_arg_name():
    """Two compiled-graph args sharing the name 'pos' must be rejected at dispatch.

    Fix: the decorator line was a bare call expression (missing '@'), so the
    test ran without the arch filter/registration it was meant to have.
    """
    n = 4

    # NOTE(review): test1/test2 are likely meant to be @ti.kernel functions --
    # confirm against the original test file.
    def test1(pos: ti.types.ndarray(ndim=1)):
        for i in range(n):
            pos[i] = 2.5

    def test2(v: ti.f32):
        for i in range(n):
            print(v)

    sym_pos = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'pos', ti.f32, ndim=1)
    # Same name 'pos', different kind -- this is the conflicting argument.
    sym_pos1 = ti.graph.Arg(ti.graph.ArgKind.SCALAR, 'pos', ti.f32)
    builder = ti.graph.GraphBuilder()
    builder.dispatch(test1, sym_pos)
    with pytest.raises(RuntimeError):
        builder.dispatch(test2, sym_pos1)
def test_regular_numpy_2_parm():
    """Round-trip a parameterised RegularType through from_datashape and str()."""
    text = '[0 * int64[parameters={"foo": "bar"}], parameters={"bla": "bloop"}]'
    parsed = ak.types.from_datashape(text, highlevel=False)
    assert isinstance(parsed, ak.types.RegularType)
    assert str(parsed) == text
def test_fit_online_cartpole_with_dqn() -> None:
    """Smoke-test online DQN training on CartPole for a handful of steps."""
    train_env = gym.make('CartPole-v1')
    evaluation_env = gym.make('CartPole-v1')
    dqn = DQNConfig().create()
    replay_buffer = ReplayBuffer(InfiniteBuffer(), env=train_env)
    exploration = LinearDecayEpsilonGreedy()
    dqn.fit_online(
        train_env,
        replay_buffer,
        exploration,
        n_steps=100,
        eval_env=evaluation_env,
        logger_adapter=NoopAdapterFactory(),
    )
@pytest.mark.parametrize('fraction, subsample_test, expected_train_size, expected_test_size',
                         [(0.5, True, 40, 10), (0.5, False, 40, 20),
                          (0.2, True, 16, 4), (0.2, False, 16, 20)])
def test_subsample_splitter_shapes(fraction, subsample_test, expected_train_size, expected_test_size):
    """_SubsampleMetaSplitter shrinks the train fold (and optionally the test fold) by `fraction`.

    Fix: the decorator line began with a bare '.parametrize(...)' -- the
    '@pytest.mark' prefix had been stripped, leaving a syntax error.
    """
    n_samples = 100
    X, y = make_classification(n_samples)
    cv = _SubsampleMetaSplitter(base_cv=KFold(5), fraction=fraction,
                                subsample_test=subsample_test, random_state=None)
    for train, test in cv.split(X, y):
        assert train.shape[0] == expected_train_size
        assert test.shape[0] == expected_test_size
        if subsample_test:
            # Both folds subsampled: together they cover `fraction` of the data.
            assert train.shape[0] + test.shape[0] == int(n_samples * fraction)
        else:
            # Test fold left intact: full fold size of the base CV.
            assert test.shape[0] == n_samples // cv.base_cv.get_n_splits()
class BertConfig(PretrainedConfig):
    """Configuration object holding the hyper-parameters of a BERT model."""

    model_type = 'bert'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, gradient_checkpointing=False, position_embedding_type='absolute', use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # Vocabulary / embedding table sizes.
        self.vocab_size = vocab_size
        self.type_vocab_size = type_vocab_size
        self.max_position_embeddings = max_position_embeddings
        # Transformer dimensions.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        # Regularisation / initialisation.
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Runtime behaviour.
        self.gradient_checkpointing = gradient_checkpointing
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
def extend_sssp_graph(ssspG, vars, ops, opG, node_order, index, input_vars, output_vars, binding, split_vars, split_idx='0', prev_split_idx=None):
    """Extend the single-source-shortest-path layout graph along `node_order`.

    Walks the ops in order, adding per-op layout edges.  When an op touches a
    variable in `split_vars`, the graph is forked: one recursive call per valid
    layout of the split variables, each with its own `split_idx` prefix.
    Concat nodes bridge forks (zero-weight edges) where the op-graph edge is
    marked 'concat'.  Mutates `ssspG` and `input_vars` in place.
    """
    split_vars = set(split_vars)
    for i in range(len(node_order)):
        # First iteration keeps the caller-supplied prefix; later iterations
        # fall back to this call's own split_idx (reset at loop bottom).
        prev_split_idx = (prev_split_idx or split_idx)
        op = ops[node_order[i]]
        # Variables of this op that still need a split; consume them.
        vars_to_split_on = (split_vars & op.all_vars)
        split_vars -= vars_to_split_on
        # Input/output vars: explicit override, else taken from the op-graph edge.
        if (op.name in input_vars):
            in_vars = set(input_vars[op.name])
        else:
            in_vars = opG.edges[(node_order[(i - 1)], node_order[i])]['vars']
        if (op.name in output_vars):
            out_vars = set(output_vars[op.name])
        else:
            out_vars = opG.edges[(node_order[i], node_order[(i + 1)])]['vars']
        # A 'concat' marker on the adjacent op-graph edge requests bridge nodes.
        if (i > 0):
            in_concat = ('concat' in opG.edges[(node_order[(i - 1)], node_order[i])])
        else:
            in_concat = False
        if (i < (len(node_order) - 1)):
            out_concat = ('concat' in opG.edges[(node_order[i], node_order[(i + 1)])])
        else:
            out_concat = False
        if vars_to_split_on:
            if in_concat:
                # Bridge every valid input layout to the incoming concat node.
                in_layouts = vars.get_valid_unique_layouts(op.name, tuple(in_vars), binding=freeze_dict(binding))
                concat_node = f'{prev_split_idx}_{(index + 1)}_concat'
                for layout in layout_iterator(in_layouts):
                    cfg_binding = vars.update_binding_from_cfg(binding, layout)
                    cfg = vars.get_op_config_from_binding(op.name, cfg_binding)
                    in_cfg = freeze_dict({var: cfg[var] for var in in_vars})
                    in_node = (f'{prev_split_idx}_{(index + 1)}', in_cfg)
                    ssspG.add_edge(concat_node, in_node, weight=0.0, cfg=None, op=None)
            # Fork: one recursive extension per layout of the split variables.
            layouts = vars.get_valid_unique_layouts(op.name, tuple(vars_to_split_on), binding=freeze_dict(binding))
            print(f"Splitting SSSP graph on variables {', '.join(vars_to_split_on)}, {layout_len(layouts)} layouts")
            input_vars[op.name] = in_vars
            for (cfg_idx, cfg) in enumerate(layout_iterator(layouts)):
                split_binding = vars.update_binding_from_cfg(binding, cfg)
                extend_sssp_graph(ssspG, vars, ops, opG, node_order[i:], (index + i), input_vars, output_vars, split_binding, split_vars, f'{cfg_idx}_{split_idx}', split_idx)
            # Remaining ops are handled inside the recursive calls.
            break
        else:
            if in_concat:
                in_layouts = vars.get_valid_unique_layouts(op.name, tuple(in_vars), binding=freeze_dict(binding))
                concat_node = f'{prev_split_idx}_{(index + i)}_concat'
                for layout in layout_iterator(in_layouts):
                    cfg_binding = vars.update_binding_from_cfg(binding, layout)
                    cfg = vars.get_op_config_from_binding(op.name, cfg_binding)
                    in_cfg = freeze_dict({var: cfg[var] for var in in_vars})
                    in_node = (f'{prev_split_idx}_{(index + i)}', in_cfg)
                    ssspG.add_edge(concat_node, in_node, weight=0.0, cfg=None, op=None)
            # Regular (non-split) extension for this op.
            add_sssp_edges_for_op(ssspG, vars, op, (index + i), in_vars, out_vars, binding, split_idx, prev_split_idx)
            if out_concat:
                out_layouts = vars.get_valid_unique_layouts(op.name, tuple(out_vars), binding=freeze_dict(binding))
                concat_node = f'{split_idx}_{((index + i) + 1)}_concat'
                for layout in layout_iterator(out_layouts):
                    cfg_binding = vars.update_binding_from_cfg(binding, layout)
                    cfg = vars.get_op_config_from_binding(op.name, cfg_binding)
                    out_cfg = freeze_dict({var: cfg[var] for var in out_vars})
                    out_node = (f'{split_idx}_{((index + i) + 1)}', out_cfg)
                    ssspG.add_edge(out_node, concat_node, weight=0.0, cfg=None, op=None)
        # After the first op, prefixes come from this call's split_idx.
        prev_split_idx = None
def test_imperative_pf():
    """Parametric functions should accept NdArray inputs imperatively."""
    import nnabla.parametric_functions as PF
    arr = nn.NdArray([2, 3, 4, 5])
    out = PF.batch_normalization(arr)
def parse_args(args):
    """Build and parse the MMDet test/eval command line.

    Returns the parsed namespace; also exports LOCAL_RANK for distributed
    launchers.  Raises ValueError (message in module-level `str4`) when both
    --options and --eval-options are given; --options is deprecated and is
    folded into --eval-options.
    """
    parser = argparse.ArgumentParser(description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--work-dir', help='the directory to save the file containing evaluation metrics')
    parser.add_argument('--out', help='output result file in pickle format')
    # Typo fix in help text: 'increasethe' -> 'increase the'.
    parser.add_argument('--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increase the inference speed')
    parser.add_argument('--gpu-ids', type=int, nargs='+', help='(Deprecated, please use --gpu-id) ids of gpus to use (only applicable to non-distributed training)')
    parser.add_argument('--gpu-id', type=int, default=0, help='id of gpu to use (only applicable to non-distributed testing)')
    # Typo fix in help text: 'It isuseful' -> 'It is useful'.
    parser.add_argument('--format-only', action='store_true', help='Format the output results without perform evaluation. It is useful when you want to format the result to a specific format and submit it to the test server')
    parser.add_argument('--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "bbox", "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--show-dir', help='directory where painted images will be saved')
    parser.add_argument('--show-score-thr', type=float, default=0.3, help='score threshold (default: 0.3)')
    parser.add_argument('--gpu-collect', action='store_true', help='whether to use gpu to collect results.')
    parser.add_argument('--tmpdir', help='tmp directory used for collecting results from multiple workers, available when gpu-collect is not specified')
    # str1/str2/str3/str4 are presumably module-level help/error texts -- confirm.
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help=str1)
    parser.add_argument('--options', nargs='+', action=DictAction, help=str2)
    parser.add_argument('--eval-options', nargs='+', action=DictAction, help=str3)
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args(args)
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    if args.options and args.eval_options:
        raise ValueError(str4)
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
def tmx2raw(tmx, debug):
    """Extract a .tmx file in-place with TMX_TOOL; return the path without the extension."""
    to_file = tmx[:-len('.tmx')]
    to_folder = os.path.split(tmx)[0]
    # Skip extraction if a previous run already produced the bitext.
    if os.path.exists(f'{to_folder}/bitext.en'):
        if debug:
            print(f'{tmx} already extracted to {to_file}; so skip')
        return to_file
    call(f'(cd {to_folder}; {TMX_TOOL} {tmx})', debug)
    return to_file
def main():
    # Interactive REPL for the lexer.  Python 2 ONLY: relies on raw_input,
    # str.decode('string_escape') and the builtin reload.
    lexer = new()
    line = ''
    while 1:
        try:
            # Read a line and interpret backslash escapes (e.g. '\n' -> newline).
            line += raw_input('=>> ').decode('string_escape')
            print(len(line), [c for c in line])
        except EOFError:
            # Ctrl-D: hot-reload the lexer module instead of exiting.
            reload(sys.modules['lexer.py'])
        # Feed the accumulated line to the lexer and dump all tokens.
        lexer.input(line)
        print(list((tok for tok in lexer)))
        line = ''
class MathOpsPlan(BenchmarkPlan):
    """Benchmark plan for math-op throughput; integer dtypes are excluded."""

    def __init__(self, arch: str):
        super().__init__('math_ops', arch, basic_repeat_times=10)
        dtypes = DataType()
        dtypes.remove_integer()
        self.create_plan(MathOps(), dtypes, ElementNum(), ForLoopCycle(), MetricType())
        self.add_func(['element16384'], unary_ops_throughput_default)
def convert_mr_to_table(mr):
    """Parse a comma-separated 'key[value]' string into [[Key, Value], ...] rows."""
    rows = []
    for entry in mr.split(','):
        pieces = entry.split('[')
        key = fix_key(pieces[0].strip()).capitalize()
        value = pieces[1].split(']')[0].strip().capitalize()
        rows.append([key, value])
    return rows
class LoraLmConfig():
    # NOTE(review): the field(default_factory=...) defaults below only take
    # effect under @dataclass -- the decorator appears to have been stripped
    # from this dump; confirm upstream before relying on instantiation.
    """Configuration for LoRA fine-tuning of a language model."""
    # HF checkpoint to initialize the base model from.
    initialize_from_hf: str
    # Sub-configs for the LoRA adapter, dataset, trainer and optimizer.
    lora: LoraConfig = field(default_factory=LoraConfig)
    data: LMDatasetConfig = field(default_factory=LMDatasetConfig)
    trainer: TrainerConfig = field(default_factory=TrainerConfig)
    optimizer: OptimizerConfig = field(default_factory=OptimizerConfig)
    # Where to save/upload the PEFT adapter weights (None disables).
    peft_save_path: Optional[str] = None
    peft_hf_upload: Optional[str] = None
    # Save an HF checkpoint every N steps.
    hf_save_steps: int = 1000
    # Where to save/upload the merged (base + adapter) model (None disables).
    merged_hf_save_path: Optional[str] = None
    merged_hf_upload: Optional[str] = None
    trust_remote_code: bool = False
class OpenAIGPTTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for OpenAI GPT.

    Uses spaCy + ftfy for tokenization/text fixing when available; falls back
    to BERT's BasicTokenizer (lower-casing) otherwise.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, merges_file, unk_token='<unk>', **kwargs):
        super(OpenAIGPTTokenizer, self).__init__(unk_token=unk_token, **kwargs)
        self.max_len_single_sentence = self.max_len
        self.max_len_sentences_pair = self.max_len
        try:
            # Preferred path: ftfy to repair mojibake + spaCy English tokenizer.
            import ftfy
            from spacy.lang.en import English
            _nlp = English()
            self.nlp = _nlp.Defaults.create_tokenizer(_nlp)
            self.fix_text = ftfy.fix_text
        except ImportError:
            # Fallback: BERT BasicTokenizer; fix_text=None marks this mode.
            logger.warning('ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.')
            self.nlp = BasicTokenizer(do_lower_case=True)
            self.fix_text = None
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # Drop the header line and the trailing empty line.
            merges = merges_handle.read().split('\n')[1:(- 1)]
        merges = [tuple(merge.split()) for merge in merges]
        # Lower rank = higher merge priority.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # token -> BPE string cache.
        self.cache = {}

    # NOTE(review): in the upstream tokenizer this is a @property -- the
    # decorator may have been stripped from this dump; confirm.
    def vocab_size(self):
        return len(self.encoder)

    def bpe(self, token):
        """Return the BPE segmentation of `token` as a space-joined string."""
        # Word as a tuple of symbols, with '</w>' marking the word end.
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        if (token in self.cache):
            return self.cache[token]
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Merge the highest-priority (lowest-rank) adjacent pair.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    # No more occurrences: copy the tail and stop.
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    # Found the bigram: merge the two symbols.
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        # Special-case so a bare newline token stays a single symbol.
        if (word == '\n </w>'):
            word = '\n</w>'
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize `text`, then BPE-split each token."""
        split_tokens = []
        if (self.fix_text is None):
            # BasicTokenizer path (already lower-cased).
            text = self.nlp.tokenize(text)
            for token in text:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
        else:
            # spaCy path: fix text with ftfy, standardize, then lower-case per token.
            text = self.nlp(text_standardize(self.fix_text(text)))
            for token in text:
                split_tokens.extend([t for t in self.bpe(token.text.lower()).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its vocab id (unk id if absent)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map a vocab id back to its token string (unk token if absent)."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens back into text, turning '</w>' markers into spaces."""
        out_string = ''.join(tokens).replace('</w>', ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory):
        """Write vocab.json and merges.txt into `save_directory`; returns their paths."""
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            # Emit merges in rank order; warn if ranks are not consecutive.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning('Saving vocabulary to {}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!'.format(merge_file))
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)
def weight_translate(k, w):
    """Translate checkpoint key `k` and reorient weight tensor `w` accordingly."""
    k = key_translate(k)
    if not k.endswith('.weight'):
        return w
    dims = w.dim()
    if dims == 2:
        # Linear weights: transpose.
        return w.t()
    if dims == 1:
        # Bias-like vectors stay as-is.
        return w
    # Conv weights: HWIO -> OIHW.
    assert dims == 4
    return w.permute(3, 2, 0, 1)
def Parallelize_GPU_BMUF(*args, **kwargs):
    """GPU variant of Parallelize_BMUF: forces cpu_device off, then delegates."""
    kwargs.update(cpu_device=False)
    Parallelize_BMUF(*args, **kwargs)
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Load a TF BERT checkpoint into BertForPreTraining and save its state dict."""
    bert_config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {bert_config}')
    pretraining_model = BertForPreTraining(bert_config)
    load_tf_weights_in_bert(pretraining_model, bert_config, tf_checkpoint_path)
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(pretraining_model.state_dict(), pytorch_dump_path)
def DSIN(dnn_feature_columns, sess_feature_list, sess_max_count=5, bias_encoding=False, att_embedding_size=1, att_head_num=8, dnn_hidden_units=(256, 128, 64), dnn_activation='relu', dnn_dropout=0, dnn_use_bn=False, l2_reg_dnn=0, l2_reg_embedding=1e-06, seed=1024, task='binary'):
    """Build the Deep Session Interest Network (DSIN) Keras model.

    Session behaviours are divided into `sess_max_count` sessions, passed
    through a self-attention Transformer (interest extractor) and a BiLSTM
    (interest evolution), attended against the query embeddings, and fed with
    the flat features into the final DNN + prediction layer.
    """
    # Total embedding width of the session features; the Transformer output
    # (att_embedding_size * att_head_num) must match it exactly.
    hist_emb_size = sum(map((lambda fc: fc.embedding_dim), filter((lambda fc: (fc.name in sess_feature_list)), dnn_feature_columns)))
    if ((att_embedding_size * att_head_num) != hist_emb_size):
        raise ValueError(('hist_emb_size must equal to att_embedding_size * att_head_num ,got %d != %d *%d' % (hist_emb_size, att_embedding_size, att_head_num)))
    features = build_input_features(dnn_feature_columns)
    # Partition feature columns by kind.
    sparse_feature_columns = (list(filter((lambda x: isinstance(x, SparseFeat)), dnn_feature_columns)) if dnn_feature_columns else [])
    dense_feature_columns = (list(filter((lambda x: isinstance(x, DenseFeat)), dnn_feature_columns)) if dnn_feature_columns else [])
    varlen_sparse_feature_columns = (list(filter((lambda x: isinstance(x, VarLenSparseFeat)), dnn_feature_columns)) if dnn_feature_columns else [])
    sparse_varlen_feature_columns = []
    # NOTE(review): prefix is 'sess' + name (no underscore) while the session
    # inputs below use 'sess_<idx>_<name>' -- confirm the column naming matches.
    history_fc_names = list(map((lambda x: ('sess' + x)), sess_feature_list))
    for fc in varlen_sparse_feature_columns:
        feature_name = fc.name
        if (feature_name in history_fc_names):
            continue
        else:
            sparse_varlen_feature_columns.append(fc)
    inputs_list = list(features.values())
    # One dict of behaviour inputs per session slot: 'sess_<idx>' -> {feat: input}.
    user_behavior_input_dict = {}
    for idx in range(sess_max_count):
        sess_input = OrderedDict()
        for (i, feat) in enumerate(sess_feature_list):
            sess_input[feat] = features[((('sess_' + str(idx)) + '_') + feat)]
        user_behavior_input_dict[('sess_' + str(idx))] = sess_input
    user_sess_length = Input(shape=(1,), name='sess_length')
    # Shared embedding tables; session features are mask_zero for padding.
    embedding_dict = {feat.embedding_name: Embedding(feat.vocabulary_size, feat.embedding_dim, embeddings_initializer=feat.embeddings_initializer, embeddings_regularizer=l2(l2_reg_embedding), name=((('sparse_emb_' + str(i)) + '-') + feat.name), mask_zero=(feat.name in sess_feature_list)) for (i, feat) in enumerate(sparse_feature_columns)}
    query_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns, sess_feature_list, sess_feature_list, to_list=True)
    dnn_input_emb_list = embedding_lookup(embedding_dict, features, sparse_feature_columns, mask_feat_list=sess_feature_list, to_list=True)
    dense_value_list = get_dense_input(features, dense_feature_columns)
    query_emb = concat_func(query_emb_list, mask=True)
    dnn_input_emb = Flatten()(concat_func(dnn_input_emb_list))
    # Session division + self-attention interest extractor.
    tr_input = sess_interest_division(embedding_dict, user_behavior_input_dict, sparse_feature_columns, sess_feature_list, sess_max_count, bias_encoding=bias_encoding)
    Self_Attention = Transformer(att_embedding_size, att_head_num, dropout_rate=0, use_layer_norm=False, use_positional_encoding=(not bias_encoding), seed=seed, supports_masking=True, blinding=True)
    sess_fea = sess_interest_extractor(tr_input, sess_max_count, Self_Attention)
    # Attention of the query over raw session interests and over BiLSTM-evolved ones.
    interest_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True, supports_masking=False)([query_emb, sess_fea, user_sess_length])
    lstm_outputs = BiLSTM(hist_emb_size, layers=2, res_layers=0, dropout_rate=0.2)(sess_fea)
    lstm_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True)([query_emb, lstm_outputs, user_sess_length])
    # Final DNN over flat embeddings + both attention summaries + dense inputs.
    dnn_input_emb = Concatenate()([dnn_input_emb, Flatten()(interest_attention_layer), Flatten()(lstm_attention_layer)])
    dnn_input_emb = combined_dnn_input([dnn_input_emb], dense_value_list)
    output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input_emb)
    output = Dense(1, use_bias=False)(output)
    output = PredictionLayer(task)(output)
    sess_input_list = []
    for i in range(sess_max_count):
        sess_name = ('sess_' + str(i))
        sess_input_list.extend(get_inputs_list([user_behavior_input_dict[sess_name]]))
    model = Model(inputs=(inputs_list + [user_sess_length]), outputs=output)
    return model
@pytest.mark.operations('text')
def test_unknown_content_type(any_app_schema):
    """A response content type not declared in the schema must fail content_type_conformance.

    Fix: the decorator line began with a bare '.operations(...)' -- the
    '@pytest.mark' prefix had been stripped, leaving a syntax error.
    """
    (_, *others, finished) = from_schema(any_app_schema, checks=(content_type_conformance,), hypothesis_settings=hypothesis.settings(max_examples=1, deadline=None)).execute()
    assert finished.has_failures
    check = others[1].result.checks[0]
    assert check.name == 'content_type_conformance'
    assert check.value == Status.failure
    # The server answered text/plain while the schema only declares JSON.
    assert check.context.content_type == 'text/plain'
    assert check.context.defined_content_types == ['application/json']
def dump(data, encoding):
    """Render an encoded event matrix as a human-readable string, one line per event."""
    # Code -> value lookup tables from the encoding spec.
    type_map = encoding['code_type_map']
    beat_map = encoding['code_beat_map']
    position_map = encoding['code_position_map']
    pitch_map = encoding['code_pitch_map']
    duration_map = encoding['code_duration_map']
    instrument_map = encoding['code_instrument_map']
    # Column indices of each field within a row.
    dims = encoding['dimensions']
    beat_dim = dims.index('beat')
    position_dim = dims.index('position')
    pitch_dim = dims.index('pitch')
    duration_dim = dims.index('duration')
    instrument_dim = dims.index('instrument')
    lines = []
    for row in data:
        event_type = type_map[int(row[0])]
        if event_type == 'start-of-song':
            lines.append('Start of song')
        elif event_type == 'end-of-song':
            lines.append('End of song')
        elif event_type == 'instrument':
            instrument = instrument_map[int(row[instrument_dim])]
            lines.append(f'Instrument: {instrument}')
        elif event_type == 'start-of-notes':
            lines.append('Start of notes')
        elif event_type == 'note':
            beat = beat_map[int(row[beat_dim])]
            position = position_map[int(row[position_dim])]
            # Pitch codes are MIDI note numbers; show the note name.
            pitch = pretty_midi.note_number_to_name(pitch_map[int(row[pitch_dim])])
            duration = duration_map[int(row[duration_dim])]
            instrument = instrument_map[int(row[instrument_dim])]
            lines.append(f'Note: beat={beat}, position={position}, pitch={pitch}, duration={duration}, instrument={instrument}')
        else:
            raise ValueError(f'Unknown event type: {event_type}')
    return '\n'.join(lines)
# NOTE(review): this bare call looks like a stripped decorator -- upstream MCT
# uses '@mark_quantizer(...)' here; confirm the original prefix.
_quantizer(quantization_target=QuantizationTarget.Weights, quantization_method=[QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC, QuantizationMethod.UNIFORM, QuantizationMethod.LUT_POT_QUANTIZER, QuantizationMethod.LUT_SYM_QUANTIZER], identifier=ConfigurableQuantizerIdentifier.CONFIGURABLE_ID)
class ConfigurableWeightsQuantizer(BaseKerasInferableQuantizer):
    """Weights quantizer that can switch between candidate bit-width configs.

    All candidate quantized weight tensors are precomputed in __init__; calling
    the quantizer simply selects the active one.
    """

    def __init__(self, node_q_cfg: List[CandidateNodeQuantizationConfig], float_weights: tf.Tensor, max_candidate_idx: int=0):
        super(ConfigurableWeightsQuantizer, self).__init__()
        self.node_q_cfg = node_q_cfg
        self.float_weights = float_weights
        self.max_candidate_idx = max_candidate_idx
        verify_candidates_descending_order(self.node_q_cfg)
        # All candidates must agree on whether weights quantization is enabled.
        for qc in self.node_q_cfg:
            if (qc.weights_quantization_cfg.enable_weights_quantization != self.node_q_cfg[0].weights_quantization_cfg.enable_weights_quantization):
                Logger.error('Candidates with different weights enabled properties is currently not supported.')
        # Precompute one quantized weight tensor per candidate config.
        self.quantized_weights = init_quantized_weights(node_q_cfg=self.node_q_cfg, float_weights=self.float_weights, fw_tensor_convert_func=partial(tf.convert_to_tensor, dtype=tf.float32))
        self.active_quantization_config_index = self.max_candidate_idx

    def set_weights_bit_width_index(self, index: int):
        """Select which candidate bit-width configuration is active."""
        if (index >= len(self.node_q_cfg)):
            Logger.error(f'Quantizer has {len(self.node_q_cfg)} possible nbits. Can not set index {index}')
        self.active_quantization_config_index = index

    def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
        # `inputs` is ignored: the precomputed tensor for the active config is returned.
        return self.quantized_weights[self.active_quantization_config_index]

    def get_config(self) -> Dict[(str, Any)]:
        """Return the constructor/state config for serialization."""
        return {'float_weights': self.float_weights, 'node_q_cfg': self.node_q_cfg, 'active_quantization_config_index': self.active_quantization_config_index}
def build_factors(num_poses: int, num_landmarks: int) -> T.Iterator[Factor]:
    """Yield odometry factors between consecutive poses, then one bearing factor per pose/landmark pair."""
    for i in range(num_poses - 1):
        yield Factor(
            residual=odometry_residual,
            keys=[f'poses[{i}]', f'poses[{i + 1}]', f'distances[{i}]', 'epsilon'],
        )
    for i in range(num_poses):
        for j in range(num_landmarks):
            yield Factor(
                residual=bearing_residual,
                keys=[f'poses[{i}]', f'landmarks[{j}]', f'angles[{i}][{j}]', 'epsilon'],
            )
def safe_open(file_path: str, mode: str, newline: str=None):
    """Open `file_path` with UTF-8 encoding, creating its parent directories first."""
    create_file_path(file_path)
    return open(file_path, mode, newline=newline, encoding='utf-8')
def parse_iperf_run(data, skip=1, use=8):
    """Average aggregate iperf client throughput in Gbit/s.

    Scans every 'host.client.*' sim's stdout for per-interval throughput lines,
    keeps intervals starting in [skip, skip + use), sums across clients per
    second, and returns the mean of those per-second sums (None if no line
    matched).  Mbit values are converted to Gbit.

    Improvements: dict.setdefault replaces the manual key-init dance, the two
    range checks are merged, and the per-second sums are built with a
    comprehension.
    """
    tp_pat = re.compile('\\[ *\\d*\\] *([0-9\\.]*)- *([0-9\\.]*) sec.*Bytes *([0-9\\.]*) ([GM])bits.*')
    tps_time = {}
    for hn in fnmatch.filter(data['sims'].keys(), 'host.client.*'):
        for line in data['sims'][hn]['stdout']:
            m = tp_pat.match(line)
            if not m:
                continue
            time = int(float(m.group(1)))
            if not (skip <= time < skip + use):
                continue
            gbits = float(m.group(3))
            if m.group(4) == 'M':
                gbits /= 1000
            tps_time.setdefault(time, []).append(gbits)
    # Sum across clients for each second, then average over the seconds.
    tps = [sum(tps_time[t]) for t in sorted(tps_time)]
    if not tps:
        return None
    return sum(tps) / len(tps)
def CharFromBv(ch, ctx=None):
    """Create a Z3 character expression from the bit-vector expression `ch`.

    Raises Z3Expression if `ch` is not an expression.  The `ctx` parameter is
    accepted for API symmetry but unused: the context is taken from `ch`.
    """
    if (not is_expr(ch)):
        raise Z3Expression('Bit-vector expression needed')
    return _to_expr_ref(Z3_mk_char_from_bv(ch.ctx_ref(), ch.as_ast()), ch.ctx)
class TestGPTQLossFunctions(unittest.TestCase):
    """Each GPTQ loss variant must yield gradients that actually move the weights."""

    SHAPE = [1, 16, 16, 3]

    def _build_model(self) -> tf.keras.Model:
        # Small two-conv model exposing both an intermediate and a final activation.
        inputs = layers.Input(shape=self.SHAPE[1:])
        x1 = layers.Conv2D(3, 4, use_bias=False)(inputs)
        x = layers.ReLU()(x1)
        x2 = layers.Conv2D(7, 8, use_bias=False)(x)
        model = tf.keras.Model(inputs=inputs, outputs=[x1, x2])
        return model

    def _random_datagen(self):
        # Ten random input batches.
        for _ in range(10):
            yield [np.random.random(self.SHAPE)]

    def _compute_gradients(self, loss_fn, fxp_model, input_data, in_y_float):
        """Forward the quantized model and return (loss, gradients w.r.t. its weights).

        BUG FIX: 'self' was missing from the signature while the method is
        invoked as self._compute_gradients(...), which bound the instance to
        'loss_fn' and raised a TypeError (5 args into a 4-parameter function).
        """
        with tf.GradientTape(persistent=True) as tape:
            y_fxp = fxp_model(input_data, training=True)
            loss_value = loss_fn(y_fxp, in_y_float)
        grads = tape.gradient(loss_value, fxp_model.trainable_weights)
        return (loss_value, grads)

    def _train(self, float_model, quantized_model, loss_fn):
        # Deliberately huge learning rate so weights move far from their start.
        in_optimizer = tf.keras.optimizers.SGD(learning_rate=20.0)
        for input_data in self._random_datagen():
            y_float = float_model(input_data)
            (loss_value_step, grads) = self._compute_gradients(loss_fn, quantized_model, input_data, y_float)
            in_optimizer.apply_gradients(zip(grads, quantized_model.trainable_weights))

    def _compare(self, original_weights, trained_weights):
        # Training should have changed (almost) every weight element.
        self.assertTrue(all([(np.mean((o != t)) > 0.9) for (o, t) in zip(original_weights, trained_weights)]))

    def _init_test(self):
        float_model = self._build_model()
        quantized_model = self._build_model()
        original_weights = [w.numpy() for w in quantized_model.trainable_weights]
        return (float_model, quantized_model, original_weights)

    def _run_and_compare(self, float_model, quantized_model, loss_fn, original_weights):
        self._train(float_model, quantized_model, loss_fn)
        trained_weights = [w.numpy() for w in quantized_model.trainable_weights]
        self._compare(original_weights, trained_weights)

    def test_mse_loss(self):
        (float_model, quantized_model, original_weights) = self._init_test()
        loss_fn = partial(multiple_tensors_mse_loss, fxp_w_list=None, flp_w_list=None, act_bn_mean=None, act_bn_std=None)
        self._run_and_compare(float_model, quantized_model, loss_fn, original_weights)

    def test_weighted_mse_loss(self):
        (float_model, quantized_model, original_weights) = self._init_test()
        loss_fn = partial(multiple_tensors_mse_loss, fxp_w_list=None, flp_w_list=None, act_bn_mean=None, act_bn_std=None, loss_weights=[0.9, 1.1])
        self._run_and_compare(float_model, quantized_model, loss_fn, original_weights)

    def test_activation_mse_loss(self):
        (float_model, quantized_model, original_weights) = self._init_test()
        loss = GPTQMultipleTensorsLoss(norm_loss=False)
        loss_fn = partial(loss.__call__, fxp_w_list=None, flp_w_list=None, act_bn_mean=None, act_bn_std=None, weights_for_average_loss=None)
        self._run_and_compare(float_model, quantized_model, loss_fn, original_weights)

    def test_weighted_activation_mse_loss(self):
        (float_model, quantized_model, original_weights) = self._init_test()
        loss = GPTQMultipleTensorsLoss(norm_loss=False)
        loss_fn = partial(loss.__call__, fxp_w_list=None, flp_w_list=None, act_bn_mean=None, act_bn_std=None, weights_for_average_loss=[0.9, 1.1])
        self._run_and_compare(float_model, quantized_model, loss_fn, original_weights)
def get_confusion_matrix_elements(groundtruth_list, predicted_list):
    """Return (tn, fp, fn, tp), as np.float64 values, for a binary prediction.

    The all-one-class degenerate cases are handled explicitly because
    sklearn's confusion matrix collapses to a 1x1 there.
    """
    _assert_valid_lists(groundtruth_list, predicted_list)
    total = np.float64(len(groundtruth_list))
    if _all_class_1_predicted_as_class_1(groundtruth_list, predicted_list) is True:
        return (0, 0, 0, total)
    if _all_class_0_predicted_as_class_0(groundtruth_list, predicted_list) is True:
        return (total, 0, 0, 0)
    tn, fp, fn, tp = metrics.confusion_matrix(groundtruth_list, predicted_list).ravel()
    return (np.float64(tn), np.float64(fp), np.float64(fn), np.float64(tp))
def getEvalData_parseval(sen, edus):
    """Parse a space-separated span string into {『start-end』: [relation, nuclearity]}.

    Each chunk looks like '(a:Nuc1=rel1:b,c:Nuc2=rel2:d)'; EDU indices are
    1-based into `edus`, nuclearity is the two initial letters, and the
    relation is whichever of rel1/rel2 is not 'span'.
    """
    result = {}
    for chunk in sen.split(' '):
        fields = re.split('[:,=]', chunk)
        nuclearity = fields[1][0] + fields[5][0]
        relation = fields[2] if fields[2] != 'span' else fields[6]
        start_edu = str(edus[int(fields[0].strip('(')) - 1])
        end_edu = str(edus[int(fields[-1].strip(')')) - 1])
        result[start_edu + '-' + end_edu] = [relation, nuclearity]
    return result
def create_pattern_layout():
    """Build the Dash layout for the log-pattern page.

    Left column (width 2): description, controls and a hidden clientside div.
    Right column: summary + attribute cards, summary/timeseries charts, then
    cards for log patterns, dynamic values and the log-lines result table.
    """
    return dbc.Row([dbc.Col(html.Div([create_description_card(), create_control_card(), html.Div(['initial child'], id='output-clientside', style={'display': 'none'})]), width=2), dbc.Col(html.Div([dbc.Row([dbc.Col(dbc.Card(dbc.CardBody([html.H4('Summary'), html.Div(id='log-summarization-summary')])), width=4), dbc.Col(dbc.Card(dbc.CardBody([html.H4('Attributes'), html.Div(id='attribute-options')])), width=8)]), html.B('Charts'), html.Hr(), dbc.Row([dbc.Col(dbc.Card(dbc.CardBody([dcc.Loading([create_summary_graph_layout()])])), width=4), dbc.Col(dbc.Card(dbc.CardBody([dcc.Loading([create_timeseries_grapy_layout()])])), width=8)]), html.B('Log Patterns'), html.Hr(), dbc.Card(dbc.CardBody([html.Div(id='log-patterns')]), id='pattern-log-card'), html.B('Dynamic Values'), html.Hr(), dbc.Card(dbc.CardBody([dcc.Loading(id='loading-dynamic-values', children=[html.Div(id='log-dynamic-lists')], type='default')]), id='pattern-dynamic-values'), html.B('Log Lines'), html.Hr(), dbc.Card(dbc.CardBody([dcc.Loading(id='loading-loglines', children=[dbc.Row(dbc.Col(html.Div(id='select-loglines')))], type='default')]), id='result_table_card', style={'maxwidth': '900px'})]))])
class Cluster(object):
    """Thin wrapper around the Kubernetes API for launching and listing benchmark pods."""

    # Container images served from the local registry.
    sgx_image = '10.75.0.2:5000/sgx-app-mem:1.2'
    standard_image = '10.75.0.2:5000/standard-app-mem:1.2'

    def __init__(self):
        kubernetes.config.load_kube_config()
        self.api = CoreV1Api()

    # Fix: the three helpers below were defined without 'self' yet lived on the
    # class, so calling them on an instance would mis-bind the first argument.
    # They are now explicit @staticmethods (class-level calls keep working).
    @staticmethod
    def pod_requests_sgx(pod: V1Pod) -> bool:
        """Return True if any container limits or requests the SGX resource."""
        for container in pod.spec.containers:
            for demands in (container.resources.limits, container.resources.requests):
                if isinstance(demands, dict) and ('intel.com/sgx' in demands.keys()):
                    return True
        return False

    @staticmethod
    def convert_k8s_suffix(k8s_value: str) -> float:
        """Convert a Kubernetes quantity string ('1Ki', '500m', ...) to a float."""
        try:
            return float(k8s_value)
        except ValueError:
            pass
        # (suffix, base, exponent) for binary and decimal SI units.
        suffixes = [('Ki', 2, 10), ('Mi', 2, 20), ('Gi', 2, 30), ('Ti', 2, 40), ('Pi', 2, 50), ('Ei', 2, 60), ('n', 10, (- 9)), ('u', 10, (- 6)), ('m', 10, (- 3)), ('k', 10, 3), ('M', 10, 6), ('G', 10, 9), ('T', 10, 12), ('P', 10, 15), ('E', 10, 18)]
        for suffix, base, exponent in suffixes:
            if k8s_value.endswith(suffix):
                return float(k8s_value[:-len(suffix)]) * (base ** exponent)
        return float(k8s_value)

    @staticmethod
    def pod_sum_resources_requests(pod: V1Pod, metric: str):
        """Sum `metric` (e.g. 'memory') over all containers that declare a request for it."""
        return sum(
            Cluster.convert_k8s_suffix(c.resources.requests[metric])
            for c in pod.spec.containers
            if (c.resources.requests is not None) and (metric in c.resources.requests)
        )

    def launch_pod(self, pod_name: str, scheduler: str, duration: int, limit: float, actual: float, is_sgx: bool, node: str=None):
        """Create a benchmark pod; `limit`/`actual` are bytes (SGX EPC uses 4096-byte pages)."""
        if is_sgx:
            resource_requirements = V1ResourceRequirements(limits={'intel.com/sgx': int(limit / 4096)}, requests={'intel.com/sgx': int(limit / 4096)})
        else:
            resource_requirements = V1ResourceRequirements(limits={'memory': limit}, requests={'memory': limit})
        pod = V1Pod(api_version='v1', kind='Pod', metadata=V1ObjectMeta(name=pod_name), spec=V1PodSpec(termination_grace_period_seconds=0, scheduler_name=scheduler, containers=[V1Container(name='app', image=(self.sgx_image if is_sgx else self.standard_image), args=['-d', str(duration), str(int(actual / 4096))], resources=resource_requirements)], restart_policy='OnFailure', node_name=node))
        try:
            self.api.create_namespaced_pod(DEFAULT_NAMESPACE, pod)
        except ApiException:
            # Best-effort: report and continue rather than aborting the run.
            print('Creating Pod failed!')
            traceback.print_exc()

    def list_pods(self) -> List[V1Pod]:
        """Return all pods in the default namespace."""
        return self.api.list_namespaced_pod(DEFAULT_NAMESPACE).items
def balanced_binary_cross_entropy_with_logits(logits: Tensor, targets: Tensor, gamma: float=1.0, ignore_index: Optional[int]=None, reduction: str='mean') -> Tensor:
    """Class-balanced binary cross-entropy on raw logits.

    The positive/negative terms are weighted by the (gamma-powered) share
    of the opposite class so the rarer class contributes more to the loss.
    Positions equal to *ignore_index* (if given) are zeroed before the
    'mean'/'sum' reduction.

    NOTE(review): gamma is applied twice -- once when building
    ``pos_weight`` and again via ``.pow(gamma)`` on each term, i.e.
    effectively gamma**2. Preserved as-is; confirm the intended formula.
    """
    positives: Tensor = targets.eq(1).sum()
    negatives: Tensor = targets.eq(0).sum()
    total = positives + negatives
    # Positive-term weight grows with the fraction of negatives.
    pos_weight = torch.pow(negatives / (total + 1e-07), gamma)
    neg_weight = 1.0 - pos_weight
    log_p = torch.nn.functional.logsigmoid(logits)
    log_not_p = torch.nn.functional.logsigmoid(-logits)
    loss = -(pos_weight.pow(gamma) * targets * log_p + neg_weight.pow(gamma) * (1 - targets) * log_not_p)
    if ignore_index is not None:
        loss = torch.masked_fill(loss, targets.eq(ignore_index), 0)
    if reduction == 'mean':
        loss = loss.mean()
    if reduction == 'sum':
        loss = loss.sum()
    return loss
def next_id_by_width(id_val, inc=1, width=16):
    """Return *id_val* advanced by *inc*, wrapped modulo ``2**width``.

    :param id_val: current identifier (expected non-negative)
    :param inc: increment to add (default 1)
    :param width: bit width defining the wrap-around point (default 16)
    """
    limit = 1 << width
    new_id = id_val + inc
    # Modulo replaces the original repeated-subtraction loop: O(1) instead
    # of O(inc / 2**width). Negative sums are returned unchanged, exactly
    # as the original loop (whose condition never fired) did.
    return new_id % limit if new_id >= 0 else new_id
class Src2TrgIO(IO):
    """IO for parallel source/target text files (one aligned example per line)."""

    def __init__(self, tokenize_callback=None, trg_tokenize_callback=None, encoding=None, verbose: bool=True, **token_kwargs):
        super().__init__(is_tokenized=False, tokenize_callback=tokenize_callback, encoding=encoding, verbose=verbose, **token_kwargs)
        # Target side may use a different tokenizer than the source side.
        self.trg_tokenize_callback = trg_tokenize_callback

    def _build_trg_tokens(self, text: Union[(str, List[str])], **kwargs):
        """Build a target-side token sequence, tokenizing raw text with the target callback."""
        if not self.is_tokenized:
            return TokenSequence.from_raw_text(text, self.trg_tokenize_callback, **kwargs, **self.token_kwargs)
        return TokenSequence.from_tokenized_text(text, **kwargs, **self.token_kwargs)

    def _read_lines(self, path):
        """Read non-empty lines from *path*, collapsing internal whitespace to single spaces."""
        with open(path, 'r', encoding=self.encoding) as f:
            return [re.sub('\\s+', ' ', line.strip()) for line in f if line.strip() != '']

    def read(self, src_path, trg_path):
        """Load aligned src/trg files into a list of example dicts.

        Raises AssertionError when the two files have different numbers of
        non-empty lines.
        """
        src_lines = self._read_lines(src_path)
        trg_lines = self._read_lines(trg_path)
        assert len(src_lines) == len(trg_lines)
        data = []
        pairs = zip(src_lines, trg_lines)
        for src_line, trg_line in tqdm.tqdm(pairs, total=len(src_lines), disable=(not self.verbose), ncols=100, desc='Loading src2trg data'):
            data.append({'tokens': self._build_tokens(src_line), 'full_trg_tokens': [self._build_trg_tokens(trg_line)]})
        return data
class TransformerSentenceEncoderLayer(nn.Module):
    """Post-LN Transformer encoder layer: self-attention then feed-forward,
    each sublayer wrapped in dropout + residual + LayerNorm (norm applied
    after the residual add).
    """

    def __init__(self, embedding_dim: int=768, ffn_embedding_dim: int=3072, num_attention_heads: int=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, activation_fn: str='relu', export: bool=False) -> None:
        super().__init__()
        self.embedding_dim = embedding_dim
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        # Resolve the activation by its name (e.g. 'relu', 'gelu').
        self.activation_fn = utils.get_activation_fn(activation_fn)
        self.self_attn = MultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, add_bias_kv=False, add_zero_attn=False, self_attention=True)
        self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
        # Position-wise feed-forward: expand to ffn_embedding_dim, project back.
        self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
        self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
        self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)

    def forward(self, x: torch.Tensor, self_attn_mask: Optional[torch.Tensor]=None, self_attn_padding_mask: Optional[torch.Tensor]=None):
        """Run one encoder layer.

        :param x: input activations (assumed (seq_len, batch, embedding_dim)
            as is conventional for this attention module -- TODO confirm)
        :param self_attn_mask: additive attention mask
        :param self_attn_padding_mask: padding mask over keys
        :return: tuple ``(output, attn)``; attn weights are not computed
            (need_weights=False), so ``attn`` is whatever the attention
            module returns in that mode.
        """
        residual = x
        (x, attn) = self.self_attn(query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=False, attn_mask=self_attn_mask)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = (residual + x)
        x = self.self_attn_layer_norm(x)
        residual = x
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = (residual + x)
        x = self.final_layer_norm(x)
        return (x, attn)
class TransformerDecoderLayer(rf.Module):
    """Pre-LN Transformer decoder layer:
    causal self-attention -> cross-attention over the encoder -> feed-forward,
    each sublayer with LayerNorm before and dropout + residual after.
    """

    def __init__(self, encoder_dim: Dim, out_dim: Dim=Dim(512, name='transformer-dec-default-out-dim'), *, ff_dim: Dim=NotSpecified, ff_activation: Callable[([Tensor], Tensor)]=rf.relu, dropout: float=0.1, num_heads: int=8, self_att: Optional[Union[(rf.CausalSelfAttention, rf.RelPosCausalSelfAttention, rf.Module, type, Any)]]=None, self_att_opts: Optional[Dict[(str, Any)]]=None, att_dropout: float=0.1):
        """
        :param encoder_dim: feature dim of the encoder output attended over
        :param out_dim: model dim of this decoder layer
        :param ff_dim: feed-forward inner dim; defaults to 4 * out_dim
        :param self_att: pre-built self-attention module, a class to
            instantiate, or None for rf.CausalSelfAttention
        """
        super().__init__()
        self.encoder_dim = encoder_dim
        self.dropout = dropout
        self.dropout_broadcast = rf.dropout_broadcast_default()
        self.out_dim = out_dim
        # Fixed: the parameter default is NotSpecified, so the previous
        # `if ff_dim is None` check never fired for the default and
        # NotSpecified leaked into FeedForward. Accept both sentinels.
        if ff_dim is None or ff_dim is NotSpecified:
            ff_dim = (4 * out_dim)
        self.ff = FeedForward(out_dim=out_dim, ff_dim=ff_dim, dropout=dropout, activation=ff_activation)
        self.ff_layer_norm = rf.LayerNorm(out_dim)
        if ((self_att is None) or isinstance(self_att, type)):
            self_att_opts_ = dict(in_dim=out_dim, proj_dim=out_dim, key_dim_total=out_dim, value_dim_total=out_dim, num_heads=num_heads, att_dropout=att_dropout)
            if self_att_opts:
                self_att_opts_.update(self_att_opts)
            if (self_att is None):
                self.self_att = rf.CausalSelfAttention(**self_att_opts_)
            else:
                # self_att is a class: instantiate with the merged options.
                self.self_att = self_att(**self_att_opts_)
        else:
            # Pre-built module: use as-is.
            self.self_att = self_att
        self.self_att_layer_norm = rf.LayerNorm(out_dim)
        self.cross_att = rf.CrossAttention(encoder_dim=self.encoder_dim, query_in_dim=out_dim, proj_dim=out_dim, key_dim_total=out_dim, value_dim_total=out_dim, num_heads=num_heads, att_dropout=att_dropout)
        self.cross_att_layer_norm = rf.LayerNorm(out_dim)

    def default_initial_state(self, *, batch_dims: Sequence[Dim]) -> rf.State:
        """Initial recurrent state (only the causal self-attention carries state)."""
        return rf.State(self_att=self.self_att.default_initial_state(batch_dims=batch_dims))

    def transform_encoder(self, encoder: Tensor, *, axis: Dim) -> rf.State:
        """Precompute the cross-attention keys/values from the encoder output."""
        return rf.State(cross_att=self.cross_att.transform_encoder(encoder, axis=axis))

    def __call__(self, inp: Tensor, *, spatial_dim: Dim, state: rf.State, encoder: rf.State) -> Tuple[(Tensor, rf.State)]:
        """Apply the layer; returns (output, new self-attention state)."""
        new_state = rf.State()
        # Self-attention sublayer (pre-LN).
        x_sa_ln = self.self_att_layer_norm(inp)
        (x_sa, new_state.self_att) = self.self_att(x_sa_ln, axis=spatial_dim, state=state.self_att)
        x_sa = rf.dropout(x_sa, self.dropout, axis=(self.dropout_broadcast and self.out_dim))
        x_sa_out = (x_sa + inp)
        # Cross-attention sublayer over the precomputed encoder state.
        x_ca_ln = self.cross_att_layer_norm(x_sa_out)
        x_ca = self.cross_att(x_ca_ln, encoder.cross_att)
        x_ca = rf.dropout(x_ca, self.dropout, axis=(self.dropout_broadcast and self.out_dim))
        x_ca_out = (x_ca + x_sa_out)
        # Feed-forward sublayer.
        x_ff_ln = self.ff_layer_norm(x_ca_out)
        x_ff = self.ff(x_ff_ln)
        x_ff = rf.dropout(x_ff, self.dropout, axis=(self.dropout_broadcast and self.out_dim))
        x_ff_out = (x_ff + x_ca_out)
        return (x_ff_out, new_state)
class MIMOUNet(nn.Module):
    """Multi-input multi-output U-Net for image restoration.

    Encodes the input at three scales (full, 1/2, 1/4), fuses cross-scale
    features with AFF blocks, and decodes back up, emitting a restored
    image at each scale (coarsest first) as residuals added to the
    downscaled inputs. Operates on single-channel images (first conv has
    1 input channel).
    """

    def __init__(self, num_res=8):
        super(MIMOUNet, self).__init__()
        base_channel = 32
        # Encoder blocks at channel widths 32/64/128.
        self.Encoder = nn.ModuleList([EBlock(base_channel, num_res), EBlock((base_channel * 2), num_res), EBlock((base_channel * 4), num_res)])
        # Stem conv, two stride-2 downsamplers, two transpose-conv upsamplers,
        # and the final output conv, in forward-usage order.
        self.feat_extract = nn.ModuleList([BasicConv(1, base_channel, kernel_size=3, relu=True, stride=1), BasicConv(base_channel, (base_channel * 2), kernel_size=3, relu=True, stride=2), BasicConv((base_channel * 2), (base_channel * 4), kernel_size=3, relu=True, stride=2), BasicConv((base_channel * 4), (base_channel * 2), kernel_size=4, relu=True, stride=2, transpose=True), BasicConv((base_channel * 2), base_channel, kernel_size=4, relu=True, stride=2, transpose=True), BasicConv(base_channel, 1, kernel_size=3, relu=False, stride=1)])
        self.Decoder = nn.ModuleList([DBlock((base_channel * 4), num_res), DBlock((base_channel * 2), num_res), DBlock(base_channel, num_res)])
        # 1x1 convs merging skip connections after channel concatenation.
        self.Convs = nn.ModuleList([BasicConv((base_channel * 4), (base_channel * 2), kernel_size=1, relu=True, stride=1), BasicConv((base_channel * 2), base_channel, kernel_size=1, relu=True, stride=1)])
        # Per-scale output heads producing 1-channel residuals.
        self.ConvsOut = nn.ModuleList([BasicConv((base_channel * 4), 1, kernel_size=3, relu=False, stride=1), BasicConv((base_channel * 2), 1, kernel_size=3, relu=False, stride=1)])
        # AFFs fuse features from all three scales (32+64+128 = 7*base channels in).
        self.AFFs = nn.ModuleList([AFF((base_channel * 7), (base_channel * 1)), AFF((base_channel * 7), (base_channel * 2))])
        self.FAM1 = FAM((base_channel * 4))
        self.SCM1 = SCM((base_channel * 4))
        self.FAM2 = FAM((base_channel * 2))
        self.SCM2 = SCM((base_channel * 2))

    def forward(self, x):
        """Return a list of three restored images: [1/4 scale, 1/2 scale, full scale]."""
        # repeat(1, 1, 1, 1) does not change the shape -- presumably kept to
        # force a copy of the input; TODO confirm it is intentional.
        x = x.repeat(1, 1, 1, 1)
        # Downscaled copies of the input for the multi-scale branches.
        x_2 = F.interpolate(x, scale_factor=0.5)
        x_4 = F.interpolate(x_2, scale_factor=0.5)
        z2 = self.SCM2(x_2)
        z4 = self.SCM1(x_4)
        outputs = list()
        # Encoder path: full -> 1/2 -> 1/4 scale, injecting the SCM features
        # of the downscaled inputs via FAM at each reduced scale.
        x_ = self.feat_extract[0](x)
        res1 = self.Encoder[0](x_)
        z = self.feat_extract[1](res1)
        z = self.FAM2(z, z2)
        res2 = self.Encoder[1](z)
        z = self.feat_extract[2](res2)
        z = self.FAM1(z, z4)
        z = self.Encoder[2](z)
        # Resample encoder features to every other scale for AFF fusion.
        z12 = F.interpolate(res1, scale_factor=0.5)
        z21 = F.interpolate(res2, scale_factor=2)
        z42 = F.interpolate(z, scale_factor=2)
        z41 = F.interpolate(z42, scale_factor=2)
        res2 = self.AFFs[1](z12, res2, z42)
        res1 = self.AFFs[0](res1, z21, z41)
        # Decoder path with per-scale residual outputs.
        z = self.Decoder[0](z)
        z_ = self.ConvsOut[0](z)
        z = self.feat_extract[3](z)
        outputs.append((z_ + x_4))
        z = torch.cat([z, res2], dim=1)
        z = self.Convs[0](z)
        z = self.Decoder[1](z)
        z_ = self.ConvsOut[1](z)
        z = self.feat_extract[4](z)
        outputs.append((z_ + x_2))
        z = torch.cat([z, res1], dim=1)
        z = self.Convs[1](z)
        z = self.Decoder[2](z)
        z = self.feat_extract[5](z)
        outputs.append((z + x))
        return outputs
.lower_builtin(operator.getitem, RecordViewType, numba.types.StringLiteral)
def lower_getitem_field_record(context, builder, sig, args):
    """Numba lowering for ``recordview[field]`` with a string-literal key.

    Delegates to the array-view type's field lowering, passing the literal
    field name extracted from the typed signature.
    """
    (_, (recordviewtype, wheretype)) = (sig.return_type, sig.args)
    (recordviewval, whereval) = args
    return recordviewtype.arrayviewtype.type.lower_getitem_field_record(context, builder, recordviewtype, recordviewval, wheretype.literal_value)
class UnicodeSerializer(FileSerializer):
    """Serializes unicode strings to and from UTF-8 encoded lines."""

    def to_line(self, obj):
        """Coerce *obj* to unicode and encode it as UTF-8 bytes."""
        return ensure_unicode(obj).encode('utf-8')

    def from_line(self, line):
        """Decode a UTF-8 byte line back into a unicode string."""
        return line.decode('utf-8')
def plot(data_name='score.pkl'):
    """Plot per-epoch Jaccard similarity curves for two saved models.

    Reads the first 30 scores from ``saved/<model>/<data_name>`` for each
    model, shows the plot, and saves it to ``score.png``.
    """
    file_path = ['seq2seq', 'seq2seq-all']
    name_maps = ['Leap', 'KG-MIML-Net']
    df = pd.DataFrame()
    for (idx, i) in enumerate(file_path):
        # Context manager fixes the file-handle leak from pickle.load(open(...)).
        with open(os.path.join('saved', i, data_name), 'rb') as f:
            df[name_maps[idx]] = pickle.load(f)[0:30]
    ax = df.plot(title='Jaccard_similarity_score', ylim=[0, 0.4], style='-o')
    plt.xlabel('epoch')
    plt.ylabel('score')
    plt.show()
    ax.get_figure().savefig('score.png')
    plt.close()
def eval(prediction_file, gold_file):
    """Score *prediction_file* against *gold_file* and print/return the metrics.

    NOTE(review): only the answer-level metrics are filled via
    ``update_answer``; the sp_*/joint_* entries stay 0 -- confirm whether
    supporting-fact scoring was intentionally omitted.

    :param prediction_file: JSON with an 'answer' mapping keyed by example id
    :param gold_file: JSON list of gold examples with '_id' and 'answer'
    :return: the metrics dict (averaged over the gold examples)
    """
    with open(prediction_file) as f:
        prediction = json.load(f)
    with open(gold_file) as f:
        gold = json.load(f)
    metrics = {'em': 0, 'f1': 0, 'prec': 0, 'recall': 0, 'sp_em': 0, 'sp_f1': 0, 'sp_prec': 0, 'sp_recall': 0, 'joint_em': 0, 'joint_f1': 0, 'joint_prec': 0, 'joint_recall': 0}
    for dp in gold:
        cur_id = dp['_id']
        # update_answer mutates metrics in place; its return value is unused.
        update_answer(metrics, prediction['answer'][cur_id], dp['answer'])
    # Hoisted out of the loop; guard against an empty gold file.
    N = len(gold)
    if N > 0:
        for k in metrics.keys():
            metrics[k] /= N
    print(metrics)
    return metrics
class ServiceGenderizer():
    """Infer gender from personal names via the Genderize.io and
    Gender-API.com web services, caching raw responses in MongoDB.
    """

    def __init__(self, db_client, genderize_cache_col, genderapi_cache_col):
        """Bind the two cache collections inside the ``genderCache`` database."""
        self.genderize_cache_col = db_client['genderCache'][genderize_cache_col]
        self.genderapi_cache_col = db_client['genderCache'][genderapi_cache_col]

    def get_genderize_gender(self, full_name):
        """Query Genderize.io for the first name extracted from *full_name*.

        Returns 'male'/'female'/'unknown'; the raw response is cached.
        Any failure is logged and mapped to 'unknown'.
        """
        full_name = full_name.lower()
        first_name = utils.extract_first_name(full_name)
        if first_name is None:
            return 'unknown'
        try:
            gender_payload = {'name': first_name}
            session = requests.Session()
            # NOTE(review): the endpoint URL string was truncated in the
            # original source; restored to the public Genderize.io
            # endpoint -- confirm against deployment config.
            gender_return = session.get('https://api.genderize.io', params=gender_payload)
            cache_obj = json.loads(gender_return.text)
            gender = cache_obj['gender']
            logger.debug('Genderize service call result for "{0}" ("{1}"): "{2}"'.format(first_name, full_name, gender))
            if gender is None:
                gender = 'unknown'
                cache_obj['gender'] = 'unknown'
            self.genderize_cache_col.insert_one(cache_obj)
            return gender
        except Exception as e:
            # Fixed: '{e}'.format(e) raised KeyError inside this handler.
            logger.exception('{0}: Failed to obtain gender from Genderize API call'.format(e))
            return 'unknown'

    def get_genderapi_gender(self, session, names):
        """Batch-query Gender-API.com for a list of full names.

        Returns ``{full_name: gender}`` for names the service resolved;
        each raw result is upserted into the cache keyed by the lowercased
        name. Failures are logged and yield a partial (possibly empty) dict.
        """
        assert all((name for name in names)), 'Empty strings exist in name list, please clean prior to sending for gender prediction'
        results = {}
        try:
            payload = [{'full_name': utils.preprocess_text(name)} for name in names]
            # NOTE(review): the URL string was truncated in the original
            # source; restored to the Gender-API v2 endpoint -- confirm.
            url = 'https://gender-api.com/v2/gender'
            headers = {'Authorization': 'Bearer {}'.format(GENDERAPI_TOKEN)}
            response = session.post(url, headers=headers, json=payload)
            cache_obj = response.json()
            for res in cache_obj:
                full_name = res['input']['full_name']
                if res['result_found']:
                    # Strip request echo fields before caching the result.
                    for field in ['input', 'details', 'result_found']:
                        res.pop(field, None)
                    logger.debug('Obtained GenderAPI service result for "{0}": "{1}"'.format(full_name, res['gender']))
                    if res['gender'] not in ['male', 'female']:
                        res['gender'] = 'unknown'
                    results[full_name] = res['gender']
                    res['q'] = full_name.lower()
                    # Upsert so repeated lookups refresh the cached entry.
                    self.genderapi_cache_col.update_many({'q': res['q']}, {'$set': res}, upsert=True)
                else:
                    logger.warning('No results found for GenderAPI service call for name: {0}'.format(full_name))
            # Fixed: a duplicated, unreachable `return results` followed here.
            return results
        except Exception as e:
            logger.exception('{0}: Failed to obtain gender from Gender-API call'.format(e))
            return results

    def run(self, session, results, unknowns):
        """Resolve *unknowns* via the enabled services, updating *results* in place.

        Gender-API is tried first (batched); Genderize is then queried per
        name. Returns the updated results dict.
        """
        names = list(unknowns.keys())
        if GENDERAPI_ENABLED:
            genderapi_results = self.get_genderapi_gender(session, names)
            results.update(genderapi_results)
        if GENDERIZE_ENABLED:
            for name in names:
                # Fixed: the original called self._get_genderize_gender,
                # which does not exist (AttributeError) -- the method is
                # named get_genderize_gender.
                genderize_result = {name: self.get_genderize_gender(name)}
                results.update(genderize_result)
        return results
class Stream_lmul(Stream_scalar):
    """Stream for a series multiplied by a scalar on the right."""

    def get_coefficient(self, n):
        """Return the n-th coefficient, ``series[n] * scalar``.

        The operand order is preserved deliberately: it matters when the
        coefficients or the scalar are noncommutative.
        """
        coefficient = self._series[n]
        return coefficient * self._scalar
def test_unary():
    """Gradient-check elementary unary ops against their NumPy references."""
    import time
    start = time.time()
    cases = [
        ((lambda x: ti.sqrt(x)), (lambda x: np.sqrt(x))),
        ((lambda x: ti.exp(x)), (lambda x: np.exp(x))),
        ((lambda x: ti.log(x)), (lambda x: np.log(x))),
    ]
    for ti_fn, np_fn in cases:
        grad_test(ti_fn, np_fn)
    ti.core.print_profile_info()
    print('Total time {:.3f}s'.format(time.time() - start))
def learn_weights(algorithm, observed_sampler, learning_proposal, fit_probability, B=15000):
    """Estimate P(selection outcome | statistic) by simulation.

    Draws B proposals T from *learning_proposal*, re-centers a fresh copy
    of *observed_sampler* at each, reruns *algorithm*, and fits
    *fit_probability* to the resulting (T, success) pairs.

    NOTE(review): ``direction``, ``observed_target`` and ``observed_outcome``
    are free names in this function -- presumably module-level globals set
    elsewhere; confirm before reuse.
    """
    S = selection_stat = observed_sampler.center
    learning_sample = []
    for _ in range(B):
        T = learning_proposal()
        # Fresh copy each iteration so per-run mutations don't accumulate.
        # (A redundant pre-loop copy, immediately overwritten here, was removed.)
        new_sampler = copy(observed_sampler)
        new_sampler.center = (S + direction.dot((T - observed_target)))
        Y = (algorithm(new_sampler) == observed_outcome)
        learning_sample.append((T, Y))
    learning_sample = np.array(learning_sample)
    (T, Y) = learning_sample.T
    conditional_law = fit_probability(T, Y)
    return conditional_law
def bench(factory, X, Y, X_test, Y_test, ref_coef):
    """Fit ``factory(alpha=alpha)`` on (X, Y); print fit time, test RMSE and
    mean absolute coefficient drift from *ref_coef*; return elapsed seconds.
    """
    gc.collect()
    start = time()
    clf = factory(alpha=alpha).fit(X, Y)
    delta = time() - start
    print('duration: %0.3fs' % delta)
    print('rmse: %f' % rmse(Y_test, clf.predict(X_test)))
    coef_drift = abs(ref_coef - clf.coef_.ravel()).mean()
    print('mean coef abs diff: %f' % coef_drift)
    return delta
.mpl_image_compare
def test_random_summary_dot_with_data():
    """Image-comparison test: dot-type summary plot on random SHAP values
    with accompanying random feature data must render consistently.
    """
    np.random.seed(0)
    fig = plt.figure()
    shap.summary_plot(np.random.randn(20, 5), np.random.randn(20, 5), plot_type='dot', show=False)
    fig.set_layout_engine('tight')
    return fig
class KeepOpenFile(object):
    """Proxy around a file object whose context manager does NOT close it.

    Useful for handing pre-opened streams (e.g. sys.stdin) to code that
    insists on ``with`` semantics; every other attribute is delegated to
    the wrapped file.
    """

    def __init__(self, file):
        self._file = file

    def __getattr__(self, name):
        # Delegate anything not defined on the proxy to the wrapped file.
        return getattr(self._file, name)

    def __iter__(self):
        return iter(self._file)

    def __repr__(self):
        return repr(self._file)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Deliberately a no-op: the underlying file stays open.
        pass
_module('numpy')
def ascontiguousarray(a, dtype=None):
    """Return a contiguous (C-order) array from *a* with at least one
    dimension, avoiding a copy when the input already qualifies
    (copy=False).
    """
    return array(a, dtype, copy=False, order='C', ndmin=1)
def novelty(individual: IndividualLike, container: Sequence, k: int=1, dist: Union[(str, Callable)]='euclidean', ignore_first: bool=False, default_novelty: float=0.1) -> float:
    """Mean distance from *individual* to its k nearest neighbours in *container*.

    Returns *default_novelty* when the container is empty. With
    *ignore_first* set, the single closest neighbour (typically the
    individual itself) is skipped.
    """
    if not container:
        return default_novelty
    neighbours = min(len(container), k)
    ordered = sorted(features_distances(individual, container, dist))
    start = 1 if ignore_first else 0
    return np.mean(ordered[start:(start + neighbours)])
.operations('empty')
def test_empty_response_interaction(any_app_schema):
    """An operation returning an empty response should record interactions
    with no request body, no response body and no response encoding.
    """
    (_, *others, _) = from_schema(any_app_schema, store_interactions=True).execute()
    # Take the interactions from the first AfterExecution event.
    interactions = [event for event in others if isinstance(event, events.AfterExecution)][0].result.interactions
    for interaction in interactions:
        assert (interaction.request.body is None)
        assert (interaction.response.body is None)
        assert (interaction.response.encoding is None)
def test_tasklet_with_global_state():
    """A CPP tasklet can read persistent SDFG state: a global int allocated
    in code_init, read via ``__state``, and freed in code_exit.
    """
    sdfg = dace.SDFG('test_tasklet_with_global_state')
    state = sdfg.add_state()
    sdfg.add_array('output', [1], dace.int32)
    # state_fields declares the member; code_init/code_exit manage its lifetime.
    tasklet = state.add_tasklet('print_global_str', {}, {'out'}, 'out = *__state->global_int;', language=dace.dtypes.Language.CPP, state_fields=['int *global_int;'], code_init='__state->global_int = new int; *__state->global_int = 42;', code_exit='delete __state->global_int;')
    state.add_edge(tasklet, 'out', state.add_write('output'), None, dace.Memlet('output[0]'))
    output = np.zeros((1,), dtype=np.int32)
    sdfg(output=output)
    assert (output[0] == 42)
def val(model, dataloaders, criterion, optimizer, config):
    """Run landmark-detection validation and track the best MRE seen so far.

    Predicts heatmaps on the 'val' split, converts them to landmark
    coordinates via regression voting, and prints MRE/SD/SDR statistics.
    Updates the module-level best_MRE/best_SD/best_SDR globals when the
    mean MRE improves.

    NOTE(review): *criterion* and *optimizer* are accepted but never used
    here -- presumably kept for signature parity with a train() function.
    """
    since = time.time()
    test_dev = []
    for phase in ['val']:
        # Evaluation mode (disables dropout/batch-norm updates).
        model.train(False)
        running_loss = 0.0
        lent = len(dataloaders[phase])
        pbar = tqdm(total=(lent * config.batchSize))
        for ide in range(lent):
            data = dataloaders[phase][ide]
            (inputs, labels) = (data['image'], data['landmarks'])
            inputs = inputs.to(config.use_gpu)
            heatmaps = model(inputs)
            # Heatmaps -> landmark coordinates via regression voting (radius R2).
            predicted_landmarks = utils.regression_voting(heatmaps, config.R2).to(config.use_gpu)
            dev = utils.calculate_deviation(predicted_landmarks.detach(), labels.to(config.use_gpu).detach())
            test_dev.append(dev)
            pbar.update(config.batchSize)
        pbar.close()
    # Convert pixel deviations to physical units via the image spacing.
    test_dev = (torch.stack(test_dev).squeeze() * config.spacing)
    (test_SDR, test_SD, test_MRE) = utils.get_statistical_results(test_dev, config)
    print(('test_MRE(SD): %f(%f), SDR([1mm, 2mm, 2.5mm, 3mm, 4mm]):' % (torch.mean(test_MRE).detach().cpu().numpy(), torch.mean(test_SD).detach().cpu().numpy())), torch.mean(test_SDR, 0).detach().cpu().numpy())
    global best_MRE
    global best_SD
    global best_SDR
    # Lower mean radial error is better; remember the best run.
    if (best_MRE > torch.mean(test_MRE).detach().cpu().numpy()):
        best_MRE = torch.mean(test_MRE).detach().cpu().numpy()
        best_SD = torch.mean(test_SD).detach().cpu().numpy()
        best_SDR = torch.mean(test_SDR, 0).detach().cpu().numpy()
    time_elapsed = (time.time() - since)
    print('testing complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
    print(('Best val MRE(SD): %f(%f), SDR([1mm, 2mm, 2.5mm, 3mm, 4mm]):' % (best_MRE, best_SD)), best_SDR)
class AirGraph():
    """Loads and serves the adjacency graphs (distance, neighbourhood,
    function, distribution, temporal-pattern) used by an air-quality model.

    All matrices are loaded as float32 torch tensors on the given GPU.
    """

    def __init__(self, graph_dir, config_graph, gpu_id):
        """
        :param graph_dir: directory holding the .npy/.csv graph files
        :param config_graph: dict with keys 'use', 'fix_weight',
            'tempp_diag_zero' and 'distri_type'
        :param gpu_id: CUDA device index the tensors are moved to
        """
        device = ('cuda:%d' % gpu_id)
        (use_graph, fix_weight) = (config_graph['use'], config_graph['fix_weight'])
        tempp_diag_zero = config_graph['tempp_diag_zero']
        distri_type = config_graph['distri_type']
        self.A_dist = torch.from_numpy(np.float32(np.load(os.path.join(graph_dir, 'dist.npy')))).to(device)
        self.A_neighb = torch.from_numpy(np.float32(np.load(os.path.join(graph_dir, 'neigh.npy')))).to(device)
        self.A_func = torch.from_numpy(np.float32(np.load(os.path.join(graph_dir, 'func.npy')))).to(device)
        if (distri_type == 'kl'):
            self.A_distri = torch.from_numpy(np.float32(np.load(os.path.join(graph_dir, 'distri_kl.npy')))).to(device)
        elif (distri_type == 'ws'):
            # NOTE(review): this 'ws' branch loads the same 'distri_kl.npy'
            # file as the 'kl' branch -- suspected copy-paste error
            # (expected something like 'distri_ws.npy'); confirm the data files.
            self.A_distri = torch.from_numpy(np.float32(np.load(os.path.join(graph_dir, 'distri_kl.npy')))).to(device)
        else:
            self.A_distri = torch.from_numpy(np.float32(pd.read_csv(os.path.join(graph_dir, 'areaparacorr_92_air.csv'), header=None).values)).to(device)
        self.A_tempp = torch.from_numpy(np.float32(np.load(os.path.join(graph_dir, 'tempp_pm25.npy')))).to(device)
        self.node_num = self.A_dist.shape[0]
        if tempp_diag_zero:
            # Remove self-loops from the temporal-pattern graph.
            self.A_tempp.fill_diagonal_(0)
        self.use_graph = use_graph
        self.fix_weight = fix_weight
        self.graph_num = len(use_graph)

    def get_used_graphs(self):
        """Return the list of adjacency tensors named in config 'use', in order."""
        graph_list = []
        for name in self.use_graph:
            graph_list.append(self.get_graph(name))
        return graph_list

    def get_fix_weight(self):
        """Return the fixed weighted combination of all five graphs.

        The weights (0.0829/0.205/0.1004/0.5276/0.0841) are hard-coded --
        presumably learned offline; TODO confirm their provenance.
        """
        return ((((((self.A_dist * 0.0829) + (self.A_neighb * 0.205)) + (self.A_distri * 0.1004)) + (self.A_tempp * 0.5276)) + (self.A_func * 0.0841)) / 5)

    def get_graph(self, name):
        """Look up one adjacency tensor by its config name; raise for unknown names."""
        if (name == 'dist'):
            return self.A_dist
        elif (name == 'neighb'):
            return self.A_neighb
        elif (name == 'distri'):
            return self.A_distri
        elif (name == 'tempp'):
            return self.A_tempp
        elif (name == 'func'):
            return self.A_func
        else:
            raise NotImplementedError
class TimeBasedSamplingDecorator(SamplingDecorator):
    """Sampling decorator that linearly ramps the per-individual sample
    count from *min_samples* up to *max_samples* as the evaluation budget
    is consumed.
    """
    min_samples: int
    max_samples: int

    def __init__(self, base_alg: QDAlgorithm, min_samples=5, max_samples=100, **kwargs):
        self.min_samples = min_samples
        self.max_samples = max_samples
        # Sanity-check the sample range before handing off to the base class.
        assert (self.min_samples >= 1)
        assert (self.max_samples >= 1)
        assert (self.max_samples >= self.min_samples)
        super().__init__(base_alg, **kwargs)

    def _ask_sampling(self) -> IndividualLike:
        ind = self.base_alg.ask()
        assert isinstance(ind, SampledIndividual)
        # Linear interpolation on the fraction of the budget already spent.
        progress = self.nb_evaluations / self.budget
        target_samples = int(self.min_samples + (self.max_samples - self.min_samples) * progress)
        self._ensure_enough_samples(ind, target_samples)
        return ind
class ResNet(nn.Module):
    """ResNet-50 feature extractor (conv stem + 4 bottleneck stages).

    Returns the final stage's feature map -- there is no pooling or
    classification head despite ``num_classes`` being set below.

    NOTE(review): the *bottleneck* constructor argument is never used,
    and depth is hard-coded to 50 -- presumably trimmed from a more
    general implementation; confirm before extending.
    """

    def __init__(self, bottleneck=False):
        super(ResNet, self).__init__()
        depth = 50
        num_classes = 1000
        blocks = {50: Bottleneck}
        layers = {50: [3, 4, 6, 3]}
        assert layers[depth], 'invalid detph for ResNet (depth should be one of 18, 34, 50, 101, 152, and 200)'
        self.inplanes = 64
        # Standard ImageNet stem: 7x7 stride-2 conv, BN, ReLU, 3x3 max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(blocks[depth], 64, layers[depth][0])
        self.layer2 = self._make_layer(blocks[depth], 128, layers[depth][1], stride=2)
        self.layer3 = self._make_layer(blocks[depth], 256, layers[depth][2], stride=2)
        self.layer4 = self._make_layer(blocks[depth], 512, layers[depth][3], stride=2)
        # He-style initialization for convs; BN starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of *blocks* blocks at width *planes*.

        A 1x1-conv downsample branch is added when the spatial stride or
        channel count changes, so the residual addition stays shape-valid.
        """
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return the layer4 feature map for a (N, 3, H, W) input batch."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
class ScheduleInitTest(unittest.TestCase):
    """Checks each LR schedule's per-step learning rates and that a
    save/reload of the scheduler reproduces the same trajectory.

    NOTE(review): ``m`` and ``optimizer`` are class-level attributes,
    shared by all test methods -- each test attaches a fresh scheduler to
    the same optimizer instance.
    """
    m = torch.nn.Linear(50, 50)
    optimizer = AdamW(m.parameters(), lr=10.0)
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol):
        """Element-wise almost-equal comparison with absolute tolerance *tol*."""
        self.assertEqual(len(list1), len(list2))
        for (a, b) in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_constant_scheduler(self):
        """Constant schedule keeps the base LR at every step, incl. after reload."""
        scheduler = ConstantLRSchedule(self.optimizer)
        lrs = unwrap_schedule(scheduler, self.num_steps)
        expected_learning_rates = ([10.0] * self.num_steps)
        self.assertEqual(len(lrs[0]), 1)
        self.assertListEqual([l[0] for l in lrs], expected_learning_rates)
        scheduler = ConstantLRSchedule(self.optimizer)
        lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
        self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2])

    def test_warmup_constant_scheduler(self):
        """Linear warmup over 4 steps, then constant at the base LR."""
        scheduler = WarmupConstantSchedule(self.optimizer, warmup_steps=4)
        lrs = unwrap_schedule(scheduler, self.num_steps)
        expected_learning_rates = [2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]
        self.assertEqual(len(lrs[0]), 1)
        self.assertListEqual([l[0] for l in lrs], expected_learning_rates)
        scheduler = WarmupConstantSchedule(self.optimizer, warmup_steps=4)
        lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
        self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2])

    def test_warmup_linear_scheduler(self):
        """Warmup over 2 steps, then linear decay to 0 at t_total."""
        scheduler = WarmupLinearSchedule(self.optimizer, warmup_steps=2, t_total=10)
        lrs = unwrap_schedule(scheduler, self.num_steps)
        expected_learning_rates = [5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25, 0.0]
        self.assertEqual(len(lrs[0]), 1)
        self.assertListEqual([l[0] for l in lrs], expected_learning_rates)
        scheduler = WarmupLinearSchedule(self.optimizer, warmup_steps=2, t_total=10)
        lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
        self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2])

    def test_warmup_cosine_scheduler(self):
        """Warmup over 2 steps, then cosine decay to 0 (tolerance 0.01)."""
        scheduler = WarmupCosineSchedule(self.optimizer, warmup_steps=2, t_total=10)
        lrs = unwrap_schedule(scheduler, self.num_steps)
        expected_learning_rates = [5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38, 0.0]
        self.assertEqual(len(lrs[0]), 1)
        self.assertListAlmostEqual([l[0] for l in lrs], expected_learning_rates, tol=0.01)
        scheduler = WarmupCosineSchedule(self.optimizer, warmup_steps=2, t_total=10)
        lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
        self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2])

    def test_warmup_cosine_hard_restart_scheduler(self):
        """Warmup, then two cosine cycles with hard restarts at the base LR."""
        scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, warmup_steps=2, cycles=2, t_total=10)
        lrs = unwrap_schedule(scheduler, self.num_steps)
        expected_learning_rates = [5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46, 0.0]
        self.assertEqual(len(lrs[0]), 1)
        self.assertListAlmostEqual([l[0] for l in lrs], expected_learning_rates, tol=0.01)
        scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, warmup_steps=2, cycles=2, t_total=10)
        lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
        self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2])
def make_robotics_env(env_id, seed, rank=0):
    """Create, flatten and monitor a seeded gym robotics environment.

    Observation/goal dicts are flattened into a single vector, and a
    Monitor wrapper records episode stats (incl. 'is_success') under the
    logger directory when one is configured.
    """
    set_global_seeds(seed)
    env = FlattenDictWrapper(gym.make(env_id), ['observation', 'desired_goal'])
    log_dir = logger.get_dir()
    monitor_path = log_dir and os.path.join(log_dir, str(rank))
    env = Monitor(env, monitor_path, info_keywords=('is_success',))
    env.seed(seed)
    return env
def resnext152(**kwargs):
    """Construct a 152-layer ResNeXt model (stage depths 3/8/36/3)."""
    return ResNeXt(ResNeXtBottleneck, [3, 8, 36, 3], **kwargs)
def _mutation_type_error(data):
if (data[2] is None):
del data[2]
return_str = (str(data) + ' is not a valid quiver mutation type')
return_str += "\n Finite types have the form [ '?', n ] for type ? and rank n"
return_str += "\n Affine type A has the form [ 'A', [ i, j ], 1 ] for rank i+j"
return_str += "\n Affine type ? has the form [ '?', k, \\pm 1 ] for rank k+1"
return_str += "\n Elliptic type ? has the form [ '?', k, [i, j] ] (1 <= i,j <= 3) for rank k+2"
return_str += '\n For correct syntax in other types, please consult the documentation.'
raise ValueError(return_str) |
def cvt_mask_palette_VOC(data):
    """Re-palette one segmentation mask and write it to the destination path.

    :param data: (src_path, dst_path) tuple -- packed so the function can
        be mapped over a worker pool
    Labels above 20 are reset to background (VOC has 20 foreground
    classes); the module-level ``mask_palette`` is applied to the output.
    """
    (src_path, dst_path) = data
    mask = np.array(load_image_in_PIL(src_path, 'P'))
    # Clamp out-of-range labels to background (0).
    mask[(mask > 20)] = 0
    mask = Image.fromarray(mask)
    mask.putpalette(mask_palette)
    mask.save(dst_path)
def test_kernel_print():
    """Smoke test: initialize taichi and count primes below N in pure Python."""
    N = 10000
    ti.init()

    def is_prime(n: int):
        # Trial division up to floor(sqrt(n)).
        for divisor in range(2, (int((n ** 0.5)) + 1)):
            if n % divisor == 0:
                return False
        return True

    def count_primes(n: int) -> int:
        return sum(1 for candidate in range(2, n) if is_prime(candidate))

    count_primes(N)
class IEncoder(rf.Module, ABC):
    """Abstract encoder interface: maps a source tensor to features of
    dimension ``out_dim``.
    """
    # Feature dimension of the encoder output; set by implementations.
    out_dim: Dim

    def __call__(self, source: Tensor) -> Tensor:
        """Encode *source*; must be overridden by concrete encoders."""
        raise NotImplementedError
def _traverse(node, fn, visited, depth):
    """Depth-first walk over an autograd graph, calling *fn* on tensors.

    *fn* is invoked as ``fn(node, tensor, is_saved)`` -- True for saved
    tensors, False for leaf variables. *visited* prevents revisiting nodes;
    *depth* tracks recursion depth and is passed along incremented.
    """
    if node is None or node in visited:
        return
    visited.add(node)
    if hasattr(node, 'saved_tensors'):
        for saved in node.saved_tensors:
            fn(node, saved, True)
    if hasattr(node, 'variable'):
        fn(node, node.variable.data, False)
    for extra in _list_saved_tensors(node, visited):
        fn(node, extra, True)
    for child, _ in getattr(node, 'next_functions', ()):
        _traverse(child, fn, visited, depth + 1)
class GAP(NodeClassification):
    """GAP node-classification model: an optionally pre-trained MLP encoder
    followed by multi-hop feature aggregation and a multi-channel MLP
    classifier over the (hops + 1) aggregated representations.
    """
    supported_activations = {'relu': torch.relu_, 'selu': torch.selu_, 'tanh': torch.tanh}

    def __init__(self, num_classes, hops: Annotated[(int, ArgInfo(help='number of hops', option='-k'))]=2, hidden_dim: Annotated[(int, ArgInfo(help='dimension of the hidden layers'))]=16, encoder_layers: Annotated[(int, ArgInfo(help='number of encoder MLP layers'))]=2, base_layers: Annotated[(int, ArgInfo(help='number of base MLP layers'))]=1, head_layers: Annotated[(int, ArgInfo(help='number of head MLP layers'))]=1, combine: Annotated[(str, ArgInfo(help='combination type of transformed hops', choices=MultiMLP.supported_combinations))]='cat', activation: Annotated[(str, ArgInfo(help='type of activation function', choices=supported_activations))]='selu', dropout: Annotated[(float, ArgInfo(help='dropout rate'))]=0.0, batch_norm: Annotated[(bool, ArgInfo(help='if true, then model uses batch normalization'))]=True, encoder_epochs: Annotated[(int, ArgInfo(help='number of epochs for encoder pre-training (ignored if encoder_layers=0)'))]=100, **kwargs: Annotated[(dict, ArgInfo(help='extra options passed to base class', bases=[NodeClassification]))]):
        super().__init__(num_classes, **kwargs)
        # No encoder -> pre-training would be a no-op; force it off.
        if ((encoder_layers == 0) and (encoder_epochs > 0)):
            console.warning('encoder_layers is 0, setting encoder_epochs to 0')
            encoder_epochs = 0
        self.hops = hops
        self.encoder_layers = encoder_layers
        self.encoder_epochs = encoder_epochs
        activation_fn = self.supported_activations[activation]
        self._encoder = EncoderModule(num_classes=num_classes, hidden_dim=hidden_dim, encoder_layers=encoder_layers, head_layers=1, normalize=True, activation_fn=activation_fn, dropout=dropout, batch_norm=batch_norm)
        # One classifier channel per aggregation hop, plus the raw features.
        self._classifier = ClassificationModule(num_channels=(hops + 1), num_classes=num_classes, hidden_dim=hidden_dim, base_layers=base_layers, head_layers=head_layers, combination=combine, activation_fn=activation_fn, dropout=dropout, batch_norm=batch_norm)

    def classifier(self) -> ClassificationModule:
        """Return the classification module."""
        return self._classifier

    def reset_parameters(self):
        """Re-initialize the encoder in addition to the base model's parameters."""
        self._encoder.reset_parameters()
        super().reset_parameters()

    def fit(self, data: Data, prefix: str='') -> Metrics:
        """Optionally pre-train the encoder, precompute hop aggregations, then fit."""
        self.data = data.to(self.device, non_blocking=True)
        if (self.encoder_layers > 0):
            self.data = self.pretrain_encoder(self.data, prefix=prefix)
        self.data = self.compute_aggregations(self.data)
        return super().fit(self.data, prefix=prefix)

    def test(self, data: Optional[Data]=None, prefix: str='') -> Metrics:
        """Evaluate; new data is first encoded and aggregated like in fit()."""
        if ((data is None) or (data == self.data)):
            # Reuse the already-encoded/aggregated training-time data.
            data = self.data
        else:
            data = data.to(self.device, non_blocking=True)
            data.x = self._encoder.predict(data)
            data = self.compute_aggregations(data)
        return super().test(data, prefix=prefix)

    def predict(self, data: Optional[Data]=None) -> torch.Tensor:
        """Predict; new data is first encoded and aggregated like in fit()."""
        if ((data is None) or (data == self.data)):
            data = self.data
        else:
            data.x = self._encoder.predict(data)
            data = self.compute_aggregations(data)
        return super().predict(data)

    def _aggregate(self, x: torch.Tensor, adj_t: SparseTensor) -> torch.Tensor:
        """One propagation hop: sparse adjacency times features."""
        return matmul(adj_t, x)

    def _normalize(self, x: torch.Tensor) -> torch.Tensor:
        """Row-wise L2 normalization along the feature dimension."""
        return F.normalize(x, p=2, dim=(- 1))

    def pretrain_encoder(self, data: Data, prefix: str) -> Data:
        """Train the encoder alone, then replace data.x with its predictions."""
        console.info('pretraining encoder')
        self._encoder.to(self.device)
        self.trainer.fit(model=self._encoder, epochs=self.encoder_epochs, optimizer=self.configure_encoder_optimizer(), train_dataloader=self.data_loader(data, 'train'), val_dataloader=self.data_loader(data, 'val'), test_dataloader=None, checkpoint=True, prefix=f'{prefix}encoder/')
        self.trainer.reset()
        data.x = self._encoder.predict(data)
        return data

    def compute_aggregations(self, data: Data) -> Data:
        """Stack L2-normalized 0..hops-hop propagations of data.x along a new last dim."""
        with console.status('computing aggregations'):
            x = F.normalize(data.x, p=2, dim=(- 1))
            x_list = [x]
            for _ in range(self.hops):
                x = self._aggregate(x, data.adj_t)
                x = self._normalize(x)
                x_list.append(x)
            data.x = torch.stack(x_list, dim=(- 1))
        return data

    def configure_encoder_optimizer(self) -> Optimizer:
        """Build the encoder optimizer matching the model's optimizer settings."""
        Optim = {'sgd': SGD, 'adam': Adam}[self.optimizer_name]
        return Optim(self._encoder.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('delta', [0.5, 1.0, 1.5])
def test_huber_loss_double_backward(seed, ctx, func_name, delta):
    """Verify second-order gradients of F.huber_loss for several delta values
    via the generic backward-function tester.
    """
    from nbla_test_utils import cap_ignore_region, backward_function_tester
    rng = np.random.RandomState(seed)
    # Two random (2, 3, 4) float32 inputs (prediction and target), scaled by 2.
    inputs = [(rng.randn(2, 3, 4).astype(np.float32) * 2) for _ in range(2)]
    backward_function_tester(rng, F.huber_loss, inputs, func_args=[delta], atol_accum=0.01, ctx=ctx)
def sinabs_to_exodus(model: torch.nn.Module):
    """Swap every mapped sinabs layer in ``model`` for its EXODUS equivalent.

    Replacements are constructed from each module's ``arg_dict`` so the
    hyperparameters carry over unchanged.
    """
    for sinabs_class, exodus_class in module_map.items():
        # Bind exodus_class as a default argument to avoid the classic
        # late-binding closure bug across loop iterations.
        def mapper_fn(module, replacement=exodus_class):
            return replacement(**module.arg_dict)
        model = sinabs.conversion.replace_module(model, sinabs_class, mapper_fn=mapper_fn)
    return model
class PreciseBN(HookBase):
    """Training hook that periodically recomputes precise BatchNorm statistics.

    Every ``period`` steps (and at the final iteration) it streams batches
    from ``data_loader`` through ``model`` so the BN running statistics are
    replaced by freshly estimated ones.
    """

    def __init__(self, period, model, data_loader, num_iter):
        self._logger = logging.getLogger(__name__)
        if (len(get_bn_modules(model)) == 0):
            self._logger.info('PreciseBN is disabled because model does not contain BN layers in training mode.')
            # No BN layers: all later calls become no-ops.
            self._disabled = True
            return
        self._model = model
        self._data_loader = data_loader
        self._num_iter = num_iter
        self._period = period
        self._disabled = False
        # Created lazily on first update so the loader is not consumed early.
        self._data_iter = None

    def after_step(self):
        next_iter = (self.trainer.iter + 1)
        is_final = (next_iter == self.trainer.max_iter)
        # Fire on the final iteration, or every `period` steps when enabled.
        if (is_final or ((self._period > 0) and ((next_iter % self._period) == 0))):
            self.update_stats()

    def update_stats(self):
        """Recompute BN statistics from fresh batches of the data loader."""
        if self._disabled:
            return
        if (self._data_iter is None):
            self._data_iter = iter(self._data_loader)
        num_iter = 0

        def data_loader():
            # Counts consumed batches and logs progress every 100.
            # NOTE(review): upstream versions wrap this body in an endless
            # loop so it can yield `num_iter` batches; as written it yields
            # once -- confirm a surrounding loop was not lost in extraction.
            nonlocal num_iter
            num_iter += 1
            if ((num_iter % 100) == 0):
                self._logger.info('Running precise-BN ... {}/{} iterations.'.format(num_iter, self._num_iter))
            (yield next(self._data_iter))
        with EventStorage():
            # EventStorage absorbs any events emitted by the forward passes.
            self._logger.info(('Running precise-BN for {} iterations... '.format(self._num_iter) + 'Note that this could produce different statistics every time.'))
            update_bn_stats(self._model, data_loader(), self._num_iter)
class LexerThread():
    """Pairs a lexer with its mutable lexing state for one input text."""

    def __init__(self, lexer, text):
        self.lexer = lexer
        self.state = lexer.make_lexer_state(text)

    def lex(self, parser_state):
        """Delegate tokenization to the lexer using the current state."""
        return self.lexer.lex(self.state, parser_state)

    def __copy__(self):
        # Share the (stateless) lexer; shallow-copy only the mutable state.
        clone = object.__new__(LexerThread)
        clone.lexer = self.lexer
        clone.state = copy(self.state)
        return clone
def get_numpy(tensor):
    """Convert a torch tensor (possibly wrapped in Variables) to a numpy array."""
    # Unwrap nested Variable containers iteratively instead of recursing.
    while isinstance(tensor, TorchVariable):
        tensor = tensor.data
    if _use_gpu:
        # GPU tensors must be moved to host memory before conversion.
        return tensor.cpu().numpy()
    return tensor.numpy()
def _maybe_get_const(value, desc):
    """Extract the payload of an ONNX Constant node; pass other values through."""
    is_const_node = _is_value(value) and value.node().kind() == 'onnx::Constant'
    return _parse_arg(value, desc) if is_const_node else value
def setup_petsc_options(ksps: List[PETSc.KSP], ksp_options: List[_typing.KspOption]) -> None:
    """Apply per-solver PETSc option dictionaries to the given KSP objects.

    The lists are walked in lockstep: ``ksp_options[i]`` configures
    ``ksps[i]``. The FEniCS PETSc option database is cleared first, and the
    global options database is cleared before each solver so no stale
    options leak between solvers.
    """
    fenics.PETScOptions.clear()
    opts = PETSc.Options()
    # zip replaces the index-based `range(len(...))` loop over the two
    # parallel lists; callers are expected to pass equal-length lists.
    for ksp, options in zip(ksps, ksp_options):
        opts.clear()
        for key, value in options.items():
            opts.setValue(key, value)
        ksp.setFromOptions()
def test_arrow_nested_nested_array():
    """Round-trip a doubly nested pyarrow list array through awkward."""
    expected = [[[1.1, 2.2], [3.3], []], [], [[4.4, 5.5]]]
    arrow_array = pyarrow.array(expected)
    assert to_list(ak._connect.pyarrow.handle_arrow(arrow_array)) == expected
def register_Ns3EpcS11SapMme_methods(root_module, cls):
    """Register constructors and pure-virtual methods of ns3::EpcS11SapMme.

    Auto-generated PyBindGen registration code for the ns-3 Python bindings;
    keep in sync with the binding generator rather than hand-editing.
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::EpcS11SapMme const &', 'arg0')])
    # S11 SAP callbacks invoked by the SGW towards the MME; all pure virtual.
    cls.add_method('CreateSessionResponse', 'void', [param('ns3::EpcS11SapMme::CreateSessionResponseMessage', 'msg')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('DeleteBearerRequest', 'void', [param('ns3::EpcS11SapMme::DeleteBearerRequestMessage', 'msg')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('ModifyBearerResponse', 'void', [param('ns3::EpcS11SapMme::ModifyBearerResponseMessage', 'msg')], is_pure_virtual=True, is_virtual=True)
    return
def reshape(source: Tensor, in_dims: Sequence[Dim], out_dims: Sequence[Dim]) -> Tensor:
    """Reshape ``source`` from ``in_dims`` to ``out_dims`` via its raw backend."""
    backend = source._raw_backend
    return backend.reshape(source, in_dims=in_dims, out_dims=out_dims)
class OrAttributeFilter(Filter):
    """Composite filter that matches when ANY of its sub-filters matches."""

    def __init__(self, *filters: AttributeFilter):
        self.filters = filters

    def match(self, layer_config: Dict[str, Any]) -> bool:
        """Return True if at least one sub-filter accepts ``layer_config``.

        Uses any(), which short-circuits exactly like the original
        explicit loop did.
        """
        return any(f.match(layer_config) for f in self.filters)

    def __repr__(self):
        return ' | '.join(str(f) for f in self.filters)
def get_scheduler(optimizer, opt):
    """Create a learning-rate scheduler for ``optimizer`` per ``opt.lr_policy``.

    Supported policies:
      - 'lambda':  linear decay to zero over ``opt.niter_decay`` epochs,
                   starting after ``opt.niter`` epochs (offset by ``opt.iter``)
      - 'step':    decay by 10x every ``opt.lr_decay_iters`` epochs
      - 'plateau': reduce on metric plateau

    Raises:
        NotImplementedError: if ``opt.lr_policy`` is not one of the above.
    """
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            # Multiplier stays at 1.0 until (niter - iter) epochs have
            # passed, then decays linearly over (niter_decay + 1) epochs.
            lr_l = 1.0 - max(0, (epoch + 1 + opt.iter) - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        # BUG FIX: the original *returned* a NotImplementedError instance
        # (built with printf-style args that were never formatted), so
        # callers silently received an exception object instead of failing.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
class LogDataset(torch.utils.data.Dataset):
    """Dataset pairing tokenizer-style encodings with integer labels."""

    def __init__(self, encodings, labels):
        # encodings: mapping of field name -> per-example sequences
        # (e.g. 'input_ids', 'attention_mask'); labels: one int per example.
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        """Return one example as a dict of tensors, with its label under 'labels'."""
        sample = {}
        for field, values in self.encodings.items():
            sample[field] = torch.tensor(values[idx])
        sample['labels'] = torch.tensor(self.labels[idx])
        return sample

    def __len__(self):
        return len(self.labels)
class DiscreteFlattenPreprocessor(Preprocessor):
    """Preprocessor that one-hot encodes a gym ``Discrete`` space.

    NOTE(review): ``transform`` reads ``self.size`` as a value while ``size``
    is defined below as a plain method -- upstream these accessors are most
    likely decorated with ``@property`` (decorators may have been stripped
    in this copy); confirm against the original file.
    """

    def __init__(self, space: spaces.Discrete):
        super(DiscreteFlattenPreprocessor, self).__init__(space)
        # Number of discrete choices == length of the one-hot vector.
        self._size = space.n

    def size(self):
        return self._size

    def shape(self):
        return (self._size,)

    def transform(self, data, nested=False) -> np.ndarray:
        """One-hot encode an int, or reshape an ndarray batch to (-1, size)."""
        if nested:
            data = _get_batched(data)
        if isinstance(data, int):
            # Single scalar action -> one-hot row vector.
            array = np.zeros(self.size, dtype=np.float32)
            array[data] = 1
            return array
        elif isinstance(data, np.ndarray):
            # Already-numeric data: only enforce the (batch, size) shape.
            array = data.reshape(((- 1), self.size))
            return array
        else:
            raise TypeError(f'Unexpected type: {type(data)}')

    def write(self, array, offset, data):
        # Intentionally a no-op for this preprocessor.
        pass
def get_3D_maze_blocks(map):
    """Map every (col, level, row) coordinate of a 3-D maze to its tile.

    ``map`` is indexed as map[level][row][col]; keys are (col, level, row).
    """
    blocks = {}
    for k, level in enumerate(map):
        for j, row in enumerate(level):
            for i, cell in enumerate(row):
                blocks[(i, k, j)] = get_tile(cell)
    return blocks
def configShower(textWidth=64):
    """Pretty-print every configuration key/value under a yellow header."""
    config = configReader()
    customPrint(Fore.YELLOW + 'Hyperparameters and Configurations', textWidth=textWidth)
    for key in config:
        # Upper-cased key, then the value tinted yellow.
        line = '{}:'.format(key).upper() + Fore.YELLOW + '{}'.format(config[key])
        customPrint(line, textWidth=textWidth, style='-')
class DirichletNeumann(CompositeBase):
    """Composite basis with a Dirichlet BC on the left and a Neumann BC on the right.

    NOTE(review): ``boundary_condition`` and ``short_name`` take no ``self``
    -- upstream they are presumably ``@staticmethod``s whose decorators were
    lost in extraction; confirm against the original file.
    """

    def __init__(self, N, quad='LG', bc=(0, 0), domain=((- 1), 1), dtype=float, padding_factor=1, dealias_direct=False, coordinates=None, **kw):
        # Accept a plain (dirichlet_value, neumann_value) pair and convert it
        # to a BoundaryConditions object: 'D' on the left, 'N' on the right.
        if isinstance(bc, (tuple, list)):
            bc = BoundaryConditions({'left': {'D': bc[0]}, 'right': {'N': bc[1]}}, domain=domain)
        CompositeBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype, bc=bc, padding_factor=padding_factor, dealias_direct=dealias_direct, coordinates=coordinates)
        # Stencil coefficients expressed in the symbol ``n`` -- presumably a
        # module-level sympy symbol; TODO confirm it is in scope here.
        self._stencil = {0: 1, 1: (((2 * n) + 3) / (((n ** 2) + (4 * n)) + 4)), 2: ((- (((n ** 2) + (2 * n)) + 1)) / (((n ** 2) + (4 * n)) + 4))}

    def boundary_condition():
        return 'DirichletNeumann'

    def short_name():
        return 'DN'
class HawksTests(unittest.TestCase):
    """End-to-end tests for hawks generator configuration and reproducibility."""

    def test_multiconfig_deep(self):
        """Nested list-valued options multiply out to the expected config count."""
        config = {'dataset': {'num_examples': [10, 100, 1000]}, 'constraints': {'overlap': {'limit': ['upper', 'lower']}}, 'ga': {'num_gens': [50, 100, 10, 200], 'mut_args_mean': {'random': {'dims': ['each', 'all']}}}}
        obj = hawks.create_generator(config)
        (total_configs, _, _) = obj._count_multiconfigs()
        # 3 * 2 * 4 * 2 = 48 combinations.
        self.assertEqual(total_configs, 48)

    def test_full_hawks_run(self):
        """A full run reproduces the stored validation statistics."""
        test_fpath = (Path(hawks.__file__).parents[1] / 'tests')
        gen = hawks.create_generator((test_fpath / 'validation.json'))
        gen.run()
        res = gen.get_stats()
        known_result = pd.read_csv((test_fpath / 'validation.csv'), index_col=False)
        print('Result:')
        print(res)
        print('Known result:')
        print(known_result)
        print('---')
        equals = np.allclose(res.values, known_result.values)
        self.assertTrue(equals)

    def test_full_hawks_run_multiple(self):
        """Two back-to-back runs still reproduce the validation statistics."""
        test_fpath = (Path(hawks.__file__).parents[1] / 'tests')
        gen = hawks.create_generator((test_fpath / 'validation.json'))
        gen.run()
        gen = hawks.create_generator((test_fpath / 'validation.json'))
        gen.run()
        res = gen.get_stats()
        known_result = pd.read_csv((test_fpath / 'validation.csv'), index_col=False)
        print(res)
        print(known_result)
        equals = np.allclose(res.values, known_result.values)
        self.assertTrue(equals)

    # BUG FIX: this method was also named `test_full_hawks_run_multiple`,
    # silently shadowing the definition above so only this one ever ran.
    # Renamed so both tests are discovered. Note it resolves
    # 'validation.json'/'validation.csv' relative to the CWD, so it only
    # passes when run from the tests directory.
    def test_full_hawks_run_multiple_relative_path(self):
        gen = hawks.create_generator('validation.json')
        gen.run()
        gen = hawks.create_generator('validation.json')
        gen.run()
        res = gen.get_stats()
        known_result = pd.read_csv('validation.csv', index_col=False)
        equals = np.allclose(res.values, known_result.values)
        self.assertTrue(equals)

    def test_incorrect_config_arg(self):
        """Unknown config keys (here 'lim') must raise ValueError."""
        with self.assertRaises(ValueError):
            gen = hawks.create_generator({'hawks': {'seed_num': 4, 'num_runs': 1}, 'objectives': {'silhouette': {'target': 0.9}}, 'constraints': {'eigenval_ratio': {'lim': 'upper'}}})

    def test_nested_config_arg(self):
        """Nested overrides are merged into the generator's full config."""
        gen = hawks.create_generator({'constraints': {'overlap': {'limit': 'TEST'}}})
        self.assertEqual(gen.full_config['constraints']['overlap']['limit'], 'TEST')
class RIDNET(nn.Module):
    """RIDNet-style denoising network: head conv -> 4 Blocks -> tail conv.

    The input mean is subtracted before the trunk and added back after it,
    and a global residual connection adds the input to the output.
    """

    def __init__(self, args):
        super(RIDNET, self).__init__()
        n_feats = args.n_feats
        kernel_size = 3
        reduction = args.reduction  # NOTE(review): unused here -- presumably consumed by Block upstream; confirm.
        # Per-channel RGB means/stds used by the MeanShift layers.
        rgb_mean = (0.4488, 0.4371, 0.404)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        # Sign argument 1 -> adds the mean back (inverse of sub_mean).
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
        self.head = ops.BasicBlock(3, n_feats, kernel_size, 1, 1)
        self.b1 = Block(n_feats, n_feats)
        self.b2 = Block(n_feats, n_feats)
        self.b3 = Block(n_feats, n_feats)
        self.b4 = Block(n_feats, n_feats)
        self.tail = nn.Conv2d(n_feats, 3, kernel_size, 1, 1, 1)

    def forward(self, x):
        s = self.sub_mean(x)
        h = self.head(s)
        # Four sequential feature blocks.
        b1 = self.b1(h)
        b2 = self.b2(b1)
        b3 = self.b3(b2)
        b_out = self.b4(b3)
        res = self.tail(b_out)
        out = self.add_mean(res)
        # Global residual: output = input + predicted correction.
        f_out = (out + x)
        return f_out
def plot_corr_heatmap(corr_map, path):
    """Render ``corr_map`` as an annotated seaborn heatmap and save it to ``path``."""
    sns.set(style='whitegrid', font_scale=1.5)
    plt.figure(figsize=(20, 10))
    heatmap_axes = sns.heatmap(corr_map, annot=True, annot_kws={'size': 8}, fmt='.1g')
    figure = heatmap_axes.get_figure()
    figure.savefig(path, bbox_inches='tight')
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close()
def get_states(states: dict):
    """Flatten dialogue-state frames into a single slot -> values mapping.

    The pseudo-slot 'requested_slots' is skipped. If the same slot name
    occurs under several domains, the last domain encountered wins.
    """
    reference = {}
    for frame in states.values():
        for slot, values in frame['slot_values'].items():
            if slot == 'requested_slots':
                continue
            reference[slot] = values
    return reference
def test_load_img_from_numpy():
    """LoadImageFromNdarray should honor color_type when loading ndarray images."""
    # 3-channel input with color output -> stays 3-channel.
    results = {'img': np.ones((32, 100, 3), dtype=np.uint8)}
    loader = LoadImageFromNdarray(color_type='color')
    out = loader(results)
    assert out['img'].shape[2] == 3
    assert len(out['img'].shape) == 3
    # 1-channel input with color output -> expanded to 3 channels.
    results = {'img': np.ones((32, 100, 1), dtype=np.uint8)}
    loader = LoadImageFromNdarray(color_type='color')
    out = loader(results)
    assert out['img'].shape[2] == 3
    # 3-channel input with grayscale output -> collapsed to one channel.
    results = {'img': np.ones((32, 100, 3), dtype=np.uint8)}
    loader = LoadImageFromNdarray(color_type='grayscale', to_float32=True)
    out = loader(results)
    assert out['img'].shape[2] == 1
class Garment(object):
    """A named garment of a given type, with helpers for its data-set paths."""

    def __init__(self, name, type):
        self.name = name
        self.type = type

    def to_filter_string(self):
        """Return 'type/name' using a literal forward slash (not OS-dependent)."""
        return self.type + '/' + self.name

    def to_rel_folder(self):
        """Folder for this garment relative to a data root: <type>/<name>."""
        return os.path.join(self.type, self.name)

    def to_abs_path(self, data_root):
        """Absolute garment folder under ``data_root``."""
        return os.path.join(data_root, self.type, self.name)

    def to_spec_path(self, data_root):
        """Path to this garment's specification.json under ``data_root``."""
        return os.path.join(self.to_abs_path(data_root), 'specification.json')
class BaseModel():
    """Abstract base for surrogate models; subclasses implement all hooks.

    Only ``_create_model`` is mandatory (it raises); the remaining methods
    are no-op placeholders that subclasses override.
    """

    def _create_model(self, X, Y):
        """Build the underlying model from initial data. Must be overridden."""
        raise NotImplementedError('')

    def _update_model(self, X_all, Y_all, itr=0):
        # No-op placeholder; subclasses presumably refit with all observed data.
        return

    def predict(self, X):
        # No-op placeholder; subclasses return predictions at X.
        return

    def predict_withGradients(self, X):
        # No-op placeholder; subclasses also return prediction gradients at X.
        return
.service(**PAYLOAD_TOO_LARGE)
.openapi_version('3.0')
def test_too_large_payload(cli, schema_url, service):
    """Report upload should fail cleanly when the payload exceeds the limit.

    NOTE(review): the leading `.service`/`.openapi_version` lines look like
    truncated decorator calls -- confirm against the original file.
    """
    result = cli.run(schema_url, 'my-api', '--report', f'--schemathesis-io-token={service.token}', f'--schemathesis-io-url={service.base_url}')
    # CLI exits with the test-failure code rather than crashing.
    assert (result.exit_code == ExitCode.TESTS_FAILED), result.stdout
    lines = get_stdout_lines(result.stdout)
    assert ('Upload: FAILED' in lines)
    assert (PAYLOAD_TOO_LARGE_MESSAGE in lines)
def prepare_args(args=None):
    """Parse CLI + mmcv config + HuggingFace training arguments.

    Combines three sources in order: an argparse parser (config path and
    ``--cfg-options`` overrides), an ``HfArgumentParser`` for
    ``Seq2SeqTrainingArguments``, and the ``training_args`` section of the
    mmcv-style config file. Any argument consumed by neither parser is an
    error, as is any required training argument left unassigned.

    Returns:
        (cfg, training_args): the merged config and the constructed
        ``Seq2SeqTrainingArguments``.

    Raises:
        ValueError: on unknown arguments or missing required training args.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    hf_parser = HfArgumentParser((Seq2SeqTrainingArguments,))
    # block_required_error defers "required argument" failures so required
    # fields can instead be satisfied from the config file below.
    (hf_parser, required) = block_required_error(hf_parser)
    (args, unknown_args) = parser.parse_known_args(args)
    # Whatever argparse did not consume is offered to the HF parser.
    (known_hf_args, unknown_args) = hf_parser.parse_known_args(unknown_args)
    if unknown_args:
        raise ValueError(f'''Some specified arguments are not used by the ArgumentParser or HfArgumentParser
: {unknown_args}''')
    cfg = Config.fromfile(args.config)
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    # CLI-provided HF args take precedence over the config file's values.
    training_args = cfg.training_args
    training_args.update(vars(known_hf_args))
    req_but_not_assign = [item for item in required if (item not in training_args)]
    if req_but_not_assign:
        raise ValueError(f'Requires {req_but_not_assign} but not assign.')
    cfg.training_args = training_args
    training_args = Seq2SeqTrainingArguments(**training_args)
    training_args = check_output_dir(training_args)
    # Log the effective configuration from the main process only.
    if is_main_process(training_args.local_rank):
        to_logging_cfg = Config()
        to_logging_cfg.model_args = cfg.model_args
        to_logging_cfg.data_args = cfg.data_args
        to_logging_cfg.training_args = cfg.training_args
        logger.info(to_logging_cfg.pretty_text)
    if training_args.should_log:
        transformers.logging.set_verbosity_info()
    # Propagate the process-appropriate log level to all involved libraries.
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.logging.set_verbosity(log_level)
    transformers.logging.enable_default_handler()
    transformers.logging.enable_explicit_format()
    logger.info(f'Training/evaluation parameters {training_args}')
    logger.warning((f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}
''' + f' distributed training: {bool((training_args.local_rank != (- 1)))}, fp16 training: {training_args.fp16}'))
    set_seed(training_args.seed)
    return (cfg, training_args)
def resnet50(cuda=True, model_root=None):
    """Build a pretrained ResNet-50 and return (model, data_getter, is_imagenet)."""
    print('Building and initializing resnet-50 parameters')
    from imagenet import resnet
    model = resnet.resnet50(True, model_root)
    if cuda:
        model = model.cuda()
    return (model, dataset.get, True)
class TFRobertaForMultipleChoice(metaclass=DummyObject):
    """Auto-generated placeholder used when TensorFlow is not installed.

    Instantiating it raises a helpful error via ``requires_backends``.
    """
    # Backends that must be importable for the real class to be usable.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def get_keywords():
    """Return the git VCS keywords substituted into this file at archive time.

    Versioneer-style helper; the literal values are injected by git's
    keyword expansion and must not be edited by hand.
    """
    return {
        'refnames': ' (HEAD -> main)',
        'full': 'bdec5d6c54c5fa0af4bfe70b641b6d90',
        'date': '2023-12-22 15:23:06 +0100',
    }
class WRGAT(torch.nn.Module):
    """Two-layer weighted relational GAT returning (log-probs, raw logits)."""

    def __init__(self, num_features, num_classes, num_relations, dims=16, drop=0, root=True):
        super(WRGAT, self).__init__()
        self.conv1 = WeightedRGATConv(num_features, dims, num_relations=num_relations, root_weight=root)
        self.conv2 = WeightedRGATConv(dims, num_classes, num_relations=num_relations, root_weight=root)
        self.drop = torch.nn.Dropout(p=drop)

    def forward(self, x, edge_index, edge_weight, edge_color):
        # First relational convolution + ReLU, then dropout, then the
        # classification convolution.
        hidden = F.relu(self.conv1(x, edge_index, edge_weight=edge_weight, edge_type=edge_color))
        hidden = self.drop(hidden)
        logits = self.conv2(hidden, edge_index, edge_weight=edge_weight, edge_type=edge_color)
        return (F.log_softmax(logits, dim=1), logits)
def test():
    """singletons wraps each scalar of a flat array in its own length-1 list."""
    arr = ak.Array([1, 2, 3])
    wrapped = ak.operations.singletons(arr)
    assert wrapped.to_list() == [[1], [2], [3]]
def expand_sub(substr, names):
    """Expand <name>-style template substitutions in ``substr``.

    Looks like numpy.distutils' ``from_template`` machinery: each template
    name maps to a comma-separated replacement list, and the template is
    emitted once per replacement index.

    NOTE(review): several ``str.replace('', ...)`` calls below operate on the
    empty string, which is almost certainly extraction damage -- upstream
    these handle escape markers (e.g. ``'\\>'``/``'\\<'``). Confirm against
    the original file before relying on this copy.
    """
    substr = substr.replace('\\>', '')
    substr = substr.replace('\\<', '')
    # Named replacement lists declared inline in the substring.
    lnames = find_repl_patterns(substr)
    # Normalize named back-references to <name> form.
    substr = named_re.sub('<\\1>', substr)

    def listrepl(mobj):
        # Assign a (possibly reused) name to an anonymous replacement list.
        thelist = conv(mobj.group(1).replace('\\,', ''))
        if template_name_re.match(thelist):
            return ('<%s>' % thelist)
        name = None
        # Reuse the existing name if this exact list was seen before.
        for key in lnames.keys():
            if (lnames[key] == thelist):
                name = key
        if (name is None):
            name = unique_key(lnames)
            lnames[name] = thelist
        return ('<%s>' % name)
    substr = list_re.sub(listrepl, substr)
    numsubs = None
    base_rule = None
    rules = {}
    # Build the per-name replacement rules; all lists must agree in length
    # with the first ("base") rule, otherwise they are ignored with a warning.
    for r in template_re.findall(substr):
        if (r not in rules):
            thelist = lnames.get(r, names.get(r, None))
            if (thelist is None):
                raise ValueError(('No replicates found for <%s>' % r))
            if ((r not in names) and (not thelist.startswith('_'))):
                names[r] = thelist
            rule = [i.replace('', ',') for i in thelist.split(',')]
            num = len(rule)
            if (numsubs is None):
                numsubs = num
                rules[r] = rule
                base_rule = r
            elif (num == numsubs):
                rules[r] = rule
            else:
                print('Mismatch in number of replacements (base <{}={}>) for <{}={}>. Ignoring.'.format(base_rule, ','.join(rules[base_rule]), r, thelist))
    if (not rules):
        return substr

    def namerepl(mobj):
        # k-th replacement for <name>; names without a rule expand to themselves.
        name = mobj.group(1)
        return rules.get(name, ((k + 1) * [name]))[k]
    newstr = ''
    # Emit one fully substituted copy of the template per replacement index.
    for k in range(numsubs):
        newstr += (template_re.sub(namerepl, substr) + '\n\n')
    newstr = newstr.replace('', '>')
    newstr = newstr.replace('', '<')
    return newstr
class LinearRankMetricCodeNearestNeighborDecoder(Decoder):
    """Exhaustive nearest-neighbor decoder for linear rank-metric codes.

    Decodes by scanning every codeword and keeping the one closest to the
    received word in rank distance -- exponential in the code dimension, so
    only practical for small codes.
    """

    def __init__(self, code):
        super().__init__(code, code.ambient_space(), code._default_encoder_name)

    def __eq__(self, other):
        # Equal iff the other decoder is the same kind and wraps an equal code.
        return (isinstance(other, LinearRankMetricCodeNearestNeighborDecoder) and (self.code() == other.code()))

    def _repr_(self):
        # Sage plain-text representation hook.
        return ('Nearest neighbor decoder for %s' % self.code())

    def _latex_(self):
        # Sage LaTeX representation hook.
        return ('\\textnormal{Nearest neighbor decoder for }%s' % self.code()._latex_())

    def decode_to_code(self, r):
        """Return a codeword of minimal rank distance to the received word ``r``."""
        C = self.code()
        # Seed with the zero codeword; its distance to r is the rank weight of r.
        c_min = C.zero()
        h_min = C.rank_weight_of_vector(r)
        for c in C:
            if (C.rank_weight_of_vector((c - r)) < h_min):
                h_min = C.rank_weight_of_vector((c - r))
                c_min = c
        c_min.set_immutable()
        return c_min

    def decoding_radius(self):
        """Largest number of rank errors guaranteed correctable: (d - 1) // 2."""
        return ((self.code().minimum_distance() - 1) // 2)
def load_arch_lib(arch):
    """Load the '<arch.name>_defs.py' module located next to this file."""
    here = os.path.dirname(os.path.abspath(__file__))
    archlib_path = os.path.join(here, arch.name + '_defs.py')
    return load_module(archlib_path)
def split_dataset(dataset, n_splits):
    """Partition ``dataset`` round-robin into ``n_splits`` interleaved Subsets."""
    total = len(dataset)
    # Subset i takes indices i, i + n_splits, i + 2*n_splits, ...
    return [Subset(dataset, np.arange(offset, total, n_splits)) for offset in range(n_splits)]
def to_dataloader(dataset, bsz):
    """Wrap ``dataset`` in a shuffling DataLoader with batch size ``bsz``."""
    loader = torch.utils.data.DataLoader(dataset, batch_size=bsz, shuffle=True)
    return loader
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.