code stringlengths 101 5.91M |
|---|
class ProxyException(Exception):
    """Exception that stands in for a remote/proxied exception.

    Carries the original exception type's name plus its argument tuple,
    and renders itself like the original, e.g. ``ValueError('boom',)``.
    """

    def __init__(self, tp_name, args):
        # Name of the original exception type (e.g. 'ValueError').
        self.tp_name = tp_name
        # Original argument tuple; stored on the standard Exception slot.
        self.args = args

    def __repr__(self):
        # Mimic the repr of the original exception: <type name><args repr>.
        return '{}{!r}'.format(self.tp_name, self.args)
def shapes_equal(this: TensorType, that: TensorType) -> TensorType:
    """Return a scalar boolean tensor: True iff `this` and `that` have the
    same dynamic shape (same rank and same size along every axis).

    Bug fixed: the original used Python `and` between tensors, which is
    invalid on graph tensors (truth value is undefined) and also dropped
    the short-circuit — comparing tf.shape vectors of different lengths
    element-wise fails when the ranks differ. tf.cond restores lazy
    evaluation of the per-axis comparison.
    """
    ranks_match = tf.equal(tf.rank(this), tf.rank(that))
    return tf.cond(
        ranks_match,
        lambda: tf.reduce_all(tf.equal(tf.shape(this), tf.shape(that))),
        lambda: tf.constant(False),
    )
class BaseModel(object):
    """Sequence-to-sequence base model (TF1 / tf.contrib NMT-style).

    Builds the full train/eval/infer graph around an encoder-decoder
    architecture. Subclasses must implement `_build_encoder` and
    `_build_decoder_cell`.
    """

    def __init__(self, hparams, mode, iterator, source_vocab_table, target_vocab_table, reverse_target_vocab_table=None, scope=None, extra_args=None):
        """Create the model for the given mode.

        Args:
            hparams: hyperparameter namespace (vocab sizes, layers, lr, ...).
            mode: one of tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}.
            iterator: an iterator_utils.BatchedInput of input tensors.
            source_vocab_table: lookup table mapping source words to ids.
            target_vocab_table: lookup table mapping target words to ids.
            reverse_target_vocab_table: id->word table; required for INFER.
            scope: optional variable scope name for the network.
            extra_args: optional namespace of customization hooks
                (only `single_cell_fn` is read here).
        """
        assert isinstance(iterator, iterator_utils.BatchedInput)
        self.iterator = iterator
        self.mode = mode
        self.src_vocab_table = source_vocab_table
        self.tgt_vocab_table = target_vocab_table
        self.src_vocab_size = hparams.src_vocab_size
        self.tgt_vocab_size = hparams.tgt_vocab_size
        self.num_layers = hparams.num_layers
        self.num_gpus = hparams.num_gpus
        self.time_major = hparams.time_major
        # Optional hook for customizing creation of a single RNN cell.
        self.single_cell_fn = None
        if extra_args:
            self.single_cell_fn = extra_args.single_cell_fn
        # All variables created below share this initializer.
        initializer = model_helper.get_initializer(hparams.init_op, hparams.random_seed, hparams.init_weight)
        tf.get_variable_scope().set_initializer(initializer)
        self.init_embeddings(hparams, scope)
        self.batch_size = tf.size(self.iterator.source_sequence_length)
        with tf.variable_scope((scope or 'build_network')):
            with tf.variable_scope('decoder/output_projection'):
                # Projection from decoder hidden state to target-vocab logits.
                self.output_layer = layers_core.Dense(hparams.tgt_vocab_size, use_bias=False, name='output_projection')
        # res is (logits, loss, final_context_state, sample_id).
        res = self.build_graph(hparams, scope=scope)
        if (self.mode == tf.contrib.learn.ModeKeys.TRAIN):
            self.train_loss = res[1]
            # Total source + target tokens, for words-per-second reporting.
            self.word_count = (tf.reduce_sum(self.iterator.source_sequence_length) + tf.reduce_sum(self.iterator.target_sequence_length))
        elif (self.mode == tf.contrib.learn.ModeKeys.EVAL):
            self.eval_loss = res[1]
        elif (self.mode == tf.contrib.learn.ModeKeys.INFER):
            (self.infer_logits, _, self.final_context_state, self.sample_id) = res
            self.sample_words = reverse_target_vocab_table.lookup(tf.to_int64(self.sample_id))
        if (self.mode != tf.contrib.learn.ModeKeys.INFER):
            # Number of target tokens; used to normalize perplexity.
            self.predict_count = tf.reduce_sum(self.iterator.target_sequence_length)
        self.global_step = tf.Variable(0, trainable=False)
        params = tf.trainable_variables()
        if (self.mode == tf.contrib.learn.ModeKeys.TRAIN):
            self.learning_rate = tf.constant(hparams.learning_rate)
            # Apply warmup first, then the decay schedule on top of it.
            self.learning_rate = self._get_learning_rate_warmup(hparams)
            self.learning_rate = self._get_learning_rate_decay(hparams)
            if (hparams.optimizer == 'sgd'):
                opt = tf.train.GradientDescentOptimizer(self.learning_rate)
                tf.summary.scalar('lr', self.learning_rate)
            elif (hparams.optimizer == 'adam'):
                assert (float(hparams.learning_rate) <= 0.001), ('! High Adam learning rate %g' % hparams.learning_rate)
                opt = tf.train.AdamOptimizer(self.learning_rate)
            gradients = tf.gradients(self.train_loss, params, colocate_gradients_with_ops=hparams.colocate_gradients_with_ops)
            (clipped_gradients, gradient_norm_summary) = model_helper.gradient_clip(gradients, max_gradient_norm=hparams.max_gradient_norm)
            self.update = opt.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step)
            self.train_summary = tf.summary.merge(([tf.summary.scalar('lr', self.learning_rate), tf.summary.scalar('train_loss', self.train_loss)] + gradient_norm_summary))
        if (self.mode == tf.contrib.learn.ModeKeys.INFER):
            self.infer_summary = self._get_infer_summary(hparams)
        self.saver = tf.train.Saver(tf.global_variables())
        utils.print_out('# Trainable variables')
        for param in params:
            utils.print_out((' %s, %s, %s' % (param.name, str(param.get_shape()), param.op.device)))

    def _get_learning_rate_warmup(self, hparams):
        """Return the learning rate with exponential warmup applied during
        the first `learning_rate_warmup_steps` global steps."""
        warmup_steps = hparams.learning_rate_warmup_steps
        warmup_factor = hparams.learning_rate_warmup_factor
        print((' learning_rate=%g, learning_rate_warmup_steps=%d, learning_rate_warmup_factor=%g, starting_learning_rate=%g' % (hparams.learning_rate, warmup_steps, warmup_factor, (hparams.learning_rate * (warmup_factor ** warmup_steps)))))
        # Inverse-decay multiplier; shrinks toward 1 as global_step approaches warmup_steps.
        inv_decay = (warmup_factor ** tf.to_float((warmup_steps - self.global_step)))
        return tf.cond((self.global_step < hparams.learning_rate_warmup_steps), (lambda : (inv_decay * self.learning_rate)), (lambda : self.learning_rate), name='learning_rate_warump_cond')

    def _get_learning_rate_decay(self, hparams):
        """Return the learning rate with exponential decay applied once
        global_step passes start_decay_step."""
        if (hparams.learning_rate_decay_scheme and (hparams.learning_rate_decay_scheme == 'luong')):
            # Luong-style schedule: start halving at the training midpoint.
            start_decay_step = int((hparams.num_train_steps / 2))
            decay_steps = int((hparams.num_train_steps / 10))
            decay_factor = 0.5
        else:
            start_decay_step = hparams.start_decay_step
            decay_steps = hparams.decay_steps
            decay_factor = hparams.decay_factor
        print((' decay_scheme=%s, start_decay_step=%d, decay_steps %d, decay_factor %g' % (hparams.learning_rate_decay_scheme, hparams.start_decay_step, hparams.decay_steps, hparams.decay_factor)))
        return tf.cond((self.global_step < start_decay_step), (lambda : self.learning_rate), (lambda : tf.train.exponential_decay(self.learning_rate, (self.global_step - start_decay_step), decay_steps, decay_factor, staircase=True)), name='learning_rate_decay_cond')

    def init_embeddings(self, hparams, scope):
        """Create encoder and decoder embedding matrices (possibly shared)."""
        (self.embedding_encoder, self.embedding_decoder) = model_helper.create_emb_for_encoder_and_decoder(share_vocab=hparams.share_vocab, src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, src_embed_size=hparams.num_units, tgt_embed_size=hparams.num_units, num_partitions=hparams.num_embeddings_partitions, scope=scope)

    def train(self, sess):
        """Run one training step; returns update op result, loss, counts,
        summary, global step, word count and batch size."""
        assert (self.mode == tf.contrib.learn.ModeKeys.TRAIN)
        return sess.run([self.update, self.train_loss, self.predict_count, self.train_summary, self.global_step, self.word_count, self.batch_size])

    def eval(self, sess):
        """Run one evaluation step; returns loss, predict count, batch size."""
        assert (self.mode == tf.contrib.learn.ModeKeys.EVAL)
        return sess.run([self.eval_loss, self.predict_count, self.batch_size])

    def build_graph(self, hparams, scope=None):
        """Build encoder + decoder; returns (logits, loss, final_context_state,
        sample_id). Loss is None in INFER mode."""
        utils.print_out(('# creating %s graph ...' % self.mode))
        dtype = tf.float32
        num_layers = hparams.num_layers
        num_gpus = hparams.num_gpus
        with tf.variable_scope((scope or 'dynamic_seq2seq'), dtype=dtype):
            (encoder_outputs, encoder_state) = self._build_encoder(hparams)
            (logits, sample_id, final_context_state) = self._build_decoder(encoder_outputs, encoder_state, hparams)
            if (self.mode != tf.contrib.learn.ModeKeys.INFER):
                # Compute loss on the same device as the last RNN layer.
                with tf.device(model_helper.get_device_str((num_layers - 1), num_gpus)):
                    loss = self._compute_loss(logits)
            else:
                loss = None
            return (logits, loss, final_context_state, sample_id)

    def _build_encoder(self, hparams):
        """Subclass hook: build the encoder and return (outputs, state)."""
        pass

    def _build_encoder_cell(self, hparams, num_layers, num_residual_layers, base_gpu=0):
        """Build a multi-layer RNN cell for the encoder."""
        return model_helper.create_rnn_cell(unit_type=hparams.unit_type, num_units=hparams.num_units, num_layers=num_layers, num_residual_layers=num_residual_layers, forget_bias=hparams.forget_bias, dropout=hparams.dropout, num_gpus=hparams.num_gpus, mode=self.mode, base_gpu=base_gpu, single_cell_fn=self.single_cell_fn)

    def _get_infer_maximum_iterations(self, hparams, source_sequence_length):
        """Maximum decoding steps at inference: either the configured cap or
        2x the longest source sentence in the batch."""
        if hparams.tgt_max_len_infer:
            maximum_iterations = hparams.tgt_max_len_infer
            utils.print_out((' decoding maximum_iterations %d' % maximum_iterations))
        else:
            decoding_length_factor = 2.0
            max_encoder_length = tf.reduce_max(source_sequence_length)
            maximum_iterations = tf.to_int32(tf.round((tf.to_float(max_encoder_length) * decoding_length_factor)))
        return maximum_iterations

    def _build_decoder(self, encoder_outputs, encoder_state, hparams):
        """Build the decoder (teacher-forced in TRAIN/EVAL; greedy or beam
        search in INFER). Returns (logits, sample_id, final_context_state);
        logits is tf.no_op() when beam search is used."""
        tgt_sos_id = tf.cast(self.tgt_vocab_table.lookup(tf.constant(hparams.sos)), tf.int32)
        tgt_eos_id = tf.cast(self.tgt_vocab_table.lookup(tf.constant(hparams.eos)), tf.int32)
        num_layers = hparams.num_layers
        num_gpus = hparams.num_gpus
        iterator = self.iterator
        maximum_iterations = self._get_infer_maximum_iterations(hparams, iterator.source_sequence_length)
        with tf.variable_scope('decoder') as decoder_scope:
            (cell, decoder_initial_state) = self._build_decoder_cell(hparams, encoder_outputs, encoder_state, iterator.source_sequence_length)
            if (self.mode != tf.contrib.learn.ModeKeys.INFER):
                # Teacher forcing: feed the ground-truth previous token.
                target_input = iterator.target_input
                if self.time_major:
                    target_input = tf.transpose(target_input)
                decoder_emb_inp = tf.nn.embedding_lookup(self.embedding_decoder, target_input)
                helper = tf.contrib.seq2seq.TrainingHelper(decoder_emb_inp, iterator.target_sequence_length, time_major=self.time_major)
                my_decoder = tf.contrib.seq2seq.BasicDecoder(cell, helper, decoder_initial_state)
                (outputs, final_context_state, _) = tf.contrib.seq2seq.dynamic_decode(my_decoder, output_time_major=self.time_major, swap_memory=True, scope=decoder_scope)
                sample_id = outputs.sample_id
                # Place the output projection on the GPU after the last layer.
                device_id = (num_layers if (num_layers < num_gpus) else (num_layers - 1))
                with tf.device(model_helper.get_device_str(device_id, num_gpus)):
                    logits = self.output_layer(outputs.rnn_output)
            else:
                beam_width = hparams.beam_width
                length_penalty_weight = hparams.length_penalty_weight
                start_tokens = tf.fill([self.batch_size], tgt_sos_id)
                end_token = tgt_eos_id
                if (beam_width > 0):
                    my_decoder = tf.contrib.seq2seq.BeamSearchDecoder(cell=cell, embedding=self.embedding_decoder, start_tokens=start_tokens, end_token=end_token, initial_state=decoder_initial_state, beam_width=beam_width, output_layer=self.output_layer, length_penalty_weight=length_penalty_weight)
                else:
                    helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(self.embedding_decoder, start_tokens, end_token)
                    my_decoder = tf.contrib.seq2seq.BasicDecoder(cell, helper, decoder_initial_state, output_layer=self.output_layer)
                (outputs, final_context_state, _) = tf.contrib.seq2seq.dynamic_decode(my_decoder, maximum_iterations=maximum_iterations, output_time_major=self.time_major, swap_memory=True, scope=decoder_scope)
                if (beam_width > 0):
                    # Beam search exposes no per-step logits.
                    logits = tf.no_op()
                    sample_id = outputs.predicted_ids
                else:
                    logits = outputs.rnn_output
                    sample_id = outputs.sample_id
        return (logits, sample_id, final_context_state)

    def get_max_time(self, tensor):
        """Size of the time axis (static if known, else dynamic)."""
        time_axis = (0 if self.time_major else 1)
        return (tensor.shape[time_axis].value or tf.shape(tensor)[time_axis])

    def _build_decoder_cell(self, hparams, encoder_outputs, encoder_state, source_sequence_length):
        """Subclass hook: build the decoder cell and its initial state."""
        pass

    def _compute_loss(self, logits):
        """Cross-entropy loss, masked past each target length and averaged
        over the batch (per-sentence normalization)."""
        target_output = self.iterator.target_output
        if self.time_major:
            target_output = tf.transpose(target_output)
        max_time = self.get_max_time(target_output)
        crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target_output, logits=logits)
        # Zero out positions beyond each sentence's target length.
        target_weights = tf.sequence_mask(self.iterator.target_sequence_length, max_time, dtype=logits.dtype)
        if self.time_major:
            target_weights = tf.transpose(target_weights)
        loss = (tf.reduce_sum((crossent * target_weights)) / tf.to_float(self.batch_size))
        return loss

    def _get_infer_summary(self, hparams):
        """Subclass hook: extra inference summaries (default: none)."""
        return tf.no_op()

    def infer(self, sess):
        """Run one inference step; returns logits, summary, sample ids and
        the corresponding target words."""
        assert (self.mode == tf.contrib.learn.ModeKeys.INFER)
        return sess.run([self.infer_logits, self.infer_summary, self.sample_id, self.sample_words])

    def decode(self, sess):
        """Decode a batch, returning (sample_words, infer_summary) with
        sample_words transposed back to batch-major if needed."""
        (_, infer_summary, _, sample_words) = self.infer(sess)
        if self.time_major:
            sample_words = sample_words.transpose()
        return (sample_words, infer_summary)
def write_cv_desc_df():
    """Collect per-run JSON experiment records from the cwd into CSVs.

    Reads every '*.json' file, builds a flat dataframe (written to
    df.csv), prints it, and writes a grouped describe() summary of
    accuracy and total time (desc.csv).
    """
    records = []
    for path in glob.glob('*.json'):
        with open(path, 'rb') as handle:
            run = json.load(handle)
        cfg = run['config']
        step_every = cfg['step_every']
        records.append({
            'name': path,
            'alg': alg(path),
            'seed': cfg['seed'],
            'agg': step_every,
            # Effective batch = gradient-accumulation steps x minibatch size.
            'batch': step_every * cfg['bs_train'],
            'total_time': cfg['exp_total_time'],
            'acc': run['results']['test_acc'][-1],
        })
    df = pd.DataFrame.from_records(records)
    print(df)
    desc = df.groupby(['alg', 'batch', 'agg']).describe()[['acc', 'total_time']]
    print(desc)
    desc.to_csv('desc.csv', index=True)
    df.to_csv('df.csv', index=False)
def plt_props():
    """Apply the shared matplotlib rcParams used for all figures."""
    plt.rcParams.update({
        'font.size': 12,
        'axes.labelsize': 12,
        'font.family': 'serif',
        'font.style': 'normal',
        'font.variant': 'normal',
        'xtick.labelsize': 12,
        'ytick.labelsize': 12,
        'legend.fontsize': 10,
        'figure.titlesize': 12,
        'figure.figsize': (6, 4),
        'lines.linewidth': 2.5,
        'lines.markersize': 8,
    })
def write_version_py():
    """Regenerate the package version file from VERSION plus the git hash.

    Reads the short version from the VERSION file, appends '+<sha>' from
    get_hash(), and writes the generated module to `version_file`.
    """
    template = "# GENERATED VERSION FILE\n# TIME: {}\n__version__ = '{}'\nshort_version = '{}'\nversion_info = ({})\n"
    sha = get_hash()
    with open('VERSION', 'r') as fp:
        short_version = fp.read().strip()
    # Numeric components stay bare; anything else (e.g. 'rc1') is quoted.
    version_info = ', '.join(
        part if part.isdigit() else f'"{part}"'
        for part in short_version.split('.')
    )
    full_version = short_version + '+' + sha
    body = template.format(time.asctime(), full_version, short_version, version_info)
    with open(version_file, 'w') as fp:
        fp.write(body)
class ShareSubprocVecEnv(ShareVecEnv):
    """Vectorized env that runs each environment in its own subprocess.

    The parent communicates with each worker over a Pipe using
    (command, payload) tuples; workers additionally return a shared
    ("share") observation and an available-actions mask.
    """

    def __init__(self, env_fns, spaces=None):
        """Spawn one worker per env_fn and query spaces from the first.

        Args:
            env_fns: list of zero-arg callables, each creating one env.
            spaces: unused; kept for interface compatibility.
        """
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        (self.remotes, self.work_remotes) = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=shareworker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            # Daemonize so workers die if the parent crashes.
            p.daemon = True
            p.start()
        for remote in self.work_remotes:
            # Close worker-side pipe ends in the parent; workers keep theirs.
            remote.close()
        self.remotes[0].send(('get_spaces', None))
        (observation_space, share_observation_space, action_space) = self.remotes[0].recv()
        ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space)

    def step_async(self, actions):
        """Dispatch one action to every worker without waiting."""
        for (remote, action) in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True

    def step_wait(self):
        """Collect step results; returns (obs, share_obs, rews, dones,
        infos, available_actions) with array fields stacked."""
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        (obs, share_obs, rews, dones, infos, available_actions) = zip(*results)
        return (obs, np.stack(share_obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions))

    def reset(self):
        """Reset every env; returns (obs, share_obs, available_actions)."""
        for remote in self.remotes:
            remote.send(('reset', None))
        results = [remote.recv() for remote in self.remotes]
        (obs, share_obs, available_actions) = zip(*results)
        return (obs, np.stack(share_obs), np.stack(available_actions))

    def reset_task(self):
        """Ask every worker to reset its task; returns stacked results."""
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])

    def anneal_reward_shaping_factor(self, steps):
        """Send each worker its current step count for reward-shaping annealing."""
        for (remote, step) in zip(self.remotes, steps):
            remote.send(('anneal_reward_shaping_factor', step))

    def reset_featurize_type(self, featurize_types):
        """Send each worker a new featurization type."""
        for (remote, featurize_type) in zip(self.remotes, featurize_types):
            remote.send(('reset_featurize_type', featurize_type))

    def load_policy(self, load_policy_cfgs):
        """Send each worker a policy-loading config."""
        for (remote, load_policy_cfg) in zip(self.remotes, load_policy_cfgs):
            remote.send(('load_policy', load_policy_cfg))

    def close(self):
        """Drain pending results, tell workers to exit, and join them."""
        if self.closed:
            return
        if self.waiting:
            # Consume outstanding step results so workers can see 'close'.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True

    def render(self, mode='rgb_array'):
        """Ask workers to render; returns stacked frames for 'rgb_array'
        (other modes render worker-side and return None here)."""
        for remote in self.remotes:
            remote.send(('render', mode))
        if (mode == 'rgb_array'):
            frame = [remote.recv() for remote in self.remotes]
            return np.stack(frame)
# NOTE(review): the decorator prefix was garbled in the source ('.skipif(...)'
# is a syntax error); restored as the standard pytest skip marker — confirm
# that `pytest` is imported at the top of the original file.
@pytest.mark.skipif((not m.has_optional), reason='no <optional>')
def test_move_and_copy_load_optional():
    """Check move/copy counts when loading values through std::optional
    arguments (pybind11 copy/move-policy test)."""
    cstats = m.move_and_copy_cstats()
    (c_m, c_mc, c_c) = (cstats['MoveOnlyInt'], cstats['MoveOrCopyInt'], cstats['CopyOnlyInt'])
    # These load via std::optional which uses an extra copy/move.
    assert (m.move_optional(10) == 10)
    assert (m.move_or_copy_optional(11) == 11)
    assert (m.copy_optional(12) == 12)
    assert (m.move_optional_tuple((3, 4, 5)) == 12)
    # Move-only type: never copied.
    assert ((c_m.copy_assignments + c_m.copy_constructions) == 0)
    assert (c_m.move_assignments == 2)
    assert (c_m.move_constructions == 5)
    # Move-or-copy type prefers moves here.
    assert ((c_mc.copy_assignments + c_mc.copy_constructions) == 0)
    assert (c_mc.move_assignments == 2)
    assert (c_mc.move_constructions == 5)
    # Copy-only type: everything is copied.
    assert (c_c.copy_assignments == 2)
    assert (c_c.copy_constructions == 5)
    # No instances may leak.
    assert (((c_m.alive() + c_mc.alive()) + c_c.alive()) == 0)
class SampleCountMetricPrinter(EventWriter):
    """EventWriter that logs the 20-window running average of every
    'batch/...' history buffer in the current event storage."""

    def __init__(self):
        self.logger = logging.getLogger(__name__)

    def write(self):
        """Log one comma-separated line of batch-statistic averages."""
        storage = get_event_storage()
        stats = [
            f'{name} {history.avg(20)}'
            for name, history in storage.histories().items()
            if name.startswith('batch/')
        ]
        self.logger.info(', '.join(stats))
class GEGLU(nn.Module):
    """Gated GELU activation: splits the last dimension in half and
    multiplies one half by GELU of the other, halving the feature size."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # The last dimension must split evenly into (value, gate) halves.
        assert x.shape[-1] % 2 == 0
        value, gate = x.chunk(2, dim=-1)
        return value * F.gelu(gate)
# NOTE(review): decorators were garbled in the source ('_if_32bit' and bare
# '.parametrize' lines); restored as sklearn's skip_if_32bit plus standard
# pytest parametrization — confirm against the original file's imports.
@skip_if_32bit
@pytest.mark.parametrize('X_train, y_train, X_test', [[X, Y, T], [X2, Y2, T2], [X_blobs[:80], y_blobs[:80], X_blobs[80:]], [iris.data, iris.target, iris.data]])
@pytest.mark.parametrize('kernel', ['linear', 'poly', 'rbf', 'sigmoid'])
@pytest.mark.parametrize('sparse_container', (CSR_CONTAINERS + LIL_CONTAINERS))
def test_svc(X_train, y_train, X_test, kernel, sparse_container):
    """Check that an SVC trained on sparse input matches the dense model."""
    X_train = sparse_container(X_train)
    clf = svm.SVC(gamma=1, kernel=kernel, probability=True, random_state=0, decision_function_shape='ovo')
    check_svm_model_equal(clf, X_train, y_train, X_test)
# NOTE(review): the decorator prefix was garbled in the source
# ('.lower_builtin(...)' is a syntax error); restored via numba.extending —
# confirm the exact lower_builtin import path used by the original file.
@numba.extending.lower_builtin('begin_list', ArrayBuilderType)
def lower_beginlist(context, builder, sig, args):
    """Numba lowering for ArrayBuilder.begin_list(): emits a call to the
    C-level ArrayBuilder_beginlist and returns a dummy (None) value."""
    (arraybuildertype,) = sig.args
    (arraybuilderval,) = args
    # Unpack the builder struct so we can reach its raw C pointer.
    proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
    call(context, builder, libawkward.ArrayBuilder_beginlist, (proxyin.rawptr,))
    return context.get_dummy_value()
# NOTE(review): the decorator was garbled in the source ('_module()');
# restored as mmsegmentation's dataset registration — confirm DATASETS is
# imported from mmseg's registry at the top of the original file.
@DATASETS.register_module()
class ADE20KDataset(CustomDataset):
    """ADE20K semantic segmentation dataset (150 classes).

    Images use the '.jpg' suffix and annotation maps '.png'; label 0 in
    the annotation maps means 'ignore', hence reduce_zero_label=True.
    """
    CLASSES = ('wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', 'clock', 'flag')
    # One RGB color per class, index-aligned with CLASSES.
    PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], [102, 255, 0], [92, 0, 255]]

    def __init__(self, **kwargs):
        """Forward to CustomDataset with the ADE20K suffixes and
        zero-label reduction fixed."""
        super(ADE20KDataset, self).__init__(img_suffix='.jpg', seg_map_suffix='.png', reduce_zero_label=True, **kwargs)
class HybridRecommender(BaseRecommender, ABC):
    """Abstract recommender that can use both user and item features.

    The public methods are thin typed wrappers over BaseRecommender's
    protected `_fit_wrap` / `_predict_wrap` / `_fit_predict` /
    `_predict_pairs_wrap` / `_get_features_wrap` implementations.
    """

    def fit(self, log: SparkDataFrame, user_features: Optional[SparkDataFrame]=None, item_features: Optional[SparkDataFrame]=None) -> None:
        """Fit the model on an interaction log plus optional user/item features."""
        self._fit_wrap(log=log, user_features=user_features, item_features=item_features)

    def predict(self, log: SparkDataFrame, k: int, users: Optional[Union[(SparkDataFrame, Iterable)]]=None, items: Optional[Union[(SparkDataFrame, Iterable)]]=None, user_features: Optional[SparkDataFrame]=None, item_features: Optional[SparkDataFrame]=None, filter_seen_items: bool=True, recs_file_path: Optional[str]=None) -> Optional[SparkDataFrame]:
        """Recommend top-k items per user; returns a dataframe or, when
        recs_file_path is given, writes there instead (per wrapper contract)."""
        return self._predict_wrap(log=log, k=k, users=users, items=items, user_features=user_features, item_features=item_features, filter_seen_items=filter_seen_items, recs_file_path=recs_file_path)

    def fit_predict(self, log: SparkDataFrame, k: int, users: Optional[Union[(SparkDataFrame, Iterable)]]=None, items: Optional[Union[(SparkDataFrame, Iterable)]]=None, user_features: Optional[SparkDataFrame]=None, item_features: Optional[SparkDataFrame]=None, filter_seen_items: bool=True, recs_file_path: Optional[str]=None) -> Optional[SparkDataFrame]:
        """Fit on `log` and immediately predict top-k recommendations."""
        return self._fit_predict(log=log, k=k, users=users, items=items, user_features=user_features, item_features=item_features, filter_seen_items=filter_seen_items, recs_file_path=recs_file_path)

    def predict_pairs(self, pairs: SparkDataFrame, log: Optional[SparkDataFrame]=None, user_features: Optional[SparkDataFrame]=None, item_features: Optional[SparkDataFrame]=None, recs_file_path: Optional[str]=None, k: Optional[int]=None) -> Optional[SparkDataFrame]:
        """Score explicit (user, item) pairs rather than full top-k lists."""
        return self._predict_pairs_wrap(pairs=pairs, log=log, user_features=user_features, item_features=item_features, recs_file_path=recs_file_path, k=k)

    def get_features(self, ids: SparkDataFrame, features: Optional[SparkDataFrame]) -> Optional[Tuple[(SparkDataFrame, int)]]:
        """Return model features for the given ids plus their dimensionality."""
        return self._get_features_wrap(ids, features)
def get_detection_model(num_classes, pretrained=True, nms_thresh=0.3):
    """Build a Faster R-CNN (ResNet-50 FPN) detector with a custom head.

    Args:
        num_classes: number of output classes (including background).
        pretrained: load COCO-pretrained weights for the backbone model
            (default True — matches the previous hard-coded behavior).
        nms_thresh: NMS IoU threshold used by the RoI heads
            (default 0.3 — matches the previous hard-coded behavior).

    Returns:
        A torchvision FasterRCNN model whose box predictor outputs
        `num_classes` classes.
    """
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=pretrained)
    # Replace the classification head so it matches the requested class count.
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
    model.roi_heads.nms_thresh = nms_thresh
    return model
class VenmoWithdrawMoney(VirtualFunctionTool):
    """Declarative virtual-tool spec: withdraw money from the user's Venmo
    balance to a linked bank account (schema only, no executable logic)."""
    name = 'VenmoWithdrawMoney'
    summary = "Withdraw money from the user's Venmo balance to a linked bank account."
    # Input schema: a positive amount plus the target bank-account id.
    parameters: List[ArgParameter] = [{'name': 'amount', 'type': 'number', 'description': 'The amount of money to withdraw, must be positive.', 'required': True}, {'name': 'account_id', 'type': 'string', 'description': 'The unique identifier of the linked bank account.', 'required': True}]
    # Output schema: success flag, transaction id on success, error message otherwise.
    returns: List[ArgReturn] = [{'name': 'result', 'type': 'object', 'description': "An object containing 'success' (boolean, indicates whether the transaction was successful), 'transaction_id' (string, the unique identifier of the transaction, if successful), and 'error_message' (string, if unsuccessful)."}]
    # Declared failure modes surfaced to the caller.
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': "The 'amount' is negative or exceeds the user's balance."}, {'name': 'NotFoundException', 'description': "The 'account_id' does not exist."}]
class ASTResolver():
    """Utility for checking whether an AST expression refers to a specific
    Python object when evaluated against a given scope."""

    # Fixed: the original defined resolve_to without `self` or @staticmethod,
    # so calling it on an instance would misbind `node` to the instance.
    @staticmethod
    def resolve_to(node, wanted, scope):
        """Return True iff `node` resolves to the object `wanted`.

        `node` must be an ast.Name or a dotted ast.Attribute chain rooted
        at a Name; anything else returns False. `scope` is a dict (e.g.
        globals) or an object traversed with getattr; nested levels may be
        either. Any failed lookup returns False.
        """
        if isinstance(node, ast.Name):
            return (scope.get(node.id) is wanted)
        if (not isinstance(node, ast.Attribute)):
            return False
        # Walk a.b.c from the outermost Attribute down to the root Name,
        # collecting attribute names innermost-last.
        v = node.value
        chain = [node.attr]
        while isinstance(v, ast.Attribute):
            chain.append(v.attr)
            v = v.value
        if (not isinstance(v, ast.Name)):
            return False
        chain.append(v.id)
        # Resolve left-to-right: dicts by key, everything else by getattr.
        for attr in reversed(chain):
            try:
                if isinstance(scope, dict):
                    scope = scope[attr]
                else:
                    scope = getattr(scope, attr)
            except (KeyError, AttributeError):
                return False
        return (scope is wanted)
# NOTE(review): the decorator was garbled in the source
# ('_function_dispatch(...)'); restored as numpy's __array_function__
# dispatch wrapper — confirm array_function_dispatch is imported in the
# original file.
@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
def broadcast_arrays(*args, **kwargs):
    """Broadcast any number of arrays against each other.

    Accepts only the `subok` keyword; returns the inputs unchanged when
    they already share a common shape, otherwise read-only broadcast
    views of each input.
    """
    subok = kwargs.pop('subok', False)
    if kwargs:
        raise TypeError('broadcast_arrays() got an unexpected keyword argument {!r}'.format(list(kwargs.keys())[0]))
    args = [np.array(_m, copy=False, subok=subok) for _m in args]
    shape = _broadcast_shape(*args)
    if all(((array.shape == shape) for array in args)):
        # Common case: already broadcast — return the originals untouched.
        return args
    return [_broadcast_to(array, shape, subok=subok, readonly=False) for array in args]
class SimpleLasagneModel(object):
    """Thin wrapper around a Lasagne network.

    Compiles Theano training and prediction functions, runs minibatch
    training with scalar/histogram monitoring via a summary writer, and
    supports pickling by stubbing out unpicklable Theano pieces.

    NOTE(review): this code appears to target Python 2 (it relies on
    dict.keys() ordering matching dict.values() and uses .iteritems()).
    """

    def __init__(self, input_vars, target_vars, l_out, loss, optimizer, learning_rate=0.001, id=None):
        """Compile the train and predict functions.

        Args:
            input_vars: sequence of Theano input variables.
            target_vars: sequence of Theano target variables (length 1).
            l_out: Lasagne output layer of the network.
            loss: callable (prediction, target) -> elementwise loss tensor.
            optimizer: Lasagne update function (grads, params, learning_rate).
            learning_rate: step size handed to the optimizer.
            id: optional tag used to namespace logs and graph dumps.
        """
        if (not isinstance(input_vars, Sequence)):
            raise ValueError(('input_vars should be a sequence, instead got %s' % (input_vars,)))
        if (not isinstance(target_vars, Sequence)):
            # NOTE(review): this message interpolates input_vars; it likely
            # should interpolate target_vars — confirm before changing.
            raise ValueError(('target_vars should be a sequence, instead got %s' % (input_vars,)))
        self.get_options()
        self.input_vars = input_vars
        self.l_out = l_out
        self.loss = loss
        self.optimizer = optimizer
        self.id = id
        # Tags used to namespace summary keys ('id/') and log lines ('id: ').
        id_tag = ((self.id + '/') if self.id else '')
        id_tag_log = ((self.id + ': ') if self.id else '')
        if (self.options.verbosity >= 6):
            output_model_structure(l_out)
        params = self.params()
        (monitored, train_loss_grads, synth_vars) = self.get_train_loss(target_vars, params)
        # Relies on keys()/values() iteration order staying consistent.
        self.monitored_tags = monitored.keys()
        if self.options.true_grad_clipping:
            # Global-norm gradient clipping.
            scaled_grads = total_norm_constraint(train_loss_grads, self.options.true_grad_clipping)
        else:
            scaled_grads = train_loss_grads
        updates = optimizer(scaled_grads, params, learning_rate=learning_rate)
        if (not self.options.no_nan_suppression):
            # Skip parameter updates that would introduce NaNs.
            updates = apply_nan_suppression(updates, print_mode='none')
        if self.options.detect_nans:
            mode = MonitorMode(post_func=detect_nan)
        else:
            mode = None
        if (self.options.verbosity >= 2):
            print((id_tag_log + 'Compiling training function'))
        # Reuses the name `params` for the compiled function's inputs.
        params = ((input_vars + target_vars) + synth_vars)
        if (self.options.verbosity >= 6):
            print(('params = %s' % (params,)))
        self.train_fn = theano.function(params, monitored.values(), updates=updates, mode=mode, name=(id_tag + 'train'), on_unused_input='warn')
        if (self.options.run_dir and (not self.options.no_graphviz)):
            self.visualize_graphs({'loss': monitored['loss']}, out_dir=self.options.run_dir)
        # Deterministic forward pass (e.g. dropout disabled) for prediction.
        test_prediction = get_output(l_out, deterministic=True)
        if (self.options.verbosity >= 2):
            print((id_tag_log + 'Compiling prediction function'))
        if (self.options.verbosity >= 6):
            print(('params = %s' % (input_vars,)))
        self.predict_fn = theano.function(input_vars, test_prediction, mode=mode, name=(id_tag + 'predict'), on_unused_input='ignore')
        if (self.options.run_dir and (not self.options.no_graphviz)):
            self.visualize_graphs({'test_prediction': test_prediction}, out_dir=self.options.run_dir)

    def visualize_graphs(self, monitored, out_dir):
        """Dump each monitored Theano graph as an SVG under out_dir."""
        id_tag = ((self.id + '.') if self.id else '')
        # NOTE(review): .iteritems() is Python-2-only.
        for (tag, graph) in monitored.iteritems():
            tag = tag.replace('/', '.')
            pydotprint(graph, outfile=os.path.join(out_dir, ((id_tag + tag) + '.svg')), format='svg', var_with_name_simple=True)

    def params(self):
        """Return the network's trainable Lasagne parameters."""
        return get_all_params(self.l_out, trainable=True)

    def get_train_loss(self, target_vars, params):
        """Build the mean training loss and its gradients.

        Returns (monitored, grads, synth_vars): an OrderedDict of tensors
        to monitor (always containing 'loss', plus per-parameter gradients
        when monitor_grads is set), the gradient list, and an empty list
        of synthetic inputs.
        """
        assert (len(target_vars) == 1)
        prediction = get_output(self.l_out)
        mean_loss = self.loss(prediction, target_vars[0]).mean()
        monitored = [('loss', mean_loss)]
        grads = T.grad(mean_loss, params)
        if self.options.monitor_grads:
            for (p, grad) in zip(params, grads):
                monitored.append((('grad/' + p.name), grad))
        return (OrderedDict(monitored), grads, [])

    def fit(self, Xs, ys, batch_size, num_epochs, summary_writer=None, step=0):
        """Train for num_epochs over (Xs, ys) minibatches.

        Logs monitored values (and optionally parameter histograms and
        throughput) to summary_writer, offset by `step`. Returns a dict
        mapping each monitored tag to a list of per-epoch value arrays.
        """
        if (not isinstance(Xs, Sequence)):
            raise ValueError(('Xs should be a sequence, instead got %s' % (Xs,)))
        if (not isinstance(ys, Sequence)):
            raise ValueError(('ys should be a sequence, instead got %s' % (ys,)))
        history = OrderedDict(((tag, []) for tag in self.monitored_tags))
        id_tag = ((self.id + '/') if self.id else '')
        params = self.params()
        progress.start_task('Epoch', num_epochs)
        epoch_start = time.time()
        for epoch in range(num_epochs):
            progress.progress(epoch)
            history_epoch = OrderedDict(((tag, []) for tag in self.monitored_tags))
            num_minibatches_approx = ((len(ys[0]) // batch_size) + 1)
            progress.start_task('Minibatch', num_minibatches_approx)
            for (i, batch) in enumerate(self.minibatches(Xs, ys, batch_size, shuffle=True)):
                progress.progress(i)
                if (self.options.verbosity >= 8):
                    print(('types: %s' % ([type(v) for t in batch for v in t],)))
                    print(('shapes: %s' % ([v.shape for t in batch for v in t],)))
                (inputs, targets, synth) = batch
                monitored = self.train_fn(*((inputs + targets) + synth))
                for (tag, value) in zip(self.monitored_tags, monitored):
                    if (self.options.verbosity >= 10):
                        print(('%s: %s' % (tag, value)))
                    history_epoch[tag].append(value)
            progress.end_task()
            # Log per-epoch means: scalars directly, arrays as histograms.
            for (tag, values) in history_epoch.items():
                values_array = np.array([np.asarray(v) for v in values])
                history[tag].append(values_array)
                mean_values = np.mean(values_array, axis=0)
                if (len(mean_values.shape) == 0):
                    summary_writer.log_scalar((step + epoch), tag, mean_values)
                else:
                    summary_writer.log_histogram((step + epoch), tag, mean_values)
            if self.options.monitor_params:
                for param in params:
                    val = param.get_value()
                    tag = ('param/' + param.name)
                    if (len(val.shape) == 0):
                        summary_writer.log_scalar((step + epoch), tag, val)
                    else:
                        summary_writer.log_histogram((step + epoch), tag, val)
            epoch_end = time.time()
            examples_per_sec = (len(ys[0]) / (epoch_end - epoch_start))
            summary_writer.log_scalar((step + epoch), (id_tag + 'examples_per_sec'), examples_per_sec)
            epoch_start = epoch_end
        progress.end_task()
        return history

    def predict(self, Xs):
        """Run the compiled deterministic prediction function on Xs."""
        if (not isinstance(Xs, Sequence)):
            raise ValueError(('Xs should be a sequence, instead got %s' % (Xs,)))
        id_tag_log = ((self.id + ': ') if self.id else '')
        if (self.options.verbosity >= 8):
            print((id_tag_log + ('predict shapes: %s' % [x.shape for x in Xs])))
        return self.predict_fn(*Xs)

    def minibatches(self, inputs, targets, batch_size, shuffle=False):
        """Yield (inputs, targets, synth) minibatch triples.

        Lifted mostly verbatim from ``iterate_minibatches`` in the Lasagne
        examples; the trailing empty list matches the synthetic-variable
        slot expected by train_fn.
        """
        num_examples = len(targets[0])
        assert all(((len(X) == num_examples) for X in inputs)), repr(([type(X) for X in inputs] + [type(y) for y in targets]))
        assert all(((len(y) == num_examples) for y in targets)), repr(([type(X) for X in inputs] + [type(y) for y in targets]))
        if shuffle:
            indices = np.arange(num_examples)
            rng.shuffle(indices)
        # Step so the final (possibly short) batch is still emitted.
        last_batch = max(0, (num_examples - batch_size))
        for start_idx in range(0, (last_batch + 1), batch_size):
            if shuffle:
                excerpt = indices[start_idx:(start_idx + batch_size)]
            else:
                excerpt = slice(start_idx, (start_idx + batch_size))
            (yield ([X[excerpt] for X in inputs], [y[excerpt] for y in targets], []))

    def __getstate__(self):
        """Pickle support: replace unpicklable Theano objects with stubs."""
        state = dict(self.__dict__)
        state['loss'] = Unpicklable('loss')
        state['l_out'] = Unpicklable('l_out')
        return state

    def __setstate__(self, state):
        """Unpickle support: restore state and re-derive options."""
        self.__dict__.update(state)
        self.get_options()

    def get_options(self):
        """Snapshot global config options onto this instance (idempotent)."""
        if (not hasattr(self, 'options')):
            options = config.options()
            # Copy into a fresh Namespace so later global changes don't leak in.
            self.options = argparse.Namespace(**options.__dict__)
class MacdonaldPolynomials_generic(sfa.SymmetricFunctionAlgebra_generic):
    """Generic base class for bases of the Macdonald polynomial family.

    Stores the q, t parameters and the family object, and (when the
    subclass provides a Schur-expansion cache) registers coercions
    to/from the Schur basis.
    """

    def __init__(self, macdonald):
        """Initialize from a Macdonald family object."""
        # Subclass names look like 'MacdonaldPolynomials_x'; the suffix after
        # the 21-character prefix is the basis letter used in the name/prefix.
        s = self.__class__.__name__[21:].capitalize()
        sfa.SymmetricFunctionAlgebra_generic.__init__(self, macdonald._sym, basis_name=(('Macdonald ' + s) + macdonald._name_suffix), prefix=('Mcd' + s))
        self.q = macdonald.q
        self.t = macdonald.t
        self._macdonald = macdonald
        self._s = self._macdonald._s
        if hasattr(self, '_s_cache'):
            # Register two-way coercions with the Schur basis, backed by the
            # subclass's cached expansions.
            category = ModulesWithBasis(self.base_ring())
            self.register_coercion(SetMorphism(Hom(self._s, self, category), self._s_to_self))
            self._s.register_coercion(SetMorphism(Hom(self, self._s, category), self._self_to_s))

    def _s_to_self(self, x):
        """Convert a Schur-basis element into this basis via the cache."""
        return self._from_cache(x, self._s_cache, self._s_to_self_cache, q=self.q, t=self.t)

    def _self_to_s(self, x):
        """Convert an element of this basis into the Schur basis via the cache."""
        return self._s._from_cache(x, self._s_cache, self._self_to_s_cache, q=self.q, t=self.t)

    def c1(self, part):
        """Return the c1 normalization constant for partition `part`."""
        return c1(part, self.q, self.t)

    def c2(self, part):
        """Return the c2 normalization constant for partition `part`."""
        return c2(part, self.q, self.t)

    def product(self, left, right):
        """Multiply two elements by round-tripping through the Schur basis."""
        return self((self._s(left) * self._s(right)))

    def macdonald_family(self):
        """Return the Macdonald family object this basis belongs to."""
        return self._macdonald

    class Element(sfa.SymmetricFunctionAlgebra_generic.Element):
        def nabla(self, q=None, t=None, power=1):
            """Apply the nabla operator `power` times, optionally with
            specialized q and/or t; computed in the Ht basis and converted
            back to this basis."""
            parent = self.parent()
            if ((q is None) and (t is None)):
                Ht = parent._macdonald.Ht()
            else:
                # Fill any missing parameter from the parent's own q, t.
                if (q is None):
                    q = parent.q
                if (t is None):
                    t = parent.t
                Ht = parent.realization_of().macdonald(q=q, t=t).Ht()
            return parent(Ht(self).nabla(power=power))
class TwoHyperplaneClassifier(nn.Module):
    """Soft intersection of two hyperplanes.

    A sample x scores class 0 when it lies on the positive side of both
    hyperplanes (a1, b1) and (a2, b2):

        yhat0 = sig(k*(a1^T x - b1)) * sig(k*(a2^T x - b2))

    The slope parameters are constrained to the subspaces spanned by the
    projection matrices ``P1`` / ``P2`` (applied once at construction).
    """

    def __init__(self, x_dim, y_dim, P1, P2, a1=None, a2=None, b1=None, b2=None, ksig=5):
        super(TwoHyperplaneClassifier, self).__init__()
        # Slopes: random (or user-supplied) row vectors projected by P1/P2.
        if a1 is None:
            self.a1 = Parameter(torch.matmul(torch.randn(1, int(x_dim)), torch.t(P1)))
        else:
            self.a1 = Parameter(torch.matmul(torch.Tensor(a1), torch.t(P1)))
        if a2 is None:
            self.a2 = Parameter(torch.matmul(torch.randn(1, int(x_dim)), torch.t(P2)))
        else:
            self.a2 = Parameter(torch.matmul(torch.Tensor(a2), torch.t(P2)))
        # Biases: zero-initialized scalars unless explicitly provided.
        if b1 is None:
            self.b1 = Parameter(torch.Tensor(1))
            nn.init.constant_(self.b1, 0.0)
        else:
            # BUG FIX: the original compared a shape tuple to an int
            # (`b1.shape == int(y_dim)`), which is always False, so every
            # user-supplied bias tripped the assertion.
            assert tuple(b1.shape) == (int(y_dim),)
            self.b1 = Parameter(torch.Tensor(b1))
        if b2 is None:
            self.b2 = Parameter(torch.Tensor(1))
            nn.init.constant_(self.b2, 0.0)
        else:
            assert tuple(b2.shape) == (int(y_dim),)
            self.b2 = Parameter(torch.Tensor(b2))
        self.ksig = ksig  # sigmoid sharpness k

    def forward(self, x):
        """Perform classification: yhat = sig(k*(a1^Tx-b1))*sig(k*(a2^Tx-b2)).

        Inputs:
          - x : input data sample, shape (N, x_dim)
        Outputs:
          - yhat : (p(yhat=0), p(yhat=1)) per sample, shape (N, 2)
          - a1 : slope of 1st hyperplane
          - a2 : slope of 2nd hyperplane
        """
        z1 = F.linear(x, self.a1, (- 1) * self.b1)
        z2 = F.linear(x, self.a2, (- 1) * self.b2)
        yhat_class0 = torch.sigmoid(self.ksig * z1) * torch.sigmoid(self.ksig * z2)
        yhat_class1 = 1.0 - yhat_class0
        yhat = torch.cat((yhat_class0, yhat_class1), 1)
        return (yhat, self.a1, self.a2)
class Conv3dBenchmark(op_bench.TorchBenchmarkBase):
    """Microbenchmark measuring ``nn.Conv3d`` forward passes."""

    def init(self, IC, OC, kernel, stride, N, D, H, W, device):
        # Register the benchmark name, then build a random 5-D input
        # (N, C, D, H, W) and the convolution under test on the target device.
        self.set_module_name('Conv3d')
        self.input = torch.rand(N, IC, D, H, W, device=device)
        self.conv3d = nn.Conv3d(IC, OC, kernel, stride=stride).to(device=device)

    def forward(self):
        # One timed forward pass.
        return self.conv3d(self.input)
def ttoi(tensor):
    """Convert a CHW torch tensor into an HWC numpy image array.

    Singleton dimensions (e.g. a batch of one) are squeezed away first and
    the data is moved to the CPU before conversion.
    """
    chw = tensor.squeeze().cpu().numpy()
    return chw.transpose(1, 2, 0)
class ProgressBarTransferHook(TransferHook):
    """Rich-based console progress reporting for a chunked transfer.

    Shows a spinner while chunks are being dispatched, then one progress bar
    per destination region while chunks complete.
    """
    def on_dispatch_start(self):
        # The dispatch spinner is already started in __init__; nothing to do.
        return
    def __init__(self, dest_region_tags: List[str]):
        # Spinner shown during the dispatch phase.
        self.spinner = Progress(SpinnerColumn(), TextColumn('Dispatching chunks...{task.description}'), BarColumn(), DownloadColumn(binary_units=True), transient=True)
        self.dest_region_tags = dest_region_tags
        self.pbar = None  # created lazily in on_dispatch_end
        self.transfer_task = {}  # region tag -> rich task id
        self.chunks_dispatched = 0
        self.bytes_dispatched = 0
        # Completion counters tracked per destination region.
        self.chunks_completed = defaultdict(int)
        self.bytes_completed = defaultdict(int)
        self.dispatch_task = self.spinner.add_task('', total=None)
        self.spinner.start()
    def on_chunk_dispatched(self, chunks: List[Chunk]):
        """Update dispatch counters and refresh the spinner text."""
        if (len(chunks) == 0):
            # NOTE(review): an empty batch resets bytes_dispatched but not
            # chunks_dispatched — presumably signals a new round; confirm.
            self.bytes_dispatched = 0
        else:
            self.bytes_dispatched += sum([chunk.chunk_length_bytes for chunk in chunks])
        self.chunks_dispatched += len(chunks)
        self.spinner.update(self.dispatch_task, description=f' {self.chunks_dispatched} chunks (~{format_bytes(self.bytes_dispatched)} dispatched)')
    def on_dispatch_end(self):
        # Swap the spinner for per-region progress bars sized by the total
        # number of bytes dispatched.
        self.spinner.stop()
        self.pbar = Progress(SpinnerColumn(), TextColumn('Transfer progress{task.description}'), BarColumn(), DownloadColumn(binary_units=True), TransferSpeedColumn(), TimeRemainingColumn(), transient=True)
        for region_tag in self.dest_region_tags:
            self.transfer_task[region_tag] = self.pbar.add_task(region_tag, total=self.bytes_dispatched)
        self.pbar.start()
    def on_chunk_completed(self, chunks: List[Chunk], region_tag: Optional[str]=None):
        """Advance ``region_tag``'s progress bar by the completed bytes."""
        assert (region_tag is not None), f'Must specify region tag for progress bar'
        self.chunks_completed[region_tag] += len(chunks)
        self.bytes_completed[region_tag] += sum([chunk.chunk_length_bytes for chunk in chunks])
        self.pbar.update(self.transfer_task[region_tag], completed=self.bytes_completed[region_tag])
    def on_transfer_end(self):
        # Tear down the progress display.
        self.pbar.stop()
    def on_transfer_error(self, error):
        # Log, then surface the failure as a gateway exception to callers.
        console.log(error)
        raise exceptions.SkyplaneGatewayException('Transfer failed with error', error)
def test_reflection_coeffs():
    """Check that levinson's reflection coefficients agree with the last
    entries of successively larger solve_toeplitz solutions (real & complex)."""
    rng_state = np.random.RandomState(1234)
    y_d = rng_state.randn(10)
    y_z = rng_state.randn(10) + 1j
    coeffs_d = [1]
    coeffs_z = [1]
    # Build reflection coefficients one Toeplitz order at a time.
    for order in range(2, 10):
        coeffs_d.append(solve_toeplitz(y_d[:order - 1], b=y_d[1:order])[-1])
        coeffs_z.append(solve_toeplitz(y_z[:order - 1], b=y_z[1:order])[-1])
    # levinson expects the full (conjugate-symmetric) first row/column
    # concatenated into one vector.
    y_d_concat = np.concatenate((y_d[-2:0:-1], y_d[:-1]))
    y_z_concat = np.concatenate((y_z[-2:0:-1].conj(), y_z[:-1]))
    _, ref_d = levinson(y_d_concat, b=y_d[1:])
    _, ref_z = levinson(y_z_concat, b=y_z[1:])
    assert_allclose(coeffs_d, ref_d[:-1])
    assert_allclose(coeffs_z, ref_z[:-1])
def BIBD_141_6_1():
    """Return the block list of a (141, 6, 1)-BIBD.

    The design is generated from a set of base blocks whose points live in
    Z_35 x (Z_3 u {'a'}) plus one point at infinity, closed under two
    permutations (multiply x by 16 while rotating y, and shift x by 1).
    """
    from sage.sets.recursively_enumerated_set import RecursivelyEnumeratedSet
    from .incidence_structures import IncidenceStructure
    a = 'a'
    inf = (None, None)  # the point at infinity, fixed by both generators
    # Base blocks; each point is (x, y) with x in Z_35 and y in Z_3 or 'a'.
    bibd = [((0, 0), (16, 0), (24, 0), (24, 1), (15, 2), (25, 2)), ((0, 0), (3, 0), (26, 0), (13, 1), (33, 1), (34, a)), ((0, 0), (13, 0), (18, 0), (15, 1), (7, 2), (0, a)), ((0, 0), (2, 0), (14, 1), (23, 1), (26, a), (32, a)), ((0, 0), (4, 0), (29, 1), (6, 2), (9, a), (20, a)), ((0, 0), (1, 0), (12, 2), (2, a), (4, a), (19, a)), (inf, (0, 0), (7, 0), (14, 0), (21, 0), (28, 0)), (inf, (0, a), (7, a), (14, a), (21, a), (28, a))]
    # Successor function for the enumeration: apply both generators to a block.
    gens = (lambda B: [frozenset((((((x * 16) % 35), (((y + 1) % 3) if (y != a) else a)) if ((x, y) != inf) else inf) for (x, y) in B)), frozenset((((((x + 1) % 35), y) if ((x, y) != inf) else inf) for (x, y) in B))])
    # Close the base blocks under the group generated by ``gens``.
    bibd = RecursivelyEnumeratedSet([frozenset(e) for e in bibd], successors=gens)
    return IncidenceStructure(bibd)._blocks
def conv_input_length(output_length, filter_size, padding, stride):
    """Invert a convolution's length formula: given the output length,
    return the input length that produced it.

    ``padding`` is one of 'same', 'valid', 'full'. Returns None when
    ``output_length`` is None (unknown dimension).
    """
    if output_length is None:
        return None
    assert padding in {'same', 'valid', 'full'}
    # Implicit zero-padding added on each side by the forward convolution.
    pad = {'same': filter_size // 2, 'valid': 0, 'full': filter_size - 1}[padding]
    return (output_length - 1) * stride - 2 * pad + filter_size
class EisensteinExtensionFieldCappedRelative(EisensteinExtensionGeneric, pAdicCappedRelativeFieldGeneric):
    """Eisenstein extension of a p-adic field with capped relative precision."""
    def __init__(self, exact_modulus, poly, prec, print_mode, shift_seed, names, implementation='NTL'):
        """Initialize from the defining Eisenstein polynomial ``poly``.

        ``prec`` is the (ramified) precision cap; the unramified base needs
        ceil(prec / e) digits, where e = poly.degree() is the ramification.
        """
        # Ceiling division: unramified precision covering `prec` ramified digits.
        unram_prec = (((prec + poly.degree()) - 1) // poly.degree())
        ntl_poly = ntl_ZZ_pX([a.lift() for a in poly.list()], (poly.base_ring().prime() ** unram_prec))
        shift_poly = ntl_ZZ_pX([a.lift() for a in shift_seed.list()], (shift_seed.base_ring().prime() ** unram_prec))
        # Choose the PowComputer flavor by precision: 'small' up to 30,
        # otherwise 'big' (presumably an NTL representation limit — confirm).
        if (unram_prec <= 30):
            self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), unram_prec, unram_prec, prec, True, ntl_poly, 'small', 'e', shift_poly)
        else:
            self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), 30, unram_prec, prec, True, ntl_poly, 'big', 'e', shift_poly)
        self._shift_seed = shift_seed
        self._exact_modulus = exact_modulus
        self._implementation = implementation
        EisensteinExtensionGeneric.__init__(self, poly, prec, print_mode, names, pAdicZZpXCRElement)
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, max_norm: float=0, model_ema: Optional[ModelEma]=None, mixup_fn: Optional[Mixup]=None, log_writer=None, start_steps=None, lr_schedule_values=None, wd_schedule_values=None, num_training_steps_per_epoch=None, update_freq=None):
    """Train ``model`` for one epoch over ``data_loader``.

    Two execution modes, selected by ``loss_scaler``:
      - ``loss_scaler is None``: deepspeed-style training (``model.backward``
        / ``model.step``; the engine owns grad accumulation / loss scaling).
      - otherwise: torch AMP autocast with the provided ``loss_scaler``.

    Gradients accumulate over ``update_freq`` iterations; schedules are
    indexed by global step ``start_steps + data_iter_step // update_freq``.
    Returns a dict of epoch-averaged metrics from the metric logger.
    """
    model.train(True)
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    if (loss_scaler is None):
        # Deepspeed path: the engine tracks its own micro-steps.
        model.zero_grad()
        model.micro_steps = 0
    else:
        optimizer.zero_grad()
    for (data_iter_step, (samples, targets)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # Optimizer step index (accounting for gradient accumulation).
        step = (data_iter_step // update_freq)
        if (step >= num_training_steps_per_epoch):
            continue
        it = (start_steps + step)  # global training step for schedule lookup
        # NOTE(review): operator precedence makes this condition
        # `lr is not None OR (wd is not None AND step % freq == 0)`, so LR
        # groups are updated every iteration, not only on accumulation
        # boundaries — confirm this matches the intended schedule.
        if ((lr_schedule_values is not None) or ((wd_schedule_values is not None) and ((data_iter_step % update_freq) == 0))):
            for (i, param_group) in enumerate(optimizer.param_groups):
                if (lr_schedule_values is not None):
                    param_group['lr'] = (lr_schedule_values[it] * param_group['lr_scale'])
                if ((wd_schedule_values is not None) and (param_group['weight_decay'] > 0)):
                    param_group['weight_decay'] = wd_schedule_values[it]
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        if (mixup_fn is not None):
            (samples, targets) = mixup_fn(samples, targets)
        if (loss_scaler is None):
            # Deepspeed fp16: inputs must be cast to half precision.
            samples = samples.half()
            (loss, output) = train_class_batch(model, samples, targets, criterion)
        else:
            with torch.cuda.amp.autocast():
                (loss, output) = train_class_batch(model, samples, targets, criterion)
        loss_value = loss.item()
        if (not math.isfinite(loss_value)):
            # Diverged (NaN/inf loss): abort rather than corrupt the weights.
            print('Loss is {}, stopping training'.format(loss_value))
            sys.exit(1)
        if (loss_scaler is None):
            # Scale for accumulation; deepspeed applies the optimizer step
            # internally on the accumulation boundary.
            loss /= update_freq
            model.backward(loss)
            model.step()
            if (((data_iter_step + 1) % update_freq) == 0):
                if (model_ema is not None):
                    model_ema.update(model)
            grad_norm = None
            loss_scale_value = get_loss_scale_for_deepspeed(model)
        else:
            is_second_order = (hasattr(optimizer, 'is_second_order') and optimizer.is_second_order)
            loss /= update_freq
            # loss_scaler only steps the optimizer when update_grad is True
            # (i.e. on gradient-accumulation boundaries).
            grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=is_second_order, update_grad=(((data_iter_step + 1) % update_freq) == 0))
            if (((data_iter_step + 1) % update_freq) == 0):
                optimizer.zero_grad()
                if (model_ema is not None):
                    model_ema.update(model)
            loss_scale_value = loss_scaler.state_dict()['scale']
        torch.cuda.synchronize()
        if (mixup_fn is None):
            # Batch top-1 accuracy (argmax == target); undefined under mixup.
            class_acc = (output.max((- 1))[(- 1)] == targets).float().mean()
        else:
            class_acc = None
        metric_logger.update(loss=loss_value)
        metric_logger.update(class_acc=class_acc)
        metric_logger.update(loss_scale=loss_scale_value)
        # Track the spread of learning rates across parameter groups.
        min_lr = 10.0
        max_lr = 0.0
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group['lr'])
            max_lr = max(max_lr, group['lr'])
        metric_logger.update(lr=max_lr)
        metric_logger.update(min_lr=min_lr)
        weight_decay_value = None
        for group in optimizer.param_groups:
            if (group['weight_decay'] > 0):
                weight_decay_value = group['weight_decay']
        metric_logger.update(weight_decay=weight_decay_value)
        metric_logger.update(grad_norm=grad_norm)
        if (log_writer is not None):
            log_writer.update(loss=loss_value, head='loss')
            log_writer.update(class_acc=class_acc, head='loss')
            log_writer.update(loss_scale=loss_scale_value, head='opt')
            log_writer.update(lr=max_lr, head='opt')
            log_writer.update(min_lr=min_lr, head='opt')
            log_writer.update(weight_decay=weight_decay_value, head='opt')
            log_writer.update(grad_norm=grad_norm, head='opt')
            log_writer.set_step()
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
def test_block8(module, name):
    """Check a Block8 module's conv layers against the TF reference scopes."""
    test_conv2d(module.branch0, name + '/Branch_0/Conv2d_1x1')
    # The three stacked convs of branch 1 share a naming pattern.
    for idx, suffix in enumerate(('0a_1x1', '0b_1x3', '0c_3x1')):
        test_conv2d(module.branch1[idx], name + '/Branch_1/Conv2d_' + suffix)
    test_conv2d_nobn(module.conv2d, name + '/Conv2d_1x1')
class DepthConcat(Concat):
    """Concat variant that concatenates child outputs along ``self.dimension``
    while zero-padding smaller outputs so every other dimension matches.

    Each child's output is centered inside its slot of the padded output
    tensor (legacy Lua-torch ``nn.DepthConcat`` port).
    """
    def windowNarrow(self, output, currentOutput, offset):
        """Return a view into ``output`` where ``currentOutput`` belongs: a
        slab at ``offset`` along the concat dimension, centered in every
        other dimension whose size is smaller than the output's."""
        outputWindow = output.narrow(self.dimension, offset, currentOutput.size(self.dimension))
        for dim in range(len(self.outputSize)):
            currentSize = currentOutput.size(dim)
            if ((dim != self.dimension) and (self.outputSize[dim] != currentSize)):
                # Center the smaller tensor inside the larger window.
                start = int(math.floor(((self.outputSize[dim] - currentSize) / 2)))
                outputWindow = outputWindow.narrow(dim, start, currentSize)
        return outputWindow
    def updateOutput(self, input):
        """Forward pass: run each child, size the output as the elementwise
        max over child shapes (summed along the concat dim), then copy each
        child's result into its centered window."""
        outs = []
        for i in range(len(self.modules)):
            currentOutput = self.modules[i].updateOutput(input)
            outs.append(currentOutput)
            if (i == 0):
                size = list(currentOutput.size())
            else:
                size[self.dimension] += currentOutput.size(self.dimension)
                for dim in range(len(self.outputSize)):
                    if (dim != self.dimension):
                        size[dim] = max(size[dim], currentOutput.size(dim))
        self.outputSize = torch.Size(size)
        # Zero-fill so the padding regions stay zero after the copies below.
        self.output.resize_(self.outputSize).zero_()
        offset = 0
        for (i, module) in enumerate(self.modules):
            currentOutput = outs[i]
            outputWindow = self.windowNarrow(self.output, currentOutput, offset)
            outputWindow.copy_(currentOutput)
            offset = (offset + currentOutput.size(self.dimension))
        return self.output
    def updateGradInput(self, input, gradOutput):
        """Backward pass: each child sees only its own (centered) window of
        ``gradOutput``; child grad-inputs are summed into ``self.gradInput``."""
        self.gradInput.resize_as_(input)
        offset = 0
        for (i, module) in enumerate(self.modules):
            currentOutput = module.output
            gradOutputWindow = self.windowNarrow(gradOutput, currentOutput, offset)
            currentGradInput = module.updateGradInput(input, gradOutputWindow)
            if (i == 0):
                self.gradInput.copy_(currentGradInput)
            else:
                self.gradInput.add_(currentGradInput)
            offset += currentOutput.size(self.dimension)
        return self.gradInput
    def accGradParameters(self, input, gradOutput, scale=1):
        """Accumulate each child's parameter gradients from its window."""
        offset = 0
        for (i, module) in enumerate(self.modules):
            currentOutput = module.output
            gradOutputWindow = self.windowNarrow(gradOutput, currentOutput, offset)
            module.accGradParameters(input, gradOutputWindow, scale)
            offset += currentOutput.size(self.dimension)
    def backward(self, input, gradOutput, scale=1):
        """Combined grad-input + parameter-gradient pass (see updateGradInput)."""
        self.gradInput.resize_as_(input)
        offset = 0
        for (i, module) in enumerate(self.modules):
            currentOutput = module.output
            gradOutputWindow = self.windowNarrow(gradOutput, currentOutput, offset)
            currentGradInput = module.backward(input, gradOutputWindow)
            if (i == 0):
                self.gradInput.copy_(currentGradInput)
            else:
                self.gradInput.add_(currentGradInput)
            offset = (offset + currentOutput.size(self.dimension))
        return self.gradInput
    def accUpdateGradParameters(self, input, gradOutput, lr):
        """In-place parameter update from each child's gradient window."""
        offset = 0
        for (i, module) in enumerate(self.modules):
            currentOutput = module.output
            gradOutputWindow = self.windowNarrow(gradOutput, currentOutput, offset)
            module.accUpdateGradParameters(input, gradOutputWindow, lr)
            offset = (offset + currentOutput.size(self.dimension))
class Module():
    """Superclass for computation-layer implementations in an LRP toolbox.

    Provides the generic LRP dispatch (``lrp`` / ``set_lrp_parameters``) and
    no-op defaults for forward/backward/training hooks; concrete layers
    override the ``_*_lrp`` methods they support.
    """

    def __init__(self):
        # Default LRP decomposition choice and its parameter.
        self.lrp_var = None
        self.lrp_param = 1.0

    def backward(self, DY):
        """Default backward pass: propagate the gradient unchanged."""
        return DY

    def train(self, X, Y, *args, **kwargs):
        """Training hook; layers with parameters override this. No-op here.

        BUG FIX: the original ``def train`` had no body, which made the
        following ``forward`` a function nested inside ``train`` instead of
        a method of the class (instances had no ``forward`` attribute).
        """
        pass

    def forward(self, X, lrp_aware=False):
        """Default forward pass: return the input unchanged."""
        return X

    def update(self, lrate):
        """Parameter-update hook; no-op for parameterless layers."""
        pass

    def clean(self):
        """Release cached intermediate results; no-op by default."""
        pass

    def set_lrp_parameters(self, lrp_var=None, param=None):
        """Preset the LRP variant/parameter used when ``lrp`` gets no args."""
        self.lrp_var = lrp_var
        self.lrp_param = param

    def lrp(self, R, lrp_var=None, param=None):
        """Dispatch relevance ``R`` to the requested LRP variant.

        When both ``lrp_var`` and ``param`` are None, the per-layer presets
        from ``set_lrp_parameters`` are used.

        :raises Exception: for an unknown variant name.
        :raises NotImplementedError: when the layer lacks the variant.
        """
        if (lrp_var is None) and (param is None):
            lrp_var = self.lrp_var
            param = self.lrp_param
        if lrp_var is None:
            return self._simple_lrp(R)
        variant = lrp_var.lower()
        if variant in ('none', 'simple'):
            return self._simple_lrp(R)
        if variant in ('slow', 'simple_slow'):
            return self._simple_lrp_slow(R)
        if variant == 'flat':
            return self._flat_lrp(R)
        if variant in ('ww', 'w^2'):
            return self._ww_lrp(R)
        if variant == 'zb':
            return self._zb_lrp(R, param)
        if variant == 'epsilon':
            return self._epsilon_lrp(R, param)
        if variant == 'epsilon_slow':
            return self._epsilon_lrp_slow(R, param)
        if variant in ('alphabeta', 'alpha'):
            return self._alphabeta_lrp(R, param)
        if variant in ('alphabeta_slow', 'alpha_slow'):
            return self._alphabeta_lrp_slow(R, param)
        raise Exception('Unknown lrp variant {}'.format(lrp_var))

    # --- per-variant hooks; concrete layers implement the ones they support ---

    def _simple_lrp(self, R):
        raise NotImplementedError(('_simple_lrp missing in ' + self.__class__.__name__))

    def _simple_lrp_slow(self, R):
        raise NotImplementedError(('_simple_lrp_slow missing in ' + self.__class__.__name__))

    def _flat_lrp(self, R):
        raise NotImplementedError(('_flat_lrp missing in ' + self.__class__.__name__))

    def _ww_lrp(self, R):
        raise NotImplementedError(('_ww_lrp missing in ' + self.__class__.__name__))

    def _zb_lrp(self, R, param):
        raise NotImplementedError(('_zB_lrp missing in ' + self.__class__.__name__))

    def _epsilon_lrp(self, R, param):
        raise NotImplementedError(('_epsilon_lrp missing in ' + self.__class__.__name__))

    def _epsilon_lrp_slow(self, R, param):
        raise NotImplementedError(('_epsilon_lrp_slow missing in ' + self.__class__.__name__))

    def _alphabeta_lrp(self, R, param):
        raise NotImplementedError(('_alphabeta_lrp missing in ' + self.__class__.__name__))

    def _alphabeta_lrp_slow(self, R, param):
        raise NotImplementedError(('_alphabeta_lrp_slow missing in ' + self.__class__.__name__))

    def to_cupy(self):
        raise NotImplementedError(('to_cupy missing in ' + self.__class__.__name__))

    def to_numpy(self):
        raise NotImplementedError(('to_numpy missing in ' + self.__class__.__name__))
def _jit_compile(name, sources, extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths, build_directory: str, verbose: bool, with_cuda: Optional[bool], is_python_module, keep_intermediates=True) -> None:
    """Compile and load a C++/CUDA extension, rebuilding only when inputs
    changed since the last build.

    A hash of sources + build arguments decides whether to bump the
    extension version and rebuild; a file-based baton serializes builds
    across concurrent processes.
    NOTE(review): annotated ``-> None`` but the function returns the result
    of ``_import_module_from_library`` — the annotation looks stale; confirm.
    """
    if (with_cuda is None):
        # Infer CUDA involvement from the source file extensions.
        with_cuda = any(map(_is_cuda_file, sources))
    with_cudnn = any([('cudnn' in f) for f in (extra_ldflags or [])])
    old_version = JIT_EXTENSION_VERSIONER.get_version(name)
    version = JIT_EXTENSION_VERSIONER.bump_version_if_changed(name, sources, build_arguments=[extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths], build_directory=build_directory, with_cuda=with_cuda)
    if (version > 0):
        if ((version != old_version) and verbose):
            print(('The input conditions for extension module {} have changed. '.format(name) + 'Bumping to version {0} and re-building as {1}_v{0}...'.format(version, name)))
        # Versioned name keeps differently-configured builds side by side.
        name = '{}_v{}'.format(name, version)
    if (version != old_version):
        baton = FileBaton(os.path.join(build_directory, 'lock'))
        if baton.try_acquire():
            # We won the build lock: perform the (optionally hipified) build.
            try:
                with GeneratedFileCleaner(keep_intermediates=keep_intermediates) as clean_ctx:
                    if (IS_HIP_EXTENSION and (with_cuda or with_cudnn)):
                        # On ROCm, translate CUDA sources to HIP first.
                        hipify_python.hipify(project_directory=build_directory, output_directory=build_directory, includes=os.path.join(build_directory, '*'), extra_files=[os.path.abspath(s) for s in sources], show_detailed=verbose, is_pytorch_extension=True, clean_ctx=clean_ctx)
                    _write_ninja_file_and_build_library(name=name, sources=sources, extra_cflags=(extra_cflags or []), extra_cuda_cflags=(extra_cuda_cflags or []), extra_ldflags=(extra_ldflags or []), extra_include_paths=(extra_include_paths or []), build_directory=build_directory, verbose=verbose, with_cuda=with_cuda)
            finally:
                baton.release()
        else:
            # Another process is building the same extension; wait for it.
            baton.wait()
    elif verbose:
        print('No modifications detected for re-loaded extension module {}, skipping build step...'.format(name))
    if verbose:
        print(f'Loading extension module {name}...')
    return _import_module_from_library(name, build_directory, is_python_module)
def send_to_servers(binary_image, url_face: str, url_age_gender: str) -> tuple:
    """Send an image to the face server, then its embeddings to the
    age/gender server.

    :param binary_image: raw image bytes to analyze.
    :param url_face: endpoint of the face detection/recognition service.
    :param url_age_gender: endpoint of the age/gender service.
    :return: (genders, ages, bboxes, det_scores, landmarks, embeddings).

    FIXES: the return annotation said ``None`` while a 6-tuple is returned;
    'deteced' typo in the log message; dropped pointless f-prefixes on
    placeholder-free log strings.
    """
    data = {'image': binary_image}
    logging.info('image loaded!')
    logging.debug('sending image to server...')
    data = jsonpickle.encode(data)
    response = requests.post(url_face, json=data)
    logging.info(f'got {response} from server!...')
    response = jsonpickle.decode(response.text)
    face_detection_recognition = response['face_detection_recognition']
    logging.info(f'{len(face_detection_recognition)} faces detected!')
    # Unpack the per-face results returned by the face service.
    bboxes = [fdr['bbox'] for fdr in face_detection_recognition]
    det_scores = [fdr['det_score'] for fdr in face_detection_recognition]
    landmarks = [fdr['landmark'] for fdr in face_detection_recognition]
    embeddings = [fdr['normed_embedding'] for fdr in face_detection_recognition]
    # The age/gender service expects a pickled float32 array of shape (-1, 512).
    # NOTE(review): pickle over HTTP is only safe between trusted services.
    data = np.array(embeddings).reshape(-1, 512).astype(np.float32)
    data = pickle.dumps(data)
    data = {'embeddings': data}
    data = jsonpickle.encode(data)
    logging.debug('sending embeddings to server ...')
    response = requests.post(url_age_gender, json=data)
    logging.info(f'got {response} from server!...')
    response = jsonpickle.decode(response.text)
    ages = response['ages']
    genders = response['genders']
    return (genders, ages, bboxes, det_scores, landmarks, embeddings)
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
    """Create a TensorFlow session constrained to ``num_cpu`` threads.

    ``num_cpu`` defaults to the RCALL_NUM_CPU environment variable (falling
    back to the machine's CPU count); the default config enables soft
    placement and GPU memory growth. ``make_default`` yields an
    InteractiveSession instead of a plain Session.
    """
    if num_cpu is None:
        num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
    if config is None:
        config = tf.ConfigProto(allow_soft_placement=True, inter_op_parallelism_threads=num_cpu, intra_op_parallelism_threads=num_cpu)
        config.gpu_options.allow_growth = True
    session_cls = tf.InteractiveSession if make_default else tf.Session
    return session_cls(config=config, graph=graph)
class TestSum(test_util.TestCase):
    """Device and gradient checks for the Caffe2 Sum operator."""

    def setUp(self):
        # (input_size, in_place) pairs exercising both aliasing modes.
        self.test_configs = [((1, 2, 3, 4), True), ((1, 2, 3, 4), False)]

    def testSum(self):
        for input_size, in_place in self.test_configs:
            # In-place mode writes the result back into the first input blob.
            output_blob = 'X1' if in_place else 'Y'
            op = core.CreateOperator('Sum', ['X1', 'X2'], [output_blob])
            X1 = np.random.rand(*input_size).astype(np.float32) - 0.5
            X2 = np.random.rand(*input_size).astype(np.float32) - 0.5
            self.assertTrue(device_checker.CheckSimple(op, [X1, X2], [0]))
            # Numerical gradient check w.r.t. each input in turn.
            for checker in gradient_checkers:
                for input_index in (0, 1):
                    res, grad, grad_estimated = checker.CheckSimple(op, [X1, X2], input_index, [0])
                    self.assertTrue(res)
def get_include_dir(module):
    """Return ``[abs_path]`` of the ``include`` directory shipped with a
    dotted module name, or ``[]`` when no such directory exists."""
    candidate = osp.join(*(module.split('.') + ['include']))
    if not osp.exists(candidate):
        return []
    return [osp.abspath(candidate)]
class WILDSAmazonProcessor(DataProcessor):
    """Processor for the WILDS Amazon reviews dataset (5 star-rating classes)."""

    def get_train_examples(self, data_dir):
        """Read the training split."""
        return self._split_examples(data_dir, 'amazon.train.tsv', 'train')

    def get_dev_examples(self, data_dir):
        """Read the validation split."""
        return self._split_examples(data_dir, 'amazon.val.tsv', 'dev')

    def get_test_examples(self, data_dir):
        """Read the test split."""
        return self._split_examples(data_dir, 'amazon.test.tsv', 'test')

    def _split_examples(self, data_dir, filename, set_type):
        # Shared reader used by all three split accessors.
        lines = self._read_tsv(os.path.join(data_dir, filename), quotechar='"')
        return self._create_examples(lines, set_type)

    def get_labels(self):
        """Star ratings encoded as string class labels."""
        return ['0', '1', '2', '3', '4']

    def _create_examples(self, lines, set_type):
        """Turn TSV rows (text, label) into InputExamples, skipping the header."""
        examples = []
        for row_index, row in enumerate(lines):
            if row_index == 0:
                continue  # header row
            examples.append(InputExample(guid='%s-%s' % (set_type, row_index), text_a=row[0], text_b=None, label=row[1]))
        return examples
class Waypoint():
    """A position on a SUMO lane: the lane-graph node plus the offset in
    meters along the lane shape."""
    def __init__(self, location, lane_node, lane_position):
        # location: 2-D xy coordinate, coerced to a numpy array.
        self.location = (np.array(location) if (not isinstance(location, np.ndarray)) else location)
        self.lane_node = lane_node
        self.lane_position = lane_position
    def next(self, step_size):
        """Return the waypoint(s) roughly ``step_size`` meters ahead.

        Stays on the current lane when it is long enough; otherwise marches
        a small offset along each outgoing lane until it is ``step_size``
        away (one candidate waypoint per successor lane).
        """
        waypoint_next = []
        (x, y) = sumolib.geomhelper.positionAtShapeOffset(self.lane_node.lane.getShape(), (self.lane_position + step_size))
        location = np.array([x, y])
        # 0.1 m slack: positionAtShapeOffset clamps at the lane end, so a
        # nearly-full step means the current lane still covers the move.
        if (np.linalg.norm((location - self.location)) >= (step_size - 0.1)):
            waypoint = Waypoint(location, self.lane_node, (self.lane_position + step_size))
            waypoint_next.append(waypoint)
            return waypoint_next
        for to_lane_node in self.lane_node.outgoing:
            to_lane = to_lane_node.lane
            offset_step = 0.1
            lane_offset = offset_step
            position_old = np.array([0.0, 0.0])
            # March along the successor lane until step_size away from the
            # current location, or until the shape stops advancing.
            while True:
                (x, y) = sumolib.geomhelper.positionAtShapeOffset(to_lane.getShape(), lane_offset)
                location = np.array([x, y])
                if np.all((location == position_old)):
                    break
                if (np.linalg.norm((location - self.location)) >= step_size):
                    break
                position_old = location
                lane_offset += offset_step
            waypoint = Waypoint(location, to_lane_node, lane_offset)
            waypoint_next.append(waypoint)
        return waypoint_next
    def next_up_to_distance(self, step_size, max_distance, road_option):
        """Collect waypoints every ``step_size`` meters up to
        ``max_distance``, resolving branch points via ``road_option``."""
        if ((road_option == RoadOptionsExtended.SWITCHRIGHT) or (road_option == RoadOptionsExtended.SWITCHLEFT)):
            # Lane switches are handled elsewhere; treat as lane-follow here.
            road_option = RoadOptionsExtended.FOLLOW
        waypoints = [self]
        branched = False
        distance = 0.0
        while (distance < max_distance):
            waypoint_next = waypoints[(- 1)].next(step_size)
            if (len(waypoint_next) == 0):
                break
            elif (len(waypoint_next) == 1):
                waypoints.append(waypoint_next[0])
                distance += step_size
            else:
                if (branched or (road_option == RoadOptionsExtended.FOLLOW)):
                    for waypoint in waypoint_next:
                        # NOTE(review): ``waypoint.on_route`` is a bound
                        # method here, so this condition is always truthy —
                        # it looks like a lost @property; confirm.
                        if waypoint.on_route:
                            waypoints.append(waypoint)
                            branched = True
                            break
                elif ((road_option == RoadOptionsExtended.LEFT) or (road_option == RoadOptionsExtended.RIGHT)):
                    for waypoint in waypoint_next:
                        # NOTE(review): same method-vs-property concern —
                        # ``not waypoint.on_route`` is always False here.
                        if (not waypoint.on_route):
                            waypoints.append(waypoint)
                            branched = True
                            break
                distance += step_size
        return waypoints
    def _nearest_waypoint_on_lane(self, lane_node):
        """Project this waypoint's location onto ``lane_node``'s lane and
        return the resulting waypoint (None when there is no such lane)."""
        if (lane_node is None):
            return None
        (lane_position, _) = lane_node.lane.getClosestLanePosAndDist(self.location, perpendicular=False)
        position = sumolib.geomhelper.positionAtShapeOffset(lane_node.lane.getShape(), lane_position)
        waypoint = Waypoint(position, lane_node, lane_position)
        return waypoint
    def left(self):
        """Nearest waypoint on the lane to the left (None when absent)."""
        return self._nearest_waypoint_on_lane(self.lane_node.left)
    def right(self):
        """Nearest waypoint on the lane to the right (None when absent)."""
        return self._nearest_waypoint_on_lane(self.lane_node.right)
    def on_route(self):
        """Whether the underlying lane-graph node lies on the planned route."""
        return self.lane_node.on_route
    def road_options(self):
        """Enumerate the maneuver options available from this waypoint."""
        road_options = []
        # NOTE(review): ``self.left``/``self.right`` are bound methods and
        # therefore never None — both checks always pass; presumably they
        # were @property accessors before regeneration. Confirm.
        if (self.left is not None):
            road_options.append(RoadOptionsExtended.SWITCHLEFT)
        if (self.right is not None):
            road_options.append(RoadOptionsExtended.SWITCHRIGHT)
        step_size = 1.0
        waypoint_next = self.next(step_size)
        cur_dist = step_size
        if (len(waypoint_next) == 0):
            return road_options
        road_options.append(RoadOptionsExtended.FOLLOW)
        # Look ahead up to 50 m; a branch adds LEFT and RIGHT options.
        while True:
            if (len(waypoint_next) == 0):
                break
            elif (len(waypoint_next) == 1):
                waypoint_next = waypoint_next[0].next(step_size)
                cur_dist += step_size
                if (cur_dist > 50.0):
                    break
            else:
                road_options.append(RoadOptionsExtended.LEFT)
                road_options.append(RoadOptionsExtended.RIGHT)
                break
        return road_options
def load_pil(path, standardize=False):
    """Dispatch image loading on file extension.

    PNG and JPEG files get their dedicated loaders; anything else is
    treated as TIFF.
    """
    if path.endswith('.png'):
        return load_png(path, standardize=standardize)
    if path.endswith(('.jpeg', '.jpg')):
        return load_jpeg(path, standardize=standardize)
    return load_tiff(path, standardize=standardize)
def test_hdbscan_no_clusters():
    """With min_cluster_size larger than the dataset, everything is noise."""
    min_size = len(X) + 1
    # Functional API: labels are the first element of the returned tuple.
    labels = hdbscan(X, min_cluster_size=min_size)[0]
    assert len(set(labels)) - int(-1 in labels) == 0
    # Estimator API should agree.
    labels = HDBSCAN(min_cluster_size=min_size).fit(X).labels_
    assert len(set(labels)) - int(-1 in labels) == 0
class Data2VecAudioForXVector(metaclass=DummyObject):
    """Import-time placeholder used when PyTorch is unavailable."""
    # Backend(s) that must be installed before the real class can be used.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Fails fast with an informative error when torch is missing.
        requires_backends(self, ['torch'])
# NOTE(review): this bare call looks like a decorator that lost its '@'
# during source regeneration — presumably '@_builder(...)' registering the
# class below under the given name; confirm against the original source.
_builder('objaverse_mm_caption_instruct')
class ObjaverseCaptionInstructBuilder(ObjaverseCaptionBuilder):
    """Dataset builder for instruction-style Objaverse caption data."""
    # Dataset classes used for training and evaluation.
    train_dataset_cls = ObjaverseCaptionInstructDataset
    eval_dataset_cls = ObjaverseCaptionEvalDataset
    # Maps config name to the YAML file carrying the dataset defaults.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/objaverse/defaults_mm_cap_instruct.yaml'}
class AlignedModule(nn.Module):
    """Flow-based feature alignment (SFNet-style).

    Predicts a 2-D flow field from a low-resolution and a high-resolution
    feature map, then warps the original high-resolution features onto the
    low-resolution grid.
    """

    def __init__(self, c1, c2, k=3):
        super().__init__()
        # 1x1 projections into a shared channel space, plus the flow head
        # (k x k conv, stride 1, padding 1, 2 output channels).
        self.down_h = nn.Conv2d(c1, c2, 1, bias=False)
        self.down_l = nn.Conv2d(c1, c2, 1, bias=False)
        self.flow_make = nn.Conv2d(c2 * 2, 2, k, 1, 1, bias=False)

    def forward(self, low_feature: Tensor, high_feature: Tensor) -> Tensor:
        """Warp ``high_feature`` onto ``low_feature``'s spatial grid."""
        high_origin = high_feature
        H, W = low_feature.shape[-2:]
        # Project both streams; bring the high stream to the low resolution.
        low_proj = self.down_l(low_feature)
        high_proj = F.interpolate(self.down_h(high_feature), size=(H, W), mode='bilinear', align_corners=True)
        # Predict flow from the stacked projections, then warp the original
        # (unprojected) high-resolution features with it.
        flow = self.flow_make(torch.cat([high_proj, low_proj], dim=1))
        return self.flow_warp(high_origin, flow, (H, W))

    def flow_warp(self, x: Tensor, flow: Tensor, size: tuple) -> Tensor:
        """Sample ``x`` with a normalized base grid displaced by ``flow``."""
        norm = torch.tensor([[[[*size]]]]).type_as(x).to(x.device)
        # Base sampling grid covering [-1, 1] x [-1, 1] at resolution `size`.
        H = torch.linspace(-1.0, 1.0, size[0]).view(-1, 1).repeat(1, size[1])
        W = torch.linspace(-1.0, 1.0, size[1]).repeat(size[0], 1)
        grid = torch.cat((W.unsqueeze(2), H.unsqueeze(2)), dim=2)
        grid = grid.repeat(x.shape[0], 1, 1, 1).type_as(x).to(x.device)
        # Displace the grid by the flow, normalized by the output size.
        grid = grid + flow.permute(0, 2, 3, 1) / norm
        return F.grid_sample(x, grid, align_corners=False)
class Identity(nn.Module):
    """No-op module that forwards its input unchanged.

    Accepts (and ignores) arbitrary constructor arguments so it can stand
    in for any other module in a configuration.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()

    def forward(self, input):
        """Return ``input`` untouched."""
        return input
def register_Ns3PyVizLastPacketsSample_methods(root_module, cls):
    """Register bindings for ns3::PyViz::LastPacketsSample: a default and a
    copy constructor plus three mutable packet-vector attributes."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PyViz::LastPacketsSample const &', 'arg0')])
    # Attributes holding the most recent dropped/received/transmitted samples.
    attribute_specs = (
        ('lastDroppedPackets', 'std::vector< ns3::PyViz::PacketSample >'),
        ('lastReceivedPackets', 'std::vector< ns3::PyViz::RxPacketSample >'),
        ('lastTransmittedPackets', 'std::vector< ns3::PyViz::TxPacketSample >'),
    )
    for attr_name, cpp_type in attribute_specs:
        cls.add_instance_attribute(attr_name, cpp_type, is_const=False)
    return
# NOTE(review): the bare '_config' below looks like a decorator that lost its
# '@' during source regeneration (sacred-style named config); confirm.
_config
def task_finetune_irtr_msvd():
    """Named config: fine-tune image/text retrieval (IRTR) on MSVD.

    The local assignments below are configuration values picked up by the
    config framework; they are not used directly in this function body.
    """
    exp_name = 'finetune_irtr_msvd'
    datasets = ['msvd']
    # Loss mix: individual ITC, ITM at half weight, and the IRTR loss.
    loss_names = _loss_names({'ind_itc': 1, 'itm': 0.5, 'irtr': 1})
    batch_size = 256
    max_epoch = 50
    max_steps = None
    warmup_steps = 0.1  # fraction of total steps used for warmup
    get_recall_metric = False
    get_itc_recall_metric = False
    get_ind_recall_metric = True
    draw_false_text = 10  # number of negative texts drawn per sample
    learning_rate = 0.0001
class CustomAnomalyDataset(CustomDataset, TSADBaseDataset):
    """Custom dataset for time-series anomaly detection.

    Adds handling of the boolean 'anomaly' metadata column on top of the
    generic CustomDataset loading behavior.
    """
    def __init__(self, rootdir, test_frac=0.5, assume_no_anomaly=False, time_col=None, time_unit='s', data_cols=None, index_cols=None):
        """Initialize the dataset.

        :param assume_no_anomaly: when True, time series lacking an
            'anomaly' column are treated as anomaly-free instead of raising.
        """
        self.assume_no_anomaly = assume_no_anomaly
        super().__init__(rootdir=rootdir, test_frac=test_frac, time_col=time_col, time_unit=time_unit, data_cols=data_cols, index_cols=index_cols)
    def metadata_cols(self):
        # NOTE(review): possibly meant to be an @property (the decorator may
        # have been lost in regeneration) — confirm against the base class.
        return ['anomaly', 'trainval']
    def check_ts_for_metadata(self, ts, col):
        """Ensure metadata column ``col`` exists in ``ts``.

        The 'anomaly' column is created (all-False) when permitted and cast
        to bool; all other columns defer to the parent implementation.
        """
        if (col == 'anomaly'):
            if (col not in ts):
                if self.assume_no_anomaly:
                    ts[col] = False
                else:
                    raise ValueError(f'Time series {ts} does not have metadata column {col}.')
            ts[col] = ts[col].astype(bool)
        else:
            ts = super().check_ts_for_metadata(ts, col)
        return ts
class DataIterator():
    """Mini-batch iterator over a pandas DataFrame with columns
    'user', 'seq', and 'target'.

    In 'train' mode each batch additionally carries ``neg_sample`` negative
    items per user, drawn from ``all_items`` minus the user's clicked items.
    Iteration raises StopIteration at the end of each epoch after resetting
    (and optionally reshuffling) the data.
    """

    def __init__(self, mode, data, batch_size=128, neg_sample=1, all_items=None, items_usr_clicked=None, shuffle=True):
        self.mode = mode
        self.data = data
        self.datasize = data.shape[0]
        self.neg_count = neg_sample
        self.batch_size = batch_size
        self.item_usr_clicked = items_usr_clicked
        self.all_items = all_items
        self.shuffle = shuffle
        self.seed = 0
        self.idx = 0  # cursor into the (possibly reshuffled) DataFrame
        # NOTE(review): round() can under-count by one partial batch;
        # presumably only used for progress display — confirm.
        self.total_batch = round(self.datasize / float(self.batch_size))

    def __iter__(self):
        return self

    def reset(self):
        """Rewind to the start of an epoch, reshuffling when enabled."""
        self.idx = 0
        if self.shuffle:
            self.data = self.data.sample(frac=1).reset_index(drop=True)
            # Advance the RNG seed so successive epochs differ.
            self.seed = self.seed + 1
            random.seed(self.seed)

    def __next__(self):
        """Return the next (users, seqs, positives, negatives) batch."""
        if self.idx >= self.datasize:
            self.reset()
            raise StopIteration
        # The final batch of an epoch may be smaller than batch_size.
        nums = min(self.batch_size, self.datasize - self.idx)
        cur = self.data.iloc[self.idx:self.idx + nums]
        batch_user = cur['user'].values
        batch_seq = list(cur['seq'].values)
        batch_pos = list(cur['target'].values)
        batch_neg = []
        if self.mode == 'train':
            for u in cur['user']:
                # Candidate negatives: items this user never clicked.
                candidates = set(self.all_items) - set(self.item_usr_clicked[u])
                # BUG FIX: random.sample() no longer accepts sets on
                # Python 3.11+; convert to a list before sampling.
                batch_neg.append(random.sample(list(candidates), self.neg_count))
        self.idx += self.batch_size
        return (batch_user, batch_seq, batch_pos, batch_neg)
def get_collaborator(plan, name, model, aggregator):
    """Build a collaborator from a shallow copy of ``plan`` so the caller's
    plan object stays unmodified."""
    plan_copy = copy(plan)
    return plan_copy.get_collaborator(name, task_runner=model, client=aggregator)
class UpstreamExpert(UpstreamBase):
    """s3prl upstream wrapper around a pretrained CPC model checkpoint."""
    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        # Rebuild the CPC encoder/AR networks from the checkpoint's config,
        # then restore weights (non-strict, tolerating extra/missing keys).
        locArgs = get_default_cpc_config()
        checkpoint = torch.load(ckpt, map_location='cpu')
        loadArgs(locArgs, argparse.Namespace(**checkpoint['config']))
        encoderNet = getEncoder(locArgs)
        arNet = getAR(locArgs)
        self.model = cpcmodel(encoderNet, arNet)
        self.model.load_state_dict(checkpoint['weights'], strict=False)
        # Unless the caller registered hooks, expose encoder features
        # (transposed to (B, T, C)) and the AR network outputs.
        if (len(self.hooks) == 0):
            self.add_hook('self.model.gEncoder', (lambda input, output: output.transpose(1, 2)))
            self.add_hook('self.model.gAR', (lambda input, output: output))
    def get_downsample_rates(self, key: str) -> int:
        # Waveform-to-feature downsampling factor of the CPC encoder.
        return 160
    def forward(self, wavs):
        """Run the CPC model on a batch of variable-length waveforms.

        No explicit return: representations are presumably collected by the
        hooks registered above (UpstreamBase convention) — confirm.
        """
        padded_wav = pad_sequence(wavs, batch_first=True)
        features = self.model(padded_wav.unsqueeze(1), None)[0]
def prepare_timit(data_folder, save_json_train, save_json_valid, save_json_test, phn_set=39, uppercase=False, skip_prep=False):
    """Create json annotation files for the TIMIT train/valid/test splits.

    Arguments
    ---------
    data_folder : str
        Root of the TIMIT corpus.
    save_json_train, save_json_valid, save_json_test : str
        Output paths for the json annotation files.
    phn_set : int
        Phoneme-set selector forwarded to create_json (default 39).
    uppercase : bool
        Set True when the corpus uses upper-case paths.
    skip_prep : bool
        When True, do nothing.
    """
    if skip_prep:
        return
    # Dev and test speakers; both dev and test sentences live under the
    # corpus 'test' directory and are separated by speaker below.
    (dev_spk, test_spk) = _get_speaker()
    # SA sentences are read by every speaker and are always excluded.
    avoid_sentences = ['sa1', 'sa2']
    extension = ['.wav']
    if uppercase:
        avoid_sentences = [item.upper() for item in avoid_sentences]
        extension = [item.upper() for item in extension]
        dev_spk = [item.upper() for item in dev_spk]
        test_spk = [item.upper() for item in test_spk]
    if skip([save_json_train, save_json_valid, save_json_test]):
        logger.info('Skipping preparation, completed in previous run.')
        return
    _check_timit_folders(uppercase, data_folder)
    msg = 'Creating json files for the TIMIT Dataset..'
    logger.info(msg)
    # Valid and test are both drawn from 'test', filtered by match_or.
    splits = ['train', 'test', 'test']
    annotations = [save_json_train, save_json_valid, save_json_test]
    match_or = [None, dev_spk, test_spk]
    for (split, save_file, match) in zip(splits, annotations, match_or):
        if uppercase:
            match_lst = (extension + [split.upper()])
        else:
            match_lst = (extension + [split])
        wav_lst = get_all_files(data_folder, match_and=match_lst, match_or=match, exclude_or=avoid_sentences)
        # (Removed an unreachable debug `print` guarded by split == 'dev':
        # `split` only ever takes the values 'train' and 'test'.)
        create_json(wav_lst, save_file, uppercase, phn_set)
def get_args():
    """Parse command-line arguments for SemEval evaluation.

    Returns
    -------
    argparse.Namespace
        Data/eval/store directories, run and step selection, the
        classification threshold, and the ensemble flag.
    """
    parser = argparse.ArgumentParser()
    # (Removed an unused `home = os.path.expanduser('~')` local.)
    data_dir = os.path.join('data', 'semeval')
    eval_dir = os.path.join('out/semeval', 'basic-class')
    store_dir = os.path.join('semeval', 'store')
    parser.add_argument('-d', '--data_dir', default=data_dir)
    parser.add_argument('-e', '--eval_dir', default=eval_dir)
    parser.add_argument('-s', '--store_dir', default=store_dir)
    parser.add_argument('--run_ids')
    parser.add_argument('--eval_name', default='test')
    parser.add_argument('--eval_period', type=int, default=200)
    parser.add_argument('--start_step', type=int)
    parser.add_argument('--end_step', type=int)
    parser.add_argument('--steps', default='')
    parser.add_argument('--threshold', type=float, default=0.5)
    parser.add_argument('--ensemble', action='store_true')
    return parser.parse_args()
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for RobertaTokenizer built on a tiny hand-crafted BPE vocab."""
    # Tokenizer class exercised by the shared TokenizerTesterMixin tests.
    tokenizer_class = RobertaTokenizer

    def setUp(self):
        """Write a minimal vocab.json / merges.txt pair into the mixin's tmp dir."""
        super(RobertaTokenizationTest, self).setUp()
        # 'G' prefixes tokens that follow a space in this toy vocab —
        # presumably a stand-in for the GPT-2 byte-level space marker; confirm.
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'G', 'Gl', 'Gn', 'Glo', 'Glow', 'er', 'Glowest', 'Gnewer', 'Gwider', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'G l', 'Gl o', 'Glo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(vocab_tokens) + '\n'))
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        """Load a RobertaTokenizer from the temp files with the toy special tokens."""
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self):
        """Input/expected-output pair used by the mixin's round-trip tests."""
        input_text = 'lower newer'
        output_text = 'lower newer'
        return (input_text, output_text)

    def test_full_tokenizer(self):
        """Tokenization and token->id conversion against the toy vocab."""
        tokenizer = RobertaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['Glow', 'er', 'G', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        # The appended unknown token must map to the <unk> id (19).
        input_tokens = (tokens + [tokenizer.unk_token])
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        """Integration check against a real pretrained vocab.

        NOTE(review): the name lacks the test_ prefix, so unittest will not
        collect it automatically — confirm whether that is intentional.
        """
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(tokenizer.encode('Hello world! cece herlolip 418', add_special_tokens=False), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2])

    def test_sequence_builders(self):
        """build_inputs_with_special_tokens must agree with encode(add_special_tokens=True)."""
        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode('sequence builders', add_special_tokens=True)
        encoded_pair_from_decode = tokenizer.encode('sequence builders', 'multi-sequence build', add_special_tokens=True)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert (encoded_sentence == encoded_text_from_decode)
        assert (encoded_pair == encoded_pair_from_decode)
class TruncationOpManagerInference():
    """Inference-time quantization manager.

    Loads the configured quantizers and, when enabled, monkey-patches the
    torch.nn layer classes (Linear/Conv2d/BatchNorm2d) with id-tracking
    variants; disable() restores the originals.
    """

    def __load_quantizer__(self, qtype, qparams):
        """Instantiate the quantizer registered for `qtype`; returns (quantizer, params).

        NOTE(review): rstrip('') strips nothing, so qtype_name is always
        `qtype` verbatim — possibly a digit-stripping call was intended; confirm.
        """
        qtype_name = qtype.rstrip('')
        quant_params = (qparams[qtype_name] if (qtype_name in qparams) else {})
        # Quantizer factories are looked up in qtypes under '<name>_quantizer'.
        quantizer = qtypes.__dict__[(qtype_name + '_quantizer')](qtype, quant_params)
        return (quantizer, quant_params)

    def __init__(self, args, qparams):
        self.verbose = False
        self.activation_quantizer = None
        # Keep the unpatched nn classes so disable() can restore them.
        self.origin_linear = nn.Linear
        self.origin_conv2d = nn.Conv2d
        self.origin_batch_norm = nn.BatchNorm2d
        if (args.qtype is not None):
            self.quantize = True
            (self.activation_quantizer, _) = self.__load_quantizer__(args.qtype, qparams)
            (self.linear_layer_quantizer, _) = self.__load_quantizer__('int8', qparams)
            (self.weights_quantizer, _) = self.__load_quantizer__('int8', qparams)
            (self.quantizer_4bit, _) = self.__load_quantizer__('int4', qparams)
            (self.quantizer_8bit, _) = self.__load_quantizer__('int8', qparams)

    def set_8bit_list(self, ignore_list):
        # Stat ids on this list are forced to the 8-bit quantizer in quantize_instant().
        self.ignore_ids = ignore_list

    def enable(self):
        """Patch the nn layer classes with their quantized variants."""
        self.quantize_linear()
        self.quantize_conv2d()
        self.quantize_batch_norm()

    def disable(self):
        """Restore the original, unpatched nn layer classes."""
        nn.Linear = self.origin_linear
        nn.Conv2d = self.origin_conv2d
        nn.BatchNorm2d = self.origin_batch_norm

    def quantize_matmul(self):
        """Patch Tensor.matmul to quantize both operands and the result.

        NOTE(review): self.origin_matmul is never assigned in this class, so
        calling the patched matmul as written would raise AttributeError —
        confirm it is set elsewhere before use.
        """
        def quantized_matmul(tensor1, tensor2):
            tensor1_ = attacher.pytorch_attach(tensor1, self.activation_quantizer, None)
            tensor2_ = attacher.pytorch_attach(tensor2, self.activation_quantizer, None)
            res = self.origin_matmul(tensor1_, tensor2_)
            return attacher.pytorch_attach(res, self.activation_quantizer, None)
        torch.Tensor.matmul = quantized_matmul

    def quantize_linear(self):
        nn.Linear = LinearWithId

    def quantize_conv2d(self):
        nn.Conv2d = Conv2dWithId

    def quantize_batch_norm(self):
        nn.BatchNorm2d = BatchNorm2dWithId

    def quantize_tensor(self, tensor, fprop=True, bprop=True):
        # Only the forward direction is attached; `bprop` is currently ignored.
        fprop = (self.activation_quantizer if fprop else None)
        return attacher.pytorch_attach(tensor, fprop, None)

    def quantize_instant(self, tensor, tag='', stat_id=None):
        """Quantize `tensor` immediately, choosing a quantizer by tag/stat id."""
        ignore_cond = False
        if (stat_id is not None):
            ignore_cond = np.array([(l == stat_id) for l in self.ignore_ids]).any()
        if ignore_cond:
            return self.quantizer_8bit(tensor, tag, stat_id)
        elif ((tag == 'activation_linear') and (tensor.shape[1] == 1000)):
            # 1000-wide linear activations (presumably the classifier head)
            # get the dedicated linear-layer quantizer — TODO confirm.
            return self.linear_layer_quantizer(tensor, tag, stat_id)
        elif (tag == 'activation'):
            return self.activation_quantizer(tensor, tag, stat_id)
        else:
            return self.weights_quantizer(tensor, tag, stat_id)
def load_checkpoint(args, trainer, epoch_itr):
    """Restore trainer and iterator state from args.save_dir/args.restore_file.

    Returns True when a checkpoint with extra state was loaded, else False.
    """
    os.makedirs(args.save_dir, exist_ok=True)
    checkpoint_path = os.path.join(args.save_dir, args.restore_file)
    if not os.path.isfile(checkpoint_path):
        return False
    # NOTE(review): eval() of a CLI-provided string is unsafe with untrusted
    # input; verify args.optimizer_overrides always comes from a trusted CLI.
    overrides = eval(args.optimizer_overrides)
    extra_state = trainer.load_checkpoint(checkpoint_path, args.reset_optimizer, args.reset_lr_scheduler, overrides)
    if extra_state is None:
        return False
    epoch_itr.load_state_dict(extra_state['train_iterator'])
    print('| loaded checkpoint {} (epoch {} {} updates)'.format(checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))
    # Re-sync the LR scheduler with the restored epoch/update counters.
    trainer.lr_step(epoch_itr.epoch)
    trainer.lr_step_update(trainer.get_num_updates())
    if 'best' in extra_state:
        save_checkpoint.best = extra_state['best']
    return True
def get_backbone(backbone_arch='resnet50', backbone_config=None):
    """Instantiate a feature-extraction backbone by architecture name.

    Parameters
    ----------
    backbone_arch : str
        Architecture name, matched case-insensitively against 'resnet'
        and 'dinov2'.
    backbone_config : dict | None
        Extra keyword arguments forwarded to the backbone constructor.
        (Replaces the previous mutable `{}` default argument.)

    Raises
    ------
    ValueError
        For unsupported architectures (the function previously fell
        through and silently returned None).
    """
    backbone_config = backbone_config or {}
    arch = backbone_arch.lower()
    if 'resnet' in arch:
        return backbones.ResNet(backbone_arch, **backbone_config)
    if 'dinov2' in arch:
        return backbones.DINOv2(model_name=backbone_arch, **backbone_config)
    raise ValueError(f'Unknown backbone architecture: {backbone_arch!r}')
def test_ner():
    """Smoke-test Corrector over a list of short sentences, printing the
    segmentation, tokenization, detection and correction for each one."""
    from pycorrector.utils.tokenizer import segment
    from pycorrector import Corrector
    c = Corrector()
    c.check_corrector_initialized()
    c.check_detector_initialized()
    # NOTE(review): these sentences appear to have lost their original
    # (likely non-ASCII) characters somewhere upstream — confirm.
    error_sentences = ['', ',', ',', '', '', '', '', '', '', '', ',,', '', ',,,,']
    for sentence in error_sentences:
        print(sentence)
        print('segment:', segment(sentence))
        print('tokenize:', c.tokenizer.tokenize(sentence))
        print(c.detect(sentence))
        corrected = c.correct(sentence)
        print('original sentence:{} => correct sentence:{}'.format(sentence, corrected))
class MySQL(Estimator):
    """Cardinality estimator backed by MySQL's native column histograms.

    Construction runs ANALYZE TABLE ... UPDATE HISTOGRAM over every column
    of the table; query() derives a row-count estimate from the 'filtered'
    percentage reported by EXPLAIN.
    """

    def __init__(self, table, bucket, seed):
        super(MySQL, self).__init__(table=table, version=table.version, bucket=bucket, seed=seed)
        self.conn = mysql.connector.connect(user=MYSQL_USER, password=MYSQL_PSWD, host=MYSQL_HOST, port=MYSQL_PORT, database=MYSQL_DB)
        # Autocommit so ANALYZE/EXPLAIN results are visible immediately.
        self.conn.autocommit = True
        self.cursor = self.conn.cursor()
        start_stmp = time.time()
        # Build `bucket`-bucket histograms for all columns of the table.
        self.cursor.execute(f"analyze table `{self.table.name}` update histogram on {','.join([c.name for c in table.columns.values()])} with {bucket} buckets;")
        rows = self.cursor.fetchall()
        L.info(f'{rows}')
        dur_min = ((time.time() - start_stmp) / 60)
        L.info(f'construct statistics finished, using {dur_min:.4f} minutes')

    def query(self, query):
        """Estimate the cardinality of `query`; returns (card, duration_ms)."""
        sql = 'explain {}'.format(query_2_sql(query, self.table, aggregate=False, dbms='mysql'))
        start_stmp = time.time()
        self.cursor.execute(sql)
        dur_ms = ((time.time() - start_stmp) * 1000.0)
        res = self.cursor.fetchall()
        assert (len(res) == 1), res
        # res[0][10] is assumed to be EXPLAIN's 'filtered' percentage column
        # — TODO confirm against the MySQL version in use.
        card = np.round(((0.01 * res[0][10]) * self.table.row_num))
        return (card, dur_ms)
def build_ref_doc(args):
    """Build one reference document from a packed argument sequence.

    Layout: (doc, lang, format, kwds, *extra); the extra positional
    arguments are forwarded to the selected builder method.
    """
    (doc, lang, fmt, kwds), extra = args[:4], args[4:]
    if fmt == 'inventory':
        # Inventory builds must not use the multi-document inventory.
        kwds['use_multidoc_inventory'] = False
    getattr(ReferenceSubBuilder(doc, lang), fmt)(*extra, **kwds)
def evaluate(args, model, tokenizer, mode, prefix=''):
    """Evaluate `model` on the dataset for `mode` and write a results file.

    Returns (result_dict, raw_prediction_scores). Metrics are computed by
    eval_metrics[args.task_name]; predictions are argmaxed for
    classification or squeezed for regression.
    """
    eval_task = args.task_name
    eval_output_dir = args.output_dir
    eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, mode)
    # Only the main process (local_rank -1 or 0) creates the output dir.
    if ((not os.path.exists(eval_output_dir)) and (args.local_rank in [(- 1), 0])):
        os.makedirs(eval_output_dir)
    args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Wrap for multi-GPU evaluation unless already wrapped.
    if ((args.n_gpu > 1) and (not isinstance(model, torch.nn.DataParallel))):
        model = torch.nn.DataParallel(model)
    logger.info('***** Running evaluation {} *****'.format(prefix))
    logger.info(' Num examples = %d', len(eval_dataset))
    logger.info(' Batch size = %d', args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    for batch in tqdm(eval_dataloader, desc='Evaluating'):
        model.eval()
        batch = tuple((t.to(args.device) for t in batch))
        with torch.no_grad():
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
            # distilbert takes no token_type_ids; only bert/xlnet/albert use them.
            if (args.model_type != 'distilbert'):
                inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet', 'albert']) else None)
            outputs = model(**inputs)
            (tmp_eval_loss, logits) = outputs[:2]
            eval_loss += tmp_eval_loss.mean().item()
        nb_eval_steps += 1
        # Accumulate logits and labels on CPU across batches.
        if (preds is None):
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs['labels'].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
    eval_loss = (eval_loss / nb_eval_steps)
    # Keep the raw scores before argmax/squeeze so callers can rank.
    preds_score = preds.copy()
    if (args.output_mode == 'classification'):
        preds = np.argmax(preds, axis=1)
    elif (args.output_mode == 'regression'):
        preds = np.squeeze(preds)
    (result, _) = eval_metrics[args.task_name](out_label_ids, preds, args.label_list)
    result['loss'] = eval_loss
    # 'dev' results are written under the 'eval' filename.
    output_eval_file = os.path.join(args.output_dir, (args.result_prefix + '{}_results.txt'.format((mode if (mode != 'dev') else 'eval'))))
    with open(output_eval_file, 'w') as writer:
        logger.info('***** Eval results {} *****'.format(prefix))
        for key in result.keys():
            logger.info(' %s = %s', key, str(result[key]))
            writer.write(('%s = %s\n' % (key, str(result[key]))))
    return (result, preds_score)
def res_block(x_in):
    """Residual block: two 3x3 same-padding convolutions (ReLU after the
    first) with an identity skip connection; the channel count matches
    the input's last dimension."""
    channels = x_in.shape[-1]
    y = Conv2D(channels, 3, padding='same', activation='relu')(x_in)
    y = Conv2D(channels, 3, padding='same')(y)
    return Add()([x_in, y])
class TestPalmyraWindowService():
    """Tests for the writer/palmyra-large window service against a fixed
    prompt (TEST_PROMPT, defined elsewhere) with its expected tokenization
    pinned below."""
    # Expected token count of TEST_PROMPT.
    TEST_PROMPT_LENGTH: int = 51
    # Expected token ids for TEST_PROMPT.
    TEST_TOKEN_IDS: List[int] = [464, 3337, 329, 4992, 319, 5693, 32329, 357, 9419, 23264, 8, 318, 281, 987, 40625, 10219, 4642, 503, 286, 262, 13863, 5136, 329, 5524, 12, 19085, 1068, 35941, 9345, 357, 7801, 40, 8, 326, 12031, 284, 787, 7531, 14901, 287, 262, 2050, 11, 2478, 11, 290, 14833, 286, 8489, 4981, 13]
    # Expected token strings for TEST_PROMPT.
    TEST_TOKENS: List[str] = ['The', ' Center', ' for', ' Research', ' on', ' Foundation', ' Models', ' (', 'CR', 'FM', ')', ' is', ' an', ' inter', 'disciplinary', ' initiative', ' born', ' out', ' of', ' the', ' Stanford', ' Institute', ' for', ' Human', '-', 'Cent', 'ered', ' Artificial', ' Intelligence', ' (', 'HA', 'I', ')', ' that', ' aims', ' to', ' make', ' fundamental', ' advances', ' in', ' the', ' study', ',', ' development', ',', ' and', ' deployment', ' of', ' foundation', ' models', '.']

    def setup_method(self):
        """Build the window service with a tokenizer service rooted in a temp dir."""
        self.temporary_directory = TemporaryDirectory()
        service: TokenizerService = get_tokenizer_service(self.temporary_directory.name)
        self.window_service = WindowServiceFactory.get_window_service('writer/palmyra-large', service)

    def teardown_method(self, method):
        self.temporary_directory.cleanup()

    def test_max_request_length(self):
        assert (self.window_service.max_request_length == 2048)

    def test_encode(self):
        assert (self.window_service.encode(TEST_PROMPT).token_values == self.TEST_TOKEN_IDS)

    def test_decode(self):
        """Encoding then decoding must round-trip the prompt exactly."""
        assert (self.window_service.decode(self.window_service.encode(TEST_PROMPT).tokens) == TEST_PROMPT)

    def test_tokenize(self):
        assert (self.window_service.tokenize(TEST_PROMPT) == self.TEST_TOKENS)

    def test_tokenize_and_count(self):
        assert (self.window_service.get_num_tokens(TEST_PROMPT) == self.TEST_PROMPT_LENGTH)

    def test_fits_within_context_window(self):
        """Fits exactly at the boundary; one token more must not fit."""
        assert self.window_service.fits_within_context_window(TEST_PROMPT, (self.window_service.max_request_length - self.TEST_PROMPT_LENGTH))
        assert (not self.window_service.fits_within_context_window(TEST_PROMPT, ((self.window_service.max_request_length - self.TEST_PROMPT_LENGTH) + 1)))

    def test_truncate_from_right(self):
        """An over-long prompt is truncated down to exactly max_request_length tokens."""
        long_prompt: str = (TEST_PROMPT * 200)
        assert (not self.window_service.fits_within_context_window(long_prompt))
        truncated_long_prompt: str = self.window_service.truncate_from_right(long_prompt)
        assert (self.window_service.get_num_tokens(truncated_long_prompt) == self.window_service.max_request_length)
        assert self.window_service.fits_within_context_window(truncated_long_prompt)
class LinearActivation(nn.Module):
    """Dropout -> SpectralLinear -> activation, applied in that order.

    Parameters
    ----------
    input_dim, output_dim : int
        Sizes of the linear transform.
    dropout_prob : float | None
        Dropout probability; None disables dropout (Identity).
    k_lipschitz : float | None
        Lipschitz constant forwarded to SpectralLinear.
    activation : nn.Module
        Activation applied last; None disables it (Identity).
    bias : bool
        Whether the linear layer has a bias term.
    """

    def __init__(self, input_dim: int, output_dim: int, dropout_prob: Optional[float]=None, k_lipschitz: Optional[float]=None, activation: nn.Module=nn.ReLU(), bias: bool=True):
        super().__init__()
        self.dropout = nn.Identity()
        if (dropout_prob is not None):
            self.dropout = nn.Dropout(p=dropout_prob)
        self.linear = SpectralLinear(input_dim, output_dim, k_lipschitz, bias=bias)
        self.act = nn.Identity()
        if (activation is not None):
            self.act = activation

    def forward(self, x: Tensor) -> Tensor:
        return self.act(self.linear(self.dropout(x)))

    def reset_parameters(self):
        """Re-initialize the linear layer's weights.

        Bug fix: the previous version also called
        ``self.dropout.reset_parameters()``, but neither ``nn.Dropout`` nor
        ``nn.Identity`` defines ``reset_parameters`` — that call always
        raised AttributeError. Dropout has no parameters to reset.
        """
        self.linear.reset_parameters()
@pytest.mark.parametrize('value, expected_lines', [pytest.param(False, OrderedSet([21, 24])), pytest.param(True, OrderedSet([21, 22]))])
def test_tracking_covered_statements_bool_predicate(simple_module, value, expected_lines):
    """Line-coverage instrumentation must record exactly the lines executed
    for each boolean input of simple_module.bool_predicate.

    Fix: the decorator had lost its `@pytest.mark` prefix — a bare
    `.parametrize(...)` line is a syntax error.
    """
    tracer = ExecutionTracer()
    adapter = LineCoverageInstrumentation(tracer)
    transformer = InstrumentationTransformer(tracer, [adapter])
    # Replace the function's code object with the instrumented version.
    simple_module.bool_predicate.__code__ = transformer.instrument_module(simple_module.bool_predicate.__code__)
    tracer.current_thread_identifier = threading.current_thread().ident
    simple_module.bool_predicate(value)
    assert tracer.get_trace().covered_line_ids
    assert (tracer.lineids_to_linenos(tracer.get_trace().covered_line_ids) == expected_lines)
def get_extensions():
    """Collect the C++/CUDA extension modules found under csrc/.

    Each csrc/<name>.cpp becomes the module torch_geometric_autoscale._<name>;
    matching cpu/<name>_cpu.cpp and (when WITH_CUDA) cuda/<name>_cuda.cu
    sources are appended when present. OpenMP flags are added when the
    detected torch parallel backend supports it.
    """
    ext_cls = CppExtension
    macros = []
    libs = []
    compile_args = {'cxx': []}
    link_args = []
    backend_info = parallel_info()
    if ('parallel backend: OpenMP' in backend_info) and ('OpenMP not found' not in backend_info):
        compile_args['cxx'].append('-DAT_PARALLEL_OPENMP')
        # MSVC spells the OpenMP switch differently.
        compile_args['cxx'].append('/openmp' if sys.platform == 'win32' else '-fopenmp')
    else:
        print('Compiling without OpenMP...')
    if WITH_CUDA:
        ext_cls = CUDAExtension
        macros.append(('WITH_CUDA', None))
        env_flags = os.getenv('NVCC_FLAGS', '')
        nvcc_flags = env_flags.split(' ') if env_flags != '' else []
        nvcc_flags.extend(['-arch=sm_35', '--expt-relaxed-constexpr'])
        compile_args['nvcc'] = nvcc_flags
    src_dir = osp.join('csrc')
    modules = []
    for main in glob.glob(osp.join(src_dir, '*.cpp')):
        # '<name>.cpp' -> '<name>'
        name = main.split(os.sep)[-1][:-4]
        sources = [main]
        cpu_src = osp.join(src_dir, 'cpu', f'{name}_cpu.cpp')
        if osp.exists(cpu_src):
            sources.append(cpu_src)
        cuda_src = osp.join(src_dir, 'cuda', f'{name}_cuda.cu')
        if WITH_CUDA and osp.exists(cuda_src):
            sources.append(cuda_src)
        modules.append(ext_cls('torch_geometric_autoscale._' + name, sources, include_dirs=[src_dir], define_macros=macros, extra_compile_args=compile_args, extra_link_args=link_args, libraries=libs))
    return modules
def add_CombinerServicer_to_server(servicer, server):
    """Register the Combiner servicer's RPC handlers on a grpc server.

    The four *Stream methods are unary-stream subscriptions keyed by a
    ClientAvailableMessage; SendModelUpdate and SendModelValidation are
    plain unary-unary calls returning a Response.
    """
    rpc_method_handlers = {'ModelUpdateRequestStream': grpc.unary_stream_rpc_method_handler(servicer.ModelUpdateRequestStream, request_deserializer=fedn__pb2.ClientAvailableMessage.FromString, response_serializer=fedn__pb2.ModelUpdateRequest.SerializeToString), 'ModelUpdateStream': grpc.unary_stream_rpc_method_handler(servicer.ModelUpdateStream, request_deserializer=fedn__pb2.ClientAvailableMessage.FromString, response_serializer=fedn__pb2.ModelUpdate.SerializeToString), 'ModelValidationRequestStream': grpc.unary_stream_rpc_method_handler(servicer.ModelValidationRequestStream, request_deserializer=fedn__pb2.ClientAvailableMessage.FromString, response_serializer=fedn__pb2.ModelValidationRequest.SerializeToString), 'ModelValidationStream': grpc.unary_stream_rpc_method_handler(servicer.ModelValidationStream, request_deserializer=fedn__pb2.ClientAvailableMessage.FromString, response_serializer=fedn__pb2.ModelValidation.SerializeToString), 'SendModelUpdate': grpc.unary_unary_rpc_method_handler(servicer.SendModelUpdate, request_deserializer=fedn__pb2.ModelUpdate.FromString, response_serializer=fedn__pb2.Response.SerializeToString), 'SendModelValidation': grpc.unary_unary_rpc_method_handler(servicer.SendModelValidation, request_deserializer=fedn__pb2.ModelValidation.FromString, response_serializer=fedn__pb2.Response.SerializeToString)}
    generic_handler = grpc.method_handlers_generic_handler('grpc.Combiner', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
def mutually_orthogonal_latin_squares(k, n, partitions=False, check=True):
    """Return k mutually orthogonal latin squares (MOLS) of order n.

    Tries, in order: the trivial n == 1 case, the MOLS_constructions
    database, and an orthogonal array OA(k+2, n). With partitions=True,
    returns the row/column partitions plus one partition per square
    (grouping cells by symbol) instead of the matrices themselves.

    Raises EmptySetError when the MOLS provably do not exist,
    NotImplementedError when no construction is known, and
    TypeError/ValueError for invalid k.
    """
    from sage.combinat.designs.orthogonal_arrays import orthogonal_array
    from sage.matrix.constructor import Matrix
    from .database import MOLS_constructions
    if (k is None):
        raise TypeError('k must be a positive integer')
    # NOTE(review): this try/except immediately re-raises the same
    # TypeError, so it is a no-op wrapper around the Integer(k) check.
    try:
        Integer(k)
    except TypeError:
        raise
    if (k < 0):
        raise ValueError('k must be positive')
    if (n == 1):
        # Any two 1x1 squares are trivially orthogonal.
        matrices = ([Matrix([[0]])] * k)
    elif (k >= n):
        raise EmptySetError('there exist at most n-1 MOLS of size n if n>=2')
    elif ((n in MOLS_constructions) and (k <= MOLS_constructions[n][0])):
        # Direct construction from the database.
        (_, construction) = MOLS_constructions[n]
        matrices = construction()[:k]
    elif (orthogonal_array((k + 2), n, existence=True) is not Unknown):
        if orthogonal_array((k + 2), n, existence=True):
            pass
        else:
            raise EmptySetError('there does not exist {} MOLS of order {}!'.format(k, n))
        # Derive the squares from an OA(k+2, n): after sorting, the first
        # two columns index row/column, the remaining k give the symbols.
        OA = sorted(orthogonal_array((k + 2), n, check=False))
        matrices = [[] for _ in repeat(None, k)]
        for L in OA:
            for i in range(2, (k + 2)):
                matrices[(i - 2)].append(L[i])
        # Reshape each flat symbol list into n rows of length n.
        matrices = [[M[(i * n):((i + 1) * n)] for i in range(n)] for M in matrices]
        matrices = [Matrix(M) for M in matrices]
    else:
        raise NotImplementedError("I don't know how to build {} MOLS of order {}".format(k, n))
    if check:
        assert are_mutually_orthogonal_latin_squares(matrices)
    if (partitions is True):
        # Row partition and column partition of the n*n cell indices ...
        partitions = [[[((i * n) + j) for j in range(n)] for i in range(n)], [[((j * n) + i) for j in range(n)] for i in range(n)]]
        # ... plus, per square, the partition grouping cells by symbol.
        for m in matrices:
            partition = [[] for _ in repeat(None, n)]
            for i in range(n):
                for j in range(n):
                    partition[m[(i, j)]].append(((i * n) + j))
            partitions.append(partition)
    if partitions:
        return partitions
    else:
        return matrices
class BlockGroup(nn.Module):
    """A group of residual Blocks sharing `num_channels`: the first Block
    applies `strides`, all later ones use stride 1."""
    num_channels: int = None
    num_blocks: int = None
    strides: int = None

    def setup(self):
        assert self.num_blocks > 0
        head = Block(num_channels=self.num_channels, strides=self.strides)
        tail = [Block(num_channels=self.num_channels, strides=1) for _ in range(self.num_blocks - 1)]
        self.blocks = [head] + tail

    def __call__(self, x):
        # reverse_compose presumably threads x through the blocks in order.
        return reverse_compose(x, self.blocks)
class RandomScaledBreakoutWorld(BreakoutWorld):
    """BreakoutWorld variant that samples a fresh global scale factor
    uniformly from [scale_range_start, scale_range_end) on each reset."""
    # Bounds of the uniform scale distribution.
    scale_range_start = 0.95
    scale_range_end = 1.0

    def reset_world(self):
        """Reset the base world, then draw a new scale factor."""
        super(RandomScaledBreakoutWorld, self).reset_world()
        self.scale = self.np_random.uniform(self.scale_range_start, self.scale_range_end)

    def parameters(self):
        """Return the base parameters augmented with the current scale.

        NOTE(review): super(...).parameters is accessed without calling it,
        which only works if the base class exposes `parameters` as a
        property — yet this subclass shadows it with a plain method.
        Confirm against BreakoutWorld's definition.
        """
        parameters = super(RandomScaledBreakoutWorld, self).parameters
        parameters.update({'scale': self.scale})
        return parameters
def main():
    """CLI entry point: read discourse corpora (.dis/.rs3/.lisp) and write
    discourse mrg and edu files to the output directory."""
    parser = argparse.ArgumentParser(description='Read discourse corpora (.dis, .rs3, .lisp(thiago)) and output desired files (discourse mrg files and edu files).')
    parser.add_argument('--treebank', default='./DataSets/RST/RST_multilingual/gum/rst/rstweb', dest='treebank', action='store', help='Input directory to read (RST files)')
    parser.add_argument('--outpath', default='./DataSets/RST/RST_multilingual/gum_transferred', dest='outpath', action='store', help='Output directory')
    # Help text fixed: the actual default is 'rs3', not 'dis'.
    parser.add_argument('--format', dest='format', action='store', choices=['rs3', 'dis', 'thiago'], default='rs3', help='Format (Default=rs3)')
    # NOTE(review): with default=True this store_true flag is inert — the
    # value is always True regardless of the flag. Changing the default
    # would alter behavior, so it is only flagged here.
    parser.add_argument('--mapping', default=True, dest='mapping', action='store_true', help='If True, map the relations using the mapping defined in relationSet.py, i.e. the 18 coarse grained classes as proposed in Carlson et al. 2001, taking into account the modifications proposed for the other corpora defined in EACL17 paper. (Default=True)')
    # Help text fixed: store_true defaults to False, not True.
    parser.add_argument('--draw', dest='draw', action='store_true', help='Draw a ps file for each tree (Default=False)')
    args = parser.parse_args()
    if not os.path.isdir(args.outpath):
        os.mkdir(args.outpath)
    read(args.treebank, args.outpath, mapping=args.mapping, draw=args.draw, format=args.format)
def ground_truth(N, seq):
    """Nussinov-style dynamic program over seq[0..N-1].

    dp[i, j] holds the best score for the subsequence seq[i..j], where a
    pairing of positions (i, j) contributes match_gt(seq[i], seq[j]).
    Returns the full N x N int32 DP table.
    """
    dp = np.zeros((N, N), np.int32)
    for i in range(N - 1, -1, -1):
        for j in range(i + 1, N):
            best = dp[(i, j)]
            if (j - 1) >= 0:
                best = max(best, dp[(i, j - 1)])       # leave j unpaired
            if (i + 1) < N:
                best = max(best, dp[(i + 1, j)])       # leave i unpaired
            if ((j - 1) >= 0) and ((i + 1) < N):
                if i < (j - 1):
                    # Pair (i, j) around the inner subproblem.
                    best = max(best, dp[(i + 1, j - 1)] + match_gt(seq[i], seq[j]))
                else:
                    best = max(best, dp[(i + 1, j - 1)])
            for k in range(i + 1, j):
                # Bifurcation: split the interval at k.
                best = max(best, dp[(i, k)] + dp[(k + 1, j)])
            dp[(i, j)] = best
    return dp
class Seq2Nugget(object):
def __init__(self, train_config, detection_config, boundary_config):
    # All construction work is delegated to initialize().
    self.initialize(train_config, detection_config, boundary_config)
def initialize(self, train_config, detection_config, boundary_config):
    """Build dictionaries, both models, losses, optimizers and evaluators.

    Every key of `train_config` is copied onto self as an attribute; the
    two model configs are augmented with the shared vocabularies before
    the models are constructed.
    """
    # Promote all training hyper-parameters to instance attributes.
    for key in train_config:
        self.__dict__[key] = train_config[key]
    # Word / POS / character vocabularies restored from disk.
    self.word_dict = WordDictionary()
    self.word_dict.restore(self.word_dict_file)
    self.pos_dict = WordDictionary()
    self.pos_dict.restore(self.pos_dict_file)
    self.char_dict = CharDictionary()
    self.char_dict.restore(self.char_dict_file)
    self.detection_model_config = detection_config
    self.boundary_model_config = boundary_config
    # Both models share the same id mappings.
    self.detection_model_config['word2id'] = self.word_dict.word2id
    self.detection_model_config['pos2id'] = self.pos_dict.word2id
    self.detection_model_config['char2id'] = self.char_dict.word2id
    self.boundary_model_config['word2id'] = self.word_dict.word2id
    self.boundary_model_config['pos2id'] = self.pos_dict.word2id
    self.boundary_model_config['char2id'] = self.char_dict.word2id
    self.detection_model = DetectionModel(**self.detection_model_config)
    self.boundary_model = BoundaryModel(**self.boundary_model_config)
    self.detection_model.to(device=self.device)
    self.boundary_model.to(device=self.device)
    self.joint_loss_fn = BagLossWithMarginLayer()
    self.detection_loss_fn = BagLossLayer()
    self.boundary_loss_fn = MaxMarginLossLayer(self.max_seq_len, self.max_margin).to(device=self.device)
    self.detection_optimizer = torch.optim.Adadelta(self.detection_model.parameters(), lr=self.detection_learning_rate)
    self.boundary_optimizer = torch.optim.Adadelta(self.boundary_model.parameters(), lr=self.boundary_learning_rate)
    self.result_logger = Seq2NuggetLogger()
    self.match_eval = ExactMatchEvaluator()
    self.det_eval = DetectionEvaluator()
    self.current_epoch = (- 1)
    # Epoch counters since the last learning-rate decay.
    self.detection_lr_decay_before = 0
    self.boundary_lr_decay_before = 0
    if self.is_save_config:
        self.save_config(train_config, detection_config, boundary_config)
def update_detection_lr(self):
if ((self.detection_lr_decay_before >= self.weight_decay_round) and ((self.current_epoch - self.result_logger.best_detection_epoch()) >= self.weight_decay_round)):
self.detection_lr_decay_before = 0
for g in self.detection_optimizer.param_groups:
g['lr'] /= 2
return True
self.detection_lr_decay_before += 1
return False
def update_boundary_lr(self):
if ((self.boundary_lr_decay_before >= self.weight_decay_round) and ((self.current_epoch - self.result_logger.best_boundary_epoch()) >= self.weight_decay_round)):
self.boundary_lr_decay_before = 0
for g in self.boundary_optimizer.param_groups:
g['lr'] /= 2
return True
self.boundary_lr_decay_before += 1
return False
def update_all_lr(self):
if ((self.current_epoch - self.result_logger.best_all_epoch()) >= self.weight_decay_round):
for g in self.boundary_optimizer.param_groups:
g['lr'] /= 2
for g in self.detection_optimizer.param_groups:
g['lr'] /= 2
return True
return False
def restore_best_detection(self, dev_data, test_data):
    """Reload the best saved detection model, then re-evaluate and log."""
    logger.info(('Restore previous best detection model at epoch %d.' % self.current_epoch))
    # (True, False) selects the detection model only.
    self.restore_using_best(True, False)
    self.decode_and_log(dev_data, test_data)
    logger.info(('\n' + str(self.result_logger.result_at_epoch())))
def restore_best_boundary(self, dev_data, test_data):
    """Reload the best saved boundary model, then re-evaluate and log."""
    logger.info(('Restore previous best Boundary model at epoch %d.' % self.current_epoch))
    # (False, True) selects the boundary model only.
    self.restore_using_best(False, True)
    self.decode_and_log(dev_data, test_data)
    logger.info(('\n' + str(self.result_logger.result_at_epoch())))
def train(self, train_data, dev_data=None, test_data=None):
    """Three-phase training loop: detection-only, boundary-only, then joint.

    Each phase logs metrics every epoch, checkpoints whichever model(s)
    improved, halves learning rates on stagnation (restoring the best
    snapshot after each decay), and ends by restoring the phase's best
    model.

    Fix: the loops used `xrange`, which does not exist on Python 3 and
    would raise NameError; `range` is the compatible equivalent.
    """
    # Optional warm starts from previously saved best models.
    if not self.detection_coldstart:
        self.current_epoch += 1
        logger.info('Trying to restore previous detection model at epoch %d.' % self.current_epoch)
        self.restore_best_detection(dev_data, test_data)
    if not self.boundary_coldstart:
        self.current_epoch += 1
        logger.info('Trying to restore previous boundary model at epoch %d.' % self.current_epoch)
        self.restore_best_boundary(dev_data, test_data)
    # Phase 1: detection model.
    for epoch in range(self.detection_train_epoch):
        self.current_epoch += 1
        epoch_loss = self.train_detection_epoch(train_data)
        logger.info('Training detection model at epoch %d, the loss is %f' % (self.current_epoch, epoch_loss))
        self.decode_and_log(dev_data, test_data)
        logger.info('\n' + str(self.result_logger.result_at_epoch()))
        if self.result_logger.is_best_detection():
            logger.info('Better detection performance achieved at epoch %d, saving the detection model.' % self.current_epoch)
            self.save_best(True, False)
        if self.update_detection_lr():
            self.current_epoch += 1
            logger.info('Detection Learning rate decay at epoch %d' % self.current_epoch)
            self.restore_best_detection(dev_data, test_data)
    if self.detection_train_epoch:
        self.current_epoch += 1
        logger.info('Finish training detection model at epoch %d.' % self.current_epoch)
        self.restore_best_detection(dev_data, test_data)
    # Phase 2: boundary model.
    for epoch in range(self.boundary_train_epoch):
        self.current_epoch += 1
        epoch_loss = self.train_boundary_epoch(train_data)
        logger.info('Training boundary model at epoch %d, the loss is %f' % (self.current_epoch, epoch_loss))
        self.decode_and_log(dev_data, test_data)
        logger.info('\n' + str(self.result_logger.result_at_epoch()))
        if self.result_logger.is_best_boundary():
            logger.info('Better Boundary performance achieved at epoch %d, saving the boundary model.' % self.current_epoch)
            self.save_best(False, True)
        if self.update_boundary_lr():
            self.current_epoch += 1
            logger.info('Boundary Learning rate decay at epoch %d' % self.current_epoch)
            self.restore_best_boundary(dev_data, test_data)
    if self.boundary_train_epoch:
        self.current_epoch += 1
        logger.info('Finish training boundary model at epoch %d.' % self.current_epoch)
        self.restore_best_boundary(dev_data, test_data)
    # Phase 3: joint training of both models.
    for epoch in range(self.joint_train_epoch):
        self.current_epoch += 1
        epoch_loss = self.train_joint_epoch(train_data)
        logger.info('Training joint model at epoch %d, the loss is %f' % (self.current_epoch, epoch_loss))
        self.decode_and_log(dev_data, test_data)
        logger.info('\n' + str(self.result_logger.result_at_epoch()))
        if self.train_detection_in_joint and self.result_logger.is_best_detection():
            logger.info('Better detection performance achieved at epoch %d, saving the detection model.' % self.current_epoch)
            self.save_best(True, False)
        if self.result_logger.is_best_boundary():
            logger.info('Better Boundary performance achieved at epoch %d, saving the boundary model.' % self.current_epoch)
            self.save_best(False, True)
        if self.result_logger.is_best_overall():
            logger.info('Better Overall performance achieved at epoch %d, saving the boundary model.' % self.current_epoch)
            self.save_best(False, True)
            logger.info('Save two overall best models at epoch %d.' % self.current_epoch)
            # NOTE(review): the path concatenation assumes save_dir ends
            # with a path separator — confirm.
            self.save_all_model(((self.save_dir + 'snapshot_best_overall') + '.dat'))
        # Short-circuit: boundary decay is only attempted when the
        # detection decay did not fire (same as the original `or`).
        if (self.update_detection_lr() or self.update_boundary_lr()):
            self.current_epoch += 1
            logger.info('Learning rate decay at epoch %d' % self.current_epoch)
            self.restore_best_detection(dev_data, test_data)
            self.restore_best_boundary(dev_data, test_data)
    self.current_epoch += 1
    logger.info('Finish the entire training at epoch %d, now restore previous best models.' % self.current_epoch)
    self.restore_using_best(True, True)
    self.decode_and_log(dev_data, test_data, is_save=True)
    logger.info('\n' + str(self.result_logger.result_at_epoch()))
def decode_and_log(self, dev_data=None, test_data=None, is_save=False):
    """Decode dev/test data, log exact-match and detection metrics, and
    optionally persist the decoded results for the current epoch."""
    if dev_data:
        dev_result = self.test(dev_data)
        # Exact-match (boundary) and detection precision/recall/F1.
        (dev_p, dev_r, dev_f1) = self.match_eval.eval(dev_result, dev_data.annotations)
        (dev_dp, dev_dr, dev_df1) = self.det_eval.eval(dev_result, dev_data.annotations)
        self.result_logger.update_detection_result('dev', dev_dp, dev_dr, dev_df1)
        self.result_logger.update_boundary_result('dev', dev_p, dev_r, dev_f1)
        if is_save:
            self.save_decode_result(dev_result, ('dev_epoch_%d' % self.current_epoch))
    if test_data:
        test_result = self.test(test_data)
        (test_p, test_r, test_f1) = self.match_eval.eval(test_result, test_data.annotations)
        (test_dp, test_dr, test_df1) = self.det_eval.eval(test_result, test_data.annotations)
        self.result_logger.update_detection_result('test', test_dp, test_dr, test_df1)
        self.result_logger.update_boundary_result('test', test_p, test_r, test_f1)
        if is_save:
            self.save_decode_result(test_result, ('test_epoch_%d' % self.current_epoch))
def train_detection_epoch(self, data):
    """Run one training epoch of the detection model over `data`.

    Returns the sum of per-batch detection losses. Both sub-models are put
    in train mode to mirror the sibling *_epoch methods, though only the
    detection optimizer steps here.
    """
    epoch_loss = 0.0
    self.detection_model.train()
    self.boundary_model.train()
    # Class weights are loop-invariant (negative class down-weighted, all
    # other classes weight 1.0) — build the tensor once, not per batch.
    weight = [self.negative_weight] + [1.0] * (self.detection_model.output_dim - 1)
    weight = torch.tensor(weight).to(device=self.device)
    for batch in data.mini_batches_for_train(self.batch_size):
        words = torch.tensor(batch['words'], device=self.device)
        poss = torch.tensor(batch['poss'], device=self.device)
        seq_len = torch.tensor(batch['seq_len'], device=self.device)
        chars = torch.tensor(batch['chars'], device=self.device)
        char_len = torch.tensor(batch['char_len'], device=self.device)
        cls_labels = torch.tensor(batch['cls_labels'], device=self.device)
        seq_mask = torch.tensor(batch['seq_mask'], device=self.device)
        packages = torch.tensor(batch['packages'], device=self.device)
        # NOTE: left/right boundary label tensors are not needed for
        # detection training; the original materialized them unused.
        cls_pred = self.detection_model(do_softmax=False, words=words, poss=poss, chars=chars, char_len=char_len, seq_len=seq_len)
        detection_loss = self.detection_loss_fn(cls_pred, cls_labels, packages, seq_mask, weight=weight)
        self.detection_optimizer.zero_grad()
        detection_loss.backward()
        self.detection_optimizer.step()
        epoch_loss += detection_loss.item()
    return epoch_loss
def train_boundary_epoch(self, data):
    """Run one training epoch of the boundary model over `data`.

    Boundary losses are averaged only over "anchor" tokens (gold class is
    not NIL). Batches with no anchors are skipped: the original divided by
    torch.sum(anchor_mask) == 0, producing NaN loss and NaN gradients.
    Returns the sum of per-batch boundary losses.
    """
    epoch_loss = 0.0
    self.detection_model.train()
    self.boundary_model.train()
    nil_id = data.label2id['NIL']  # loop-invariant
    for batch in data.mini_batches_for_train(self.batch_size):
        words = torch.tensor(batch['words'], device=self.device)
        poss = torch.tensor(batch['poss'], device=self.device)
        seq_len = torch.tensor(batch['seq_len'], device=self.device)
        chars = torch.tensor(batch['chars'], device=self.device)
        char_len = torch.tensor(batch['char_len'], device=self.device)
        cls_labels = torch.tensor(batch['cls_labels'], device=self.device)
        left_labels = torch.tensor(batch['left_labels'], device=self.device)
        right_labels = torch.tensor(batch['right_labels'], device=self.device)
        # NOTE: seq_mask / packages tensors are unused for boundary training
        # and are no longer materialized.
        (left_pred, right_pred) = self.boundary_model(do_softmax=False, words=words, poss=poss, chars=chars, char_len=char_len, seq_len=seq_len)
        left_pred = torch.tanh(left_pred)
        right_pred = torch.tanh(right_pred)
        (B, T1, T2) = left_pred.shape
        assert (T1 == T2)
        # Flatten (batch, token) so each token is an independent example.
        left_pred = left_pred.view(B * T1, T2)
        right_pred = right_pred.view(B * T1, T2)
        left_labels = left_labels.view(B * T1)
        right_labels = right_labels.view(B * T1)
        anchor_mask = (cls_labels != nil_id).float()
        anchor_count = torch.sum(anchor_mask)
        if anchor_count.item() == 0:
            # No anchor tokens: nothing to learn from, and dividing by zero
            # would poison the parameters with NaN gradients.
            continue
        boundary_loss = (self.boundary_loss_fn(left_pred, left_labels, False) + self.boundary_loss_fn(right_pred, right_labels, False))
        boundary_loss = boundary_loss.view(B, T1) * anchor_mask
        boundary_loss = torch.sum(boundary_loss) / anchor_count
        self.boundary_optimizer.zero_grad()
        boundary_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.boundary_model.parameters(), max_norm=3)
        self.boundary_optimizer.step()
        epoch_loss += boundary_loss.item()
    return epoch_loss
def train_joint_epoch(self, data):
    """Run one joint training epoch of detection + boundary models.

    The per-token boundary loss (masked to anchor tokens) is folded into the
    joint loss via self.joint_loss_fn. The detection optimizer only steps
    when self.train_detection_in_joint is set; the boundary optimizer always
    steps. Returns the sum of per-batch joint losses.
    """
    epoch_loss = 0.0
    self.detection_model.train()
    self.boundary_model.train()
    # Loop-invariant pieces hoisted out of the batch loop: class-weight
    # tensor (negative class down-weighted) and the NIL label id.
    weight = [self.negative_weight] + [1.0] * (self.detection_model.output_dim - 1)
    weight = torch.tensor(weight).to(device=self.device)
    nil_id = data.label2id['NIL']
    for batch in data.mini_batches_for_train(self.batch_size):
        words = torch.tensor(batch['words'], device=self.device)
        poss = torch.tensor(batch['poss'], device=self.device)
        seq_len = torch.tensor(batch['seq_len'], device=self.device)
        chars = torch.tensor(batch['chars'], device=self.device)
        char_len = torch.tensor(batch['char_len'], device=self.device)
        cls_labels = torch.tensor(batch['cls_labels'], device=self.device)
        left_labels = torch.tensor(batch['left_labels'], device=self.device)
        right_labels = torch.tensor(batch['right_labels'], device=self.device)
        seq_mask = torch.tensor(batch['seq_mask'], device=self.device)
        packages = torch.tensor(batch['packages'], device=self.device)
        cls_pred = self.detection_model(do_softmax=False, words=words, poss=poss, chars=chars, char_len=char_len, seq_len=seq_len)
        (left_pred, right_pred) = self.boundary_model(do_softmax=False, words=words, poss=poss, chars=chars, char_len=char_len, seq_len=seq_len)
        left_pred = torch.tanh(left_pred)
        right_pred = torch.tanh(right_pred)
        (B, T1, T2) = left_pred.shape
        assert (T1 == T2)
        # Flatten (batch, token) so each token is an independent example.
        left_pred = left_pred.view(B * T1, T2)
        right_pred = right_pred.view(B * T1, T2)
        left_labels = left_labels.view(B * T1)
        right_labels = right_labels.view(B * T1)
        anchor_mask = (cls_labels != nil_id).float()
        boundary_loss = (self.boundary_loss_fn(left_pred, left_labels, False) + self.boundary_loss_fn(right_pred, right_labels, False))
        boundary_loss = boundary_loss.view(B, T1) * anchor_mask
        loss = self.joint_loss_fn(cls_pred, cls_labels, packages, boundary_loss, seq_mask, weight=weight)
        self.detection_optimizer.zero_grad()
        self.boundary_optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.boundary_model.parameters(), max_norm=3)
        if self.train_detection_in_joint:
            self.detection_optimizer.step()
        self.boundary_optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss
def test(self, data):
    """Decode `data`, returning {sent_id: [(begin, end, label, anchor_idx), ...]}.

    `end` is exclusive (right boundary + 1); tokens past the true sequence
    length or classified NIL are skipped. Inference runs under
    torch.no_grad() so no autograd graph is built (the original tracked
    gradients needlessly during evaluation).
    """
    self.detection_model.eval()
    self.boundary_model.eval()
    nil_id = data.label2id['NIL']  # loop-invariant
    test_result = {}
    with torch.no_grad():
        for batch in data.mini_batches_for_test(batch_size=100):
            words = torch.tensor(batch['words'], device=self.device)
            poss = torch.tensor(batch['poss'], device=self.device)
            seq_len = torch.tensor(batch['seq_len'], device=self.device)
            chars = torch.tensor(batch['chars'], device=self.device)
            char_len = torch.tensor(batch['char_len'], device=self.device)
            cls_pred = self.detection_model(do_softmax=False, words=words, poss=poss, chars=chars, char_len=char_len, seq_len=seq_len)
            (left_pred, right_pred) = self.boundary_model(do_softmax=False, words=words, poss=poss, chars=chars, char_len=char_len, seq_len=seq_len)
            # argmax over the class/boundary dimension; softmax is monotonic
            # so do_softmax=False does not change the argmax.
            cls_outputs = torch.argmax(cls_pred, dim=2).tolist()
            left_outputs = torch.argmax(left_pred, dim=2).tolist()
            right_outputs = torch.argmax(right_pred, dim=2).tolist()
            assert (len(batch['sent_ids']) == len(left_outputs))
            for sent_idx, (sent_id, sent_cls_pred, sent_len) in enumerate(zip(batch['sent_ids'], cls_outputs, seq_len.tolist())):
                for token_idx, label in enumerate(sent_cls_pred):
                    # Skip padding positions and non-trigger (NIL) tokens.
                    if token_idx >= sent_len or label == nil_id:
                        continue
                    b = left_outputs[sent_idx][token_idx]
                    e = right_outputs[sent_idx][token_idx] + 1  # exclusive end
                    test_result.setdefault(sent_id, []).append((b, e, data.id2label[label], token_idx))
    return test_result
def save_config(self, train_config, detection_config, boundary_config):
    """Persist the three config objects to '<save_dir>model_config.dat' via shelve.

    Uses the shelf as a context manager so it is closed (and flushed) even
    if pickling one of the configs raises; the original leaked the handle
    on error.
    """
    file_name = self.save_dir + 'model_config.dat'
    with shelve.open(file_name) as f:
        f['train_config'] = train_config
        f['detection_config'] = detection_config
        f['boundary_config'] = boundary_config
def save_detection_model(self, file_name):
    """Serialize the detection model's state dict to '<file_name>.detection_model'."""
    target = '{}.detection_model'.format(file_name)
    torch.save(self.detection_model.state_dict(), target)
def save_boundary_model(self, file_name):
    """Serialize the boundary model's state dict to '<file_name>.boundary_model'."""
    target = '{}.boundary_model'.format(file_name)
    torch.save(self.boundary_model.state_dict(), target)
def restore_detection_model(self, file_name):
    """Load detection-model weights from '<file_name>.detection_model' if present.

    A missing checkpoint is logged, not raised.
    NOTE(review): map_location moves every storage onto CUDA via
    storage.cuda(self.device) — this fails on CPU-only hosts; confirm
    self.device is always a CUDA device/index before reusing this path.
    """
    if os.path.exists((file_name + '.detection_model')):
        self.detection_model.load_state_dict(torch.load((file_name + '.detection_model'), map_location=(lambda storage, loc: storage.cuda(self.device))))
        logger.info(('Restore detection model from %s' % (file_name + '.detection_model')))
    else:
        logger.info(('Restore from %s failed, no file exists.' % (file_name + '.detection_model')))
def restore_boundary_model(self, file_name):
    """Load boundary-model weights from '<file_name>.boundary_model' if present.

    A missing checkpoint is logged, not raised.
    NOTE(review): same CUDA-only map_location caveat as
    restore_detection_model — fails on CPU-only hosts.
    """
    if os.path.exists((file_name + '.boundary_model')):
        self.boundary_model.load_state_dict(torch.load((file_name + '.boundary_model'), map_location=(lambda storage, loc: storage.cuda(self.device))))
        logger.info(('Restore boundary model from %s' % (file_name + '.boundary_model')))
    else:
        logger.info(('Restore from %s failed, no file exists.' % (file_name + '.boundary_model')))
def resotre_all_model(self, file_name, dev_data, test_data):
    """Restore both sub-models from the checkpoints rooted at `file_name`.

    The method name keeps the original 'resotre' typo and the dev_data /
    test_data parameters (now unused) for caller compatibility.
    Fix: the original forwarded dev_data/test_data to
    restore_detection_model / restore_boundary_model, which take only
    file_name — every call raised TypeError.
    """
    self.restore_detection_model(file_name)
    self.restore_boundary_model(file_name)
def save_all_model(self, file_name):
    """Save both sub-models under the same base file name."""
    for save_fn in (self.save_detection_model, self.save_boundary_model):
        save_fn(file_name)
def restore_config(self, file_name):
    """Load every key stored in the shelve file into this instance's attributes.

    Fixes two defects in the original: `self.__dict[key]` referenced a
    name-mangled attribute that never exists (AttributeError on first use —
    the intended target is `self.__dict__`), and the shelf was never closed.
    """
    with shelve.open(file_name) as f:
        for key in f:
            self.__dict__[key] = f[key]
def restore_model(self, file_name):
    """Reload model, optimizer, and training bookkeeping saved under `file_name`."""
    self.model = torch.load(file_name + '.model')
    self.optimizer = torch.load(file_name + '.optimizer')
    # The '.info' shelf holds scalar bookkeeping written at save time.
    with shelve.open(file_name + '.info') as info:
        self.best_f1 = info['best_f1']
        self.current_epoch = info['current_epoch']
def restore_using_best(self, restore_detection=True, restore_boundary=True):
    """Reload one or both sub-models from the 'snapshot_best' checkpoint."""
    ckpt = '{}snapshot_best.dat'.format(self.save_dir)
    if restore_detection:
        self.restore_detection_model(ckpt)
    if restore_boundary:
        self.restore_boundary_model(ckpt)
def save_epoch(self):
    """Write a per-epoch snapshot named 'snapshot_<epoch>.dat'.

    NOTE(review): calls self.save_model, which is not defined in the visible
    portion of this class (only save_all_model / save_detection_model /
    save_boundary_model are) — confirm save_model exists elsewhere or this
    raises AttributeError.
    """
    file_name = (((self.save_dir + 'snapshot_') + str(self.current_epoch)) + '.dat')
    self.save_model(file_name)
def save_best(self, save_detection=True, save_boundary=True):
    """Write one or both current sub-models to the 'snapshot_best' checkpoint."""
    ckpt = '{}snapshot_best.dat'.format(self.save_dir)
    if save_detection:
        self.save_detection_model(ckpt)
    if save_boundary:
        self.save_boundary_model(ckpt)
def save_decode_result(self, result, prefix):
    """Write decoded spans to '<save_dir><prefix>_result.dat'.

    One line per sentence: "<sent_id>\\t" followed by one "b|e|type|anchor\\t"
    field per span. Fix: the original never closed the output file; a `with`
    block guarantees flush/close even on error.
    """
    file_name = self.save_dir + prefix + '_result' + '.dat'
    with open(file_name, 'w') as output:
        for sent_id in result:
            output.write(str(sent_id) + '\t')
            for (b, e, tp, c) in result[sent_id]:
                output.write('|'.join(map(str, [b, e, tp, c])) + '\t')
            output.write('\n')
def test_sorting(state: Dict) -> bool:
    """Return True iff state['current'] parses to the sorted version of state['original'].

    Any failure while parsing (missing keys, unparsable strings) counts as
    "not sorted" and yields False. Fix: the original used a bare `except:`,
    which also swallows SystemExit / KeyboardInterrupt; narrowed to
    `except Exception`.
    """
    try:
        correct_list = sorted(string_to_list(state['original']))
        sorted_list = string_to_list(state['current'])
        return (sorted_list == correct_list)
    except Exception:
        return False
class StringDeletion():
    """Record of deleting the token `old_token_idx` found at position `token_pos`."""

    def __init__(self, old_token_idx, token_pos, tokenizer):
        # Coerce to int so string-typed inputs (e.g. parsed from text) work.
        self.old_token_idx = int(old_token_idx)
        # Human-readable form of the deleted token, resolved via the tokenizer.
        self.old_token = tokenizer.decode(self.old_token_idx)
        self.token_pos = int(token_pos)

    def __str__(self):
        # Serialized as "<pos><idx>-<pos><idx>_del".
        anchor = '{0}{1}'.format(self.token_pos, self.old_token_idx)
        return anchor + '-' + anchor + '_del'
class BaseDataType(ABC):
    """Abstract base class for numeric data-type descriptors.

    Equality, hashing, and printing all delegate to get_canonical_name(),
    so two instances with the same canonical name are interchangeable as
    dict keys and in comparisons (including comparison against a plain str).
    NOTE(review): the query methods below have empty bodies but are not
    marked @abstractmethod — subclasses are presumably expected to override
    them; confirm against the full class (get_canonical_name's body lies
    beyond this excerpt).
    """
    def signed(self):
        # Signed iff values below zero are representable.
        return (self.min() < 0)
    def __eq__(self, other):
        # Compare by canonical name; also allow comparison to a raw string.
        if isinstance(other, BaseDataType):
            return (self.get_canonical_name() == other.get_canonical_name())
        elif isinstance(other, str):
            return (self.get_canonical_name() == other)
        else:
            return NotImplemented
    def __hash__(self):
        # Keep hash consistent with __eq__ (both use the canonical name).
        return hash(self.get_canonical_name())
    def name(self):
        # Alias for the canonical name.
        return self.get_canonical_name()
    def __repr__(self):
        return self.get_canonical_name()
    def __str__(self):
        return self.get_canonical_name()
    # --- interface stubs to be provided by concrete subclasses ---
    def bitwidth(self):
        pass
    def min(self):
        pass
    def max(self):
        pass
    def allowed(self, value):
        pass
    def get_num_possible_values(self):
        pass
    def is_integer(self):
        pass
    def is_fixed_point(self):
        pass
    def get_hls_datatype_str(self):
        pass
    def to_numpy_dt(self):
        pass
    def get_canonical_name(self):
def drop_blocks(drop_prob=0.0):
    """Return per-stage DropBlock2d constructors for a four-stage network.

    Stages 0-1 never use DropBlock. When drop_prob is truthy, stage 2 gets a
    partial with (block_size=5, gamma_scale=0.25) and stage 3 one with
    (block_size=3, gamma_scale=1.0); otherwise every slot is None.
    """
    if not drop_prob:
        return [None, None, None, None]
    return [
        None,
        None,
        partial(DropBlock2d, drop_prob=drop_prob, block_size=5, gamma_scale=0.25),
        partial(DropBlock2d, drop_prob=drop_prob, block_size=3, gamma_scale=1.0),
    ]
class TestGroupedBatchSampler(unittest.TestCase):
    """Ordering, grouping, drop_uneven, and __len__ behavior of GroupedBatchSampler."""

    def test_respect_order_simple(self):
        # Four contiguous groups of ten; sequential order must be preserved.
        data = list(range(40))
        groups = [n // 10 for n in data]
        base = SequentialSampler(data)
        for bs in [1, 3, 5, 6]:
            grouped = GroupedBatchSampler(base, groups, bs, False)
            flattened = list(itertools.chain.from_iterable(grouped))
            self.assertEqual(flattened, data)

    def test_respect_order(self):
        # Interleaved groups: batches are filled per group, order preserved.
        data = list(range(10))
        groups = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
        base = SequentialSampler(data)
        expected = [
            [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]],
            [[0, 1, 3], [2, 4, 5], [6, 9], [7, 8]],
            [[0, 1, 3, 6], [2, 4, 5, 7], [8], [9]],
        ]
        for idx, bs in enumerate([1, 3, 4]):
            grouped = GroupedBatchSampler(base, groups, bs, False)
            self.assertEqual(list(grouped), expected[idx])

    def test_respect_order_drop_uneven(self):
        # drop_uneven=True discards incomplete trailing batches.
        data = list(range(10))
        groups = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
        grouped = GroupedBatchSampler(SequentialSampler(data), groups, 3, True)
        self.assertEqual(list(grouped), [[0, 1, 3], [2, 4, 5]])

    def test_subset_sampler(self):
        groups = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
        grouped = GroupedBatchSampler(SubsetSampler([0, 3, 5, 6, 7, 8]), groups, 3, False)
        self.assertEqual(list(grouped), [[0, 3, 6], [5, 7, 8]])

    def test_permute_subset_sampler(self):
        groups = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
        grouped = GroupedBatchSampler(SubsetSampler([5, 0, 6, 1, 3, 8]), groups, 3, False)
        self.assertEqual(list(grouped), [[5, 8], [0, 6, 1], [3]])

    def test_permute_subset_sampler_drop_uneven(self):
        groups = [0, 0, 1, 0, 1, 1, 0, 1, 1, 0]
        grouped = GroupedBatchSampler(SubsetSampler([5, 0, 6, 1, 3, 8]), groups, 3, True)
        self.assertEqual(list(grouped), [[0, 6, 1]])

    def test_len(self):
        data = list(range(10))
        groups = [random.randint(0, 1) for _ in data]
        base = RandomSampler(data)
        grouped = GroupedBatchSampler(base, groups, 3, True)
        batches = list(grouped)
        # len() must match the realized batch count, and stay stable when
        # queried repeatedly after iteration.
        self.assertEqual(len(batches), len(grouped))
        self.assertEqual(len(batches), len(grouped))
        # len() computed *before* iterating must also agree.
        grouped = GroupedBatchSampler(base, groups, 3, True)
        length_before = len(grouped)
        batches = list(grouped)
        self.assertEqual(len(batches), length_before)
        self.assertEqual(len(batches), len(grouped))
class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Feature extractor for sequence inputs with padding/truncation utilities.

    NOTE(review): relies on `self.model_input_names` (whose first entry names
    the main model input) being defined by the mixin or a subclass — it is
    not defined in this class.
    """
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        """Store feature size, sampling rate, and the scalar used for padding."""
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        # Side on which _pad appends padding: 'right' (default) or 'left'.
        self.padding_side = kwargs.pop('padding_side', 'right')
        # Default for whether pad() attaches an attention mask.
        self.return_attention_mask = kwargs.pop('return_attention_mask', True)
        super().__init__(**kwargs)
    def pad(self, processed_features: Union[(BatchFeature, List[BatchFeature], Dict[(str, BatchFeature)], Dict[(str, List[BatchFeature])], List[Dict[(str, BatchFeature)]])], padding: Union[(bool, str, PaddingStrategy)]=True, max_length: Optional[int]=None, truncation: bool=False, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[(str, TensorType)]]=None) -> BatchFeature:
        """Pad (and optionally truncate) a batch of features to a common length.

        Accepts a dict-of-lists or a list of per-example dicts, normalizes
        values to numpy, truncates each example, then pads and returns a
        BatchFeature converted to `return_tensors` ('np'/'pt'/'tf'), inferring
        the tensor framework from the first element when not given.
        """
        # Transpose a list of per-example dicts into a dict of lists.
        if (isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature))):
            processed_features = {key: [example[key] for example in processed_features] for key in processed_features[0].keys()}
        if (self.model_input_names[0] not in processed_features):
            raise ValueError(f'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature` to this method that includes {self.model_input_names[0]}, but you provided {list(processed_features.keys())}')
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (return_attention_mask if (return_attention_mask is not None) else self.return_attention_mask)
        # Empty batch: nothing to pad; optionally attach an empty mask.
        if (not required_input):
            if return_attention_mask:
                processed_features['attention_mask'] = []
            return processed_features
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # Probe the first non-empty example to learn the element type.
            # NOTE(review): if every example is empty, this loop walks past
            # the end and raises IndexError before the bounds check below.
            index = 0
            while (len(required_input[index]) == 0):
                index += 1
            if (index < len(required_input)):
                first_element = required_input[index][0]
        # Infer the target tensor framework from the element type when unset.
        if (return_tensors is None):
            if is_tf_tensor(first_element):
                return_tensors = 'tf'
            elif is_torch_tensor(first_element):
                return_tensors = 'pt'
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = 'np'
            else:
                raise ValueError(f'type of {first_element} unknown: {type(first_element)}. Should be one of a python, numpy, pytorch or tensorflow object.')
        # Normalize all values to numpy so the padding math below is uniform.
        for (key, value) in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if (not all(((len(v) == batch_size) for v in processed_features.values()))):
            raise ValueError('Some items in the output dictionary have a different batch size than others.')
        # Pass 1: truncate each example independently.
        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for (k, v) in processed_features.items()}
            inputs_slice = self._truncate(inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation)
            truncated_inputs.append(inputs_slice)
        if (padding_strategy == PaddingStrategy.LONGEST):
            # Resolve LONGEST to a concrete max_length (post-truncation).
            max_length = max((len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs))
            padding_strategy = PaddingStrategy.MAX_LENGTH
        # Pass 2: pad each example and regroup per key.
        batch_outputs = {}
        for i in range(batch_size):
            outputs = self._pad(truncated_inputs[i], max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)
            for (key, value) in outputs.items():
                if (key not in batch_outputs):
                    batch_outputs[key] = []
                # Keep features in float32; avoid silent float64 promotion.
                if (value.dtype is np.dtype(np.float64)):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)
        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(self, processed_features: Union[(Dict[(str, np.ndarray)], BatchFeature)], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None) -> dict:
        """Pad a single example (dict of numpy arrays) up to max_length in place."""
        required_input = processed_features[self.model_input_names[0]]
        if (padding_strategy == PaddingStrategy.LONGEST):
            max_length = len(required_input)
        # Round max_length up to the next multiple when requested.
        if ((max_length is not None) and (pad_to_multiple_of is not None) and ((max_length % pad_to_multiple_of) != 0)):
            max_length = (((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of)
        needs_to_be_padded = ((padding_strategy != PaddingStrategy.DO_NOT_PAD) and (len(required_input) < max_length))
        if (return_attention_mask and ('attention_mask' not in processed_features)):
            # All-ones mask for the unpadded length; padding extends it with 0s.
            processed_features['attention_mask'] = np.ones(len(required_input), dtype=np.int32)
        if needs_to_be_padded:
            difference = (max_length - len(required_input))
            if (self.padding_side == 'right'):
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(processed_features['attention_mask'], (0, difference))
                # 2-D features pad along the time axis only; 1-D pad directly.
                padding_shape = (((0, difference), (0, 0)) if (self.feature_size > 1) else (0, difference))
                processed_features[self.model_input_names[0]] = np.pad(required_input, padding_shape, 'constant', constant_values=self.padding_value)
            elif (self.padding_side == 'left'):
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(processed_features['attention_mask'], (difference, 0))
                padding_shape = (((difference, 0), (0, 0)) if (self.feature_size > 1) else (difference, 0))
                processed_features[self.model_input_names[0]] = np.pad(required_input, padding_shape, 'constant', constant_values=self.padding_value)
            else:
                raise ValueError(('Invalid padding strategy:' + str(self.padding_side)))
        return processed_features
    def _truncate(self, processed_features: Union[(Dict[(str, np.ndarray)], BatchFeature)], max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, truncation: Optional[bool]=None):
        """Truncate one example to max_length; no-op unless truncation is truthy."""
        if (not truncation):
            return processed_features
        elif (truncation and (max_length is None)):
            raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.')
        required_input = processed_features[self.model_input_names[0]]
        # Mirror _pad's rounding so truncation and padding agree on length.
        if ((max_length is not None) and (pad_to_multiple_of is not None) and ((max_length % pad_to_multiple_of) != 0)):
            max_length = (((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of)
        needs_to_be_truncated = (len(required_input) > max_length)
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if ('attention_mask' in processed_features):
                processed_features['attention_mask'] = processed_features['attention_mask'][:max_length]
        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        """Resolve the user-facing `padding` argument to a PaddingStrategy and validate."""
        if (padding is not False):
            if (padding is True):
                padding_strategy = PaddingStrategy.LONGEST
            elif (not isinstance(padding, PaddingStrategy)):
                # A string like 'max_length' — let the enum constructor parse it.
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        if (max_length is None):
            if (padding_strategy == PaddingStrategy.MAX_LENGTH):
                raise ValueError(f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined')
        if ((padding_strategy != PaddingStrategy.DO_NOT_PAD) and (self.padding_value is None)):
            raise ValueError('Asking to pad but the feature_extractor does not have a padding value. Please select a value to use as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.')
        return padding_strategy
def generate_exp_directory(cfg, exp_name=None, expid=None, run_name=None, additional_id=None):
    """Populate `cfg` with run/log/checkpoint paths and create the checkpoint dir.

    When run_name is not supplied it is derived as '<exp_name>-<expid>',
    where expid defaults to a timestamp plus a shortuuid (with an optional
    '-<additional_id>' suffix). Only rank 0 creates the directory on disk.
    """
    if run_name is None:
        if expid is None:
            expid = time.strftime('%Y%m%d-%H%M%S-') + str(shortuuid.uuid())
        if additional_id is not None:
            expid = expid + '-' + str(additional_id)
        # A list of name parts is joined into a single dashed name.
        if isinstance(exp_name, list):
            exp_name = '-'.join(exp_name)
        run_name = '-'.join([exp_name, expid])
    cfg.run_name = run_name
    run_dir = os.path.join(cfg.root_dir, cfg.run_name)
    cfg.run_dir = run_dir
    cfg.exp_dir = run_dir
    cfg.log_dir = run_dir
    cfg.ckpt_dir = os.path.join(run_dir, 'checkpoint')
    cfg.log_path = os.path.join(run_dir, cfg.run_name + '.log')
    if cfg.get('rank', 0) == 0:
        pathlib.Path(cfg.ckpt_dir).mkdir(parents=True, exist_ok=True)
class TestCNN(TfGraphTestCase):
    """Tests for the TF `cnn` / `cnn_with_max_pooling` model primitives.

    Fix: the parametrize decorators had lost their `@pytest.mark` prefix —
    the class body contained bare `.parametrize(...)` statements, which is a
    syntax error. They are restored as `@pytest.mark.parametrize` decorators.
    """

    def setup_method(self):
        super().setup_method()
        self.batch_size = 5
        self.input_width = 10
        self.input_height = 10
        self.obs_input = np.ones((self.batch_size, self.input_width, self.input_height, 3))
        input_shape = self.obs_input.shape[1:]
        self._input_ph = tf.compat.v1.placeholder(tf.float32, shape=((None,) + input_shape), name='input')
        self.hidden_nonlinearity = tf.nn.relu

    @pytest.mark.parametrize('filters, strides', [(((32, (1, 1)),), (1,)), (((32, (3, 3)),), (1,)), (((32, (2, 3)),), (1,)), (((32, (3, 3)),), (2,)), (((32, (2, 3)),), (2,)), (((32, (1, 1)), (64, (1, 1))), (1, 1)), (((32, (3, 3)), (64, (3, 3))), (1, 1)), (((32, (2, 3)), (64, (3, 3))), (1, 1)), (((32, (3, 3)), (64, (3, 3))), (2, 2)), (((32, (2, 3)), (64, (3, 3))), (2, 2))])
    def test_output_shape_same(self, filters, strides):
        with tf.compat.v1.variable_scope('CNN'):
            self.cnn = cnn(input_var=self._input_ph, filters=filters, strides=strides, name='cnn', padding='SAME', hidden_w_init=tf.constant_initializer(1), hidden_nonlinearity=self.hidden_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        result = self.sess.run(self.cnn, feed_dict={self._input_ph: self.obs_input})
        # SAME padding: spatial dims shrink only by the stride (ceil division).
        height_size = self.input_height
        width_size = self.input_width
        for stride in strides:
            height_size = int((((height_size + stride) - 1) / stride))
            width_size = int((((width_size + stride) - 1) / stride))
        flatten_shape = ((width_size * height_size) * filters[(- 1)][0])
        assert (result.shape == (5, flatten_shape))

    @pytest.mark.parametrize('filters, strides', [(((32, (1, 1)),), (1,)), (((32, (3, 3)),), (1,)), (((32, (2, 3)),), (1,)), (((32, (3, 3)),), (2,)), (((32, (2, 3)),), (2,)), (((32, (1, 1)), (64, (1, 1))), (1, 1)), (((32, (3, 3)), (64, (3, 3))), (1, 1)), (((32, (2, 3)), (64, (3, 3))), (1, 1)), (((32, (3, 3)), (64, (3, 3))), (2, 2)), (((32, (2, 3)), (64, (3, 3))), (2, 2))])
    def test_output_shape_valid(self, filters, strides):
        with tf.compat.v1.variable_scope('CNN'):
            self.cnn = cnn(input_var=self._input_ph, filters=filters, strides=strides, name='cnn', padding='VALID', hidden_w_init=tf.constant_initializer(1), hidden_nonlinearity=self.hidden_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        result = self.sess.run(self.cnn, feed_dict={self._input_ph: self.obs_input})
        # VALID padding: spatial dims shrink by the kernel size per layer.
        height_size = self.input_height
        width_size = self.input_width
        for (filter_iter, stride) in zip(filters, strides):
            height_size = (int(((height_size - filter_iter[1][0]) / stride)) + 1)
            width_size = (int(((width_size - filter_iter[1][1]) / stride)) + 1)
        flatten_shape = ((height_size * width_size) * filters[(- 1)][0])
        assert (result.shape == (self.batch_size, flatten_shape))

    @pytest.mark.parametrize('filters, in_channels, strides', [(((32, (1, 1)),), (3,), (1,)), (((32, (3, 3)),), (3,), (1,)), (((32, (2, 3)),), (3,), (1,)), (((32, (3, 3)),), (3,), (2,)), (((32, (2, 3)),), (3,), (2,)), (((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1)), (((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1)), (((32, (2, 3)), (64, (3, 3))), (3, 32), (1, 1)), (((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2)), (((32, (2, 3)), (64, (3, 3))), (3, 32), (2, 2))])
    def test_output_with_identity_filter(self, filters, in_channels, strides):
        with tf.compat.v1.variable_scope('CNN'):
            self.cnn = cnn(input_var=self._input_ph, filters=filters, strides=strides, name='cnn1', padding='VALID', hidden_w_init=tf.constant_initializer(1), hidden_nonlinearity=self.hidden_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        result = self.sess.run(self.cnn, feed_dict={self._input_ph: self.obs_input})
        # With all-ones weights on an all-ones input, every output equals the
        # product of kernel area and input channels, accumulated per layer.
        filter_sum = 1
        for (filter_iter, in_channel) in zip(filters, in_channels):
            filter_sum *= ((filter_iter[1][0] * filter_iter[1][1]) * in_channel)
        height_size = self.input_height
        width_size = self.input_width
        for (filter_iter, stride) in zip(filters, strides):
            height_size = (int(((height_size - filter_iter[1][0]) / stride)) + 1)
            width_size = (int(((width_size - filter_iter[1][1]) / stride)) + 1)
        flatten_shape = ((height_size * width_size) * filters[(- 1)][0])
        h_out = np.full((self.batch_size, flatten_shape), filter_sum, dtype=np.float32)
        np.testing.assert_array_equal(h_out, result)

    @pytest.mark.parametrize('filters, in_channels, strides', [(((32, (1, 1)),), (3,), (1,)), (((32, (3, 3)),), (3,), (1,)), (((32, (2, 3)),), (3,), (1,)), (((32, (3, 3)),), (3,), (2,)), (((32, (2, 3)),), (3,), (2,)), (((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1)), (((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1)), (((32, (2, 3)), (64, (3, 3))), (3, 32), (1, 1)), (((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2)), (((32, (2, 3)), (64, (3, 3))), (3, 32), (2, 2))])
    def test_output_with_random_filter(self, filters, in_channels, strides):
        with tf.compat.v1.variable_scope('CNN'):
            self.cnn2 = cnn(input_var=self._input_ph, filters=filters, strides=strides, name='cnn1', padding='VALID', hidden_nonlinearity=self.hidden_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        result = self.sess.run(self.cnn2, feed_dict={self._input_ph: self.obs_input})
        two_layer = (len(filters) == 2)
        # Pull the randomly-initialized weights back out and replay the
        # convolution in numpy to cross-check the TF output.
        with tf.compat.v1.variable_scope('CNN', reuse=True):
            h0_w = tf.compat.v1.get_variable('cnn1/h0/weight').eval()
            h0_b = tf.compat.v1.get_variable('cnn1/h0/bias').eval()
            if two_layer:
                h1_w = tf.compat.v1.get_variable('cnn1/h1/weight').eval()
                h1_b = tf.compat.v1.get_variable('cnn1/h1/bias').eval()
        filter_weights = ((h0_w, h1_w) if two_layer else (h0_w,))
        filter_bias = ((h0_b, h1_b) if two_layer else (h0_b,))
        input_val = convolve(_input=self.obs_input, filter_weights=filter_weights, filter_bias=filter_bias, strides=strides, filters=filters, in_channels=in_channels, hidden_nonlinearity=self.hidden_nonlinearity)
        dense_out = input_val.reshape((self.batch_size, (- 1))).astype(np.float32)
        np.testing.assert_array_almost_equal(dense_out, result)

    @pytest.mark.parametrize('filters, in_channels, strides, pool_shape, pool_stride', [(((32, (1, 1)),), (3,), (1,), 1, 1), (((32, (3, 3)),), (3,), (1,), 1, 1), (((32, (2, 3)),), (3,), (1,), 1, 1), (((32, (3, 3)),), (3,), (2,), 2, 2), (((32, (2, 3)),), (3,), (2,), 2, 2), (((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1), 1, 1), (((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1), 1, 1), (((32, (2, 3)), (64, (3, 3))), (3, 32), (1, 1), 1, 1)])
    def test_output_with_max_pooling(self, filters, in_channels, strides, pool_shape, pool_stride):
        with tf.compat.v1.variable_scope('CNN'):
            self.cnn2 = cnn_with_max_pooling(input_var=self._input_ph, filters=filters, strides=strides, name='cnn1', pool_shapes=(pool_shape, pool_shape), pool_strides=(pool_stride, pool_stride), padding='VALID', hidden_w_init=tf.constant_initializer(1), hidden_nonlinearity=self.hidden_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        result = self.sess.run(self.cnn2, feed_dict={self._input_ph: self.obs_input})
        two_layer = (len(filters) == 2)
        with tf.compat.v1.variable_scope('CNN', reuse=True):
            h0_w = tf.compat.v1.get_variable('cnn1/h0/weight').eval()
            h0_b = tf.compat.v1.get_variable('cnn1/h0/bias').eval()
            if two_layer:
                h1_w = tf.compat.v1.get_variable('cnn1/h1/weight').eval()
                h1_b = tf.compat.v1.get_variable('cnn1/h1/bias').eval()
        filter_weights = ((h0_w, h1_w) if two_layer else (h0_w,))
        filter_bias = ((h0_b, h1_b) if two_layer else (h0_b,))
        # Replay conv + max-pool layer by layer in numpy.
        input_val = self.obs_input
        for (filter_iter, filter_weight, _filter_bias, in_channel) in zip(filters, filter_weights, filter_bias, in_channels):
            input_val = convolve(_input=input_val, filter_weights=(filter_weight,), filter_bias=(_filter_bias,), strides=strides, filters=(filter_iter,), in_channels=(in_channel,), hidden_nonlinearity=self.hidden_nonlinearity)
            input_val = max_pooling(_input=input_val, pool_shape=pool_shape, pool_stride=pool_stride)
        dense_out = input_val.reshape((self.batch_size, (- 1))).astype(np.float32)
        np.testing.assert_array_equal(dense_out, result)

    def test_invalid_padding(self):
        with pytest.raises(ValueError):
            with tf.compat.v1.variable_scope('CNN'):
                self.cnn = cnn(input_var=self._input_ph, filters=((32, (3, 3)),), strides=(1,), name='cnn', padding='UNKNOWN')

    def test_invalid_padding_max_pooling(self):
        with pytest.raises(ValueError):
            with tf.compat.v1.variable_scope('CNN'):
                self.cnn = cnn_with_max_pooling(input_var=self._input_ph, filters=((32, (3, 3)),), strides=(1,), name='cnn', pool_shapes=(1, 1), pool_strides=(1, 1), padding='UNKNOWN')
def test_arrow_coverage100():
    """Exercise the Arrow conversion paths: (byte)strings behind 32/64-bit
    list offsets, byte masks, indexed options, regular arrays, dictionary
    encoding, and round-trips through handle_arrow.

    Each stanza builds a layout, converts with to_arrow(), and asserts the
    resulting pylist (and, where noted, the Awkward round-trip) exactly.
    """
    # Plain booleans round-trip through Arrow unchanged.
    a = ak.operations.from_iter([True, True, False, False, True, False, True, False]).layout
    assert (a.to_arrow().to_pylist() == to_list(a))
    # Bytestrings ('byte'/'bytestring' parameters) behind 32-bit offsets.
    a = ak.contents.ListOffsetArray(ak.index.Index32(np.array([0, 5, 10], 'i4')), ak.contents.NumpyArray(np.frombuffer(b'hellothere', 'u1'), parameters={'__array__': 'byte'}), parameters={'__array__': 'bytestring'})
    assert (a.to_arrow().to_pylist() == [b'hello', b'there'])
    # Byte-masked bytestrings; valid_when=False means mask==True marks missing.
    a = ak.contents.ByteMaskedArray(ak.index.Index8(np.array([False, True, False, False, True, True])), ak.contents.ListOffsetArray(ak.index.Index32(np.array([0, 5, 10, 15, 20, 25, 30], 'i4')), ak.contents.NumpyArray(np.frombuffer(b'hellotherehellotherehellothere', 'u1'), parameters={'__array__': 'byte'}), parameters={'__array__': 'bytestring'}), valid_when=False)
    assert (a.to_arrow().to_pylist() == [b'hello', None, b'hello', b'there', None, None])
    a = ak.contents.ByteMaskedArray(ak.index.Index8(np.array([False, True])), ak.contents.ListOffsetArray(ak.index.Index32(np.array([0, 5, 10], 'i4')), ak.contents.NumpyArray(np.frombuffer(b'hellothere', 'u1'), parameters={'__array__': 'byte'}), parameters={'__array__': 'bytestring'}), valid_when=False)
    assert (a.to_arrow().to_pylist() == [b'hello', None])
    # Indexed-option bytestrings: index -1 marks a missing entry.
    a = ak.contents.IndexedOptionArray(ak.index.Index32(np.array([(- 1), 1, (- 1), 0, 0, (- 1)], 'i4')), ak.contents.ListOffsetArray(ak.index.Index32(np.array([0, 5, 10], 'i4')), ak.contents.NumpyArray(np.frombuffer(b'hellothere', 'u1'), parameters={'__array__': 'byte'}), parameters={'__array__': 'bytestring'}))
    assert (a.to_arrow().to_pylist() == [None, b'there', None, b'hello', b'hello', None])
    # Same coverage for text strings ('char'/'string' parameters).
    a = ak.contents.ListOffsetArray(ak.index.Index32(np.array([0, 5, 10], 'i4')), ak.contents.NumpyArray(np.frombuffer(b'hellothere', 'u1'), parameters={'__array__': 'char'}), parameters={'__array__': 'string'})
    assert (a.to_arrow().to_pylist() == ['hello', 'there'])
    a = ak.contents.ByteMaskedArray(ak.index.Index8(np.array([False, True, False, False, True, True])), ak.contents.ListOffsetArray(ak.index.Index32(np.array([0, 5, 10, 15, 20, 25, 30], 'i4')), ak.contents.NumpyArray(np.frombuffer(b'hellotherehellotherehellothere', 'u1'), parameters={'__array__': 'char'}), parameters={'__array__': 'string'}), valid_when=False)
    assert (a.to_arrow().to_pylist() == ['hello', None, 'hello', 'there', None, None])
    # ...and the round-trip back through handle_arrow.
    assert (to_list(ak._connect.pyarrow.handle_arrow(a.to_arrow())) == ['hello', None, 'hello', 'there', None, None])
    # 64-bit list offsets variant, strings then bytestrings.
    a = ak.contents.ByteMaskedArray(ak.index.Index8(np.array([False, True, False, False, True, True])), ak.contents.ListOffsetArray(ak.index.Index64(np.array([0, 5, 10, 15, 20, 25, 30], 'i8')), ak.contents.NumpyArray(np.frombuffer(b'hellotherehellotherehellothere', 'u1'), parameters={'__array__': 'char'}), parameters={'__array__': 'string'}), valid_when=False)
    assert (a.to_arrow().to_pylist() == ['hello', None, 'hello', 'there', None, None])
    assert (to_list(ak._connect.pyarrow.handle_arrow(a.to_arrow())) == ['hello', None, 'hello', 'there', None, None])
    a = ak.contents.ByteMaskedArray(ak.index.Index8(np.array([False, True, False, False, True, True])), ak.contents.ListOffsetArray(ak.index.Index64(np.array([0, 5, 10, 15, 20, 25, 30], 'i8')), ak.contents.NumpyArray(np.frombuffer(b'hellotherehellotherehellothere', 'u1'), parameters={'__array__': 'byte'}), parameters={'__array__': 'bytestring'}), valid_when=False)
    assert (a.to_arrow().to_pylist() == [b'hello', None, b'hello', b'there', None, None])
    assert (to_list(ak._connect.pyarrow.handle_arrow(a.to_arrow())) == [b'hello', None, b'hello', b'there', None, None])
    a = ak.contents.ByteMaskedArray(ak.index.Index8(np.array([False, True])), ak.contents.ListOffsetArray(ak.index.Index32(np.array([0, 5, 10], 'i4')), ak.contents.NumpyArray(np.frombuffer(b'hellothere', 'u1'), parameters={'__array__': 'char'}), parameters={'__array__': 'string'}), valid_when=False)
    assert (a.to_arrow().to_pylist() == ['hello', None])
    a = ak.contents.IndexedOptionArray(ak.index.Index32(np.array([(- 1), 1, (- 1), 0, 0, (- 1)], 'i4')), ak.contents.ListOffsetArray(ak.index.Index32(np.array([0, 5, 10], 'i4')), ak.contents.NumpyArray(np.frombuffer(b'hellothere', 'u1'), parameters={'__array__': 'char'}), parameters={'__array__': 'string'}))
    assert (a.to_arrow().to_pylist() == [None, 'there', None, 'hello', 'hello', None])
    # Without the string parameters the uint8 content stays a list of ints.
    a = ak.contents.ListOffsetArray(ak.index.Index32(np.array([0, 5, 10], 'i4')), ak.contents.NumpyArray(np.frombuffer(b'hellothere', 'u1')))
    assert (a.to_arrow().to_pylist() == [[104, 101, 108, 108, 111], [116, 104, 101, 114, 101]])
    a = ak.contents.ByteMaskedArray(ak.index.Index8(np.array([False, True, False, False, True, True])), ak.contents.ListOffsetArray(ak.index.Index32(np.array([0, 5, 10, 15, 20, 25, 30], 'i4')), ak.contents.NumpyArray(np.frombuffer(b'hellotherehellotherehellothere', 'u1'))), valid_when=False)
    assert (a.to_arrow().to_pylist() == [[104, 101, 108, 108, 111], None, [104, 101, 108, 108, 111], [116, 104, 101, 114, 101], None, None])
    a = ak.contents.ByteMaskedArray(ak.index.Index8(np.array([False, True])), ak.contents.ListOffsetArray(ak.index.Index32(np.array([0, 5, 10], 'i4')), ak.contents.NumpyArray(np.frombuffer(b'hellothere', 'u1'))), valid_when=False)
    assert (a.to_arrow().to_pylist() == [[104, 101, 108, 108, 111], None])
    a = ak.contents.IndexedOptionArray(ak.index.Index32(np.array([(- 1), 1, (- 1), 0, 0, (- 1)], 'i4')), ak.contents.ListOffsetArray(ak.index.Index32(np.array([0, 5, 10], 'i4')), ak.contents.NumpyArray(np.frombuffer(b'hellothere', 'u1'))))
    assert (a.to_arrow().to_pylist() == [None, [116, 104, 101, 114, 101], None, [104, 101, 108, 108, 111], [104, 101, 108, 108, 111], None])
    # RegularArray behind indexed options, 32-bit then 64-bit indexes.
    a = ak.contents.IndexedOptionArray(ak.index.Index32(np.array([(- 1), 1, (- 1), 0, 0, (- 1)], 'i4')), ak.contents.RegularArray(ak.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])), 3, zeros_length=0))
    assert (a.to_arrow().to_pylist() == [None, [4.4, 5.5, 6.6], None, [1.1, 2.2, 3.3], [1.1, 2.2, 3.3], None])
    a = ak.contents.IndexedOptionArray(ak.index.Index32(np.array([(- 1), 1, (- 1), 0, 0, (- 1), 1, (- 1)], 'i4')), ak.contents.RegularArray(ak.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])), 3, zeros_length=0))
    assert (a.to_arrow().to_pylist() == [None, [4.4, 5.5, 6.6], None, [1.1, 2.2, 3.3], [1.1, 2.2, 3.3], None, [4.4, 5.5, 6.6], None])
    a = ak.contents.IndexedOptionArray(ak.index.Index64(np.array([(- 1), 1, (- 1), 0, 0, (- 1), 1, (- 1)], 'i8')), ak.contents.RegularArray(ak.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])), 3, zeros_length=0))
    assert (a.to_arrow().to_pylist() == [None, [4.4, 5.5, 6.6], None, [1.1, 2.2, 3.3], [1.1, 2.2, 3.3], None, [4.4, 5.5, 6.6], None])
    # A byte mask layered on top of an indexed option (simplified form).
    a = ak.contents.ByteMaskedArray.simplified(ak.index.Index8(np.array([True, True, True, True, False, False])), ak.contents.IndexedOptionArray(ak.index.Index32(np.array([(- 1), 1, (- 1), 0, 0, (- 1)], 'i4')), ak.contents.RegularArray(ak.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])), 3, zeros_length=0)), valid_when=True)
    assert (a.to_arrow().to_pylist() == [None, [4.4, 5.5, 6.6], None, [1.1, 2.2, 3.3], None, None])
    # UnmaskedArray passes its content straight through.
    a = ak.contents.UnmaskedArray(ak.contents.ListOffsetArray(ak.index.Index32(np.array([0, 5, 10], 'i4')), ak.contents.NumpyArray(np.frombuffer(b'hellothere', 'u1'))))
    assert (a.to_arrow().to_pylist() == [[104, 101, 108, 108, 111], [116, 104, 101, 114, 101]])
    # Arrow dictionary encoding maps to an IndexedOptionArray on the way in.
    a = pyarrow.array(['one', 'two', 'three', 'two', 'two', 'one', 'three', 'one']).dictionary_encode()
    b = ak._connect.pyarrow.handle_arrow(a)
    assert isinstance(b, ak.contents.IndexedOptionArray)
    assert (to_list(b) == ['one', 'two', 'three', 'two', 'two', 'one', 'three', 'one'])
    # Option-type lists round-trip through Arrow.
    a = ak.highlevel.Array([[1.1, 2.2, 3.3], [], None, [4.4, 5.5]]).layout
    assert (to_list(ak._connect.pyarrow.handle_arrow(a.to_arrow())) == [[1.1, 2.2, 3.3], [], None, [4.4, 5.5]])
    # Byte-masked numbers, bytestrings, and lists.
    a = ak.contents.ByteMaskedArray(ak.index.Index8(np.array([False, False, False, True, True, False, False])), ak.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 999, 314, 4.4, 5.5])), valid_when=False)
    assert (a.to_arrow().to_pylist() == [1.1, 2.2, 3.3, None, None, 4.4, 5.5])
    a = ak.contents.ByteMaskedArray(ak.index.Index8(np.array([False, False, False, True, True, False, False])), ak.operations.from_iter([b'hello', b'', b'there', b'yuk', b'', b'o', b'hellothere']).layout, valid_when=False)
    assert (a.to_arrow().to_pylist() == [b'hello', b'', b'there', None, None, b'o', b'hellothere'])
    a = ak.contents.ByteMaskedArray(ak.index.Index8([True, True, False, True]), ak.operations.from_iter([[1.1, 2.2, 3.3], [], [999], [4.4, 5.5]]).layout, valid_when=True)
    assert (to_list(ak._connect.pyarrow.handle_arrow(a.to_arrow())) == [[1.1, 2.2, 3.3], [], None, [4.4, 5.5]])
    # Heterogeneous (union-type) content survives the round-trip too.
    a = ak.operations.from_iter([[1, 2, 3], [], [4, 5], 999, 123]).layout
    assert (a.to_arrow().to_pylist() == [[1, 2, 3], [], [4, 5], 999, 123])
    assert (to_list(ak._connect.pyarrow.handle_arrow(a.to_arrow())) == [[1, 2, 3], [], [4, 5], 999, 123])
def clip(x, min_value, max_value):
    """Element-wise clip of ``x`` to the interval [min_value, max_value].

    If ``max_value`` is given but smaller than ``min_value``, it is raised
    to ``min_value`` so the interval is never empty.
    """
    degenerate = (max_value is not None) and (max_value < min_value)
    if degenerate:
        max_value = min_value
    lo = _to_tensor(min_value, x.dtype.base_dtype)
    hi = _to_tensor(max_value, x.dtype.base_dtype)
    return tf.clip_by_value(x, lo, hi)
class SqueezeExcite1d(nn.Module):
    """Squeeze-and-excitation block for 1-d (batch, channels, length) inputs.

    Squeezes each channel to its temporal mean, passes it through a two-layer
    bottleneck (channels -> channels//reduction -> channels), and rescales the
    input channel-wise with the resulting sigmoid gate.

    Args:
        channels: number of input channels.
        reduction: bottleneck reduction factor (default 16).
    """

    def __init__(self, channels, reduction=16):
        super().__init__()
        channels_reduced = (channels // reduction)
        # Leading unsqueeze(0) adds a batch-broadcast dimension of size 1.
        self.w1 = torch.nn.Parameter(torch.randn(channels_reduced, channels).unsqueeze(0))
        self.w2 = torch.nn.Parameter(torch.randn(channels, channels_reduced).unsqueeze(0))

    def forward(self, x):
        # Squeeze: global average over the temporal axis -> (B, C, 1).
        z = torch.mean(x, dim=2, keepdim=True)
        # Excite: bottleneck MLP producing a per-channel gate in (0, 1).
        intermed = F.relu(torch.matmul(self.w1, z))
        # Fix: F.sigmoid is deprecated (removed in recent torch releases);
        # torch.sigmoid is the supported equivalent.
        s = torch.sigmoid(torch.matmul(self.w2, intermed))
        return (s * x)
class HighResolutionNet(nn.Module):
    """HRNet-style backbone: stem -> layer1 -> transition1 -> stage2.

    Only stage 2 is constructed here; an optional ImageNet classification
    head is attached when the 'keep_imagenet_head' environment variable is
    set. forward() returns the list of multi-resolution feature maps from
    stage 2.
    """

    def __init__(self, cfg, bn_type, bn_momentum, **kwargs):
        # cfg: nested config providing NORM / ACTIVATION / STEM_BLOCK /
        # STEM_EXPANSION / DROP_PATH_RATE / STAGE2. bn_type and bn_momentum
        # are only forwarded to _make_head.
        self.inplanes = 64
        self.drop_path_rate = cfg['DROP_PATH_RATE']
        super(HighResolutionNet, self).__init__()
        Norm = norm_dict[cfg.NORM]
        Activation = activation_dict[cfg.ACTIVATION]
        # Stem: two stride-2 3x3 convs => 4x spatial downsampling.
        self.stem = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False), Norm(64), Activation, nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False), Norm(64), Activation)
        self.layer1 = self._make_layer(blocks_dict[cfg.STEM_BLOCK], dim=64, blocks=2, drop_path=0, channel_expansion=cfg['STEM_EXPANSION'])
        self.stage2_cfg = cfg['STAGE2']
        # Linearly increasing stochastic-depth rates across all stage-2 blocks.
        depths = (self.stage2_cfg['NUM_MODULES'] * self.stage2_cfg['NUM_BLOCKS'][0])
        dp_rates = [x.item() for x in torch.linspace(0, self.drop_path_rate, depths)]
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        self.transition1 = self._make_transition_layer([64], num_channels, Norm=Norm, Activation=Activation)
        (self.stage2, pre_stage_channels) = self._make_stage(self.stage2_cfg, num_channels, Norm, Activation, dp_rates=dp_rates)
        if os.environ.get('keep_imagenet_head'):
            (self.incre_modules, self.downsamp_modules, self.final_layer) = self._make_head(pre_stage_channels, bn_type=bn_type, bn_momentum=bn_momentum)

    def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer, Norm, Activation):
        """Build per-branch adapters from the previous stage's branches to the
        current stage's branches; new (lower-resolution) branches get a chain
        of stride-2 convs from the last pre-layer branch."""
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if (i < num_branches_pre):
                # Existing branch: 3x3 channel-matching conv at same resolution.
                transition_layers.append(nn.Sequential(nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), Norm(num_channels_cur_layer[i]), Activation))
            else:
                # New branch: downsample (i - num_branches_pre + 1) times.
                conv3x3s = []
                inchannels = num_channels_pre_layer[(num_branches_pre - 1)]
                # NOTE(review): the initial outchannels is indexed with the
                # constant num_branches_pre rather than depending on i/j —
                # confirm this matches the intended channel schedule.
                outchannels = num_channels_cur_layer[num_branches_pre]
                for j in range(((i - num_branches_pre) + 1)):
                    conv3x3s.append(nn.Sequential(nn.Conv2d(inchannels, outchannels, 3, stride=2, padding=1, bias=False), Norm(outchannels), Activation))
                    inchannels = num_channels_cur_layer[(num_branches_pre + j)]
                    outchannels = num_channels_cur_layer[min(((num_branches_pre + j) + 1), (num_branches_cur - 1))]
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, blocks, dim, drop_path=0, channel_expansion=4):
        """Stack `blocks` residual blocks at constant width `dim`.

        Only the first block receives the given drop_path rate; the rest use 0.
        """
        downsample = None  # NOTE(review): unused here; likely vestigial.
        layers = []
        layers.append(block(dim, dim, drop_path, expansion=channel_expansion))
        for i in range(1, blocks):
            layers.append(block(dim, dim, drop_path=0, expansion=channel_expansion))
        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, num_inchannels, norm, activation, dp_rates: list=[0.0]):
        """Chain NUM_MODULES HighResolutionModules; only the last one disables
        multi_scale_output.

        NOTE(review): the mutable default dp_rates=[0.0] is never mutated
        here, but it is a known Python pitfall — consider dp_rates=None.
        """
        num_modules = layer_config['NUM_MODULES']
        modules = []
        for i in range(num_modules):
            if (i == (num_modules - 1)):
                multi_scale_output = False
            else:
                multi_scale_output = True
            # Hand each module its slice of the drop-path schedule.
            dp_rates_4module = dp_rates[(i * layer_config['NUM_BLOCKS'][0]):]
            modules.append(HighResolutionModule(layer_config, num_inchannels, norm, activation, multi_scale_output=multi_scale_output, dp_rates_4module=dp_rates_4module, module_idx=i))
            num_inchannels = modules[(- 1)].get_num_inchannels()
        return (nn.Sequential(*modules), num_inchannels)

    def forward(self, x):
        """Return the list of stage-2 feature maps for input image batch x."""
        x = self.stem(x)
        x = self.layer1(x)
        # Fan out to one tensor per stage-2 branch via transition1.
        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if (self.transition1[i] is not None):
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        x_list = self.stage2(x_list)
        return x_list

    def init_weights(self, pretrained=''):
        """Truncated-normal init for conv/linear, constant init for BN; then
        optionally load a pretrained checkpoint from `pretrained`."""
        Log.info('=> init weights from normal distribution')
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                trunc_normal_(m.weight, std=0.02)
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if os.path.isfile(pretrained):
            pretrained_dict = torch.load(pretrained)
            Log.info('=> loading pretrained model {}'.format(pretrained))
            model_dict = self.state_dict()
            # k[9:] strips a fixed 9-character key prefix — presumably
            # 'backbone.' from the pretraining wrapper; TODO confirm.
            load_dict = {k[9:]: v for (k, v) in pretrained_dict.items() if (k[9:] in model_dict.keys())}
            Log.info('Missing keys: {}'.format(list((set(model_dict) - set(load_dict)))))
            model_dict.update(load_dict)
            self.load_state_dict(model_dict)
def evaluate_lfw(distances, labels, num_folds=10, far_target=0.001):
    """Compute LFW-style verification metrics from pair distances and labels.

    Returns a 9-tuple: (true_positive_rate, false_positive_rate, precision,
    recall, accuracy, roc_auc, best_distances, tar, far).
    """
    # ROC sweep over a coarse grid of distance thresholds.
    roc_thresholds = np.arange(0, 4, 0.01)
    (true_positive_rate, false_positive_rate, precision, recall, accuracy,
     best_distances) = calculate_roc_values(thresholds=roc_thresholds, distances=distances, labels=labels, num_folds=num_folds)
    roc_auc = auc(false_positive_rate, true_positive_rate)
    # TAR@FAR uses a 10x finer grid so far_target can be matched closely.
    val_thresholds = np.arange(0, 4, 0.001)
    (tar, far) = calculate_val(thresholds_val=val_thresholds, distances=distances, labels=labels, far_target=far_target, num_folds=num_folds)
    return (true_positive_rate, false_positive_rate, precision, recall,
            accuracy, roc_auc, best_distances, tar, far)
@pytest.mark.parametrize('ti_dtype', [ti.f32, ti.f64])
@test_utils.test(arch=[ti.cpu, ti.cuda, ti.vulkan], exclude=[vk_on_mac])
def test_matrixfree_cg(ti_dtype):
    """Solve a 2D stencil system with matrix-free CG and verify A @ x ~= b.

    Fix: the two decorator lines were truncated ('.parametrize(...)' is a
    syntax error at module level and '_utils.test(...)' a no-op statement),
    and the three Taichi kernels below lost their '@ti.kernel' decorators —
    without them the 'ti.template()' annotations cannot work.
    """
    GRID = 32
    Ax = ti.field(dtype=ti_dtype, shape=(GRID, GRID))
    x = ti.field(dtype=ti_dtype, shape=(GRID, GRID))
    b = ti.field(dtype=ti_dtype, shape=(GRID, GRID))

    @ti.kernel
    def init():
        # Smooth sinusoidal right-hand side; zero initial guess.
        for i, j in ti.ndrange(GRID, GRID):
            xl = i / (GRID - 1)
            yl = j / (GRID - 1)
            b[i, j] = ti.sin(2 * math.pi * xl) * ti.sin(2 * math.pi * yl)
            x[i, j] = 0.0

    @ti.kernel
    def compute_Ax(v: ti.template(), mv: ti.template()):
        # 5-point stencil with zero values outside the grid boundary.
        for i, j in v:
            l = v[i - 1, j] if i - 1 >= 0 else 0.0
            r = v[i + 1, j] if i + 1 <= GRID - 1 else 0.0
            t = v[i, j + 1] if j + 1 <= GRID - 1 else 0.0
            b = v[i, j - 1] if j - 1 >= 0 else 0.0
            mv[i, j] = 20 * v[i, j] - l - r - t - b

    @ti.kernel
    def check_solution(sol: ti.template(), ans: ti.template(), tol: ti_dtype) -> bool:
        # True iff every entry of sol is within tol of ans.
        exit_code = True
        for i, j in ti.ndrange(GRID, GRID):
            if ti.abs(ans[i, j] - sol[i, j]) < tol:
                pass
            else:
                exit_code = False
        return exit_code

    A = LinearOperator(compute_Ax)
    init()
    MatrixFreeCG(A, b, x, maxiter=10 * GRID * GRID, tol=1e-18, quiet=True)
    compute_Ax(x, Ax)
    result = check_solution(Ax, b, tol=1e-06)
    assert result
def test_distributed(test_module, test_directory, options):
    """Run *test_module* once per distributed backend / init-method combo.

    Skips non-gloo backends on Windows and the MPI backend when mpiexec is
    not installed. Returns the first nonzero run_test() return code, else 0.
    """
    mpi_available = (subprocess.call('command -v mpiexec', shell=True) == 0)
    if (options.verbose and (not mpi_available)):
        print_to_stderr('MPI not available -- MPI backend tests will be skipped')
    config = DISTRIBUTED_TESTS_CONFIG
    for (backend, env_vars) in config.items():
        if ((sys.platform == 'win32') and (backend != 'gloo')):
            continue
        if ((backend == 'mpi') and (not mpi_available)):
            continue
        # Fix: iterate a tuple, not a set literal — set iteration order is an
        # implementation detail and made the run order nondeterministic.
        for with_init_file in (True, False):
            if ((sys.platform == 'win32') and (not with_init_file)):
                continue
            tmp_dir = tempfile.mkdtemp()
            if options.verbose:
                init_str = 'with {} init_method'
                with_init = init_str.format(('file' if with_init_file else 'env'))
                print_to_stderr('Running distributed tests for the {} backend{}'.format(backend, with_init))
            # The distributed test processes read their config from env vars.
            os.environ['TEMP_DIR'] = tmp_dir
            os.environ['BACKEND'] = backend
            os.environ['INIT_METHOD'] = 'env://'
            os.environ.update(env_vars)
            if with_init_file:
                # fork/spawn tests append their own file name to the URL.
                if (test_module in ['test_distributed_fork', 'test_distributed_spawn']):
                    init_method = f'{FILE_SCHEMA}{tmp_dir}/'
                else:
                    init_method = f'{FILE_SCHEMA}{tmp_dir}/shared_init_file'
                os.environ['INIT_METHOD'] = init_method
            try:
                os.mkdir(os.path.join(tmp_dir, 'barrier'))
                os.mkdir(os.path.join(tmp_dir, 'test_dir'))
                if (backend == 'mpi'):
                    # Probe whether this mpiexec accepts --noprefix (Open MPI).
                    with open(os.devnull, 'w') as devnull:
                        noprefix_supported = (subprocess.call('mpiexec -n 1 --noprefix bash -c ""', shell=True, stdout=devnull, stderr=subprocess.STDOUT) == 0)
                    mpiexec = ['mpiexec', '-n', '3']
                    # Fix: only append the flag when supported; previously an
                    # empty-string argv entry was passed to mpiexec.
                    if noprefix_supported:
                        mpiexec.append('--noprefix')
                    return_code = run_test(test_module, test_directory, options, launcher_cmd=mpiexec)
                else:
                    return_code = run_test(test_module, test_directory, options)
                if (return_code != 0):
                    return return_code
            finally:
                shutil.rmtree(tmp_dir)
    return 0
def run_azimint_naive(device_type: dace.dtypes.DeviceType):
    """Build, optimize and run the naive azimuthal-integration SDFG for the
    requested device, validate against the NumPy reference, and return the
    SDFG."""
    N, npt = 40000, 100
    data, radius = initialize(N)
    if device_type == dace.dtypes.DeviceType.FPGA:
        # FPGA path: transform the whole program, expand reductions to the
        # FPGA-specialized implementation, then inline and fix the sizes.
        sdfg = dace_azimint_naive.to_sdfg(simplify=True)
        applied = sdfg.apply_transformations([FPGATransformSDFG])
        assert (applied == 1)
        from dace.libraries.standard import Reduce
        Reduce.default_implementation = 'FPGAPartialReduction'
        sdfg.expand_library_nodes()
        sdfg.apply_transformations_repeated([InlineSDFG], print_report=True)
        sdfg.specialize(dict(N=N, npt=npt))
        val = sdfg(data=data, radius=radius)
    elif device_type in (dace.dtypes.DeviceType.CPU, dace.dtypes.DeviceType.GPU):
        # CPU/GPU path: rely on auto_optimize for device-specific tuning.
        sdfg = auto_optimize(dace_azimint_naive.to_sdfg(), device_type)
        val = sdfg(data=data, radius=radius, N=N, npt=npt)
    # Accept either absolute closeness or a tiny relative error.
    ref = numpy_azimint_naive(data, radius, npt)
    assert (np.allclose(val, ref) or (relerror(val, ref) < 1e-10))
    return sdfg
def set_logger_dir(dirname, action=None, prefix=''):
    """Set the global logging directory, interactively resolving conflicts.

    Args:
        dirname: target log directory.
        action: conflict-resolution choice — 'k' keep, 'd' delete, 'n' use a
            timestamped new dir, 'b' back up; any other value raises OSError.
            None prompts the user when the directory already holds files.
        prefix: optional prefix for the log file name.
    """
    dirname = os.path.normpath(dirname)
    global LOG_DIR, _FILE_HANDLER
    # Detach any previously-installed file handler before switching dirs.
    if _FILE_HANDLER:
        _logger.removeHandler(_FILE_HANDLER)
        del _FILE_HANDLER

    def dir_nonempty(dirname):
        # Non-hidden entries only; dotfiles do not count as content.
        return (osp.isdir(dirname) and len([x for x in os.listdir(dirname) if (x[0] != '.')]))
    if dir_nonempty(dirname):
        if (not action):
            _logger.warning("Log directory {} exists! Use 'd' to delete it. ".format(dirname))
            _logger.warning("If you're resuming from a previous run, you can choose to keep it.\nPress any other key to exit. ")
        while (not action):
            action = input('Select Action: k (keep) / d (delete) / q (quit):').lower().strip()
        act = action
        # NOTE(review): the prompt offers k/d/q, but the handlers below also
        # accept 'b' (backup) and 'n' (new dir), and 'q' falls through to the
        # OSError branch — confirm the prompt text matches the intent.
        if (act == 'b'):
            backup_name = (dirname + _get_time_str())
            shutil.move(dirname, backup_name)
            info("Directory '{}' backuped to '{}'".format(dirname, backup_name))
        elif (act == 'd'):
            shutil.rmtree(dirname, ignore_errors=True)
            if dir_nonempty(dirname):
                # First pass may have skipped undeletable entries; retry
                # without ignore_errors so the failure surfaces.
                shutil.rmtree(dirname, ignore_errors=False)
        elif (act == 'n'):
            dirname = (dirname + _get_time_str())
            info('Use a new log directory {}'.format(dirname))
        elif (act == 'k'):
            pass
        else:
            raise OSError('Directory {} exits!'.format(dirname))
    LOG_DIR = dirname
    mkdir_p(dirname)
    # Separate a non-empty prefix from 'log.log' with an underscore.
    if ((not prefix.endswith('_')) and (len(prefix) > 0)):
        prefix = (prefix + '_')
    _set_file(osp.join(dirname, '{}log.log'.format(prefix)))
def _mul(input, *args):
    """Tracing hook for element-wise multiplication during Caffe export.

    Computes the result with the original (pre-patch) torch operator, then
    records a matching Eltwise PROD layer into the Caffe net under
    construction.

    Fix: this hook previously dispatched to raw__sub__ even though it logs a
    'mul' Eltwise layer with operation=0 (PROD in Caffe's EltwiseParameter),
    so the traced value disagreed with the emitted layer.
    """
    x = raw__mul__(input, *args)
    if (not NET_INITTED):
        # Not exporting: behave exactly like the raw operator.
        return x
    layer_name = log.add_layer(name='mul')
    top_blobs = log.add_blobs([x], name='mul_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise', bottom=[log.blobs(input), log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 0  # 0 == PROD in Caffe's Eltwise
    log.cnet.add_layer(layer)
    return x
def get_device():
    """Return the preferred available device name: 'cuda', 'mlu', or 'cpu'.

    Fix: the previous `len(device_list) == 1` test returned 'cpu' when MORE
    than one accelerator was available; any available accelerator should win
    (first in dict order), falling back to 'cpu' only when none is present.
    """
    is_device_available = {'cuda': torch.cuda.is_available(), 'mlu': is_mlu_available()}
    device_list = [k for (k, v) in is_device_available.items() if v]
    return (device_list[0] if device_list else 'cpu')
# Fix: the decorator line was truncated to '_REGISTRY.register()' (a bare
# no-op statement); restore it as a registration decorator. The registry
# name is inferred from the MODEL_REGISTRY lookup inside __init__ — confirm.
@MODEL_REGISTRY.register()
class VisualClassify(nn.Module):
    """Thin wrapper that instantiates a registered visual backbone and
    exposes its feature map."""

    def __init__(self, cfg):
        super(VisualClassify, self).__init__()
        self.cfg = cfg
        # Look up the backbone class by name and build it from the config.
        self.visual_conv = MODEL_REGISTRY.get(cfg.VIS.MODEL_NAME)(cfg)
        init_weights(self, cfg.MODEL.FC_INIT_STD, cfg.MODEL.ZERO_INIT_FINAL_BN)

    def forward(self, visual_clip):
        # Returns a 1-tuple (feature_map,) — callers unpack by position.
        return (self.visual_conv.get_feature_map(visual_clip),)
def hierarchical_cnn_res_gate(rep_tensor, rep_mask, n_gram=5, layer_num=5, hn=None, scope=None, is_train=None, keep_prob=1.0, wd=0.0):
    """Stacked gated-CNN sentence encoder with residual connections (TF1).

    Each layer runs an n-gram convolution producing 2*ivec channels, splits
    them into (content, gate) halves combined as a * sigmoid(b), and adds
    the previous layer's output as a residual (from the second layer on).

    Args:
        rep_tensor: float tensor [batch, seq_len, org_ivec].
        rep_mask: mask applied via mask_for_high_rank before the conv stack.
        n_gram: convolution window size along the sequence axis.
        layer_num: number of stacked gated conv layers.
        hn: hidden size per layer; defaults to the input feature size.
        is_train / keep_prob: dropout control between layers.
        wd: if > 0, adds weight-decay regularization on non-bias variables.
    Returns:
        Output of the last layer, [batch, seq_len, ivec].
    """
    # Pad so that 'VALID' convolution preserves sequence length:
    # symmetric for odd n_gram; the extra row goes to the back otherwise.
    if ((n_gram % 2) == 1):
        padding_front = padding_back = int(((n_gram - 1) / 2))
    else:
        padding_front = ((n_gram - 1) // 2)
        padding_back = (padding_front + 1)
    padding = [[0, 0], [padding_front, padding_back], [0, 0], [0, 0]]
    (bs, sl, vec) = (tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2])
    org_ivec = rep_tensor.get_shape().as_list()[2]
    ivec = (hn or org_ivec)
    with tf.variable_scope((scope or 'cnn_for_sentence_encoding')):
        rep_tensor = mask_for_high_rank(rep_tensor, rep_mask)
        iter_rep = rep_tensor
        layer_res_list = []
        for layer_idx in range(layer_num):
            with tf.variable_scope(('conv_maxpool_%s' % layer_idx)):
                # conv2d needs a 4-D input: add a trailing channel dim.
                iter_rep_etd = tf.expand_dims(iter_rep, 3)
                iter_rep_etd_dp = dropout(iter_rep_etd, keep_prob, is_train)
                # First layer consumes the raw feature size; later layers ivec.
                feature_size = (org_ivec if (layer_idx == 0) else ivec)
                # 2*ivec output channels: one half content, one half gate.
                filter_shape = [n_gram, feature_size, 1, (2 * ivec)]
                W = tf.get_variable('W', filter_shape, tf.float32)
                b = tf.get_variable('b', [(2 * ivec)], tf.float32)
                iter_rep_etd_pad = tf.pad(iter_rep_etd_dp, padding)
                conv = tf.nn.conv2d(iter_rep_etd_pad, W, strides=[1, 1, 1, 1], padding='VALID', name='conv')
                map_res = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')
                # Drop the collapsed feature axis back to [bs, sl, 2*ivec].
                map_res = tf.squeeze(map_res, [2])
                # GLU-style gating: content * sigmoid(gate).
                (map_res_a, map_res_b) = tf.split(map_res, num_or_size_splits=2, axis=2)
                iter_rep = (map_res_a * tf.nn.sigmoid(map_res_b))
                # Residual connection to the previous layer's output.
                if (len(layer_res_list) > 0):
                    iter_rep = (iter_rep + layer_res_list[(- 1)])
                layer_res_list.append(iter_rep)
        if (wd > 0.0):
            add_reg_without_bias()
        return iter_rep
def main():
    """Parse the toy spec, build the sample program, and check its output
    against the reference implementation on both sample inputs."""
    logger.info('Parsing Spec...')
    spec = S.parse(toy_spec_str)
    logger.info('Parsing succeeded')
    logger.info('Building sample program...')
    prog = D.Builder(spec).from_sexp_string(toy_dsl_sexp)
    logger.info('Build program = {}'.format(prog))
    interpreter = ToyInterpreter()
    # Run the built program on each sample input and compare with the
    # hand-written reference toy_dsl_func.
    for sample in (input0, input1):
        logger.info('Executing program on inputs {}...'.format(sample))
        out_value = execute(interpreter, prog, sample)
        logger.info('Execution finished with output = {}'.format(out_value))
        assert out_value == toy_dsl_func(sample)
def get_patient_list(min_patient, cad_prescription_taken_by_patient):
    """Collect the distinct patients of every drug prescribed to at least
    ``min_patient`` patients.

    Args:
        min_patient: minimum patient count for a drug to be included.
        cad_prescription_taken_by_patient: mapping drug -> collection of
            patient identifiers.
    Returns:
        Set of patient identifiers.
    """
    # Keys (drug names) are never used, so iterate values only; a set
    # comprehension replaces the manual nested append loop.
    return {
        patient
        for patients in cad_prescription_taken_by_patient.values()
        if len(patients) >= min_patient
        for patient in patients
    }
class ExampleModel(nn.Module):
    """Minimal stand-in model: forward() echoes its input and train_step()
    reports fixed metrics, for exercising runner plumbing."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Linear(1, 1)
        self.test_cfg = None

    def forward(self, imgs, rescale=False, return_loss=False):
        # Identity pass-through; the flags exist only for API compatibility.
        return imgs

    def train_step(self, data_batch, optimizer, **kwargs):
        # Fixed dummy metrics, independent of the inputs.
        return {
            'loss': 0.5,
            'log_vars': {'accuracy': 0.98},
            'num_samples': 1,
        }
def test_Numpy_from_buffer():
    """GrowableBuffer appends and conversion to a layout-builder Numpy node.

    NOTE(review): f5/f6 look like they were numba-jitted originally (the
    unused debug=True parameter hints at a stripped decorator) — confirm
    upstream.
    """
    def f5(debug=True):
        # Build a float64 buffer and append two values.
        growablebuffer = ak.numba.GrowableBuffer(np.float64)
        growablebuffer.append(66.6)
        growablebuffer.append(77.7)
        return growablebuffer
    out = f5()
    # Appends are visible in the snapshot, in insertion order.
    assert (out.snapshot().tolist() == [66.6, 77.7])
    def f6():
        growablebuffer = ak.numba.GrowableBuffer(np.float64)
        growablebuffer.append(66.6)
        growablebuffer.append(77.7)
        return ak._connect.numba.layoutbuilder._from_buffer(growablebuffer)
    out = f6()
    # _from_buffer wraps the buffer as a layout-builder Numpy node with the
    # same dtype, length, and contents.
    assert isinstance(out, lb.Numpy)
    assert (out.dtype == np.dtype(np.float64))
    assert (len(out) == 2)
    assert (ak.to_list(out.snapshot()) == [66.6, 77.7])
class BaseModel(torch.nn.Module):
    """Module base class providing checkpoint loading onto CPU."""

    def load(self, path):
        """Load weights from *path*, unwrapping full training checkpoints.

        A checkpoint containing an 'optimizer' entry is assumed to be a full
        training checkpoint whose weights live under 'model'; otherwise the
        file is treated as a bare state dict.
        """
        checkpoint = torch.load(path, map_location=torch.device('cpu'))
        if 'optimizer' in checkpoint:
            checkpoint = checkpoint['model']
        self.load_state_dict(checkpoint)
def get_parameter_widgets(param_dict):
param_names = []
widget_list = []
test_related = []
for key in param_dict.keys():
if (key == 'output_path'):
widget_list.append(wg.Text(description='Output Path:', value=param_dict[key], style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'log_path'):
widget_list.append(wg.Text(description='Log Path:', value=param_dict[key], style=style))
param_names.append(('--' + key))
test_related.append(False)
if (key == 'gpus'):
widget_list.append(wg.IntSlider(description='Use GPU:', min=0, max=1, value=param_dict[key], style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'no_resume'):
widget_list.append(wg.Checkbox(description='Resume:', value=(not param_dict[key]), style=style))
param_names.append(('--' + key))
test_related.append(False)
if (key == 'pretrained'):
widget_list.append(wg.Text(description='Path To The Pretrained Model:', value=param_dict[key], style=style))
param_names.append(('--' + key))
test_related.append(False)
if (key == 'augmentations'):
widget_list.append(wg.Text(description='Augmentation Dictionary File:', value=param_dict[key], style=style))
param_names.append(('--' + key))
test_related.append(False)
if (key == 'epochs'):
widget_list.append(wg.BoundedIntText(description='Epochs:', min=1, max=10000, value=param_dict[key], style=style))
param_names.append(('--' + key))
test_related.append(False)
if (key == 'backbone'):
widget_list.append(wg.Dropdown(description='Network Architecture:', options=['UNet3D_PixelShuffle_inject', 'UNet2D_PixelShuffle_inject'], value=param_dict[key], layout={'width': 'max-content'}, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'in_channels'):
widget_list.append(wg.BoundedIntText(description='Input Channels:', value=param_dict[key], min=1, max=10000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'out_channels'):
widget_list.append(wg.BoundedIntText(description='Output Channels:', value=param_dict[key], min=1, max=10000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'feat_channels'):
widget_list.append(wg.BoundedIntText(description='Feature Channels:', value=param_dict[key], min=2, max=10000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'patch_size'):
if (not (param_dict[key] is str)):
param_dict[key] = ' '.join([str(p) for p in param_dict[key]])
widget_list.append(wg.Text(description='Patch Size (z,y,x):', value=param_dict[key], style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'out_activation'):
widget_list.append(wg.Dropdown(description='Output Activation:', options=['tanh', 'sigmoid', 'hardtanh', 'relu', 'leakyrelu', 'none'], value=param_dict[key], layout={'width': 'max-content'}, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'layer_norm'):
widget_list.append(wg.Dropdown(description='Layer Normalization:', options=['instance', 'batch', 'none'], value=param_dict[key], layout={'width': 'max-content'}, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 't_channels'):
widget_list.append(wg.BoundedIntText(description='T Channels:', value=param_dict[key], min=1, max=10000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'data_norm'):
widget_list.append(wg.Dropdown(description='Data Normalization:', options=['percentile', 'minmax', 'meanstd', 'minmax_shifted', 'none'], value=param_dict[key], layout={'width': 'max-content'}, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'data_root'):
widget_list.append(wg.Text(value=param_dict[key], description='Data Root:', style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'train_list'):
widget_list.append(wg.Text(value=param_dict[key], description='Train List:', style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'test_list'):
widget_list.append(wg.Text(value=param_dict[key], description='Test List:', style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'val_list'):
widget_list.append(wg.Text(value=param_dict[key], description='Validation List:', style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'image_groups'):
if (not (param_dict[key] is str)):
param_dict[key] = ' '.join([str(p) for p in param_dict[key]])
widget_list.append(wg.Text(description='Image Groups:', value=param_dict[key], style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'mask_groups'):
if (not (param_dict[key] is str)):
param_dict[key] = ' '.join([str(p) for p in param_dict[key]])
widget_list.append(wg.Text(description='Mask Groups:', value=param_dict[key], style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'dist_handling'):
widget_list.append(wg.Dropdown(description='Distance Handling:', options=['float', 'bool', 'bool_inv', 'exp', 'tanh', 'none'], value=param_dict[key], layout={'width': 'max-content'}, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'dist_scaling'):
if (not (param_dict[key] is str)):
param_dict[key] = ' '.join([str(p) for p in param_dict[key]])
widget_list.append(wg.Dropdown(description='Distance Scaling:', value=param_dict[key], style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'seed_handling'):
widget_list.append(wg.Dropdown(description='Seed Handling:', options=['float', 'bool', 'none'], value=param_dict[key], layout={'width': 'max-content'}, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'boundary_handling'):
widget_list.append(wg.Dropdown(description='Boundary Handling:', options=['bool', 'none'], value=param_dict[key], layout={'width': 'max-content'}, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'instance_handling'):
widget_list.append(wg.Dropdown(description='Instance Handling:', options=['bool', 'none'], value=param_dict[key], layout={'width': 'max-content'}, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'strides'):
if (not (param_dict[key] is str)):
param_dict[key] = ' '.join([str(p) for p in param_dict[key]])
widget_list.append(wg.Dropdown(description='Strides:', value=param_dict[key], style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'sh_order'):
widget_list.append(wg.BoundedIntText(description='SH Order:', value=param_dict[key], min=0, max=10000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'mean_low'):
widget_list.append(wg.BoundedFloatText(description='Sampling Mean Weight Low:', value=param_dict[key], min=0, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'mean_high'):
widget_list.append(wg.BoundedFloatText(description='Sampling Mean Weight High:', value=param_dict[key], min=0, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'var_high'):
widget_list.append(wg.BoundedFloatText(description='Sampling Variance Weight High:', value=param_dict[key], min=0, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'variance_levels'):
widget_list.append(wg.BoundedIntText(description='Sampling Variance Levels:', value=param_dict[key], min=0, max=10000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'strategy'):
widget_list.append(wg.Dropdown(description='Sampling Variance Strategy:', options=['random', 'structured'], value=param_dict[key], layout={'width': 'max-content'}, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'image_noise_channel'):
widget_list.append(wg.BoundedIntText(description='Image Noise Channel:', value=param_dict[key], min=(- 5), max=5, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'mask_noise_channel'):
widget_list.append(wg.BoundedIntText(description='Mask Noise Channel:', value=param_dict[key], min=(- 5), max=5, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'noise_type'):
widget_list.append(wg.Dropdown(description='Noise Type:', options=['gaussian', 'rayleigh', 'laplace'], value=param_dict[key], layout={'width': 'max-content'}, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'samples_per_epoch'):
widget_list.append(wg.BoundedIntText(description='Samples Per Epoch:', value=param_dict[key], min=(- 1), max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'batch_size'):
widget_list.append(wg.BoundedIntText(description='Batch Size:', value=param_dict[key], min=1, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'learning_rate'):
widget_list.append(wg.BoundedFloatText(description='Learning Rate:', value=param_dict[key], min=0, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'background_weight'):
widget_list.append(wg.BoundedFloatText(description='Background Weight:', value=param_dict[key], min=0, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'seed_weight'):
widget_list.append(wg.BoundedFloatText(description='Seed Weight:', value=param_dict[key], min=0, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'boundary_weight'):
widget_list.append(wg.BoundedFloatText(description='Boundary Weight:', value=param_dict[key], min=0, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'flow_weight'):
widget_list.append(wg.BoundedFloatText(description='Flow Weight:', value=param_dict[key], min=0, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'centroid_weight'):
widget_list.append(wg.BoundedFloatText(description='Centroid Weight:', value=param_dict[key], min=0, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'encoding_weight'):
widget_list.append(wg.BoundedFloatText(description='Encoding Weight:', value=param_dict[key], min=0, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'robustness_weight'):
widget_list.append(wg.BoundedFloatText(description='Robustness Weight:', value=param_dict[key], min=0, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'variance_interval'):
widget_list.append(wg.BoundedIntText(description='Sampling Variance Interval:', value=param_dict[key], min=0, max=10000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'ada_update_period'):
widget_list.append(wg.BoundedIntText(description='ADA Update Period:', value=param_dict[key], min=0, max=10000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'ada_update'):
widget_list.append(wg.BoundedFloatText(description='ADA Update Step:', value=param_dict[key], min=0, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'ada_target'):
widget_list.append(wg.BoundedFloatText(description='ADA Target:', value=param_dict[key], min=0, max=1000000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'num_samples'):
widget_list.append(wg.BoundedIntText(description='Number Of Samples:', value=param_dict[key], min=0, max=10000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'num_timesteps'):
widget_list.append(wg.BoundedIntText(description='Number Of Timesteps:', value=param_dict[key], min=0, max=1000, style=style))
param_names.append(('--' + key))
test_related.append(True)
if (key == 'diffusion_schedule'):
widget_list.append(wg.Dropdown(description='Diffusion Schedule:', options=['cosine', 'linear', 'quadratic', 'sigmoid'], value=param_dict[key], layout={'width': 'max-content'}, style=style))
param_names.append(('--' + key))
test_related.append(True)
return (param_names, widget_list, test_related) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.