code stringlengths 101 5.91M |
|---|
def is_borcherds_cartan_matrix(M):
    """Return ``True`` if ``M`` is an even Borcherds-Cartan matrix.

    Checks performed (all must hold):
    - ``M`` is a square matrix (per ``is_Matrix`` / ``is_square``);
    - every diagonal entry is nonzero and even;
    - every off-diagonal pair ``M[i, j]``, ``M[j, i]`` is non-positive,
      and the two entries are zero simultaneously.
    """
    if not is_Matrix(M):
        return False
    if not M.is_square():
        return False
    n = M.ncols()
    for i in range(n):
        diag = M[i, i]
        # Diagonal entries must be nonzero and even.
        if diag == 0 or diag % 2 == 1:
            return False
        for j in range(i + 1, n):
            upper = M[i, j]
            lower = M[j, i]
            # Off-diagonal entries may not be positive...
            if upper > 0 or lower > 0:
                return False
            # ...and must vanish simultaneously (both zero or both nonzero).
            if (upper == 0) != (lower == 0):
                return False
    return True
def make_dataset(dir, max_dataset_size=float('inf')):
    """Return the paths of all '.png' files in the parent directory of ``dir``.

    The last path component of ``dir`` is stripped first, so passing
    ``/data/set/leaf`` lists the pngs directly inside ``/data/set/``.

    Parameters:
        dir: a '/'-separated path whose final component is discarded.
        max_dataset_size: cap on the number of returned paths; the
            ``float('inf')`` default means "no cap" (``min`` then yields
            ``len(images)``, an int, so slicing stays valid).

    Returns:
        list[str]: full paths of the '.png' files found (directory order).
    """
    # Strip the final path component. str.replace removes *every*
    # occurrence, so this assumes the leaf name does not also appear
    # earlier in the path (behavior preserved from the original).
    dir = dir.replace(dir.split('/')[(- 1)], '')
    assert os.path.isdir(dir), ('%s is not a valid directory' % dir)
    # NOTE(review): the original also scanned immediate subdirectories for
    # .png files but then unconditionally overwrote that result with the
    # top-level listing below; the dead subdirectory scan was removed.
    images = [os.path.join(dir, file) for file in os.listdir(dir) if file.endswith('.png')]
    return images[:min(max_dataset_size, len(images))]
class MSRVTTChoiceDataModule(BaseDataModule):
    """Data module wiring the MSR-VTT multiple-choice dataset into the
    BaseDataModule pipeline; all heavy lifting happens in the base class.
    NOTE(review): sibling data modules typically declare the three
    accessors below as @property; decorators may have been lost during
    extraction -- confirm against the other modules in this project.
    """
    def __init__(self, *args, **kwargs):
        # No extra state; forwards everything to BaseDataModule.
        super().__init__(*args, **kwargs)
    def dataset_cls(self):
        # Dataset class used for the standard (with-negatives) setup.
        return MSRVTTChoiceDataset
    def dataset_cls_no_false(self):
        # Same class is reused when no "false" (negative) samples are needed.
        return MSRVTTChoiceDataset
    def dataset_name(self):
        # Registry/logging identifier for this dataset.
        return 'msrvtt_choice'
def perform_distributed_training(setup_trainer_and_train, config, results_dir=None):
    """Spawn one CUDA-context training process per configured GPU and wait.

    Requires ``config['trainer']['num_gpus'] > 1``. When ``results_dir`` is
    None, a timestamp-derived directory name is generated. Exceptions raised
    inside a worker are printed after it joins.
    """
    assert config['trainer']['num_gpus'] > 1
    num_devices = config['trainer']['num_gpus']
    messenger = event_messenger
    if results_dir is None:
        # Timestamp (whole seconds, width 10) as a default results directory.
        results_dir = f'{time.time():10.0f}'
    workers = []
    for device_id in range(num_devices):
        worker_kwargs = {
            'run_configuration': config,
            'device_id': device_id,
            'num_devices': num_devices,
            'event_messenger': messenger,
            'results_directory': results_dir,
            # Only the first worker logs verbosely.
            'verbose': device_id == 0,
        }
        workers.append(PyCUDADeviceContextProcessWrapper(target=setup_trainer_and_train, kwargs=worker_kwargs))
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
        if worker.exception:
            print(worker.exception)
    print('Exiting the Parent Process.')
def plot_step_current_response(cur_in, mem_rec, vline1):
    """Plot a step input current (top) and the resulting membrane
    potential (bottom) on a shared time axis; optionally mark ``vline1``
    with a dashed vertical line on the bottom panel. Calls ``plt.show()``.
    """
    fig, axes = plt.subplots(2, figsize=(8, 6), sharex=True)
    current_ax, potential_ax = axes
    # Top panel: the injected current.
    current_ax.plot(cur_in, c='tab:orange')
    current_ax.set_ylim([0, 0.2])
    current_ax.set_ylabel('Input Current ($I_{in}$)')
    current_ax.set_title("Lapicque's Neuron Model With Step Input")
    # Bottom panel: the recorded membrane potential.
    potential_ax.plot(mem_rec)
    potential_ax.set_ylim([0, 0.6])
    potential_ax.set_ylabel('Membrane Potential ($U_{mem}$)')
    if vline1:
        # ymax > 1 + clip_on=False lets the marker span both panels.
        potential_ax.axvline(x=vline1, ymin=0, ymax=2.2, alpha=0.25,
                             linestyle='dashed', c='black', linewidth=2,
                             zorder=0, clip_on=False)
    plt.xlabel('Time step')
    plt.show()
class TestLearningRate(serial.SerializedTestCase):
    """Reference checks for the Caffe2 ``LearningRate`` operator policies
    ('alter', 'hill', 'slope', 'gate', 'composite').

    NOTE(review): the hypothesis decorators below were garbled in the
    source (bare ``(**hu.gcs_cpu_only)`` / ``(deadline=None, ...)`` lines,
    which are syntax errors); restored to the standard ``@given`` /
    ``@settings`` form used throughout Caffe2 operator tests.
    """

    @given(**hu.gcs_cpu_only)
    @settings(deadline=None, max_examples=50)
    def test_alter_learning_rate_op(self, gc, dc):
        """'alter' policy: lr alternates between base_lr and 0."""
        iter = np.random.randint(low=1, high=100000.0, size=1)
        active_period = int(np.random.randint(low=1, high=1000.0, size=1))
        inactive_period = int(np.random.randint(low=1, high=1000.0, size=1))
        base_lr = float(np.random.random(1))

        def ref(iter):
            iter = float(iter)
            reminder = iter % (active_period + inactive_period)
            if reminder < active_period:
                return (np.array(base_lr),)
            else:
                return (np.array(0.0),)

        op = core.CreateOperator('LearningRate', 'iter', 'lr', policy='alter', active_first=True, base_lr=base_lr, active_period=active_period, inactive_period=inactive_period)
        self.assertReferenceChecks(gc, op, [iter], ref)

    @given(**hu.gcs_cpu_only)
    def test_hill_learning_rate_op(self, gc, dc):
        """'hill' policy: linear warmup then inv-power decay, floored."""
        iter = np.random.randint(low=1, high=100000.0, size=1)
        # TODO(review): 'high=.0' was garbled in the source; 1000.0 matches
        # the sibling tests -- confirm against upstream.
        num_iter = int(np.random.randint(low=100.0, high=1000.0, size=1))
        start_multiplier = 0.0001
        gamma = 1.0
        power = 0.5
        end_multiplier = 0.01
        base_lr = float(np.random.random(1))

        def ref(iter):
            iter = float(iter)
            if iter < num_iter:
                # Linear ramp from start_multiplier up to 1.0.
                lr = start_multiplier + (1.0 - start_multiplier) * iter / num_iter
            else:
                iter -= num_iter
                lr = math.pow(1.0 + gamma * iter, -power)
                lr = max(lr, end_multiplier)
            return (np.array(base_lr * lr),)

        op = core.CreateOperator('LearningRate', 'data', 'out', policy='hill', base_lr=base_lr, num_iter=num_iter, start_multiplier=start_multiplier, gamma=gamma, power=power, end_multiplier=end_multiplier)
        self.assertReferenceChecks(gc, op, [iter], ref)

    @given(**hu.gcs_cpu_only)
    def test_slope_learning_rate_op(self, gc, dc):
        """'slope' policy: constant, then linear interpolation, then floor."""
        iter = np.random.randint(low=1, high=100000.0, size=1)
        num_iter_1 = int(np.random.randint(low=100.0, high=1000.0, size=1))
        multiplier_1 = 1.0
        num_iter_2 = num_iter_1 + int(np.random.randint(low=100.0, high=1000.0, size=1))
        multiplier_2 = 0.5

        base_lr = float(np.random.random(1))

        def ref(iter):
            iter = float(iter)
            if iter < num_iter_1:
                lr = multiplier_1
            else:
                lr = max(multiplier_1 + (iter - num_iter_1) * (multiplier_2 - multiplier_1) / (num_iter_2 - num_iter_1), multiplier_2)
            return (np.array(base_lr * lr),)

        op = core.CreateOperator('LearningRate', 'data', 'out', policy='slope', base_lr=base_lr, num_iter_1=num_iter_1, multiplier_1=multiplier_1, num_iter_2=num_iter_2, multiplier_2=multiplier_2)
        self.assertReferenceChecks(gc, op, [iter], ref)

    @given(**hu.gcs_cpu_only)
    @settings(max_examples=10)
    def test_gate_learningrate(self, gc, dc):
        """'gate' policy: multiplier_1 before num_iter, multiplier_2 after."""
        iter = np.random.randint(low=1, high=100000.0, size=1)
        num_iter = int(np.random.randint(low=100.0, high=1000.0, size=1))
        base_lr = float(np.random.uniform(-1, 1))
        multiplier_1 = float(np.random.uniform(-1, 1))
        multiplier_2 = float(np.random.uniform(-1, 1))

        def ref(iter):
            iter = float(iter)
            if iter < num_iter:
                return (np.array(multiplier_1 * base_lr),)
            else:
                return (np.array(multiplier_2 * base_lr),)

        op = core.CreateOperator('LearningRate', 'data', 'out', policy='gate', num_iter=num_iter, multiplier_1=multiplier_1, multiplier_2=multiplier_2, base_lr=base_lr)
        self.assertReferenceChecks(gc, op, [iter], ref)

    @given(gc=hu.gcs['gc'], min_num_iter=st.integers(min_value=10, max_value=20), max_num_iter=st.integers(min_value=50, max_value=100))
    @settings(max_examples=2, deadline=None)
    def test_composite_learning_rate_op(self, gc, min_num_iter, max_num_iter):
        """'composite' policy: chained sub-policies with per-policy scales."""
        np.random.seed(65535)
        num_lr_policy = 4
        iter_nums = np.random.randint(low=min_num_iter, high=max_num_iter, size=num_lr_policy)
        # Cumulative iteration boundaries of the sub-policies.
        accu_iter_num = copy.deepcopy(iter_nums)
        for i in range(1, num_lr_policy):
            accu_iter_num[i] += accu_iter_num[i - 1]
        total_iter_nums = accu_iter_num[-1]
        policy_lr_scale = np.random.uniform(low=0.1, high=2.0, size=num_lr_policy)
        step_size = np.random.randint(low=2, high=min_num_iter // 2)
        step_gamma = np.random.random()
        exp_gamma = np.random.random()
        base_lr = 0.1

        def step_lr(iter, lr_scale):
            return math.pow(step_gamma, iter // step_size) * lr_scale

        def exp_lr(iter, lr_scale):
            return math.pow(exp_gamma, iter) * lr_scale

        def fixed_lr(iter, lr_scale):
            return lr_scale

        # A single sub-policy keeps applying past its nominal length.
        def one_policy_check_ref(iter, lr_scale):
            iter = int(iter)
            exp_lr_val = exp_lr(iter, lr_scale=lr_scale)
            return (np.array(base_lr * exp_lr_val),)

        op = core.CreateOperator('LearningRate', 'data', 'out', policy='composite', sub_policy_num_iters=iter_nums[:1], sub_policy_0_lr_scale=policy_lr_scale[0], sub_policy_0_policy='exp', sub_policy_0_gamma=exp_gamma, base_lr=base_lr)
        for iter_idx in range(1, total_iter_nums + 1):
            self.assertReferenceChecks(gc, op, [np.asarray([iter_idx])], partial(one_policy_check_ref, lr_scale=policy_lr_scale[0]))

        def all_sub_policy_check_ref(iter, lr_scale):
            assert iter <= accu_iter_num[3]
            if iter <= accu_iter_num[0]:
                lr = exp_lr(iter, lr_scale=lr_scale)
            elif iter <= accu_iter_num[1]:
                lr = step_lr(iter, lr_scale=lr_scale)
            elif iter <= accu_iter_num[2]:
                lr = fixed_lr(iter, lr_scale=lr_scale)
            else:
                lr = exp_lr(iter, lr_scale=lr_scale)
            return (np.array(base_lr * lr),)

        op = core.CreateOperator('LearningRate', 'data', 'out', policy='composite', sub_policy_num_iters=iter_nums, sub_policy_0_policy='exp', sub_policy_0_lr_scale=policy_lr_scale[0], sub_policy_0_gamma=exp_gamma, sub_policy_1_policy='step', sub_policy_1_lr_scale=policy_lr_scale[1], sub_policy_1_stepsize=step_size, sub_policy_1_gamma=step_gamma, sub_policy_2_policy='fixed', sub_policy_2_lr_scale=policy_lr_scale[2], sub_policy_3_policy='exp', sub_policy_3_gamma=exp_gamma, sub_policy_3_lr_scale=policy_lr_scale[3], base_lr=base_lr)
        iter_policy = 0
        for iter_idx in range(1, total_iter_nums + 1):
            if iter_idx > accu_iter_num[iter_policy]:
                iter_policy += 1
            self.assertReferenceChecks(gc, op, [np.asarray([iter_idx])], partial(all_sub_policy_check_ref, lr_scale=policy_lr_scale[iter_policy]))
def visualize_hands(frame, hands_bboxes, hands_kps, hands_kp_scores, vis_thres=0.05):
    """Draw hand boxes, the 21 keypoints and skeleton lines on a copy of frame.

    Keypoints scoring at or below ``vis_thres`` are skipped; a skeleton edge
    from ``l_pair`` is drawn only when both of its endpoints were drawn.
    Returns the annotated copy; ``frame`` itself is untouched.
    """
    canvas = frame.copy()
    for bbox, kps, kp_scores in zip(hands_bboxes, hands_kps, hands_kp_scores):
        # Bounding box in red.
        top_left = (int(bbox[0]), int(bbox[1]))
        bottom_right = (int(bbox[2]), int(bbox[3]))
        cv2.rectangle(canvas, top_left, bottom_right, RED, 1)
        drawn = {}
        for idx in range(21):
            if kp_scores[idx] <= vis_thres:
                continue
            point = (int(kps[idx, 0]), int(kps[idx, 1]))
            drawn[idx] = point
            cv2.circle(canvas, point, 5, p_color[idx], thickness=-1)
        for pair_idx, (start_p, end_p) in enumerate(l_pair):
            if start_p in drawn and end_p in drawn:
                cv2.line(canvas, drawn[start_p], drawn[end_p], line_color[pair_idx], thickness=2)
    return canvas
@register_model_architecture('transformer_lm', 'transformer_lm_gpt3_2_7')
def transformer_lm_gpt3_2_7(args):
    """GPT-3 2.7B decoder-only LM hyperparameters (32 layers, dim 2560, 32 heads).

    Existing values on ``args`` are kept; only missing attributes receive
    these defaults before delegating to ``base_gpt3_architecture``.
    NOTE(review): the source line was a garbled bare call
    ``_model_architecture(...)``; restored to fairseq's
    ``@register_model_architecture`` decorator form.
    """
    args.decoder_layers = safe_getattr(args, 'decoder_layers', 32)
    args.decoder_embed_dim = safe_getattr(args, 'decoder_embed_dim', 2560)
    args.decoder_attention_heads = safe_getattr(args, 'decoder_attention_heads', 32)
    base_gpt3_architecture(args)
def process_variant(variant):
    """Mutate ``variant`` in place: shrink settings when running under
    ``args.debug`` and tag the run with the TDM ``max_tau`` value.
    """
    rl_variant = variant['rl_variant']
    if args.debug:
        # Keep debug runs tiny and fast.
        base_kwargs = rl_variant['algo_kwargs']['base_kwargs']
        base_kwargs['num_epochs'] = 4
        base_kwargs['batch_size'] = 128
        rl_variant['vis_kwargs']['num_samples_for_video'] = 2
        rl_variant['vis_kwargs']['save_period'] = 2
        if 'num_goals_presampled' in variant.get('env_kwargs', {}):
            variant['env_kwargs']['num_goals_presampled'] = 10
        if 'num_goals_presampled' in rl_variant.get('vis_kwargs', {}):
            rl_variant['vis_kwargs']['num_goals_presampled'] = 10
    # Tag the run by max_tau and mirror it into the algo name.
    tag = 'max-tau-' + str(rl_variant['algo_kwargs']['tdm_kwargs']['max_tau'])
    variant['tag'] = tag
    rl_variant['train_algo'] = rl_variant['train_algo'] + '-' + tag
    variant['train_algo'] = rl_variant['train_algo']
class BayesianNetwork(nn.Module):
    """Bayes-by-Backprop style MLP assembled from BayesianLinear layers.

    Weights are sampled on every forward pass; ``BBB_loss`` computes the
    variational free-energy objective (posterior/prior KL minus the data
    log-likelihood), averaged over ``SAMPLES`` weight draws.
    NOTE(review): ALL-CAPS attribute names kept to match callers.
    """
    def __init__(self, inputSize, CLASSES, layers, activations, SAMPLES, BATCH_SIZE, NUM_BATCHES, hasScalarMixturePrior, PI, SIGMA_1, SIGMA_2, GOOGLE_INIT=False):
        # layers: array of hidden-layer widths (may be empty); activations:
        # one activation name per weight layer -- hence the size assert below.
        super().__init__()
        self.inputSize = inputSize
        self.activations = activations
        self.CLASSES = CLASSES
        self.SAMPLES = SAMPLES
        self.BATCH_SIZE = BATCH_SIZE
        self.NUM_BATCHES = NUM_BATCHES
        self.DEPTH = 0
        self.GOOGLE_INIT = GOOGLE_INIT
        # Exactly one more activation than hidden layers (output included).
        assert ((activations.size - layers.size) == 1)
        self.SIGMA_1 = SIGMA_1
        self.hasScalarMixturePrior = hasScalarMixturePrior
        # Scale-mixture prior needs the second sigma and the mixing weight PI.
        if (hasScalarMixturePrior == True):
            self.SIGMA_2 = SIGMA_2
            self.PI = PI
        self.layers = nn.ModuleList([])
        if (layers.size == 0):
            # No hidden layers: single input -> output Bayesian layer.
            self.layers.append(BayesianLinear(inputSize, CLASSES, self))
            self.DEPTH += 1
        else:
            self.layers.append(BayesianLinear(inputSize, layers[0], self))
            self.DEPTH += 1
            for i in range((layers.size - 1)):
                self.layers.append(BayesianLinear(layers[i], layers[(i + 1)], self))
                self.DEPTH += 1
            # Final hidden -> output layer.
            self.layers.append(BayesianLinear(layers[(layers.size - 1)], CLASSES, self))
            self.DEPTH += 1
    def forward(self, x, infer=False):
        # Flatten the input and apply each layer with its configured
        # activation ('relu', 'softmax', or identity for anything else).
        x = x.view((- 1), self.inputSize)
        layerNumber = 0
        for i in range(self.activations.size):
            if (self.activations[i] == 'relu'):
                x = F.relu(self.layers[layerNumber](x, infer))
            elif (self.activations[i] == 'softmax'):
                # Log-probabilities (pairs with F.nll_loss in BBB_loss).
                x = F.log_softmax(self.layers[layerNumber](x, infer), dim=1)
            else:
                x = self.layers[layerNumber](x, infer)
            layerNumber += 1
        return x
    def get_lpw_lqw(self):
        # Sum each layer's log prior (lpw) and log variational posterior
        # (lqw) recorded during the latest forward pass.
        lpw = 0.0
        lpq = 0.0
        for i in range(self.DEPTH):
            lpw += self.layers[i].lpw
            lpq += self.layers[i].lqw
        return (lpw, lpq)
    def BBB_loss(self, input, target, batch_idx=None):
        # Monte-Carlo estimate of the variational free energy over
        # self.SAMPLES forward passes (fresh weight sample each pass).
        (s_log_pw, s_log_qw, s_log_likelihood, sample_log_likelihood) = (0.0, 0.0, 0.0, 0.0)
        for _ in range(self.SAMPLES):
            output = self.forward(input)
            (sample_log_pw, sample_log_qw) = self.get_lpw_lqw()
            if (self.CLASSES > 1):
                # Classification: output is log-softmax, so NLL applies.
                sample_log_likelihood = (- F.nll_loss(output, target, reduction='sum'))
            else:
                # Regression: Gaussian log-likelihood up to a constant.
                sample_log_likelihood = (- (0.5 * ((target - output) ** 2)).sum())
            s_log_pw += sample_log_pw
            s_log_qw += sample_log_qw
            s_log_likelihood += sample_log_likelihood
        (l_pw, l_qw, l_likelihood) = ((s_log_pw / self.SAMPLES), (s_log_qw / self.SAMPLES), (s_log_likelihood / self.SAMPLES))
        if (batch_idx is None):
            # Uniform KL weighting across minibatches.
            return (((1.0 / self.NUM_BATCHES) * (l_qw - l_pw)) - l_likelihood)
        else:
            # Geometric minibatch weighting 2^{M-i}/(2^M - 1) -- presumably
            # the pi_i scheme from "Weight Uncertainty in Neural Networks".
            return ((((2.0 ** ((self.NUM_BATCHES - batch_idx) - 1.0)) / ((2.0 ** self.NUM_BATCHES) - 1)) * (l_qw - l_pw)) - l_likelihood)
def weighted_kappa_calc(classes, table, P, TOP, POP, weight):
    """Compute the weighted kappa statistic from the confusion ``table``.

    ``weight`` is a nested dict of disagreement weights; entries are
    normalized by the maximum weight to form agreement values in [0, 1].
    Returns the string 'None' if any lookup/arithmetic fails (kept from
    the original: callers appear to expect a string sentinel, not None).
    """
    try:
        w_max = max(max(row.values()) for row in weight.values())
        expected = 0
        observed = 0
        for i in classes:
            for j in classes:
                # Agreement value for the (i, j) cell.
                v_i_j = 1 - weight[i][j] / w_max
                expected += P[i] * TOP[j] * v_i_j / (POP[i] ** 2)
                observed += table[i][j] * v_i_j / POP[i]
        return reliability_calc(expected, observed)
    except Exception:
        return 'None'
class Control(Consumer):
    """Consumer that produces per-example action outputs by feeding
    sentence embeddings tiled over control-net images into ``network``.
    """
    def __init__(self, network, action_size, include_state=False):
        # network: forward-capable model consumed in consume();
        # action_size: width of the output action head;
        # include_state: when True, also feeds 'ctrnet_states' as an fc input.
        self.network = network
        self.action_size = action_size
        self.include_state = include_state
        super().__init__()
    def consume(self, inputs):
        # Sentence embedding per batch element; broadcast over examples and
        # spatial dims so it can be concatenated onto the image channels.
        s = self.get(inputs, 'sentences')
        ctrnet_images = self.get(inputs, 'ctrnet_images')
        examples = ctrnet_images.shape[1]
        width = ctrnet_images.shape[(- 2)]
        height = ctrnet_images.shape[(- 3)]
        # (batch, emb) -> (batch, 1, 1, 1, emb), then tile to image layout.
        s = tf.expand_dims(tf.expand_dims(tf.expand_dims(s, axis=1), axis=1), axis=1)
        tiled = tf.tile(s, [1, examples, height, width, 1])
        ctrnet_input = tf.concat([ctrnet_images, tiled], axis=(- 1))
        emb_plus_channels = ctrnet_input.shape[(- 1)]
        # Fold batch and example dims together for the conv stack.
        ctrnet_input = tf.reshape(ctrnet_input, ((- 1), height, width, emb_plus_channels))
        net_ins = [NetworkInput(name='ctr_images', layer_type='conv', layer_num=0, tensor=ctrnet_input)]
        if self.include_state:
            # Optional low-dimensional state, merged at the first fc layer.
            states = self.get(inputs, 'ctrnet_states')
            states = tf.reshape(states, ((- 1), states.shape[(- 1)]))
            net_ins.append(NetworkInput(name='ctrnet_states', layer_type='fc', layer_num=0, tensor=states, merge_mode='concat'))
        net_out = NetworkHead(name='output_action', nodes=self.action_size)
        # AUTO_REUSE lets repeated consume() calls share variables.
        with tf.variable_scope('control_net', reuse=tf.AUTO_REUSE):
            outputs = self.network.forward(net_ins, [net_out], self.get(inputs, 'training'))
        # Restore the (batch, examples, action) layout.
        inputs['output_actions'] = tf.reshape(self.get(outputs, 'output_action'), ((- 1), examples, self.action_size))
        return inputs
def to_pretty_midi_key_signature(key_signature: KeySignature, map_time: Callable=None) -> Optional[PmKeySignature]:
    """Convert a KeySignature into a pretty_midi KeySignature.

    Returns None when the root is missing or the mode is not
    'major'/'minor'. When ``map_time`` is given it is applied to the
    event time; otherwise the original time is used unchanged.
    """
    if key_signature.root is None or key_signature.mode not in ('major', 'minor'):
        return None
    time = key_signature.time if map_time is None else map_time(key_signature.time)
    key_name = f'{PITCH_NAMES[key_signature.root]} {key_signature.mode}'
    return PmKeySignature(key_number=key_name_to_key_number(key_name), time=time)
def read_dtype(mat_stream, a_dtype):
    """Read one scalar of dtype ``a_dtype`` from ``mat_stream``.

    Reads exactly ``a_dtype.itemsize`` bytes and wraps them in a 0-d
    numpy array (no copy of the buffer). 'F' order is irrelevant for a
    scalar but kept for parity with the callers' convention.
    """
    raw = mat_stream.read(a_dtype.itemsize)
    return np.ndarray(shape=(), dtype=a_dtype, buffer=raw, order='F')
class Tokenizer(nn.Module):
    """Character-level tokenizer/sentence-splitter model.

    A BiLSTM over character embeddings (+ per-char features) feeds three
    binary classifiers: token end, sentence end and -- when
    ``args['use_mwt']`` -- multi-word-token. Optional residual conv
    branches and an optional second-stage hierarchical BiLSTM refine the
    logits. ``forward`` returns per-position log-probabilities over the
    joint label combinations.
    """
    def __init__(self, args, nchars, emb_dim, hidden_dim, dropout, feat_dropout):
        super().__init__()
        self.args = args
        feat_dim = args['feat_dim']
        # padding_idx=0: index 0 is the pad character.
        self.embeddings = nn.Embedding(nchars, emb_dim, padding_idx=0)
        # Inter-layer dropout only makes sense with >1 LSTM layer.
        self.rnn = nn.LSTM((emb_dim + feat_dim), hidden_dim, num_layers=self.args['rnn_layers'], bidirectional=True, batch_first=True, dropout=(dropout if (self.args['rnn_layers'] > 1) else 0))
        if (self.args['conv_res'] is not None):
            # Residual conv branches; 'conv_res' is a comma-separated list
            # of kernel sizes.
            self.conv_res = nn.ModuleList()
            self.conv_sizes = [int(x) for x in self.args['conv_res'].split(',')]
            for (si, size) in enumerate(self.conv_sizes):
                # Bias only on the first branch unless hierarchical conv
                # residuals are enabled (then every branch gets one).
                l = nn.Conv1d((emb_dim + feat_dim), (hidden_dim * 2), size, padding=(size // 2), bias=(self.args.get('hier_conv_res', False) or (si == 0)))
                self.conv_res.append(l)
            if self.args.get('hier_conv_res', False):
                # 1x1 conv merging all branch outputs.
                self.conv_res2 = nn.Conv1d(((hidden_dim * 2) * len(self.conv_sizes)), (hidden_dim * 2), 1)
        # Binary heads over the (bidirectional) hidden state.
        self.tok_clf = nn.Linear((hidden_dim * 2), 1)
        self.sent_clf = nn.Linear((hidden_dim * 2), 1)
        if self.args['use_mwt']:
            self.mwt_clf = nn.Linear((hidden_dim * 2), 1)
        if args['hierarchical']:
            # Second-stage BiLSTM plus bias-free refinement heads.
            in_dim = (hidden_dim * 2)
            self.rnn2 = nn.LSTM(in_dim, hidden_dim, num_layers=1, bidirectional=True, batch_first=True)
            self.tok_clf2 = nn.Linear((hidden_dim * 2), 1, bias=False)
            self.sent_clf2 = nn.Linear((hidden_dim * 2), 1, bias=False)
            if self.args['use_mwt']:
                self.mwt_clf2 = nn.Linear((hidden_dim * 2), 1, bias=False)
        self.dropout = nn.Dropout(dropout)
        self.dropout_feat = nn.Dropout(feat_dropout)
        # Randomly masks token logits feeding the hierarchical stage.
        self.toknoise = nn.Dropout(self.args['tok_noise'])
    def forward(self, x, feats):
        # x: character id tensor; feats: per-character feature tensor
        # concatenated onto the embeddings along the last dim.
        emb = self.embeddings(x)
        emb = self.dropout(emb)
        feats = self.dropout_feat(feats)
        emb = torch.cat([emb, feats], 2)
        (inp, _) = self.rnn(emb)
        if (self.args['conv_res'] is not None):
            # Conv1d wants (batch, channels, seq).
            conv_input = emb.transpose(1, 2).contiguous()
            if (not self.args.get('hier_conv_res', False)):
                # Flat: add each branch's output as a residual.
                for l in self.conv_res:
                    inp = (inp + l(conv_input).transpose(1, 2).contiguous())
            else:
                # Hierarchical: concat branches, nonlinearity, 1x1 merge.
                hid = []
                for l in self.conv_res:
                    hid += [l(conv_input)]
                hid = torch.cat(hid, 1)
                hid = F.relu(hid)
                hid = self.dropout(hid)
                inp = (inp + self.conv_res2(hid).transpose(1, 2).contiguous())
        inp = self.dropout(inp)
        # First-stage logits.
        tok0 = self.tok_clf(inp)
        sent0 = self.sent_clf(inp)
        if self.args['use_mwt']:
            mwt0 = self.mwt_clf(inp)
        if self.args['hierarchical']:
            if (self.args['hier_invtemp'] > 0):
                # Soft-gate the states by (noisy) non-token probability
                # before the second-stage RNN.
                (inp2, _) = self.rnn2((inp * (1 - self.toknoise(torch.sigmoid(((- tok0) * self.args['hier_invtemp']))))))
            else:
                (inp2, _) = self.rnn2(inp)
            inp2 = self.dropout(inp2)
            # Refinement heads add onto the first-stage logits.
            tok0 = (tok0 + self.tok_clf2(inp2))
            sent0 = (sent0 + self.sent_clf2(inp2))
            if self.args['use_mwt']:
                mwt0 = (mwt0 + self.mwt_clf2(inp2))
        # log sigma(z) / log sigma(-z): log-probabilities of each label.
        nontok = F.logsigmoid((- tok0))
        tok = F.logsigmoid(tok0)
        nonsent = F.logsigmoid((- sent0))
        sent = F.logsigmoid(sent0)
        if self.args['use_mwt']:
            nonmwt = F.logsigmoid((- mwt0))
            mwt = F.logsigmoid(mwt0)
        # Joint log-probs over label combinations (5 classes with MWT,
        # 3 without), concatenated on the last dim.
        if self.args['use_mwt']:
            pred = torch.cat([nontok, ((tok + nonsent) + nonmwt), ((tok + sent) + nonmwt), ((tok + nonsent) + mwt), ((tok + sent) + mwt)], 2)
        else:
            pred = torch.cat([nontok, (tok + nonsent), (tok + sent)], 2)
        return pred
def initialize(N, seed=42):
    """Return four independent uniform([0, 1)) vectors of length ``N``.

    Parameters:
        N: length of each returned vector.
        seed: PRNG seed; defaults to 42 so existing callers keep getting
            the exact same deterministic values as before.

    Returns:
        tuple (t0, p0, t1, p1) of numpy arrays, each of shape (N,).
    """
    from numpy.random import default_rng
    rng = default_rng(seed)
    # Draw order matters for reproducibility: t0, p0, t1, p1.
    (t0, p0, t1, p1) = (rng.random((N,)), rng.random((N,)), rng.random((N,)), rng.random((N,)))
    return (t0, p0, t1, p1)
def cb_pose(data):
    """ROS callback: pair a pose message with the matching camera frame,
    draw the detected humans on it, and publish the annotated image.
    """
    stamp = data.header.stamp
    image = vf.get_latest(stamp, remove_older=True)
    if image is None:
        rospy.logwarn('No received images.')
        return
    height, width = image.shape[:2]
    if resize_ratio > 0:
        # cv2.resize takes (width, height).
        new_size = (int(resize_ratio * width), int(resize_ratio * height))
        image = cv2.resize(image, new_size, interpolation=cv2.INTER_LINEAR)
    # Rebuild Human/BodyPart objects from the message.
    humans = []
    for person in data.persons:
        human = Human([])
        for body_part in person.body_part:
            human.body_parts[body_part.part_id] = BodyPart('', body_part.part_id, body_part.x, body_part.y, body_part.confidence)
        humans.append(human)
    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    pub_img.publish(cv_bridge.cv2_to_imgmsg(image, 'bgr8'))
class VSRCaptionEvalDataset(VSRCaptionDataset):
    """Evaluation variant of VSRCaptionDataset: identical samples, but the
    ground-truth 'text_input' field is stripped (not needed at eval time).
    """
    def __getitem__(self, index):
        data = super().__getitem__(index)
        # 'is not None' instead of '!= None': identity check is the correct
        # idiom and avoids invoking a custom __eq__ on the sample.
        if data is not None:
            del data['text_input']
        return data
class TestDeepModels(unittest.TestCase):
    """Smoke tests for Merlion-style deep forecasters (DeepAR, Autoformer,
    Informer, ETSformer, Transformer): train briefly on the weather
    dataset, save/load, and forecast one rolling window.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Shared training hyperparameters for every model under test.
        self.n_past = 16
        self.max_forecast_steps = 8
        self.early_stop_patience = 4
        self.num_epochs = 2
        self.use_gpu = True
        self.batch_size = 32
        df = self._obtain_df('weather')
        # Small train/test split: 320 rows each.
        bound = 16 * 20
        self.train_df = df[0:bound]
        self.test_df = df[bound:(2 * bound)]
        self.train_data = TimeSeries.from_pd(self.train_df)
        self.test_data = TimeSeries.from_pd(self.test_df)

    def test_deep_ar_predict_univariate(self):
        print('-' * 80)
        logger.info('test_deep_ar_predict_univariate\n' + ('-' * 80))
        self._test_deep_ar(20)

    def test_deep_ar_predict_multivariate(self):
        print('-' * 80)
        logger.info('test_deep_ar_predict_multivariate\n' + ('-' * 80))
        self._test_deep_ar(None)

    def test_autoformer_predict_univariate(self):
        print('-' * 80)
        logger.info('test_autoformer_predict_univariate\n' + ('-' * 80))
        self._test_autoformer(9)

    def test_autoformer_predict_multivariate(self):
        print('-' * 80)
        logger.info('test_autoformer_predict_multivariate\n' + ('-' * 80))
        self._test_autoformer(None)

    def test_informer_predict_univariate(self):
        print('-' * 80)
        logger.info('test_informer_predict_univariate\n' + ('-' * 80))
        self._test_informer(3)

    def test_informer_predict_multivariate(self):
        print('-' * 80)
        logger.info('test_informer_predict_multivariate\n' + ('-' * 80))
        self._test_informer(None)

    def test_etsformer_predict_univariate(self):
        print('-' * 80)
        logger.info('test_etsformer_predict_univariate\n' + ('-' * 80))
        self._test_etsformer(15)

    def test_etsformer_predict_multivariate(self):
        print('-' * 80)
        logger.info('test_etsformer_predict_multivariate\n' + ('-' * 80))
        self._test_etsformer(None)

    def test_transformer_predict_univariate(self):
        print('-' * 80)
        logger.info('test_transformer_predict_univariate\n' + ('-' * 80))
        self._test_transformer(0)

    def test_transformer_predict_multivariate(self):
        print('-' * 80)
        logger.info('test_transformer_predict_multivariate\n' + ('-' * 80))
        self._test_transformer(None)

    def _test_deep_ar(self, target_seq_index):
        """Build a DeepAR forecaster (univariate if target_seq_index set)."""
        logger.info('Testing Deep AR forecasting')
        config = DeepARConfig(n_past=self.n_past, max_forecast_steps=self.max_forecast_steps, early_stop_patience=self.early_stop_patience, num_epochs=self.num_epochs, use_gpu=self.use_gpu, batch_size=self.batch_size, target_seq_index=target_seq_index)
        self._test_model(DeepARForecaster(config), self.train_data, self.test_data)

    def _test_autoformer(self, target_seq_index):
        logger.info('Testing Autoformer forecasting')
        start_token_len = 3
        config = AutoformerConfig(n_past=self.n_past, max_forecast_steps=self.max_forecast_steps, start_token_len=start_token_len, early_stop_patience=self.early_stop_patience, num_epochs=self.num_epochs, use_gpu=self.use_gpu, batch_size=self.batch_size, target_seq_index=target_seq_index)
        self._test_model(AutoformerForecaster(config), self.train_data, self.test_data)

    def _test_transformer(self, target_seq_index):
        logger.info('Testing Transformer forecasting')
        start_token_len = 3
        config = TransformerConfig(n_past=self.n_past, max_forecast_steps=self.max_forecast_steps, start_token_len=start_token_len, early_stop_patience=self.early_stop_patience, num_epochs=self.num_epochs, use_gpu=self.use_gpu, batch_size=self.batch_size, target_seq_index=target_seq_index)
        self._test_model(TransformerForecaster(config), self.train_data, self.test_data)

    def _test_informer(self, target_seq_index):
        logger.info('Testing Informer forecasting')
        start_token_len = 3
        config = InformerConfig(n_past=self.n_past, max_forecast_steps=self.max_forecast_steps, start_token_len=start_token_len, early_stop_patience=self.early_stop_patience, num_epochs=self.num_epochs, use_gpu=self.use_gpu, batch_size=self.batch_size, target_seq_index=target_seq_index)
        self._test_model(InformerForecaster(config), self.train_data, self.test_data)

    def _test_etsformer(self, target_seq_index):
        logger.info('Testing ETSformer forecasting')
        config = ETSformerConfig(n_past=self.n_past, max_forecast_steps=self.max_forecast_steps, top_K=3, early_stop_patience=self.early_stop_patience, num_epochs=self.num_epochs, use_gpu=self.use_gpu, batch_size=self.batch_size, target_seq_index=target_seq_index)
        self._test_model(ETSformerForecaster(config), self.train_data, self.test_data)

    def _obtain_df(self, dataset_name='weather'):
        """Download (if needed) and load the named dataset as a DataFrame."""
        data_dir = join(rootdir, 'data')
        if (dataset_name == 'weather'):
            # TODO(review): the URL string literal was lost in extraction
            # (the source had a dangling quote); restore the Google-Drive
            # folder URL before this download path can work.
            data_url = ''
            data_folder = join(data_dir, 'weather')
            data_file_path = join(data_folder, 'weather.csv')
        else:
            raise NotImplementedError
        if (not exists(data_file_path)):
            # Retry the download until it succeeds (gdown can time out).
            while True:
                try:
                    gdown.download_folder(data_url, quiet=False, use_cookies=False)
                except TimeoutError:
                    logger.error('Timeout Error, try downloading again...')
                else:
                    logger.info('Successfully downloaded %s!' % dataset_name)
                    break
            shutil.move('./%s' % dataset_name, data_folder)
        weather_ds = CustomDataset(data_folder)
        (df, metadata) = weather_ds[0]
        return df

    def _test_model(self, forecaster, train_data, test_data):
        """Train, save, reload and forecast one rolling window."""
        config = forecaster.config
        model_name = forecaster.deep_model_class.__name__
        model_save_path = join('./models', model_name.lower())
        logger.info(model_name)
        forecaster.train(train_data)
        forecaster.save(model_save_path)
        dataset = RollingWindowDataset(test_data, target_seq_index=config.target_seq_index, n_past=config.n_past, n_future=config.max_forecast_steps, ts_index=True)
        (test_prev, test) = dataset[0]
        forecaster.load(model_save_path)
        (pred, _) = forecaster.forecast(test.time_stamps, time_series_prev=test_prev)
        # NOTE(review): the else branch only asserts that train_data.dim is
        # truthy; the intent was probably `pred.dim == train_data.dim`.
        # Kept as-is to avoid changing what the suite accepts.
        assert ((pred.dim == 1) if (forecaster.target_seq_index is not None) else train_data.dim)
        try:
            shutil.rmtree(model_save_path)
        except OSError as e:
            logger.error(f'Error: {e.filename} - {e.strerror}.')
def plot_hl(pred_json, save_dir_i, base_json=None):
    """Plot normalized GT vs. predicted saliency curves over a video's
    duration and save the figure as '2_hl.jpg' in ``save_dir_i``.

    pred_json: dict with 'pred', 'gt' (saliency arrays) and 'duration'.
    base_json: optional dict with 'pred_saliency_scores' for a baseline
        (MomentDETR) curve drawn alongside.
    Relies on module globals: norm, clip_len, gap, the color*_dark values,
    font_prop1/font_prop3 and fontsize1.
    """
    pred_saliency = np.array(pred_json['pred'])
    pred_saliency = norm(pred_saliency)
    gt_saliency = np.array(pred_json['gt'])
    gt_saliency = norm(gt_saliency)
    duration = pred_json['duration']
    (t_min, t_max) = (0, duration)
    # One x position per clip; truncate to the prediction length.
    x = np.arange(t_min, t_max, clip_len)
    x = x[:len(pred_saliency)]
    plt.figure(figsize=(1, 1))
    if (not base_json):
        (fig, ax) = plt.subplots(1, 1, figsize=(50, 2))
        plt.plot(x, gt_saliency, label='GT Saliency', color=color1_dark, linewidth=6, linestyle='solid')
        plt.plot(x, pred_saliency, label="UniVTG's Prediction", color=color2_dark, linewidth=6, linestyle='solid')
    else:
        # Taller figure to fit the extra baseline curve.
        (fig, ax) = plt.subplots(1, 1, figsize=(50, 3))
        base_saliency = np.array(base_json['pred_saliency_scores'])
        base_saliency = norm(base_saliency)
        plt.plot(x, gt_saliency, label='GT Saliency', color=color1_dark, linewidth=6, linestyle='solid')
        plt.plot(x, pred_saliency, label="UniVTG's Prediction", color=color2_dark, linewidth=6, linestyle='solid')
        plt.plot(x, base_saliency, label="MomentDETR's Prediction", color=color3_dark, linewidth=6, linestyle='solid')
    for label in ax.get_xticklabels():
        label.set_fontproperties(font_prop1)
    for label in ax.get_yticklabels():
        label.set_fontproperties(font_prop1)
    for spine in ax.spines.values():
        spine.set_visible(False)
    ax.set_xlim(left=0, right=(duration - clip_len))
    # Hand-placed time labels below the axis (ticks are disabled below).
    offset = (pred_json['duration'] * 0.01)
    ax.text(offset, (- 0.2), '0.0', va='center', ha='center', color='black', fontproperties=font_prop1)
    ax.text(((pred_json['duration'] - clip_len) - offset), (- 0.2), f"{pred_json['duration']:.1f}", va='center', ha='center', color='black', fontproperties=font_prop1)
    for i in np.arange(0, (pred_json['duration'] + (gap / 2)), gap)[1:(- 1)]:
        ax.text(i, (- 0.2), '{:.1f}'.format(i), va='center', ha='center', color='black', fontproperties=font_prop1)
    ax.set_yticks([])
    ax.set_xticks([])
    ax.tick_params(axis='both', labelsize=fontsize1)
    legend = ax.legend(prop=font_prop3, loc='upper left', bbox_to_anchor=(0, 1.1))
    # Color each legend label like its curve.
    (lines, labels) = (legend.get_lines(), legend.get_texts())
    for (line, label) in zip(lines, labels):
        label.set_color(line.get_color())
    for position in ['top', 'right']:
        ax.spines[position].set_visible(False)
    for position in ['bottom', 'left']:
        ax.spines[position].set_visible(True)
        ax.spines[position].set_linewidth(2)
    plt.savefig(os.path.join(save_dir_i, '2_hl.jpg'), bbox_inches='tight', pad_inches=0.2, dpi=100)
    return
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True, errors='replace', separator='&', cls=None):
    """Parse a query string into a MultiDict-like mapping ``cls``.

    ``s`` may be text or bytes; the separator is coerced to match its
    type (falling back to 'ascii' if ``charset`` is falsy). Remaining
    arguments are forwarded to ``_url_decode_impl``.
    """
    if cls is None:
        from .datastructures import MultiDict
        cls = MultiDict
    # Coerce separator to the same text/bytes type as the input string.
    if isinstance(s, text_type) and not isinstance(separator, text_type):
        separator = separator.decode(charset or 'ascii')
    elif isinstance(s, bytes) and not isinstance(separator, bytes):
        separator = separator.encode(charset or 'ascii')
    pairs = s.split(separator)
    return cls(_url_decode_impl(pairs, charset, decode_keys, include_empty, errors))
def gt_lesion_segm_stat_sbct(args):
    """Aggregate per-tumor intensity histograms over a case list and plot
    the averaged histogram plus a KDE-derived probability curve.

    Expects ``args`` to carry: set_txt_path, data_root_path, hist_bin_min,
    hist_bin_max, hist_bin_size. Saves the raw histograms as .npy and the
    figure as .eps under 'lesion_stat'. Uses module globals ``fontsize``
    and third-party nib/medpy_smooth/plt/stats/optimize imports.
    """
    stat_save_path = 'lesion_stat'
    if (not os.path.exists(stat_save_path)):
        os.makedirs(stat_save_path)
    with open(args.set_txt_path) as f:
        case_list = [x.strip() for x in f.readlines()]
    # Shared histogram bin edges for every tumor.
    bins = list(range(args.hist_bin_min, (args.hist_bin_max + 1), args.hist_bin_size))
    tumor_int_hist_list = []
    for cur_case_path in case_list:
        img_path = os.path.join(args.data_root_path, cur_case_path, 'img_crop.nii.gz')
        segm_path = os.path.join(args.data_root_path, cur_case_path, 'tumor_segm_crop.nii.gz')
        img = nib.load(img_path)
        img = img.get_fdata()
        # Edge-preserving smoothing before computing intensity stats.
        img = medpy_smooth.anisotropic_diffusion(img, niter=5, kappa=50, gamma=0.1, voxelspacing=None, option=3)
        segm = nib.load(segm_path)
        segm = segm.get_fdata()
        segm = segm.astype(np.uint8)
        # Label 0 is background; one histogram per tumor label.
        for tumor_idx in list(np.unique(segm))[1:]:
            cur_tumor_segm = (segm == tumor_idx)
            cur_tumor_int_list = list(img[cur_tumor_segm].reshape((- 1)))
            cur_tumor_int_list = sorted(cur_tumor_int_list)
            (hist, bin_edges) = np.histogram(cur_tumor_int_list, bins)
            tumor_int_hist_list.append(hist)
    tumor_int_hist_list = np.array(tumor_int_hist_list)
    np.save(os.path.join(stat_save_path, 'tumor_int_hist_list_sbct.npy'), tumor_int_hist_list)
    # Average of per-tumor normalized histograms.
    cum_hist = np.zeros(tumor_int_hist_list.shape[1])
    for idx in range(tumor_int_hist_list.shape[0]):
        norm_hist = (tumor_int_hist_list[idx] / np.sum(tumor_int_hist_list[idx]))
        cum_hist = (cum_hist + norm_hist)
    cum_hist = (cum_hist / tumor_int_hist_list.shape[0])
    # Left axis: histogram bars; right axis: KDE-based probability curve.
    (fig, ax1) = plt.subplots()
    ax2 = ax1.twinx()
    fig.set_size_inches(18, 9)
    ax1.bar(bins[:(- 1)], cum_hist, width=8, color='gray', alpha=1, label='intensity histogram')
    ax1.set_xticks([((i * args.hist_bin_size) - (0.5 * args.hist_bin_size)) for i in range((len(cum_hist) + 1))])
    # Label every other tick to avoid crowding.
    temp = []
    for (idx, elem) in enumerate(bins):
        if ((idx % 2) == 0):
            temp.append(str(elem))
        else:
            temp.append('')
    ax1.set_xticklabels(temp)
    ax1.set_yticks([0, 0.04, 0.08, 0.12, 0.16, 0.2])
    ax1.set_yticklabels([0, 0.04, 0.08, 0.12, 0.16, 0.2])
    ax1.tick_params(axis='x', labelsize=fontsize)
    ax1.tick_params(axis='y', labelsize=fontsize)
    ax1.set_xlabel('Hounsfield unit', fontsize=fontsize)
    ax1.set_ylabel('proportion', fontsize=fontsize, color='gray')
    ax1.legend(loc=2, fontsize=fontsize)
    # Resample the averaged histogram into ~N points (bin centers repeated
    # proportionally to their mass) so a KDE can be fit.
    N = 1000
    data = []
    for idx in range(len(cum_hist)):
        cur_ub = bins[(idx + 1)]
        cur_data = ([(cur_ub - (args.hist_bin_size * 0.5))] * int(np.round((N * cum_hist[idx]))))
        data += cur_data
    kde = stats.gaussian_kde(data)
    # Peak of the KDE (found by minimizing its negation) normalizes the
    # curve to a maximum of 1.
    opt = optimize.minimize_scalar((lambda x: (- kde(x))))
    max_val = (- opt.fun[0])
    xx = np.linspace(args.hist_bin_min, args.hist_bin_max, (args.hist_bin_max - args.hist_bin_min))
    ax2.plot(xx, (kde(xx) / max_val), color='black', label='ILP function')
    ax2.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
    ax2.set_yticklabels([0, 0.2, 0.4, 0.6, 0.8, 1])
    ax2.tick_params(axis='x', labelsize=fontsize)
    ax2.tick_params(axis='y', labelsize=fontsize)
    ax2.set_ylabel('probability', fontsize=fontsize, color='black')
    ax2.legend(loc=1, fontsize=fontsize)
    plt.savefig(os.path.join(stat_save_path, 'tumor_int_hist_func_sbct.eps'))
class NadamOptimizer(tf_compat.v1.train.AdamOptimizer):
    """Adam with Nesterov momentum (NAdam).

    The dense paths simply forward to TF's fused Adam kernels with
    ``use_nesterov=True``; the sparse path re-implements the update with
    the Nesterov-style ``m_bar`` lookahead.
    """
    def _apply_dense(self, grad, var):
        # Same as AdamOptimizer._apply_dense but with use_nesterov=True.
        from tensorflow.python.training import training_ops
        from tensorflow.python.ops import math_ops
        m = self.get_slot(var, 'm')
        v = self.get_slot(var, 'v')
        (beta1_power, beta2_power) = self._get_beta_accumulators()
        return training_ops.apply_adam(var, m, v, math_ops.cast(beta1_power, var.dtype.base_dtype), math_ops.cast(beta2_power, var.dtype.base_dtype), math_ops.cast(self._lr_t, var.dtype.base_dtype), math_ops.cast(self._beta1_t, var.dtype.base_dtype), math_ops.cast(self._beta2_t, var.dtype.base_dtype), math_ops.cast(self._epsilon_t, var.dtype.base_dtype), grad, use_locking=self._use_locking, use_nesterov=True).op
    def _resource_apply_dense(self, grad, var):
        # ResourceVariable flavor of the dense update; casts use grad's
        # dtype here (matching the upstream TF implementation).
        from tensorflow.python.training import training_ops
        from tensorflow.python.ops import math_ops
        m = self.get_slot(var, 'm')
        v = self.get_slot(var, 'v')
        (beta1_power, beta2_power) = self._get_beta_accumulators()
        return training_ops.resource_apply_adam(var.handle, m.handle, v.handle, math_ops.cast(beta1_power, grad.dtype.base_dtype), math_ops.cast(beta2_power, grad.dtype.base_dtype), math_ops.cast(self._lr_t, grad.dtype.base_dtype), math_ops.cast(self._beta1_t, grad.dtype.base_dtype), math_ops.cast(self._beta2_t, grad.dtype.base_dtype), math_ops.cast(self._epsilon_t, grad.dtype.base_dtype), grad, use_locking=self._use_locking, use_nesterov=True)
    def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        # Sparse NAdam update; `scatter_add` abstracts over variable kinds.
        from tensorflow.python.ops import math_ops
        from tensorflow.python.ops import state_ops
        from tensorflow.python.ops import array_ops
        from tensorflow.python.framework import ops
        from tensorflow.python.ops import control_flow_ops
        (beta1_power, beta2_power) = self._get_beta_accumulators()
        beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        # Bias-corrected learning rate.
        lr = ((lr_t * math_ops.sqrt((1 - beta2_power))) / (1 - beta1_power))
        # m_t <- beta1 * m, then scatter in (1 - beta1) * grad; the
        # control dependency orders the decay before the scatter.
        m = self.get_slot(var, 'm')
        m_scaled_g_values = (grad * (1 - beta1_t))
        m_t = state_ops.assign(m, (m * beta1_t), use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = scatter_add(m, indices, m_scaled_g_values)
            # Nesterov lookahead: use the gradient term plus beta1 * m_t.
            m_bar = (m_scaled_g_values + (beta1_t * array_ops.gather(m_t, indices)))
        # v_t <- beta2 * v + (1 - beta2) * grad^2, same ordering trick.
        v = self.get_slot(var, 'v')
        v_scaled_g_values = ((grad * grad) * (1 - beta2_t))
        v_t = state_ops.assign(v, (v * beta2_t), use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = scatter_add(v, indices, v_scaled_g_values)
        v_t_slice = array_ops.gather(v_t, indices)
        v_sqrt = math_ops.sqrt(v_t_slice)
        # Parameter step only touches the gathered indices.
        var_update = scatter_add(var, indices, (((- lr) * m_bar) / (v_sqrt + epsilon_t)))
        return control_flow_ops.group(*[var_update, m_bar, v_t])
class LookAtObjInLightTask(BaseTask):
    """Task: hold a target (possibly sliced) object while a target toggleable
    light source is on and visible.

    Progress is reported as a (satisfied, total) pair of goal conditions.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def goal_satisfied(self, state):
        """Return True when every goal condition is met in `state`."""
        pcs = self.goal_conditions_met(state)
        return (pcs[0] == pcs[1])

    def goal_conditions_met(self, state):
        """Count satisfied vs. total goal conditions for `state`.

        Conditions: (1) the target object is in the inventory, (2) some target
        toggleable is toggled on and visible, plus (3) a sliced instance exists
        when the target is a 'Sliced' object.
        """
        ts = 2
        s = 0
        targets = self.get_targets()
        toggleables = get_objects_with_name_and_prop(targets['toggle'], 'toggleable', state.metadata)
        pickupables = get_objects_with_name_and_prop(targets['object'], 'pickupable', state.metadata)
        inventory_objects = state.metadata['inventoryObjects']
        if ('Sliced' in targets['object']):
            # Extra condition: at least one sliced instance must exist.
            ts += 1
            if (len([p for p in pickupables if ('Sliced' in p['objectId'])]) >= 1):
                s += 1
        # Condition: held object is one of the target pickupables.
        if ((len(inventory_objects) > 0) and (inventory_objects[0]['objectId'] in [p['objectId'] for p in pickupables])):
            s += 1
        # Condition: some target toggleable is on and visible to the agent.
        if np.any([(t['isToggled'] and t['visible']) for t in toggleables]):
            s += 1
        return (s, ts)

    def reset(self):
        super().reset()
def assert_allclose(actual, desired, rtol=1e-07, atol=0, equal_nan=True, err_msg='', verbose=True):
    """Raise an AssertionError if two objects are not elementwise equal up to tolerance.

    Mirrors ``numpy.testing.assert_allclose``: both inputs are coerced with
    ``np.asanyarray`` and compared with an ``isclose`` predicate via
    ``assert_array_compare``.

    Args:
        actual, desired: array-likes to compare.
        rtol, atol: relative / absolute tolerances.
        equal_nan: treat NaNs in matching positions as equal.
        err_msg: message appended on failure.
        verbose: include the mismatching values in the failure message.
    """
    __tracebackhide__ = True  # hide this frame in pytest tracebacks
    import numpy as np

    def compare(x, y):
        # Use the public np.isclose: np.core.numeric is a private path that
        # was removed in NumPy 2.0 (np.isclose is the same function).
        return np.isclose(x, y, rtol=rtol, atol=atol, equal_nan=equal_nan)
    (actual, desired) = (np.asanyarray(actual), np.asanyarray(desired))
    header = ('Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol))
    assert_array_compare(compare, actual, desired, err_msg=str(err_msg), verbose=verbose, header=header, equal_nan=equal_nan)
class NNPolicy(object):
    """Abstract interface for neural-network policies.

    Concrete subclasses must override both query methods; the base class only
    fixes the call contract.
    """

    def __init__(self):
        # No shared state at the interface level.
        pass

    def multi_state_policy(self, states, agent_indices):
        """Return actions for `states` restricted to `agent_indices` (abstract)."""
        raise NotImplementedError()

    def multi_obs_policy(self, states):
        """Return actions for a batch of observations (abstract)."""
        raise NotImplementedError()
class GPT2LoraInt8Engine(CausalLoraEngine):
    """GPT-2 causal LoRA engine loaded in 8-bit, adapting the fused `c_attn` projection."""
    # Registry/config identifier for this engine variant.
    config_name: str = 'gpt2_lora_engine_int8'

    def __init__(self, weights_path: Optional[Union[(str, Path)]]=None):
        # `load_8bit=True` requests int8 quantized weights; GPT-2 packs Q/K/V
        # into the single `c_attn` module, hence the single LoRA target.
        super().__init__(model_name='gpt2', weights_path=weights_path, load_8bit=True, target_modules=['c_attn'])
        # GPT-2 ships without a pad token; reuse EOS so batched generation works.
        self.tokenizer.pad_token = self.tokenizer.eos_token
# NOTE(review): `_module()` looks like a mangled registration decorator
# (e.g. `@MODELS.register_module()`); as written it is a bare call whose
# result is discarded — confirm against the original framework source.
_module()
class SOLOv2(SingleStageInstanceSegmentor):
    """SOLOv2 single-stage instance segmentor: thin wrapper that wires the
    backbone/neck/heads into the shared base class."""
    def __init__(self, backbone, neck=None, bbox_head=None, mask_head=None, train_cfg=None, test_cfg=None, init_cfg=None, pretrained=None):
        # All construction is delegated to SingleStageInstanceSegmentor.
        super().__init__(backbone=backbone, neck=neck, bbox_head=bbox_head, mask_head=mask_head, train_cfg=train_cfg, test_cfg=test_cfg, init_cfg=init_cfg, pretrained=pretrained)
def run_episodic_random_agent(args, worker_idx=None):
    """Run a random-action agent episodically until `args.num_steps` env steps.

    When `args.do_testing` is set, load profiles are split 50/50 into
    train/held-out sets, and evaluation results are appended every 5 episodes
    to ``result/<env_name>_random_<seed>.csv``.

    Args:
        args: namespace providing env_name, seed, num_steps, do_testing.
        worker_idx: optional worker id for env construction and seeding.

    Fixes vs. original: the CSV handle is now closed on exit (it was leaked),
    and the unused `mask` local was dropped.
    """
    fout = None
    if args.do_testing:
        fout = open('result/{}_random_{}.csv'.format(args.env_name, args.seed), 'w')
    try:
        env = make_env(args.env_name, worker_idx=worker_idx)
        env.seed(((args.seed + 0) if (worker_idx is None) else worker_idx))
        obs_dim = env.observation_space.shape[0]
        CRB_num = (env.cap_num, env.reg_num, env.bat_num)
        CRB_dim = (2, env.reg_act_num, env.bat_act_num)
        print('NumCap, NumReg, NumBat: {}'.format(CRB_num))
        print('ObsDim, ActDim: {}, {}'.format(obs_dim, sum(CRB_num)))
        print(('-' * 80))
        if args.do_testing:
            # Half the profiles for training, the other half held out for eval.
            train_profiles = random.sample(range(env.num_profiles), k=(env.num_profiles // 2))
            test_profiles = [i for i in range(env.num_profiles) if (i not in train_profiles)]
        else:
            train_profiles = list(range(env.num_profiles))
        total_numsteps = 0
        for i_episode in itertools.count(start=1):
            episode_reward = 0
            episode_steps = 0
            done = False
            load_profile_idx = random.choice(train_profiles)
            obs = env.reset(load_profile_idx=load_profile_idx)
            while (not done):
                action = env.random_action()
                (next_obs, reward, done, info) = env.step(action)
                episode_steps += 1
                total_numsteps += 1
                episode_reward += reward
                obs = next_obs
            print('episode: {}, profile: {}, total numsteps: {}, episode steps: {}, reward: {}'.format(i_episode, load_profile_idx, total_numsteps, episode_steps, round(episode_reward, 2)))
            # Extra 24-step offset per episode, kept from the original accounting
            # (presumably one simulated day) — confirm with the env's horizon.
            total_numsteps += 24
            if (total_numsteps >= args.num_steps):
                break
            if (args.do_testing and ((i_episode % 5) == 0)):
                (avg_reward, std) = random_evaluate(env, test_profiles)
                fout.write('{},{},{}\n'.format(total_numsteps, avg_reward, std))
                fout.flush()
                print('')
                print('Avg., Std. Reward: {}, {}'.format(round(avg_reward, 2), round(std, 2)))
                print('')
    finally:
        # Close the results file even if env construction or stepping raises.
        if fout is not None:
            fout.close()
class CenterLoss(nn.Module):
    """Center loss (Wen et al., ECCV 2016): mean squared distance between each
    feature vector and its class center.

    Args:
        num_classes (int): number of classes (one learnable center per class).
        feat_dim (int): feature dimensionality.
        use_gpu (bool): place the centers (and class-index tensor) on CUDA.
    """

    def __init__(self, num_classes=751, feat_dim=2048, use_gpu=True):
        super(CenterLoss, self).__init__()
        self.num_classes = num_classes
        self.feat_dim = feat_dim
        self.use_gpu = use_gpu
        # Learnable class centers, shape (num_classes, feat_dim).
        if self.use_gpu:
            self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())
        else:
            self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))

    def forward(self, x, labels):
        """Compute the center loss.

        Args:
            x: feature matrix of shape (batch_size, feat_dim).
            labels: ground-truth class indices of shape (batch_size,).

        Returns:
            Scalar tensor: sum of squared feature-to-assigned-center distances
            divided by batch_size.
        """
        assert (x.size(0) == labels.size(0)), 'features.size(0) is not equal to labels.size(0)'
        batch_size = x.size(0)
        # ||x_i||^2 + ||c_j||^2 terms of the pairwise squared Euclidean distances.
        distmat = (torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t())
        # Add the -2 * x @ centers^T cross term IN PLACE. The original called the
        # out-of-place `addmm` and discarded its result, leaving distmat incomplete.
        distmat.addmm_(x, self.centers.t(), beta=1, alpha=(- 2))
        classes = torch.arange(self.num_classes).long()
        if self.use_gpu:
            classes = classes.cuda()
        # mask[i, j] is True iff sample i belongs to class j.
        labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
        mask = labels.eq(classes.expand(batch_size, self.num_classes))
        dist = (distmat * mask.float())
        # Clamp to a large positive range for numerical stability. The original
        # used max=.0 (< min), which clamped every distance to zero and made the
        # loss constant; 1e+12 is the canonical upper bound.
        loss = (dist.clamp(min=1e-12, max=1e+12).sum() / batch_size)
        return loss
def register_Ns3GlobalRouteManagerImpl_methods(root_module, cls):
    """Register Python-binding methods for ns3::GlobalRouteManagerImpl.

    Generated-style PyBindGen code: declares the default constructor, the
    virtual route-management entry points, and two debug hooks.
    """
    cls.add_constructor([])
    cls.add_method('DeleteGlobalRoutes', 'void', [], is_virtual=True)
    cls.add_method('BuildGlobalRoutingDatabase', 'void', [], is_virtual=True)
    cls.add_method('InitializeRoutes', 'void', [], is_virtual=True)
    # Debug hooks: inject an LSDB and run an SPF calculation from `root`.
    cls.add_method('DebugUseLsdb', 'void', [param('ns3::GlobalRouteManagerLSDB *', 'arg0')])
    cls.add_method('DebugSPFCalculate', 'void', [param('ns3::Ipv4Address', 'root')])
    return
class ExtraData(UnpackValueError):
    """Raised when unpack(b) decoded a complete object but input bytes remained.

    Both the decoded object and the leftover bytes are kept so callers can
    inspect what was not consumed.
    """

    def __init__(self, unpacked, extra):
        # `unpacked`: the successfully decoded object; `extra`: trailing bytes.
        self.unpacked, self.extra = unpacked, extra

    def __str__(self):
        return 'unpack(b) received extra data.'
def make_layers(cfg, batch_norm=False, filter_size=1):
    """Build a VGG-style feature extractor with anti-aliased downsampling.

    Each 'M' entry inserts a stride-1 max-pool followed by a blur-pool
    (`Downsample`) that performs the actual stride-2 decimation; each integer
    entry adds a 3x3 conv (+ optional BatchNorm) and a ReLU.

    Args:
        cfg: sequence of ints and 'M' markers describing the stack.
        batch_norm: insert BatchNorm2d after every conv when True.
        filter_size: blur filter size for the Downsample layers.

    Returns:
        nn.Sequential containing the assembled layers.
    """
    layers = []
    channels = 3  # RGB input
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=1))
            layers.append(Downsample(filt_size=filter_size, stride=2, channels=channels))
        else:
            layers.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
            if batch_norm:
                layers.append(nn.BatchNorm2d(spec))
            layers.append(nn.ReLU(inplace=True))
            channels = spec
    return nn.Sequential(*layers)
class SpacyPreprocessorParameters(NamedTuple):
    """Bundle of constructor arguments for a spaCy-based preprocessor."""
    text_field: str  # input field holding the raw text
    doc_field: str  # output field that will receive the spaCy Doc
    language: str  # spaCy model / language identifier
    disable: Optional[List[str]]  # spaCy pipeline components to disable, if any
    pre: List[BasePreprocessor]  # preprocessors to run before this one
    memoize: bool  # whether to cache results across calls
    memoize_key: Optional[HashingFunction]  # custom hash for the memoization key
    gpu: bool  # whether spaCy should run on GPU
def main():
    """CLI entry point: read --input-dir from argv and hand it to `parse`."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--input-dir', required=True)
    parsed_args = arg_parser.parse_args()
    parse(parsed_args.input_dir)
class TestDiverseBeamSearch(TestSequenceGeneratorBase):
    """Tests for diverse beam search over a tiny hand-crafted test model."""

    def setUp(self):
        """Build a 2-word dictionary, a 2-sentence batch, and a model whose
        per-step output distributions are pinned via `args.beam_probs`."""
        d = test_utils.dummy_dictionary(vocab_size=2)
        # Sanity-check the special-symbol ids the fixture relies on.
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        self.eos = d.eos()
        self.w1 = 4
        self.w2 = 5
        self.src_tokens = torch.LongTensor([[self.w1, self.w2, self.eos], [self.w1, self.w2, self.eos]])
        self.src_lengths = torch.LongTensor([2, 2])
        args = argparse.Namespace()
        unk = 0.0
        # One FloatTensor per decoding step; rows are (sentence, beam) pairs and
        # columns are probabilities over [eos, unk, w1, w2].
        args.beam_probs = [torch.FloatTensor([[0.0, unk, 0.9, 0.1], [0.0, unk, 0.9, 0.1], [0.0, unk, 0.7, 0.3], [0.0, unk, 0.7, 0.3]]), torch.FloatTensor([[0.0, unk, 0.6, 0.4], [0.0, unk, 0.6, 0.4], [0.25, unk, 0.35, 0.4], [0.25, unk, 0.35, 0.4]]), torch.FloatTensor([[1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [0.9, unk, 0.1, 0.0], [0.9, unk, 0.1, 0.0]])]
        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        self.model = task.build_model(args)
        self.tgt_dict = task.target_dictionary

    def test_diverse_beam_search(self):
        """Two groups with zero diversity strength: each group keeps its own top path."""
        search_strategy = search.DiverseBeamSearch(self.tgt_dict, num_groups=2, diversity_strength=0.0)
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, search_strategy=search_strategy)
        sample = {'net_input': {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}}
        hypos = generator.generate([self.model], sample)
        (eos, w1, w2) = (self.eos, self.w1, self.w2)
        # Sentence 1: both hypotheses follow the w1->w1->eos path.
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0])
        self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0])
        # Sentence 2: both hypotheses follow the w1->w2->eos path.
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9])
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9])
def run_epoch(model, train_loader, optimizer, center, device, is_angular):
    """Train `model` for one epoch of centered contrastive learning.

    Each batch yields two augmented views; both embeddings are shifted by
    `center` before the contrastive loss. When `is_angular`, an L2-norm
    regularizer on both embeddings is added.

    Returns:
        Mean loss per sample over the epoch.
    """
    (total_loss, total_num) = (0.0, 0)
    for ((img1, img2), _) in tqdm(train_loader, desc='Train...'):
        (img1, img2) = (img1.to(device), img2.to(device))
        optimizer.zero_grad()
        out_1 = model(img1)
        out_2 = model(img2)
        # Center the embeddings before computing the loss.
        out_1 = (out_1 - center)
        out_2 = (out_2 - center)
        loss = contrastive_loss(out_1, out_2)
        if is_angular:
            # Penalize embedding norms so only the angular component matters.
            loss += ((out_1 ** 2).sum(dim=1).mean() + (out_2 ** 2).sum(dim=1).mean())
        loss.backward()
        optimizer.step()
        # Accumulate the loss weighted by batch size for a per-sample mean.
        total_num += img1.size(0)
        total_loss += (loss.item() * img1.size(0))
    return (total_loss / total_num)
def get_all_fsps_in_sent(sent, sentann, fspno, lex_unit, frame, isfulltextann, corpus):
    """Extract all frame-semantic parses (FSPs) from one FrameNet sentence element.

    Iterates the sentence's `annotationSet` elements, builds a FrameAnnotation
    per set (skipping the first set, which holds POS/NER layers rather than a
    frame), and collects Target and rank-1 FE labels.

    Returns:
        (numannosets, fspno, fsps): the number of annotation sets seen, the
        updated running FSP counter, and a dict mapping annotation-set id ->
        FrameAnnotation.
    """
    numannosets = 0
    fsps = {}
    fspset = set([])
    for anno in sent.findall('fn:annotationSet', ns):
        annotation_id = anno.attrib['ID']
        # Known-bad annotation set in FrameNet 1.5; skip it outright.
        if ((annotation_id == '2019791') and (VERSION == '1.5')):
            continue
        numannosets += 1
        # The first annotation set carries sentence-level layers, not a frame.
        if (numannosets == 1):
            continue
        anno_id = anno.attrib['ID']
        if isfulltextann:
            # Full-text annotation: frame and LU come from the set itself.
            if ('luName' in anno.attrib):
                # Unannotated sets only count for test corpora.
                if ((anno.attrib['status'] == 'UNANN') and ('test' not in corpus)):
                    continue
                lex_unit = anno.attrib['luName']
                frame = anno.attrib['frameName']
                # 'Test35' is a dummy frame used internally by FrameNet.
                if (frame == 'Test35'):
                    continue
            else:
                continue
            logger.write((((((('\tannotation: ' + str(anno_id)) + '\t') + frame) + '\t') + lex_unit) + '\n'))
        fsp = FrameAnnotation(lex_unit, frame, sentann)
        for layer in anno.findall('fn:layer', ns):
            layertype = layer.attrib['name']
            if (layertype not in relevantfelayers):
                continue
            if (layertype == 'Target'):
                # Target layer: token span(s) evoking the frame.
                for label in layer.findall('fn:label', ns):
                    startend = process_xml_labels(label, layertype)
                    if (startend is None):
                        break
                    fsp.add_target(startend, logger)
            elif ((layer.attrib['name'] == 'FE') and (layer.attrib['rank'] == '1')):
                # Rank-1 frame elements only; higher ranks are alternative layers.
                for label in layer.findall('fn:label', ns):
                    startend = process_xml_labels(label, layertype)
                    if (startend is None):
                        # Null-instantiated FEs carry an 'itype' and no span.
                        if ('itype' in label.attrib):
                            logger.write((('\t\tIssue: itype = ' + label.attrib['itype']) + '\n'))
                            continue
                        else:
                            break
                    fsp.add_fe(startend, label.attrib['name'], logger)
        # An FSP without a target is unusable; one without FEs is only logged.
        if (not fsp.foundtarget):
            logger.write('\t\tSkipping: missing target\n')
            continue
        if (not fsp.foundfes):
            logger.write('\t\tIssue: missing FSP annotations\n')
        if (fsp not in fspset):
            fspno += 1
            fsps[anno_id] = fsp
            fspset.add(fsp)
        else:
            logger.write('\t\tRepeated frames encountered for same sentence\n')
    return (numannosets, fspno, fsps)
def strlist2multihot(strlist, classlist):
    """Return a multi-hot vector over `classlist` marking the entries of `strlist`."""
    one_hot_rows = np.eye(len(classlist))[strlist2indlist(strlist, classlist)]
    return one_hot_rows.sum(axis=0)
class YolosFeatureExtractor(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error unless the 'vision'
    backend (Pillow etc.) is installed. Generated dummy-object pattern."""
    _backends = ['vision']
    def __init__(self, *args, **kwargs):
        # Fails with an informative message when the backend is missing.
        requires_backends(self, ['vision'])
class _BFS(Function):
    """Autograd wrapper around the C++ BFS kernel (forward-only)."""
    # NOTE(review): `forward` takes `ctx` first but carries no @staticmethod
    # decorator; modern torch.autograd.Function requires a static forward —
    # a decorator was likely stripped from this source. Confirm upstream.
    def forward(ctx, edge_index, max_adj_per_vertex):
        # Returns vertices sorted in BFS order plus parent/child orderings.
        (sorted_index, sorted_parent, sorted_child) = _C.bfs_forward(edge_index, max_adj_per_vertex)
        return (sorted_index, sorted_parent, sorted_child)
def gradient_penalty(output, on):
    """WGAN-GP penalty: mean squared deviation of the per-sample gradient
    L2 norm (over axes 1-3, i.e. H/W/C) from 1."""
    grads = tf.gradients(output, [on])[0]
    # Per-sample L2 norm of the gradient.
    per_sample_norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
    return tf.reduce_mean(((per_sample_norm - 1) ** 2))
class ScalableGNN(torch.nn.Module):
    """Base class for GNNs with per-layer historical embeddings
    (GNNAutoScale-style training/inference over subgraph batches).

    Maintains one `History` buffer per intermediate layer and, when histories
    live on CPU while the model runs on CUDA, an `AsyncIOPool` for overlapping
    host<->device transfers with compute.

    NOTE(review): several definitions below look like they lost decorators in
    a re-encoding of this source: `emb_device`, `device` and `_out` read as
    @property accessors, and the bare `_grad()` lines before `mini_inference`
    and `forward_layer` resemble `@torch.no_grad()`. Confirm upstream.
    """

    def __init__(self, num_nodes: int, hidden_channels: int, num_layers: int, pool_size: Optional[int]=None, buffer_size: Optional[int]=None, device=None):
        super().__init__()
        self.num_nodes = num_nodes
        self.hidden_channels = hidden_channels
        self.num_layers = num_layers
        # One async slot per intermediate layer by default.
        self.pool_size = ((num_layers - 1) if (pool_size is None) else pool_size)
        self.buffer_size = buffer_size
        # One history per intermediate layer (the final layer needs none).
        self.histories = torch.nn.ModuleList([History(num_nodes, hidden_channels, device) for _ in range((num_layers - 1))])
        self.pool: Optional[AsyncIOPool] = None
        self._async = False
        self.__out: Optional[Tensor] = None

    def emb_device(self):
        # Device holding the history embeddings (typically CPU).
        return self.histories[0].emb.device

    def device(self):
        # Device the model computes on.
        return self.histories[0]._device

    def _apply(self, fn: Callable) -> None:
        """Hook into .to()/.cuda(): (re)create the async I/O pool when histories
        stay on CPU while the model moves to CUDA."""
        super()._apply(fn)
        if ((str(self.emb_device) == 'cpu') and (str(self.device)[:4] == 'cuda') and (self.pool_size is not None) and (self.buffer_size is not None)):
            self.pool = AsyncIOPool(self.pool_size, self.buffer_size, self.histories[0].embedding_dim)
            self.pool.to(self.device)
        return self

    def reset_parameters(self):
        for history in self.histories:
            history.reset_parameters()

    def __call__(self, x: Optional[Tensor]=None, adj_t: Optional[SparseTensor]=None, batch_size: Optional[int]=None, n_id: Optional[Tensor]=None, offset: Optional[Tensor]=None, count: Optional[Tensor]=None, loader: EvalSubgraphLoader=None, **kwargs) -> Tensor:
        """Run a forward pass; with a `loader`, run layer-wise mini-batch inference.

        Asynchronous history access is enabled only when a pool exists and all
        batch bookkeeping tensors are provided.
        """
        if (loader is not None):
            return self.mini_inference(loader)
        self._async = ((self.pool is not None) and (batch_size is not None) and (n_id is not None) and (offset is not None) and (count is not None))
        if ((batch_size is not None) and (not self._async) and (str(self.emb_device) == 'cpu') and (str(self.device)[:4] == 'cuda')):
            warnings.warn('Asynchronous I/O disabled, although history and model sit on different devices.')
        if self._async:
            # Prefetch out-of-batch ("halo") embeddings for every layer.
            for hist in self.histories:
                self.pool.async_pull(hist.emb, None, None, n_id[batch_size:])
        out = self.forward(x, adj_t, batch_size, n_id, offset, count, **kwargs)
        if self._async:
            # Wait until all pushed embeddings have landed in the histories.
            for hist in self.histories:
                self.pool.synchronize_push()
        self._async = False
        return out

    def push_and_pull(self, history, x: Tensor, batch_size: Optional[int]=None, n_id: Optional[Tensor]=None, offset: Optional[Tensor]=None, count: Optional[Tensor]=None) -> Tensor:
        """Store in-batch embeddings into `history` and read halo embeddings back.

        Returns `x` with its out-of-batch rows replaced by historical values.
        """
        if ((n_id is None) and (x.size(0) != self.num_nodes)):
            return x
        if ((n_id is None) and (x.size(0) == self.num_nodes)):
            # Full-graph pass: just record everything.
            history.push(x)
            return x
        assert (n_id is not None)
        if (batch_size is None):
            history.push(x, n_id)
            return x
        if (not self._async):
            # Synchronous path: push in-batch rows, pull halo rows.
            history.push(x[:batch_size], n_id[:batch_size], offset, count)
            h = history.pull(n_id[batch_size:])
            return torch.cat([x[:batch_size], h], dim=0)
        else:
            # Async path: halo rows were prefetched in __call__.
            out = self.pool.synchronize_pull()[:(n_id.numel() - batch_size)]
            self.pool.async_push(x[:batch_size], offset, count, history.emb)
            out = torch.cat([x[:batch_size], out], dim=0)
            self.pool.free_pull()
            return out

    def _out(self):
        # Lazily-created pinned host buffer for final-layer outputs.
        # NOTE(review): `self.out_channels` is not defined in this class; it is
        # presumably set by subclasses — confirm.
        if (self.__out is None):
            self.__out = torch.empty(self.num_nodes, self.out_channels, pin_memory=True)
        return self.__out
    # NOTE(review): bare `_grad()` below looks like a stripped `@torch.no_grad()`.
    _grad()
    def mini_inference(self, loader: SubgraphLoader) -> Tensor:
        """Layer-wise full-dataset inference: compute each layer over all
        subgraph batches, streaming activations through the histories."""
        # Materialize the loader once, attaching a per-batch state dict.
        loader = [(sub_data + ({},)) for sub_data in loader]
        # Layer 0 reads raw features.
        for (data, batch_size, n_id, offset, count, state) in loader:
            x = data.x.to(self.device)
            adj_t = data.adj_t.to(self.device)
            out = self.forward_layer(0, x, adj_t, state)[:batch_size]
            self.pool.async_push(out, offset, count, self.histories[0].emb)
        self.pool.synchronize_push()
        # Intermediate layers read the previous layer's history.
        for i in range(1, len(self.histories)):
            for (_, batch_size, n_id, offset, count, _) in loader:
                self.pool.async_pull(self.histories[(i - 1)].emb, offset, count, n_id[batch_size:])
            for (batch, batch_size, n_id, offset, count, state) in loader:
                adj_t = batch.adj_t.to(self.device)
                x = self.pool.synchronize_pull()[:n_id.numel()]
                out = self.forward_layer(i, x, adj_t, state)[:batch_size]
                self.pool.async_push(out, offset, count, self.histories[i].emb)
                self.pool.free_pull()
            self.pool.synchronize_push()
        # Final layer writes into the pinned output buffer.
        for (_, batch_size, n_id, offset, count, _) in loader:
            self.pool.async_pull(self.histories[(- 1)].emb, offset, count, n_id[batch_size:])
        for (batch, batch_size, n_id, offset, count, state) in loader:
            adj_t = batch.adj_t.to(self.device)
            x = self.pool.synchronize_pull()[:n_id.numel()]
            out = self.forward_layer((self.num_layers - 1), x, adj_t, state)[:batch_size]
            self.pool.async_push(out, offset, count, self._out)
            self.pool.free_pull()
        self.pool.synchronize_push()
        return self._out
    # NOTE(review): bare `_grad()` below looks like a stripped `@torch.no_grad()`.
    _grad()
    def forward_layer(self, layer: int, x: Tensor, adj_t: SparseTensor, state: Dict[(str, Any)]) -> Tensor:
        """Compute a single layer's output; must be provided by subclasses."""
        raise NotImplementedError
def config_dict(config_path=CONFIG_PATH):
    """Read an INI file into a nested dict, coercing each value to the first
    of int, float, bool (via ConfigParser.getboolean) that parses; otherwise
    the raw string is kept.

    Returns:
        {section_name: {key: coerced_value}}
    """
    parser = configparser.ConfigParser()
    parser.read(config_path)
    result = {}
    for section_name in parser.sections():
        section = parser[section_name]
        parsed = {}
        for key in section:
            raw = section[key]
            # Coercion order matters: int before float before bool before str.
            try:
                parsed[key] = int(raw)
                continue
            except ValueError:
                pass
            try:
                parsed[key] = float(raw)
                continue
            except ValueError:
                pass
            try:
                parsed[key] = section.getboolean(key)
            except ValueError:
                parsed[key] = raw
        result[section_name] = parsed
    return result
def _get_read_cursor(source, parallelism=None):
    """Open `source` (path-like or binary/text stream) for matrix reading.

    Returns:
        (cursor, stream_to_close) where `stream_to_close` is a gzip/bz2
        wrapper the caller must close afterwards, or None.

    Raises:
        TypeError: if `source` is neither path-like nor a readable stream.
    """
    from . import _fmm_core
    stream_to_close = None
    if (parallelism is None):
        parallelism = PARALLELISM
    # os.fspath succeeds only for path-like objects; streams raise TypeError.
    try:
        source = os.fspath(source)
    except TypeError:
        is_path = False
    else:
        is_path = True
    if is_path:
        path = str(source)
        if path.endswith('.gz'):
            # Transparently decompress; caller closes the wrapper.
            import gzip
            source = gzip.GzipFile(path, 'r')
            stream_to_close = source
        elif path.endswith('.bz2'):
            import bz2
            source = bz2.BZ2File(path, 'rb')
            stream_to_close = source
        else:
            # Plain file: the core reader owns the handle.
            return (_fmm_core.open_read_file(path, parallelism), stream_to_close)
    if hasattr(source, 'read'):
        # Text-mode streams must be wrapped to yield bytes.
        if isinstance(source, io.TextIOBase):
            source = _TextToBytesWrapper(source)
        return (_fmm_core.open_read_stream(source, parallelism), stream_to_close)
    raise TypeError('Unknown source type')
def img_mask_pad(image, mask, target=(288, 288)):
    """Pad `image` and `mask` to at least `target` (height, width) using
    albumentations' PadIfNeeded, applied jointly so the pair stays aligned."""
    pad = PadIfNeeded(p=1.0, min_height=target[0], min_width=target[1])
    result = pad(image=image, mask=mask)
    return (result['image'], result['mask'])
# NOTE(review): `_utils.test(...)` looks like a mangled decorator
# (`@test_utils.test(require=ti.extension.data64)`); as written its result is
# discarded — confirm against the original taichi test file.
_utils.test(require=ti.extension.data64)
def test_global_buffer_misalignment():
    """Regression test: mixing f32 arguments with an f64 accumulator
    (requires the data64 extension)."""
    # NOTE(review): the inner `test` is presumably a `@ti.kernel` whose
    # decorator was stripped; the ti.f32 annotation marks a kernel argument.
    def test(x: ti.f32):
        a = x
        b = ti.cast(0.12, ti.f64)
        for i in range(8):
            b += a
    # Invoke repeatedly to exercise buffer alignment across launches.
    for i in range(8):
        test(0.1)
def save_images(images, index, outdir, classes, labels):
    """Denormalize a CHW image batch (module-level `std`/`mean`) and save each
    image to `outdir` as '<running-index>_image_<class-name>.jpg'."""
    # NCHW -> NHWC, then undo the normalization.
    denormalized = ((images.cpu().detach().permute((0, 2, 3, 1)) * std) + mean)
    for (offset, img) in enumerate(denormalized):
        class_name = classes[labels[offset].item()]
        filename = f'{((index + offset) + 1)}_image_{class_name}.jpg'
        plt.imsave(os.path.join(outdir, filename), img.numpy())
def worker_func(model_cls, model_kwargs, checkpoint, dataset, data_func, gpu_id, idx_queue, result_queue):
    """Worker-process loop for multi-GPU inference.

    Builds the model, loads the checkpoint on CPU, moves to `gpu_id`, then
    serves requests forever: reads sample indices from `idx_queue` and puts
    (idx, model_output) pairs on `result_queue`.
    """
    model = model_cls(**model_kwargs)
    # Load on CPU first to avoid GPU memory spikes during deserialization.
    load_checkpoint(model, checkpoint, map_location='cpu')
    torch.cuda.set_device(gpu_id)
    model.cuda()
    model.eval()
    with torch.no_grad():
        # Runs until the owning process terminates the worker.
        while True:
            idx = idx_queue.get()
            data = dataset[idx]
            result = model(**data_func(data, gpu_id))
            result_queue.put((idx, result))
def load_state_epoch(model_dir, model, epoch):
    """Load the epoch checkpoint `<model_dir>/model.pth-<epoch>` into `model`.

    Loading is non-strict; parameters present in the model but absent from the
    checkpoint are printed for inspection.
    """
    model_path = ((model_dir + '/model.pth-') + str(epoch))
    device_str = 'cuda:{}'.format(torch.cuda.current_device())
    checkpoint = torch.load(model_path, map_location=device_str)
    model.load_state_dict(checkpoint['state_dict'], strict=False)
    # Report model parameters that the checkpoint did not provide.
    loaded_keys = set(checkpoint['state_dict'].keys())
    expected_keys = set(model.state_dict().keys())
    for key in (expected_keys - loaded_keys):
        print('missing keys from checkpoint {}: {}'.format(model_dir, key))
    print("=> loaded model from checkpoint '{}'".format(model_dir))
def tsne_by_gender(vecs, labels, title, words=None):
    """Project `vecs` to 2D with t-SNE and scatter-plot them colored by label.

    Optionally annotates up to 60 points with words. Returns the 2D embedding.
    """
    tsne = TSNE(n_components=2, random_state=0)
    vecs_2d = tsne.fit_transform(vecs)
    num_labels = len(set(labels.tolist()))
    names = ['class {}'.format(i) for i in range(num_labels)]
    plt.figure(figsize=(6, 5))
    colors = ('r', 'b', 'orange')
    for (i, c, label) in zip(set(labels.tolist()), colors, names):
        print(len(vecs_2d[((labels == i), 0)]))
        plt.scatter(vecs_2d[((labels == i), 0)], vecs_2d[((labels == i), 1)], c=c, label=label, alpha=0.3)
    plt.legend()
    plt.title(title)
    if (words is not None):
        k = 60
        # NOTE(review): `j` picks a random word but the annotated position/label
        # use index `i` — the word and the point may not correspond. Also the
        # color ternary below evaluates to 'black' in both branches. Both look
        # like leftovers; confirm intent before changing.
        for i in range(k):
            j = np.random.choice(range(len(words)))
            label = labels[i]
            w = words[j]
            (x, y) = vecs_2d[i]
            plt.annotate(w, (x, y), size=10, color=('black' if (label == 1) else 'black'))
    plt.show()
    return vecs_2d
class BiSeNet(nn.Module):
    """BiSeNet for semantic segmentation (context path + feature fusion).

    Produces three outputs at input resolution: the fused main prediction and
    two auxiliary predictions from intermediate context-path features (used
    for deep supervision during training).
    """

    def __init__(self, n_classes, *args, **kwargs):
        super(BiSeNet, self).__init__()
        self.cp = ContextPath()
        self.ffm = FeatureFusionModule(256, 256)
        self.conv_out = BiSeNetOutput(256, 256, n_classes)
        # Auxiliary heads on the 1/8 and 1/16 context features.
        self.conv_out16 = BiSeNetOutput(128, 64, n_classes)
        self.conv_out32 = BiSeNetOutput(128, 64, n_classes)
        self.init_weight()

    def forward(self, x):
        """Return (main, aux16, aux32) logits, each upsampled to input size."""
        (H, W) = x.size()[2:]
        (feat_res8, feat_cp8, feat_cp16) = self.cp(x)
        # The 1/8 residual feature stands in for a separate spatial path here.
        feat_sp = feat_res8
        feat_fuse = self.ffm(feat_sp, feat_cp8)
        feat_out = self.conv_out(feat_fuse)
        feat_out16 = self.conv_out16(feat_cp8)
        feat_out32 = self.conv_out32(feat_cp16)
        # Upsample all predictions back to the input resolution.
        feat_out = F.interpolate(feat_out, (H, W), mode='bilinear', align_corners=True)
        feat_out16 = F.interpolate(feat_out16, (H, W), mode='bilinear', align_corners=True)
        feat_out32 = F.interpolate(feat_out32, (H, W), mode='bilinear', align_corners=True)
        return (feat_out, feat_out16, feat_out32)

    def init_weight(self):
        # Kaiming init for direct Conv2d children (submodules init themselves).
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if (not (ly.bias is None)):
                    nn.init.constant_(ly.bias, 0)

    def get_params(self):
        """Split parameters into (decay, no-decay) x (base-LR, boosted-LR) groups;
        fusion and output heads get the boosted learning rate."""
        (wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params) = ([], [], [], [])
        for (name, child) in self.named_children():
            (child_wd_params, child_nowd_params) = child.get_params()
            if (isinstance(child, FeatureFusionModule) or isinstance(child, BiSeNetOutput)):
                lr_mul_wd_params += child_wd_params
                lr_mul_nowd_params += child_nowd_params
            else:
                wd_params += child_wd_params
                nowd_params += child_nowd_params
        return (wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params)
def register_types_ns3_Hash(module):
    """Register Python-binding types for the ns3::Hash namespace.

    Generated-style PyBindGen code: declares the ref-counted Implementation
    class, aliases the 32/64-bit hash function-pointer types (plus pointer and
    reference forms), and recurses into the nested Function namespace.
    """
    root_module = module.get_root()
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )*', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )*', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )&', u'ns3::Hash::Hash64Function_ptr&')
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)
def resnext34(baseWidth, cardinality, **unused):
    """Construct a ResNeXt-34: BasicBlock stages [3, 4, 6, 3], 1000 classes."""
    return ResNeXt(baseWidth, cardinality, BasicBlock, [3, 4, 6, 3], 1000)
class CVAE(nn.Module):
    """Conditional VAE with optional normalizing-flow extensions.

    Optional components: an IAF stack on the posterior, a MAF on the prior,
    a MAF on the decoder likelihood, and a Gaussian prior network conditioned
    on the context. `forward` returns per-sample (reconstruction, KL) losses.
    """

    def __init__(self, input_dim, context_dim, latent_dim, hidden_dims, encoder_full_cov=True, decoder_full_cov=True, activation='elu', iaf=None, decoder_maf=None, prior_maf=None, decoder_zcontext=False, encoder_xcontext=False, encoder_ycontext=False, batch_norm=False, prior_gaussian_nn=False, prior_full_cov=False):
        super(CVAE, self).__init__()
        # Shared activation for every sub-network.
        if (activation == 'elu'):
            af = nn.ELU()
        elif (activation == 'relu'):
            af = nn.ReLU()
        elif (activation == 'leaky_relu'):
            af = nn.LeakyReLU()
        else:
            af = nn.ReLU()
            print('Invalid activation function specified. Using ReLU.')
        # Posterior IAF: the encoder emits an extra context vector for it,
        # optionally augmented with x and/or y (the conditioning context).
        if (iaf is not None):
            encoder_output_context_dim = iaf['context_dim']
            encoder_context_dim = encoder_output_context_dim
            if encoder_xcontext:
                encoder_context_dim += input_dim
            if encoder_ycontext:
                encoder_context_dim += context_dim
            self.iaf = MAFStack(latent_dim, encoder_context_dim, iaf['hidden_dims'], iaf['nflows'], batch_norm=iaf['batch_norm'], bn_momentum=iaf['bn_momentum'], activation=activation, iaf_parametrization=iaf['iaf_parametrization'])
        else:
            encoder_output_context_dim = None
            self.iaf = None
        # Prior MAF maps z -> base noise epsilon, conditioned on the context.
        if (prior_maf is not None):
            self.prior_maf = MAFStack(latent_dim, context_dim, prior_maf['hidden_dims'], prior_maf['nflows'], batch_norm=prior_maf['batch_norm'], bn_momentum=prior_maf['bn_momentum'], activation=activation, iaf_parametrization=prior_maf['iaf_parametrization'])
        else:
            self.prior_maf = None
        # Decoder MAF transforms x before the likelihood, optionally also
        # conditioned on the latent sample.
        if (decoder_maf is not None):
            if decoder_zcontext:
                decoder_context_dim = (context_dim + latent_dim)
            else:
                decoder_context_dim = context_dim
            self.decoder_maf = MAFStack(input_dim, decoder_context_dim, decoder_maf['hidden_dims'], decoder_maf['nflows'], batch_norm=decoder_maf['batch_norm'], bn_momentum=decoder_maf['bn_momentum'], activation=activation, iaf_parametrization=decoder_maf['iaf_parametrization'])
        else:
            self.decoder_maf = None
        self.encoder = Coder(input_dim=input_dim, context_dim=context_dim, hidden_dims=hidden_dims, output_dim=latent_dim, output_context_dim=encoder_output_context_dim, activation=af, full_cov=encoder_full_cov, batch_norm=batch_norm)
        self.decoder = Coder(input_dim=latent_dim, context_dim=context_dim, hidden_dims=hidden_dims, output_dim=input_dim, activation=af, full_cov=decoder_full_cov, batch_norm=batch_norm)
        # Optional learned Gaussian prior p(z|y); otherwise a fixed base_dist.
        if prior_gaussian_nn:
            self.prior_nn = Coder(input_dim=context_dim, context_dim=0, hidden_dims=hidden_dims, output_dim=latent_dim, activation=af, full_cov=prior_full_cov, batch_norm=batch_norm)
        else:
            self.prior_nn = None
        # Kept for checkpoint reconstruction and branch flags in forward().
        self.model_hyperparams = {'input_dim': input_dim, 'context_dim': context_dim, 'latent_dim': latent_dim, 'hidden_dims': hidden_dims, 'encoder_full_cov': encoder_full_cov, 'decoder_full_cov': decoder_full_cov, 'activation': activation, 'iaf': iaf, 'prior_maf': prior_maf, 'decoder_maf': decoder_maf, 'decoder_zcontext': decoder_zcontext, 'encoder_xcontext': encoder_xcontext, 'encoder_ycontext': encoder_ycontext, 'batch_norm': batch_norm, 'prior_gaussian_nn': prior_gaussian_nn, 'prior_full_cov': prior_full_cov}

    def forward(self, x, context, base_dist):
        """Return per-sample (reconstruction_loss, kl_loss) for inputs `x`
        conditioned on `context`, with `base_dist` as the base prior."""
        # Encode; with an IAF the encoder also emits the flow's context vector.
        if (self.iaf is not None):
            (q_dist, iaf_context) = self.encoder(x, context)
        else:
            q_dist = self.encoder(x, context)
        z = q_dist.rsample()
        # Transform the posterior sample z -> zT through the IAF.
        if (self.iaf is not None):
            if self.model_hyperparams['encoder_xcontext']:
                iaf_context = torch.cat((x, iaf_context), dim=(- 1))
            if self.model_hyperparams['encoder_ycontext']:
                iaf_context = torch.cat((context, iaf_context), dim=(- 1))
            (zT, iaf_log_det) = self.iaf(z, iaf_context)
        else:
            zT = z
            iaf_log_det = torch.zeros_like(z[(..., 0)])
        r2_dist = self.decoder(zT, context)
        # Map zT back to base noise through the prior MAF (if any).
        if (self.prior_maf is not None):
            (epsilon, prior_maf_log_det) = self.prior_maf(zT, context)
        else:
            epsilon = zT
            prior_maf_log_det = torch.zeros_like(zT[(..., 0)])
        # Likelihood: either through the decoder MAF change of variables or direct.
        if (self.decoder_maf is not None):
            if self.model_hyperparams['decoder_zcontext']:
                decoder_maf_context = torch.cat((context, zT), dim=(- 1))
            else:
                decoder_maf_context = context
            (x0, decoder_maf_log_det) = self.decoder_maf(x, decoder_maf_context)
            reconstruction_loss = (- (r2_dist.log_prob(x0) - decoder_maf_log_det))
        else:
            reconstruction_loss = (- r2_dist.log_prob(x))
        if (self.prior_nn is not None):
            prior_gaussian_dist = self.prior_nn(context)
        else:
            prior_gaussian_dist = base_dist
        # With flows the KL must be computed by Monte Carlo (log-density
        # difference); otherwise the closed-form Gaussian KL applies.
        if ((self.iaf is not None) or (self.prior_maf is not None)):
            kl_loss = ((q_dist.log_prob(z) + iaf_log_det) - (prior_gaussian_dist.log_prob(epsilon) - prior_maf_log_det))
        else:
            kl_loss = torch.distributions.kl_divergence(q_dist, prior_gaussian_dist)
        return (reconstruction_loss, kl_loss)
def HS_all_minimal(f, return_transformation=False, D=None):
    """Return all minimal models of the dynamical system `f` (Hutz-Stoll style).

    For each prime dividing the resultant (or each prime in `D` when given),
    conjugate models are combined across primes. Even-degree maps are already
    minimal, so `f` itself is returned.

    Args:
        f: the dynamical system (Sage object with normalize_coordinates etc.).
        return_transformation: also return the conjugating 2x2 matrices.
        D: optional list of primes to restrict the search to.
    """
    MS = MatrixSpace(ZZ, 2)
    m = MS.one()
    # Work on a normalized copy so `f` itself is not mutated.
    F = copy(f)
    F.normalize_coordinates()
    if (F.degree() == 1):
        raise ValueError('function must be degree at least 2')
    # Even-degree maps are minimal as-is.
    if ((f.degree() % 2) == 0):
        if return_transformation:
            return [[f, m]]
        else:
            return [f]
    if (D is None):
        # Only primes dividing the resultant can affect minimality.
        res = ZZ(F.resultant())
        D = res.prime_divisors()
    # Combine per-prime minimal models across all primes in D.
    M = [[F, m]]
    for p in D:
        Mp = HS_all_minimal_p(p, F, m, return_transformation=True)
        M = [[g.conjugate(t), (t * s)] for (g, s) in M for (G, t) in Mp]
    if return_transformation:
        return M
    else:
        return [funct for (funct, matr) in M]
class RandomNegP(tfk.layers.Layer):
    """Training-time augmentation: randomly negate inputs along axis 2 with a
    probability that is annealed from `min_prob` to `max_prob` over
    `num_updates` training calls. At inference the layer is the identity.
    """

    def __init__(self, num_updates=2000, min_prob=0.0, max_prob=0.5, **kwargs):
        self.num_updates = num_updates
        self.min_prob = min_prob
        self.max_prob = max_prob
        super().__init__(**kwargs)

    def get_config(self):
        # Serialize the annealing schedule alongside the base-layer config.
        return dict(num_updates=self.num_updates, min_prob=self.min_prob, max_prob=self.max_prob, **super().get_config())

    def build(self, input_shape):
        # Current negation probability; non-trainable, averaged across replicas.
        self.inv_prob = self.add_weight(name='inv_prob', shape=[], initializer=tfk.initializers.Constant(value=self.min_prob), trainable=False, aggregation=tf.VariableAggregation.MEAN)
        self.built = True

    def call(self, inputs, training=None):
        if (training is None):
            training = tf.keras.backend.learning_phase()
        if training:
            stat_shape = inputs.shape
            dyn_shape = tf.shape(inputs)
            # Draw one uniform per (batch, axis-2) slot; broadcast over the rest.
            uniform_s = tf.random.uniform(shape=[dyn_shape[0], 1, dyn_shape[2], 1], minval=0.0, maxval=1.0, dtype=tf.float32)
            signs = tf.where((uniform_s < self.inv_prob), (- 1.0), 1.0)
            signs.set_shape([stat_shape[0], 1, stat_shape[2], 1])
            outputs = (inputs * signs)
            # Linear annealing step towards max_prob.
            new_prob = tf.minimum(self.max_prob, (self.inv_prob + ((self.max_prob - self.min_prob) / self.num_updates)))
            self.inv_prob.assign(new_prob)
        else:
            outputs = inputs
        return outputs
def unpickle(file):
    """Load and return one pickled object from `file` (CIFAR-style, bytes keys).

    Security note: pickle is unsafe on untrusted input; only use on trusted files.
    """
    import pickle
    with open(file, 'rb') as fo:
        # Renamed from `dict`, which shadowed the builtin of the same name.
        data = pickle.load(fo, encoding='bytes')
    return data
def test_RecordArray():
    """CUDA test: an awkward RecordArray can be passed into a numba.cuda kernel
    and its field values read back unchanged."""
    array = ak.Array([{'x': 0}, {'x': 1}, {'x': 2}, {'x': 3}, {'x': 4}, {'x': 5}], backend='cuda')
    # Device buffer the kernel writes one element per record into.
    results = nb_cuda.to_device(np.empty(6, dtype=np.int32))
    # Launch with 1 block of 6 threads, one thread per record.
    pass_record_through[(1, 6)](array, results)
    nb_cuda.synchronize()
    host_results = results.copy_to_host()
    assert (ak.Array(host_results).tolist() == array.x.to_list())
def read_document(lines, spaces_after, split_clauses):
    """Parse token-per-line input into a document of sentences.

    Each sentence is a list of (token, space_follows) pairs. Blank lines end a
    sentence; '_' tokens mark whitespace after the preceding token (and, with
    `split_clauses`, an 'O'-tagged '_' additionally ends the sentence).

    Args:
        lines: iterable of tab-separated lines (token in column 0, tag in 3).
        spaces_after: if True, force a trailing space on each sentence's last token.
        split_clauses: if True, break sentences at 'O'-tagged '_' separators.

    Returns:
        [[document]] — the document wrapped in two lists (caller's format).
    """
    document = []
    sentence = []
    for line in lines:
        line = line.strip()
        if (not line):
            # Blank line: flush the current sentence.
            if sentence:
                if spaces_after:
                    sentence[(- 1)] = (sentence[(- 1)][0], True)
                document.append(sentence)
                sentence = []
        else:
            pieces = line.split('\t')
            # Normalize non-breaking spaces.
            pieces = [p.replace('\xa0', ' ') for p in pieces]
            if (split_clauses and (pieces[0] == '_') and (pieces[3] == 'O')):
                # Clause separator: end the sentence here.
                if sentence:
                    sentence[(- 1)] = (sentence[(- 1)][0], True)
                    document.append(sentence)
                    sentence = []
            elif (pieces[0] == '_'):
                # Whitespace marker: flag a space after the previous token.
                # NOTE(review): raises IndexError when '_' is the first token of
                # a sentence — presumably the data never starts with '_'; confirm.
                sentence[(- 1)] = (sentence[(- 1)][0], True)
            else:
                sentence.append((pieces[0], False))
    # Flush a trailing sentence with no terminating blank line.
    if sentence:
        if spaces_after:
            sentence[(- 1)] = (sentence[(- 1)][0], True)
        document.append(sentence)
        sentence = []
    return [[document]]
class Scale(nn.Module):
    """Learnable scalar multiplier: y = scale * x."""

    def __init__(self, init_value=1.0):
        super(Scale, self).__init__()
        # Single learnable scalar, initialized to `init_value`.
        self.scale = nn.Parameter(torch.FloatTensor([init_value]))

    def forward(self, x):
        """Multiply the input elementwise by the learnable scalar."""
        return self.scale * x
def processNlToks(nlToks):
    """ASCII-normalize natural-language tokens and drop PTB bracket tokens
    (-LRB-, -RCB-, etc.) and tokens that are empty after normalization.

    NOTE(review): three of the `(tok != '')` comparisons are literally
    identical; they most likely compared distinct invisible/unicode characters
    that were lost when this source was re-encoded — confirm against the
    original file before simplifying.
    """
    return [tok.encode('ascii', 'replace').decode().strip() for tok in nlToks if ((tok != '-RCB-') and (tok != '-LCB-') and (tok != '-LSB-') and (tok != '-RSB-') and (tok != '-LRB-') and (tok != '-RRB-') and (tok != '') and (tok != '') and (tok != '') and (tok.encode('ascii', 'replace').decode().strip() != ''))]
def get_optimizer(training_args, model):
    """Build an AdamW optimizer over the model's codebook (and optionally model) params.

    When ``training_args.train_model_params`` is set, two parameter groups are
    used: codebook params at the base learning rate without weight decay, and
    model params at ``model_lr_factor * learning_rate`` with weight decay.
    Returns ``None`` (after emitting a RuntimeWarning) when no codebook
    parameters are found and model-parameter training is disabled.
    """
    import warnings
    if training_args.train_model_params:
        params = [
            {'params': model.get_codebook_params(),
             'lr': training_args.learning_rate,
             'weight_decay': 0.0},
            {'params': model.get_model_params(),
             'lr': (training_args.model_lr_factor * training_args.learning_rate),
             'weight_decay': training_args.weight_decay},
        ]
    else:
        params = model.get_codebook_params()
    if len(params) > 0:
        optimizer = torch.optim.AdamW(params, training_args.learning_rate)
    else:
        # BUG FIX: the original constructed `RuntimeWarning(...)` as a bare
        # expression, which silently discards it; actually emit the warning.
        warnings.warn('Codebook not found in model. Training with model params.',
                      RuntimeWarning)
        optimizer = None
    return optimizer
def my_main(_config, cnn):
    # Evaluate a fine-tuned ResNet-50 re-identification model over a dataset
    # and compute similarity scores against a set of thresholds.
    # NOTE(review): `weights`, `dataset` and `thresholds` are not parameters —
    # presumably module-level names injected by a sacred-style config; confirm
    # against the surrounding file.
    print(_config)
    print('[*] Building CNN')
    network = resnet50(pretrained=True, **cnn['cnn'])
    # Overwrite the torchvision ImageNet weights with the checkpoint at `weights`.
    network.load_state_dict(torch.load(weights))
    network.eval()
    network.cuda()
    print('[*] Initializing Dataloader')
    dataloader = cnn['dataloader']
    # Force deterministic center-crop preprocessing and cap images per identity.
    dataloader['transform'] = 'center'
    dataloader['max_per_person'] = 40
    db = Datasets(dataset, dataloader)
    print('[*] Evaluating {}'.format(db))
    # Reaches into private attributes of the dataset wrapper for the raw data.
    data = db._data._dataloader.data
    calcScores(network, data, thresholds)
def findNode(id_, allNodes):
    """Return the first node in *allNodes* whose ``_id`` equals *id_*, else None."""
    return next((node for node in allNodes if node._id == id_), None)
class LockFile():
    """An advisory inter-process lock backed by ``flock`` on a named file.

    Usable either via explicit :meth:`acquire`/:meth:`release` calls or as a
    context manager (``with LockFile(path) as lock: ...``).
    """

    def __init__(self, fname: str):
        self._fname = fname
        self._fd = None  # open file object while the lock is held

    def acquire(self):
        """Poll (100 ms interval) until the exclusive lock is obtained."""
        self._fd = open(self._fname, 'w')
        try:
            # Make the lock file world-writable so other users can reuse it.
            os.chmod(self._fname, 511)
        except PermissionError:
            # We don't own a pre-existing lock file; chmod is best-effort.
            pass
        while True:
            try:
                fcntl.flock(self._fd, (fcntl.LOCK_EX | fcntl.LOCK_NB))
                break
            except IOError as e:
                if (e.errno != errno.EAGAIN):
                    raise
                # Lock held by another process; back off briefly and retry.
                time.sleep(0.1)

    def release(self):
        """Release the lock and close the underlying file descriptor."""
        fcntl.flock(self._fd, fcntl.LOCK_UN)
        self._fd.close()
        self._fd = None

    def __enter__(self):
        self.acquire()
        # BUG FIX: return self so `with LockFile(p) as lock:` binds the lock
        # object instead of None.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()
class Linear(torch.nn.Module):
    """Linear layer with optional weight max-norm and 4D-input flattening.

    Arguments
    ---------
    n_neurons : int
        Output dimensionality.
    input_shape : tuple, optional
        Expected input shape, used to infer the input size.
    input_size : int, optional
        Explicit input size (alternative to ``input_shape``).
    bias : bool
        Whether the layer has a bias term.
    max_norm : float, optional
        If set, each output neuron's weight vector is renormed to this
        L2 max-norm on every forward pass.
    combine_dims : bool
        If True, 4D inputs are flattened over their last two dimensions.
    """

    def __init__(self, n_neurons, input_shape=None, input_size=None, bias=True,
                 max_norm=None, combine_dims=False):
        super().__init__()
        self.max_norm = max_norm
        self.combine_dims = combine_dims
        if (input_shape is None) and (input_size is None):
            raise ValueError('Expected one of input_shape or input_size')
        if input_size is None:
            input_size = input_shape[-1]
        # BUG FIX: only consult input_shape when it was provided; the original
        # called len(None) when combine_dims=True with input_size given.
        if (input_shape is not None) and (len(input_shape) == 4) and self.combine_dims:
            input_size = input_shape[2] * input_shape[3]
        self.w = nn.Linear(input_size, n_neurons, bias=bias)

    def forward(self, x):
        """Apply the (optionally max-norm-constrained) linear transform."""
        if (x.ndim == 4) and self.combine_dims:
            x = x.reshape(x.shape[0], x.shape[1], (x.shape[2] * x.shape[3]))
        if self.max_norm is not None:
            # Constrain each row of the weight matrix to an L2 ball.
            self.w.weight.data = torch.renorm(
                self.w.weight.data, p=2, dim=0, maxnorm=self.max_norm)
        wx = self.w(x)
        return wx
def get_data(dataset, data_path, batch_size, num_workers):
    """Build train/val/test DataLoaders for CIFAR10 or CIFAR100.

    The 'val' split reuses the training images but with the deterministic
    test-time transform (no random crop/flip augmentation); 'test' uses the
    held-out test split.  Returns a dict of DataLoaders keyed by split name.
    """
    assert dataset in ['CIFAR10', 'CIFAR100']
    print('Loading dataset {} from {}'.format(dataset, data_path))
    if dataset in ['CIFAR10', 'CIFAR100']:
        ds = getattr(datasets, dataset.upper())
        path = os.path.join(data_path, dataset.lower())
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201)),
        ])
        train_set = ds(path, train=True, download=True, transform=transform_train)
        val_set = ds(path, train=True, download=True, transform=transform_test)
        test_set = ds(path, train=False, download=True, transform=transform_test)
        train_sampler = None
        val_sampler = None
    else:
        raise Exception('Invalid dataset %s' % dataset)
    loaders = {
        'train': torch.utils.data.DataLoader(
            train_set, batch_size=batch_size, shuffle=(train_sampler is None),
            sampler=train_sampler, num_workers=num_workers, pin_memory=True),
        # BUG FIX: the original passed train_set here, leaving val_set (built
        # with the augmentation-free test transform) entirely unused.
        'val': torch.utils.data.DataLoader(
            val_set, batch_size=batch_size, sampler=val_sampler,
            num_workers=num_workers, pin_memory=True),
        'test': torch.utils.data.DataLoader(
            test_set, batch_size=batch_size, shuffle=False,
            num_workers=num_workers, pin_memory=True),
    }
    return loaders
def run(turns, searcher, num_passages):
    """Retrieve passages for every turn, threading per-conversation history.

    Turns belonging to the same ``Conversation_no`` share a growing history
    list (previous turns of that conversation); the history is reset whenever
    a new conversation number is encountered.
    """
    all_results = []
    history = []
    current_conv = -1
    for turn in tqdm(turns):
        conv_no = turn['Conversation_no']
        if conv_no != current_conv:
            # New conversation: reset the running history.
            current_conv = conv_no
            history = []
        all_results.append(run_for_turn(turn, history, searcher, num_passages))
        history.append(turn)
    return all_results
def is_value_with_epsilon_correct(func: T.Callable[([sf.Scalar, sf.Scalar], sf.Expr)], singularity: sf.Scalar=0, limit_direction: str='+', display_func: T.Callable[([T.Any], None)]=_default_display_func, expected_value: sf.Scalar=None) -> bool:
    """Check that epsilon-handling in ``func`` gives the correct value at a singularity.

    Two conditions are verified: (a) the epsilon-guarded expression is not NaN
    when evaluated at ``x = singularity``, and (b) its limit as epsilon -> 0+
    there matches the raw (epsilon=0) expression's one-sided limit — or
    ``expected_value`` when that is supplied.  Diagnostics are routed through
    ``display_func``.  Returns True iff both checks pass.
    """
    # Symbolic limits require the sympy backend of symforce.
    assert (symforce.get_symbolic_api() == 'sympy')
    x = sf.Symbol('x', real=True)
    epsilon = sf.Symbol('epsilon', positive=True)
    is_correct = True
    expr_eps = func(x, epsilon)
    # The raw (unguarded) expression: epsilon substituted with 0.
    expr_raw = expr_eps.subs(epsilon, 0)
    expr_eps_at_x_zero = expr_eps.subs(x, singularity)
    if (expr_eps_at_x_zero == sf.S.NaN):
        if display_func:
            display_func('Expressions (raw / eps):')
            display_func(expr_raw)
            display_func(expr_eps)
            display_func('[ERROR] Epsilon handling failed, expression at 0 is NaN.')
        is_correct = False
    if (expected_value is None):
        # Reference value: one-sided limit of the raw expression at the singularity.
        value_x0_raw = _limit_and_simplify(expr_raw, x, singularity, limit_direction)
    else:
        value_x0_raw = expected_value
    value_x0_eps = expr_eps.subs(x, singularity)
    # Limit of the guarded expression as epsilon -> 0 from above.
    value_x0_eps_sub2 = _limit_and_simplify(value_x0_eps, epsilon, 0, '+')
    if (value_x0_eps_sub2 != value_x0_raw):
        if display_func:
            if is_correct:
                # Expressions were not yet printed by the NaN branch above.
                display_func('Expressions (raw / eps):')
                display_func(expr_raw)
                display_func(expr_eps)
            display_func(f'[ERROR] Values at x={singularity} not match (raw / eps / eps.limit):')
            display_func(value_x0_raw)
            display_func(value_x0_eps)
            display_func(value_x0_eps_sub2)
        is_correct = False
    return is_correct
class GroupMorphism_libgap(Morphism):
    """A group homomorphism backed by a GAP homomorphism object.

    Wraps a libgap ``GroupHomomorphism`` and exposes Sage-level operations
    (kernel, image, preimage, lifting) by delegating to GAP.
    """

    def __init__(self, homset, gap_hom, check=True):
        # Validate that the GAP object is a homomorphism whose source/range
        # agree with the Sage homset's domain/codomain.
        if check:
            if (not gap_hom.IsGroupHomomorphism()):
                raise ValueError('not a group homomorphism')
            if (homset.domain().gap() != gap_hom.Source()):
                raise ValueError('domains do not agree')
            if (homset.codomain().gap() != gap_hom.Range()):
                raise ValueError('ranges do not agree')
        Morphism.__init__(self, homset)
        self._phi = gap_hom  # the underlying GAP homomorphism

    def __reduce__(self):
        # Pickle via the images of the domain's generators; the parent homset
        # reconstructs the homomorphism from them.
        return (self.parent(), (tuple((self(g) for g in self.domain().gens())),))

    def _repr_type(self):
        return 'Group'

    def gap(self):
        """Return the underlying GAP homomorphism object."""
        return self._phi

    def _latex_(self):
        return '{} \\rightarrow {}'.format(latex(self.domain()), latex(self.codomain()))

    def kernel(self):
        """Return the kernel of this homomorphism as a subgroup of the domain."""
        dom = self.domain()
        return dom._subgroup_constructor(self.gap().Kernel())

    def pushforward(self, J, *args, **kwds):
        """Return the image of the subgroup (or element) ``J`` under this map.

        NOTE(review): if ``J`` is a group but not a subgroup of the domain,
        this falls through and returns None — confirm whether an error was
        intended.
        """
        dom = self.domain()
        codom = self.codomain()
        phi = self.gap()
        if (isinstance(J, dom.Element) and (J in dom)):
            return self._call_(dom(J))
        from sage.groups.perm_gps.permgroup import PermutationGroup_generic
        if (not isinstance(J, (ParentLibGAP, PermutationGroup_generic))):
            raise TypeError('J (={}) must be a libgap or permutation group'.format(J))
        if dom.gap().IsSubgroup(J.gap()).sage():
            return codom._subgroup_constructor(phi.Image(J.gap()))

    image = pushforward  # alias: image of a subgroup under the map

    def _call_(self, g):
        # Map a single element through the GAP homomorphism.
        img_gap = self.gap().Image(g.gap())
        return self.codomain()(img_gap)

    def lift(self, h):
        """Return one preimage of ``h`` in the domain (raises if ``h`` is not in the image)."""
        if (h not in self.codomain()):
            raise TypeError('h (={}) must be an element of the codomain'.format(h))
        h = self.codomain()(h)
        phi = self.gap()
        if (h.gap() not in phi.Image()):
            raise ValueError('{} is not an element of the image of {}'.format(h, self))
        return self.domain()(phi.PreImagesRepresentative(h.gap()))

    def preimage(self, S):
        """Return the full preimage of the subgroup ``S`` as a subgroup of the domain."""
        phi = self.gap()
        from sage.groups.perm_gps.permgroup import PermutationGroup_generic
        if (not isinstance(S, (ParentLibGAP, PermutationGroup_generic))):
            raise TypeError(('%s must be a GAP or permutation group of %s' % (S, self)))
        if (not self.codomain().gap().IsSubgroup(S.gap()).sage()):
            raise ValueError(('%s must be a subgroup of %s' % (S, self)))
        preimage = phi.PreImage(S.gap())
        return self.domain()._subgroup_constructor(preimage)

    def section(self):
        """Return a set-theoretic section (via :meth:`lift`), not a group morphism."""
        from sage.categories.homset import Hom
        from sage.categories.sets_cat import Sets
        H = Hom(self.codomain(), self.domain(), category=Sets())
        return H(self.lift)
_properties
class ControlGraphView(BlockGraphView, abc.ABC):
    """Graph-view mix-in for graphs whose nodes are control-flow blocks.

    Most node/edge queries delegate to the first contained block that owns
    the queried node or edge; aggregate queries union over all blocks.
    """

    def nodes(self) -> List['ControlFlowBlock']:
        ...

    def edges(self) -> List[Edge['dace.sdfg.InterstateEdge']]:
        ...

    def all_nodes_recursive(self) -> Iterator[Tuple[(NodeT, GraphT)]]:
        """Yield every node (with its parent graph), recursing into blocks."""
        for node in self.nodes():
            yield (node, self)
            yield from node.all_nodes_recursive()

    def all_edges_recursive(self) -> Iterator[Tuple[(EdgeT, GraphT)]]:
        """Yield every edge (with its parent graph), recursing into blocks."""
        for e in self.edges():
            yield (e, self)
        for node in self.nodes():
            yield from node.all_edges_recursive()

    def data_nodes(self) -> List[nd.AccessNode]:
        """Return all access nodes contained in any block of this view."""
        data_nodes = []
        for node in self.nodes():
            data_nodes.extend(node.data_nodes())
        return data_nodes

    def entry_node(self, node: nd.Node) -> Optional[nd.EntryNode]:
        """Return the scope entry node of ``node``, or None if not found."""
        for block in self.nodes():
            if node in block.nodes():
                # BUG FIX: delegate to the block's entry_node — the original
                # mistakenly called block.exit_node here.
                return block.entry_node(node)
        return None

    def exit_node(self, entry_node: nd.EntryNode) -> Optional[nd.ExitNode]:
        """Return the scope exit node matching ``entry_node``, or None."""
        for block in self.nodes():
            if entry_node in block.nodes():
                return block.exit_node(entry_node)
        return None

    def memlet_path(self, edge: MultiConnectorEdge[mm.Memlet]) -> List[MultiConnectorEdge[mm.Memlet]]:
        """Return the memlet path containing ``edge``, or [] if not found."""
        for block in self.nodes():
            if edge in block.edges():
                return block.memlet_path(edge)
        return []

    def memlet_tree(self, edge: MultiConnectorEdge) -> mm.MemletTree:
        """Return the memlet tree of ``edge`` (a singleton tree if not found)."""
        for block in self.nodes():
            if edge in block.edges():
                return block.memlet_tree(edge)
        return mm.MemletTree(edge)

    def in_edges_by_connector(self, node: nd.Node, connector: AnyStr) -> Iterable[MultiConnectorEdge[mm.Memlet]]:
        """Return incoming edges of ``node`` entering through ``connector``."""
        for block in self.nodes():
            if node in block.nodes():
                return block.in_edges_by_connector(node, connector)
        return []

    def out_edges_by_connector(self, node: nd.Node, connector: AnyStr) -> Iterable[MultiConnectorEdge[mm.Memlet]]:
        """Return outgoing edges of ``node`` leaving through ``connector``."""
        for block in self.nodes():
            if node in block.nodes():
                return block.out_edges_by_connector(node, connector)
        return []

    def edges_by_connector(self, node: nd.Node, connector: AnyStr) -> Iterable[MultiConnectorEdge[mm.Memlet]]:
        """Return all edges of ``node`` through ``connector``."""
        for block in self.nodes():
            if node in block.nodes():
                return block.edges_by_connector(node, connector)
        # Consistency fix: siblings above return [] when the node is not
        # found; the original implicitly returned None here.
        return []

    def _used_symbols_internal(self, all_symbols: bool, defined_syms: Optional[Set]=None, free_syms: Optional[Set]=None, used_before_assignment: Optional[Set]=None, keep_defined_in_mapping: bool=False) -> Tuple[(Set[str], Set[str], Set[str])]:
        raise NotImplementedError()

    def used_symbols(self, all_symbols: bool, keep_defined_in_mapping: bool=False) -> Set[str]:
        """Return the free symbols used in this view (first element of the internal triple)."""
        return self._used_symbols_internal(all_symbols, keep_defined_in_mapping=keep_defined_in_mapping)[0]

    def read_and_write_sets(self) -> Tuple[(Set[AnyStr], Set[AnyStr])]:
        """Return (read, write) sets of data containers over all blocks.

        Interstate-edge free symbols that name SDFG arrays count as reads.
        """
        read_set = set()
        write_set = set()
        for block in self.nodes():
            for edge in self.in_edges(block):
                read_set |= (edge.data.free_symbols & self.sdfg.arrays.keys())
            (rs, ws) = block.read_and_write_sets()
            read_set.update(rs)
            write_set.update(ws)
        return (read_set, write_set)

    def unordered_arglist(self, defined_syms=None, shared_transients=None) -> Tuple[(Dict[(str, dt.Data)], Dict[(str, dt.Data)])]:
        """Merge the (data-arg, scalar-arg) dicts of all contained blocks."""
        data_args = {}
        scalar_args = {}
        for block in self.nodes():
            (n_data_args, n_scalar_args) = block.unordered_arglist(defined_syms, shared_transients)
            data_args.update(n_data_args)
            scalar_args.update(n_scalar_args)
        return (data_args, scalar_args)

    def top_level_transients(self) -> Set[str]:
        """Union of top-level transients over all contained blocks."""
        res = set()
        for block in self.nodes():
            res.update(block.top_level_transients())
        return res

    def all_transients(self) -> List[str]:
        """Deduplicated list of all transients over all contained blocks."""
        res = []
        for block in self.nodes():
            res.extend(block.all_transients())
        return dtypes.deduplicate(res)

    def replace(self, name: str, new_name: str):
        """Rename ``name`` to ``new_name`` in every contained block."""
        for n in self.nodes():
            n.replace(name, new_name)

    def replace_dict(self, repl: Dict[(str, str)], symrepl: Optional[Dict[(symbolic.SymbolicType, symbolic.SymbolicType)]]=None, replace_in_graph: bool=True, replace_keys: bool=False):
        """Apply a bulk renaming to edges and contained blocks.

        When ``symrepl`` is not given it is derived from ``repl`` by
        symbolizing each key/value pair.
        """
        symrepl = (symrepl or {symbolic.symbol(k): (symbolic.pystr_to_symbolic(v) if isinstance(k, str) else v) for (k, v) in repl.items()})
        if replace_in_graph:
            for edge in self.edges():
                edge.data.replace_dict(repl, replace_keys=replace_keys)
            for state in self.nodes():
                state.replace_dict(repl, symrepl)
(TEST_WITH_TSAN, 'Fails with TSAN with the following error: starting new threads after multi-threaded fork is not supported. Dying (set die_after_fork=0 to override)')
# NOTE(review): the tuple above appears to be the argument list of a stripped
# `@unittest.skipIf(...)` decorator for the class below; confirm against the
# original file.
class TestDictDataLoader(TestCase):
    # Tests that DataLoader correctly collates dict-shaped samples
    # (including nested dicts) produced by DictDataset.

    def setUp(self):
        super(TestDictDataLoader, self).setUp()
        self.dataset = DictDataset()

    def test_sequential_batch(self):
        # Exercise both the persistent-worker and in-process loader paths.
        for persistent_workers in (False, True):
            if persistent_workers:
                loader = DataLoader(self.dataset, batch_size=2, shuffle=False, persistent_workers=persistent_workers, num_workers=1)
            else:
                loader = DataLoader(self.dataset, batch_size=2, shuffle=False, persistent_workers=persistent_workers)
            batch_size = loader.batch_size
            for (i, sample) in enumerate(loader):
                idx = (i * batch_size)
                # Collation must preserve the dict structure of each sample.
                self.assertEqual(set(sample.keys()), {'a_tensor', 'another_dict'})
                self.assertEqual(set(sample['another_dict'].keys()), {'a_number'})
                t = sample['a_tensor']
                self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
                # With shuffle=False, row k of the batch holds item idx + k.
                self.assertTrue((t[0] == idx).all())
                self.assertTrue((t[1] == (idx + 1)).all())
                n = sample['another_dict']['a_number']
                self.assertEqual(n.size(), torch.Size([batch_size]))
                self.assertEqual(n[0], idx)
                self.assertEqual(n[1], (idx + 1))

    ((not TEST_CUDA), 'CUDA unavailable')
    # NOTE(review): the tuple above looks like a stripped `@unittest.skipIf(...)`
    # decorator guarding the CUDA-dependent test below.
    def test_pin_memory(self):
        loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
        for sample in loader:
            # Pinning must apply recursively through nested dicts.
            self.assertTrue(sample['a_tensor'].is_pinned())
            self.assertTrue(sample['another_dict']['a_number'].is_pinned())
class PhaseShiftUpper(PairwiseUnitary):
    """Applies a phase shift of ``phase_shift`` radians to the upper arm only."""

    def __init__(self, phase_shift: float, dtype=NP_COMPLEX):
        super(PhaseShiftUpper, self).__init__(dtype=dtype)
        self.phase_shift = phase_shift

    def matrix(self) -> np.ndarray:
        # diag(e^{i*phi}, 1): only the first mode picks up the phase.
        upper_phase = np.exp(1j * self.phase_shift)
        return np.array([[upper_phase, 0], [0, 1]], dtype=self.dtype)
class TFAutoModelForCausalLM():
    # Factory class dispatching to the concrete TF causal-LM model class
    # registered for a given config type.  Not meant to be instantiated
    # directly — use from_config() or from_pretrained().

    def __init__(self):
        raise EnvironmentError('TFAutoModelForCausalLM is designed to be instantiated using the `TFAutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path)` or `TFAutoModelForCausalLM.from_config(config)` methods.')

    _list_option_in_docstrings(TF_MODEL_FOR_CAUSAL_LM_MAPPING, use_model_types=False)
    # NOTE(review): the bare call above (and the two below) look like stripped
    # decorators; from_config/from_pretrained take `cls`, so a stripped
    # `@classmethod` is presumably missing as well — confirm against the
    # original transformers source.
    def from_config(cls, config):
        # Instantiate (with random weights) the model class mapped to this config type.
        if (type(config) in TF_MODEL_FOR_CAUSAL_LM_MAPPING.keys()):
            return TF_MODEL_FOR_CAUSAL_LM_MAPPING[type(config)](config)
        raise ValueError('Unrecognized configuration class {} for this kind of TFAutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in TF_MODEL_FOR_CAUSAL_LM_MAPPING.keys()))))

    _list_option_in_docstrings(TF_MODEL_FOR_CAUSAL_LM_MAPPING)
    _start_docstrings('Instantiate one of the model classes of the library---with a causal language modeling head---from a pretrained model.', TF_AUTO_MODEL_PRETRAINED_DOCSTRING)
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        # Resolve the config (loading it if not passed), then dispatch to the
        # mapped model class's own from_pretrained.
        config = kwargs.pop('config', None)
        if (not isinstance(config, PretrainedConfig)):
            (config, kwargs) = AutoConfig.from_pretrained(pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs)
        if (type(config) in TF_MODEL_FOR_CAUSAL_LM_MAPPING.keys()):
            return TF_MODEL_FOR_CAUSAL_LM_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError('Unrecognized configuration class {} for this kind of TFAutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in TF_MODEL_FOR_CAUSAL_LM_MAPPING.keys()))))
class CPPTestsFile(pytest.File):
    """Collects C++ (googletest) test binaries described by a YAML manifest as pytest items."""

    def collect(self):
        # Yield one CPPTestItem per test declared in the manifest, then any
        # tests present in the binary but absent from the manifest.
        cpptests = yaml.safe_load(open(self.path).read())
        for suite in cpptests:
            sname = suite['name']
            binary = suite['binary']
            if (platform.system() == 'Windows'):
                binary += '.exe'
            binary = (BASE / binary)
            if (not binary.exists()):
                # NOTE(review): this returns from collect() entirely, skipping
                # all remaining suites — confirm this isn't meant to be `continue`.
                return
            seen = set()
            for test in suite['tests']:
                seen.add(test['test'])
                name = f"{sname} - {test['test']}"
                item = CPPTestItem.from_parent(self, name=name, binary=binary, test=test['test'], script=test.get('script'), args=test.get('args'))
                for m in test.get('markers', []):
                    item.add_marker(getattr(pytest.mark, m))
                (yield item)
            # Surface tests the binary knows about that the manifest missed.
            for tname in self.list_all_tests(binary):
                if (tname not in seen):
                    name = f'{sname} - {tname}'
                    (yield CPPTestItem.from_parent(self, name=name, binary=binary, test=tname))

    def list_all_tests(self, binary):
        # Parse `--gtest_list_tests` output into fully-qualified names.
        # The output alternates suite headers ("Suite.") with indented test
        # names; the trailing dot on the suite is kept so that
        # f'{mod}{l}' yields "Suite.Test".
        proc = subprocess.Popen([str(binary), '--gtest_list_tests'], stdout=subprocess.PIPE)
        (out, _) = proc.communicate()
        lst = []
        lines = list(reversed(out.decode().splitlines()))
        while lines:
            l = lines.pop().strip()
            # The first significant line must be a suite header.
            if l.endswith('.'):
                break
            else:
                raise Exception('Unexpected output')
        mod = l
        while lines:
            l = lines.pop().rstrip()
            if (l.startswith(' ') and (not l.endswith('.'))):
                # Indented line = test name; drop any trailing
                # "# GetParam() = ..." annotation gtest appends.
                l = l.split('#', 2)[0].strip()
                lst.append(f'{mod}{l}')
                continue
            elif l.endswith('.'):
                mod = l.strip()
            else:
                raise Exception(f'Unexpected line: {l}')
        return lst
def categorical_sample(probs):
    """Draw one action per row of *probs*; return (index tensor, one-hot tensor)."""
    sampled_idx = torch.multinomial(probs, 1)
    one_hot = torch.zeros(probs.shape, device=probs.device)
    one_hot.scatter_(1, sampled_idx, 1)
    return (sampled_idx, one_hot)
def space_priority(char):
    """Rank *char* by its Unicode major category for spacing decisions.

    Letters and marks rank highest, then numbers, symbols and punctuation;
    separators and control characters rank negatively.  Raises KeyError for
    any unlisted major category.
    """
    priorities = {
        'L': 7,   # letters
        'M': 7,   # combining marks
        'N': 5,   # numbers
        'S': 3,   # symbols
        'P': 1,   # punctuation
        'Z': -1,  # separators (spaces)
        'C': -3,  # control / other
    }
    major_category = unicodedata.category(char)[0]
    return priorities[major_category]
def are_mcfarland_1973_parameters(v, k, lmbda, return_parameters=False):
    # Decide whether (v, k, lmbda) are parameters of a McFarland-1973
    # difference set; with return_parameters=True, also return the derived
    # (q, s) pair on success (and (False, None) otherwise).
    #
    # The construction requires k - lmbda to be a perfect square qs^2 with
    # lmbda | qs*(qs-1), q = qs*(qs-1)/lmbda + 1 a prime power sharing its
    # prime with qs, and the v and k identities below to hold.
    if ((v <= k) or (k <= lmbda)):
        return ((False, None) if return_parameters else False)
    k = ZZ(k)
    lmbda = ZZ(lmbda)
    # k - lmbda must be a perfect square: qs^2 (r is the remainder).
    (qs, r) = (k - lmbda).sqrtrem()
    if (r or ((qs * (qs - 1)) % lmbda)):
        return ((False, None) if return_parameters else False)
    q = (((qs * (qs - 1)) // lmbda) + 1)
    # Check the defining identities relating v, k to q and qs.
    if ((q <= 1) or ((v * (q - 1)) != ((qs * q) * (((qs * q) + q) - 2))) or ((k * (q - 1)) != (qs * ((qs * q) - 1)))):
        return ((False, None) if return_parameters else False)
    # qs = p^a1 and q = p^a2 must be powers of the same prime with a2 | a1.
    (p1, a1) = qs.is_prime_power(get_data=True)
    (p2, a2) = q.is_prime_power(get_data=True)
    if ((a1 == 0) or (a2 == 0) or (p1 != p2) or (a1 % a2)):
        return ((False, None) if return_parameters else False)
    return ((True, (q, (a1 // a2))) if return_parameters else True)
_optimizer('adadelta')
# NOTE(review): the line above looks like the argument of a stripped
# `@register_optimizer('adadelta')`-style decorator for the class below;
# confirm against the original fairseq source.
class Adadelta(FairseqOptimizer):
    # Thin fairseq wrapper around torch.optim.Adadelta.

    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config)

    # NOTE(review): takes no `self`/`cls` — presumably a stripped @staticmethod.
    def add_args(parser):
        # Register Adadelta-specific command-line options.
        parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO', help='coefficient used for computing a running average of squared gradients')
        parser.add_argument('--adadelta-eps', type=float, default=1e-06, metavar='EPS', help='term added to the denominator to improve numerical stability')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
        parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps')

    # NOTE(review): accessed without parentheses in __init__, so presumably a
    # stripped @property.
    def optimizer_config(self):
        # Keyword arguments forwarded to torch.optim.Adadelta.
        return {'lr': self.args.lr[0], 'rho': self.args.adadelta_rho, 'eps': self.args.adadelta_eps, 'weight_decay': self.args.weight_decay}
class TestTranslation(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en')
generate_main(data_dir)
def test_raw(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv_raw') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--dataset-impl', 'raw'])
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--dataset-impl', 'raw'])
generate_main(data_dir, ['--dataset-impl', 'raw'])
def test_update_freq(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_update_freq') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--update-freq', '3'])
generate_main(data_dir)
def test_max_positions(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_max_positions') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
with self.assertRaises(Exception) as context:
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5'])
self.assertTrue(('skip this example with --skip-invalid-size-inputs-valid-test' in str(context.exception)))
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5', '--skip-invalid-size-inputs-valid-test'])
with self.assertRaises(Exception) as context:
generate_main(data_dir)
generate_main(data_dir, ['--skip-invalid-size-inputs-valid-test'])
def test_generation(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_sampling') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en')
generate_main(data_dir, ['--sampling', '--temperature', '2', '--beam', '2', '--nbest', '2'])
generate_main(data_dir, ['--sampling', '--sampling-topk', '3', '--beam', '2', '--nbest', '2'])
generate_main(data_dir, ['--sampling', '--sampling-topp', '0.2', '--beam', '2', '--nbest', '2'])
generate_main(data_dir, ['--diversity-rate', '0.5', '--beam', '6'])
with self.assertRaises(ValueError):
generate_main(data_dir, ['--diverse-beam-groups', '4', '--match-source-len'])
generate_main(data_dir, ['--prefix-size', '2'])
generate_main(data_dir, ['--retain-dropout'])
def test_eval_bleu(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_eval_bleu') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--eval-bleu', '--eval-bleu-print-samples', '--eval-bleu-remove-bpe', '--eval-bleu-detok', 'space', '--eval-bleu-args', '{"beam": 4, "min_len": 10}'])
def test_lstm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lstm_wiseman_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8'])
generate_main(data_dir)
def test_lstm_bidirectional(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm_bidirectional') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lstm', ['--encoder-layers', '2', '--encoder-bidirectional', '--encoder-hidden-size', '16', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8', '--decoder-layers', '2'])
generate_main(data_dir)
def test_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'], run_validation=True)
generate_main(data_dir)
def test_multilingual_transformer(self):
encoder_langtok_flags = [[], ['--encoder-langtok', 'src'], ['--encoder-langtok', 'tgt']]
decoder_langtok_flags = [[], ['--decoder-langtok']]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_langtok_flags)):
for j in range(len(decoder_langtok_flags)):
enc_ltok_flag = encoder_langtok_flags[i]
dec_ltok_flag = decoder_langtok_flags[j]
with tempfile.TemporaryDirectory(f'test_multilingual_transformer_{i}_{j}') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, arch='multilingual_transformer', task='multilingual_translation', extra_flags=((['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'] + enc_ltok_flag) + dec_ltok_flag), lang_flags=['--lang-pairs', 'in-out,out-in'], run_validation=True, extra_valid_flags=(enc_ltok_flag + dec_ltok_flag))
generate_main(data_dir, extra_flags=((['--task', 'multilingual_translation', '--lang-pairs', 'in-out,out-in', '--source-lang', 'in', '--target-lang', 'out'] + enc_ltok_flag) + dec_ltok_flag))
((sys.platform.lower() == 'darwin'), 'skip latent depth test on MacOS')
def test_multilingual_translation_latent_depth(self):
encoder_latent_layer = [[], ['--encoder-latent-layer']]
decoder_latent_layer = [[], ['--decoder-latent-layer']]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_latent_layer)):
for j in range(len(decoder_latent_layer)):
if ((i == 0) and (j == 0)):
continue
enc_ll_flag = encoder_latent_layer[i]
dec_ll_flag = decoder_latent_layer[j]
with tempfile.TemporaryDirectory(f'test_multilingual_translation_latent_depth_{i}_{j}') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=['--joined-dictionary'])
train_translation_model(data_dir, arch='latent_multilingual_transformer', task='multilingual_translation_latent_depth', extra_flags=((['--user-dir', 'examples/latent_depth/latent_depth_src', '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--share-encoders', '--share-decoders', '--sparsity-weight', '0.1'] + enc_ll_flag) + dec_ll_flag), lang_flags=['--lang-pairs', 'in-out,out-in'], run_validation=True, extra_valid_flags=((['--user-dir', 'examples/latent_depth/latent_depth_src'] + enc_ll_flag) + dec_ll_flag))
generate_main(data_dir, extra_flags=((['--user-dir', 'examples/latent_depth/latent_depth_src', '--task', 'multilingual_translation_latent_depth', '--lang-pairs', 'in-out,out-in', '--source-lang', 'in', '--target-lang', 'out'] + enc_ll_flag) + dec_ll_flag))
def test_translation_multi_simple_epoch(self):
encoder_langtok_flags = [[], ['--encoder-langtok', 'src'], ['--encoder-langtok', 'tgt']]
decoder_langtok_flags = [[], ['--decoder-langtok']]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_langtok_flags)):
for j in range(len(decoder_langtok_flags)):
enc_ltok_flag = encoder_langtok_flags[i]
dec_ltok_flag = decoder_langtok_flags[j]
with tempfile.TemporaryDirectory(f'test_translation_multi_simple_epoch_{i}_{j}') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=['--joined-dictionary'])
train_translation_model(data_dir, arch='transformer', task='translation_multi_simple_epoch', extra_flags=((['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--sampling-method', 'temperature', '--sampling-temperature', '1.5', '--virtual-epoch-size', '1000'] + enc_ltok_flag) + dec_ltok_flag), lang_flags=['--lang-pairs', 'in-out,out-in'], run_validation=True, extra_valid_flags=(enc_ltok_flag + dec_ltok_flag))
generate_main(data_dir, extra_flags=((['--task', 'translation_multi_simple_epoch', '--lang-pairs', 'in-out,out-in', '--source-lang', 'in', '--target-lang', 'out'] + enc_ltok_flag) + dec_ltok_flag))
def test_translation_multi_simple_epoch_no_vepoch(self):
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ['--encoder-langtok', 'src']
dec_ltok_flag = ['--decoder-langtok']
with tempfile.TemporaryDirectory('test_translation_multi_simple_epoch_dict') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=[])
train_translation_model(data_dir, arch='transformer', task='translation_multi_simple_epoch', extra_flags=((['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--sampling-method', 'temperature', '--sampling-temperature', '1.5'] + enc_ltok_flag) + dec_ltok_flag), lang_flags=['--lang-pairs', 'in-out'], run_validation=True, extra_valid_flags=(enc_ltok_flag + dec_ltok_flag))
generate_main(data_dir, extra_flags=((['--task', 'translation_multi_simple_epoch', '--lang-pairs', 'in-out', '--source-lang', 'in', '--target-lang', 'out'] + enc_ltok_flag) + dec_ltok_flag))
def test_translation_multi_simple_epoch_dicts(self):
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ['--encoder-langtok', 'src']
dec_ltok_flag = ['--decoder-langtok']
with tempfile.TemporaryDirectory('test_translation_multi_simple_epoch_dict') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=[])
train_translation_model(data_dir, arch='transformer', task='translation_multi_simple_epoch', extra_flags=((['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--sampling-method', 'temperature', '--sampling-temperature', '1.5', '--virtual-epoch-size', '1000'] + enc_ltok_flag) + dec_ltok_flag), lang_flags=['--lang-pairs', 'in-out'], run_validation=True, extra_valid_flags=(enc_ltok_flag + dec_ltok_flag))
generate_main(data_dir, extra_flags=((['--task', 'translation_multi_simple_epoch', '--lang-pairs', 'in-out', '--source-lang', 'in', '--target-lang', 'out'] + enc_ltok_flag) + dec_ltok_flag))
def test_translation_multi_simple_epoch_src_tgt_dict_spec(self):
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ['--encoder-langtok', 'src']
dec_ltok_flag = ['--decoder-langtok']
with tempfile.TemporaryDirectory('test_translation_multi_simple_epoch_dict') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=[])
train_translation_model(data_dir, arch='transformer', task='translation_multi_simple_epoch', extra_flags=((['--source-dict', f'{data_dir}/dict.in.txt', '--target-dict', f'{data_dir}/dict.out.txt', '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--sampling-method', 'temperature', '--sampling-temperature', '1.5', '--virtual-epoch-size', '1000'] + enc_ltok_flag) + dec_ltok_flag), lang_flags=['--lang-pairs', 'in-out'], run_validation=True, extra_valid_flags=(enc_ltok_flag + dec_ltok_flag))
generate_main(data_dir, extra_flags=((['--task', 'translation_multi_simple_epoch', '--lang-pairs', 'in-out', '--source-lang', 'in', '--target-lang', 'out'] + enc_ltok_flag) + dec_ltok_flag))
def test_transformer_cross_self_attention(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer_cross_self_attention') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-embed-dim', '8', '--no-cross-attention', '--cross-self-attention'], run_validation=True)
generate_main(data_dir, extra_flags=[])
def test_transformer_pointer_generator(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer_pointer_generator') as data_dir:
create_dummy_data(data_dir)
preprocess_summarization_data(data_dir)
train_translation_model(data_dir, 'transformer_pointer_generator', extra_flags=['--user-dir', 'examples/pointer_generator/pointer_generator_src', '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--alignment-layer', '-1', '--alignment-heads', '1', '--source-position-markers', '0'], run_validation=True, extra_valid_flags=['--user-dir', 'examples/pointer_generator/pointer_generator_src'])
generate_main(data_dir, extra_flags=['--user-dir', 'examples/pointer_generator/pointer_generator_src'])
def test_lightconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lightconv') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lightconv_iwslt_de_en', ['--encoder-conv-type', 'lightweight', '--decoder-conv-type', 'lightweight', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'])
generate_main(data_dir)
def test_dynamicconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_dynamicconv') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lightconv_iwslt_de_en', ['--encoder-conv-type', 'dynamic', '--decoder-conv-type', 'dynamic', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'])
generate_main(data_dir)
def test_cmlm_transformer(self):
    """Smoke-test the CMLM (conditional masked LM) non-autoregressive model
    under the translation_lev task with iterative-refinement decoding."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_cmlm_transformer') as data_dir:
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir, ['--joined-dictionary'])
            train_translation_model(data_dir, 'cmlm_transformer', ['--apply-bert-init', '--criterion', 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', '--length-loss-factor', '0.1'], task='translation_lev')
            generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])
def test_nonautoregressive_transformer(self):
    """Smoke-test the single-pass NAT model; note decoding uses
    --iter-decode-max-iter 0 (no refinement iterations)."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_nonautoregressive_transformer') as data_dir:
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir, ['--joined-dictionary'])
            train_translation_model(data_dir, 'nonautoregressive_transformer', ['--apply-bert-init', '--src-embedding-copy', '--criterion', 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', '--length-loss-factor', '0.1'], task='translation_lev')
            generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '0', '--iter-decode-eos-penalty', '0', '--print-step'])
def test_iterative_nonautoregressive_transformer(self):
    """Smoke-test the iterative NAT model (stochastic approximation +
    denoising autoencoder ratio, 3 training steps per sample)."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_iterative_nonautoregressive_transformer') as data_dir:
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir, ['--joined-dictionary'])
            train_translation_model(data_dir, 'iterative_nonautoregressive_transformer', ['--apply-bert-init', '--src-embedding-copy', '--criterion', 'nat_loss', '--noise', 'full_mask', '--stochastic-approx', '--dae-ratio', '0.5', '--train-step', '3'], task='translation_lev')
            generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])
def test_insertion_transformer(self):
    """Smoke-test the insertion transformer with random-mask noise under the
    translation_lev task."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_insertion_transformer') as data_dir:
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir, ['--joined-dictionary'])
            train_translation_model(data_dir, 'insertion_transformer', ['--apply-bert-init', '--criterion', 'nat_loss', '--noise', 'random_mask'], task='translation_lev')
            generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])
def test_mixture_of_experts(self):
    """Smoke-test the translation_moe task (hard mixture of experts, hMoElp,
    3 experts) for both training and expert-0 generation."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_moe') as data_dir:
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir)
            train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--task', 'translation_moe', '--user-dir', 'examples/translation_moe/translation_moe_src', '--method', 'hMoElp', '--mean-pool-gating-network', '--num-experts', '3', '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'])
            generate_main(data_dir, ['--task', 'translation_moe', '--user-dir', 'examples/translation_moe/translation_moe_src', '--method', 'hMoElp', '--mean-pool-gating-network', '--num-experts', '3', '--gen-expert', '0'])
def test_alignment(self):
    """Smoke-test transformer_align trained with supervised alignments and
    the alignment-aware label-smoothed cross-entropy criterion."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_alignment') as data_dir:
            create_dummy_data(data_dir, alignment=True)
            preprocess_translation_data(data_dir, ['--align-suffix', 'align'])
            train_translation_model(data_dir, 'transformer_align', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--load-alignments', '--alignment-layer', '1', '--criterion', 'label_smoothed_cross_entropy_with_alignment'], run_validation=True)
            generate_main(data_dir)
def test_laser_lstm(self):
    """Smoke-test LASER LSTM training via its JSON data config; validation is
    disabled and no generation step is run."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_laser_lstm') as data_dir:
            laser_config_file = create_laser_data_and_config_json(data_dir)
            train_translation_model(laser_config_file.name, 'laser_lstm', ['--user-dir', 'examples/laser/laser_src', '--weighting-alpha', '0.3', '--encoder-bidirectional', '--encoder-hidden-size', '512', '--encoder-layers', '5', '--decoder-layers', '1', '--encoder-embed-dim', '320', '--decoder-embed-dim', '320', '--decoder-lang-embed-dim', '32', '--save-dir', data_dir, '--disable-validation'], task='laser', lang_flags=[])
def test_laser_transformer(self):
    """Smoke-test the LASER transformer variant with the same JSON-config
    driven data setup as the LSTM test."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_laser_transformer') as data_dir:
            laser_config_file = create_laser_data_and_config_json(data_dir)
            train_translation_model(laser_config_file.name, 'laser_transformer', ['--user-dir', 'examples/laser/laser_src', '--weighting-alpha', '0.3', '--encoder-embed-dim', '320', '--decoder-embed-dim', '320', '--decoder-lang-embed-dim', '32', '--save-dir', data_dir, '--disable-validation'], task='laser', lang_flags=[])
def test_alignment_full_context(self):
    """Same as test_alignment but with --full-context-alignment (no causal
    masking when computing alignments)."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_alignment') as data_dir:
            create_dummy_data(data_dir, alignment=True)
            preprocess_translation_data(data_dir, ['--align-suffix', 'align'])
            train_translation_model(data_dir, 'transformer_align', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--load-alignments', '--alignment-layer', '1', '--criterion', 'label_smoothed_cross_entropy_with_alignment', '--full-context-alignment'], run_validation=True)
            generate_main(data_dir)
def test_transformer_layerdrop(self):
    """Smoke-test LayerDrop training, then generation both as-trained and
    with layers pruned at inference via --model-overrides."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_transformer_layerdrop') as data_dir:
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir)
            train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '3', '--decoder-layers', '3', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--encoder-layerdrop', '0.01', '--decoder-layerdrop', '0.01'])
            generate_main(data_dir)
            # Second pass: keep only a subset of encoder/decoder layers.
            generate_main(data_dir, ['--model-overrides', "{'encoder_layers_to_keep':'0,2','decoder_layers_to_keep':'1'}"])
class Permutations_setk(Permutations_set):
    """Length-``k`` partial permutations of a given set."""

    def __classcall_private__(cls, s, k):
        # Normalize ``s`` to a tuple so the parent's caching/hashing works.
        return super().__classcall__(cls, tuple(s), k)

    def __init__(self, s, k):
        Permutations_set.__init__(self, s)
        self._k = k

    def __contains__(self, x):
        # Valid elements have exactly k entries, all drawn (without
        # repetition) from the underlying set.
        if len(x) != self._k:
            return False
        members = list(self._set)
        if not all(item in members for item in x):
            return False
        return len(set(x)) == len(x)

    def _repr_(self):
        return 'Permutations of the set %s of length %s' % (list(self._set), self._k)

    def __iter__(self):
        # Delegate enumeration to itertools, wrapping each tuple as an element.
        for tup in itertools.permutations(self._set, int(self._k)):
            yield self.element_class(self, tup, check=False)

    def random_element(self):
        # Uniform random k-subset in random order.
        return sample(self._set, self._k)
class BaseEstimator():
    """Base class for scikit-multiflow estimators.

    Provides scikit-learn-compatible parameter introspection
    (``get_params``/``set_params``), a truncating pretty ``__repr__``,
    version-stamped pickling, and estimator-tag collection.
    """

    def _get_param_names(cls):
        """Return the sorted names of the estimator's constructor parameters."""
        # NOTE(review): upstream scikit-learn declares this a @classmethod;
        # here it is called through an instance (binding it as ``cls``),
        # which also works -- confirm this is intended.
        # Honour deprecation wrappers that stash the original __init__.
        init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
        if (init is object.__init__):
            # No explicit constructor: nothing to introspect.
            return []
        init_signature = inspect.signature(init)
        # Keep only explicit constructor parameters (drop self and **kwargs).
        parameters = [p for p in init_signature.parameters.values() if ((p.name != 'self') and (p.kind != p.VAR_KEYWORD))]
        for p in parameters:
            if (p.kind == p.VAR_POSITIONAL):
                # *args makes parameters unnameable, so it is forbidden.
                raise RuntimeError(("scikit-multiflow estimators should always specify their parameters in the signature of their __init__ (no varargs). %s with constructor %s doesn't follow this convention." % (cls, init_signature)))
        return sorted([p.name for p in parameters])

    def get_params(self, deep=True):
        """Return the estimator's parameters as a dict.

        Parameters
        ----------
        deep : bool, default=True
            If True, also include parameters of nested estimators, keyed as
            ``<component>__<parameter>``.

        Returns
        -------
        dict
            Parameter names mapped to their current values.
        """
        out = dict()
        for key in self._get_param_names():
            value = getattr(self, key, None)
            if (deep and hasattr(value, 'get_params')):
                # Flatten nested estimator parameters with the '__' convention.
                deep_items = value.get_params().items()
                out.update(((((key + '__') + k), val) for (k, val) in deep_items))
            out[key] = value
        return out

    def set_params(self, **params):
        """Set parameters on this estimator (and, via ``component__param``
        keys, on nested estimators). Returns ``self``."""
        if (not params):
            # Simple optimization to gain speed (inspect is slow).
            return self
        valid_params = self.get_params(deep=True)
        # Group nested parameters by their top-level component name.
        nested_params = defaultdict(dict)
        for (key, value) in params.items():
            (key, delim, sub_key) = key.partition('__')
            if (key not in valid_params):
                raise ValueError(('Invalid parameter %s for estimator %s. Check the list of available parameters with `estimator.get_params().keys()`.' % (key, self)))
            if delim:
                nested_params[key][sub_key] = value
            else:
                setattr(self, key, value)
                valid_params[key] = value
        # Delegate grouped nested parameters to the owning sub-estimator.
        for (key, sub_params) in nested_params.items():
            valid_params[key].set_params(**sub_params)
        return self

    def __repr__(self, N_CHAR_MAX=700):
        """Pretty-print the estimator, truncating the middle when the
        non-whitespace length exceeds ``N_CHAR_MAX`` characters."""
        from ..utils._pprint import _EstimatorPrettyPrinter
        N_MAX_ELEMENTS_TO_SHOW = 30
        pp = _EstimatorPrettyPrinter(compact=True, indent=1, indent_at_name=True, n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW)
        repr_ = pp.pformat(self)
        # Count non-blank characters only; whitespace does not add clutter.
        n_nonblank = len(''.join(repr_.split()))
        if (n_nonblank > N_CHAR_MAX):
            lim = (N_CHAR_MAX // 2)
            regex = ('^(\\s*\\S){%d}' % lim)
            # Offsets of the lim-th non-blank character from each end
            # (right side measured on the reversed string).
            left_lim = re.match(regex, repr_).end()
            right_lim = re.match(regex, repr_[::(- 1)]).end()
            if ('\n' in repr_[left_lim:(- right_lim)]):
                # Multiline output: extend the right cut to a line boundary
                # so the ellipsis sits cleanly between lines.
                regex += '[^\\n]*\\n'
                right_lim = re.match(regex, repr_[::(- 1)]).end()
            ellipsis = '...'
            if ((left_lim + len(ellipsis)) < (len(repr_) - right_lim)):
                # Only truncate when it actually shortens the output.
                repr_ = ((repr_[:left_lim] + '...') + repr_[(- right_lim):])
        return repr_

    def __getstate__(self):
        try:
            state = super().__getstate__()
        except AttributeError:
            state = self.__dict__.copy()
        if type(self).__module__.startswith('skmultiflow.'):
            # Stamp the pickle with the library version for later checking.
            return dict(state.items(), _skmultiflow_version=__version__)
        else:
            return state

    def __setstate__(self, state):
        if type(self).__module__.startswith('skmultiflow.'):
            pickle_version = state.pop('_skmultiflow_version', 'pre-0.18')
            if (pickle_version != __version__):
                # Unpickling across versions is allowed but flagged as risky.
                warnings.warn('Trying to unpickle estimator {0} from version {1} when using version {2}. This might lead to breaking code or invalid results. Use at your own risk.'.format(self.__class__.__name__, pickle_version, __version__), UserWarning)
        try:
            super().__setstate__(state)
        except AttributeError:
            self.__dict__.update(state)

    def _get_tags(self):
        """Collect estimator tags from the MRO, most-derived class last."""
        collected_tags = {}
        for base_class in inspect.getmro(self.__class__):
            if (hasattr(base_class, '_more_tags') and (base_class != self.__class__)):
                more_tags = base_class._more_tags(self)
                collected_tags = _update_if_consistent(collected_tags, more_tags)
        if hasattr(self, '_more_tags'):
            more_tags = self._more_tags()
            collected_tags = _update_if_consistent(collected_tags, more_tags)
        tags = _DEFAULT_TAGS.copy()
        tags.update(collected_tags)
        return tags
class SquashDones(gym.Wrapper):
    """Wrapper that reduces a per-agent ``done`` collection to one boolean:
    the episode is flagged done only when every entry is truthy."""

    def step(self, action):
        obs, rew, dones, info = self.env.step(action)
        episode_over = all(dones)
        return (obs, rew, episode_over, info)
class AnomalibCLI(LightningCLI):
    """Anomalib wrapper around ``LightningCLI``.

    Registers anomalib-specific arguments (export mode, NNCF config, tiling,
    post-processing, metrics and visualization callback groups) and, before
    class instantiation, derives the default results directory and assembles
    the trainer callback list.
    """

    def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
        """Register extra CLI arguments and callback argument groups."""
        parser.add_argument('--export_mode', type=str, default='', help='Select export mode to ONNX or OpenVINO IR format.')
        parser.add_argument('--nncf', type=str, help='Path to NNCF config to enable quantized training.')
        parser.add_lightning_class_args(TilerConfigurationCallback, 'tiling')
        parser.set_defaults({'tiling.enable': False})
        parser.add_lightning_class_args(PostProcessingConfigurationCallback, 'post_processing')
        parser.set_defaults({'post_processing.normalization_method': 'min_max', 'post_processing.threshold_method': 'adaptive', 'post_processing.manual_image_threshold': None, 'post_processing.manual_pixel_threshold': None})
        parser.add_lightning_class_args(MetricsConfigurationCallback, 'metrics')
        parser.set_defaults({'metrics.task': 'segmentation', 'metrics.image_metrics': ['F1Score', 'AUROC'], 'metrics.pixel_metrics': ['F1Score', 'AUROC']})
        parser.add_lightning_class_args(ImageVisualizerCallback, 'visualization')
        parser.set_defaults({'visualization.mode': 'full', 'visualization.task': 'segmentation', 'visualization.image_save_path': '', 'visualization.save_images': False, 'visualization.show_images': False, 'visualization.log_images': False})

    def __set_default_root_dir(self) -> None:
        """Set ``trainer.default_root_dir`` to
        ``<root>/<model>/<data>/<category>/<timestamp>`` for fresh runs, or
        reuse the checkpoint's run directory when resuming."""
        subcommand = self.config['subcommand']
        config = self.config[subcommand]
        if config.trainer.resume_from_checkpoint is None:
            root_dir = config.trainer.default_root_dir or './results'
            model_name = config.model.class_path.split('.')[-1].lower()
            data_name = config.data.class_path.split('.')[-1].lower()
            # Some datamodules take no ``category`` init arg; fall back to ''.
            category = config.data.init_args.category if 'category' in config.data.init_args else ''
            time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
            default_root_dir = os.path.join(root_dir, model_name, data_name, category, time_stamp)
        else:
            # Checkpoints live in <run_dir>/weights/, so go two levels up.
            default_root_dir = str(Path(config.trainer.resume_from_checkpoint).parent.parent)
        if config.visualization.image_save_path == '':
            self.config[subcommand].visualization.image_save_path = default_root_dir + '/images'
        self.config[subcommand].trainer.default_root_dir = default_root_dir

    def __set_callbacks(self) -> None:
        """Assemble the trainer callbacks: checkpointing, checkpoint loading,
        timing, score normalization, visualization, export and NNCF."""
        subcommand = self.config['subcommand']
        config = self.config[subcommand]
        callbacks = []
        # Mirror an EarlyStopping callback's monitor/mode in the checkpoint
        # callback, if the user configured one.
        monitor = None
        mode = 'max'
        if config.trainer.callbacks is not None:
            callbacks = config.trainer.callbacks
            callback_args = {c['class_path'].split('.')[-1]: c['init_args'] for c in callbacks}
            if 'EarlyStopping' in callback_args:
                monitor = callback_args['EarlyStopping']['monitor']
                mode = callback_args['EarlyStopping']['mode']
        checkpoint = ModelCheckpoint(dirpath=os.path.join(config.trainer.default_root_dir, 'weights'), filename='model', monitor=monitor, mode=mode, auto_insert_metric_name=False)
        callbacks.append(checkpoint)
        if config.trainer.resume_from_checkpoint:
            load_model = LoadModelCallback(config.trainer.resume_from_checkpoint)
            callbacks.append(load_model)
        callbacks.append(TimerCallback())
        normalization = config.post_processing.normalization_method
        if normalization:
            if normalization == 'min_max':
                callbacks.append(MinMaxNormalizationCallback())
            elif normalization == 'cdf':
                callbacks.append(CdfNormalizationCallback())
            else:
                raise ValueError(f'''Unknown normalization type {normalization}.
Available types are either None, min_max or cdf''')
        add_visualizer_callback(callbacks, config)
        self.config[subcommand].visualization = config.visualization
        # BUG FIX: ``export_mode`` defaults to '' (see add_arguments_to_parser),
        # so the previous ``is not None`` check was always true: an export
        # callback was registered even when no export was requested, and the
        # warning branch was unreachable. Test truthiness instead; an empty
        # mode now simply skips export.
        if config.export_mode:
            from anomalib.utils.callbacks.export import ExportCallback
            logger.info('Setting model export to %s', config.export_mode)
            callbacks.append(ExportCallback(input_size=config.data.init_args.image_size, dirpath=os.path.join(config.trainer.default_root_dir, 'compressed'), filename='model', export_mode=config.export_mode))
        if config.nncf:
            if os.path.isfile(config.nncf) and config.nncf.endswith('.yaml'):
                nncf_module = import_module('anomalib.core.callbacks.nncf_callback')
                nncf_callback = getattr(nncf_module, 'NNCFCallback')
                callbacks.append(nncf_callback(config=OmegaConf.load(config.nncf), dirpath=os.path.join(config.trainer.default_root_dir, 'compressed'), filename='model'))
            else:
                raise ValueError(f'--nncf expects a path to nncf config which is a yaml file, but got {config.nncf}')
        self.config[subcommand].trainer.callbacks = callbacks

    def before_instantiate_classes(self) -> None:
        """Lightning hook: finalize root dir and callbacks before classes are
        instantiated."""
        self.__set_default_root_dir()
        self.__set_callbacks()
        print('done.')
def test_mesh():
    """Check that cartesian_mesh agrees with (cos, sin) of the curvilinear mesh."""
    N = 4
    space = FunctionSpace(N, 'F', dtype='d', coordinates=((x,), rv))
    theta = space.mesh()
    cart = space.cartesian_mesh()
    # Product of the two coordinate errors summed over the mesh.
    err = np.sum(abs(cart[0] - np.cos(theta)) * abs(cart[1] - np.sin(theta)))
    assert err < 1e-12
def get_mnist_config(_processID=0, _maxProcessID=8, _maxGPU=8, _DO_SHUFFLE=False):
    """Select this process's slice of the experiment grid and its GPU id.

    Returns a (parameter-list, gpu_id) pair; the list is optionally shuffled.
    """
    grid = grid_maker(methodList, useMixupList, outlierRatioList, errTypeList, tau_invList)
    task_ids = get_properIdx(_processID, _maxProcessID, _nTask=grid.nIter)
    params = [grid.paramList[i] for i in task_ids]
    # Round-robin GPU assignment across processes.
    gpu_id = _processID % _maxGPU
    if _DO_SHUFFLE:
        shuffle(params)
    return (params, gpu_id)
class LinearWarmupScheduler(_BaseWarmupScheduler):
    """Ramp the learning rate linearly toward each base lr during warmup,
    starting from ``min_lr``, then defer to the successor scheduler."""

    def __init__(self, optimizer, successor, warmup_epoch, min_lr, last_epoch=(- 1), verbose=False):
        self.min_lr = min_lr
        super().__init__(optimizer, successor, warmup_epoch, last_epoch, verbose)

    def get_lr(self):
        # Warmup finished: hand control to the wrapped scheduler.
        if self.last_epoch >= self.warmup_epoch:
            return self.successor.get_last_lr()
        # First epoch starts from the configured floor.
        if self.last_epoch == 0:
            return [self.min_lr] * len(self.base_lrs)
        # Linear interpolation toward each base learning rate.
        return [base * self.last_epoch / self.warmup_epoch for base in self.base_lrs]
class AggregationLayer(nn.Module):
    """Fuse a detail (high-resolution) branch with a semantic (low-resolution)
    branch: each branch gates the other, and the downsampled product is
    upsampled and summed with the full-resolution product before a final conv.
    """

    def __init__(self) -> None:
        super().__init__()
        # Detail branch, full resolution: depthwise 3x3 + pointwise 1x1.
        self.left1 = nn.Sequential(
            nn.Conv2d(128, 128, 3, 1, 1, groups=128, bias=False),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 128, 1, 1, 0, bias=False),
        )
        # Detail branch, downsampled 4x (stride-2 conv then stride-2 pool).
        self.left2 = nn.Sequential(
            nn.Conv2d(128, 128, 3, 2, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.AvgPool2d(3, 2, 1, ceil_mode=False),
        )
        # Semantic branch upsampled 4x, squashed to a gate in (0, 1).
        self.right1 = nn.Sequential(
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.Upsample(scale_factor=4),
            nn.Sigmoid(),
        )
        # Semantic branch at native resolution, also squashed to a gate.
        self.right2 = nn.Sequential(
            nn.Conv2d(128, 128, 3, 1, 1, groups=128, bias=False),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 128, 1, 1, 0, bias=False),
            nn.Sigmoid(),
        )
        self.up = nn.Upsample(scale_factor=4)
        self.conv = ConvModule(128, 128, 3, 1, 1)

    def forward(self, x_d, x_s):
        detail_feat = self.left1(x_d)
        detail_down = self.left2(x_d)
        sem_gate_up = self.right1(x_s)
        sem_gate = self.right2(x_s)
        # Cross-gate the branches, bring both to full resolution, and fuse.
        fused = detail_feat * sem_gate_up + self.up(detail_down * sem_gate)
        return self.conv(fused)
class AdamWeightDecay(metaclass=DummyObject):
    """Import-time placeholder for the TensorFlow ``AdamWeightDecay``
    optimizer.

    Exists so the name is importable without TensorFlow installed;
    instantiation calls ``requires_backends``, which is expected to raise a
    helpful error when the 'tf' backend is missing (behavior defined
    elsewhere -- confirm).
    """
    # Backends this dummy stands in for.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def _rebuild_sparse_tensor(layout, data):
if (layout == torch.sparse_coo):
(indices, values, size) = data
result = torch._sparse_coo_tensor_unsafe(indices, values, size)
_sparse_tensors_to_validate.append(result)
return result
raise NotImplementedError(('rebuilding sparse tensor for layout %s' % layout)) |
class DummyEncoderModel(FairseqEncoderModel):
    """Minimal encoder-only model used for testing."""

    def __init__(self, encoder):
        super().__init__(encoder)

    def build_model(cls, args, task):
        # NOTE(review): typically decorated @classmethod upstream -- confirm.
        return cls(DummyEncoder())

    def get_logits(self, net_output):
        # Inverse sigmoid (logit) of the encoder output probabilities.
        probs = net_output['encoder_out']
        return torch.log(probs / (1 - probs))

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        normalized = super().get_normalized_probs(net_output, log_probs, sample=sample)
        normalized.batch_first = True
        return normalized
def main(args):
    """Convert particle-coordinate files between formats (STAR/BOX/JSON/...).

    Reads the files in ``args.files`` (input format given or auto-detected),
    optionally filters by score threshold, rescales coordinates, fills in
    optional microscope metadata, mirrors the y-axis, and writes the result
    in the requested (or auto-detected) output format.
    """
    verbose = args.verbose
    form = args._from
    from_forms = [form for _ in range(len(args.files))]
    # Auto-detect each input file's format from its extension.
    if (form == 'auto'):
        try:
            from_forms = [file_utils.detect_format(path) for path in args.files]
        except file_utils.UnknownFormatError as e:
            print((('Error: unrecognized input coordinates file extension (' + e.ext) + ')'), file=sys.stderr)
            sys.exit(1)
    formats_detected = list(set(from_forms))
    if (verbose > 0):
        print(('# INPUT formats detected: ' + str(formats_detected)), file=sys.stderr)
    output_path = args.output
    output = None
    to_form = args.to
    if (output_path is None):
        # No destination: stream to stdout, which rules out BOX/JSON output.
        output = sys.stdout
        if (to_form == 'auto'):
            if (len(formats_detected) == 1):
                to_form = from_forms[0]
            else:
                print('Error: writing file to stdout and multiple input formats present with no output format (--to) set! Please tell me what format to write!')
                sys.exit(1)
        if ((to_form == 'box') or (to_form == 'json')):
            print('Error: writing BOX or JSON output files requires a destination directory. Please set the --output parameter!')
            sys.exit(1)
    image_ext = args.image_ext
    boxsize = args.boxsize
    if (to_form == 'auto'):
        if (output_path[(- 1)] == '/'):
            # A trailing slash means per-image files: BOX if a boxsize is
            # set, JSON otherwise; both need the image extension.
            if (image_ext is None):
                print('Error: writing BOX or JSON output files requires setting the image file extension!')
                sys.exit(1)
            if (boxsize > 0):
                if (verbose > 0):
                    print('# Detected output format is BOX, because OUTPUT is a directory and boxsize > 0.', file=sys.stderr)
                to_form = 'box'
            else:
                if (verbose > 0):
                    print('# Detected output format is JSON, because OUTPUT is a directory and no boxsize set.', file=sys.stderr)
                to_form = 'json'
        else:
            try:
                to_form = file_utils.detect_format(output_path)
            # BUG FIX: was ``file_utils.UnkownFormatError`` (misspelled);
            # the attribute lookup itself would raise AttributeError instead
            # of handling the unknown-extension case (compare the input-side
            # handler above, which uses the correct spelling).
            except file_utils.UnknownFormatError as e:
                print((('Error: unrecognized output coordinates file extension (' + e.ext) + ')'), file=sys.stderr)
                sys.exit(1)
    if (verbose > 0):
        print(('# OUTPUT format: ' + to_form))
    suffix = args.suffix
    t = args.threshold
    down_scale = args.down_scale
    up_scale = args.up_scale
    scale = (up_scale / down_scale)
    if ((len(formats_detected) == 1) and (formats_detected[0] == 'star') and (to_form == 'star')):
        # Fast path: STAR -> STAR keeps the full table (all extra columns).
        dfs = []
        for path in args.files:
            with open(path, 'r') as f:
                table = star.parse(f)
            dfs.append(table)
        table = pd.concat(dfs, axis=0)
        if (star.SCORE_COLUMN_NAME in table.columns):
            table = table.loc[(table[star.SCORE_COLUMN_NAME] >= t)]
        if (scale != 1):
            # Rescale and round coordinates to integer pixel positions.
            x_coord = table[star.X_COLUMN_NAME].values
            x_coord = np.round((scale * x_coord)).astype(int)
            table[star.X_COLUMN_NAME] = x_coord
            y_coord = table[star.Y_COLUMN_NAME].values
            y_coord = np.round((scale * y_coord)).astype(int)
            table[star.Y_COLUMN_NAME] = y_coord
        # Optional microscope metadata columns (only when explicitly set).
        if (args.voltage > 0):
            table[star.VOLTAGE] = args.voltage
        if (args.detector_pixel_size > 0):
            table[star.DETECTOR_PIXEL_SIZE] = args.detector_pixel_size
        if (args.magnification > 0):
            table[star.MAGNIFICATION] = args.magnification
        if (args.amplitude_contrast > 0):
            table[star.AMPLITUDE_CONTRAST] = args.amplitude_contrast
        if (output is None):
            with open(output_path, 'w') as f:
                star.write(table, f)
        else:
            star.write(table, output)
    else:
        # General path: go through the normalized coordinate representation.
        dfs = []
        for i in range(len(args.files)):
            path = args.files[i]
            coords = file_utils.read_coordinates(path, format=from_forms[i])
            dfs.append(coords)
        coords = pd.concat(dfs, axis=0)
        if ('score' in coords.columns):
            coords = coords.loc[(coords['score'] >= t)]
        if (scale != 1):
            x_coord = coords['x_coord'].values
            x_coord = np.round((scale * x_coord)).astype(int)
            coords['x_coord'] = x_coord
            y_coord = coords['y_coord'].values
            y_coord = np.round((scale * y_coord)).astype(int)
            coords['y_coord'] = y_coord
        if (args.voltage > 0):
            coords['voltage'] = args.voltage
        if (args.detector_pixel_size > 0):
            coords['detector_pixel_size'] = args.detector_pixel_size
        if (args.magnification > 0):
            coords['magnification'] = args.magnification
        if (args.amplitude_contrast > 0):
            coords['amplitude_contrast'] = args.amplitude_contrast
        invert_y = args.invert_y
        if invert_y:
            # Mirroring y requires each image's height, hence --imagedir.
            if (args.imagedir is None):
                print('Error: --imagedir must specify the directory of images in order to mirror the y-axis coordinates', file=sys.stderr)
                sys.exit(1)
            dfs = []
            for (image_name, group) in coords.groupby('image_name'):
                impath = ((os.path.join(args.imagedir, image_name) + '.') + args.image_ext)
                impath = glob.glob(impath)[0]
                im = load_image(impath)
                height = im.height
                group = mirror_y_axis(group, height)
                dfs.append(group)
            coords = pd.concat(dfs, axis=0)
        if ((output is None) and (to_form != 'box') and (to_form != 'json')):
            output = open(output_path, 'w')
        if ((to_form == 'box') or (to_form == 'json')):
            # BOX/JSON writers expect a destination directory path, not a stream.
            output = output_path
        file_utils.write_coordinates(output, coords, format=to_form, boxsize=boxsize, image_ext=image_ext, suffix=suffix)
class TabRegrTask(BaseTask):
    """Tabular regression task (case code 'CSR')."""

    def __init__(self, target, features=None, metadata=None):
        # Mark the task case before delegating to the common base setup.
        self._case = 'CSR'
        super().__init__(target, features=features, metadata=metadata)
def test_append_to_file_uses_checksum_from_appended_file(test_file_path: Path, agent: Agent):
    """Appending twice should produce one log line per append, each tagged
    with the MD5 checksum of the cumulative appended content."""
    append_text = 'This is appended text.\n'
    file_ops.append_to_file(test_file_path, append_text, agent=agent)
    file_ops.append_to_file(test_file_path, append_text, agent=agent)
    with open(agent.config.file_logger_path, 'r', encoding='utf-8') as f:
        log_contents = f.read()
    # Recompute the expected running checksums: after one append, then two.
    digest = hashlib.md5()
    digest.update(append_text.encode('utf-8'))
    checksum1 = digest.hexdigest()
    digest.update(append_text.encode('utf-8'))
    checksum2 = digest.hexdigest()
    assert (log_contents == f'''append: {test_file_path} #{checksum1}
append: {test_file_path} #{checksum2}
''')
def add_md_help_argument(parser):
    """Register the ``-md`` flag, which prints Markdown-formatted help and exits."""
    parser.add_argument(
        '-md',
        action=MarkdownHelpAction,
        help='print Markdown-formatted help text and exit.',
    )
def RandomBipartite(n1, n2, p, set_position=False, seed=None):
    """Return a random bipartite graph on parts of sizes ``n1`` and ``n2``,
    each cross edge present independently with probability ``p``.

    Vertices are labeled (0, i) and (1, i); ``set_position`` lays the two
    parts out on parallel horizontal lines, ``seed`` fixes the RNG.
    """
    if not (0 <= p <= 1):
        raise ValueError('parameter p is a probability, and so should be a real value between 0 and 1')
    if n1 <= 0 or n2 <= 0:
        raise ValueError('n1 and n2 should be integers strictly greater than 0')
    if seed is not None:
        set_random_seed(seed)
    from numpy.random import uniform
    g = Graph(name=f'Random bipartite graph of order {n1}+{n2} with edge probability {p}')
    top = [(0, i) for i in range(n1)]
    bottom = [(1, i) for i in range(n2)]
    g.add_vertices(top)
    g.add_vertices(bottom)
    # Keep the original nesting (w outer, v inner) so seeded runs reproduce
    # the same graph.
    for w in range(n2):
        for v in range(n1):
            if uniform() <= p:
                g.add_edge((0, v), (1, w))
    if set_position:
        nmax = max(n1, n2)
        g._line_embedding(top, first=(0, 1), last=(nmax, 1))
        g._line_embedding(bottom, first=(0, 0), last=(nmax, 0))
    return g
def test_store_not_overwrite(tensor_db):
    """With ``overwrite=False``, ``_store`` must leave existing data untouched."""
    origin_tensor = tensor_db.tensor_db.copy(deep=True)
    _store(tensor_db.tensor_db, 'tensor_name', 'agg', 0, False, ('col1',), np.array([5, 6, 7, 8, 9]), overwrite=False)
    # The database must equal the pre-call snapshot exactly.
    assert_frame_equal(origin_tensor, tensor_db.tensor_db)
def rollback_env_variables(environ, env_var_subfolders):
    """Generate shell lines that undo workspace-specific modifications to the
    given environment variables, mutating ``environ`` accordingly.

    ``env_var_subfolders`` maps a variable name to one subfolder or a list of
    subfolders; each (variable, subfolder) pair is rolled back against a
    snapshot of the unmodified environment.
    """
    lines = []
    # Snapshot so every rollback sees the original values, not partial edits.
    snapshot = copy.copy(environ)
    for key in sorted(env_var_subfolders):
        subfolders = env_var_subfolders[key]
        if not isinstance(subfolders, list):
            subfolders = [subfolders]
        for subfolder in subfolders:
            rolled_back = _rollback_env_variable(snapshot, key, subfolder)
            if rolled_back is not None:
                environ[key] = rolled_back
                lines.append(assignment(key, rolled_back))
    if lines:
        lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
    return lines
def build_def(ctx, py_def, type_line, def_name, self_name=None):
    """Build a TorchScript ``Def`` AST node from a parsed Python function.

    Parameters
    ----------
    ctx : source context used to produce source ranges.
    py_def : the parsed ``ast`` function-definition node.
    type_line : optional mypy-style ``# type:`` comment for the signature.
    def_name : name to give the emitted ``Def``.
    self_name : name of the enclosing class when building a method, else None.
    """
    body = py_def.body
    # Source range of the 'def' keyword; decorators shift the line offset.
    r = ctx.make_range((py_def.lineno + len(py_def.decorator_list)), py_def.col_offset, (py_def.col_offset + len('def')))
    param_list = build_param_list(ctx, py_def.args, self_name)
    return_type = None
    if (getattr(py_def, 'returns', None) is not None):
        return_type = build_expr(ctx, py_def.returns)
    decl = Decl(r, param_list, return_type)
    is_method = (self_name is not None)
    if (type_line is not None):
        # Merge the type comment's signature into the declaration.
        type_comment_decl = torch._C.parse_type_comment(type_line)
        decl = torch._C.merge_type_from_type_comment(decl, type_comment_decl, is_method)
    return Def(Ident(r, def_name), decl, build_stmts(ctx, body))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.