code stringlengths 281 23.7M |
|---|
def simrank(G, nodelist=None, c=0.8, num_iterations=10, weight='weight'):
    """Compute SimRank similarity scores for every node pair of ``G``.

    Iterates the fixed point ``sim = c * M^T @ sim @ M`` and then forces the
    diagonal back to 1 (each node is maximally similar to itself).

    Parameters
    ----------
    G : graph
        Input graph; ``len(G)`` gives the node count.
    nodelist : list, optional
        Node ordering forwarded to ``raw_google_matrix``.
    c : float
        Decay factor, expected in (0, 1).
    num_iterations : int
        Number of fixed-point iterations.
    weight : str
        Edge attribute used as weight.

    Returns
    -------
    numpy.ndarray
        n x n similarity matrix (float32-initialized).
    """
    n = len(G)
    M = raw_google_matrix(G, nodelist=nodelist, weight=weight)
    sim = np.identity(n, dtype=np.float32)
    for i in range(num_iterations):
        log.debug('Starting SimRank iteration %d', i)
        # BUG FIX: the original line "(((c * M.T) sim) M)" dropped the
        # matrix-multiply operators and was a SyntaxError.
        temp = (c * M.T).dot(sim).dot(M)
        # Restore ones on the diagonal: drop temp's diagonal, add identity.
        sim = (temp + np.identity(n)) - np.diag(np.diag(temp))
    return sim
def __get_occurrence_variance(occurrence_matrix: np.ndarray, axis=0) -> float:
    """Return the variance of 1-based occurrence positions along *axis*.

    The marginal probabilities (summed from the probability matrix) weight
    the squared distance of each position from the occurrence mean.
    """
    marginal = np.sum(__get_probability_matrix(occurrence_matrix), axis=axis)
    mean_value = __get_occurrence_mean(occurrence_matrix, axis=axis)
    variance = 0.0
    # Positions are 1-based: entry i of the marginal corresponds to i + 1.
    for position, weight in enumerate(marginal, start=1):
        variance += ((position - mean_value) ** 2) * weight
    return variance
def _matmul_to_gemm(node: NodeProto, model: ModelProto):
    """Rewrite an ONNX MatMul node into an equivalent Gemm node in-place.

    Gemm requires a bias input, so an all-zero float32 bias initializer is
    appended. If the constant weight was reached through a transpose, the
    stored initializer is replaced by its (1, 0)-transposed copy so the Gemm
    can consume it directly.
    """
    assert (node.op_type == 'MatMul')
    # Locate the constant weight feeding this node; `transposed` reports
    # whether it was found behind a transpose (exact semantics are
    # project-defined in retrieve_constant_input -- TODO confirm).
    (weight, transposed) = retrieve_constant_input(node, model, WEIGHT_INDEX)
    if transposed:
        # Point the node at the initializer directly, then swap the stored
        # tensor for its transposed copy.
        node.input[WEIGHT_INDEX] = weight.name
        model.graph.initializer.remove(weight)
        weight = transpose_tensor(weight, (1, 0))
        model.graph.initializer.append(weight)
    node.op_type = 'Gemm'
    node.name = node.name.replace('MatMul', 'Gemm')
    # Gemm takes a third (bias) input; add a zero bias sized to the weight's
    # output dimension (dims[1] of the possibly-transposed weight).
    bias_name = (node.name + '.bias')
    bias_data = np.zeros(weight.dims[1])
    bias = numpy_helper.from_array(bias_data.astype(np.float32), name=bias_name)
    model.graph.initializer.append(bias)
    node.input.append(bias_name)
class PromoteChatMember():
    """Client mixin implementing :meth:`promote_chat_member`."""

    async def promote_chat_member(self: 'pyrogram.Client', chat_id: Union[(int, str)], user_id: Union[(int, str)], privileges: 'types.ChatPrivileges'=None) -> bool:
        """Promote or demote a chat member's admin privileges.

        Resolves both peers, preserves any existing custom admin title
        (rank), and applies the given privileges (defaults when omitted).
        Returns True on success.
        """
        chat_id = (await self.resolve_peer(chat_id))
        user_id = (await self.resolve_peer(user_id))
        if (privileges is None):
            privileges = types.ChatPrivileges()
        try:
            # Fetch the current participant record so an existing custom
            # admin title survives the privilege update.
            raw_chat_member = (await self.invoke(raw.functions.channels.GetParticipant(channel=chat_id, participant=user_id))).participant
        except errors.RPCError:
            # Participant not found / not visible: proceed without a rank.
            raw_chat_member = None
        rank = None
        if isinstance(raw_chat_member, raw.types.ChannelParticipantAdmin):
            rank = raw_chat_member.rank
        # Map the high-level ChatPrivileges onto raw ChatAdminRights and apply.
        (await self.invoke(raw.functions.channels.EditAdmin(channel=chat_id, user_id=user_id, admin_rights=raw.types.ChatAdminRights(anonymous=privileges.is_anonymous, change_info=privileges.can_change_info, post_messages=privileges.can_post_messages, edit_messages=privileges.can_edit_messages, delete_messages=privileges.can_delete_messages, ban_users=privileges.can_restrict_members, invite_users=privileges.can_invite_users, pin_messages=privileges.can_pin_messages, add_admins=privileges.can_promote_members, manage_call=privileges.can_manage_video_chats, other=privileges.can_manage_chat), rank=(rank or ''))))
        return True
# BUG FIX: the decorator had lost its "@pytest.mark" prefix (a bare
# ".parametrize(...)" line is a SyntaxError).
@pytest.mark.parametrize('method, command', [('x1', 'X1.'), ('y1', 'Y1.'), ('x2', 'X2.'), ('y2', 'Y2.')])
def test_failing_properties(method, command):
    """Reading these Ametek7270 properties must raise ValueError when the
    instrument answers the query with an empty line."""
    with pytest.raises(ValueError):
        with expected_protocol(Ametek7270, [(f'{command}'.encode(), b'\n')]) as inst:
            # Property access triggers the query; the comparison result is
            # irrelevant -- the read itself is expected to raise.
            (getattr(inst, method) == 0.0)
class CTCCriterion(nn.Module):
    """Sum-reduced CTC loss applied to raw (unnormalized) model outputs.

    Log-probabilities are computed internally and the batch-first output is
    transposed to the (T, N, C) layout expected by ``nn.CTCLoss``.
    """

    def __init__(self, train_config):
        super().__init__()
        self.train_config = train_config
        self.logsoftmax = nn.LogSoftmax(dim=2)
        self.criterion = nn.CTCLoss(reduction='sum', zero_infinity=True)

    def forward(self, output_tensor: torch.tensor, output_lengths: torch.tensor, target_tensor: torch.tensor, target_lengths: torch.tensor):
        """Return the summed CTC loss for one batch.

        ``output_tensor`` is assumed batch-first (N, T, C) -- TODO confirm
        against the caller.
        """
        log_probs = self.logsoftmax(output_tensor).transpose(0, 1)
        return self.criterion(log_probs, target_tensor, output_lengths, target_lengths)
class MyR2plus1d(nn.Module):
    """R(2+1)D-18 video backbone with a freshly initialized linear head."""

    def __init__(self, num_classes, use_pretrained=True, init_std=0.01, model_name='r2plus1d'):
        super(MyR2plus1d, self).__init__()
        backbone = models.video.r2plus1d_18(pretrained=use_pretrained)
        feature_dim = backbone.fc.in_features
        self.init_std = init_std
        # Drop the original fc layer; keep every block before it.
        self.model_ft = nn.Sequential(*list(backbone.children())[:-1])
        self.clsfr = nn.Linear(feature_dim, num_classes)
        normal_init(self.clsfr, std=self.init_std)

    def forward(self, x):
        features = self.model_ft(x).squeeze()
        # squeeze() collapses a batch of one into a 1-D vector; restore the
        # batch dimension so the linear layer always sees (N, C).
        if features.dim() == 1:
            features = features.unsqueeze(0)
        return self.clsfr(features)
class AoA_Decoder_Core(nn.Module):
    """Decoder core of an Attention-on-Attention (AoA) captioning model.

    One decoding step: an attention LSTM consumes the word embedding plus a
    (optionally dropout-regularized) context, a visual attention module
    attends over image features, and an output head -- an AoA GLU block, an
    LSTM cell, or fc+ReLU, depending on ``opt.decoder_type`` -- produces the
    step output.
    """

    def __init__(self, opt):
        super(AoA_Decoder_Core, self).__init__()
        self.drop_prob_lm = opt.drop_prob_lm
        self.d_model = opt.rnn_size
        self.use_multi_head = opt.use_multi_head
        self.multi_head_scale = opt.multi_head_scale
        self.use_ctx_drop = getattr(opt, 'ctx_drop', 0)
        self.out_res = getattr(opt, 'out_res', 0)  # residual shortcut on output
        self.decoder_type = getattr(opt, 'decoder_type', 'AoA')
        # Attention LSTM input: [word embedding ; mean feature + prev context].
        self.att_lstm = nn.LSTMCell((opt.input_encoding_size + opt.rnn_size), opt.rnn_size)
        self.out_drop = nn.Dropout(self.drop_prob_lm)
        if (self.decoder_type == 'AoA'):
            # AoA head: linear to 2*rnn_size, then GLU gates one half by the other.
            self.att2ctx = nn.Sequential(nn.Linear(((self.d_model * opt.multi_head_scale) + opt.rnn_size), (2 * opt.rnn_size)), nn.GLU())
        elif (self.decoder_type == 'LSTM'):
            self.att2ctx = nn.LSTMCell(((self.d_model * opt.multi_head_scale) + opt.rnn_size), opt.rnn_size)
        else:
            # Base variant: plain fc + ReLU.
            self.att2ctx = nn.Sequential(nn.Linear(((self.d_model * opt.multi_head_scale) + opt.rnn_size), opt.rnn_size), nn.ReLU())
        if (opt.use_multi_head == 2):
            self.attention = MultiHeadedDotAttention(opt.num_heads, opt.rnn_size, project_k_v=0, scale=opt.multi_head_scale, use_output_layer=0, do_aoa=0, norm_q=1)
        else:
            self.attention = Attention(opt)
        if self.use_ctx_drop:
            self.ctx_drop = nn.Dropout(self.drop_prob_lm)
        else:
            self.ctx_drop = (lambda x: x)  # identity when ctx dropout is off

    def forward(self, xt, mean_feats, att_feats, p_att_feats, state, att_masks=None):
        """Run one decoding step.

        ``state`` stacks (h_att, h_ctx) and (c_att, c_ctx) as two tensors;
        returns ``(output, new_state)``.
        """
        # state[0][1] is the previous context hidden state; mix it (after
        # optional dropout) into the mean visual features.
        (h_att, c_att) = self.att_lstm(torch.cat([xt, (mean_feats + self.ctx_drop(state[0][1]))], 1), (state[0][0], state[1][0]))
        if (self.use_multi_head == 2):
            # p_att_feats packs projected keys and values side by side along
            # dim 2; narrow() splits them without copying.
            att = self.attention(h_att, p_att_feats.narrow(2, 0, (self.multi_head_scale * self.d_model)), p_att_feats.narrow(2, (self.multi_head_scale * self.d_model), (self.multi_head_scale * self.d_model)), att_masks)
        else:
            att = self.attention(h_att, att_feats, p_att_feats, att_masks)
        ctx_input = torch.cat([att, h_att], 1)
        if (self.decoder_type == 'LSTM'):
            (output, c_logic) = self.att2ctx(ctx_input, (state[0][1], state[1][1]))
            state = (torch.stack((h_att, output)), torch.stack((c_att, c_logic)))
        else:
            output = self.att2ctx(ctx_input)
            # Non-LSTM heads carry no cell state; reuse the previous one.
            state = (torch.stack((h_att, output)), torch.stack((c_att, state[1][1])))
        if self.out_res:
            # Residual shortcut from the attention LSTM hidden state.
            output = (output + h_att)
        output = self.out_drop(output)
        return (output, state)
class Model():
    """Neural collaborative-filtering model (TensorFlow 1.x).

    Builds twin MLP towers over user/item embedding rows, scores a
    user-item pair by the cosine similarity of the tower outputs, and
    trains with a cross-entropy-style loss on ratings normalized by the
    maximum rating.
    """

    def __init__(self, args):
        self.dataName = args.dataName
        self.dataSet = DataSet(self.dataName)
        self.shape = self.dataSet.shape        # (num_users, num_items)
        self.maxRate = self.dataSet.maxRate
        self.train = self.dataSet.train
        self.test = self.dataSet.test
        self.negNum = args.negNum              # negatives sampled per positive
        self.testNeg = self.dataSet.getTestNeg(self.test, 99)
        self.add_embedding_matrix()
        self.add_placeholders()
        self.userLayer = args.userLayer        # MLP widths, user tower
        self.itemLayer = args.itemLayer        # MLP widths, item tower
        self.add_model()
        self.add_loss()
        self.lr = args.lr
        self.add_train_step()
        self.checkPoint = args.checkPoint
        self.init_sess()
        self.maxEpochs = args.maxEpochs
        self.batchSize = args.batchSize
        self.topK = args.topK
        self.earlyStop = args.earlyStop

    def add_placeholders(self):
        """Create input placeholders: user/item ids, rating, dropout keep."""
        self.user = tf.placeholder(tf.int32)
        self.item = tf.placeholder(tf.int32)
        self.rate = tf.placeholder(tf.float32)
        self.drop = tf.placeholder(tf.float32)

    def add_embedding_matrix(self):
        """Use the rating matrix rows as user inputs; its transpose for items."""
        self.user_item_embedding = tf.convert_to_tensor(self.dataSet.getEmbedding())
        self.item_user_embedding = tf.transpose(self.user_item_embedding)

    def add_model(self):
        """Build both MLP towers and the cosine-similarity prediction y_."""
        user_input = tf.nn.embedding_lookup(self.user_item_embedding, self.user)
        item_input = tf.nn.embedding_lookup(self.item_user_embedding, self.item)

        def init_variable(shape, name):
            # Small-stddev truncated-normal initialization for all weights.
            return tf.Variable(tf.truncated_normal(shape=shape, dtype=tf.float32, stddev=0.01), name=name)
        with tf.name_scope('User_Layer'):
            user_W1 = init_variable([self.shape[1], self.userLayer[0]], 'user_W1')
            user_out = tf.matmul(user_input, user_W1)
            for i in range(0, (len(self.userLayer) - 1)):
                W = init_variable([self.userLayer[i], self.userLayer[(i + 1)]], ('user_W' + str((i + 2))))
                b = init_variable([self.userLayer[(i + 1)]], ('user_b' + str((i + 2))))
                user_out = tf.nn.relu(tf.add(tf.matmul(user_out, W), b))
        with tf.name_scope('Item_Layer'):
            item_W1 = init_variable([self.shape[0], self.itemLayer[0]], 'item_W1')
            item_out = tf.matmul(item_input, item_W1)
            for i in range(0, (len(self.itemLayer) - 1)):
                W = init_variable([self.itemLayer[i], self.itemLayer[(i + 1)]], ('item_W' + str((i + 2))))
                b = init_variable([self.itemLayer[(i + 1)]], ('item_b' + str((i + 2))))
                item_out = tf.nn.relu(tf.add(tf.matmul(item_out, W), b))
        # Cosine similarity of the tower outputs, clamped away from 0 so the
        # log-loss below stays finite on the low side.
        norm_user_output = tf.sqrt(tf.reduce_sum(tf.square(user_out), axis=1))
        norm_item_output = tf.sqrt(tf.reduce_sum(tf.square(item_out), axis=1))
        self.y_ = (tf.reduce_sum(tf.multiply(user_out, item_out), axis=1, keep_dims=False) / (norm_item_output * norm_user_output))
        self.y_ = tf.maximum(1e-06, self.y_)

    def add_loss(self):
        """Binary-cross-entropy-style loss against normalized ratings."""
        regRate = (self.rate / self.maxRate)
        # NOTE(review): tf.log(1 - y_) is unguarded; y_ == 1 yields -inf.
        losses = ((regRate * tf.log(self.y_)) + ((1 - regRate) * tf.log((1 - self.y_))))
        loss = (- tf.reduce_sum(losses))
        self.loss = loss

    def add_train_step(self):
        """Minimize the loss with Adam."""
        optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_step = optimizer.minimize(self.loss)

    def init_sess(self):
        """Create the session, initialize variables, reset checkpoint dir."""
        self.config = tf.ConfigProto()
        self.config.gpu_options.allow_growth = True
        self.config.allow_soft_placement = True
        self.sess = tf.Session(config=self.config)
        self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver()
        if os.path.exists(self.checkPoint):
            # BUG FIX: os.listdir returns bare filenames, so the original
            # os.remove(f) resolved against the CWD (wrong file or
            # FileNotFoundError). Join with the checkpoint directory, and
            # use a plain loop instead of a side-effect list comprehension.
            for f in os.listdir(self.checkPoint):
                os.remove(os.path.join(self.checkPoint, f))
        else:
            os.mkdir(self.checkPoint)

    def run(self):
        """Train up to maxEpochs with per-epoch evaluation and early stop."""
        best_hr = (- 1)
        best_NDCG = (- 1)
        best_epoch = (- 1)
        print('Start Training!')
        for epoch in range(self.maxEpochs):
            print((('=' * 20) + 'Epoch '), epoch, ('=' * 20))
            self.run_epoch(self.sess)
            print(('=' * 50))
            print('Start Evaluation!')
            (hr, NDCG) = self.evaluate(self.sess, self.topK)
            print('Epoch ', epoch, 'HR: {}, NDCG: {}'.format(hr, NDCG))
            # Checkpoint whenever either metric improves.
            if ((hr > best_hr) or (NDCG > best_NDCG)):
                best_hr = hr
                best_NDCG = NDCG
                best_epoch = epoch
                self.saver.save(self.sess, self.checkPoint)
            if ((epoch - best_epoch) > self.earlyStop):
                print('Normal Early stop!')
                break
            print((('=' * 20) + 'Epoch '), epoch, ('End' + ('=' * 20)))
        print('Best hr: {}, NDCG: {}, At Epoch {}'.format(best_hr, best_NDCG, best_epoch))
        print('Training complete!')

    def run_epoch(self, sess, verbose=10):
        """Run one shuffled pass over the (positive + negative) train set."""
        (train_u, train_i, train_r) = self.dataSet.getInstances(self.train, self.negNum)
        train_len = len(train_u)
        shuffled_idx = np.random.permutation(np.arange(train_len))
        train_u = train_u[shuffled_idx]
        train_i = train_i[shuffled_idx]
        train_r = train_r[shuffled_idx]
        num_batches = ((len(train_u) // self.batchSize) + 1)
        losses = []
        for i in range(num_batches):
            min_idx = (i * self.batchSize)
            max_idx = np.min([train_len, ((i + 1) * self.batchSize)])
            train_u_batch = train_u[min_idx:max_idx]
            train_i_batch = train_i[min_idx:max_idx]
            train_r_batch = train_r[min_idx:max_idx]
            feed_dict = self.create_feed_dict(train_u_batch, train_i_batch, train_r_batch)
            (_, tmp_loss) = sess.run([self.train_step, self.loss], feed_dict=feed_dict)
            losses.append(tmp_loss)
            if (verbose and ((i % verbose) == 0)):
                sys.stdout.write('\r{} / {} : loss = {}'.format(i, num_batches, np.mean(losses[(- verbose):])))
                sys.stdout.flush()
        loss = np.mean(losses)
        print('\nMean loss in this epoch is: {}'.format(loss))
        return loss

    def create_feed_dict(self, u, i, r=None, drop=None):
        """Map user/item/rating/dropout values onto the placeholders."""
        return {self.user: u, self.item: i, self.rate: r, self.drop: drop}

    def evaluate(self, sess, topK):
        """Return (mean HR@topK, mean NDCG@topK) over the test negatives."""

        def getHitRatio(ranklist, targetItem):
            # 1 if the held-out item appears in the top-K list.
            for item in ranklist:
                if (item == targetItem):
                    return 1
            return 0

        def getNDCG(ranklist, targetItem):
            # log2 discount by the target's (0-based) rank position.
            for i in range(len(ranklist)):
                item = ranklist[i]
                if (item == targetItem):
                    return (math.log(2) / math.log((i + 2)))
            return 0
        hr = []
        NDCG = []
        testUser = self.testNeg[0]
        testItem = self.testNeg[1]
        for i in range(len(testUser)):
            # testItem[i][0] is the positive item; the rest are negatives.
            target = testItem[i][0]
            feed_dict = self.create_feed_dict(testUser[i], testItem[i])
            predict = sess.run(self.y_, feed_dict=feed_dict)
            item_score_dict = {}
            for j in range(len(testItem[i])):
                item = testItem[i][j]
                item_score_dict[item] = predict[j]
            ranklist = heapq.nlargest(topK, item_score_dict, key=item_score_dict.get)
            tmp_hr = getHitRatio(ranklist, target)
            tmp_NDCG = getNDCG(ranklist, target)
            hr.append(tmp_hr)
            NDCG.append(tmp_NDCG)
        return (np.mean(hr), np.mean(NDCG))
def _get_dataframe_movielens(name: str, folder_path: Path) -> pd.DataFrame:
if (name == 'ml-1m'):
file_path = folder_path.joinpath('ratings.dat')
df = pd.read_csv(file_path, sep='::', header=None)
elif (name == 'ml-20m'):
file_path = folder_path.joinpath('ratings.csv')
df = pd.read_csv(file_path)
else:
raise ValueError('Invalid name')
return df |
def delimited_loads(explode: bool, name: str, schema_type: str, location: Mapping[(str, Any)], delimiter: str) -> Any:
    """Parse a non-exploded, delimiter-separated parameter from *location*.

    Arrays become lists of the delimited items; objects are built from
    alternating key/value items. Any other (explode, schema_type)
    combination is unsupported and raises ValueError.
    """
    value = location[name]
    if not explode:
        if schema_type == 'array':
            return split(value, separator=delimiter)
        if schema_type == 'object':
            # Items come in key/value pairs: split into 2-step chunks first,
            # then split each chunk into its (key, value) parts.
            pairs = split(value, separator=delimiter, step=2)
            return dict(map(partial(split, separator=delimiter), pairs))
    raise ValueError('not available')
class PrintTextWavePass(BasePass):
    """Pass that attaches an ASCII waveform recorder/printer to a component.

    When the ``enable`` metadata key is set, a generated dump function
    records every top-level signal's bit-string each cycle, and the
    component gains ``print_textwave`` to render the recording as a text
    waveform (single-bit signals as level traces, multi-bit as hex cells).
    """
    # Metadata keys consumed/produced by this pass.
    chars_per_cycle = MetadataKey(int)  # printed width of one clock cycle
    enable = MetadataKey(bool)  # master on/off switch
    textwave_func = MetadataKey()  # generated per-cycle dump function
    textwave_dict = MetadataKey()  # signal name -> list of recorded values

    def __call__(self, top):
        # Only instrument when explicitly enabled on the top component.
        if (top.has_metadata(self.enable) and top.get_metadata(self.enable)):
            assert (not top.has_metadata(self.textwave_func))
            assert (not top.has_metadata(self.textwave_dict))
            (func, sigs_dict) = self._collect_sig_func(top)
            top.set_metadata(self.textwave_func, func)
            top.set_metadata(self.textwave_dict, sigs_dict)
            top.print_textwave = self._gen_print_wave(top, sigs_dict)

    def _process_binary(self, sig, base, max):
        """Convert a '0b...' bit-string for display.

        base 10: interpret as a signed 32-bit integer.
        otherwise: return a hex string truncated/zero-padded to ``max``
        characters. (Note: ``max`` shadows the builtin here.)
        """
        if (sig[1] == 'b'):
            sig = sig[2:]  # strip the '0b' prefix
        if (base == 10):
            temp_int = int(sig, 2)
            if (sig[0] == '1'):
                # Leading 1 bit => negative under two's complement
                # (a 32-bit width is assumed -- TODO confirm).
                return (temp_int - (2 ** 32))
            else:
                return temp_int
        else:
            temp_hex = hex(int(sig, 2))[2:]
            l = len(temp_hex)
            if (l > max):
                temp_hex = temp_hex[(l - max):]  # keep least-significant digits
            if (l < max):
                temp_hex = (('0' * (max - l)) + temp_hex)  # left zero-pad
            return temp_hex

    def _gen_print_wave(self, top, sigs_dict):
        """Return a closure that prints the recorded values as a waveform."""
        def print_wave():
            # Cycle width in characters (must be even); configurable via the
            # chars_per_cycle metadata key, default 6.
            if top.has_metadata(self.chars_per_cycle):
                char_length = top.get_metadata(self.chars_per_cycle)
            else:
                char_length = 6
            assert ((char_length % 2) == 0)
            tick = '|'
            (up, down, x, low, high) = ('/', '\\', '|', '_', ' ')
            (revstart, revstop) = ('\x1b[7m', '\x1b[0m')
            light_gray = '\x1b[47m'
            back = '\x1b[0m'
            # NOTE(review): `text_sigs` is neither a local nor a parameter
            # here -- it resolves only because _collect_sig_func's exec call
            # mutates module globals via globals().update(locals()). Confirm
            # this coupling is intended (sigs_dict holds the same mapping).
            all_signal_values = text_sigs
            # Longest signal name (minus the 's.' prefix) sets the label column.
            max_length = 5
            for sig in all_signal_values:
                max_length = max(max_length, (len(sig) - 2))
            print('')
            # Header row: cycle indices separated by tick marks.
            print((' ' * (max_length + 1)), end='')
            for i in range(len(all_signal_values['s.reset'])):
                print(f'{tick}{str(i).ljust((char_length - 1))}', end='')
            print('')
            print('')
            # Synthetic clock trace: one rise/fall per cycle.
            clk_cycle_str = (((up + (((char_length - 2) // 2) * str(high))) + down) + (((char_length - 2) // 2) * str(low)))
            print('clk'.rjust(max_length), (clk_cycle_str * len(all_signal_values['s.reset'])))
            print('')
            for sig in all_signal_values:
                # Recorded values are '0b...' strings; width excludes prefix.
                bit_length = (len(all_signal_values[sig][0]) - 2)
                print(sig[2:].rjust(max_length), end=' ')
                if (bit_length == 1):
                    # Single-bit signal: draw levels with rise/fall edges.
                    prev_sig = None
                    for (i, val) in enumerate(all_signal_values[sig]):
                        if (val[2] == '1'):
                            current_sig = high
                        else:
                            current_sig = low
                        if (prev_sig is not None):
                            if ((prev_sig == low) and (current_sig == high)):
                                print((up + (current_sig * (char_length - 1))), end='')
                            elif ((prev_sig == high) and (current_sig == low)):
                                print((down + (current_sig * (char_length - 1))), end='')
                            else:
                                print((current_sig * char_length), end='')
                        else:
                            print((current_sig * char_length), end='')
                        prev_sig = current_sig
                    print('')
                else:
                    # Multi-bit signal: merge runs of identical values into
                    # one highlighted hex cell; '+' marks truncated values.
                    next = 0  # cycles already covered by the current run
                    val_list = all_signal_values[sig]
                    for i in range(len(val_list)):
                        if (next > 0):
                            next -= 1
                            continue
                        val = val_list[i]
                        # Find the end j of the run of identical values.
                        for j in range(i, len(val_list)):
                            if (val_list[j] != val):
                                j = (j - 1)
                                break
                        length = ((char_length - 1) + (char_length * (j - i)))
                        next = (j - i)
                        if (length >= (bit_length // (char_length - 1))):
                            length = (bit_length // (char_length - 1))
                            if ((bit_length % char_length) != 0):
                                length += 1
                            plus = False
                        else:
                            # Value does not fit: reserve one char for '+'.
                            length = (length - 1)
                            plus = True
                        current = self._process_binary(val, 16, length)
                        if plus:
                            if (i == 0):
                                print(((((light_gray + ' ') + '\x1b[30m') + '+') + current), end='')
                            else:
                                print(((((light_gray + '\x1b[30m') + x) + '+') + current), end='')
                        else:
                            if (i == 0):
                                print((((light_gray + ' ') + '\x1b[30m') + current), end='')
                            else:
                                print((((light_gray + '\x1b[30m') + x) + current), end='')
                        # Pad the remainder of the cell.
                        print((' ' * (((char_length - 1) + (char_length * (j - i))) - length)), end='')
                    print((back + ''))
            print('')
        return print_wave

    def _collect_sig_func(self, top):
        """Build the (dump function, recording dict) pair for ``top``.

        Generates source that appends each top-level signal's bit-string to
        text_sigs[name]; 's.reset' is always recorded first.
        """
        wav_srcs = []
        text_sigs = {}
        signal_names = []
        for x in top._dsl.all_signals:
            # clk/reset are rendered synthetically, so skip them here.
            if (x.is_top_level_signal() and (x.get_field_name() != 'clk') and (x.get_field_name() != 'reset')):
                signal_names.append((x._dsl.level, repr(x)))
        for (_, x) in ([(0, 's.reset')] + sorted(signal_names)):
            text_sigs[x] = []
            wav_srcs.append(f"text_sigs['{x}'].append( {x}.to_bits().bin() )")
        src = '\ndef dump_wav():\n {}\n'.format('\n '.join(wav_srcs))
        # NOTE(review): dict.update returns None, so exec actually receives
        # this module's globals -- deliberately(?) polluted with `s` and
        # `text_sigs` so dump_wav (and print_wave above) can resolve them.
        (s, l_dict) = (top, {})
        exec(compile(src, filename='temp', mode='exec'), globals().update(locals()), l_dict)
        return (l_dict['dump_wav'], text_sigs)
class TestAdamOptimizer(TestOptimizer, unittest.TestCase):
    """Optimizer test-suite specialization for Adam."""

    def _check_momentum_buffer(self):
        # Adam tracks exponential-average buffers, not SGD momentum;
        # the shared suite should skip that check.
        return False

    def _get_config(self):
        """Hyperparameter configuration used by the shared test harness."""
        config = {
            'name': 'adam',
            'num_epochs': 90,
            'lr': 0.1,
            'betas': (0.9, 0.99),
            'eps': 1e-08,
            'weight_decay': 0.0001,
            'amsgrad': False,
        }
        return config

    def _instance_to_test(self):
        return Adam
class SELayer(nn.Module):
    """Squeeze-and-Excitation style channel attention block.

    Globally average-pools each channel, passes the channel vector through
    a three-layer bottleneck MLP ending in a sigmoid, and rescales the
    input channels by the resulting weights.
    """

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        reduced = channel // reduction
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, reduced, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(reduced, reduced, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(reduced, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        # Squeeze: (B, C, H, W) -> (B, C) channel descriptors.
        weights = self.avg_pool(x).view(batch, channels)
        # Excite: per-channel gates in (0, 1), broadcast back over H and W.
        weights = self.fc(weights).view(batch, channels, 1, 1)
        return x * weights.expand_as(x)
def simple_perturb(text: str, method: str, perturbation_level=0.2):
    """Perturb roughly ``perturbation_level`` of the words in *text*.

    'segment' operates on the whole text at once; every other method
    mutates randomly chosen words one at a time until the target count of
    changed words is reached (or no candidates remain). Raises ValueError
    for a level outside [0, 1], or for an unknown method once a word has
    been drawn.
    """
    if not (0 <= perturbation_level <= 1):
        raise ValueError('Invalid value for perturbation level.')
    if method == 'segment':
        return segmentation(text, perturbation_level)
    # Word-level perturbation: dispatch table maps method name -> mutator.
    mutators = {
        'full-swap': lambda w: swap(w, inner=False),
        'inner-swap': lambda w: swap(w, inner=True),
        'intrude': lambda w: intruders(w, perturbation_level=perturbation_level),
        'disemvowel': disemvoweling,
        'truncate': truncating,
        'keyboard-typo': key,
        'natural-typo': natural,
    }
    words = nltk.word_tokenize(text)
    remaining = list(range(0, len(words)))
    changed = 0
    target = len(words) * perturbation_level
    while changed < target:
        if not remaining:
            break
        index = np.random.choice(remaining)
        remaining.remove(index)
        original_word = words[index]
        mutate = mutators.get(method)
        if mutate is None:
            # Matches the original behavior: the unknown-method error only
            # fires after a candidate word has been drawn.
            raise ValueError(f'Unknown operation {method}')
        mutated = mutate(original_word)
        words[index] = mutated
        # Only count a word as perturbed if the mutator actually changed it.
        if mutated != original_word:
            changed += 1
    return TreebankWordDetokenizer().detokenize(words)
def build_augmentation(cfg, is_train):
    """Build the augmentation pipeline(s) from the config.

    Training: returns a list of augmentations, or -- when cropping is
    enabled -- a (no-crop list, full list) pair so callers can fall back
    when a crop would be invalid. Evaluation: returns a single resize-only
    list.
    """
    logger = logging.getLogger(__name__)
    if not is_train:
        # Evaluation: deterministic shortest-edge resize only.
        return [T.ResizeShortestEdge(cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, 'choice')]
    aug_list = []
    if cfg.INPUT.CROP.ENABLED:
        aug_list.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
    sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
    # 'by_clip' sampling styles share one resize decision across the clip.
    ms_clip_frame_cnt = cfg.INPUT.SAMPLING_FRAME_NUM if 'by_clip' in sample_style else 1
    aug_list.append(ResizeShortestEdge(cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, sample_style, clip_frame_cnt=ms_clip_frame_cnt))
    if cfg.INPUT.RANDOM_FLIP != 'none':
        # 'flip_by_clip' makes one flip decision for the whole clip.
        flip_clip_frame_cnt = cfg.INPUT.SAMPLING_FRAME_NUM if cfg.INPUT.RANDOM_FLIP == 'flip_by_clip' else 1
        aug_list.append(RandomFlip(
            horizontal=(cfg.INPUT.RANDOM_FLIP == 'horizontal') or (cfg.INPUT.RANDOM_FLIP == 'flip_by_clip'),
            vertical=(cfg.INPUT.RANDOM_FLIP == 'vertical'),
            clip_frame_cnt=flip_clip_frame_cnt,
        ))
    augmentations = cfg.INPUT.AUGMENTATIONS
    if 'brightness' in augmentations:
        aug_list.append(T.RandomBrightness(0.9, 1.1))
    if 'contrast' in augmentations:
        aug_list.append(T.RandomContrast(0.9, 1.1))
    if 'saturation' in augmentations:
        aug_list.append(T.RandomSaturation(0.9, 1.1))
    if 'rotation' in augmentations:
        aug_list.append(T.RandomRotation([(- 15), 15], expand=False, center=[(0.4, 0.4), (0.6, 0.6)], sample_style='range'))
    if not cfg.INPUT.CROP.ENABLED:
        return aug_list
    # Cropping enabled: also provide the same pipeline minus the leading crop.
    aug_no_crop = copy.deepcopy(aug_list)
    del aug_no_crop[0]
    return (aug_no_crop, aug_list)
class ResourceTest(unittest.TestCase):
    """Tests for Resource copying and the named-resource registry."""

    def test_copy_resource(self) -> None:
        base_caps = {'test_key': 'test_value', 'old_key': 'old_value'}
        original = Resource(1, 2, 3, base_caps)
        copied = Resource.copy(original, test_key='test_value_new', new_key='new_value')
        # Scalar fields carry over unchanged.
        self.assertEqual(copied.cpu, 1)
        self.assertEqual(copied.gpu, 2)
        self.assertEqual(copied.memMB, 3)
        # Capabilities: overridden key replaced, new key added, old key kept.
        self.assertEqual(len(copied.capabilities), 3)
        self.assertEqual(copied.capabilities['old_key'], 'old_value')
        self.assertEqual(copied.capabilities['test_key'], 'test_value_new')
        self.assertEqual(copied.capabilities['new_key'], 'new_value')
        # The source resource must not be mutated by the copy.
        self.assertEqual(original.capabilities['test_key'], 'test_value')

    def test_named_resources(self) -> None:
        # Each factory function must agree with its registry entry.
        pairs = [
            (named_resources_aws.aws_m5_2xlarge, 'aws_m5.2xlarge'),
            (named_resources_aws.aws_t3_medium, 'aws_t3.medium'),
            (named_resources_aws.aws_p3_2xlarge, 'aws_p3.2xlarge'),
            (named_resources_aws.aws_p3_8xlarge, 'aws_p3.8xlarge'),
        ]
        for factory, registry_key in pairs:
            self.assertEqual(factory(), named_resources[registry_key])

    def test_named_resources_contains(self) -> None:
        self.assertTrue(('aws_p3.8xlarge' in named_resources))
        self.assertFalse(('nonexistant' in named_resources))

    def test_resource_util_fn(self) -> None:
        # Defaults and keyword overrides of the resource() helper.
        self.assertEqual(Resource(cpu=2, gpu=0, memMB=1024), resource())
        self.assertEqual(Resource(cpu=1, gpu=0, memMB=1024), resource(cpu=1))
        self.assertEqual(Resource(cpu=2, gpu=1, memMB=1024), resource(cpu=2, gpu=1))
        self.assertEqual(Resource(cpu=2, gpu=1, memMB=2048), resource(cpu=2, gpu=1, memMB=2048))
        # A named handle takes precedence over explicit cpu/gpu values.
        handle = 'aws_t3.medium'
        self.assertEqual(named_resources[handle], resource(h=handle))
        self.assertEqual(named_resources[handle], resource(cpu=16, gpu=4, h='aws_t3.medium'))
class OpenVPNCollector(diamond.collector.Collector):
    """Diamond collector publishing OpenVPN status metrics.

    Status data is read either from a status log file
    (``file:///path/status.log``) or from the OpenVPN management interface
    over TCP (``tcp://host:port``). (Python 2 codebase: keeps
    ``basestring``, ``long`` and the ``urlparse`` module.)
    """

    def get_default_config_help(self):
        config_help = super(OpenVPNCollector, self).get_default_config_help()
        config_help.update({'instances': 'List of instances to collect stats from', 'timeout': 'network timeout'})
        return config_help

    def get_default_config(self):
        """Return the default collector settings."""
        config = super(OpenVPNCollector, self).get_default_config()
        config.update({'path': 'openvpn', 'instances': 'file:///var/log/openvpn/status.log', 'timeout': '10'})
        return config

    def parse_url(self, uri):
        """Parse *uri*, normalizing tuple-style urlparse results into an
        object with named attributes."""
        parsed = urlparse.urlparse(uri)
        if ('scheme' not in parsed):
            # NOTE(review): on ancient Pythons urlparse returned a plain
            # 6-tuple; this shim rebuilds attribute access. The `in` test
            # checks tuple membership of the literal string 'scheme', not
            # attribute presence -- confirm this guard is intended.
            class Object(object):
                pass
            newparsed = Object()
            newparsed.scheme = parsed[0]
            newparsed.netloc = parsed[1]
            newparsed.path = parsed[2]
            newparsed.params = parsed[3]
            newparsed.query = parsed[4]
            newparsed.fragment = parsed[5]
            newparsed.username = ''
            newparsed.password = ''
            newparsed.hostname = ''
            newparsed.port = ''
            parsed = newparsed
        return parsed

    def collect(self):
        """Dispatch each configured instance URI to its collect_<scheme>
        handler (collect_file / collect_tcp)."""
        if isinstance(self.config['instances'], basestring):
            instances = [self.config['instances']]
        else:
            instances = self.config['instances']
        for uri in instances:
            parsed = self.parse_url(uri)
            collect = getattr(self, ('collect_%s' % (parsed.scheme,)), None)
            if collect:
                collect(uri)
            else:
                self.log.error('OpenVPN no handler for %s', uri)

    def collect_file(self, uri):
        """Read a status log file and feed its lines to parse().

        The path may carry '?name' to override the metric name; the default
        is the file's basename without extension.
        """
        parsed = self.parse_url(uri)
        filename = parsed.path
        if ('?' in filename):
            (filename, name) = filename.split('?')
        else:
            name = os.path.splitext(os.path.basename(filename))[0]
        if (not os.access(filename, os.R_OK)):
            self.log.error('OpenVPN collect failed: unable to read "%s"', filename)
            return
        else:
            self.log.info('OpenVPN parsing "%s" file: %s', name, filename)
        fd = open(filename, 'r')
        lines = fd.readlines()
        fd.close()
        self.parse(name, lines)

    def collect_tcp(self, uri):
        """Query the OpenVPN management interface over TCP for status."""
        parsed = self.parse_url(uri)
        try:
            (host, port) = parsed.netloc.split(':')
            port = int(port)
        except ValueError:
            self.log.error('OpenVPN expected host:port in URI, got "%s"', parsed.netloc)
            return
        # Metric name: the path (sans leading '/') when a '?' marker is
        # present, else the host with dots replaced.
        # NOTE(review): the '?' itself is kept in the name -- confirm intended.
        if ('?' in parsed.path):
            name = parsed.path[1:]
        else:
            name = host.replace('.', '_')
        self.log.info('OpenVPN parsing "%s" tcp: %s:%d', name, host, port)
        try:
            server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server.settimeout(int(self.config['timeout']))
            server.connect((host, port))
            fd = server.makefile('rb')
            line = fd.readline()
            # The management interface greets with '>INFO:OpenVPN ...'.
            if (not line.startswith('>INFO:OpenVPN')):
                self.log.debug('OpenVPN received: %s', line.rstrip())
                self.log.error('OpenVPN protocol error')
                server.close()
                return
            server.send('status\r\n')
            lines = []
            while True:
                line = fd.readline()
                lines.append(line)
                if (line.strip() == 'END'):
                    break
            self.parse(name, lines)
            server.close()
        except socket.error as e:
            self.log.error('OpenVPN management connection error: %s', e)
            return

    def parse(self, name, lines):
        """Parse OpenVPN status output and publish the metrics found."""
        for line in lines:
            self.log.debug('OpenVPN: %s', line.rstrip())
        # BUG FIX: a stray time.sleep(0.5) per status line was removed from
        # the loop above -- it stalled the collector for len(lines)/2
        # seconds and served no purpose.
        number_connected_clients = 0
        section = ''
        heading = []
        for line in lines:
            if (line.strip() == 'END'):
                break
            elif line.lower().startswith('openvpn statistics'):
                section = 'statistics'
            elif line.lower().startswith('openvpn client list'):
                section = 'clients'
            elif line.lower().startswith('routing table'):
                section = ''  # routing-table entries are not published
            elif line.lower().startswith('global stats'):
                section = 'global'
            elif (',' in line):
                (key, value) = line.split(',', 1)
                if (key.lower() == 'updated'):
                    continue
                if (section == 'statistics'):
                    self.publish_number('.'.join([name, 'global', key]), value)
                elif (section == 'clients'):
                    if (not heading):
                        # First row of the client list holds column names.
                        heading = line.strip().split(',')
                    else:
                        info = {}
                        number_connected_clients += 1
                        for (k, v) in zip(heading, line.strip().split(',')):
                            info[k.lower()] = v
                        self.publish_number('.'.join([name, section, info['common name'].replace('.', '_'), 'bytes_rx']), info['bytes received'])
                        self.publish_number('.'.join([name, section, info['common name'].replace('.', '_'), 'bytes_tx']), info['bytes sent'])
                elif (section == 'global'):
                    self.publish_number('.'.join([name, section, key]), value)
            elif line.startswith('END'):
                break
        self.publish(('%s.clients.connected' % name), number_connected_clients)

    def publish_number(self, key, value):
        """Sanitize *key* and publish *value* if it parses as an integer."""
        key = key.replace('/', '-').replace(' ', '_').lower()
        try:
            value = long(value)
        except ValueError:
            self.log.error('OpenVPN expected a number for "%s", got "%s"', key, value)
            return
        else:
            self.publish(key, value)
def test_type(make_union):
    # Bare `type` / `Type` normalize to `type` parameterized with Any.
    assert_normalize(type, type, [nt_zero(Any)])
    assert_normalize(Type, type, [nt_zero(Any)])
    # Explicit type arguments are preserved.
    assert_normalize(Type[int], type, [nt_zero(int)])
    assert_normalize(Type[Any], type, [nt_zero(Any)])
    # Type[Union[...]] distributes into a Union of Type[...] members.
    assert_normalize(Type[make_union[(int, str)]], Union, [normalize_type(Type[int]), normalize_type(Type[str])])
    # Distribution merges with sibling Type[...] members of an outer Union.
    assert_normalize(Union[(Type[make_union[(int, str)]], Type[bool])], Union, [normalize_type(Type[int]), normalize_type(Type[str]), normalize_type(Type[bool])])
    # Duplicates produced by the distribution are deduplicated.
    assert_normalize(Union[(Type[make_union[(int, str)]], Type[int])], Union, [normalize_type(Type[int]), normalize_type(Type[str])])
def generate_number(vcf_number, alt_alleles):
    """Resolve a VCF 'Number' field into a concrete value count.

    '.'  -> arbitrary small random count (unknown cardinality)
    int  -> that fixed count
    'A'  -> one per alternate allele
    'R'  -> one per allele (alternates + reference)
    'G'  -> one per possible genotype (multiset coefficient over ploidy)

    Raises ValueError for any other code.
    """
    if vcf_number == '.':
        # Unknown cardinality: draw an arbitrary count in [1, 10).
        return np.random.randint(1, 10)
    if str_is_int(vcf_number):
        return int(vcf_number)
    if vcf_number == 'A':
        return alt_alleles
    if vcf_number == 'R':
        return alt_alleles + 1
    if vcf_number == 'G':
        # Genotype count = C(n_alleles + ploidy - 1, ploidy); `ploidy` is a
        # module-level value defined elsewhere in this file.
        n_alleles = alt_alleles + 1
        return comb(n_alleles + ploidy - 1, ploidy, exact=True)
    raise ValueError(f"Number '{vcf_number}' is not supported.")
# BUG FIX: the source had lost the "@expose(" decorator prefix, leaving an
# invalid bare tuple line. Restored per this module's primitive-registration
# convention -- confirm the decorator name matches the file's other prims.
@expose('hash-clear!', [W_HashTable], simple=False)
def hash_clear_bang(ht, env, cont):
    """Racket ``hash-clear!``: remove every mapping from a mutable table.

    Impersonated tables go through hash_clear_proc so interposition hooks
    run; plain tables are emptied directly and return void.
    """
    from pycket.interpreter import return_value
    if ht.is_impersonator():
        # NOTE(review): the hash_clear_proc result is discarded before
        # entering the clear loop -- confirm this matches the continuation
        # protocol used by the sibling primitives.
        ht.hash_clear_proc(env, cont)
        return hash_clear_loop(ht, env, cont)
    else:
        ht.hash_empty()
        return return_value(values.w_void, env, cont)
class BVMFExchangeCalendar(TradingCalendar):
    """Exchange calendar for B3/BVMF (São Paulo).

    Opens 10:01 local time; closes 17:00, moving to 18:00 from 2019-11-04.
    """
    name = 'BVMF'
    tz = timezone('America/Sao_Paulo')
    open_times = ((None, time(10, 1)),)
    close_times = ((None, time(17, 0)), (pd.Timestamp('2019-11-04'), time(18, 0)))

    # BUG FIX: TradingCalendar consumes adhoc_holidays / regular_holidays as
    # properties; the @property decorators had been lost from the source.
    @property
    def adhoc_holidays(self):
        """One-off closures (2014 World Cup opening day)."""
        return [CopaDoMundo2014]

    @property
    def regular_holidays(self):
        """Recurring Brazilian market holidays."""
        return HolidayCalendar([ConfUniversal, AniversarioSaoPaulo, CarnavalSegunda, CarnavalTerca, SextaPaixao, CorpusChristi, Tiradentes, DiaTrabalho, Constitucionalista_prepandemic, Constitucionalista_pospandemic, Independencia, Aparecida, Finados, ProclamacaoRepublica, ConscienciaNegra, VesperaNatal, Natal, AnoNovo])
# BUG FIX: both decorator lines had lost their '@' prefixes (a bare tuple
# with a keyword argument is a SyntaxError). The route owner object was not
# recoverable from this chunk; '@app.route' is assumed -- confirm against
# this module's Flask app / blueprint name.
@app.route('/get/previewinfo', methods=['GET'])
@_wrapper_json
def get_previewinfo():
    """Return the migration preview payload.

    Includes the total zone/domain count, the migrated-history entries
    (rooms and translated ISPs sorted for stable output), and the migrated
    ACL subnets.
    """
    trans = MigrateDal.get_isp_trans()
    domain_count = ViewRecordDal.zone_domain_count()
    migrate_list = []
    histories = MigrateDal.get_migrated_history()
    for history in histories:
        migrate_list.append({'migrate_rooms': sorted(json.loads(history.migrate_rooms)), 'dst_rooms': sorted(json.loads(history.dst_rooms)), 'migrate_isps': sorted([trans[isp] for isp in json.loads(history.migrate_isps)])})
    migrate_acl_subnet = ViewIspAclDal.get_migrate_subnet()
    return {'domain_count': domain_count, 'migrate': migrate_list, 'acl_migrate': migrate_acl_subnet}
def deprecated(*, reason, version):
    """Decorator factory that marks a callable as deprecated.

    Emits a SKCriteriaDeprecationWarning on call (escalated to an error
    once the library version reaches the removal threshold) and appends a
    Sphinx ``deprecated`` directive to the wrapped function's docstring.
    """
    version = _vparse(str(version))
    # Past the error threshold, the deprecation warning becomes fatal.
    action = 'error' if _ERROR_GE_VERSION <= version else 'once'
    add_warning = _deprecated(reason=reason, version=version, category=SKCriteriaDeprecationWarning, action=action)

    def _dec(func):
        wrapped = add_warning(func)
        wrapped.__doc__ = add_sphinx_deprecated_directive(func.__doc__, reason=reason, version=version)
        return wrapped

    return _dec
class BoundingProvider(RequestClassDeterminedProvider, ProviderWithRC):
    """Provider wrapper that gates a delegate behind a RequestChecker.

    ``apply_provider`` first validates the request with the checker (which
    raises on mismatch) and only then delegates to the wrapped provider.
    """

    def __init__(self, request_checker: RequestChecker, provider: Provider):
        self._request_checker = request_checker
        self._provider = provider

    def apply_provider(self, mediator: Mediator, request: Request[T]) -> T:
        # check_request raises if the request is out of scope for this provider.
        self._request_checker.check_request(mediator, request)
        return self._provider.apply_provider(mediator, request)

    def __repr__(self):
        return f'{type(self).__name__}({self._request_checker}, {self._provider})'

    def maybe_can_process_request_cls(self, request_cls: Type[Request]) -> bool:
        # Delegate the class-level pre-filter when the wrapped provider
        # supports it; otherwise stay permissive -- the checker still guards
        # every actual call.
        if isinstance(self._provider, RequestClassDeterminedProvider):
            return self._provider.maybe_can_process_request_cls(request_cls)
        return True

    def get_request_checker(self) -> Optional[RequestChecker]:
        return self._request_checker
def main_worker(gpu, ngpus_per_node, args):
    """Per-process training entry point (one process per GPU when
    multiprocessing-distributed).

    Builds the model, criterion, optimizer and data loaders, optionally
    resumes from a checkpoint, then runs the train/validate loop for
    ``args.epochs`` epochs, checkpointing the best top-1 accuracy.
    """
    global best_acc1
    args.gpu = gpu
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        # rank == -1 means "read the global rank from the environment".
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * GPUs-per-node + local GPU index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()
    if (not torch.cuda.is_available()):
        print('using CPU, this will be slow')
    elif args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split the per-node batch size and workers across this node's GPUs.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            # No specific GPU assigned: DDP spans all visible devices.
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    elif (args.arch.startswith('alexnet') or args.arch.startswith('vgg')):
        # AlexNet/VGG: data-parallelize only the conv features.
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if (args.gpu is None):
                checkpoint = torch.load(args.resume)
            else:
                # Map checkpoint tensors straight onto this process's GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if (args.gpu is not None):
                # best_acc1 may be a tensor saved from a different device.
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle the shards differently each epoch.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        train(train_loader, model, criterion, optimizer, epoch, args)
        acc1 = validate(val_loader, model, criterion, args)
        is_best = (acc1 > best_acc1)
        best_acc1 = max(acc1, best_acc1)
        # Only the first process per node writes checkpoints.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer': optimizer.state_dict()}, is_best)
def GetTestData(path, cfg):
    """Load a wav file and compute test features for voice conversion.

    Reads and silence-trims the audio, resamples it to ``cfg.sampling_rate``
    if needed, peak-normalizes clipping signals, then computes the
    log-mel spectrogram and a log-F0 track (WORLD dio + stonemask).

    Returns:
        (wav, mel, lf0) -- the processed waveform, log-mel features of shape
        (frames, n_mels), and float32 log-F0 with unvoiced frames at 0.
    """
    sr = cfg.sampling_rate
    (wav, fs) = sf.read(path)
    # Trim leading/trailing silence below cfg.top_db.
    (wav, _) = librosa.effects.trim(y=wav, top_db=cfg.top_db)
    if (fs != sr):
        wav = resampy.resample(x=wav, sr_orig=fs, sr_new=sr, axis=0)
        fs = sr
    # BUG FIX: the original asserted `fs == 16000`, which spuriously failed
    # for any configured sampling rate other than 16 kHz even after a
    # successful resample; compare against the configured target instead.
    assert (fs == sr), 'Downsampling needs to be done.'
    # Peak-normalize only when the signal exceeds [-1, 1].
    peak = np.abs(wav).max()
    if (peak > 1.0):
        wav /= peak
    mel = logmelspectrogram(x=wav, fs=cfg.sampling_rate, n_mels=cfg.n_mels, n_fft=cfg.n_fft, n_shift=cfg.n_shift, win_length=cfg.win_length, window=cfg.window, fmin=cfg.fmin, fmax=cfg.fmax)
    tlen = mel.shape[0]
    # Hop size in milliseconds for WORLD's F0 estimator.
    frame_period = ((cfg.n_shift / cfg.sampling_rate) * 1000)
    (f0, timeaxis) = pw.dio(wav.astype('float64'), cfg.sampling_rate, frame_period=frame_period)
    f0 = pw.stonemask(wav.astype('float64'), f0, timeaxis, cfg.sampling_rate)
    # Align F0 length with the mel frame count.
    f0 = f0[:tlen].reshape((- 1)).astype('float32')
    # Log-F0 with unvoiced (zero) frames left at 0 to avoid log(0).
    nonzeros_indices = np.nonzero(f0)
    lf0 = f0.copy()
    lf0[nonzeros_indices] = np.log(f0[nonzeros_indices])
    return (wav, mel, lf0)
def generate_customers_sql_values(file_name):
    """Print SQL VALUES tuples built from a customers CSV file.

    The file must have a header row followed by
    ``id,first_name,last_name,age,joined_at`` rows; an empty ``age`` is
    emitted as SQL ``NULL``. Tuples are printed comma-separated, one per
    line, ready to paste after an ``INSERT ... VALUES`` statement.

    NOTE: fields are split naively on ',' -- quoted commas inside values
    are not supported (use the csv module if that ever becomes a need).
    """
    # BUG FIX: the original opened the file without ever closing it; use a
    # context manager so the handle is released deterministically.
    with open(file_name, 'r') as f:
        rows = f.readlines()[1:]  # skip the header row
    out = []
    for line in rows:
        (cust_id, first_name, last_name, age, joined_at) = line.strip().split(',')
        out.append("({id}, '{first_name}', '{last_name}', {age}, '{joined_at}')".format(
            id=cust_id, first_name=first_name, last_name=last_name,
            age=(age or 'NULL'), joined_at=joined_at))
    print(',\n'.join(out))
class MultilingualDeltaLMTokenizer(DeltaLMTokenizer):
    """DeltaLM tokenizer with multilingual (language-tag) support.

    Registers per-language special tokens on top of the base sentencepiece
    vocabulary and prefixes the source-language tag during tokenization.

    BUG FIX: the @property / @src_lang.setter / @classmethod decorators
    below had been stripped (a bare `_lang.setter` expression raised
    NameError at class-creation time); they are restored here to the
    conventional tokenizer interface.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, src_lang=None, tgt_lang=None, eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', sp_model_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> None:
        # Reset the added-token registries before the base class populates them.
        self.added_tokens_decoder = None
        self.added_tokens_encoder = None
        self.additional_special_tokens = None
        # NOTE(review): **kwargs is deliberately not forwarded to the base
        # __init__ (matches the original behavior) -- confirm this is intended.
        super(MultilingualDeltaLMTokenizer, self).__init__(vocab_file, src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=sp_model_kwargs)

    @property
    def vocab_size(self) -> int:
        """Size of the sentencepiece vocabulary plus the fairseq offset."""
        return (len(self.sp_model) + self.fairseq_offset)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang

    def override_lang_list(self, lang_list):
        """Register ``lang_list`` entries as added language tokens.

        Languages already present in the fairseq special-token map are
        skipped; the rest get consecutive ids starting at 250001.
        """
        _lang_list = []
        for lang in lang_list:
            if (lang in self.fairseq_tokens_to_ids):
                continue
            else:
                _lang_list.append(lang)
        lang_list = _lang_list
        self.additional_special_tokens = lang_list
        start = 250001  # first id after the base multilingual vocabulary -- TODO confirm
        self.added_tokens_encoder.clear()
        for (i, lang) in enumerate(lang_list):
            self.added_tokens_encoder[lang] = (start + i)
        # Rebuild the reverse (id -> token) map to mirror the encoder.
        self.added_tokens_decoder.clear()
        for word in self.added_tokens_encoder:
            self.added_tokens_decoder[self.added_tokens_encoder[word]] = word

    def tokenize(self, text, **kwargs):
        """Tokenize ``text``; whitespace-separated input consisting entirely
        of registered added tokens is passed through unchanged."""
        if (text in self.added_tokens_encoder):
            return [text]
        repeated_ = True
        tokens = text.strip().split()
        for token in tokens:
            if (token not in self.added_tokens_encoder):
                repeated_ = False
        if repeated_:
            return tokens
        return super(MultilingualDeltaLMTokenizer, self).tokenize(text, **kwargs)

    def _tokenize(self, text: str) -> List[str]:
        # Prefix the source-language tag unless src_lang is a plain marker.
        if (self.src_lang in ['</s>', 'src', 'tgt']):
            return self.sp_model.encode(text, out_type=str)
        else:
            return ([self.src_lang] + self.sp_model.encode(text, out_type=str))

    @classmethod
    def from_pretrained(cls, *args, lang_list=[], **kwargs):
        """Load a pretrained tokenizer, then register ``lang_list`` languages.

        NOTE: the mutable default ``lang_list=[]`` is only iterated, never
        mutated, so it is safe here.
        """
        tokenizer = super(MultilingualDeltaLMTokenizer, cls).from_pretrained(*args, **kwargs)
        tokenizer.override_lang_list(lang_list)
        return tokenizer
@pytest.mark.parametrize('has_global_name', [False, True])
@pytest.mark.parametrize('existing', [False, True])
def test_browser_discord_login_callback_with_sid(mocker: pytest_mock.MockerFixture, clean_database, flask_app, existing, has_global_name):
    """Discord OAuth callback with a socket sid: creates/updates the user,
    emits the session update over socketio, and renders the return page.

    BUG FIX: the two parametrize decorators above had lost their
    '@pytest.mark.' prefix (a bare '.parametrize(...)' is a syntax error).
    """
    mock_emit = mocker.patch('flask_socketio.emit')
    mock_render = mocker.patch('flask.render_template')
    sa = MagicMock()
    session = {}
    sa.session.return_value.__enter__.return_value = session
    sa.get_session.return_value = session
    sa.fernet_encrypt.encrypt.return_value = b'encrypted'
    discord_user = sa.discord.fetch_user.return_value
    discord_user.id = 1234
    discord_user.name = 'A Name'
    discord_user.to_json.return_value = {'global_name': ('Global Name' if has_global_name else None)}
    # The global_name wins over the plain username when present.
    expected_name = ('Global Name' if has_global_name else 'A Name')
    if existing:
        User.create(discord_id=discord_user.id, name='Someone else')
    with flask_app.test_request_context():
        flask.session['sid'] = 'TheSid'
        flask.session['DISCORD_OAUTH2_TOKEN'] = 'The_Token'
        result = user_session.browser_discord_login_callback(sa)
    user = User.get((User.discord_id == 1234))
    sa.discord.callback.assert_called_once_with()
    sa.discord.callback.fetch_user()  # NOTE(review): stray call, not an assertion -- confirm intent
    mock_emit.assert_called_once_with('user_session_update', {'encoded_session_b85': b'Wo~0~d2n=PWB', 'user': {'discord_id': 1234, 'id': 1, 'name': expected_name}}, to='TheSid', namespace='/')
    mock_render.assert_called_once_with('return_to_randovania.html', user=user)
    assert (result is mock_render.return_value)
    assert (user.name == expected_name)
    assert (session == {'discord-access-token': 'The_Token', 'user-id': 1})
def duplicate_module(module_file: Union[(str, os.PathLike)], old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, dest_file: Optional[str]=None, add_copied_from: bool=True, attrs_to_remove: List[str]=None):
    """Create ``dest_file`` by copying ``module_file`` with every occurrence
    of the old model's naming patterns replaced by the new model's.

    Archive-map/archive-list constants are regenerated from the new
    checkpoint, '# Copied from' markers are added to renamed classes and
    functions when ``add_copied_from`` is True, and any ``attrs_to_remove``
    attributes are stripped from the result.
    """
    if (dest_file is None):
        # Default destination: same path with the lowercased model name swapped.
        dest_file = str(module_file).replace(old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased)
    with open(module_file, 'r', encoding='utf-8') as f:
        content = f.read()
    # Refresh the copyright year in the license header.
    content = re.sub('# Copyright (\\d+)\\s', f'# Copyright {CURRENT_YEAR} ', content)
    objects = parse_module_content(content)
    new_objects = []
    for obj in objects:
        if ('PRETRAINED_CONFIG_ARCHIVE_MAP = {' in obj):
            # Regenerate the config archive map for the new checkpoint.
            obj = (((f'{new_model_patterns.model_upper_cased}_PRETRAINED_CONFIG_ARCHIVE_MAP = ' + '{') + f'''
    "{new_model_patterns.checkpoint}": "
''') + '}\n')
            new_objects.append(obj)
            continue
        elif ('PRETRAINED_MODEL_ARCHIVE_LIST = [' in obj):
            # Preserve the framework prefix (TF_/FLAX_) of the original constant.
            if obj.startswith('TF_'):
                prefix = 'TF_'
            elif obj.startswith('FLAX_'):
                prefix = 'FLAX_'
            else:
                prefix = ''
            obj = f'''{prefix}{new_model_patterns.model_upper_cased}_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "{new_model_patterns.checkpoint}",
    # See all {new_model_patterns.model_name} models at
]
'''
            new_objects.append(obj)
            continue
        special_pattern = False
        for (pattern, attr) in SPECIAL_PATTERNS.items():
            if (pattern in obj):
                # Special-pattern objects get a single targeted attribute swap.
                obj = obj.replace(getattr(old_model_patterns, attr), getattr(new_model_patterns, attr))
                new_objects.append(obj)
                special_pattern = True
                break
        if special_pattern:
            continue
        old_obj = obj
        (obj, replacement) = replace_model_patterns(obj, old_model_patterns, new_model_patterns)
        has_copied_from = (re.search('^#\\s+Copied from', obj, flags=re.MULTILINE) is not None)
        if (add_copied_from and (not has_copied_from) and (_re_class_func.search(obj) is not None) and (len(replacement) > 0)):
            # Annotate renamed classes/functions with their origin.
            module_name = get_module_from_file(module_file)
            old_object_name = _re_class_func.search(old_obj).groups()[0]
            obj = add_content_to_text(obj, f'# Copied from {module_name}.{old_object_name} with {replacement}', add_before=_re_class_func)
        # Drop indented (method-level) 'Copied from' comments; the regex
        # requires leading spaces, so top-level markers are kept.
        obj = re.sub('\n[ ]+# Copied from [^\n]*\n', '\n', obj)
        new_objects.append(obj)
    content = '\n'.join(new_objects)
    if (attrs_to_remove is not None):
        for attr in attrs_to_remove:
            content = remove_attributes(content, target_attr=attr)
    with open(dest_file, 'w', encoding='utf-8') as f:
        f.write(content)
class TestRepositoryManifestLabels(ApiTestCase):
    """API tests for adding, listing, filtering and deleting manifest labels."""

    def test_basic_labels(self):
        """End-to-end label lifecycle: rejects bad key/media type, adds three
        labels, fetches each by id, deletes one, then filters by key."""
        self.login(ADMIN_ACCESS_USER)
        repo_ref = registry_model.lookup_repository(ADMIN_ACCESS_USER, 'complex')
        tag = registry_model.get_repo_tag(repo_ref, 'prod')
        repository = (ADMIN_ACCESS_USER + '/complex')
        # A fresh manifest starts with no labels.
        json = self.getJsonResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest))
        self.assertEqual(0, len(json['labels']))
        # Invalid key and invalid media type are both rejected with 400.
        self.postJsonResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest), data=dict(key='bad_label', value='world', media_type='text/plain'), expected_code=400)
        self.postJsonResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest), data=dict(key='hello', value='world', media_type='bad_media_type'), expected_code=400)
        # Each successful add must produce a 'manifest_label_add' log action.
        with assert_action_logged('manifest_label_add'):
            label1 = self.postJsonResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest), data=dict(key='hello', value='world', media_type='text/plain'), expected_code=201)
        with assert_action_logged('manifest_label_add'):
            label2 = self.postJsonResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest), data=dict(key='hi', value='there', media_type='text/plain'), expected_code=201)
        with assert_action_logged('manifest_label_add'):
            label3 = self.postJsonResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest), data=dict(key='hello', value='someone', media_type='application/json'), expected_code=201)
        json = self.getJsonResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest))
        self.assertEqual(3, len(json['labels']))
        # Labels get distinct ids and keep their media types.
        self.assertNotEqual(label2['label']['id'], label1['label']['id'])
        self.assertNotEqual(label3['label']['id'], label1['label']['id'])
        self.assertNotEqual(label2['label']['id'], label3['label']['id'])
        self.assertEqual('text/plain', label1['label']['media_type'])
        self.assertEqual('text/plain', label2['label']['media_type'])
        self.assertEqual('application/json', label3['label']['media_type'])
        # Each listed label is retrievable individually by its id.
        for label in json['labels']:
            label_json = self.getJsonResponse(ManageRepositoryManifestLabel, params=dict(repository=repository, manifestref=tag.manifest_digest, labelid=label['id']))
            self.assertEqual(label['id'], label_json['id'])
        # Deletion is logged and reduces the count.
        with assert_action_logged('manifest_label_delete'):
            self.deleteEmptyResponse(ManageRepositoryManifestLabel, params=dict(repository=repository, manifestref=tag.manifest_digest, labelid=label1['label']['id']))
        json = self.getJsonResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest))
        self.assertEqual(2, len(json['labels']))
        # Filtering by key returns only the matching label(s).
        json = self.getJsonResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest, filter='hello'))
        self.assertEqual(1, len(json['labels']))

    def test_prefixed_labels(self):
        """Namespaced (dotted-prefix) label keys are accepted."""
        self.login(ADMIN_ACCESS_USER)
        repo_ref = registry_model.lookup_repository(ADMIN_ACCESS_USER, 'complex')
        tag = registry_model.get_repo_tag(repo_ref, 'prod')
        repository = (ADMIN_ACCESS_USER + '/complex')
        self.postJsonResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest), data=dict(key='com.dockers.whatever', value='pants', media_type='text/plain'), expected_code=201)
        self.postJsonResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest), data=dict(key='my.cool.prefix.for.my.label', value='value', media_type='text/plain'), expected_code=201)

    def test_add_invalid_media_type(self):
        """An unknown media type is rejected with 400."""
        self.login(ADMIN_ACCESS_USER)
        repo_ref = registry_model.lookup_repository(ADMIN_ACCESS_USER, 'complex')
        tag = registry_model.get_repo_tag(repo_ref, 'prod')
        repository = (ADMIN_ACCESS_USER + '/complex')
        self.postResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest), data=dict(key='hello', value='world', media_type='some/invalid'), expected_code=400)

    def test_add_invalid_key(self):
        """Empty, malformed, and reserved-prefix keys are all rejected."""
        self.login(ADMIN_ACCESS_USER)
        repo_ref = registry_model.lookup_repository(ADMIN_ACCESS_USER, 'complex')
        tag = registry_model.get_repo_tag(repo_ref, 'prod')
        repository = (ADMIN_ACCESS_USER + '/complex')
        self.postResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest), data=dict(key='', value='world'), expected_code=400)
        self.postResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest), data=dict(key='invalid___key', value='world'), expected_code=400)
        self.postResponse(RepositoryManifestLabels, params=dict(repository=repository, manifestref=tag.manifest_digest), data=dict(key='io.docker.whatever', value='world'), expected_code=400)
class AtomDataType(object):
    """Type-indicator codes for MP4/iTunes-style metadata ``data`` atoms.

    The numeric values describe how a data atom's payload is encoded
    (presumably following Apple's iTunes metadata type indicators --
    confirm against the spec).
    """
    IMPLICIT = 0  # payload type implied by the atom name
    UTF8 = 1  # UTF-8 text
    UTF16 = 2  # UTF-16 text
    SJIS = 3  # Shift-JIS text
    HTML = 6  # HTML document
    XML = 7  # XML document
    UUID = 8  # raw UUID bytes
    ISRC = 9  # ISRC code as text
    MI3P = 10  # MI3P identifier as text
    GIF = 12  # GIF image
    JPEG = 13  # JPEG image
    PNG = 14  # PNG image
    URL = 15  # URL as text
    DURATION = 16  # duration in milliseconds (integer)
    DATETIME = 17  # timestamp (integer)
    GENRES = 18  # genre id(s)
    INTEGER = 21  # signed big-endian integer
    RIAA_PA = 24  # RIAA parental-advisory flag
    UPC = 25  # UPC code as text
    BMP = 27  # BMP image
def evaluate(args, iteration, miner, miner_semantic):
    """Render a sample grid and compute FID for the current miner state.

    Switches the miners to eval mode, generates ``gen_i`` rows of samples
    from the fixed noise bank, saves the image grid, computes FID against
    the cached real activations, then restores train mode.

    Returns:
        dict mapping metric name to value (currently only 'fid').
    """
    (gen_i, gen_j) = args.gen_sample.get(args.image_size, (10, 5))
    images = []
    miner.eval()
    miner_semantic.eval()
    with torch.no_grad():
        for i in range(gen_i):
            # G_running_target, fixed_noise, step and alpha are module-level
            # globals shared with the training loop -- confirm.
            images.append(G_running_target(miner(fixed_noise[i].cuda()), step=step, alpha=alpha, miner_semantic=miner_semantic).cpu())
    sample_path = f'{args.save_path}/sample/{args.name}/{str(iteration).zfill(6)}.png'
    utils.save_image(torch.cat(images, dim=0), sample_path, nrow=gen_i, normalize=True, range=((- 1), 1))
    sample_num = args.sample_num
    (fake_images, fake_acts) = get_fake_images_and_acts(inception, G_running_target, code_size, step, alpha, sample_num, batch_size, miner, miner_semantic)
    fid = compute_fid(real_acts, fake_acts)
    metrics = {'fid': fid}
    # Restore training mode before returning to the caller.
    miner.train()
    miner_semantic.train()
    return metrics
def batch_psnr(gen_frames, gt_frames):
    """Mean PSNR (in dB, 255 peak) between two batches of frames.

    Both inputs are indexed by batch on axis 0; the squared error is
    averaged per frame over axes 1 and 2, and the per-frame PSNR values
    are averaged into a single scalar.
    """
    diff = np.int32(gen_frames) - np.int32(gt_frames)
    pixels_per_frame = float(np.size(gen_frames[0]))
    mse = np.sum(diff ** 2, axis=(1, 2), dtype=np.float32) / pixels_per_frame
    psnr_values = 20 * np.log10(255) - 10 * np.log10(mse)
    return np.mean(psnr_values)
class TestArgs():
    """Tests for the ``args`` attribute of wait-signal blockers."""

    def test_simple(self, qtbot, signaller):
        """Emitted arguments are captured on the blocker."""
        with qtbot.waitSignal(signaller.signal_args) as waiter:
            signaller.signal_args.emit('test', 123)
        assert waiter.args == ['test', 123]

    def test_timeout(self, qtbot, signaller):
        """On timeout (non-raising), no args are recorded."""
        with qtbot.waitSignal(signaller.signal, timeout=100, raising=False) as waiter:
            pass
        assert waiter.args is None

    def test_without_args(self, qtbot, signaller):
        """A no-argument signal yields an empty args list."""
        with qtbot.waitSignal(signaller.signal) as waiter:
            signaller.signal.emit()
        assert waiter.args == []

    def test_multi(self, qtbot, signaller):
        """waitSignals (plural) blockers expose no ``args`` attribute."""
        with qtbot.waitSignals([signaller.signal]) as waiter:
            signaller.signal.emit()
        with pytest.raises(AttributeError):
            waiter.args

    def test_connected_signal(self, qtbot, signaller):
        """Args from an additionally connected signal are captured too."""
        with qtbot.waitSignal(signaller.signal_args) as waiter:
            waiter.connect(signaller.signal_args_2)
            signaller.signal_args_2.emit('foo', 2342)
        assert waiter.args == ['foo', 2342]
def test_035_parseTime_suppress_auto_month():
    """METAR day-of-month in the future with month=1 forced: the parser
    must keep month 1 and roll the year back when today is in January."""
    next_day = tomorrow.day
    # BUG FIX: `last_year` was only assigned inside an `if next_day >
    # today.day` guard, so the `report.time.year == last_year` assertion
    # below could raise UnboundLocalError (e.g. on the last day of a month
    # in January); compute it unconditionally instead.
    last_year = (today.year - 1)
    timestr = ('%02d1651Z' % next_day)
    report = Metar.Metar(('KEWR ' + timestr), month=1)
    assert report.decode_completed
    assert (report.time.day == next_day)
    assert (report.time.month == 1)
    if (today.month > 1):
        assert (report.time.year == today.year)
    else:
        assert (report.time.year == last_year)
def test_add_opening_quote_basic_quote_added(cmd2_app):
    """Completing text that matches values containing spaces should
    prepend an opening quote to every match."""
    text = 'Ha'
    line = f'test_basic {text}'
    endidx = len(line)
    begidx = endidx - len(text)
    expected = sorted(['"Ham', '"Ham Sandwich'], key=cmd2_app.default_sort_key)
    first_match = complete_tester(text, line, begidx, endidx, cmd2_app)
    assert first_match is not None
    assert cmd2_app.completion_matches == expected
def test_struct_port_single(do_test):
    """An InPort of a two-field bitstruct should translate to the expected
    mangled symbol name and port declaration string."""
    class struct():
        bar: Bits32
        foo: Bits32
    class A(Component):
        def construct(s):
            s.in_ = InPort(struct)
    a = A()
    # Expected translator output: mangled struct symbol and its declaration.
    a._ref_symbols = {'struct__bar_32__foo_32': struct}
    a._ref_decls = ['s.in_ = InPort( struct__bar_32__foo_32 )']
    do_test(a)
class GCN(object):
    """Graph Convolutional Network node classifier (TF1-style).

    NOTE: training runs inside __init__ -- data is preprocessed,
    placeholders and the model are built, and the train/validation loop
    executes immediately with early stopping on the validation loss.
    """

    def __init__(self, graph, learning_rate=0.01, epochs=200, hidden1=16, dropout=0.5, weight_decay=0.0005, early_stopping=10, max_degree=3, clf_ratio=0.1):
        """Train a GCN on ``graph``.

        clf_ratio is the fraction of nodes used for training; 100 of the
        training-slot nodes are held out for validation (see
        build_train_val_test).
        """
        self.graph = graph
        self.clf_ratio = clf_ratio
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.hidden1 = hidden1
        self.dropout = dropout
        self.weight_decay = weight_decay
        self.early_stopping = early_stopping
        self.max_degree = max_degree
        self.preprocess_data()
        self.build_placeholders()
        self.model = models.GCN(self.placeholders, input_dim=self.features[2][1], hidden1=self.hidden1, weight_decay=self.weight_decay, logging=True)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        cost_val = []
        for epoch in range(self.epochs):
            t = time.time()
            feed_dict = self.construct_feed_dict(self.train_mask)
            feed_dict.update({self.placeholders['dropout']: self.dropout})
            outs = self.sess.run([self.model.opt_op, self.model.loss, self.model.accuracy], feed_dict=feed_dict)
            (cost, acc, duration) = self.evaluate(self.val_mask)
            cost_val.append(cost)
            print('Epoch:', ('%04d' % (epoch + 1)), 'train_loss=', '{:.5f}'.format(outs[1]), 'train_acc=', '{:.5f}'.format(outs[2]), 'val_loss=', '{:.5f}'.format(cost), 'val_acc=', '{:.5f}'.format(acc), 'time=', '{:.5f}'.format((time.time() - t)))
            # Early stopping: halt when validation loss exceeds its mean
            # over the last `early_stopping` epochs.
            if ((epoch > self.early_stopping) and (cost_val[(- 1)] > np.mean(cost_val[(- (self.early_stopping + 1)):(- 1)]))):
                print('Early stopping...')
                break
        print('Optimization Finished!')
        (test_cost, test_acc, test_duration) = self.evaluate(self.test_mask)
        print('Test set results:', 'cost=', '{:.5f}'.format(test_cost), 'accuracy=', '{:.5f}'.format(test_acc), 'time=', '{:.5f}'.format(test_duration))

    def evaluate(self, mask):
        """Return (loss, accuracy, wall time) over the nodes selected by ``mask``."""
        t_test = time.time()
        feed_dict_val = self.construct_feed_dict(mask)
        outs_val = self.sess.run([self.model.loss, self.model.accuracy], feed_dict=feed_dict_val)
        return (outs_val[0], outs_val[1], (time.time() - t_test))

    def build_placeholders(self):
        """Create the TF placeholders consumed by the GCN model."""
        num_supports = 1
        self.placeholders = {'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], 'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(self.features[2], dtype=tf.int64)), 'labels': tf.placeholder(tf.float32, shape=(None, self.labels.shape[1])), 'labels_mask': tf.placeholder(tf.int32), 'dropout': tf.placeholder_with_default(0.0, shape=()), 'num_features_nonzero': tf.placeholder(tf.int32)}

    def build_label(self):
        """Build the one-hot (multi-label) node-label matrix ``self.labels``."""
        g = self.graph.G
        look_up = self.graph.look_up_dict
        labels = []
        label_dict = {}
        label_id = 0
        # First pass: collect labels and assign each distinct label an id.
        for node in g.nodes():
            labels.append((node, g.nodes[node]['label']))
            for l in g.nodes[node]['label']:
                if (l not in label_dict):
                    label_dict[l] = label_id
                    label_id += 1
        self.labels = np.zeros((len(labels), label_id))
        self.label_dict = label_dict
        # Second pass: set the indicator entries.
        for (node, l) in labels:
            node_id = look_up[node]
            for ll in l:
                l_id = label_dict[ll]
                self.labels[node_id][l_id] = 1

    def build_train_val_test(self):
        """Split nodes into train/val/test masks via a seeded shuffle.

        The first (training_size - 100) shuffled nodes train, the next 100
        validate, and the remainder test.
        """
        train_precent = self.clf_ratio
        training_size = int((train_precent * self.graph.G.number_of_nodes()))
        # Use a fixed seed for the split, then restore the RNG state.
        state = np.random.get_state()
        np.random.seed(0)
        shuffle_indices = np.random.permutation(np.arange(self.graph.G.number_of_nodes()))
        np.random.set_state(state)
        look_up = self.graph.look_up_dict
        g = self.graph.G
        def sample_mask(begin, end):
            # Binary mask selecting shuffled nodes in [begin, end).
            mask = np.zeros(g.number_of_nodes())
            for i in range(begin, end):
                mask[shuffle_indices[i]] = 1
            return mask
        self.train_mask = sample_mask(0, (training_size - 100))
        self.val_mask = sample_mask((training_size - 100), training_size)
        self.test_mask = sample_mask(training_size, g.number_of_nodes())

    def preprocess_data(self):
        """Assemble node features, labels, masks and the normalized adjacency."""
        g = self.graph.G
        look_back = self.graph.look_back_list
        self.features = np.vstack([g.nodes[look_back[i]]['feature'] for i in range(g.number_of_nodes())])
        self.features = preprocess_features(self.features)
        self.build_label()
        self.build_train_val_test()
        adj = nx.adjacency_matrix(g)
        self.support = [preprocess_adj(adj)]

    def construct_feed_dict(self, labels_mask):
        """Build a feed dict for one session run using ``labels_mask``."""
        feed_dict = dict()
        feed_dict.update({self.placeholders['labels']: self.labels})
        feed_dict.update({self.placeholders['labels_mask']: labels_mask})
        feed_dict.update({self.placeholders['features']: self.features})
        feed_dict.update({self.placeholders['support'][i]: self.support[i] for i in range(len(self.support))})
        feed_dict.update({self.placeholders['num_features_nonzero']: self.features[1].shape})
        return feed_dict
class Solution(object):
    def convertBST(self, root):
        """Convert a BST into a Greater Tree.

        Each node's value becomes its original value plus the sum of all
        values greater than it, via a reverse in-order (right, node, left)
        traversal carrying a running total. Mutates the tree in place and
        returns ``root``.
        """
        running = [0]  # boxed accumulator shared by the nested visitor

        def visit(node):
            if node is None:
                return
            visit(node.right)
            running[0] += node.val
            node.val = running[0]
            visit(node.left)

        visit(root)
        return root
def setUpModule():
    """Build the shared N2/aug-cc-pVDZ RHF reference and ADC object used
    by the tests in this module (stored in module globals)."""
    global mol, mf, myadc
    r = 1.098  # N-N bond length, presumably Angstrom (near-equilibrium N2) -- confirm
    mol = gto.Mole()
    mol.atom = [['N', (0.0, 0.0, ((- r) / 2))], ['N', (0.0, 0.0, (r / 2))]]
    mol.basis = {'N': 'aug-cc-pvdz'}
    mol.verbose = 0
    mol.build()
    mf = scf.RHF(mol)
    mf.conv_tol = 1e-12  # tight SCF convergence for stable post-HF energies
    mf.kernel()
    myadc = adc.ADC(mf)
def clear_mem():
    """Reset every module-level mining structure to its initial state."""
    global K, M, N, S, unum, ww
    global my_dict, sub_ptn_list, ptn_len, sDB
    global minsup, NumbS, freArr, canArr, candidate
    # Capacity constants: K sequences, M sub-patterns, N total length bound.
    K = 600
    M = 100
    N = 60000
    S = ''
    unum = [0.0] * K
    ww = 0
    my_dict = {}
    # Comprehensions keep each slot a distinct object.
    sub_ptn_list = [sub_ptn() for _ in range(M)]
    ptn_len = 0
    sDB = [seqdb() for _ in range(K)]
    minsup = 0
    NumbS = 0
    freArr = [[] for _ in range(M)]
    canArr = [[] for _ in range(M)]
    candidate = []
def get_inceptionv3(model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Construct an InceptionV3 network, optionally loading stored weights.

    model_name identifies the weight file to download when ``pretrained``
    is True; ``root`` is the local model-store directory. Extra kwargs are
    forwarded to the InceptionV3 constructor.
    """
    init_block_channels = 192
    channels = [[256, 288, 288], [768, 768, 768, 768, 768], [1280, 2048, 2048]]
    b_mid_channels = [128, 160, 160, 192]
    net = InceptionV3(
        channels=channels,
        init_block_channels=init_block_channels,
        b_mid_channels=b_mid_channels,
        **kwargs)
    if pretrained:
        # `not model_name` covers both None and the empty string.
        if not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
# BUG FIX: the decorator below had been mangled into a bare
# `_model_architecture(...)` call; restored to fairseq's architecture
# registration decorator. NOTE(review): ensure `register_model_architecture`
# is imported at the top of this file.
@register_model_architecture('model_parallel_transformer_lm', 'transformer_lm_megatron_11b')
def transformer_lm_megatron_11b(args):
    """Megatron-11B architecture preset for the model-parallel transformer LM.

    Fills in defaults (embed dim 3072, FFN dim 18432, 72 layers, 32 heads,
    GELU, 0.1 dropout) for any option the user did not set, then delegates
    to the base LM architecture for the remaining defaults.
    """
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 3072)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', (3072 * 6))
    args.decoder_layers = getattr(args, 'decoder_layers', 72)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 32)
    args.dropout = getattr(args, 'dropout', 0.1)
    args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
    args.activation_fn = getattr(args, 'activation_fn', 'gelu')
    base_lm_architecture(args)
def list_models(filter='', module='', pretrained=False, exclude_filters='', name_matches_cfg=False):
    """Return registered model names, naturally sorted.

    filter/exclude_filters are fnmatch-style wildcard pattern(s) (a single
    string or a tuple/list of strings); ``module`` restricts the search to
    one registration module; ``pretrained`` keeps only models that have
    pretrained weights; ``name_matches_cfg`` keeps only names that exactly
    match a pretrained-config key.
    """
    if module:
        candidates = list(_module_to_models[module])
    else:
        candidates = _model_entrypoints.keys()
    if filter:
        selected = []
        include_patterns = filter if isinstance(filter, (tuple, list)) else [filter]
        for pattern in include_patterns:
            matched = fnmatch.filter(candidates, pattern)
            # Only union non-empty matches so an unmatched pattern is a no-op.
            if len(matched):
                selected = set(selected).union(matched)
    else:
        selected = candidates
    if exclude_filters:
        if not isinstance(exclude_filters, (tuple, list)):
            exclude_filters = [exclude_filters]
        for pattern in exclude_filters:
            dropped = fnmatch.filter(selected, pattern)
            if len(dropped):
                selected = set(selected).difference(dropped)
    if pretrained:
        selected = _model_has_pretrained.intersection(selected)
    if name_matches_cfg:
        selected = set(_model_pretrained_cfgs).intersection(selected)
    return list(sorted(selected, key=_natural_key))
class Trainer():
    def __init__(self, ):
        """Parse options, build the multitask model and its train/validation
        dataloaders, then immediately launch training via ``_train``."""
        self._opt = TrainOptions().parse()
        PRESET_VARS = PATH(self._opt)
        self._model = ModelsFactory.get_by_name(self._opt.model_name, self._opt)
        # Augmentation transforms for training; deterministic ones for validation.
        train_transforms = self._model.resnet50.backbone.augment_transforms
        val_transforms = self._model.resnet50.backbone.compose_transforms
        self.training_dataloaders = Multitask_DatasetDataLoader(self._opt, train_mode='Train', transform=train_transforms)
        self.training_dataloaders = self.training_dataloaders.load_multitask_train_data()
        self.validation_dataloaders = Multitask_DatasetDataLoader(self._opt, train_mode='Validation', transform=val_transforms)
        self.validation_dataloaders = self.validation_dataloaders.load_multitask_val_test_data()
        print('Traning Tasks:{}'.format(self._opt.tasks))
        # Each step draws batch_size samples per task.
        actual_bs = (self._opt.batch_size * len(self._opt.tasks))
        print('The actual batch size is {}*{}={}'.format(self._opt.batch_size, len(self._opt.tasks), actual_bs))
        print('Training sets: {} images ({} images per task)'.format((len(self.training_dataloaders) * actual_bs), (len(self.training_dataloaders) * self._opt.batch_size)))
        print('Validation sets')
        for task in self._opt.tasks:
            data_loader = self.validation_dataloaders[task]
            print('{}: {} images'.format(task, ((len(data_loader) * self._opt.batch_size) * len(self._opt.tasks))))
        # Per-phase loss/metric histories used for checkpoint plots.
        self.visual_dict = {'training': pd.DataFrame(), 'validation': pd.DataFrame()}
        self._train()
    def _train(self):
        """Top-level training driver.

        Trains the teacher (unless a pretrained one is provided), freezes
        it, then trains ``n_students`` students via knowledge distillation,
        checkpointing whenever validation accuracy improves.
        """
        self._total_steps = ((self._opt.load_epoch * len(self.training_dataloaders)) * self._opt.batch_size)
        self._last_display_time = None
        self._last_save_latest_time = None
        self._last_print_time = time.time()
        self._current_val_acc = 0.0
        if (len(self._opt.pretrained_teacher_model) == 0):
            # Stage 1: train the teacher from scratch.
            for i_epoch in range((self._opt.load_epoch + 1), (self._opt.teacher_nepochs + 1)):
                epoch_start_time = time.time()
                self._model.get_current_LR()
                self._train_epoch(i_epoch)
                self.training_dataloaders.reset()
                val_acc = self._validate(i_epoch)
                if (val_acc > self._current_val_acc):
                    # New best validation accuracy: checkpoint the teacher.
                    print('validation acc improved, from {:.4f} to {:.4f}'.format(self._current_val_acc, val_acc))
                    print(('saving the model at the end of epoch %d, steps %d' % (i_epoch, self._total_steps)))
                    self._model.save('teacher')
                    self._current_val_acc = val_acc
                self.save_visual_dict('teacher')
                self.save_logging_image('teacher')
                time_epoch = (time.time() - epoch_start_time)
                print(('End of epoch %d / %d \t Time Taken: %d sec (%d min or %d h)' % (i_epoch, self._opt.teacher_nepochs, time_epoch, (time_epoch / 60), (time_epoch / 3600))))
        else:
            # Load the provided pretrained teacher weights instead.
            self._model.resnet50.load_state_dict(torch.load(self._opt.pretrained_teacher_model))
        # Freeze a copy of the trained model as the distillation teacher.
        self._teacher_model = deepcopy(self._model)
        del self._model
        self._model = None
        self._teacher_model.set_eval()
        # Stage 2: distill each student from the frozen teacher.
        for i_student in range(self._opt.n_students):
            self._current_val_acc = 0.0
            self._model = ModelsFactory.get_by_name(self._opt.model_name, self._opt)
            self.visual_dict = {'training': pd.DataFrame(), 'validation': pd.DataFrame()}
            for i_epoch in range(1, (self._opt.student_nepochs + 1)):
                epoch_start_time = time.time()
                self._model.get_current_LR()
                self._train_epoch_kd(i_epoch)
                self.training_dataloaders.reset()
                val_acc = self._validate(i_epoch)
                if (val_acc > self._current_val_acc):
                    print('validation acc improved, from {:.4f} to {:.4f}'.format(self._current_val_acc, val_acc))
                    print(('saving the model at the end of epoch %d, steps %d' % (i_epoch, self._total_steps)))
                    self._model.save('student_{}'.format(i_student))
                    self._current_val_acc = val_acc
                self.save_visual_dict('student_{}'.format(i_student))
                self.save_logging_image('student_{}'.format(i_student))
                time_epoch = (time.time() - epoch_start_time)
                print(('End of epoch %d / %d \t Time Taken: %d sec (%d min or %d h)' % (i_epoch, self._opt.student_nepochs, time_epoch, (time_epoch / 60), (time_epoch / 3600))))
    def _train_epoch(self, i_epoch):
        """Run one supervised training epoch for the teacher model."""
        epoch_iter = 0
        self._model.set_train()
        for (i_train_batch, train_batch) in enumerate(self.training_dataloaders):
            iter_start_time = time.time()
            # Throttle terminal logging to at most once per print_freq_s seconds.
            do_print_terminal = ((time.time() - self._last_print_time) > self._opt.print_freq_s)
            self._model.set_input(train_batch)
            self._model.optimize_parameters()
            torch.cuda.empty_cache()
            self._total_steps += self._opt.batch_size
            epoch_iter += self._opt.batch_size
            self.save_training_loss_to_visual_dict()
            if do_print_terminal:
                self._display_terminal(iter_start_time, i_epoch, i_train_batch, len(self.training_dataloaders))
                self._last_print_time = time.time()
    def _train_epoch_kd(self, i_epoch):
        """Run one knowledge-distillation training epoch for a student,
        optimizing against the frozen teacher model."""
        epoch_iter = 0
        self._model.set_train()
        for (i_train_batch, train_batch) in enumerate(self.training_dataloaders):
            iter_start_time = time.time()
            # Throttle terminal logging to at most once per print_freq_s seconds.
            do_print_terminal = ((time.time() - self._last_print_time) > self._opt.print_freq_s)
            self._model.set_input(train_batch)
            # The only difference from _train_epoch: the student step also
            # consumes the teacher's outputs.
            self._model.optimize_parameters_kd(self._teacher_model)
            torch.cuda.empty_cache()
            self._total_steps += self._opt.batch_size
            epoch_iter += self._opt.batch_size
            self.save_training_loss_to_visual_dict()
            if do_print_terminal:
                self._display_terminal(iter_start_time, i_epoch, i_train_batch, len(self.training_dataloaders))
                self._last_print_time = time.time()
def save_training_loss_to_visual_dict(self):
    """Append the model's current training losses to the 'training' log frame.

    Rows are indexed by the global optimizer step (total samples seen
    divided by batch size).

    FIX(review): ``DataFrame.append`` was deprecated in pandas 1.4 and
    removed in pandas 2.0; use ``pd.concat`` instead (same resulting frame).
    """
    loss_dict = self._model.get_current_errors()
    step = self._total_steps // self._opt.batch_size
    row = pd.DataFrame(loss_dict, columns=list(loss_dict.keys()), index=[step])
    self.visual_dict['training'] = pd.concat([self.visual_dict['training'], row])
def save_validation_res_to_visual_dict(self, eval_res):
    """Append per-task validation metrics to the 'validation' log frame.

    eval_res -- dict of metric name -> value, as produced by ``_validate``.
    Rows are indexed by the global optimizer step.

    FIX(review): ``DataFrame.append`` was removed in pandas 2.0; use
    ``pd.concat`` instead (same resulting frame).
    """
    step = self._total_steps // self._opt.batch_size
    row = pd.DataFrame(eval_res, columns=list(eval_res.keys()), index=[step])
    self.visual_dict['validation'] = pd.concat([self.visual_dict['validation'], row])
def save_visual_dict(self, save_name):
    """Pickle the in-memory logging dict to <checkpoints_dir>/<name>/<save_name>.pkl.

    FIX(review): the original ``pickle.dump(..., open(path, 'wb'))`` leaked the
    file handle; a ``with`` block guarantees it is closed even if pickling fails.
    """
    save_path = os.path.join(self._opt.checkpoints_dir, self._opt.name, '{}.pkl'.format(save_name))
    with open(save_path, 'wb') as handle:
        pickle.dump(self.visual_dict, handle)
def save_logging_image(self, save_name):
    """Reload the pickled logging dict and render train/val curves as PNGs.

    Reads <save_name>.pkl from the checkpoint directory (written by
    ``save_visual_dict``) and delegates plotting to ``save_plots``.

    FIX(review): the original ``pickle.load(open(path, 'rb'))`` leaked the
    file handle; use a ``with`` block.
    """
    load_path = os.path.join(self._opt.checkpoints_dir, self._opt.name, '{}.pkl'.format(save_name))
    with open(load_path, 'rb') as handle:
        visual_dict = pickle.load(handle)
    train_path = os.path.join(self._opt.checkpoints_dir, self._opt.name, save_name + '_train.png')
    val_path = os.path.join(self._opt.checkpoints_dir, self._opt.name, save_name + '_val.png')
    save_plots(visual_dict, train_path, val_path)
def _display_terminal(self, iter_start_time, i_epoch, i_train_batch, num_batches):
errors = self._model.get_current_errors()
t = (time.time() - iter_start_time)
start_time = time.strftime('%H:%M', time.localtime(iter_start_time))
output = 'Time {}\tBatch Time {:.2f}\t Epoch [{}]([{}/{}])\t loss {:.4f}\t'.format(start_time, t, i_epoch, i_train_batch, num_batches, errors['loss'])
for task in self._opt.tasks:
if (task != 'VA'):
output += 'loss_{} {:.4f}\t'.format(task, errors['loss_{}'.format(task)])
else:
output += 'loss_valence {:.4f}\t'.format(errors['loss_valence'])
output += 'loss_arousal {:.4f}\t'.format(errors['loss_arousal'])
print(output)
def _validate(self, i_epoch):
    """Evaluate the model on every task's validation dataloader.

    For each task: accumulate batch losses, collect predictions and labels,
    compute the task metric, and print a per-task summary line. Returns the
    sum of all per-task metric values (used by the caller as the overall
    validation score; the 'VA' task contributes valence + arousal separately).
    Side effects: toggles the model eval/train mode and appends the results
    to the visual dict via ``save_validation_res_to_visual_dict``.
    """
    val_start_time = time.time()
    self._model.set_eval()
    eval_per_task = {}
    for task in self._opt.tasks:
        track_val_preds = {'preds': []}
        track_val_labels = {'labels': []}
        val_errors = OrderedDict()
        data_loader = self.validation_dataloaders[task]
        for (i_val_batch, val_batch) in tqdm(enumerate(data_loader), total=len(data_loader)):
            # The model API expects a {task: batch} mapping plus the task list.
            wrapped_v_batch = {task: val_batch}
            self._model.set_input(wrapped_v_batch, input_tasks=[task])
            (outputs, errors) = self._model.forward(return_estimates=True, input_tasks=[task])
            # Sum each loss component over batches; averaged below.
            for (k, v) in errors.items():
                if (k in val_errors):
                    val_errors[k] += v
                else:
                    val_errors[k] = v
            track_val_preds['preds'].append(outputs[task][task])
            track_val_labels['labels'].append(wrapped_v_batch[task]['label'])
        # Average the accumulated losses over the number of batches.
        for k in val_errors.keys():
            val_errors[k] /= len(data_loader)
        preds = np.concatenate(track_val_preds['preds'], axis=0)
        labels = np.concatenate(track_val_labels['labels'], axis=0)
        metric_func = self._model.get_metrics_per_task()[task]
        # eval_items: per-component metric values; eval_res: combined score.
        (eval_items, eval_res) = metric_func(preds, labels)
        now_time = time.strftime('%H:%M', time.localtime(val_start_time))
        output = '{} Validation {}: Epoch [{}] Step [{}] loss {:.4f} Eval_0 {:.4f} Eval_1 {:.4f}'.format(task, now_time, i_epoch, self._total_steps, val_errors['loss'], eval_items[0], eval_items[1])
        print(output)
        if (task != 'VA'):
            eval_per_task[task] = eval_res
        else:
            # VA is reported as its two components, not the combined score.
            eval_per_task['valence'] = eval_items[0]
            eval_per_task['arousal'] = eval_items[1]
    print('Validation Performance:')
    output = ''
    for task in eval_per_task.keys():
        output += '{} Metric: {:.4f} '.format(task, eval_per_task[task])
    print(output)
    # Restore training mode before returning to the training loop.
    self._model.set_train()
    self.save_validation_res_to_visual_dict(eval_per_task)
    return sum([eval_per_task[k] for k in eval_per_task])
class Overlord(cmd2.Cmd):
    """cmd2-based interactive console for building and deploying
    red-team infrastructure campaigns via terraform.

    FIX(review): the argparse decorator lines had lost their leading
    '@cmd2.' prefix (bare '.with_argparser(parser)' lines are syntax
    errors); they are restored as proper '@cmd2.with_argparser' decorators.

    NOTE(review): the statements directly in the class body below
    (screen clear, banner print) execute once at class-definition /
    import time — preserved as-is to keep behavior unchanged.
    """
    os.system('clear')
    version = cmd2.ansi.style('v.1.0', fg=Fg.RED, bg=None, bold=True, underline=False)
    print(f'''
 _ _
_____ _____ _ __| | ___ _ __ __| |
/ _ \ \ / / _ \ '__| |/ _ \| '__/ _` |
| (_) \ V / __/ | | | (_) | | | (_| |
\___/ \_/ \___|_| |_|\___/|_| \__,_|
{version}
''')
    intro = 'Welcome to Overlord!\nType help or ? to list commands\n'
    # Campaign-wide settings (API tokens, domains) shared by all modules.
    variables = {'dotoken': '', 'domains': [], 'aws_access_key': '', 'aws_secret_key': '', 'godaddy_access_key': '', 'godaddy_secret_key': ''}
    campaign = []
    modules_ids = []
    project_id = ''

    def __init__(self):
        super().__init__()
        hide_cmd2_modules(self)
        dir_path = 'projects'
        # Draw random IDs until one does not collide with an existing project dir.
        while True:
            # FIX(review): 'uniq' must be reset on every attempt; previously a
            # single collision left it False forever and the loop never exited.
            uniq = True
            rand = randomString()
            for p in next(os.walk(dir_path))[1]:
                if (p == rand):
                    uniq = False
            if uniq:
                break
        self.project_id = rand
        self.prompt = ((((('(' + cmd2.ansi.style('Overlord', fg=Fg.RED, bg=None, bold=True, underline=False)) + ' : ') + cmd2.ansi.style(rand, fg=Fg.DARK_GRAY, bg=None, bold=True, underline=False)) + ')') + '$> ')
        self.loadproject_id.choices = next(os.walk(dir_path))[1]
        self.cloneproject_id.choices = next(os.walk(dir_path))[1]
        if os.path.exists((dir_path + '/variables.json')):
            with open((dir_path + '/variables.json'), 'r') as filehandle:
                self.variables = json.load(filehandle)
            self.domain_parser_id.choices = self.variables['domains']

    def do_clear(self, arg):
        """Clear the terminal screen."""
        os.system('clear')

    def do_exit(self, arg):
        """Exit the shell after confirmation."""
        flag = input(cmd2.ansi.style('Exit? [y/N]:', fg=Fg.RED, bg=None, bold=True, underline=False))
        if (flag == 'y'):
            return True

    def do_version(self, arg):
        """Print the tool version."""
        print('version 1.0')

    def do_create(self, arg):
        """Persist the current project and generate its terraform configuration."""
        dir_path = ('projects/' + self.project_id)
        self.do_save(None)
        create.main(self.campaign, self.variables, self.project_id)

    newproject_parser = argparse.ArgumentParser(prog='new')
    newproject_id = newproject_parser.add_argument('id', type=str, nargs='?', help='example: new / new <name> ]')

    @cmd2.with_argparser(newproject_parser)
    def do_new(self, arg):
        """Start a new empty project with a random or user-supplied ID."""
        dir_path = 'projects'
        if (arg.id is None):
            while True:
                uniq = True  # FIX(review): reset per attempt (see __init__)
                rand = randomString()
                for p in next(os.walk(dir_path))[1]:
                    if (p == rand):
                        uniq = False
                if uniq:
                    break
            self.project_id = rand
        else:
            self.project_id = arg.id
        self.campaign = []
        proj = cmd2.ansi.style(self.project_id, fg=Fg.BLUE, bg=None, bold=True, underline=False)
        notification = cmd2.ansi.style('***', fg=Fg.RED, bg=None, bold=True, underline=False)
        print(f'''
{notification} New project with ID {proj} has been created. {notification}
''')
        self.prompt = ((((('(' + cmd2.ansi.style('Overlord', fg=Fg.RED, bg=None, bold=True, underline=False)) + ' : ') + cmd2.ansi.style(self.project_id, fg=Fg.DARK_GRAY, bg=None, bold=True, underline=False)) + ')') + '$> ')

    def create_dir(self):
        """Create the on-disk directory skeleton for the current project."""
        os.system(('mkdir projects/' + self.project_id))
        os.system((('mkdir projects/' + self.project_id) + '/ssh_keys'))
        os.system((('mkdir projects/' + self.project_id) + '/ssh_configs'))
        os.system((('mkdir projects/' + self.project_id) + '/certificates'))

    loadproject_parser = argparse.ArgumentParser(prog='load')
    loadproject_id = loadproject_parser.add_argument('id', type=str, help='example: [ load <ID> ]')

    @cmd2.with_argparser(loadproject_parser)
    def do_load(self, arg):
        """Load a saved project (campaign + variables) by ID."""
        dir_path = ('projects/' + arg.id)
        if os.path.exists(dir_path):
            with open((dir_path + '/campaign.json'), 'r') as filehandle:
                self.campaign = json.load(filehandle)
            with open((dir_path + '/variables.json'), 'r') as filehandle:
                self.variables = json.load(filehandle)
            self.project_id = arg.id
            proj = cmd2.ansi.style(self.project_id, fg=Fg.BLUE, bg=None, bold=True, underline=False)
            notification = cmd2.ansi.style('***', fg=Fg.RED, bg=None, bold=True, underline=False)
            print(f'''
{notification} The project with ID {proj} has been loaded {notification}
''')
            self.update_choices(self.campaign)
            self.prompt = ((((('(' + cmd2.ansi.style('Overlord', fg=Fg.RED, bg=None, bold=True, underline=False)) + ' : ') + cmd2.ansi.style(self.project_id, fg=Fg.DARK_GRAY, bg=None, bold=True, underline=False)) + ')') + '$> ')

    cloneproject_parser = argparse.ArgumentParser(prog='clone')
    cloneproject_id = cloneproject_parser.add_argument('id', type=str, help='example: [ clone <ID> ]')
    cloneproject_parser.add_argument('-n', '--name', type=str, help='Name of the new project')

    @cmd2.with_argparser(cloneproject_parser)
    def do_clone(self, arg):
        """Copy an existing project's config files to a new project ID."""
        project_to_clone = arg.id
        dir_path = ('projects/' + project_to_clone)
        notification = cmd2.ansi.style('***', fg=Fg.RED, bg=None, bold=True, underline=False)
        new_path = ''
        new_project_name = ''
        if (arg.name is None):
            while True:
                uniq = True  # FIX(review): reset per attempt (see __init__)
                rand = randomString()
                # FIX(review): check collisions against existing projects in
                # 'projects', not against the cloned project's subdirectories.
                for p in next(os.walk('projects'))[1]:
                    if (p == rand):
                        uniq = False
                if uniq:
                    break
            new_path = ('projects/' + rand)
            new_project_name = rand
        else:
            new_path = ('projects/' + arg.name)
            new_project_name = arg.name
        if (not os.path.exists(new_path)):
            command = ('mkdir ' + new_path)
            os.system(command)
            shutil.copy((dir_path + '/campaign.json'), (new_path + '/campaign.json'))
            shutil.copy((dir_path + '/variables.json'), (new_path + '/variables.json'))
            self.loadproject_id.choices = next(os.walk('projects'))[1]
            self.cloneproject_id.choices = next(os.walk('projects'))[1]
            print(f'''
{notification} The project with ID {project_to_clone} has been cloned to {new_project_name} {notification}
''')
        else:
            print(f'''
{notification} The project with ID {new_project_name} already exists! {notification}
''')

    def do_delete(self, arg):
        """Destroy the project's terraform resources and remove its directory."""
        flag = input(cmd2.ansi.style('Are you sure? [y/N]:', fg=Fg.RED, bg=None, bold=True, underline=False))
        if (flag == 'y'):
            dir_path = (('projects/' + self.project_id) + '/.terraform')
            if os.path.exists(dir_path):
                # Remove the NS-redirect module from state first, then destroy.
                os.system(f'cd projects/{self.project_id} && /opt/terraform state rm module.redirect_ns')
                os.system(f'cd projects/{self.project_id} && /opt/terraform destroy -auto-approve')
                os.system(f'rm projects/{self.project_id}/terraform.tfstate*')
                shutil.rmtree(f'projects/{self.project_id}/.terraform')
                notification = cmd2.ansi.style('***', fg=Fg.RED, bg=None, bold=True, underline=False)
                print(f'''
{notification} Check if terraform exited without an error before you proceed. {notification}
''')
            flag1 = input(cmd2.ansi.style('Proceding with deleting project directory. Are you sure? [y/N]:', fg=Fg.RED, bg=None, bold=True, underline=False))
            if (flag1 == 'y'):
                shutil.rmtree(('projects/' + self.project_id))
                self.loadproject_id.choices = next(os.walk('projects'))[1]
                self.cloneproject_id.choices = next(os.walk('projects'))[1]
                self.update_choices(self.campaign)
                proj = cmd2.ansi.style(self.project_id, fg=Fg.BLUE, bg=None, bold=True, underline=False)
                notification = cmd2.ansi.style('***', fg=Fg.RED, bg=None, bold=True, underline=False)
                print(f'''
{notification} The project with ID {proj} has been deleted {notification}
''')

    def do_save(self, arg):
        """Write the campaign and variables JSON files for the current project."""
        dir_path = ('projects/' + self.project_id)
        if (not os.path.exists(dir_path)):
            self.create_dir()
        with open((dir_path + '/campaign.json'), 'w') as filehandle:
            json.dump(self.campaign, filehandle, indent=4)
        with open((dir_path + '/variables.json'), 'w') as filehandle:
            json.dump(self.variables, filehandle, indent=4)
        self.loadproject_id.choices = next(os.walk('projects'))[1]
        self.cloneproject_id.choices = next(os.walk('projects'))[1]
        proj = cmd2.ansi.style(self.project_id, fg=Fg.BLUE, bg=None, bold=True, underline=False)
        notification = cmd2.ansi.style('***', fg=Fg.RED, bg=None, bold=True, underline=False)
        print(f'''
{notification} The config files for the project with ID {proj} have been created {notification}
''')

    def do_rename(self, arg):
        """Rename the current project on disk and update the prompt."""
        notification = cmd2.ansi.style('***', fg=Fg.RED, bg=None, bold=True, underline=False)
        if (not arg):
            print(f'''
{notification} You have to specify a new name for your project! {notification}
''')
        else:
            proj_old = cmd2.ansi.style(self.project_id, fg=Fg.BLUE, bg=None, bold=True, underline=False)
            dir_path = ('projects/' + self.project_id)
            if os.path.exists(dir_path):
                os.rename(('projects/' + self.project_id), ('projects/' + arg))
                self.project_id = arg
                self.loadproject_id.choices = next(os.walk('projects'))[1]
                self.cloneproject_id.choices = next(os.walk('projects'))[1]
                proj = cmd2.ansi.style(self.project_id, fg=Fg.BLUE, bg=None, bold=True, underline=False)
                print(f'''
{notification} The project with ID {proj_old} has been renamed to {proj} {notification}
''')
                self.prompt = ((((('(' + cmd2.ansi.style('Overlord', fg=Fg.RED, bg=None, bold=True, underline=False)) + ' : ') + cmd2.ansi.style(self.project_id, fg=Fg.DARK_GRAY, bg=None, bold=True, underline=False)) + ')') + '$> ')

    def do_deploy(self, arg):
        """Run terraform init/plan/apply for the current project."""
        proj = cmd2.ansi.style(self.project_id, fg=Fg.BLUE, bg=None, bold=True, underline=False)
        notification = cmd2.ansi.style('***', fg=Fg.RED, bg=None, bold=True, underline=False)
        print(f'''
{notification} Started deployment of project with ID {proj} {notification}
''')
        # The godaddy provider plugin is vendored and must be staged manually.
        os.system(f'mkdir -p projects/{self.project_id}/.terraform/plugins/linux_amd64 ')
        os.system(f'cp redbaron/data/plugins/terraform-provider-godaddy_v1.7.3_x4 projects/{self.project_id}/.terraform/plugins/linux_amd64/terraform-provider-godaddy_v1.7.3_x4')
        os.system(f'chmod -R a+x projects/{self.project_id}/.terraform/plugins/linux_amd64/*')
        os.system(f'cd projects/{self.project_id} && /opt/terraform init')
        os.system(f'cd projects/{self.project_id} && /opt/terraform plan')
        os.system(f'cd projects/{self.project_id} && /opt/terraform apply -auto-approve')
        print(f'''
{notification} Terraform has finished with the installation {notification}
''')

    usemodule_parser = argparse.ArgumentParser(prog='usemodule')
    usemodule_subparsers = usemodule_parser.add_subparsers(title='usemodule-commands', help='usemodule-command help')
    parser_dns_records = usemodule_subparsers.add_parser('dns_records', help='Settings to create a dns_record instance')
    parser_gophish = usemodule_subparsers.add_parser('gophish', help='Settings to create a gophish instance')
    parser_mail = usemodule_subparsers.add_parser('mail', help='Settings to create a mail instance')
    parser_webserver = usemodule_subparsers.add_parser('webserver', help='Settings to create a webserver instance')
    parser_c2 = usemodule_subparsers.add_parser('c2', help='Settings to create a c2 instance')
    parser_letsencrypt = usemodule_subparsers.add_parser('letsencrypt', help='Settings to create letsencrypt instance')
    parser_redirector = usemodule_subparsers.add_parser('redirector', help='Settings to create redirector instance')
    parser_godaddy = usemodule_subparsers.add_parser('godaddy', help='Settings to create godaddy NS redirection in a provider of choice')
    parser_ansible = usemodule_subparsers.add_parser('ansible', help='Settings to install asnible playbooks')

    def update_choices(self, camp):
        """Refresh the module-ID tab-completion choices after campaign changes."""
        self.info_mods_id.choices = updateModulesIdList(camp, 'info')
        self.del_mods_id.choices = updateModulesIdList(camp, 'del')
        self.edit_mods_id.choices = updateModulesIdList(camp, 'edit')

    def usemodule_dns_record(self, arg):
        """Add a dns_record module (requires domains and at least one module)."""
        if (not self.variables['domains']):
            print('No domains are set! [help set domains]')
        elif (len(self.campaign) == 0):
            print('No modules are set! [help usemodule]')
        else:
            dns_records.main(self.variables['domains'], self.campaign, None, self.project_id)
            addModule(dns_records.module, self.campaign)
            self.update_choices(self.campaign)
            dns_records.module = {}

    def usemodule_redirector(self, arg):
        """Add a redirector module."""
        redirector.main(None, self.campaign, self.project_id)
        addModule(redirector.module, self.campaign)
        self.update_choices(self.campaign)
        redirector.module = {}

    def usemodule_c2(self, arg):
        """Add a c2 module."""
        c2.main(self.campaign, None, self.project_id)
        addModule(c2.module, self.campaign)
        self.update_choices(self.campaign)
        c2.module = {}

    def usemodule_ansible(self, arg):
        """Add an ansible module."""
        ansible.main(self.campaign, None, self.project_id)
        addModule(ansible.module, self.campaign)
        self.update_choices(self.campaign)
        ansible.module = {}

    def usemodule_godaddy(self, arg):
        """Add a godaddy NS-redirection module (requires godaddy keys + domains)."""
        if (not self.variables['godaddy_access_key']):
            print('The access key of Godaddy is not set! [help set godaddy_access_key]')
        elif (not self.variables['godaddy_secret_key']):
            print('The secret key of Godaddy is not set! [help set godaddy_secret_key]')
        elif (not self.variables['domains']):
            print('No domains are set! [help set domains]')
        else:
            godaddy.main(self.campaign, self.variables['domains'], None, self.project_id)
            addModule(godaddy.module, self.campaign)
            self.update_choices(self.campaign)
            godaddy.module = {}

    def usemodule_mail(self, arg):
        """Add a mail-server module (requires domains)."""
        if (not self.variables['domains']):
            print('No domains are set! [help set domains]')
        else:
            mail_server.main(self.variables['domains'], self.campaign, None, self.project_id)
            addModule(mail_server.module, self.campaign)
            self.update_choices(self.campaign)
            mail_server.module = {}

    def usemodule_webserver(self, arg):
        """Add a webserver module."""
        webserver.main(self.campaign, None, self.project_id)
        addModule(webserver.module, self.campaign)
        self.update_choices(self.campaign)
        webserver.module = {}

    def usemodule_gophish(self, arg):
        """Add a gophish module."""
        gophish.main(self.campaign, None, self.project_id)
        addModule(gophish.module, self.campaign)
        self.update_choices(self.campaign)
        gophish.module = {}

    def usemodule_letsencrypt(self, arg):
        """Add a letsencrypt module (requires an existing dns_record A record)."""
        a_records = False
        for c in self.campaign:
            if ((c['module'] == 'dns_record') and (c['type'] == 'A')):
                a_records = True
                break
        if (a_records == False):
            print('No A records were set! [help usemodule dns_records]')
        else:
            letsencrypt.main(self.campaign, None, self.project_id)
            addModule(letsencrypt.module, self.campaign)
            self.update_choices(self.campaign)
            letsencrypt.module = {}

    parser_dns_records.set_defaults(func=usemodule_dns_record)
    parser_c2.set_defaults(func=usemodule_c2)
    parser_gophish.set_defaults(func=usemodule_gophish)
    parser_mail.set_defaults(func=usemodule_mail)
    parser_webserver.set_defaults(func=usemodule_webserver)
    parser_letsencrypt.set_defaults(func=usemodule_letsencrypt)
    parser_redirector.set_defaults(func=usemodule_redirector)
    parser_godaddy.set_defaults(func=usemodule_godaddy)
    parser_ansible.set_defaults(func=usemodule_ansible)

    @cmd2.with_argparser(usemodule_parser)
    def do_usemodule(self, args):
        """Dispatch to the selected usemodule_* handler."""
        func = getattr(args, 'func', None)
        if (func is not None):
            func(self, args)
        else:
            self.do_help('help')

    delmodule_parser = argparse.ArgumentParser(prog='delmodule')
    del_mods_id = delmodule_parser.add_argument('id', type=str, choices=modules_ids, help='delete module')

    @cmd2.with_argparser(delmodule_parser)
    def do_delmodule(self, arg):
        """Delete one module by ID, or all modules with 'all'."""
        if (arg.id == 'all'):
            self.campaign = []
            notification = cmd2.ansi.style('***', fg=Fg.RED, bg=None, bold=True, underline=False)
            print(f'''
{notification} All modules have been deleted from the campaign {notification}
''')
        else:
            # NOTE(review): pops while enumerating — safe only because module
            # IDs are expected to be unique (at most one element is removed).
            for (idx, c) in enumerate(self.campaign):
                if (arg.id == c['id']):
                    self.campaign.pop(idx)
                    mod = cmd2.ansi.style(c['module'], fg=Fg.BLUE, bg=None, bold=True, underline=False)
                    mod_id = cmd2.ansi.style(c['id'], fg=Fg.BLUE, bg=None, bold=True, underline=False)
                    notification = cmd2.ansi.style('***', fg=Fg.RED, bg=None, bold=True, underline=False)
                    print(f'''
{notification} Module {mod} with ID {mod_id} has been deleted from the campaign {notification}
''')

    editmodule_parser = argparse.ArgumentParser(prog='editmodule')
    edit_mods_id = editmodule_parser.add_argument('id', type=str, choices=modules_ids, help='example: [ editmodule <ID> ]')

    @cmd2.with_argparser(editmodule_parser)
    def do_editmodule(self, arg):
        """Re-run the matching module's interactive setup with its saved settings."""
        for (idx, c) in enumerate(self.campaign):
            if (arg.id == c['id']):
                mod = self.campaign.pop(idx)
                if (c['module'] == 'c2'):
                    c2.main(self.campaign, mod, self.project_id)
                    addModule(c2.module, self.campaign)
                    self.update_choices(self.campaign)
                    c2.module = {}
                    break
                if (c['module'] == 'dns_record'):
                    dns_records.main(self.variables['domains'], self.campaign, mod, self.project_id)
                    addModule(dns_records.module, self.campaign)
                    self.update_choices(self.campaign)
                    dns_records.module = {}
                    break
                if (c['module'] == 'redirector'):
                    redirector.main(mod, self.campaign, self.project_id)
                    addModule(redirector.module, self.campaign)
                    self.update_choices(self.campaign)
                    redirector.module = {}
                    break
                if (c['module'] == 'gophish'):
                    gophish.main(self.campaign, mod, self.project_id)
                    addModule(gophish.module, self.campaign)
                    self.update_choices(self.campaign)
                    gophish.module = {}
                    break
                if (c['module'] == 'letsencrypt'):
                    letsencrypt.main(self.campaign, mod, self.project_id)
                    addModule(letsencrypt.module, self.campaign)
                    self.update_choices(self.campaign)
                    letsencrypt.module = {}
                    break
                if (c['module'] == 'mail'):
                    mail_server.main(self.variables['domains'], self.campaign, mod, self.project_id)
                    addModule(mail_server.module, self.campaign)
                    self.update_choices(self.campaign)
                    mail_server.module = {}
                    break
                if (c['module'] == 'webserver'):
                    webserver.main(self.campaign, mod, self.project_id)
                    addModule(webserver.module, self.campaign)
                    self.update_choices(self.campaign)
                    webserver.module = {}
                    break
                if (c['module'] == 'godaddy'):
                    godaddy.main(self.campaign, self.variables['domains'], mod, self.project_id)
                    addModule(godaddy.module, self.campaign)
                    self.update_choices(self.campaign)
                    godaddy.module = {}
                    break
                if (c['module'] == 'ansible'):
                    ansible.main(self.campaign, mod, self.project_id)
                    addModule(ansible.module, self.campaign)
                    self.update_choices(self.campaign)
                    ansible.module = {}
                    break

    set_parser = argparse.ArgumentParser(prog='set')
    set_subparsers = set_parser.add_subparsers(title='set-commands', help='set-command help')
    parser_dotoken = set_subparsers.add_parser('dotoken', help='Sets the Digital Ocean Token')
    parser_dotoken.add_argument('dotoken', type=str, help='example : [ set dotoken <token>]')
    parser_aws_secret_key = set_subparsers.add_parser('aws_secret_key', help='Sets the AWS Secret Key')
    parser_aws_secret_key.add_argument('aws_secret_key', type=str, help='example : [ set aws_secret_key <token>]')
    parser_aws_access_key = set_subparsers.add_parser('aws_access_key', help='Sets the AWS Access Key')
    parser_aws_access_key.add_argument('aws_access_key', type=str, help='example : [ set aws_access_key <token>]')
    parser_godaddy_access_key = set_subparsers.add_parser('godaddy_access_key', help='Sets the Godaddy Access Key')
    parser_godaddy_access_key.add_argument('godaddy_access_key', type=str, help='example : [ set godaddy_access_key <token>]')
    parser_godaddy_secret_key = set_subparsers.add_parser('godaddy_secret_key', help='Sets the Godaddy Secret Key')
    parser_godaddy_secret_key.add_argument('godaddy_secret_key', type=str, help='example : [ set godaddy_secret_key <token>]')
    parser_domains = set_subparsers.add_parser('domains', help='Domain names to be used in the campaign (Multilpe domain names can be added)')
    parser_domains.add_argument('-a', '--add', type=str, help='Domain to be added')
    domain_parser_id = parser_domains.add_argument('-d', '--delete', type=str, choices=('kokos.com', 'a.com'), help='Domain to be deleted')
    parser_variables = set_subparsers.add_parser('variables', help='Sets the default variables.json to the values that are in memory')
    parser_variables.add_argument('variables', nargs='?', type=str, help='example : [ set variables]')

    def set_dotoken(self, arg):
        """Store the Digital Ocean token."""
        self.variables['dotoken'] = arg.dotoken

    def set_aws_access_key(self, arg):
        """Store the AWS access key."""
        self.variables['aws_access_key'] = arg.aws_access_key

    def set_aws_secret_key(self, arg):
        """Store the AWS secret key."""
        self.variables['aws_secret_key'] = arg.aws_secret_key

    def set_godaddy_access_key(self, arg):
        """Store the Godaddy access key."""
        self.variables['godaddy_access_key'] = arg.godaddy_access_key

    def set_godaddy_secret_key(self, arg):
        """Store the Godaddy secret key."""
        self.variables['godaddy_secret_key'] = arg.godaddy_secret_key

    def set_domains(self, arg):
        """Add or remove a campaign domain and refresh completion choices."""
        if arg.add:
            self.variables['domains'].insert(len(self.variables['domains']), arg.add)
        elif arg.delete:
            for (idx, c) in enumerate(self.variables['domains']):
                if (arg.delete == c):
                    self.variables['domains'].pop(idx)
        self.domain_parser_id.choices = self.variables['domains']

    def set_variables(self, arg):
        """Persist the in-memory variables as the global default variables.json."""
        with open('projects/variables.json', 'w') as filehandle:
            json.dump(self.variables, filehandle, indent=4)
        notification = cmd2.ansi.style('***', fg=Fg.RED, bg=None, bold=True, underline=False)
        print(f'''
{notification} Variables have been saved to ./projects/variables.json {notification}
''')

    parser_variables.set_defaults(func=set_variables)
    parser_dotoken.set_defaults(func=set_dotoken)
    parser_aws_access_key.set_defaults(func=set_aws_access_key)
    parser_aws_secret_key.set_defaults(func=set_aws_secret_key)
    parser_godaddy_access_key.set_defaults(func=set_godaddy_access_key)
    parser_godaddy_secret_key.set_defaults(func=set_godaddy_secret_key)
    parser_domains.set_defaults(func=set_domains)

    @cmd2.with_argparser(set_parser)
    def do_set(self, args):
        """Dispatch to the selected set_* handler."""
        func = getattr(args, 'func', None)
        if (func is not None):
            func(self, args)
        else:
            self.do_help('help')

    info_parser = argparse.ArgumentParser(prog='info')
    info_mods_id = info_parser.add_argument('id', nargs='?', type=str, choices=modules_ids, help='example: [ info <ID> ]')

    def info_table(self, c):
        """Print the detail table for one campaign module entry."""
        if (c['module'] == 'c2'):
            c2.cmd_main.do_info(None, c)
        if (c['module'] == 'redirector'):
            redirector.cmd_main.do_info(None, c)
        if (c['module'] == 'dns_record'):
            dns_records.cmd_main.do_info(None, c)
        if (c['module'] == 'gophish'):
            gophish.cmd_main.do_info(None, c)
        if (c['module'] == 'letsencrypt'):
            letsencrypt.cmd_main.do_info(None, c)
        if (c['module'] == 'mail'):
            mail_server.cmd_main.do_info(None, c)
        if (c['module'] == 'webserver'):
            webserver.cmd_main.do_info(None, c)
        if (c['module'] == 'godaddy'):
            godaddy.cmd_main.do_info(None, c)
        if (c['module'] == 'ansible'):
            ansible.cmd_main.do_info(None, c)

    @cmd2.with_argparser(info_parser)
    def do_info(self, arg):
        """Show module details (by ID or 'all'), or a project/campaign overview."""
        if (arg.id is not None):
            if (arg.id == 'all'):
                for c in self.campaign:
                    self.info_table(c)
            else:
                for c in self.campaign:
                    if (arg.id == c['id']):
                        self.info_table(c)
        else:
            print(f'Project ID: {self.project_id}')
            if ('dotoken' in self.variables.keys()):
                print(f"Digital Ocean Key: {self.variables['dotoken']}")
            if ('aws_access_key' in self.variables.keys()):
                print(f"AWS Access Key: {self.variables['aws_access_key']}")
            if ('aws_secret_key' in self.variables.keys()):
                print(f"AWS Secret Key: {self.variables['aws_secret_key']}")
            if ('godaddy_access_key' in self.variables.keys()):
                print(f"Godaddy Access Key: {self.variables['godaddy_access_key']}")
            if ('godaddy_secret_key' in self.variables.keys()):
                print(f"Godaddy Secret Key: {self.variables['godaddy_secret_key']}")
            print(f"Domains: {', '.join(self.variables['domains'])}")
            x = PrettyTable()
            x.title = 'Campaign'
            x.field_names = ['#', 'MODULE', 'ID']
            for (idx, i) in enumerate(self.campaign):
                if ('type' in i):
                    x.add_row([(idx + 1), ((i['module'] + '/') + i['type']), i['id']])
                else:
                    x.add_row([(idx + 1), i['module'], i['id']])
            # NOTE(review): 'DESCRITPION' is not among field_names (typo for
            # DESCRIPTION?) — this align setting has no visible effect; kept as-is.
            x.align['DESCRITPION'] = 'l'
            print(x)

    CMD_CAT_GENERAL = 'General (type help <command>)'
    CMD_CAT_MODULE = 'Module (type help <command>)'
    CMD_CAT_PROJECT = 'Project (type help <command>)'
    cmd2.categorize((do_create, do_new, do_save, do_deploy, do_delete, do_load, do_rename, do_clone), CMD_CAT_PROJECT)
    cmd2.categorize((do_usemodule, do_editmodule, do_delmodule), CMD_CAT_MODULE)
    cmd2.categorize((do_set, do_info), CMD_CAT_GENERAL)
# NOTE(review): this bare expression is almost certainly a decorator that lost
# its leading '@' (upstream usage is `@_optionals.HAS_GAUSSIAN.require_in_instance`
# on the class below); as written it is only a no-op attribute access at import
# time and does NOT guard GaussianDriver instantiation — confirm and restore '@'.
_optionals.HAS_GAUSSIAN.require_in_instance
class GaussianDriver(ElectronicStructureDriver):
def __init__(self, config: (str | list[str])='# rhf/sto-3g scf(conventional)\n\nh2 molecule\n\n0 1\nH 0.0 0.0 0.0\nH 0.0 0.0 0.735\n\n') -> None:
    """Store a Gaussian input configuration.

    config -- the Gaussian job input, either as one string or as a list of
    lines (joined with newlines). Defaults to an RHF/STO-3G H2 job.
    Raises QiskitNatureError for any other type.
    """
    super().__init__()
    if not isinstance(config, (str, list)):
        raise QiskitNatureError(f"Invalid config for Gaussian Driver '{config}'")
    if isinstance(config, list):
        config = '\n'.join(config)
    self._config = config
    # Parsed matrix-element file; populated by run().
    self._mel: ('MatEl' | None) = None
@staticmethod
@_optionals.HAS_GAUSSIAN.require_in_call
def from_molecule(molecule: MoleculeInfo, *, basis: str='sto-3g', method: MethodType=MethodType.RHF, driver_kwargs: (dict[(str, Any)] | None)=None) -> 'GaussianDriver':
    """Build a GaussianDriver from a MoleculeInfo.

    FIX(review): restored the decorators — the dump had a bare
    `_optionals.HAS_GAUSSIAN.require_in_call` expression (lost '@'),
    and the method takes no `self`, so it must be a @staticmethod.

    molecule -- geometry, charge, multiplicity and units of the molecule.
    basis -- basis-set name (normalized via to_driver_basis).
    method -- HF method; must be RHF/ROHF/UHF.
    driver_kwargs -- accepted for interface compatibility, unused here.
    Raises QiskitNatureError for an unknown distance unit.
    """
    del driver_kwargs  # unused by the Gaussian driver
    GaussianDriver.check_method_supported(method)
    basis = GaussianDriver.to_driver_basis(basis)
    if (molecule.units == DistanceUnit.ANGSTROM):
        units = 'Angstrom'
    elif (molecule.units == DistanceUnit.BOHR):
        units = 'Bohr'
    else:
        raise QiskitNatureError(f"Unknown unit '{molecule.units.value}'")
    # Route section.
    cfg1 = f'''# {method.value}/{basis} UNITS={units} scf(conventional)
'''
    name = ''.join(molecule.symbols)
    # One "<symbol> <x> <y> <z>" line per atom.
    geom = '\n'.join([((name + ' ') + ' '.join(map(str, coord))) for (name, coord) in zip(molecule.symbols, molecule.coords)])
    # Title section.
    cfg2 = f'''{name} molecule
'''
    # Charge/multiplicity line followed by the geometry block.
    cfg3 = f'''{molecule.charge} {molecule.multiplicity}
{geom}
'''
    return GaussianDriver(((cfg1 + cfg2) + cfg3))
@staticmethod
def to_driver_basis(basis: str) -> str:
    """Normalize a basis-set name to Gaussian's spelling ('sto3g' -> 'sto-3g').

    FIX(review): marked @staticmethod — the function takes no `self` and is
    invoked as `GaussianDriver.to_driver_basis(...)` elsewhere in this class.
    """
    if (basis == 'sto3g'):
        return 'sto-3g'
    return basis
@staticmethod
def check_method_supported(method: MethodType) -> None:
    """Raise UnsupportMethodError unless `method` is RHF, ROHF or UHF.

    FIX(review): marked @staticmethod — the function takes no `self` and is
    invoked as `GaussianDriver.check_method_supported(...)` in from_molecule.
    """
    if (method not in [MethodType.RHF, MethodType.ROHF, MethodType.UHF]):
        raise UnsupportMethodError(f'Invalid Gaussian method {method.value}.')
def run(self) -> ElectronicStructureProblem:
    """Execute Gaussian (g16) on the stored config and return the parsed problem.

    The config is padded to end with a blank line, augmented to emit a
    matrix-element (.mat) output file, run through g16, and the resulting
    file is parsed into ``self._mel`` before being converted via to_problem().
    """
    cfg = self._config
    # Gaussian input must terminate with a blank line.
    while (not cfg.endswith('\n\n')):
        cfg += '\n'
    logger.debug("User supplied configuration raw: '%s'", cfg.replace('\r', '\\r').replace('\n', '\\n'))
    logger.debug('User supplied configuration\n%s', cfg)
    # Temporary file that Gaussian will fill with matrix elements.
    (file, fname) = tempfile.mkstemp(suffix='.mat')
    os.close(file)
    # Inject the output-file directive and extra route options into the config.
    cfg = GaussianDriver._augment_config(fname, cfg)
    logger.debug('Augmented control information:\n%s', cfg)
    self._config = cfg
    run_g16(cfg)
    self._mel = GaussianDriver._parse_matrix_file(fname)
    try:
        os.remove(fname)
    except Exception:
        # Best-effort cleanup; a leftover temp file is not fatal.
        logger.warning('Failed to remove MatrixElement file %s', fname)
    return self.to_problem()
def _augment_config(fname: str, cfg: str) -> str:
    """Rewrite a Gaussian input so it emits a matrix-element file.

    Appends an extra Route line (matrix output options) after the existing
    '#' Route section, then inserts ``fname`` after the title and molecule
    sections (located by counting blank-line-separated sections), and copies
    any remaining input unchanged. Raises QiskitNatureError if the input
    ends before the expected sections are found.
    """
    cfgaug = ''
    with io.StringIO() as outf:
        with io.StringIO(cfg) as inf:
            line = ''
            added = False
            # Pass 1: copy lines until the '#' Route section; after it,
            # append our extra Route line at the first blank line.
            while (not added):
                line = inf.readline()
                if (not line):
                    break
                if line.startswith('#'):
                    outf.write(line)
                    while (not added):
                        line = inf.readline()
                        if (not line):
                            raise QiskitNatureError('Unexpected end of Gaussian input')
                        if (not line.strip()):
                            # Blank line ends the Route section: add our options.
                            outf.write('# Window=Full Int=NoRaff Symm=(NoInt,None) output=(matrix,i4labels,mo2el) tran=full\n')
                            added = True
                        outf.write(line)
                else:
                    outf.write(line)
            # Pass 2: skip past title and molecule sections (two non-blank
            # sections separated by blank lines), then insert the .mat filename.
            added = False
            section_count = 0
            blank = True
            while (not added):
                line = inf.readline()
                if (not line):
                    raise QiskitNatureError('Unexpected end of Gaussian input')
                if (not line.strip()):
                    blank = True
                    if (section_count == 2):
                        break
                elif blank:
                    # First non-blank line after a blank starts a new section.
                    section_count += 1
                    blank = False
                outf.write(line)
            # Write the terminating blank line, then the matrix-element
            # filename as its own section.
            outf.write(line)
            outf.write(fname)
            outf.write('\n\n')
            # Pass 3: copy the rest of the original input verbatim.
            while True:
                line = inf.readline()
                if (not line):
                    break
                outf.write(line)
        cfgaug = outf.getvalue()
    return cfgaug
def _parse_matrix_file(fname: str) -> 'MatEl':
    """Load a Gaussian matrix-element file via the bundled gauopen package.

    Temporarily extends sys.path with the package-local 'gauopen' directory
    so its compiled qcmatrixio extension can be imported. Raises
    QiskitNatureError (chained) if the extension is unavailable.
    """
    try:
        # gauopen ships next to this module; make it importable.
        gauopen_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'gauopen')
        if (gauopen_directory not in sys.path):
            sys.path.insert(0, gauopen_directory)
        from .gauopen.QCMatEl import MatEl
    except ImportError as mnfe:
        # Distinguish a missing compiled qcmatrixio extension (build hint)
        # from any other import failure.
        msg = ('qcmatrixio extension not found. See Gaussian driver readme to build qcmatrixio.F using f2py' if (mnfe.name == 'qcmatrixio') else str(mnfe))
        logger.info(msg)
        raise QiskitNatureError(msg) from mnfe
    _mel = MatEl(file=fname)
    logger.debug('MatrixElement file:\n%s', _mel)
    return _mel
def to_qcschema(self, *, include_dipole: bool=True) -> QCSchema:
    """Convert the parsed matrix-element data (``self._mel``) to a QCSchema.

    include_dipole -- whether dipole integrals are included in the output.
    NOTE(review): assumes run() has populated self._mel — confirm callers.
    """
    return GaussianDriver._qcschema_from_matrix_file(self._mel, include_dipole=include_dipole)
def _qcschema_from_matrix_file(mel: MatEl, *, include_dipole: bool=True) -> QCSchema:
    """Build a QCSchema dataset from a parsed Gaussian MatrixElement file.

    Extracts AO- and MO-basis integrals, orbital energies, system metadata and
    (optionally) dipole integrals/moments from *mel*, then delegates final
    assembly to ``GaussianDriver._to_qcschema``.

    Args:
        mel: the parsed MatrixElement container.
        include_dipole: whether to extract dipole integrals and moments.
    """
    (einsum_func, _) = get_einsum()
    data = _QCSchemaData()
    # --- AO-basis one-electron quantities ---
    data.overlap = GaussianDriver._get_matrix(mel, 'OVERLAP')
    data.mo_coeff = GaussianDriver._get_matrix(mel, 'ALPHA MO COEFFICIENTS')
    data.mo_coeff_b = GaussianDriver._get_matrix(mel, 'BETA MO COEFFICIENTS')
    # Restricted calculations store identical alpha/beta data; keep alpha only.
    if np.array_equal(data.mo_coeff, data.mo_coeff_b):
        logger.debug('ALPHA and BETA MO COEFFS identical, keeping only ALPHA')
        data.mo_coeff_b = None
    data.hij = GaussianDriver._get_matrix(mel, 'CORE HAMILTONIAN ALPHA')
    logger.debug('CORE HAMILTONIAN ALPHA %s', data.hij.shape)
    data.hij_b = GaussianDriver._get_matrix(mel, 'CORE HAMILTONIAN BETA')
    if np.array_equal(data.hij, data.hij_b):
        logger.debug('CORE HAMILTONIAN ALPHA and BETA identical, keeping only ALPHA')
        data.hij_b = None
    logger.debug('CORE HAMILTONIAN BETA %s', ('- Not present' if (data.hij_b is None) else data.hij_b.shape))
    # MO-basis core Hamiltonian: C^T h C.
    data.hij_mo = np.dot(np.dot(data.mo_coeff.T, data.hij), data.mo_coeff)
    if (data.mo_coeff_b is not None):
        data.hij_mo_b = np.dot(np.dot(data.mo_coeff_b.T, (data.hij_b if (data.hij_b is not None) else data.hij)), data.mo_coeff_b)
    data.eri = GaussianDriver._get_matrix(mel, 'REGULAR 2E INTEGRALS')
    logger.debug('REGULAR 2E INTEGRALS %s', data.eri.shape)
    # Decide whether to transform AO two-electron integrals ourselves or to
    # use the MO integrals Gaussian already wrote into the file.
    useao2e = False
    if ((data.mo_coeff_b is None) and (mel.matlist.get('BB MO 2E INTEGRALS') is not None)):
        useao2e = True
        logger.info('Identical A and B coeffs but BB ints are present - using regular 2E ints instead')
    if useao2e:
        # Four-index AO->MO transformation of the ERIs.
        einsum_ao_to_mo = 'pqrs,pi,qj,rk,sl->ijkl'
        data.eri_mo = einsum_func(einsum_ao_to_mo, data.eri, data.mo_coeff, data.mo_coeff, data.mo_coeff, data.mo_coeff, optimize=settings.optimize_einsum)
        if (data.mo_coeff_b is not None):
            data.eri_mo_ba = einsum_func(einsum_ao_to_mo, data.eri, data.mo_coeff_b, data.mo_coeff_b, data.mo_coeff, data.mo_coeff, optimize=settings.optimize_einsum)
            data.eri_mo_bb = einsum_func(einsum_ao_to_mo, data.eri, data.mo_coeff_b, data.mo_coeff_b, data.mo_coeff_b, data.mo_coeff_b, optimize=settings.optimize_einsum)
    else:
        data.eri_mo = GaussianDriver._get_matrix(mel, 'AA MO 2E INTEGRALS')
        logger.debug('AA MO 2E INTEGRALS %s', data.eri_mo.shape)
        data.eri_mo_bb = GaussianDriver._get_matrix(mel, 'BB MO 2E INTEGRALS')
        logger.debug('BB MO 2E INTEGRALS %s', ('- Not present' if (data.eri_mo_bb is None) else data.eri_mo_bb.shape))
        data.eri_mo_ba = GaussianDriver._get_matrix(mel, 'BA MO 2E INTEGRALS')
        logger.debug('BA MO 2E INTEGRALS %s', ('- Not present' if (data.eri_mo_ba is None) else data.eri_mo_ba.shape))
    data.mo_energy = GaussianDriver._get_matrix(mel, 'ALPHA ORBITAL ENERGIES')
    logger.debug('ORBITAL ENERGIES %s', data.mo_energy)
    data.mo_energy_b = GaussianDriver._get_matrix(mel, 'BETA ORBITAL ENERGIES')
    logger.debug('BETA ORBITAL ENERGIES %s', data.mo_energy_b)
    # --- System metadata ---
    data.e_nuc = mel.scalar('ENUCREP')
    data.e_ref = mel.scalar('ETOTAL')
    data.symbols = [PERIODIC_TABLE[atom] for atom in mel.ian]
    data.coords = mel.c
    data.multiplicity = mel.multip
    data.charge = mel.icharg
    # NOTE(review): method and basis are hard-coded labels; they do not
    # reflect the actual route section of the Gaussian run -- confirm.
    data.method = 'RHF'
    data.basis = 'sto-3g'
    data.creator = 'Gaussian'
    data.version = mel.gversion
    data.nbasis = mel.nbasis
    data.nmo = data.mo_coeff.shape[0]
    # Electron counts from total electrons (ne) and multiplicity (2S+1).
    data.nalpha = (((mel.ne + mel.multip) - 1) // 2)
    data.nbeta = (((mel.ne - mel.multip) + 1) // 2)
    if include_dipole:
        # --- Dipole integrals and moments ---
        dipints = GaussianDriver._get_matrix(mel, 'DIPOLE INTEGRALS')
        dipints = einsum_func('ijk->kji', dipints)
        data.dip_x = dipints[0]
        data.dip_y = dipints[1]
        data.dip_z = dipints[2]
        data.dip_mo_x_a = np.dot(np.dot(data.mo_coeff.T, data.dip_x), data.mo_coeff)
        data.dip_mo_y_a = np.dot(np.dot(data.mo_coeff.T, data.dip_y), data.mo_coeff)
        data.dip_mo_z_a = np.dot(np.dot(data.mo_coeff.T, data.dip_z), data.mo_coeff)
        if (data.mo_coeff_b is not None):
            data.dip_mo_x_b = np.dot(np.dot(data.mo_coeff_b.T, data.dip_x), data.mo_coeff_b)
            data.dip_mo_y_b = np.dot(np.dot(data.mo_coeff_b.T, data.dip_y), data.mo_coeff_b)
            data.dip_mo_z_b = np.dot(np.dot(data.mo_coeff_b.T, data.dip_z), data.mo_coeff_b)
        # Nuclear dipole from atomic numbers and coordinates; the electronic
        # part is the total (reference) moment minus the nuclear part.
        coords = np.reshape(mel.c, (len(mel.ian), 3))
        nucl_dip = einsum_func('i,ix->x', mel.ian, coords)
        nucl_dip = np.round(nucl_dip, decimals=8)
        ref_dip = GaussianDriver._get_matrix(mel, 'ELECTRIC DIPOLE MOMENT')
        ref_dip = np.round(ref_dip, decimals=8)
        elec_dip = (ref_dip - nucl_dip)
        logger.info('HF Electronic dipole moment: %s', elec_dip)
        logger.info('Nuclear dipole moment: %s', nucl_dip)
        logger.info('Total dipole moment: %s', ref_dip)
        data.dip_nuc = nucl_dip
        data.dip_ref = ref_dip
    return GaussianDriver._to_qcschema(data, include_dipole=include_dipole)
def to_problem(self, *, basis: ElectronicBasis=ElectronicBasis.MO, include_dipole: bool=True) -> ElectronicStructureProblem:
    """Convert the most recently parsed MatrixElement data into an ElectronicStructureProblem.

    Args:
        basis: the electronic basis of the produced problem (default: MO).
        include_dipole: whether the dipole moment property is included.
    """
    return GaussianDriver._problem_from_matrix_file(self._mel, basis=basis, include_dipole=include_dipole)
def _problem_from_matrix_file(mel: MatEl, *, basis: ElectronicBasis=ElectronicBasis.MO, include_dipole: bool=True) -> ElectronicStructureProblem:
    """Build an ElectronicStructureProblem from a parsed MatrixElement file.

    Goes through the QCSchema intermediate representation and, when a dipole
    moment is present, flags it for sign reversal (presumably because
    Gaussian's sign convention differs from the problem's -- the flag name
    is the only evidence visible here).
    """
    qcschema = GaussianDriver._qcschema_from_matrix_file(mel, include_dipole=include_dipole)
    problem = qcschema_to_problem(qcschema, basis=basis, include_dipole=include_dipole)
    if (include_dipole and (problem.properties.electronic_dipole_moment is not None)):
        problem.properties.electronic_dipole_moment.reverse_dipole_sign = True
    return problem
def _get_matrix(mel, name) -> np.ndarray:
m_x = mel.matlist.get(name)
if (m_x is None):
return None
dims = tuple((abs(i) for i in m_x.dimens))
mat = np.reshape(m_x.expand(), dims, order='F')
return mat |
def jetson_clocks_gui(stdscr, offset, start, jetson):
    """Draw the 'Jetson Clocks' status entry with a state-dependent color."""
    status = jetson.jetson_clocks.get_status()
    # Color code: bold green when running, plain when inactive, yellow for
    # transitional states (names ending in 'ing' -- presumably 'booting',
    # 'activating', ...), red for anything else.
    if status == 'running':
        color = curses.A_BOLD | NColors.green()
    elif status == 'inactive':
        color = curses.A_NORMAL
    elif 'ing' in status:
        color = NColors.yellow()
    else:
        color = NColors.red()
    # Square brackets mark that jetson_clocks is enabled at boot.
    if jetson.jetson_clocks.get_boot():
        status = ('[' + status) + ']'
    plot_name_info(stdscr, offset, start, 'Jetson Clocks', status, color)
def _prepare_prompt_learning_config(peft_config, model_config):
if (peft_config.num_layers is None):
if ('num_hidden_layers' in model_config):
num_layers = model_config['num_hidden_layers']
elif ('num_layers' in model_config):
num_layers = model_config['num_layers']
elif ('n_layer' in model_config):
num_layers = model_config['n_layer']
else:
raise ValueError('Please specify `num_layers` in `peft_config`')
peft_config.num_layers = num_layers
if (peft_config.token_dim is None):
if ('hidden_size' in model_config):
token_dim = model_config['hidden_size']
elif ('n_embd' in model_config):
token_dim = model_config['n_embd']
elif ('d_model' in model_config):
token_dim = model_config['d_model']
else:
raise ValueError('Please specify `token_dim` in `peft_config`')
peft_config.token_dim = token_dim
if (peft_config.num_attention_heads is None):
if ('num_attention_heads' in model_config):
num_attention_heads = model_config['num_attention_heads']
elif ('n_head' in model_config):
num_attention_heads = model_config['n_head']
elif ('num_heads' in model_config):
num_attention_heads = model_config['num_heads']
elif ('encoder_attention_heads' in model_config):
num_attention_heads = model_config['encoder_attention_heads']
else:
raise ValueError('Please specify `num_attention_heads` in `peft_config`')
peft_config.num_attention_heads = num_attention_heads
if (getattr(peft_config, 'encoder_hidden_size', None) is None):
setattr(peft_config, 'encoder_hidden_size', token_dim)
return peft_config |
def get_rxn_smarts(probs):
    """Build reaction SMARTS strings from the atom-environment keys of *probs*.

    Each key is split on ``]``; the first fragment becomes the reactant atom
    pattern, extended with hydrogen-count constraints chosen by the bond
    symbol found in the key, and the second-to-last fragment is re-appended
    on the product side.
    """
    reactions = []
    for key in probs:
        fragments = key.split(']')
        pattern = fragments[0]
        # Sulfur environments (#16) get no H-count constraints at all.
        sulfur_free = '#16' not in pattern
        if '-' in key and sulfur_free:
            pattern += ';!H0:1]>>[*:1]'
        if '=' in key and sulfur_free:
            pattern += ';!H1;!H0:1]>>[*:1]'
        if ']#[' in key:
            pattern += ';H3:1]>>[*:1]'
        if not sulfur_free:
            pattern += ':1]>>[*:1]'
        pattern += fragments[-2] + ']'
        reactions.append(pattern)
    return reactions
class ConvNextImageProcessor(BaseImageProcessor):
    """ConvNeXt image processor: resize (with crop_pct shortest-edge logic), rescale and normalize."""
    # Name of the tensor this processor produces for the model.
    model_input_names = ['pixel_values']
    def __init__(self, do_resize: bool=True, size: Dict[(str, int)]=None, crop_pct: float=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[(int, float)]=(1 / 255), do_normalize: bool=True, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, **kwargs) -> None:
        """Store the default preprocessing configuration; each step can be overridden per call in `preprocess`."""
        super().__init__(**kwargs)
        size = (size if (size is not None) else {'shortest_edge': 384})
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Fraction of the resized image kept by the center crop (default 224/256).
        self.crop_pct = (crop_pct if (crop_pct is not None) else (224 / 256))
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = (image_mean if (image_mean is not None) else IMAGENET_STANDARD_MEAN)
        self.image_std = (image_std if (image_std is not None) else IMAGENET_STANDARD_STD)
    def resize(self, image: np.ndarray, size: Dict[(str, int)], crop_pct: float, resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Resize one image.

        Below 384 px the shortest edge is resized to ``size / crop_pct`` and a
        center crop brings it back to the target; at or above 384 px the image
        is resized directly to a square of the requested edge length.
        """
        size = get_size_dict(size, default_to_square=False)
        if ('shortest_edge' not in size):
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size['shortest_edge']
        if (shortest_edge < 384):
            # Resize slightly larger than the target, then crop down to it.
            resize_shortest_edge = int((shortest_edge / crop_pct))
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            return resize(image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[(int, float)], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[(float, List[float])], std: Union[(float, List[float])], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Normalize the image with per-channel mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool=None, size: Dict[(str, int)]=None, crop_pct: float=None, resample: PILImageResampling=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        """Run the configured resize/rescale/normalize pipeline over one or more images.

        Any ``None`` argument falls back to the value stored at construction
        time. Returns a ``BatchFeature`` holding 'pixel_values', optionally
        converted to the requested tensor type.
        """
        do_resize = (do_resize if (do_resize is not None) else self.do_resize)
        crop_pct = (crop_pct if (crop_pct is not None) else self.crop_pct)
        resample = (resample if (resample is not None) else self.resample)
        do_rescale = (do_rescale if (do_rescale is not None) else self.do_rescale)
        rescale_factor = (rescale_factor if (rescale_factor is not None) else self.rescale_factor)
        do_normalize = (do_normalize if (do_normalize is not None) else self.do_normalize)
        image_mean = (image_mean if (image_mean is not None) else self.image_mean)
        image_std = (image_std if (image_std is not None) else self.image_std)
        size = (size if (size is not None) else self.size)
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if (not valid_images(images)):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        # Validate that each enabled step has the parameters it needs.
        if ((do_resize and (size is None)) or (resample is None)):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if (do_resize and (size['shortest_edge'] < 384) and (crop_pct is None)):
            raise ValueError('crop_pct must be specified if size < 384.')
        if (do_rescale and (rescale_factor is None)):
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if (do_normalize and ((image_mean is None) or (image_std is None))):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
def test_metadata_path_with_prepare(tmp_dir, package_test_setuptools):
    """Metadata produced via ProjectBuilder.metadata_path exposes the expected name/version."""
    project_builder = build.ProjectBuilder(package_test_setuptools)
    dist_info = pathlib.Path(project_builder.metadata_path(tmp_dir))
    metadata = _importlib.metadata.PathDistribution(dist_info).metadata
    assert metadata['name'] == 'test-setuptools'
    assert metadata['Version'] == '1.0.0'
class Migration(migrations.Migration):
    """Create the BumpedThread model: a single non-negative thread ID primary key."""
    # Must run after the AoC tables migration.
    dependencies = [('api', '0080_add_aoc_tables')]
    operations = [migrations.CreateModel(name='BumpedThread', fields=[('thread_id', models.BigIntegerField(help_text='The thread ID that should be bumped.', primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(limit_value=0, message='Thread IDs cannot be negative.')], verbose_name='Thread ID'))], bases=(pydis_site.apps.api.models.mixins.ModelReprMixin, models.Model))]
def create_model(session, Model_class, path, load_vec, config, id_to_char, logger):
    """Create a model, restoring from checkpoint when one exists.

    When starting fresh and ``config['pre_emb']`` is set, the character
    embedding table is overwritten with vectors loaded from
    ``config['emb_file']`` via *load_vec*.

    Args:
        session: TF session used for variable init / restore / assignment.
        Model_class: model constructor taking *config*.
        path: checkpoint directory to look for saved parameters.
        load_vec: callable merging pre-trained vectors into the embedding matrix.
        config: dict with at least 'pre_emb', 'emb_file', 'char_dim'.
        id_to_char: id -> character mapping used by *load_vec*.
        logger: logger for progress messages.
    """
    model = Model_class(config)
    ckpt = tf.train.get_checkpoint_state(path)
    if (ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path)):
        logger.info(('Reading model parameters from %s' % ckpt.model_checkpoint_path))
        model.saver.restore(session, ckpt.model_checkpoint_path)
    else:
        logger.info('Created model with fresh parameters.')
        session.run(tf.global_variables_initializer())
        if config['pre_emb']:
            # Replace the randomly initialized char embeddings with
            # pre-trained vectors (only entries found in the file change).
            emb_weights = session.run(model.char_lookup.read_value())
            emb_weights = load_vec(config['emb_file'], id_to_char, config['char_dim'], emb_weights)
            session.run(model.char_lookup.assign(emb_weights))
            logger.info('Load pre-trained embedding.')
    return model
class Solution():
    """Valid-parentheses checker (LeetCode 20 style)."""

    def isValid(self, s: str) -> bool:
        """Return True iff every bracket in *s* is closed in the correct order.

        Openers are encoded as negative values and closers as the matching
        positive value; a closer pops the stack only when the top holds its
        opener. Bug fix: the original popped whenever the magnitudes matched,
        so an *opener* could cancel a closer already on the stack, wrongly
        accepting inputs such as ')('.
        """
        codes = {'#': 0, '(': (- 1), ')': 1, '[': (- 2), ']': 2, '{': (- 3), '}': 3}
        stack = [0]  # sentinel so stack[-1] is always defined
        for ch in s:
            code = codes[ch]
            # Pop only when `ch` is a CLOSER (code > 0) matching the opener on top.
            if code > 0 and stack[-1] == -code:
                stack.pop()
            else:
                stack.append(code)
        # Only the sentinel remains iff everything was matched.
        return stack[-1] == 0
def fasttext_export(embedding_file):
    """Convert a fastText text-format embedding file to HDF5.

    Reads ``embedding_file`` (first line is the fastText header and is
    skipped; every other line is ``word v1 v2 ...``) and writes the
    vocabulary plus a float32 embedding matrix to ``<embedding_file>.h5``
    via ``export_data_h5``.

    Bug fix: the file handle was never closed; it is now managed by a
    ``with`` block.
    """
    vocabulary = []
    embeddings = []
    with io.open(embedding_file, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
        for line_idx, line in enumerate(fin):
            if line_idx == 0:
                # fastText header line: "<vocab_size> <dim>".
                continue
            tokens = line.rstrip().split(' ')
            vocabulary.append(tokens[0])
            embeddings.append([float(item) for item in tokens[1:]])
    export_data_h5(vocabulary, np.array(embeddings, dtype=np.float32), output=(embedding_file + '.h5'))
@pytest.mark.parametrize('with_root', [True, False])
@pytest.mark.parametrize('error', ['module', 'readme', ''])
def test_install_warning_corrupt_root(command_tester_factory: CommandTesterFactory, project_factory: ProjectFactory, with_root: bool, error: str) -> None:
    """`poetry install` exits 0 but warns when the root project itself cannot be installed.

    NOTE(review): the `@pytest.mark` prefixes on the two parametrize
    decorators were missing in this file (bare `.parametrize(...)` lines are
    not valid Python); restored here -- confirm against upstream.
    """
    name = 'corrupt'
    content = f'''[tool.poetry]
name = "{name}"
version = "1.2.3"
description = ""
authors = []
'''
    if error == 'readme':
        # Point at a readme that does not exist to corrupt the project.
        content += 'readme = "missing_readme.md"\n'
    poetry = project_factory(name=name, pyproject_content=content)
    if error != 'module':
        # Create the module file unless the 'module' case wants it missing.
        (poetry.pyproject_path.parent / f'{name}.py').touch()
    tester = command_tester_factory('install', poetry=poetry)
    tester.execute('' if with_root else '--no-root')
    assert tester.status_code == 0
    if with_root and error:
        assert 'The current project could not be installed: ' in tester.io.fetch_error()
    else:
        assert tester.io.fetch_error() == ''
class CustomSelector(discord.ui.Select):
    """Dropdown that records the chosen value on its view and stops the view."""
    def __init__(self, placeholder: str, options: List[discord.SelectOption]):
        super().__init__(placeholder=placeholder, options=options)
    async def callback(self, interaction: discord.Interaction):
        """Handle a selection: acknowledge, store the value, end the view."""
        # Defer: acknowledge the interaction without sending a visible reply.
        (await interaction.response.defer())
        # Expose the selected value to whoever awaits the view, then stop it.
        self.view.custom_id = interaction.data['values'][0]
        self.view.stop()
def postprocess_text(preds, labels, metric_name):
    """Normalize prediction/label strings into the shape a given metric expects.

    'rouge' wants newline-separated sentences, 'sacrebleu' wants each label
    wrapped in a list, 'bleu' wants whitespace token lists; any other metric
    just gets stripped strings.
    """
    preds = [pred_text.strip() for pred_text in preds]
    labels = [label_text.strip() for label_text in labels]
    if metric_name == 'rouge':
        preds = ['\n'.join(nltk.sent_tokenize(pred_text)) for pred_text in preds]
        labels = ['\n'.join(nltk.sent_tokenize(label_text)) for label_text in labels]
    elif metric_name == 'sacrebleu':
        labels = [[label_text] for label_text in labels]
    elif metric_name == 'bleu':
        preds = [pred_text.split(' ') for pred_text in preds]
        labels = [[label_text.split(' ')] for label_text in labels]
    return (preds, labels)
def get_training_eval_datasets(cfg: DatasetConfig, shard_id: int, num_shards: int, eval_steps: int, feature_converter_cls: Callable[(..., seqio.FeatureConverter)], deterministic: bool=False, model_dir: Optional[str]=None, start_step: int=0) -> Mapping[(str, tf.data.Dataset)]:
    """Build fixed-length training-eval datasets for each subtask of a mixture.

    Each dataset is loaded unsharded with batch_size=1, then unbatched,
    repeated, sharded across *num_shards*, re-batched to the per-shard batch
    size and truncated to *eval_steps* batches, so every shard sees a stream
    of exactly *eval_steps* batches. A dataset for the mixture as a whole is
    added when the provider is a ``seqio.Mixture``.

    Raises:
        ValueError: when ``cfg.batch_size`` is not divisible by *num_shards*.
    """
    if isinstance(cfg.mixture_or_task_name, seqio.DatasetProviderBase):
        mixture_or_task = cfg.mixture_or_task_name
    else:
        mixture_or_task = seqio.get_mixture_or_task(cfg.mixture_or_task_name)
    datasets = {}
    get_dataset_fn = get_dataset
    if deterministic:
        # Deterministic loading needs the model dir to resume from start_step.
        assert (model_dir is not None)
        get_dataset_fn = functools.partial(get_deterministic_dataset, model_dir=model_dir, start_step=start_step)
    if (cfg.batch_size % num_shards):
        raise ValueError(f'Batch size ({cfg.batch_size}) must be divisible by number of shards ({num_shards}).')
    def _repeat_shard_batch_take_cache(ds: tf.data.Dataset):
        # Turn a batch_size=1 dataset into this shard's eval stream of
        # exactly `eval_steps` per-shard batches (optionally memory-cached).
        if (not isinstance(ds, tf.data.Dataset)):
            raise ValueError('Only tf.data.Dataset objects supported.')
        ds = ds.unbatch().repeat().shard(num_shards, shard_id).batch((cfg.batch_size // num_shards), drop_remainder=True).take(eval_steps)
        if cfg.use_memory_cache:
            return ds.cache()
        else:
            return ds
    for task in seqio.get_subtasks(mixture_or_task):
        if (cfg.split not in task.splits):
            logging.info("Task %s has no '%s' split; skipping training evaluation.", task.name, cfg.split)
            continue
        logging.info('Loading task %s for training evaluation.', task.name)
        # batch_size=1 here: real batching happens inside the helper above.
        task_cfg = dataclasses.replace(cfg, mixture_or_task_name=task.name, batch_size=1)
        datasets[task.name] = _repeat_shard_batch_take_cache(get_dataset_fn(task_cfg, shard_id=0, num_shards=1, feature_converter_cls=feature_converter_cls, num_epochs=(eval_steps * cfg.batch_size), continue_from_last_checkpoint=False))
    if isinstance(mixture_or_task, seqio.Mixture):
        # Also evaluate on the full mixture, not just its subtasks.
        datasets[mixture_or_task.name] = _repeat_shard_batch_take_cache(get_dataset_fn(dataclasses.replace(cfg, batch_size=1), shard_id=0, num_shards=1, feature_converter_cls=feature_converter_cls, num_epochs=(eval_steps * cfg.batch_size), continue_from_last_checkpoint=False))
    return datasets
def get_smart_contracts_start_at(chain_id: ChainID) -> BlockNumber:
    """Return the first block at which smart-contract scanning should start.

    Known networks start at their Constantinople fork block; anything else
    falls back to the genesis block.
    """
    constantinople_by_chain = {
        Networks.MAINNET.value: EthereumForks.CONSTANTINOPLE.value,
        Networks.ROPSTEN.value: RopstenForks.CONSTANTINOPLE.value,
        Networks.KOVAN.value: KovanForks.CONSTANTINOPLE.value,
        Networks.RINKEBY.value: RinkebyForks.CONSTANTINOPLE.value,
        Networks.GOERLI.value: GoerliForks.CONSTANTINOPLE.value,
    }
    return constantinople_by_chain.get(chain_id, GENESIS_BLOCK_NUMBER)
@pytest.mark.parametrize('case_name', _get_explicit_cases('positive'))
def test_explicit_positive_examples(case_name, run_line):
    """check-jsonschema exits 0 for every explicit positive example case.

    Instance and schema files may be JSON, YAML or TOML; the first existing
    variant wins.

    NOTE(review): restored the missing `@pytest.mark` prefix on the
    parametrize decorator (a bare `.parametrize(...)` line is not valid
    Python) -- confirm against upstream.
    """
    _check_file_format_skip(case_name)
    casedir = (EXAMPLE_EXPLICIT_FILES / 'positive') / case_name
    instance = casedir / 'instance.json'
    if not instance.exists():
        instance = casedir / 'instance.yaml'
    if not instance.exists():
        instance = casedir / 'instance.toml'
    if not instance.exists():
        raise Exception('could not find an instance file for test case')
    schema = casedir / 'schema.json'
    if not schema.exists():
        schema = casedir / 'schema.yaml'
    if not schema.exists():
        raise Exception('could not find a schema file for test case')
    ret = run_line(['check-jsonschema', '--schemafile', str(schema), str(instance)])
    assert ret.exit_code == 0
class FlowStep(nn.Module):
    """One normalizing-flow step: (act)norm -> permutation -> optional affine coupling.

    ``FlowPermutation`` maps a permutation name to a callable
    ``(obj, z, logdet, rev) -> (z, logdet)``; every inv-conv variant simply
    dispatches to ``obj.invconv``.
    """

    FlowPermutation = {
        'reverse': (lambda obj, z, logdet, rev: (obj.reverse(z, rev), logdet)),
        'shuffle': (lambda obj, z, logdet, rev: (obj.shuffle(z, rev), logdet)),
        'invconv': (lambda obj, z, logdet, rev: obj.invconv(z, logdet, rev)),
        'squeeze_invconv': (lambda obj, z, logdet, rev: obj.invconv(z, logdet, rev)),
        'resqueeze_invconv_alternating_2_3': (lambda obj, z, logdet, rev: obj.invconv(z, logdet, rev)),
        'resqueeze_invconv_3': (lambda obj, z, logdet, rev: obj.invconv(z, logdet, rev)),
        'InvertibleConv1x1GridAlign': (lambda obj, z, logdet, rev: obj.invconv(z, logdet, rev)),
        'InvertibleConv1x1SubblocksShuf': (lambda obj, z, logdet, rev: obj.invconv(z, logdet, rev)),
        'InvertibleConv1x1GridAlignIndepBorder': (lambda obj, z, logdet, rev: obj.invconv(z, logdet, rev)),
        'InvertibleConv1x1GridAlignIndepBorder4': (lambda obj, z, logdet, rev: obj.invconv(z, logdet, rev)),
    }

    def __init__(self, in_channels, hidden_channels, actnorm_scale=1.0, flow_permutation='invconv', flow_coupling='additive', LU_decomposed=False, opt=None, image_injector=None, idx=None, acOpt=None, normOpt=None, in_shape=None, position=None):
        assert (flow_permutation in FlowStep.FlowPermutation), 'float_permutation should be in `{}`'.format(FlowStep.FlowPermutation.keys())
        super().__init__()
        self.flow_permutation = flow_permutation
        self.flow_coupling = flow_coupling
        self.image_injector = image_injector
        self.norm_type = (normOpt['type'] if normOpt else 'ActNorm2d')
        # NOTE(review): this assignment is immediately overwritten by the
        # `position` argument two lines below; preserved as-is to avoid a
        # behavior change -- confirm which source of `position` is intended.
        self.position = (normOpt['position'] if normOpt else None)
        self.in_shape = in_shape
        self.position = position
        self.acOpt = acOpt
        self.actnorm = models.modules.FlowActNorms.ActNorm2d(in_channels, actnorm_scale)
        if (flow_permutation == 'invconv'):
            self.invconv = models.modules.Permutations.InvertibleConv1x1(in_channels, LU_decomposed=LU_decomposed)
        if (flow_coupling == 'CondAffineSeparatedAndCond'):
            self.affine = models.modules.FlowAffineCouplingsAblation.CondAffineSeparatedAndCond(in_channels=in_channels, opt=opt)
        elif (flow_coupling == 'noCoupling'):
            pass
        else:
            raise RuntimeError('coupling not Found:', flow_coupling)

    def forward(self, input, logdet=None, reverse=False, rrdbResults=None):
        """Run the step forward (reverse=False) or inverted (reverse=True)."""
        if (not reverse):
            return self.normal_flow(input, logdet, rrdbResults)
        else:
            return self.reverse_flow(input, logdet, rrdbResults)

    def normal_flow(self, z, logdet, rrdbResults=None):
        """Forward direction: norm -> permutation -> coupling."""
        if (self.flow_coupling == 'bentIdentityPreAct'):
            (z, logdet) = self.bentIdentPar(z, logdet, reverse=False)
        if (self.norm_type == 'ConditionalActNormImageInjector'):
            img_ft = getConditional(rrdbResults, self.position)
            (z, logdet) = self.actnorm(z, img_ft=img_ft, logdet=logdet, reverse=False)
        elif (self.norm_type == 'noNorm'):
            pass
        else:
            (z, logdet) = self.actnorm(z, logdet=logdet, reverse=False)
        (z, logdet) = FlowStep.FlowPermutation[self.flow_permutation](self, z, logdet, False)
        need_features = self.affine_need_features()
        if (need_features or (self.flow_coupling in ['condAffine', 'condFtAffine', 'condNormAffine'])):
            img_ft = getConditional(rrdbResults, self.position)
            (z, logdet) = self.affine(input=z, logdet=logdet, reverse=False, ft=img_ft)
        return (z, logdet)

    def reverse_flow(self, z, logdet, rrdbResults=None):
        """Inverse direction: coupling -> permutation -> norm."""
        need_features = self.affine_need_features()
        if (need_features or (self.flow_coupling in ['condAffine', 'condFtAffine', 'condNormAffine'])):
            img_ft = getConditional(rrdbResults, self.position)
            (z, logdet) = self.affine(input=z, logdet=logdet, reverse=True, ft=img_ft)
        (z, logdet) = FlowStep.FlowPermutation[self.flow_permutation](self, z, logdet, True)
        (z, logdet) = self.actnorm(z, logdet=logdet, reverse=True)
        return (z, logdet)

    def affine_need_features(self):
        """Whether the coupling layer requires conditioning features.

        ``self.affine`` only exists for coupling types that create it, so the
        lookup is guarded. Bug fix: narrowed the original bare ``except:`` to
        ``AttributeError`` so unrelated errors are no longer swallowed.
        """
        need_features = False
        try:
            need_features = self.affine.need_features
        except AttributeError:
            pass
        return need_features
@pytest.mark.parametrize('connection_error,response_code,exception', [(True, 200, requests.exceptions.Timeout), (True, 200, requests.exceptions.ConnectionError), (False, 200, requests.exceptions.RequestException), (False, 200, ValueError), (True, 500, api.Non200ResponseException(mock.Mock(status_code=500))), (False, 400, api.Non200ResponseException(mock.Mock(status_code=400))), (False, 404, api.Non200ResponseException(mock.Mock(status_code=404))), (False, 200, api.InvalidMetadataException)])
def test_get_metadata_exception(connection_error, response_code, exception):
    """Any client-side failure makes the TUF metadata lookup return (None, None).

    NOTE(review): restored the missing `@pytest.mark` prefix on the
    parametrize decorator (a bare `.parametrize(...)` line is not valid
    Python) -- confirm against upstream.
    """
    app = Flask(__name__)
    app.config.from_object(testconfig.TestConfig())
    # The mocked client raises the parametrized exception on every request.
    request = mock.Mock(status_code=response_code)
    client = mock.Mock(request=request)
    client.request.side_effect = exception
    tuf_api = api.TUFMetadataAPI(app, app.config, client=client)
    (tags, expiration) = tuf_api.get_default_tags_with_expiration('quay', 'quay')
    assert (tags is None)
    assert (expiration is None)
def set_network(vm_server, vm, target_network):
    """Re-attach the VM's ethernet adapters to *target_network* (vSphere).

    Looks up the backing network object by name, builds an edit spec for each
    VirtualEthernetCard on the VM, and applies the change as a reconfigure
    task.

    NOTE(review): if *target_network* matches no network, ``backing_network``
    stays ``None`` and a found NIC is still rewired to a null backing.
    Also, ``nic`` is overwritten per card, so only the LAST ethernet card's
    spec is actually applied -- confirm whether multi-NIC VMs are expected.
    """
    nic = None
    backing_network = None
    # Resolve the target network object by exact name match.
    for network in vm_server.getObject(vim.Network):
        if (target_network == network.name):
            backing_network = network
            break
    # Build an edit spec for each ethernet card on the VM.
    for device in vm.vmObject.config.hardware.device:
        if isinstance(device, vim.vm.device.VirtualEthernetCard):
            nic = vim.vm.device.VirtualDeviceSpec()
            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
            nic.device = device
            nic.device.wakeOnLanEnabled = True
            nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
            nic.device.backing.network = backing_network
            nic.device.backing.deviceName = target_network
            nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
            nic.device.connectable.startConnected = True
            nic.device.connectable.allowGuestControl = True
    # Apply the reconfiguration and block until the task completes.
    if (nic is not None):
        config = vim.vm.ConfigSpec(deviceChange=[nic])
        task = vm.vmObject.ReconfigVM_Task(config)
        vm.waitForTask(task)
class encoder(nn.Module):
    """U-Net style encoder stage: double convolution followed by 2x2 max-pooling."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.down_conv = x2conv(in_channels, out_channels)
        # ceil_mode=True so odd spatial dimensions keep their last row/column.
        self.pool = nn.MaxPool2d(kernel_size=2, ceil_mode=True)

    def forward(self, x):
        return self.pool(self.down_conv(x))
def find_in_rally(clipinfo_data, rally_num, num_hit):
    """Locate the *num_hit*-th hit of rally *rally_num* and return its height class.

    Scans the per-row 'rally' sequence; ``shift_round`` re-bases rally counts
    that restart from 1 mid-clip (presumably a new game -- TODO confirm).
    Returns ``int(hit_height - 1)`` for the matched row, or ``None``
    implicitly when the rally/hit is never found.
    """
    cnt = 0
    shift_round = 0
    for i in range(len(clipinfo_data['rally'])):
        if ((clipinfo_data['rally'][i] + shift_round) == rally_num):
            cnt += 1
        # Rally numbering restarted at 1: remember the previous count as an
        # offset so the effective numbering stays monotonic.
        if ((clipinfo_data['rally'][i] == 1) and (i != 0) and (clipinfo_data['rally'][(i - 1)] != 1)):
            shift_round = clipinfo_data['rally'][(i - 1)]
        if (cnt == num_hit):
            # NOTE(review): uses hit_height[i-1] here but [i-2] in the branch
            # below -- looks like a deliberate off-by-one around the counted
            # hit, but verify against the data layout.
            return int((clipinfo_data['hit_height'][(i - 1)] - 1))
        elif ((cnt > 0) and ((clipinfo_data['rally'][i] + shift_round) != rally_num)):
            # Rally ended before reaching num_hit: fall back to its last hit.
            return int((clipinfo_data['hit_height'][(i - 2)] - 1))
class KnownValues(unittest.TestCase):
def test_hcore(self):
    """RHF.get_hcore matches the module-level reference, with and without ECP/k-point."""
    h1ref = pbchf.get_hcore(cell)
    h1 = pbchf.RHF(cell).get_hcore()
    self.assertAlmostEqual(abs((h1 - h1ref)).max(), 0, 9)
    self.assertAlmostEqual(lib.fp(h1), 0., 8)
    # Repeat with an ECP on He and a non-zero k-point.
    cell1 = cell.copy()
    cell1.ecp = {'He': (2, (((- 1), (((7.2, 0.3),),)),))}
    cell1.build(0, 0)
    kpt = (numpy.ones(3) * 0.5)
    h1ref = pbchf.get_hcore(cell1, kpt)
    h1 = pbchf.RHF(cell1).get_hcore(kpt=kpt)
    self.assertAlmostEqual(abs((h1 - h1ref)).max(), 0, 9)
    self.assertAlmostEqual(lib.fp(h1), ((- 2.) - 0.j), 8)
    # KRHF returns a stacked (nkpts, nao, nao) array.
    h1 = pscf.KRHF(cell1).get_hcore(kpts=[kpt])
    self.assertEqual(h1.ndim, 3)
    self.assertAlmostEqual(abs((h1[0] - h1ref)).max(), 0, 9)
def test_rhf_vcut_sph(self):
    """Gamma-point and random-k RHF with exxdiv='vcut_sph' agree with single-k KRHF."""
    mf = pbchf.RHF(cell, exxdiv='vcut_sph')
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 4.), 7)
    # Gamma point: orbitals are real.
    self.assertTrue((mf.mo_coeff.dtype == numpy.double))
    mf = pscf.KRHF(cell, [[0, 0, 0]], exxdiv='vcut_sph')
    e0 = mf.kernel()
    self.assertTrue(numpy.allclose(e0, e1))
    numpy.random.seed(1)
    k = numpy.random.random(3)
    mf = pbchf.RHF(cell, k, exxdiv='vcut_sph')
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 4.), 7)
    # Non-zero k-point: orbitals become complex.
    self.assertTrue((mf.mo_coeff.dtype == numpy.complex128))
    mf = pscf.KRHF(cell, k, exxdiv='vcut_sph')
    e0 = mf.kernel()
    self.assertTrue(numpy.allclose(e0, e1))
def test_rhf_exx_ewald(self):
    """Energies and band structures of the module-level mf/kmf fixtures agree."""
    # NOTE(review): relies on module-level `mf`/`kmf` objects prepared
    # elsewhere in this file (not visible in this chunk).
    self.assertAlmostEqual(mf.e_tot, (- 4.), 7)
    self.assertTrue((mf.mo_coeff.dtype == numpy.double))
    self.assertAlmostEqual(mf.e_tot, kmf.e_tot, 8)
    # Band energies at two random k-points must match between RHF and KRHF.
    numpy.random.seed(1)
    kpts_band = numpy.random.random((2, 3))
    (e1, c1) = mf.get_bands(kpts_band)
    (e0, c0) = kmf.get_bands(kpts_band)
    self.assertAlmostEqual(abs((e0[0] - e1[0])).max(), 0, 7)
    self.assertAlmostEqual(abs((e0[1] - e1[1])).max(), 0, 7)
    self.assertAlmostEqual(lib.fp(e1[0]), (- 6.), 7)
    self.assertAlmostEqual(lib.fp(e1[1]), (- 7.), 7)
def test_rhf_exx_ewald_with_kpt(self):
    """RHF at a random k-point with Ewald exx matches KRHF, including bands."""
    numpy.random.seed(1)
    k = numpy.random.random(3)
    mf = pbchf.RHF(cell, k, exxdiv='ewald')
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 4.), 7)
    # Non-zero k-point: orbitals are complex.
    self.assertTrue((mf.mo_coeff.dtype == numpy.complex128))
    kmf = pscf.KRHF(cell, k, exxdiv='ewald')
    e0 = kmf.kernel()
    self.assertTrue(numpy.allclose(e0, e1))
    # Band energies at one random k-point must also agree.
    numpy.random.seed(1)
    kpt_band = numpy.random.random(3)
    (e1, c1) = mf.get_bands(kpt_band)
    (e0, c0) = kmf.get_bands(kpt_band)
    self.assertAlmostEqual(abs((e0 - e1)).max(), 0, 7)
    self.assertAlmostEqual(lib.fp(e1), (- 6.), 7)
def test_rhf_exx_None(self):
    """RHF without exchange divergence treatment matches KRHF at gamma and random k."""
    mf = pbchf.RHF(cell, exxdiv=None)
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 2.), 7)
    self.assertTrue((mf.mo_coeff.dtype == numpy.double))
    mf = pscf.KRHF(cell, [[0, 0, 0]], exxdiv=None)
    e0 = mf.kernel()
    self.assertTrue(numpy.allclose(e0, e1))
    numpy.random.seed(1)
    k = numpy.random.random(3)
    mf = pbchf.RHF(cell, k, exxdiv=None)
    # Start both calculations from the same hcore guess for comparability.
    mf.init_guess = 'hcore'
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 2.), 7)
    self.assertTrue((mf.mo_coeff.dtype == numpy.complex128))
    mf = pscf.KRHF(cell, k, exxdiv=None)
    mf.init_guess = 'hcore'
    e0 = mf.kernel()
    self.assertTrue(numpy.allclose(e0, e1))
def test_init_guess_by_chkfile(self):
    """A second RHF initialized from the first run's chkfile reproduces the energy."""
    numpy.random.seed(1)
    k = numpy.random.random(3)
    mf = pbchf.RHF(cell, k, exxdiv='vcut_sph')
    mf.chkfile = tempfile.NamedTemporaryFile().name
    # Single cycle without DIIS: enough to write a checkpoint.
    mf.max_cycle = 1
    mf.diis = None
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 4.), 7)
    # Restart a fresh calculation from the saved chkfile.
    mf1 = pbchf.RHF(cell, exxdiv='vcut_sph')
    mf1.chkfile = mf.chkfile
    mf1.init_guess = 'chkfile'
    mf1.diis = None
    mf1.max_cycle = 1
    e1 = mf1.kernel()
    self.assertAlmostEqual(e1, (- 4.), 7)
    self.assertTrue((mf1.mo_coeff.dtype == numpy.double))
def test_uhf_exx_ewald(self):
    """UHF with Ewald exx matches KUHF at gamma and at a random k-point, including bands."""
    mf = pscf.UHF(cell, exxdiv='ewald')
    mf.init_guess = 'hcore'
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 4.), 7)
    # Gamma point: alpha orbitals are real.
    self.assertTrue((mf.mo_coeff[0].dtype == numpy.double))
    kmf = pscf.KUHF(cell, [[0, 0, 0]], exxdiv='ewald')
    kmf.init_guess = 'hcore'
    e0 = kmf.kernel()
    self.assertTrue(numpy.allclose(e0, e1))
    # Alpha/beta band energies at two random k-points must agree.
    numpy.random.seed(1)
    kpts_band = numpy.random.random((2, 3))
    (e1a, e1b) = mf.get_bands(kpts_band)[0]
    (e0a, e0b) = kmf.get_bands(kpts_band)[0]
    self.assertAlmostEqual(abs((e0a[0] - e1a[0])).max(), 0, 5)
    self.assertAlmostEqual(abs((e0a[1] - e1a[1])).max(), 0, 5)
    self.assertAlmostEqual(abs((e0b[0] - e1b[0])).max(), 0, 5)
    self.assertAlmostEqual(abs((e0b[1] - e1b[1])).max(), 0, 5)
    self.assertAlmostEqual(lib.fp(e1a[0]), (- 6.), 5)
    self.assertAlmostEqual(lib.fp(e1a[1]), (- 7.), 5)
    # Same checks at a random single k-point (complex orbitals).
    numpy.random.seed(1)
    k = numpy.random.random(3)
    mf = pscf.UHF(cell, k, exxdiv='ewald')
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 4.), 7)
    self.assertTrue((mf.mo_coeff[0].dtype == numpy.complex128))
    kmf = pscf.KUHF(cell, k, exxdiv='ewald')
    e0 = kmf.kernel()
    self.assertTrue(numpy.allclose(e0, e1))
    numpy.random.seed(1)
    kpts_band = numpy.random.random((2, 3))
    (e1a, e1b) = mf.get_bands(kpts_band)[0]
    (e0a, e0b) = kmf.get_bands(kpts_band)[0]
    self.assertAlmostEqual(abs((e0a[0] - e1a[0])).max(), 0, 5)
    self.assertAlmostEqual(abs((e0a[1] - e1a[1])).max(), 0, 5)
    self.assertAlmostEqual(abs((e0b[0] - e1b[0])).max(), 0, 5)
    self.assertAlmostEqual(abs((e0b[1] - e1b[1])).max(), 0, 5)
    self.assertAlmostEqual(lib.fp(e1a[0]), (- 6.), 5)
    self.assertAlmostEqual(lib.fp(e1a[1]), (- 6.), 5)
def test_ghf_exx_ewald(self):
    """GHF with exxdiv='ewald': single-cell energy must match KGHF, both at
    gamma (real orbitals) and at one random k-point (complex orbitals)."""
    mf = pscf.GHF(cell, exxdiv='ewald')
    mf.init_guess = 'hcore'
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 4.), 7)
    # Gamma-point orbitals should stay real.
    self.assertTrue((mf.mo_coeff.dtype == numpy.double))
    kmf = pscf.KGHF(cell, [[0, 0, 0]], exxdiv='ewald')
    kmf.init_guess = 'hcore'
    e0 = kmf.kernel()
    self.assertTrue(numpy.allclose(e0, e1))
    # Same check at a random single k-point.
    numpy.random.seed(1)
    k = numpy.random.random(3)
    mf = pscf.GHF(cell, k, exxdiv='ewald')
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 4.), 7)
    self.assertTrue((mf.mo_coeff.dtype == numpy.complex128))
    kmf = pscf.KGHF(cell, k, exxdiv='ewald')
    e0 = kmf.kernel()
    self.assertTrue(numpy.allclose(e0, e1))
def test_rhf_0d(self):
cell = pbcgto.Cell()
cell.build(unit='B', a=(numpy.eye(3) * 5), atom='He 2 2 2; He 2 2 3', dimension=0, verbose=0, basis={'He': [[0, (0.8, 1.0)], [0, (1.0, 1.0)], [0, (1.2, 1.0)]]})
mol = cell.to_mol()
mf = mol.RHF().run()
eref = mf.kernel()
mf = cell.RHF()
mf.with_df = pdf.AFTDF(cell)
e1 = mf.kernel()
self.assertAlmostEqual(eref, (- 4.), 8)
self.assertAlmostEqual(e1, eref, 4)
cell = pbcgto.Cell()
cell.atom = 'He 1. .5 .5; C .1 1.3 2.1'
cell.basis = {'He': [(0, (2.5, 1)), (0, (1.0, 1))], 'C': 'gth-szv'}
cell.pseudo = {'C': 'gth-pade', 'He': pbcgto.pseudo.parse('He\n 2\n 0. 3 -1. -0. 0.\n 2\n 0. 3 1. .855 .3\n .71 -1.1\n .9\n 0. 2 2. -0.\n 0.\n ')}
cell.a = numpy.eye(3)
cell.dimension = 0
cell.build()
mf = pscf.RHF(cell)
mf.with_df = pdf.AFTDF(cell)
mf.run()
mol = cell.to_mol()
mf1 = mol.RHF().run()
self.assertAlmostEqual(mf1.e_tot, (- 5.), 8)
self.assertAlmostEqual(mf1.e_tot, mf.e_tot, 4)
def test_rhf_1d(self):
    """RHF for a 1D-periodic He chain using AFTDF with the 'inf_vacuum'
    low-dimension Fourier-transform treatment."""
    L = 4
    cell = pbcgto.Cell()
    # Large lattice vectors in the non-periodic directions emulate vacuum.
    cell.build(unit='B', a=[[L, 0, 0], [0, (L * 5), 0], [0, 0, (L * 5)]], atom='He 2 0 0; He 3 0 0', dimension=1, low_dim_ft_type='inf_vacuum', verbose=0, rcut=7., basis={'He': [[0, (0.8, 1.0)], [0, (1.2, 1.0)]]})
    mf = pbchf.RHF(cell)
    mf.with_df = pdf.AFTDF(cell)
    mf.with_df.eta = 0.3
    mf.with_df.mesh = cell.mesh
    mf.init_guess = 'hcore'
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 3.), 6)
def test_rhf_2d(self):
    """RHF for a 2D-periodic He layer using AFTDF with 'inf_vacuum'."""
    L = 4
    cell = pbcgto.Cell()
    # Only the z direction is padded with vacuum.
    cell.build(unit='B', a=[[L, 0, 0], [0, L, 0], [0, 0, (L * 5)]], atom='He 2 0 0; He 3 0 0', dimension=2, low_dim_ft_type='inf_vacuum', verbose=0, rcut=7., basis={'He': [[0, (0.8, 1.0)], [0, (1.2, 1.0)]]})
    mf = pbchf.RHF(cell)
    mf.with_df = pdf.AFTDF(cell)
    mf.with_df.eta = 0.3
    mf.with_df.mesh = cell.mesh
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 3.), 5)
def test_rhf_2d_fft(self):
    """2D RHF with FFTDF: 'ewald' exx divergence treatment (with and
    without direct SCF) must agree; exxdiv=None gives a different energy."""
    L = 4
    cell = pbcgto.Cell()
    cell.build(unit='B', a=[[L, 0, 0], [0, L, 0], [0, 0, 10]], atom='He 2 0 0; He 3 0 0', dimension=2, verbose=0, basis={'He': [[0, (0.8, 1.0)], [0, (1.2, 1.0)]]})
    mf = pbchf.RHF(cell, exxdiv='ewald')
    mf.with_df = pdf.FFTDF(cell)
    mf.with_df.mesh = cell.mesh
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 3.), 7)
    # direct_scf must not change the converged energy.
    mf1 = pbchf.RHF(cell, exxdiv='ewald')
    mf1.with_df = pdf.FFTDF(cell)
    mf1.with_df.mesh = cell.mesh
    mf1.direct_scf = True
    e1 = mf1.kernel()
    self.assertAlmostEqual(e1, (- 3.), 7)
    # Without the exchange divergence correction the energy differs.
    mf2 = pbchf.RHF(cell, exxdiv=None)
    mf2.with_df = pdf.FFTDF(cell)
    mf2.with_df.mesh = cell.mesh
    mf2.direct_scf = True
    e2 = mf2.kernel()
    self.assertAlmostEqual(e2, (- 1.), 7)
def test_uhf_1d(self):
    """UHF analogue of test_rhf_1d: 1D He chain, AFTDF, 'inf_vacuum'."""
    L = 4
    cell = pbcgto.Cell()
    cell.build(unit='B', a=(numpy.eye(3) * 4), atom='He 2 0 0; He 3 0 0', dimension=1, low_dim_ft_type='inf_vacuum', verbose=0, rcut=7., basis={'He': [[0, (0.8, 1.0)], [0, (1.2, 1.0)]]})
    mf = pscf.UHF(cell)
    mf.with_df = pdf.AFTDF(cell)
    mf.with_df.eta = 0.3
    mf.with_df.mesh = cell.mesh
    mf.init_guess = 'hcore'
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 3.), 6)
def test_ghf_1d(self):
    """GHF analogue of test_rhf_1d: 1D He chain, AFTDF, 'inf_vacuum'."""
    L = 4
    cell = pbcgto.Cell()
    cell.build(unit='B', a=(numpy.eye(3) * 4), atom='He 2 0 0; He 3 0 0', dimension=1, low_dim_ft_type='inf_vacuum', verbose=0, rcut=7., basis={'He': [[0, (0.8, 1.0)], [0, (1.2, 1.0)]]})
    mf = pscf.GHF(cell)
    mf.with_df = pdf.AFTDF(cell)
    mf.with_df.eta = 0.3
    mf.with_df.mesh = cell.mesh
    mf.init_guess = 'hcore'
    e1 = mf.kernel()
    self.assertAlmostEqual(e1, (- 3.), 6)
def test_get_veff(self):
    """get_veff consistency: RHF and UHF effective potentials must agree for
    equivalent density matrices, for various kpt / kpts_band combinations."""
    mf = pscf.RHF(cell)
    numpy.random.seed(1)
    nao = cell.nao_nr()
    # A random Hermitian complex density matrix.
    dm = (numpy.random.random((nao, nao)) + (numpy.random.random((nao, nao)) * 1j))
    dm = (dm + dm.conj().T)
    v11 = mf.get_veff(cell, dm, kpt=cell.get_abs_kpts([0.25, 0.25, 0.25]))
    v12 = mf.get_veff(cell, dm, kpts_band=cell.get_abs_kpts([0.25, 0.25, 0.25]))
    v13 = mf.get_veff(cell, dm, kpt=cell.get_abs_kpts([((- 1.0) / 3), (1.0 / 3), 0.25]), kpts_band=cell.get_abs_kpts([0.25, 0.25, 0.25]))
    v14 = mf.get_veff(cell, dm, kpt=cell.get_abs_kpts([((- 1.0) / 3), (1.0 / 3), 0.25]), kpts_band=cell.make_kpts([2, 1, 1]))
    self.assertTrue((v11.dtype == numpy.complex128))
    self.assertTrue((v12.dtype == numpy.complex128))
    # UHF with the same total density split evenly between the spins must
    # reproduce the RHF potentials.
    mf = pscf.UHF(cell)
    v21 = mf.get_veff(cell, dm, kpt=cell.get_abs_kpts([0.25, 0.25, 0.25]))
    dm = [(dm * 0.5), (dm * 0.5)]
    v22 = mf.get_veff(cell, dm, kpts_band=cell.get_abs_kpts([0.25, 0.25, 0.25]))
    v23 = mf.get_veff(cell, dm, kpt=cell.get_abs_kpts([((- 1.0) / 3), (1.0 / 3), 0.25]), kpts_band=cell.get_abs_kpts([0.25, 0.25, 0.25]))
    v24 = mf.get_veff(cell, dm, kpt=cell.get_abs_kpts([((- 1.0) / 3), (1.0 / 3), 0.25]), kpts_band=cell.make_kpts([2, 1, 1]))
    self.assertAlmostEqual(abs((v11 - v21)).max(), 0, 9)
    self.assertAlmostEqual(abs((v12 - v22)).max(), 0, 9)
    self.assertAlmostEqual(abs((v13 - v23)).max(), 0, 9)
    self.assertAlmostEqual(abs((v14 - v24)).max(), 0, 9)
    self.assertAlmostEqual(lib.fp(v11), ((- 0.) + 0.j), 8)
    self.assertAlmostEqual(lib.fp(v12), ((- 2.) - 9.j), 8)
def test_init(self):
    """The SCF/KS factory functions must dispatch to the expected class:
    restricted flavors for a closed-shell cell, unrestricted/restricted
    open-shell flavors when cell.spin is non-zero."""
    from pyscf.pbc import dft
    cell_u = cell.copy()
    cell_u.spin = 2
    # (factory, molecule-like input, expected concrete class) -- checked in
    # the same order as constructed.
    cases = (
        (pscf.RKS, cell, dft.rks.RKS),
        (pscf.RKS, cell_u, dft.roks.ROKS),
        (pscf.UKS, cell, dft.uks.UKS),
        (pscf.ROKS, cell, dft.roks.ROKS),
        (pscf.KS, cell, dft.rks.RKS),
        (pscf.KS, cell_u, dft.uks.UKS),
        (pscf.KRKS, cell, dft.krks.KRKS),
        (pscf.KRKS, cell_u, dft.krks.KRKS),
        (pscf.KUKS, cell, dft.kuks.KUKS),
        (pscf.KROKS, cell, dft.kroks.KROKS),
        (pscf.KKS, cell, dft.krks.KRKS),
        (pscf.KKS, cell_u, dft.kuks.KUKS),
        (pscf.RHF, cell, pscf.hf.RHF),
        (pscf.RHF, cell_u, pscf.rohf.ROHF),
        (pscf.KRHF, cell, pscf.khf.KRHF),
        (pscf.KRHF, cell_u, pscf.khf.KRHF),
        (pscf.UHF, cell, pscf.uhf.UHF),
        (pscf.KUHF, cell_u, pscf.kuhf.KUHF),
        (pscf.GHF, cell, pscf.ghf.GHF),
        (pscf.KGHF, cell_u, pscf.kghf.KGHF),
        (pscf.ROHF, cell, pscf.rohf.ROHF),
        (pscf.ROHF, cell_u, pscf.rohf.ROHF),
        (pscf.KROHF, cell, pscf.krohf.KROHF),
        (pscf.KROHF, cell_u, pscf.krohf.KROHF),
        (pscf.HF, cell, pscf.hf.RHF),
        (pscf.HF, cell_u, pscf.uhf.UHF),
        (pscf.KHF, cell, pscf.khf.KRHF),
        (pscf.KHF, cell_u, pscf.kuhf.KUHF),
    )
    for factory, target_cell, expected_cls in cases:
        self.assertTrue(isinstance(factory(target_cell), expected_cls))
def test_makov_payne_correction(self):
    """Makov-Payne finite-size correction, dipole gauge-origin search and
    dipole moment on the shared module-level mean-field object."""
    from pyscf.pbc.dft import gen_grid
    de = pbchf.makov_payne_correction(mf)
    # All three returned correction variants should coincide here.
    self.assertAlmostEqual(de[0], (- 0.), 2)
    self.assertAlmostEqual(de[0], de[1], 7)
    self.assertAlmostEqual(de[0], de[2], 7)
    dm = mf.make_rdm1()
    grids = gen_grid.UniformGrids(cell)
    rho = pscf.hf.get_rho(mf, dm, grids)
    log = lib.logger.new_logger(mf)
    center = pscf.hf._search_dipole_gauge_origin(cell, grids, rho, log)
    self.assertAlmostEqual(abs((center - [1.75, 2, 2])).max(), 0, 2)
    dip = mf.dip_moment(cell, dm)
    self.assertAlmostEqual(abs(dip).max(), 0, 1)
def test_init_guess_by_1e(self):
    """The '1e' (core-Hamiltonian) initial guess must give the same
    fingerprint from the single-point and k-point mean-field objects."""
    dm = mf.get_init_guess(key='1e')
    self.assertAlmostEqual(lib.fp(dm), 0., 6)
    dm = kmf.get_init_guess(key='1e')
    # k-point code returns one density matrix per k-point.
    self.assertEqual(dm.ndim, 3)
    self.assertAlmostEqual(lib.fp(dm), 0., 6)
def test_init_guess_by_atom(self):
    """The 'minao' atomic initial guess, evaluated with the cell temporarily
    treated as 1D-periodic; single-point and k-point guesses must agree."""
    with lib.temporary_env(cell, dimension=1):
        dm = mf.get_init_guess(key='minao')
        kdm = kmf.get_init_guess(key='minao')
    self.assertAlmostEqual(lib.fp(dm), (- 1.), 8)
    self.assertEqual(kdm.ndim, 3)
    self.assertAlmostEqual(lib.fp(kdm), (- 1.), 8)
def test_jk(self):
    """Incore J/K builder must reproduce the reference get_jk result for a
    random pair of Hermitian complex density matrices."""
    nao = cell.nao
    numpy.random.seed(2)
    dm = (numpy.random.random((2, nao, nao)) + (0.5j * numpy.random.random((2, nao, nao))))
    # Hermitize each of the two matrices.
    dm = (dm + dm.conj().transpose(0, 2, 1))
    ref = pbchf.get_jk(mf, cell, dm)
    (vj, vk) = mf.get_jk_incore(cell, dm)
    self.assertAlmostEqual(abs((vj - ref[0])).max(), 0, 9)
    self.assertAlmostEqual(abs((vk - ref[1])).max(), 0, 9)
def test_analyze(self):
    """Population analysis on the shared mean-field object: fingerprint of
    the populations and (near-)zero net atomic charges."""
    (rpop, rchg) = mf.analyze()[0]
    self.assertAlmostEqual(lib.fp(rpop), 0.0110475, 4)
    self.assertAlmostEqual(abs(rchg).max(), 0, 7)
def find_all_documented_objects():
    """Collect the short names of every object documented under PATH_TO_DOC.

    Scans ``*.rst`` files for ``autoclass``/``autofunction`` directives and
    ``*.mdx`` files for ``[[autodoc]]`` markers, and returns the final
    dotted-path component of each documented object (duplicates kept).
    """
    # (glob pattern, capture regex) pairs, scanned in this order.
    sources = (
        ('**/*.rst', '(?:autoclass|autofunction):: transformers.(\\S+)\\s+'),
        ('**/*.mdx', '\\[\\[autodoc\\]\\]\\s+(\\S+)\\s+'),
    )
    documented_obj = []
    for glob_pattern, regex in sources:
        for doc_file in Path(PATH_TO_DOC).glob(glob_pattern):
            # newline='\n' keeps the file content byte-exact (no newline
            # translation) so the regexes see the raw text.
            with open(doc_file, 'r', encoding='utf-8', newline='\n') as f:
                content = f.read()
            documented_obj += [match.split('.')[-1] for match in re.findall(regex, content)]
    return documented_obj
class CheckTypes(RPathTest):
    """Check that RPath correctly identifies file types via lstat.

    BUG FIX: the skip conditions for testFifo/testCharDev/testBlockDev were
    bare tuple expressions (stripped decorators), so they never skipped
    anything; restored as unittest.skipIf/skipUnless decorators.
    """

    def testExist(self):
        """lstat succeeds for an existing path and fails for a bogus one."""
        self.assertTrue(rpath.RPath(self.lc, self.prefix, ()).lstat())
        self.assertFalse(rpath.RPath(self.lc, 'asuthasetuouo', ()).lstat())

    def testDir(self):
        """isdir is true for the test prefix, false for a regular file."""
        self.assertTrue(rpath.RPath(self.lc, self.prefix, ()).isdir())
        self.assertFalse(rpath.RPath(self.lc, self.prefix, ('regular_file',)).isdir())

    def testSym(self):
        """issym is true only for the symbolic link fixture."""
        self.assertTrue(rpath.RPath(self.lc, self.prefix, ('symbolic_link',)).issym())
        self.assertFalse(rpath.RPath(self.lc, self.prefix, ()).issym())

    def testReg(self):
        """isreg is true for a regular file, false for a symlink."""
        self.assertTrue(rpath.RPath(self.lc, self.prefix, ('regular_file',)).isreg())
        self.assertFalse(rpath.RPath(self.lc, self.prefix, ('symbolic_link',)).isreg())

    @unittest.skipIf((os.name == 'nt'), "Fifo don't exist under Windows")
    def testFifo(self):
        """isfifo is true only for the fifo fixture."""
        self.assertTrue(rpath.RPath(self.lc, self.prefix, ('fifo',)).isfifo())
        self.assertFalse(rpath.RPath(self.lc, self.prefix, ()).isfifo())

    @unittest.skipUnless((os.path.exists('/dev/null') or os.path.exists('/dev/zero')), 'Test requires /dev/null or /dev/zero')
    def testCharDev(self):
        """ischardev is true for a character device node."""
        if os.path.exists('/dev/null'):
            self.assertTrue(rpath.RPath(self.lc, '/dev/null', ()).ischardev())
        else:
            self.assertTrue(rpath.RPath(self.lc, '/dev/zero', ()).ischardev())
        self.assertFalse(rpath.RPath(self.lc, self.prefix, ('regular_file',)).ischardev())

    @unittest.skipUnless((os.path.exists('/dev/sda') or os.path.exists('/dev/nvme0n1')), 'Test requires either /dev/sda or /dev/nvme0n1')
    def testBlockDev(self):
        """isblkdev is true for a block device node."""
        if os.path.exists('/dev/sda'):
            self.assertTrue(rpath.RPath(self.lc, '/dev/sda', ()).isblkdev())
        else:
            self.assertTrue(rpath.RPath(self.lc, '/dev/nvme0n1', ()).isblkdev())
        self.assertFalse(rpath.RPath(self.lc, self.prefix, ('regular_file',)).isblkdev())
def combine(img_file, mask_file, class_name_list='VOC', include_color0=False, has_legend=True):
    """Overlay a palette-mode segmentation mask onto an image.

    Pixels where the mask index is 0 (background) are fully transparent;
    other classes are drawn with alpha 180 over the dimmed (alpha 150)
    source image. When ``has_legend`` is true a legend strip is rendered
    above the image. Returns an RGB PIL image.
    """
    base = Image.open(img_file)
    mask_p = Image.open(mask_file)
    index_arr = np.array(mask_p)
    # Alpha plane: transparent where background, 180 everywhere else.
    alpha_arr = np.where(np.equal(index_arr, 0), 0, 180)
    mask_alpha = Image.fromarray(alpha_arr.astype(np.uint8), mode='L')
    palette = get_palette(mask_p, class_name_list, include_color0)
    if has_legend:
        mask_colors = get_mask_colors(mask_p)
        (legends, off_height) = get_legends(base, mask_colors, palette)
        canvas = Image.new('RGBA', (base.width, (base.height + off_height)))
        draw_legend(canvas, legends)
    else:
        off_height = 0
        canvas = Image.new('RGBA', (base.width, (base.height + off_height)))
    # Dim the source image, then composite it below the translucent mask.
    base = base.convert('RGBA')
    base.putalpha(150)
    canvas.alpha_composite(base, (0, off_height))
    mask_rgba = mask_p.convert('RGBA')
    mask_rgba.putalpha(mask_alpha)
    canvas.alpha_composite(mask_rgba, (0, off_height))
    return canvas.convert('RGB')
class DiscriminatorBlock(chainer.Chain):
    """Discriminator stage: two 3x3 stride-1 pad-1 convolutions with leaky
    ReLU activations, followed by 2x2 average pooling (halves resolution).

    When ``sn`` is true, spectrally-normalized convolutions are used.
    """

    def __init__(self, in_ch, out_ch, initialW, sn=True):
        super(DiscriminatorBlock, self).__init__()
        conv_cls = SNConvolution2D if sn else L.Convolution2D
        with self.init_scope():
            self.c0 = conv_cls(in_ch, in_ch, 3, 1, 1, initialW=initialW)
            self.c1 = conv_cls(in_ch, out_ch, 3, 1, 1, initialW=initialW)

    def __call__(self, x):
        out = F.leaky_relu(self.c0(x))
        out = F.leaky_relu(self.c1(out))
        return F.average_pooling_2d(out, 2, 2, 0)
@pytest.mark.asyncio(scope='class')
class TestClassScopedLoop():
    """Verify that a class-scoped asyncio fixture and the tests share one
    event loop.

    BUG FIX: the decorators had been mangled into the bare expressions
    ``.asyncio(scope='class')`` (a syntax error) and
    ``_asyncio.fixture(scope='class')``; restored as
    ``@pytest.mark.asyncio(scope='class')`` and
    ``@pytest_asyncio.fixture(scope='class')``.
    """
    # Event loop captured by the fixture for comparison in the test.
    loop: asyncio.AbstractEventLoop

    @pytest_asyncio.fixture(scope='class')
    async def my_fixture(self):
        TestClassScopedLoop.loop = asyncio.get_running_loop()

    async def test_runs_is_same_loop_as_fixture(self, my_fixture):
        assert (asyncio.get_running_loop() is TestClassScopedLoop.loop)
@pytest.mark.parametrize('func', [qutip.spin_state, partial(qutip.spin_coherent, phi=0.5)])
def test_spin_output(func):
    """Spin-state constructors honor the ``type`` keyword and reject
    unknown values.

    BUG FIX: the parametrize decorator had been mangled into the bare
    expression ``.parametrize(...)``; restored as
    ``@pytest.mark.parametrize`` so ``func`` is actually supplied.
    """
    assert qutip.isket(func(1.0, 0, type='ket'))
    assert qutip.isbra(func(1.0, 0, type='bra'))
    assert qutip.isoper(func(1.0, 0, type='dm'))
    with pytest.raises(ValueError) as e:
        func(1.0, 0, type='something')
    assert str(e.value).startswith('Invalid value keyword argument')
class ArchCheckerReportConstants():
    """String constants used to build architecture-checker reports."""

    OP_STRUCT_OP_TYPE = 'OpStructure'

    # Report / CSV column names.
    DF_GRAPH_NODENAME = 'Graph/Layer_name'
    DF_ISSUE = 'Issue'
    DF_RECOMM = 'Recommendation'

    # Fallback messages for checks without an entry in ERR_MSG_DICT;
    # formatted with the check name.
    UNDEFINED_ISSUE = 'Undefined issue from check: {}'
    UNDEFINED_RECOMM = 'Undefined recommendation from check: {}'

    OUTPUT_CSV_HEADER = [DF_GRAPH_NODENAME, DF_ISSUE, DF_RECOMM]

    # Maps each check name to its issue/recommendation text;
    # '_activation_checks' nests one entry per activation op type.
    ERR_MSG_DICT = {
        '_check_conv_channel_32_base': {
            DF_ISSUE: 'The channel size of input/output tensor of this convolution is not a multiple of 32',
            DF_RECOMM: 'Try adjusting the channels to multiple of 32 to get better performance.',
        },
        '_check_conv_channel_larger_than_32': {
            DF_ISSUE: 'The channel size of input/output tensor of this convolution is smaller than 32',
            DF_RECOMM: 'Try adjusting the channels to multiple of 32 to get better performance.',
        },
        '_activation_checks': {
            'PRelu': {
                DF_ISSUE: 'PRelu activation function degenerates performance.',
                DF_RECOMM: 'Try use Relu instead.',
            },
            'SiLU': {
                DF_ISSUE: 'SiLU (Swish) activation function degenerates performance.',
                DF_RECOMM: 'Try use Hard SiLU (hardswish) instaed.',
            },
        },
        '_check_batch_norm_fold': {
            DF_ISSUE: 'The batch norm layer cannot be folded to immediate conv/linear layer. Quantizing standalone BN can degenerate performance.',
            DF_RECOMM: 'Try remove the standalone BN or move the BN adjacent to Conv.',
        },
        '_check_intermediate_padding': {
            DF_ISSUE: 'This convolution includes intermediate padding that degenerates performance.',
            DF_RECOMM: 'Try move all padding to the first convolution in the sequence: [Conv -> Activation -> (Optionally) BN -> Conv].',
        },
        '_check_foldable_bn_with_split': {
            DF_ISSUE: 'This structure: (conv1, conv2, ...) -> split_node(concat) -> BN degenerates performance',
            DF_RECOMM: 'Try transform the structure so that BN can be folded to conv.',
        },
    }
class Graph():
    """Skeleton adjacency-matrix builder.

    Relies on module-level globals defined elsewhere in this file:
    ``num_node``, ``self_link``, ``inward``, ``outward``, ``neighbor`` and
    the reduced-joint variants ``self_link13``/``inward13``/``outward13``
    and ``self_link9``/``inward9``/``outward9``, plus the ``tools`` module.
    """
    def __init__(self, labeling_mode='spatial'):
        # NOTE(review): A is computed before the attributes below are set;
        # the 'spatial' path reads only module-level globals, so this works.
        self.A = self.get_adjacency_matrix(labeling_mode)
        self.num_node = num_node
        self.self_link = self_link
        self.inward = inward
        self.outward = outward
        self.neighbor = neighbor
    def get_adjacency_matrix(self, labeling_mode=None):
        """Return the spatial adjacency tensor; only 'spatial' is supported.

        With labeling_mode=None, returns whatever ``self.A`` currently holds.
        """
        if (labeling_mode is None):
            return self.A
        if (labeling_mode == 'spatial'):
            A = tools.get_spatial_graph(num_node, self_link, inward, outward)
        else:
            raise ValueError()
        return A
    def get_A2515(self):
        """Build a normalized 18x13 mapping matrix that pools the full joint
        set down to 13 joints (column i aggregates the listed source joints)."""
        A_15 = np.zeros((18, 13))
        # Row indices (source joints) contributing to each of the 13 targets.
        NTU_link_neighber = [[0, 14, 15, 16, 17], [1, 0, 5, 2], [2, 1, 3], [3, 2], [4], [5, 1, 6], [6, 5], [7], [8, 11], [9, 10], [10], [11, 12], [13]]
        for i in range(13):
            index = NTU_link_neighber[i]
            A_15[(index, i)] = 1
        A_15 = tools.normalize_digraph(A_15)
        return A_15
    def get_A159(self):
        """Build a normalized 13x9 mapping matrix pooling 13 joints to 9."""
        A_9 = np.zeros((13, 9))
        NTU_link_neighber = [[0, 1, 2, 5, 8], [5, 6, 7], [7], [2, 3, 4], [4], [8, 11, 12], [12], [8, 9, 10], [10]]
        for i in range(9):
            index = NTU_link_neighber[i]
            A_9[(index, i)] = 1
        A_9 = tools.normalize_digraph(A_9)
        return A_9
    def get_A15(self):
        """Spatial adjacency tensor for the 13-joint reduced skeleton."""
        A = tools.get_spatial_graph(13, self_link13, inward13, outward13)
        return A
    def get_A9(self):
        """Spatial adjacency tensor for the 9-joint reduced skeleton."""
        A = tools.get_spatial_graph(9, self_link9, inward9, outward9)
        return A
class ClassificationEvaluator(object):
    """Evaluator for (optionally hierarchical, optionally multi-label)
    classification: builds per-level confusion matrices and precision /
    recall / F1 (macro and micro averaged), and can write them to eval_dir.

    BUG FIX: ``_calculate_prf``, ``_judge_label_in`` and
    ``save_confusion_matrix`` take no ``self`` parameter yet were invoked
    through ``self.`` — the bound call passed the instance as the first
    positional argument and raised TypeError. Restored the missing
    ``@staticmethod`` decorators.
    """
    MACRO_AVERAGE = 'macro_average'
    MICRO_AVERAGE = 'micro_average'

    def __init__(self, eval_dir):
        """Create an evaluator writing its reports under ``eval_dir``."""
        # Per-level results, populated by evaluate().
        self.confusion_matrix_list = None
        self.precision_list = None
        self.recall_list = None
        self.fscore_list = None
        self.right_list = None
        self.predict_list = None
        self.standard_list = None
        self.eval_dir = eval_dir
        if (not os.path.exists(self.eval_dir)):
            os.makedirs(self.eval_dir)

    @staticmethod
    def _calculate_prf(right_count, predict_count, standard_count):
        """Return (precision, recall, f1); zero denominators give zero metrics."""
        (precision, recall, f_score) = (0, 0, 0)
        if (predict_count > 0):
            precision = (right_count / predict_count)
        if (standard_count > 0):
            recall = (right_count / standard_count)
        if ((precision + recall) > 0):
            f_score = (((precision * recall) * 2) / (precision + recall))
        return (precision, recall, f_score)

    @staticmethod
    def _judge_label_in(label_name, label_to_id_maps):
        """True iff every label in ``label_name`` occurs in at least one map."""
        cnt = 0
        for label in label_name:
            for i in range(0, len(label_to_id_maps)):
                if (label in label_to_id_maps[i]):
                    cnt += 1
                    break
        return (cnt == len(label_name))

    def calculate_level_performance(self, id_to_label_map, right_count_category, predict_count_category, standard_count_category, other_text='', exclude_method='contain'):
        """Compute per-label and macro/micro averaged P/R/F1 for one level.

        Labels matching ``other_text`` (by containment or prefix, per
        ``exclude_method``) are excluded from the averages. Mutates the
        count dicts by adding a MICRO_AVERAGE entry. Returns
        (precision_dict, recall_dict, fscore_dict).
        """
        # Collect labels to exclude from the averages.
        other_label = dict()
        for (_, label_name) in id_to_label_map.items():
            if (exclude_method == 'contain'):
                if (other_text in label_name):
                    other_label[label_name] = 1
            elif (exclude_method == 'start'):
                if label_name.startswith(other_text):
                    other_label[label_name] = 1
            else:
                raise TypeError(('Cannot find exclude_method: ' + exclude_method))
        precision_dict = dict()
        recall_dict = dict()
        fscore_dict = dict()
        precision_dict[self.MACRO_AVERAGE] = 0
        recall_dict[self.MACRO_AVERAGE] = 0
        fscore_dict[self.MACRO_AVERAGE] = 0
        right_total = 0
        predict_total = 0
        standard_total = 0
        for (_, label_name) in id_to_label_map.items():
            if (label_name in other_label):
                continue
            (precision_dict[label_name], recall_dict[label_name], fscore_dict[label_name]) = self._calculate_prf(right_count_category[label_name], predict_count_category[label_name], standard_count_category[label_name])
            right_total += right_count_category[label_name]
            predict_total += predict_count_category[label_name]
            standard_total += standard_count_category[label_name]
            precision_dict[self.MACRO_AVERAGE] += precision_dict[label_name]
            recall_dict[self.MACRO_AVERAGE] += recall_dict[label_name]
            fscore_dict[self.MACRO_AVERAGE] += fscore_dict[label_name]
        num_label_eval = (len(id_to_label_map) - len(other_label))
        precision_dict[self.MACRO_AVERAGE] = (precision_dict[self.MACRO_AVERAGE] / num_label_eval)
        recall_dict[self.MACRO_AVERAGE] = (recall_dict[self.MACRO_AVERAGE] / num_label_eval)
        # NOTE: macro F1 here is the harmonic mean of macro P and macro R,
        # not the mean of per-label F1 scores.
        fscore_dict[self.MACRO_AVERAGE] = (0 if ((recall_dict[self.MACRO_AVERAGE] + precision_dict[self.MACRO_AVERAGE]) == 0) else (((2 * precision_dict[self.MACRO_AVERAGE]) * recall_dict[self.MACRO_AVERAGE]) / (recall_dict[self.MACRO_AVERAGE] + precision_dict[self.MACRO_AVERAGE])))
        right_count_category[self.MICRO_AVERAGE] = right_total
        predict_count_category[self.MICRO_AVERAGE] = predict_total
        standard_count_category[self.MICRO_AVERAGE] = standard_total
        (precision_dict[self.MICRO_AVERAGE], recall_dict[self.MICRO_AVERAGE], fscore_dict[self.MICRO_AVERAGE]) = self._calculate_prf(right_total, predict_total, standard_total)
        return (precision_dict, recall_dict, fscore_dict)

    def evaluate(self, predicts, standard_label_names=None, standard_label_ids=None, label_map=None, threshold=0, top_k=3, is_prob=True, is_flat=False, is_multi=False, other_text=''):
        """Evaluate predictions against gold labels.

        predicts: per-sample probability vectors (is_prob=True) or label-name
        lists (is_prob=False). Gold labels come from standard_label_names or
        standard_label_ids. For hierarchical labels (is_flat=False) metrics
        are additionally computed per hierarchy level. Writes raw
        probabilities to 'probs.txt' as a side effect and stores the
        results on the instance. Returns (confusion_matrix_list,
        precision_list, recall_list, fscore_list, right/predict/standard
        count lists), each indexed by level (index 0 = all labels).
        """
        def _init_confusion_matrix(label_map):
            # label -> label -> 0, over all known labels of a level.
            confusion_matrix = dict()
            for label_name in label_map.keys():
                confusion_matrix[label_name] = dict()
                for label_name_other in label_map.keys():
                    confusion_matrix[label_name][label_name_other] = 0
            return confusion_matrix

        def _init_count_dict(label_map):
            count_dict = dict()
            for label_name in label_map.keys():
                count_dict[label_name] = 0
            return count_dict

        assert ((standard_label_names is not None) or (standard_label_ids is not None))
        sep = cDataset.CLASSIFICATION_LABEL_SEPARATOR
        depth = 0
        # Without probabilities we can reconstruct the label map from gold labels.
        if ((not is_prob) and (label_map is None)):
            assert (standard_label_names is not None)
            label_map = dict()
            for label_list in standard_label_names:
                for label in label_list:
                    if (label not in label_map):
                        label_map[label] = len(label_map)
        if (not is_flat):
            for label in label_map.keys():
                hierarchical_labels = label.split(sep)
                depth = max(len(hierarchical_labels), depth)
        # Index 0 holds the full label set; indices 1..depth hold one map
        # per hierarchy level.
        label_to_id_maps = []
        id_to_label_maps = []
        for i in range((depth + 1)):
            label_to_id_maps.append(dict())
            id_to_label_maps.append(dict())
        for (label_name, label_id) in label_map.items():
            label_to_id_maps[0][label_name] = label_id
            id_to_label_maps[0][label_id] = label_name
            if (not is_flat):
                hierarchical_labels = label_name.split(sep)
                for i in range(1, (len(hierarchical_labels) + 1)):
                    label = sep.join(hierarchical_labels[:i])
                    if (label not in label_to_id_maps[i]):
                        index = len(label_to_id_maps[i])
                        label_to_id_maps[i][label] = index
                        id_to_label_maps[i][index] = label
        confusion_matrix_list = []
        right_category_count_list = []
        predict_category_count_list = []
        standard_category_count_list = []
        for i in range((depth + 1)):
            confusion_matrix_list.append(_init_confusion_matrix(label_to_id_maps[i]))
            right_category_count_list.append(_init_count_dict(label_to_id_maps[i]))
            predict_category_count_list.append(_init_count_dict(label_to_id_maps[i]))
            standard_category_count_list.append(_init_count_dict(label_to_id_maps[i]))
        line_count = 0
        debug_file = open('probs.txt', 'w', encoding=cDataset.CHARSET)
        for predict in predicts:
            if is_prob:
                prob_np = np.array(predict, dtype=np.float32)
                if (not is_multi):
                    # Single-label: take the arg-max class.
                    predict_label_ids = [prob_np.argmax()]
                else:
                    # Multi-label: keep the top_k classes above the threshold.
                    predict_label_ids = []
                    predict_label_idx = np.argsort((- prob_np))
                    for j in range(0, top_k):
                        if (prob_np[predict_label_idx[j]] > threshold):
                            predict_label_ids.append(predict_label_idx[j])
                predict_label_name = [id_to_label_maps[0][predict_label_id] for predict_label_id in predict_label_ids]
                debug_file.write(json.dumps(prob_np.tolist()))
                debug_file.write('\n')
            else:
                predict_label_name = predict
            if (standard_label_names is not None):
                standard_label_name = standard_label_names[line_count]
            else:
                standard_label_name = [id_to_label_maps[0][standard_label_ids[line_count][i]] for i in range(len(standard_label_ids[line_count]))]
            # Skip samples whose labels are unknown to the label maps.
            if ((not self._judge_label_in(predict_label_name, label_to_id_maps)) or (not self._judge_label_in(standard_label_name, label_to_id_maps))):
                line_count += 1
                continue
            for std_name in standard_label_name:
                for pred_name in predict_label_name:
                    confusion_matrix_list[0][std_name][pred_name] += 1
            for pred_name in predict_label_name:
                predict_category_count_list[0][pred_name] += 1
            for std_name in standard_label_name:
                standard_category_count_list[0][std_name] += 1
                for pred_name in predict_label_name:
                    if (std_name == pred_name):
                        right_category_count_list[0][pred_name] += 1
            if (not is_flat):
                # Expand each label into its per-level prefixes and count
                # the same statistics per hierarchy level.
                standard_hierarchical_labels = [std_name.split(sep) for std_name in standard_label_name]
                predict_hierarchical_labels = [pred_name.split(sep) for pred_name in predict_label_name]
                standard_label_map = {}
                predict_label_map = {}
                for std_label in standard_hierarchical_labels:
                    for i in range(0, len(std_label)):
                        if ((i + 1) not in standard_label_map):
                            standard_label_map[(i + 1)] = set()
                        standard_label_map[(i + 1)].add(sep.join(std_label[:(i + 1)]))
                for pred_label in predict_hierarchical_labels:
                    for i in range(0, len(pred_label)):
                        if ((i + 1) not in predict_label_map):
                            predict_label_map[(i + 1)] = set()
                        predict_label_map[(i + 1)].add(sep.join(pred_label[:(i + 1)]))
                for (level, std_label_set) in standard_label_map.items():
                    for std_label in std_label_set:
                        standard_category_count_list[level][std_label] += 1
                for (level, pred_label_set) in predict_label_map.items():
                    for pred_label in pred_label_set:
                        predict_category_count_list[level][pred_label] += 1
                for (level, std_label_set) in standard_label_map.items():
                    for std_label in std_label_set:
                        if (level in predict_label_map):
                            for pred_label in predict_label_map[level]:
                                confusion_matrix_list[level][std_label][pred_label] += 1
                                if (std_label == pred_label):
                                    right_category_count_list[level][pred_label] += 1
            line_count += 1
        debug_file.close()
        precision_list = []
        recall_list = []
        fscore_list = []
        (precision_dict, recall_dict, fscore_dict) = self.calculate_level_performance(id_to_label_maps[0], right_category_count_list[0], predict_category_count_list[0], standard_category_count_list[0], exclude_method='start')
        precision_list.append(precision_dict)
        recall_list.append(recall_dict)
        fscore_list.append(fscore_dict)
        for i in range(1, (depth + 1)):
            (precision_dict, recall_dict, fscore_dict) = self.calculate_level_performance(id_to_label_maps[i], right_category_count_list[i], predict_category_count_list[i], standard_category_count_list[i], other_text)
            precision_list.append(precision_dict)
            recall_list.append(recall_dict)
            fscore_list.append(fscore_dict)
        (self.confusion_matrix_list, self.precision_list, self.recall_list, self.fscore_list, self.right_list, self.predict_list, self.standard_list) = (confusion_matrix_list, precision_list, recall_list, fscore_list, right_category_count_list, predict_category_count_list, standard_category_count_list)
        return (confusion_matrix_list, precision_list, recall_list, fscore_list, right_category_count_list, predict_category_count_list, standard_category_count_list)

    @staticmethod
    def save_confusion_matrix(file_name, confusion_matrix):
        """Write a tab-separated confusion matrix (rows = gold, cols = predicted)."""
        with open(file_name, 'w', encoding=cDataset.CHARSET) as cm_file:
            cm_file.write('\t')
            for category_fist in sorted(confusion_matrix.keys()):
                cm_file.write((category_fist + '\t'))
            cm_file.write('\n')
            for category_fist in sorted(confusion_matrix.keys()):
                cm_file.write((category_fist + '\t'))
                for category_second in sorted(confusion_matrix.keys()):
                    cm_file.write((str(confusion_matrix[category_fist][category_second]) + '\t'))
                cm_file.write('\n')

    def save_prf(self, file_name, precision_category, recall_category, fscore_category, right_category, predict_category, standard_category):
        """Write macro/micro averages followed by per-category P/R/F1 lines."""
        def _format(category):
            if (category == self.MACRO_AVERAGE):
                return ('%s, precision: %f, recall: %f, fscore: %f, ' % (category, precision_category[category], recall_category[category], fscore_category[category]))
            return ('%s, precision: %f, recall: %f, fscore: %f, right_count: %d, predict_count: %d, standard_count: %d' % (category, precision_category[category], recall_category[category], fscore_category[category], right_category[category], predict_category[category], standard_category[category]))
        with open(file_name, 'w', encoding=cDataset.CHARSET) as prf_file:
            prf_file.write((_format(self.MACRO_AVERAGE) + '\n'))
            prf_file.write((_format(self.MICRO_AVERAGE) + '\n'))
            prf_file.write('\n')
            for category in precision_category:
                if ((category != self.MICRO_AVERAGE) and (category != self.MACRO_AVERAGE)):
                    prf_file.write((_format(category) + '\n'))

    def save(self):
        """Dump the stored results of the last evaluate() call to eval_dir."""
        for (i, confusion_matrix) in enumerate(self.confusion_matrix_list):
            if (i == 0):
                eval_name = 'all'
            else:
                eval_name = ('level_%s' % i)
            self.save_confusion_matrix((((self.eval_dir + '/') + eval_name) + '_confusion_matrix'), confusion_matrix)
            self.save_prf((((self.eval_dir + '/') + eval_name) + '_prf'), self.precision_list[i], self.recall_list[i], self.fscore_list[i], self.right_list[i], self.predict_list[i], self.standard_list[i])
class _IdentityExpBase(_AlgebraicExpBase):
    """Frozen algebraic expression composed of zero or more member
    expressions joined by ``_operator`` (overridden by subclasses).

    An empty member tuple represents the identity element. Equality and
    hashing treat the members as an unordered set.
    """
    # Display separator between members in repr(); subclasses override.
    _operator = ' ? '
    def __init__(self, members=()):
        super().__init__(template=None)
        self.members = tuple(members)
        # Freeze after construction; the instance is immutable from here on.
        super()._freeze_()
    # NOTE(review): kind/name look like they may have been @property in the
    # original source (they take no arguments and just read state) -- confirm
    # against callers before relying on call vs. attribute access.
    def kind(self):
        # The identity (no members) reports the literal kind 'identity';
        # otherwise defer to the first member's kind.
        if (not self.members):
            return 'identity'
        return self.members[0].kind
    def name(self):
        # Composite expressions carry no name of their own.
        return ''
    def __eq__(self, other):
        # Same concrete type and the same member set (order-insensitive).
        return ((type(self) is type(other)) and (set(self.members) == set(other.members)))
    def __hash__(self):
        # Consistent with __eq__: combine the type with the member set.
        return (hash(type(self)) ^ hash(frozenset(self.members)))
    def __repr__(self):
        if (not self.members):
            return (self.__class__.__name__ + '()')
        return self._operator.join((repr(m) for m in self.members))
    def __iter__(self):
        # unpack_union() is provided by the base class / subclasses.
        for m in self.unpack_union():
            (yield from m)
    def iter_symbols(self):
        """Yield the symbols of every unpacked member expression."""
        for m in self.unpack_union():
            (yield from m.iter_symbols())
    def get_union_membership(self):
        """Delegate to the first member; implicitly None when empty."""
        if self.members:
            return self.members[0].get_union_membership()
class ServoCalibration(object):
    """Drives a servo calibration routine in a background thread, exposing
    run/raw_command/console values through the servo's server registry.

    BUG FIX in poll(): restarting the calibration used
    ``self.thread = thread.start()`` -- ``thread`` was an undefined name,
    and a finished threading.Thread cannot be restarted anyway. A fresh
    Thread is now created (mirroring __init__) and started.
    """

    def __init__(self, servo):
        self.server = servo.server
        self.run = self.Register(BooleanProperty, 'run', False)
        self.rawcommand = self.Register(SensorValue, 'raw_command')
        self.console = self.Register(Value, 'console', '')
        self.current_total = self.voltage_total = (0, 0)
        self.servo = servo
        self.thread = threading.Thread(target=ServoCalibrationThread, args=(self,))
        self.rawcommand.set(0)

    def raw_command(self, value):
        # NOTE(review): poll() later rebinds self.raw_command to a plain
        # value, shadowing this method on the instance; preserved as-is.
        self.rawcommand.set(value)

    def Register(self, _type, name, *args, **kwargs):
        """Register a value under the 'servo.calibration.' namespace."""
        return self.server.Register(_type(*([('servo.calibration.' + name)] + list(args)), **kwargs))

    def fault(self):
        """True when an overcurrent/fault flag is set or the servo disengaged."""
        # NOTE(review): 'FALTPIN' looks like a typo for 'FAULTPIN' in
        # ServoFlags -- confirm against the flags definition.
        return (((ServoFlags.OVERCURRENT_FAULT | ServoFlags.FALTPIN) & self.servo.flags.value) or (not self.servo.engaged.value))

    def poll(self):
        """Advance the calibration: (re)start the worker thread on demand,
        abort on external commands, and log voltage/current samples."""
        if (not self.thread.is_alive()):
            if self.run.value:
                self.force_engaged = True
                self.command = self.servo.command.value
                self.raw_command = self.servo.raw_command.value
                self.servo.brake_hack.set(False)
                self.log = []
                self.state = 0
                # BUG FIX: create and start a new worker thread; the old
                # code assigned the undefined name 'thread' and a finished
                # Thread object cannot be restarted.
                self.thread = threading.Thread(target=ServoCalibrationThread, args=(self,))
                self.thread.start()
            else:
                return
        if ((not self.run.value) or self.ap.enabled.value):
            # NOTE(review): threading.Thread has no exit(); presumably the
            # worker cooperates via a custom mechanism -- confirm.
            self.thread.exit()
            return
        if (self.command != self.servo.command.value):
            console('servo command received, aborting')
            console('ensure the autopilot is not active and')
            console('no manual servo commands during calibration!')
            self.command = self.servo.command.value
            self.thread.exit(0)
        # Sample voltage/current for the calibration log.
        self.log.append([self.servo.voltage.value, self.servo.current.value, self.servo.current.time])
        # Clear a direction fault once commanded in the opposite direction.
        if (self.fwd_fault and (self.rawcommand.value < 0)):
            self.fwd_fault = False
        elif (self.rev_fault and (self.rawcommand.value > 0)):
            self.rev_fault = False
        self.servo.engage()
        self.servo.raw_command(self.rawcommand.value)

    def stop(self):
        """Request the calibration thread to terminate, if running."""
        if self.thread.is_alive():
            self.thread.exit()
def test_monitor():
    """CallMonitor records call count, args, kwargs and return values while
    leaving the monitored method's underlying function untouched."""
    class Probe():
        def foo(self, n, y=None):
            self.n = n
            return y

    probe = Probe()
    unwrapped = probe.foo.__func__
    with CallMonitor(probe.foo) as monitor:
        assert (probe.foo(1, y='a') == 'a')
        assert (probe.foo(2) is None)
    # The class method object itself must not have been replaced.
    assert (probe.foo.__func__ is unwrapped)
    # Side effects of the monitored calls still happen.
    assert (probe.n == 2)
    assert (monitor.times_called == 2)
    first, second = monitor.calls[0], monitor.calls[1]
    assert (first.args == (1,))
    assert (first.kwargs == {'y': 'a'})
    assert (first.return_value == 'a')
    assert (second.args == (2,))
    assert (second.kwargs == {})
    assert (second.return_value is None)
def clean_room_edges(all_room_edges):
    """Clean per-room edge loops: fuse colinear edges, snap near-duplicate
    corners together, and drop very short edges.

    all_room_edges: list indexed by room; each entry is a sequence of edges,
    an edge being a (start_corner, end_corner) pair forming a closed loop.
    Returns the (mutated) list of cleaned room edge loops.
    """
    # Pass 0: map every corner to the rooms whose refined path contains it,
    # so later passes only delete corners private to a single room.
    refined_room_paths = [_extract_room_path(room_edges) for room_edges in all_room_edges]
    corner_to_room = defaultdict(list)
    for (room_idx, room_path) in enumerate(refined_room_paths):
        for corner in room_path:
            corner_to_room[corner].append(room_idx)
    # Pass 1: merge colinear adjacent edge pairs whose shared corner belongs
    # only to this room (removing it cannot affect another room's loop).
    for (room_idx, room_edges) in enumerate(all_room_edges):
        cp_room_edges = list(room_edges)
        rm_flag = True
        while rm_flag:  # restart the scan after every removal
            rm_flag = False
            for (edge_i, edge) in enumerate(cp_room_edges):
                # Compare against the previous edge (index -1 wraps around).
                prev_i = (edge_i - 1)
                prev_edge = cp_room_edges[prev_i]
                if _check_colinear(prev_edge, edge):
                    rm_candidate = edge[0]  # corner shared by prev_edge and edge
                    if ((len(corner_to_room[rm_candidate]) == 1) and (corner_to_room[rm_candidate][0] == room_idx)):
                        cp_room_edges[prev_i] = (prev_edge[0], edge[1])  # fuse the pair
                        rm_flag = True
                        cp_room_edges.pop(edge_i)
                        break
                # Compare against the next edge (wrap to 0 at the end).
                next_i = ((edge_i + 1) if (edge_i < (len(cp_room_edges) - 1)) else 0)
                next_edge = cp_room_edges[next_i]
                if _check_colinear(next_edge, edge):
                    rm_candidate = edge[1]  # corner shared by edge and next_edge
                    if ((len(corner_to_room[rm_candidate]) == 1) and (corner_to_room[rm_candidate][0] == room_idx)):
                        cp_room_edges[next_i] = (edge[0], next_edge[1])
                        rm_flag = True
                        cp_room_edges.pop(edge_i)
                        break
        if (len(cp_room_edges) != len(room_edges)):
            all_room_edges[room_idx] = cp_room_edges
    # Pass 2: repeatedly merge clusters of corners flagged by
    # find_corners_to_merge, moving every corner in a cluster onto the one
    # shared by the most rooms.
    corner_to_room = get_corner_to_room(all_room_edges)
    all_corners = list(corner_to_room.keys())
    corners_to_merge = find_corners_to_merge(all_corners)
    while (corners_to_merge is not None):
        num_aff = [len(corner_to_room[x]) for x in corners_to_merge]
        order = np.argsort(num_aff)[::(- 1)]  # most-shared corner first
        base_corner = corners_to_merge[order[0]]
        for corner in corners_to_merge:
            if (corner == base_corner):
                continue
            all_room_edges = move_corner(corner, base_corner, corner_to_room, all_room_edges)
        # Re-derive the corner index and look for the next mergeable cluster.
        corner_to_room = get_corner_to_room(all_room_edges)
        all_corners = list(corner_to_room.keys())
        corners_to_merge = find_corners_to_merge(all_corners)
    # Pass 3: drop edges of length <= 5 (units per len_edge; presumably
    # pixels -- confirm) by fusing them into a neighbour, but only when the
    # disappearing corner is private to this room.
    for (room_idx, room_edges) in enumerate(all_room_edges):
        cp_room_edges = list(room_edges)
        rm_flag = True
        while rm_flag:
            rm_flag = False
            for (edge_i, edge) in enumerate(cp_room_edges):
                len_e = len_edge(edge)
                if (len_e <= 5):
                    if (len(corner_to_room[edge[0]]) == 1):
                        # Start corner is private: extend the previous edge over it.
                        prev_i = (edge_i - 1)
                        prev_edge = cp_room_edges[prev_i]
                        cp_room_edges[prev_i] = (prev_edge[0], edge[1])
                        rm_flag = True
                        cp_room_edges.pop(edge_i)
                        break
                    elif (len(corner_to_room[edge[1]]) == 1):
                        # End corner is private: extend the next edge backwards.
                        # NOTE(review): unlike the branch above, there is no
                        # `break` here, so the scan continues over a mutated
                        # list -- confirm this is intentional.
                        next_i = ((edge_i + 1) if (edge_i < (len(cp_room_edges) - 1)) else 0)
                        next_edge = cp_room_edges[next_i]
                        cp_room_edges[next_i] = (edge[0], next_edge[1])
                        rm_flag = True
                        cp_room_edges.pop(edge_i)
                    else:
                        continue
        if (len(cp_room_edges) != len(room_edges)):
            all_room_edges[room_idx] = cp_room_edges
    return all_room_edges
.functions
(df=categoricaldf_strategy())
def test_all_cat_None_2(df):
    """encode_categorical(names='appearance') should make the `names` column
    an ordered categorical whose category order is first-appearance order.

    NOTE(review): the decorator lines above this function appear garbled in
    this chunk (likely `@pytest.mark.functions` and
    `@given(df=categoricaldf_strategy())`) -- confirm against the original.
    """
    result = df.encode_categorical(names='appearance')
    # factorize returns (codes, uniques); [-1] selects the uniques, which with
    # sort=False are in order of first appearance.
    categories = pd.CategoricalDtype(categories=df.names.factorize(sort=False)[(- 1)], ordered=True)
    expected = df.astype({'names': categories})
    assert expected['names'].equals(result['names'])
class LIPSegmentation(SegmentationDataset):
    """Look Into Person (LIP) human-parsing segmentation dataset.

    Expects `root` to contain 'TrainVal_images', 'TrainVal_parsing_annotations'
    and 'Testing_images' directories with per-split id list files.
    """
    BASE_DIR = 'LIP'
    NUM_CLASS = 20  # background + 19 body-part / clothing categories (see classes())

    def __init__(self, root='datasets/LIP', split='train', mode=None, transform=None, **kwargs):
        """Collect image (and, except for 'test', mask) file paths for `split`.

        split: one of 'train', 'val', 'test'.
        Raises RuntimeError on an unknown split; asserts each listed file exists.
        """
        super(LIPSegmentation, self).__init__(root, split, mode, transform, **kwargs)
        _trainval_image_dir = os.path.join(root, 'TrainVal_images')
        _testing_image_dir = os.path.join(root, 'Testing_images')
        _trainval_mask_dir = os.path.join(root, 'TrainVal_parsing_annotations')
        if (split == 'train'):
            _image_dir = os.path.join(_trainval_image_dir, 'train_images')
            _mask_dir = os.path.join(_trainval_mask_dir, 'train_segmentations')
            _split_f = os.path.join(_trainval_image_dir, 'train_id.txt')
        elif (split == 'val'):
            _image_dir = os.path.join(_trainval_image_dir, 'val_images')
            _mask_dir = os.path.join(_trainval_mask_dir, 'val_segmentations')
            _split_f = os.path.join(_trainval_image_dir, 'val_id.txt')
        elif (split == 'test'):
            # The test split ships without parsing annotations.
            _image_dir = os.path.join(_testing_image_dir, 'testing_images')
            _split_f = os.path.join(_testing_image_dir, 'test_id.txt')
        else:
            raise RuntimeError('Unknown dataset split.')
        self.images = []  # absolute .jpg paths
        self.masks = []   # absolute .png paths (stays empty for 'test')
        with open(os.path.join(_split_f), 'r') as lines:
            for line in lines:
                _image = os.path.join(_image_dir, (line.rstrip('\n') + '.jpg'))
                assert os.path.isfile(_image)
                self.images.append(_image)
                if (split != 'test'):
                    _mask = os.path.join(_mask_dir, (line.rstrip('\n') + '.png'))
                    assert os.path.isfile(_mask)
                    self.masks.append(_mask)
        if (split != 'test'):
            assert (len(self.images) == len(self.masks))
        print('Found {} {} images in the folder {}'.format(len(self.images), split, root))

    def __getitem__(self, index):
        """Return (img, filename) in 'test' mode, else (img, mask, filename)."""
        img = Image.open(self.images[index]).convert('RGB')
        if (self.mode == 'test'):
            img = self._img_transform(img)
            if (self.transform is not None):
                img = self.transform(img)
            return (img, os.path.basename(self.images[index]))
        mask = Image.open(self.masks[index])
        # Apply the mode-appropriate joint image/mask transform.
        if (self.mode == 'train'):
            (img, mask) = self._sync_transform(img, mask)
        elif (self.mode == 'val'):
            (img, mask) = self._val_sync_transform(img, mask)
        else:
            assert (self.mode == 'testval')
            (img, mask) = (self._img_transform(img), self._mask_transform(mask))
        if (self.transform is not None):
            img = self.transform(img)
        return (img, mask, os.path.basename(self.images[index]))

    def __len__(self):
        return len(self.images)

    def _mask_transform(self, mask):
        # Convert the class-index mask to an int64 tensor for loss computation.
        target = np.array(mask).astype('int32')
        return torch.from_numpy(target).long()

    def classes(self):
        """Category names, index-aligned with the mask label values.

        NOTE(review): similar dataset classes usually expose this as a
        @property -- confirm how callers access it before changing.
        """
        return ('background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes', 'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt', 'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe', 'rightShoe')
def _build_mlp(nlayers, in_dim, bottleneck_dim, hidden_dim=None, use_bn=False, bias=True):
if (nlayers == 1):
return nn.Linear(in_dim, bottleneck_dim, bias=bias)
else:
layers = [nn.Linear(in_dim, hidden_dim, bias=bias)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range((nlayers - 2)):
layers.append(nn.Linear(hidden_dim, hidden_dim, bias=bias))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim, bias=bias))
return nn.Sequential(*layers) |
def extract_flavors_and_pens(penalties, penalty_keys, force_base=False, restrict=None):
    """Extract flavors plus, for each flavor key, its parent's penalty.

    Returns (flavors, flavor_keys, parent_penalties), where parent_penalties
    is aligned with flavor_keys via get_parent_key lookups.
    """
    key_to_penalty = dict(zip(penalty_keys, penalties))
    flavors, flavor_keys = extract_flavors(penalties=penalties, keys=penalty_keys, force_base=force_base, restrict=restrict)
    parent_penalties = [key_to_penalty[get_parent_key(flav_key)] for flav_key in flavor_keys]
    return (flavors, flavor_keys, parent_penalties)
def main():
    """CLI entry point: preprocess the ACE event dataset into per-fold JSON."""
    parser = argparse.ArgumentParser(description='Preprocess ACE event data.')
    parser.add_argument('output_name', help='Name for output directory.')
    parser.add_argument('--use_span_extent', action='store_true', help='Use full extent of entity mentions instead of just heads.')
    parser.add_argument('--include_times_and_values', action='store_true', help='Treat times and values as entities and include them as event arguments.')
    parser.add_argument('--include_pronouns', action='store_true', help='Include pronouns as entities and include them as event arguments.')
    args = parser.parse_args()
    # All folds are written beneath a directory named after this run.
    output_dir = f'./data/ace-event/processed-data/{args.output_name}/json'
    os.makedirs(output_dir, exist_ok=True)
    heads_only = not args.use_span_extent
    real_entities_only = not args.include_times_and_values
    for fold in ('train', 'dev', 'test'):
        print(f'Parsing {fold} set.')
        one_fold(fold, output_dir, heads_only=heads_only, real_entities_only=real_entities_only, include_pronouns=args.include_pronouns)
.repeat(2)
.parametrize('superrep', ['choi', 'super'])
def test_rand_super(dimensions, dtype, superrep):
    """rand_super should yield a CPTP superoperator with the requested
    representation and metadata.

    NOTE(review): the decorator lines above this function appear garbled in
    this chunk (likely pytest repeat/parametrize decorators) -- confirm
    against the original source.
    """
    random_qobj = rand_super(dimensions, dtype=dtype, superrep=superrep)
    assert random_qobj.issuper
    # The CPTP check needs a loosened tolerance for randomly generated maps.
    with CoreOptions(atol=1e-09):
        assert random_qobj.iscptp
    assert (random_qobj.superrep == superrep)
    _assert_metadata(random_qobj, dimensions, dtype, super=True)
class AliasMethod(nn.Module):
    """Walker's alias method for O(1) sampling from a discrete distribution.

    __init__ builds the `prob`/`alias` tables once; draw() then samples each
    index with one uniform column pick plus one Bernoulli flip.
    """
    def __init__(self, probs):
        super(AliasMethod, self).__init__()
        # NOTE(review): normalizes (in place!) only when the sum exceeds 1; a
        # distribution summing to less than 1 is left as-is -- confirm inputs.
        if (probs.sum() > 1):
            probs.div_(probs.sum())
        K = len(probs)
        # prob[k]: probability of keeping column k; alias[k]: fallback index.
        self.register_buffer('prob', torch.zeros(K))
        self.register_buffer('alias', torch.LongTensor(([0] * K)))
        # Partition the scaled probabilities (K * p) into under-full (< 1)
        # and over-full (>= 1) columns.
        smaller = []
        larger = []
        for (kk, prob) in enumerate(probs):
            self.prob[kk] = (K * prob)
            if (self.prob[kk] < 1.0):
                smaller.append(kk)
            else:
                larger.append(kk)
        # Pair each under-full column with an over-full one, transferring the
        # deficit, until one of the worklists empties.
        while ((len(smaller) > 0) and (len(larger) > 0)):
            small = smaller.pop()
            large = larger.pop()
            self.alias[small] = large
            self.prob[large] = ((self.prob[large] - 1.0) + self.prob[small])
            if (self.prob[large] < 1.0):
                smaller.append(large)
            else:
                larger.append(large)
        # Whatever remains is exactly full (up to floating-point error).
        for last_one in (smaller + larger):
            self.prob[last_one] = 1

    def draw(self, N):
        """Draw N indices (LongTensor) distributed according to `probs`."""
        K = self.alias.size(0)
        # Pick a column uniformly; keep it with prob[k], else use its alias.
        kk = torch.zeros(N, dtype=torch.long, device=self.prob.device).random_(0, K)
        prob = self.prob.index_select(0, kk)
        alias = self.alias.index_select(0, kk)
        b = torch.bernoulli(prob)
        oq = kk.mul(b.long())
        oj = alias.mul((1 - b).long())
        return (oq + oj)
class logRegClassificationEvaluator(Evaluator):
    """Evaluate a sentence encoder with a logistic-regression probe.

    Sentences are embedded via `model.encode`, a LogisticRegression classifier
    is fit on the train embeddings, and accuracy / macro-F1 (plus average
    precision for binary tasks) are computed on the test embeddings.
    """

    def __init__(self, sentences_train, y_train, sentences_test, y_test, max_iter=100, batch_size=32, limit=None, **kwargs):
        """Store the (optionally truncated) train/test split and batching options.

        limit: if given, keep only the first `limit` train and test examples.
        """
        super().__init__(**kwargs)
        if (limit is not None):
            sentences_train = sentences_train[:limit]
            y_train = y_train[:limit]
            sentences_test = sentences_test[:limit]
            y_test = y_test[:limit]
        self.sentences_train = sentences_train
        self.y_train = y_train
        self.sentences_test = sentences_test
        self.y_test = y_test
        self.args = kwargs['args']
        self.max_iter = max_iter
        # A positive command-line batch size overrides the constructor default.
        if (self.args.batch_size > 0):
            self.batch_size = self.args.batch_size
        else:
            self.batch_size = batch_size

    def __call__(self, model, test_cache=None):
        """Run the probe and return (scores, test_cache).

        test_cache: pre-computed test-set embeddings from a previous call.
        BUG FIX: the original accepted and returned this argument but always
        re-encoded the test sentences; a provided cache now skips that pass.
        """
        print('use logRegClassificationEvaluator')
        scores = {}
        clf = LogisticRegression(random_state=self.seed, n_jobs=(- 1), max_iter=self.max_iter, verbose=(1 if logger.isEnabledFor(logging.DEBUG) else 0))
        if self.args.prompt:
            # Prepend the task-definition prompt to every sentence.
            # NOTE(review): this rewrites self.sentences_* in place, so a
            # second call on the same instance would wrap the prompts twice --
            # confirm the evaluator is only invoked once per instance.
            new_sentences = []
            print('with prompt')
            for s in self.sentences_train:
                new_sentences.append([DEFINITIONS[self.args.prompt][self.args.task_name], s, 0])
            self.sentences_train = new_sentences
            new_sentences = []
            print('with prompt')
            for s in self.sentences_test:
                new_sentences.append([DEFINITIONS[self.args.prompt][self.args.task_name], s, 0])
            self.sentences_test = new_sentences
        logger.info(f'Encoding {len(self.sentences_train)} training sentences...')
        X_train = np.asarray(model.encode(self.sentences_train, batch_size=self.batch_size))
        if test_cache is None:
            logger.info(f'Encoding {len(self.sentences_test)} test sentences...')
            X_test = np.asarray(model.encode(self.sentences_test, batch_size=self.batch_size))
            test_cache = X_test
        else:
            logger.info('Using cached test sentence embeddings...')
            X_test = test_cache
        logger.info('Fitting logistic regression classifier...')
        clf.fit(X_train, self.y_train)
        logger.info('Evaluating...')
        y_pred = clf.predict(X_test)
        accuracy = accuracy_score(self.y_test, y_pred)
        f1 = f1_score(self.y_test, y_pred, average='macro')
        scores['accuracy'] = accuracy
        scores['f1'] = f1
        # Average precision is only meaningful for binary label sets.
        if (len(np.unique(self.y_train)) == 2):
            ap = average_precision_score(self.y_test, y_pred)
            scores['ap'] = ap
        return (scores, test_cache)
def test_complex_cepstrum():
    """The complex cepstrum of a sawtooth should peak at the fundamental period."""
    sample_rate = 8000.0
    duration = 5.0
    num_samples = int(sample_rate * duration)
    time = np.arange(num_samples) / sample_rate
    f0 = 100.0
    waveform = sawtooth(2.0 * np.pi * f0 * time)
    cepstrum, _ = complex_cepstrum(waveform)
    # The quefrency of the cepstral peak corresponds to the fundamental.
    assert f0 == 1.0 / time[cepstrum.argmax()]
def test_search_paths(temp_dir, helpers):
    """A relative 'search-paths' entry should let the versioned module import
    a sibling module from the project root when computing __version__."""
    source = CodeSource(str(temp_dir), {'path': 'a/b.py', 'search-paths': ['.']})
    parent_dir = (temp_dir / 'a')
    parent_dir.mkdir()
    (parent_dir / '__init__.py').touch()
    # b.py derives __version__ via a helper imported from sibling module a.c.
    (parent_dir / 'b.py').write_text(helpers.dedent("\n from a.c import foo\n\n __version__ = foo((1, 0, 0, 1, 'dev0'))\n "))
    (parent_dir / 'c.py').write_text(helpers.dedent("\n def foo(version_info):\n return '.'.join(str(part) for part in version_info)\n "))
    # The search path is relative, so resolution must happen from the cwd.
    with temp_dir.as_cwd():
        assert (source.get_version_data()['version'] == '1.0.0.1.dev0')
def pytest_addoption(parser):
    """Register pytest-xdist's command-line options and ini settings.

    Adds worker-count, distribution-mode, execution-environment and rsync
    options to the 'xdist' option group, plus the rsync/looponfail ini keys.
    """
    group = parser.getgroup('xdist', 'distributed and subprocess testing')
    # Worker-count options.
    group._addoption('-n', '--numprocesses', dest='numprocesses', metavar='numprocesses', action='store', type=parse_numprocesses, help="Shortcut for '--dist=load --tx=NUM*popen'. With 'auto', attempt to detect physical CPU count. With 'logical', detect logical CPU count. If physical CPU count cannot be found, falls back to logical count. This will be 0 when used with --pdb.")
    group.addoption('--maxprocesses', dest='maxprocesses', metavar='maxprocesses', action='store', type=int, help='limit the maximum number of workers to process the tests when using --numprocesses=auto')
    group.addoption('--max-worker-restart', action='store', default=None, dest='maxworkerrestart', help='maximum number of workers that can be restarted when crashed (set to zero to disable this feature)')
    # Distribution mode and execution environments.
    group.addoption('--dist', metavar='distmode', action='store', choices=['each', 'load', 'loadscope', 'loadfile', 'loadgroup', 'worksteal', 'no'], dest='dist', default='no', help="set mode for distributing tests to exec environments.\n\neach: send each test to all available environments.\n\nload: load balance by sending any pending test to any available environment.\n\nloadscope: load balance by sending pending groups of tests in the same scope to any available environment.\n\nloadfile: load balance by sending test grouped by file to any available environment.\n\nloadgroup: like load, but sends tests marked with 'xdist_group' to the same worker.\n\nworksteal: split the test suite between available environments, then rebalance when any worker runs out of tests.\n\n(default) no: run tests inprocess, don't distribute.")
    group.addoption('--tx', dest='tx', action='append', default=[], metavar='xspec', help='add a test execution environment. some examples: --tx popen//python=python2.5 --tx socket=192.168.1.102:8888 --tx ssh=//chdir=testcache')
    group._addoption('-d', action='store_true', dest='distload', default=False, help="load-balance tests. shortcut for '--dist=load'")
    # rsync configuration for remote workers.
    group.addoption('--rsyncdir', action='append', default=[], metavar='DIR', help='add directory for rsyncing to remote tx nodes.')
    group.addoption('--rsyncignore', action='append', default=[], metavar='GLOB', help='add expression for ignores when rsyncing to remote tx nodes.')
    group.addoption('--testrunuid', action='store', help="provide an identifier shared amongst all workers as the value of the 'testrun_uid' fixture,\n\n,if not provided, 'testrun_uid' is filled with a new unique string on every test run.")
    group.addoption('--maxschedchunk', action='store', type=int, help='Maximum number of tests scheduled in one step for --dist=load. Setting it to 1 will force pytest to send tests to workers one by one - might be useful for a small number of slow tests. Larger numbers will allow the scheduler to submit consecutive chunks of tests to workers - allows reusing fixtures. Due to implementation reasons, at least 2 tests are scheduled per worker at the start. Only later tests can be scheduled one by one. Unlimited if not set.')
    # Ini-file settings ('paths' type requires pytest >= 7).
    parser.addini('rsyncdirs', 'list of (relative) paths to be rsynced for remote distributed testing.', type=('paths' if PYTEST_GTE_7 else 'pathlist'))
    parser.addini('rsyncignore', 'list of (relative) glob-style paths to be ignored for rsyncing.', type=('paths' if PYTEST_GTE_7 else 'pathlist'))
    parser.addini('looponfailroots', type=('paths' if PYTEST_GTE_7 else 'pathlist'), help='directories to check for changes. Default: current directory.')
class Logic():
    """Resolver logic state.

    Wraps the game description and configuration, tracks per-node additional
    requirements learned during resolution, and provides indented debug
    logging of the resolver's search.
    """
    game: GameDescription
    configuration: BaseConfiguration
    # Extra requirements per node, indexed by node_index, refined as the
    # resolver discovers dead ends.
    additional_requirements: list[RequirementSet]
    _attempts: int  # number of resolver attempts so far
    _current_indent: int = 0  # current search depth, used for log indentation
    _last_printed_additional: dict[(Node, RequirementSet)]  # dedupes skip logs

    def __init__(self, game: GameDescription, configuration: BaseConfiguration):
        self.game = game
        self.configuration = configuration
        # Start with the trivial (always satisfied) requirement for every node.
        self.additional_requirements = ([RequirementSet.trivial()] * len(game.region_list.all_nodes))

    def get_additional_requirements(self, node: Node) -> RequirementSet:
        return self.additional_requirements[node.node_index]

    def set_additional_requirements(self, node: Node, req: RequirementSet):
        self.additional_requirements[node.node_index] = req

    def victory_condition(self, state: State) -> Requirement:
        # State is unused here; subclasses may specialize per-state conditions.
        return self.game.victory_condition

    def _indent(self, offset=0):
        # Indentation string for the current depth, optionally reduced by offset.
        return (' ' * (self._current_indent - offset))

    def get_attempts(self) -> int:
        return self._attempts

    def resolver_start(self):
        """Reset the attempt counter and logging state for a fresh resolve."""
        self._attempts = 0
        self._current_indent = 0
        self._last_printed_additional = {}

    def start_new_attempt(self, state: State, max_attempts: (int | None)):
        """Count one attempt (raising past max_attempts) and log the step."""
        if ((max_attempts is not None) and (self._attempts >= max_attempts)):
            raise ResolverTimeoutError(f'Timed out after {max_attempts} attempts')
        self._attempts += 1
        self._current_indent += 1
        if (debug.debug_level() > 0):
            region_list = state.region_list
            resources = []
            # Describe what this node collects, if it is a resource node.
            if isinstance(state.node, ResourceNode):
                context_state = (state.previous_state or state)
                for (resource, quantity) in state.node.resource_gain_on_collect(context_state.node_context()):
                    text = f'{resource.resource_type.name[0]}: {resource.long_name}'
                    if (quantity > 1):
                        text += f' x{quantity}'
                    resources.append(text)
            # At high verbosity, also print the path taken to reach this node.
            if (debug.debug_level() >= 3):
                for node in state.path_from_previous_state[1:]:
                    debug.print_function(f'{self._indent(1)}: {n(node, region_list=region_list)}')
            debug.print_function(f'{self._indent(1)}> {n(state.node, region_list=region_list)}{energy_string(state)} for {resources}')

    def log_checking_satisfiable_actions(self, state: State, actions: list[tuple[(ResourceNode, int)]]):
        """Debug-print the actions currently considered satisfiable."""
        if (debug.debug_level() > 1):
            debug.print_function(f'{self._indent()}# Satisfiable Actions')
            for (action, _) in actions:
                debug.print_function(f'{self._indent((- 1))}= {n(action, region_list=state.region_list)}')

    def log_rollback(self, state: State, has_action, possible_action: bool, additional_requirements: (RequirementSet | None)=None):
        """Debug-print a rollback, then decrease the indentation level."""
        if (debug.debug_level() > 0):
            show_reqs = ((debug.debug_level() > 1) and (additional_requirements is not None))
            debug.print_function('{}* Rollback {}; Had action? {}; Possible Action? {}{}'.format(self._indent(), n(state.node, region_list=state.region_list), has_action, possible_action, ('; Additional Requirements:' if show_reqs else '')))
            if show_reqs:
                self.print_requirement_set(additional_requirements, (- 1))
        self._current_indent -= 1

    def log_skip_action_missing_requirement(self, node: Node, game: GameDescription):
        """Debug-print that `node` was skipped, avoiding repeated requirement dumps."""
        if (debug.debug_level() > 1):
            requirement_set = self.get_additional_requirements(node)
            # Only print the full requirement set when it changed since last time.
            if ((node in self._last_printed_additional) and (self._last_printed_additional[node] == requirement_set)):
                debug.print_function(f'{self._indent()}* Skip {n(node, region_list=game.region_list)}, same additional')
            else:
                debug.print_function(f'{self._indent()}* Skip {n(node, region_list=game.region_list)}, missing additional:')
                self.print_requirement_set(requirement_set, (- 1))
                self._last_printed_additional[node] = requirement_set

    def print_requirement_set(self, requirement_set: RequirementSet, indent: int=0):
        requirement_set.pretty_print(self._indent(indent), print_function=debug.print_function)
class BasicBlock(nn.Module):
    """Standard two-conv residual block (ResNet "basic" block, expansion 1)."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, previous_dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        # First 3x3 conv: may change resolution (stride) and uses `dilation`.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=dilation, dilation=dilation, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        # Second 3x3 conv: keeps resolution, uses the previous dilation rate.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=previous_dilation, dilation=previous_dilation, bias=False)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(conv path + identity/downsampled shortcut)."""
        shortcut = self.downsample(x) if self.downsample is not None else x
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
class F23Handler(BaseHandler):
    """Kickstart syntax handler for Fedora 23.

    Pure lookup tables: each kickstart command name maps to the newest parser
    class valid for F23 (the class prefix, e.g. F21_/FC6_, records the release
    that last changed that command), and each per-command data object name
    maps to its matching data class.
    """
    version = F23
    # command name -> command parser class
    commandMap = {'auth': commands.authconfig.FC3_Authconfig, 'authconfig': commands.authconfig.FC3_Authconfig, 'autopart': commands.autopart.F23_AutoPart, 'autostep': commands.autostep.FC3_AutoStep, 'bootloader': commands.bootloader.F21_Bootloader, 'btrfs': commands.btrfs.F23_BTRFS, 'cdrom': commands.cdrom.FC3_Cdrom, 'clearpart': commands.clearpart.F21_ClearPart, 'cmdline': commands.displaymode.FC3_DisplayMode, 'device': commands.device.F8_Device, 'deviceprobe': commands.deviceprobe.FC3_DeviceProbe, 'dmraid': commands.dmraid.FC6_DmRaid, 'driverdisk': commands.driverdisk.F14_DriverDisk, 'eula': commands.eula.F20_Eula, 'fcoe': commands.fcoe.F13_Fcoe, 'firewall': commands.firewall.F20_Firewall, 'firstboot': commands.firstboot.FC3_Firstboot, 'graphical': commands.displaymode.FC3_DisplayMode, 'group': commands.group.F12_Group, 'halt': commands.reboot.F23_Reboot, 'harddrive': commands.harddrive.FC3_HardDrive, 'ignoredisk': commands.ignoredisk.F14_IgnoreDisk, 'install': commands.install.F20_Install, 'iscsi': commands.iscsi.F17_Iscsi, 'iscsiname': commands.iscsiname.FC6_IscsiName, 'keyboard': commands.keyboard.F18_Keyboard, 'lang': commands.lang.F19_Lang, 'liveimg': commands.liveimg.F19_Liveimg, 'logging': commands.logging.FC6_Logging, 'logvol': commands.logvol.F23_LogVol, 'mediacheck': commands.mediacheck.FC4_MediaCheck, 'method': commands.method.F19_Method, 'multipath': commands.multipath.FC6_MultiPath, 'network': commands.network.F22_Network, 'nfs': commands.nfs.FC6_NFS, 'ostreesetup': commands.ostreesetup.F21_OSTreeSetup, 'part': commands.partition.F23_Partition, 'partition': commands.partition.F23_Partition, 'poweroff': commands.reboot.F23_Reboot, 'raid': commands.raid.F23_Raid, 'realm': commands.realm.F19_Realm, 'reboot': commands.reboot.F23_Reboot, 'repo': commands.repo.F21_Repo, 'reqpart': commands.reqpart.F23_ReqPart, 'rescue': commands.rescue.F10_Rescue, 'rootpw': commands.rootpw.F18_RootPw, 'selinux': commands.selinux.FC3_SELinux, 'services': commands.services.FC6_Services, 'shutdown': commands.reboot.F23_Reboot, 'skipx': commands.skipx.FC3_SkipX, 'sshpw': commands.sshpw.F13_SshPw, 'sshkey': commands.sshkey.F22_SshKey, 'text': commands.displaymode.FC3_DisplayMode, 'timezone': commands.timezone.F23_Timezone, 'updates': commands.updates.F7_Updates, 'upgrade': commands.upgrade.F20_Upgrade, 'url': commands.url.F18_Url, 'user': commands.user.F19_User, 'vnc': commands.vnc.F9_Vnc, 'volgroup': commands.volgroup.F21_VolGroup, 'xconfig': commands.xconfig.F14_XConfig, 'zerombr': commands.zerombr.F9_ZeroMbr, 'zfcp': commands.zfcp.F14_ZFCP}
    # data object name -> per-command data class
    dataMap = {'BTRFSData': commands.btrfs.F23_BTRFSData, 'DriverDiskData': commands.driverdisk.F14_DriverDiskData, 'DeviceData': commands.device.F8_DeviceData, 'DmRaidData': commands.dmraid.FC6_DmRaidData, 'FcoeData': commands.fcoe.F13_FcoeData, 'GroupData': commands.group.F12_GroupData, 'IscsiData': commands.iscsi.F17_IscsiData, 'LogVolData': commands.logvol.F23_LogVolData, 'MultiPathData': commands.multipath.FC6_MultiPathData, 'NetworkData': commands.network.F22_NetworkData, 'PartData': commands.partition.F23_PartData, 'RaidData': commands.raid.F23_RaidData, 'RepoData': commands.repo.F21_RepoData, 'SshPwData': commands.sshpw.F13_SshPwData, 'SshKeyData': commands.sshkey.F22_SshKeyData, 'UserData': commands.user.F19_UserData, 'VolGroupData': commands.volgroup.F21_VolGroupData, 'ZFCPData': commands.zfcp.F14_ZFCPData}
def plt_hist(axis, data, hatch, label, bins, col):
    """Draw a stepped-outline, hatched histogram of *data* on *axis*.

    The histogram range is fixed to [0, 1]; the counts are converted to a
    step curve (each bin edge repeated) so plot/fill_between trace the bars.
    """
    counts, edges = np.histogram(data, bins=bins, range=[0, 1])
    step_x = np.repeat(edges, 2)
    # Pad with zeros so the outline drops to the baseline at both ends.
    step_y = np.hstack((0, np.repeat(counts, 2), 0))
    outline, = axis.plot(step_x, step_y, linewidth=1.3, color=col)
    axis.fill_between(step_x, step_y, 0, edgecolor=col, hatch=hatch, label=label, facecolor='none')
    axis.set_ylim(0, None, auto=True)
class Quantizer(nn.Module):
    """Uniform affine fake-quantizer: q = clamp(round(x/scale) + zero, 0, maxq).

    Buffers:
      maxq:  number of levels - 1 (2**bits - 1); the sentinel -1 selects
             ternary ("trits") mode.
      scale: per-tensor or per-channel step size.
      zero:  zero-point in quantized units (or the negative threshold in
             ternary mode).
    """
    def __init__(self, shape=1):
        super(Quantizer, self).__init__()
        self.register_buffer('maxq', torch.tensor(0))
        self.register_buffer('scale', torch.zeros(shape))
        self.register_buffer('zero', torch.zeros(shape))

    def configure(self, bits, perchannel=False, sym=True, mse=False, norm=2.4, grid=100, maxshrink=0.8, trits=False):
        """Set quantization hyper-parameters (scale/zero computed later).

        bits: bit width; perchannel: one scale per channel; sym: symmetric
        range; mse: grid-search the clipping range by Lp error (p = norm)
        over `grid` shrink steps up to `maxshrink`; trits: ternary mode.
        """
        self.maxq = torch.tensor(((2 ** bits) - 1))
        self.perchannel = perchannel
        self.sym = sym
        self.mse = mse
        self.norm = norm
        self.grid = grid
        self.maxshrink = maxshrink
        if trits:
            self.maxq = torch.tensor((- 1))  # sentinel: ternary quantization
            self.scale = torch.zeros_like(self.scale)

    def _quantize(self, x, scale, zero, maxq):
        # Ternary mode: threshold at half-steps into {zero, 0, scale}.
        if (maxq < 0):
            return (((x > (scale / 2)).float() * scale) + ((x < (zero / 2)).float() * zero))
        # Standard affine quantize / dequantize round trip.
        q = torch.clamp((torch.round((x / scale)) + zero), 0, maxq)
        return (scale * (q - zero))

    def find_params(self, x, weight=False):
        """Compute `scale` and `zero` from the value range of `x`.

        weight=True treats dim 0 of `x` as the channel dim; otherwise
        activations are rearranged so the channel dim comes first.
        """
        dev = x.device
        self.maxq = self.maxq.to(dev)
        shape = x.shape
        if self.perchannel:
            if weight:
                x = x.flatten(1)  # one row per output channel
            else:
                # Move the channel dimension first, then flatten the rest.
                if (len(shape) == 4):
                    x = x.permute([1, 0, 2, 3])
                    x = x.flatten(1)
                if (len(shape) == 3):
                    x = x.reshape(((- 1), shape[(- 1)])).t()
                if (len(shape) == 2):
                    x = x.t()
        else:
            x = x.flatten().unsqueeze(0)  # a single row: per-tensor statistics
        # Per-row min/max, forced to include 0 so zero is representable.
        tmp = torch.zeros(x.shape[0], device=dev)
        xmin = torch.minimum(x.min(1)[0], tmp)
        xmax = torch.maximum(x.max(1)[0], tmp)
        if self.sym:
            # Symmetric range: [-m, m] with m = max(|xmin|, xmax).
            xmax = torch.maximum(torch.abs(xmin), xmax)
            tmp = (xmin < 0)
            if torch.any(tmp):
                xmin[tmp] = (- xmax[tmp])
        # Degenerate all-zero rows get the dummy range [-1, 1].
        tmp = ((xmin == 0) & (xmax == 0))
        xmin[tmp] = (- 1)
        xmax[tmp] = (+ 1)
        if (self.maxq < 0):
            # Ternary mode: thresholds are the raw extrema.
            self.scale = xmax
            self.zero = xmin
        else:
            self.scale = ((xmax - xmin) / self.maxq)
            if self.sym:
                self.zero = torch.full_like(self.scale, ((self.maxq + 1) / 2))
            else:
                self.zero = torch.round(((- xmin) / self.scale))
        if self.mse:
            # Grid-search the shrink factor p minimizing the Lp reconstruction error.
            best = torch.full([x.shape[0]], float('inf'), device=dev)
            for i in range(int((self.maxshrink * self.grid))):
                p = (1 - (i / self.grid))
                xmin1 = (p * xmin)
                xmax1 = (p * xmax)
                scale1 = ((xmax1 - xmin1) / self.maxq)
                zero1 = (torch.round(((- xmin1) / scale1)) if (not self.sym) else self.zero)
                q = self._quantize(x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq)
                q -= x
                q.abs_()
                q.pow_(self.norm)
                err = torch.sum(q, 1)
                tmp = (err < best)
                if torch.any(tmp):
                    # Keep the shrunk range for the rows it improved.
                    best[tmp] = err[tmp]
                    self.scale[tmp] = scale1[tmp]
                    self.zero[tmp] = zero1[tmp]
        if (not self.perchannel):
            # Broadcast the single per-tensor value across the channel dim.
            if weight:
                tmp = shape[0]
            else:
                tmp = (shape[1] if (len(shape) != 3) else shape[2])
            self.scale = self.scale.repeat(tmp)
            self.zero = self.zero.repeat(tmp)
        # Reshape scale/zero so they broadcast against the original x layout.
        if weight:
            shape = ([(- 1)] + ([1] * (len(shape) - 1)))
            self.scale = self.scale.reshape(shape)
            self.zero = self.zero.reshape(shape)
            return
        if (len(shape) == 4):
            self.scale = self.scale.reshape((1, (- 1), 1, 1))
            self.zero = self.zero.reshape((1, (- 1), 1, 1))
        if (len(shape) == 3):
            self.scale = self.scale.reshape((1, 1, (- 1)))
            self.zero = self.zero.reshape((1, 1, (- 1)))
        if (len(shape) == 2):
            self.scale = self.scale.unsqueeze(0)
            self.zero = self.zero.unsqueeze(0)

    def quantize(self, x):
        """Fake-quantize x once parameters are ready; otherwise pass through."""
        if self.ready():
            return self._quantize(x, self.scale, self.zero, self.maxq)
        return x

    def enabled(self):
        # NOTE(review): returns False in ternary mode (maxq == -1) -- confirm
        # that is intended.
        return (self.maxq > 0)

    def ready(self):
        """True once find_params has produced non-zero scales."""
        return torch.all((self.scale != 0))
def _check_if_dag_has_cycles(dag: nx.DiGraph) -> None:
    """Raise ResolvingDependenciesError if `dag` contains a cycle."""
    try:
        cycles = nx.algorithms.cycles.find_cycle(dag)
    except nx.NetworkXNoCycle:
        # No cycle found: the graph is a valid DAG.
        return
    msg = f'''The DAG contains cycles which means a dependency is directly or indirectly a product of the same task. See the following the path of nodes in the graph which forms the cycle.
{_format_cycles(dag, cycles)}'''
    raise ResolvingDependenciesError(msg)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.