code stringlengths 101 5.91M |
|---|
class EventMention(Mention):
 """Mention subclass for event mentions, carrying optional SRL argument slots."""
 def __init__(self, doc_id, sent_id, tokens_numbers, tokens, mention_str, head_text, head_lemma, is_singleton, is_continuous, coref_chain):
  """Initialize base Mention fields and empty SRL argument slots."""
  super(EventMention, self).__init__(doc_id, sent_id, tokens_numbers, tokens, mention_str, head_text, head_lemma, is_singleton, is_continuous, coref_chain)
  # Each slot is either None or a tuple whose first item is the argument text.
  self.arg0 = None
  self.arg1 = None
  self.amtmp = None
  self.amloc = None
 def __str__(self):
  """Render the mention with its arguments, using '-' for unset slots."""
  def first_or_dash(slot):
   return slot[0] if slot is not None else '-'
  return '{}_a0: {}_a1: {}_loc: {}_tmp: {}_{}'.format(super(EventMention, self).__str__(), first_or_dash(self.arg0), first_or_dash(self.arg1), first_or_dash(self.amloc), first_or_dash(self.amtmp), self.mention_id)
def _simple_fixed_content(a, content, t, p, k, equality=False):
 """Recursively generate words of fixed content.

 Backtracking generator: ``a`` is the word being built, ``content[j]`` the
 remaining multiplicity of letter ``j``, ``t`` the 1-based position being
 filled, ``p`` the current period, ``k`` the alphabet size.  With
 ``equality=True`` only words whose length equals the period are yielded,
 otherwise words whose period divides the length.
 NOTE(review): looks like Sawada-style fixed-content necklace generation —
 confirm against the original source.
 """
 n = len(a)
 if (t > n):
  # Word complete: yield according to the period condition.
  if equality:
   if (n == p):
    (yield a)
  elif (not (n % p)):
   (yield a)
 else:
  # Candidate letters start at the letter p positions back, preserving
  # lexicographic minimality of the generated word.
  r = list(range(a[((t - p) - 1)], k))
  for j in r:
   if (content[j] > 0):
    a[(t - 1)] = j
    content[j] -= 1
    # Repeating the letter p back keeps the period; otherwise it grows to t.
    if (j == a[((t - p) - 1)]):
     (yield from _simple_fixed_content(a[:], content, (t + 1), p, k, equality=equality))
    else:
     (yield from _simple_fixed_content(a[:], content, (t + 1), t, k, equality=equality))
    content[j] += 1
@pytest.mark.parametrize('loss_class', [IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss])
def test_iou_type_loss_zeros_weight(loss_class):
 """Every IoU-family loss must reduce to exactly 0 when all sample weights are 0."""
 # The `.parametrize(` line had its `@pytest.mark` prefix stripped in the dump; restored.
 pred = torch.rand((10, 4))
 target = torch.rand((10, 4))
 weight = torch.zeros(10)
 loss = loss_class()(pred, target, weight)
 assert (loss == 0.0)
@pytest.mark.parametrize('n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, decay_function, click_model, eta, behavior_policy_function, is_factorizable, reward_function, return_pscore_item_position, description', valid_input_of_obtain_batch_bandit_feedback)
def test_synthetic_slate_using_valid_inputs(n_unique_action, len_list, dim_context, reward_type, random_state, n_rounds, reward_structure, decay_function, click_model, eta, behavior_policy_function, is_factorizable, reward_function, return_pscore_item_position, description):
 """Smoke-test SyntheticSlateBanditDataset on each valid parametrized configuration."""
 # The `.parametrize(` line had its `@pytest.mark` prefix stripped in the dump; restored.
 dataset = SyntheticSlateBanditDataset(n_unique_action=n_unique_action, len_list=len_list, dim_context=dim_context, reward_type=reward_type, reward_structure=reward_structure, decay_function=decay_function, click_model=click_model, eta=eta, random_state=random_state, behavior_policy_function=behavior_policy_function, is_factorizable=is_factorizable, base_reward_function=reward_function)
 bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds, return_pscore_item_position=return_pscore_item_position)
 check_slate_bandit_feedback(bandit_feedback=bandit_feedback, is_factorizable=is_factorizable)
 pscore_columns = ['pscore_cascade', 'pscore', 'pscore_item_position']
 bandit_feedback_df = pd.DataFrame()
 for column in (['slate_id', 'position', 'action', 'reward', 'expected_reward_factual'] + pscore_columns):
  bandit_feedback_df[column] = bandit_feedback[column]
 print(f'-------{description}')
 print(bandit_feedback_df.groupby('position')['reward'].describe())
 # Binary rewards must use exactly the label set {0, 1}.
 if (reward_type == 'binary'):
  assert (set(np.unique(bandit_feedback['reward'])) == set([0, 1]))
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
 """Plot an n_row x n_col grid of grayscale images of shape (h, w) with titles."""
 plt.figure(figsize=((1.8 * n_col), (2.4 * n_row)))
 plt.subplots_adjust(bottom=0, left=0.01, right=0.99, top=0.9, hspace=0.35)
 total_cells = n_row * n_col
 for cell in range(total_cells):
  plt.subplot(n_row, n_col, cell + 1)
  plt.imshow(images[cell].reshape((h, w)), cmap=plt.cm.gray)
  plt.title(titles[cell], size=12)
  # Hide axis ticks on every subplot.
  plt.xticks(())
  plt.yticks(())
def test_cora_load_str() -> None:
 """Cora().load(str_node_ids=True) must yield string node ids matching the subjects index."""
 (g, subjects) = Cora().load(str_node_ids=True)
 # `type(x) is str` keeps the original exact-type check (excludes str subclasses
 # such as numpy str_) while using identity, the idiomatic way to compare types.
 assert (type(g.nodes()[0]) is str)
 assert all(((type(n) is str) for n in g.nodes()))
 assert (set(subjects.index) == set(g.nodes()))
@add_start_docstrings(AutoModelWithLMHead.__doc__)
def modelWithLMHead(*args, **kwargs):
 """Thin wrapper: instantiate an AutoModelWithLMHead from pretrained weights.

 NOTE(review): line 69 looked like a stripped decorator; restored as
 transformers' ``add_start_docstrings`` — confirm against the original file.
 """
 return AutoModelWithLMHead.from_pretrained(*args, **kwargs)
def create_model(args, data_shape, regularization_fns):
 """Build a CNF-based flow model from parsed CLI args.

 Dispatches on args.multiscale / args.parallel / args.autoencode to one of
 three architectures; otherwise stacks num_blocks CNF blocks behind a logit
 (or zero-mean) input transform, optionally interleaved with batch norm.
 """
 # Comma-separated CLI strings -> integer tuples.
 hidden_dims = tuple(map(int, args.dims.split(',')))
 strides = tuple(map(int, args.strides.split(',')))
 if args.multiscale:
  model = odenvp.ODENVP((args.batch_size, *data_shape), n_blocks=args.num_blocks, intermediate_dims=hidden_dims, nonlinearity=args.nonlinearity, alpha=args.alpha, cnf_kwargs={'T': args.time_length, 'train_T': args.train_T, 'regularization_fns': regularization_fns})
 elif args.parallel:
  model = multiscale_parallel.MultiscaleParallelCNF((args.batch_size, *data_shape), n_blocks=args.num_blocks, intermediate_dims=hidden_dims, alpha=args.alpha, time_length=args.time_length)
 else:
  # Plain CNF stack; choose the ODE function flavour first.
  if args.autoencode:
   def build_cnf():
    # Autoencoder-based divergence estimator variant.
    autoencoder_diffeq = layers.AutoencoderDiffEqNet(hidden_dims=hidden_dims, input_shape=data_shape, strides=strides, conv=args.conv, layer_type=args.layer_type, nonlinearity=args.nonlinearity)
    odefunc = layers.AutoencoderODEfunc(autoencoder_diffeq=autoencoder_diffeq, divergence_fn=args.divergence_fn, residual=args.residual, rademacher=args.rademacher)
    cnf = layers.CNF(odefunc=odefunc, T=args.time_length, regularization_fns=regularization_fns, solver=args.solver)
    return cnf
  else:
   def build_cnf():
    # Standard ODEnet variant (supports trainable integration time train_T).
    diffeq = layers.ODEnet(hidden_dims=hidden_dims, input_shape=data_shape, strides=strides, conv=args.conv, layer_type=args.layer_type, nonlinearity=args.nonlinearity)
    odefunc = layers.ODEfunc(diffeq=diffeq, divergence_fn=args.divergence_fn, residual=args.residual, rademacher=args.rademacher)
    cnf = layers.CNF(odefunc=odefunc, T=args.time_length, train_T=args.train_T, regularization_fns=regularization_fns, solver=args.solver)
    return cnf
  # alpha > 0 selects a logit input transform, else a zero-mean shift.
  chain = ([layers.LogitTransform(alpha=args.alpha)] if (args.alpha > 0) else [layers.ZeroMeanTransform()])
  chain = (chain + [build_cnf() for _ in range(args.num_blocks)])
  if args.batch_norm:
   chain.append(layers.MovingBatchNorm2d(data_shape[0]))
  model = layers.SequentialFlow(chain)
 return model
@register_kl(Poisson, Bernoulli)
@register_kl(Poisson, Binomial)
def _kl_poisson_infinity(p, q):
 """KL(Poisson || Bernoulli/Binomial) is infinite: the Poisson support is
 unbounded while the other distribution's support is finite.

 The two `_kl(` lines had their `@register_kl` decorator prefix stripped in
 the dump; restored per torch.distributions.kl conventions.
 """
 return _infinite_like(p.rate)
class TestCategoricalCNNPolicyImageObs(TfGraphTestCase):
 """Tests CategoricalCNNPolicy against a dummy image-observation env."""
 def setup_method(self):
  super().setup_method()
  self.env = GarageEnv(DummyDiscretePixelEnv(), is_image=True)
  self.sess.run(tf.compat.v1.global_variables_initializer())
  self.env.reset()
 # The `.parametrize(` line had its `@pytest.mark` prefix stripped in the dump; restored.
 @pytest.mark.parametrize('filters, strides, padding, hidden_sizes', [(((3, (32, 32)),), (1,), 'VALID', (4,))])
 def test_obs_unflattened(self, filters, strides, padding, hidden_sizes):
  """Policy should accept a flattened image observation and produce a valid action."""
  self.policy = CategoricalCNNPolicy(env_spec=self.env.spec, filters=filters, strides=strides, padding=padding, hidden_sizes=hidden_sizes)
  obs = self.env.observation_space.sample()
  (action, _) = self.policy.get_action(self.env.observation_space.flatten(obs))
  self.env.step(action)
def extract_keyphrases(embedding_distrib, ptagger, raw_text, N, lang, beta=0.55, alias_threshold=0.7):
 """POS-tag raw_text, wrap it as an InputTextObj, and run MMR phrase extraction."""
 tagged_text = ptagger.pos_tag_raw_text(raw_text)
 doc = InputTextObj(tagged_text, lang)
 return MMRPhrase(embedding_distrib, doc, N=N, beta=beta, alias_threshold=alias_threshold)
class TextDetector(object):
 """Inference-only wrapper that holds a model in eval mode."""
 def __init__(self, model):
  self.model = model
  model.eval()
 def detect1(self, image):
  """Run forward_test on one batch and package all outputs in a dict."""
  with torch.no_grad():
   pointer_pred, dail_pred, text_pred, pred_recog, std_points = self.model.forward_test(image)
   first_image = image[0].data.cpu().numpy()
   return {'image': first_image, 'pointer': pointer_pred, 'dail': dail_pred, 'text': text_pred, 'reco': pred_recog, 'std': std_points}
def test_tokenize_replay_buffer() -> None:
 """tokenize_replay_buffer should tokenize observations/actions and leave rewards/flags untouched."""
 tokenizer = FloatTokenizer(num_bins=100)
 episodes = [create_episode(observation_shape=(100,), action_size=2, length=100) for _ in range(2)]
 replay_buffer = ReplayBuffer(InfiniteBuffer(), episodes=episodes)
 tokenized = tokenize_replay_buffer(replay_buffer, observation_tokenizer=tokenizer, action_tokenizer=tokenizer)
 for original, converted in zip(replay_buffer.episodes, tokenized.episodes):
  assert isinstance(original.observations, np.ndarray)
  assert np.all(tokenizer(original.observations) == converted.observations)
  assert np.all(tokenizer(original.actions) == converted.actions)
  assert np.all(original.rewards == converted.rewards)
  assert np.all(original.terminated == converted.terminated)
@register_bpe('fastbpe')
class fastBPE(object):
 """BPE wrapper around the fastBPE library (fairseq-style plugin).

 The `_bpe('fastbpe')` line had its `@register_bpe` decorator prefix stripped
 in the dump; restored. ``add_args`` takes no self, so it is a staticmethod.
 """
 @staticmethod
 def add_args(parser):
  """Add fastBPE-specific command-line arguments."""
  parser.add_argument('--bpe-codes', type=str, help='path to fastBPE BPE')
 def __init__(self, args):
  if (args.bpe_codes is None):
   raise ValueError('--bpe-codes is required for --bpe=fastbpe')
  codes = file_utils.cached_path(args.bpe_codes)
  try:
   import fastBPE
   self.bpe = fastBPE.fastBPE(codes)
   # NOTE(review): the continuation marker looks garbled in this dump —
   # fastBPE conventionally uses '@@ '; confirm against the original source.
   self.bpe_symbol = ' '
  except ImportError:
   raise ImportError('Please install fastBPE with: pip install fastBPE')
 def encode(self, x: str) -> str:
  """Apply BPE to a single string."""
  return self.bpe.apply([x])[0]
 def decode(self, x: str) -> str:
  """Undo BPE by removing continuation markers."""
  return (x + ' ').replace(self.bpe_symbol, '').rstrip()
def _smc_batchnorm_modules(model, graph):
 """Yield (module, node) for BatchNorm2d modules whose graph node enables second-moment correction."""
 for (name, module) in model.named_modules():
  matches = graph.find_node_by_name(name)
  if (len(matches) > 0):
   node = matches[0]
   if (isinstance(module, torch.nn.BatchNorm2d) and node.final_weights_quantization_cfg.weights_second_moment_correction):
    (yield (module, node))
def pytorch_apply_second_moment_correction(quantized_model: Any, core_config: CoreConfig, representative_data_gen: Callable, graph: common.Graph):
 """Refresh BatchNorm running statistics on a copy of the quantized model and
 write the updated stats back into the matching graph nodes.

 The original duplicated the BN-node filtering logic in two loops; it is
 factored into _smc_batchnorm_modules. Returns the (mutated) graph.
 """
 model = copy.deepcopy(quantized_model)
 set_model(model)
 # Pass 1: switch the relevant BN layers to train mode so running stats update.
 for (module, _node) in _smc_batchnorm_modules(model, graph):
  module.train()
 # Drive representative data through the model to refresh the BN statistics.
 with torch.no_grad():
  for data in tqdm(representative_data_gen()):
   model(*to_torch_tensor(data))
 set_model(model)
 # Pass 2: freeze the BN layers again and copy their updated stats into the graph.
 for (module, node) in _smc_batchnorm_modules(model, graph):
  module.eval()
  bn_node_weights = {GAMMA: module.weight.detach().cpu().numpy(), BETA: module.bias.detach().cpu().numpy(), MOVING_MEAN: module.running_mean.detach().cpu().numpy(), MOVING_VARIANCE: module.running_var.detach().cpu().numpy()}
  node.weights = copy.deepcopy(bn_node_weights)
 return graph
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
 """Unit tests for PhobertTokenizer using a tiny on-disk vocab/merges fixture.

 NOTE(review): several fixture string literals (vocab entries, merges,
 expected BPE tokens) appear garbled by the extraction — verify against the
 original test file before trusting the expected values.
 """
 tokenizer_class = PhobertTokenizer
 test_rust_tokenizer = False
 def setUp(self):
  """Write a minimal vocab file and merges file into the temp dir."""
  super().setUp()
  vocab = ['', 'i', 'I', '', 'r', '']
  vocab_tokens = dict(zip(vocab, range(len(vocab))))
  merges = ['#version: 0.2', 'l a</w>']
  self.special_tokens_map = {'unk_token': '<unk>'}
  self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
  self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
  # Vocab file format: one "<token> <id>" pair per line.
  with open(self.vocab_file, 'w', encoding='utf-8') as fp:
   for token in vocab_tokens:
    fp.write(f'''{token} {vocab_tokens[token]}
''')
  with open(self.merges_file, 'w', encoding='utf-8') as fp:
   fp.write('\n'.join(merges))
 def get_tokenizer(self, **kwargs):
  """Load a PhobertTokenizer from the fixture files with the special-tokens map applied."""
  kwargs.update(self.special_tokens_map)
  return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
 def get_input_output_texts(self, tokenizer):
  """Return an (input, expected-output) text pair for round-trip tests."""
  input_text = 'Toi la VinAI Research'
  output_text = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
  return (input_text, output_text)
 def test_full_tokenizer(self):
  """Tokenize a sentence and check both the BPE tokens and their ids."""
  tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
  text = 'Toi la VinAI Research'
  bpe_tokens = ' o i a I h'.split()
  tokens = tokenizer.tokenize(text)
  print(tokens)
  self.assertListEqual(tokens, bpe_tokens)
  input_tokens = (tokens + [tokenizer.unk_token])
  # 3 is the <unk> id in this fixture vocabulary.
  input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
  self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def tag_normalize(tag):
 """Strip punctuation/quote noise from a tag string and collapse it with unique_word."""
 # Ordered replacements: "'n'" must run before bare apostrophes, and
 # ' & ' before '&', to reproduce the original chained-replace behavior.
 replacements = (("'n'", ''), ("'", ''), ('(', ''), (')', ''), ('/', ' '), ('-', ' '), (' & ', 'n'), ('&', 'n'))
 for old, new in replacements:
  tag = tag.replace(old, new)
 return unique_word(tag)
def str2bool(v):
 """argparse-friendly conversion of a string (or bool) to a bool.

 Accepts the usual yes/no spellings case-insensitively; raises
 argparse.ArgumentTypeError on anything else.
 """
 if isinstance(v, bool):
  return v
 lowered = v.lower()
 if lowered in ('yes', 'true', 't', 'y', '1'):
  return True
 if lowered in ('no', 'false', 'f', 'n', '0'):
  return False
 raise argparse.ArgumentTypeError('Boolean value expected.')
class MIDRAN():
 """Training/inference harness for the Dynamic Residual Attention denoising net.

 All hyperparameters come from a config dict; the class owns the network,
 optimizer, data loader, checkpointing, TensorBoard logging and inference.
 """
 def __init__(self, config):
  """Read paths/hyperparameters from config and build the net and optimizer."""
  # Filesystem paths.
  self.trainingImagePath = config['trainingImagePath']
  self.checkpointPath = config['checkpointPath']
  self.logPath = config['logPath']
  self.testImagesPath = config['testImagePath']
  self.resultDir = config['resultDir']
  self.modelName = config['modelName']
  self.dataSamples = config['dataSamples']
  # Data / image geometry.
  self.batchSize = int(config['batchSize'])
  self.imageH = int(config['imageH'])
  self.imageW = int(config['imageW'])
  self.inputC = int(config['inputC'])
  self.outputC = int(config['outputC'])
  # Training schedule.
  self.totalEpoch = int(config['epoch'])
  self.interval = int(config['interval'])
  self.learningRate = float(config['learningRate'])
  self.adamBeta1 = float(config['adamBeta1'])
  self.adamBeta2 = float(config['adamBeta2'])
  self.barLen = int(config['barLen'])
  # Runtime bookkeeping.
  self.currentEpoch = 0
  self.startSteps = 0
  self.totalSteps = 0
  self.adversarialMean = 0
  self.PR = 0.0
  self.unNorm = UnNormalize()
  # Noise levels (sigma) used during validation/inference.
  self.noiseSet = [25, 50]
  self.device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
  self.net = DynamicResAttNet(3).to(self.device)
  self.optimizerEG = torch.optim.Adam(self.net.parameters(), lr=self.learningRate, betas=(self.adamBeta1, self.adamBeta2))
  self.scheduleLR = None
 def customTrainLoader(self, overFitTest=False):
  """Build the training DataLoader; with overFitTest=True use a single image."""
  targetImageList = imageList(self.trainingImagePath)
  print('Trining Samples (Input):', self.trainingImagePath, len(targetImageList))
  if (overFitTest == True):
   # Keep only the last image to sanity-check that the net can overfit.
   targetImageList = targetImageList[(- 1):]
  if self.dataSamples:
   targetImageList = targetImageList[:self.dataSamples]
  datasetReadder = customDatasetReader(image_list=targetImageList, imagePath=self.trainingImagePath, height=self.imageH, width=self.imageW)
  self.trainLoader = torch.utils.data.DataLoader(dataset=datasetReadder, batch_size=self.batchSize, shuffle=True)
  return self.trainLoader
 def modelTraining(self, resumeTraning=False, overFitTest=False, dataSamples=None):
  """Main training loop: L1 loss between predicted residual noise and ground truth."""
  if dataSamples:
   self.dataSamples = dataSamples
  reconstructionLoss = torch.nn.L1Loss().to(self.device)
  if (overFitTest == True):
   customPrint((Fore.RED + 'Over Fitting Testing with an arbitary image!'), self.barLen)
   trainingImageLoader = self.customTrainLoader(overFitTest=True)
   self.interval = 1
   self.totalEpoch = 100000
  else:
   trainingImageLoader = self.customTrainLoader()
  if (resumeTraning == True):
   try:
    self.modelLoad()
   # NOTE(review): bare except silently swallows any load error and asks the
   # user whether to restart — consider catching specific exceptions.
   except:
    customPrint((Fore.RED + 'Would you like to start training from sketch (default: Y): '), textWidth=self.barLen)
    userInput = (input() or 'Y')
    if (not ((userInput == 'Y') or (userInput == 'y'))):
     exit()
  customPrint((('Training is about to begin using:' + Fore.YELLOW) + '[{}]'.format(self.device).upper()), textWidth=self.barLen)
  self.totalSteps = int((len(trainingImageLoader) * self.totalEpoch))
  startTime = time.time()
  bar = ProgressBar(self.totalSteps, max_width=int((self.barLen / 2)))
  currentStep = self.startSteps
  while (currentStep < self.totalSteps):
   iterTime = time.time()
   for (LRImages, HRGTImages) in trainingImageLoader:
    # NOTE(review): strict '>' means the step equal to totalSteps still
    # trains before the exit branch fires — confirm intended off-by-one.
    if (currentStep > self.totalSteps):
     self.savingWeights(currentStep)
     customPrint((Fore.YELLOW + 'Training Completed Successfully!'), textWidth=self.barLen)
     exit()
    currentStep += 1
    rawInput = LRImages.to(self.device)
    highResReal = HRGTImages.to(self.device)
    # Network predicts the residual noise; target is the ground-truth noise.
    residualNoise = self.net(rawInput)
    self.optimizerEG.zero_grad()
    generatorContentLoss = reconstructionLoss(residualNoise, highResReal)
    lossEG = generatorContentLoss
    lossEG.backward()
    self.optimizerEG.step()
    if (((currentStep + 1) % 25) == 0):
     bar.numerator = (currentStep + 1)
     print((Fore.YELLOW + 'Steps |'), bar, (Fore.YELLOW + '| LossEG: {:.4f}'.format(lossEG)), end='\r')
    if (((currentStep + 1) % self.interval) == 0):
     summaryInfo = {'Input Images': self.unNorm(rawInput), 'Residual Images': self.unNorm(residualNoise), 'Denoised Images': self.unNorm((rawInput - residualNoise)), 'GTNoise': self.unNorm(highResReal), 'Step': (currentStep + 1), 'Epoch': self.currentEpoch, 'LossEG': lossEG.item(), 'Path': self.logPath, 'Atttention Net': self.net}
     tbLogWritter(summaryInfo)
     # NOTE(review): residualNoise[0] subtracts only the first sample's
     # residual (broadcast over the batch) — likely meant residualNoise.
     save_image(self.unNorm((rawInput - residualNoise[0])), 'modelOutput.png')
     self.savingWeights(currentStep)
    if (((currentStep + 1) % 10000) == 0):
     # Periodic backup checkpoint plus a validation pass.
     print('\n')
     self.savingWeights((currentStep + 1), True)
     self.modelInference(validation=True, steps=(currentStep + 1))
   (eHours, eMinutes, eSeconds) = timer(iterTime, time.time())
   print((Fore.CYAN + 'Steps [{}/{}] | Time elapsed [{:0>2}:{:0>2}:{:0>2}] | Loss: {:.2f}'.format((currentStep + 1), self.totalSteps, eHours, eMinutes, eSeconds, lossEG)))
 def modelInference(self, testImagesPath=None, outputDir=None, resize=None, validation=None, noiseSet=None, steps=None):
  """Denoise every test image at every noise level; loads weights unless validating."""
  if (not validation):
   self.modelLoad()
   print('\nInferencing on pretrained weights.')
  else:
   print('Validation about to begin.')
  if (not noiseSet):
   noiseSet = self.noiseSet
  if testImagesPath:
   self.testImagesPath = testImagesPath
  if outputDir:
   self.resultDir = outputDir
  modelInference = inference(inputRootDir=self.testImagesPath, outputRootDir=self.resultDir, modelName=self.modelName, validation=validation)
  testImageList = modelInference.testingSetProcessor()
  barVal = ProgressBar((len(testImageList) * len(noiseSet)), max_width=int(50))
  imageCounter = 0
  with torch.no_grad():
   for noise in noiseSet:
    for imgPath in testImageList:
     img = modelInference.inputForInference(imgPath, noiseLevel=noise).to(self.device)
     output = self.net(img)
     # Denoised image = noisy input minus predicted residual noise.
     modelInference.saveModelOutput((img - output), imgPath, noise, steps)
     imageCounter += 1
     if ((imageCounter % 2) == 0):
      barVal.numerator = imageCounter
      print((Fore.CYAN + 'Image Processd |'), barVal, Fore.CYAN, end='\r')
  print('\n')
 def modelSummary(self, input_size=None):
  """Print a layer summary and FLOPs estimate for the network."""
  if (not input_size):
   input_size = (3, self.imageH, self.imageW)
  customPrint((Fore.YELLOW + 'Model Summary:Dynamic Residual Attention Network'), textWidth=self.barLen)
  summary(self.net, input_size=input_size)
  print(('*' * self.barLen))
  print()
  (flops, params) = get_model_complexity_info(self.net, input_size, as_strings=True, print_per_layer_stat=False)
  customPrint('Computational complexity (Dynamic Residual Attention Network):{}'.format(flops), self.barLen, '-')
  configShower()
  print(('*' * self.barLen))
 def savingWeights(self, currentStep, duplicate=None):
  """Save net/optimizer/scheduler state; optionally also to a step-named backup dir."""
  checkpoint = {'step': (currentStep + 1), 'stateDictEG': self.net.state_dict(), 'optimizerEG': self.optimizerEG.state_dict(), 'schedulerLR': self.scheduleLR}
  saveCheckpoint(modelStates=checkpoint, path=self.checkpointPath, modelName=self.modelName)
  if duplicate:
   saveCheckpoint(modelStates=checkpoint, path=((self.checkpointPath + str(currentStep)) + '/'), modelName=self.modelName, backup=None)
 def modelLoad(self):
  """Restore net/optimizer/scheduler state and the resume step from the checkpoint."""
  customPrint((Fore.RED + 'Loading pretrained weight'), textWidth=self.barLen)
  previousWeight = loadCheckpoints(self.checkpointPath, self.modelName)
  self.net.load_state_dict(previousWeight['stateDictEG'])
  self.optimizerEG.load_state_dict(previousWeight['optimizerEG'])
  self.scheduleLR = previousWeight['schedulerLR']
  self.startSteps = int(previousWeight['step'])
  customPrint((Fore.YELLOW + 'Weight loaded successfully'), textWidth=self.barLen)
def consolidate_ckpt(src_path, dst_path):
 """Upgrade a causal-LM checkpoint in place, then re-save model and tokenizer to dst_path."""
 print('Loading model')
 auto_upgrade(src_path)
 model = AutoModelForCausalLM.from_pretrained(src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
 tokenizer = AutoTokenizer.from_pretrained(src_path)
 model.save_pretrained(dst_path)
 tokenizer.save_pretrained(dst_path)
@hydra.main(config_path='../config', config_name='text_classification')
def main(config):
 """Train an ensemble of teacher classifiers, then distill them into a student.

 Returns the student's final test error in [0, 1], or NaN when distillation
 is skipped or anything fails. Line 378 was a stripped ``@hydra.main``
 decorator (the body already uses hydra.utils); restored.
 """
 try:
  (config, logger) = startup(config)
  (train_loader, test_loader, train_splits, vocab_size) = get_text_loaders(config)
  tb_logger = SummaryWriter(log_dir=f'./{config.job_name}/{config.timestamp}')
  # --- Teacher phase: reuse checkpoints when available, train the rest. ---
  if config.teacher.use_ckpts:
   try:
    (teachers, ckpt_files) = load_teachers(config, num_words=vocab_size)
    if config.teacher.shuffle_ckpts:
     print('shuffling checkpoints')
     random.shuffle(teachers)
   except FileNotFoundError:
    teachers = []
   if (len(teachers) >= config.teacher.num_components):
    teachers = select_ckpts(teachers, config.trial_id, config.teacher.num_components, ckpt_names=ckpt_files)
    teachers = [try_cuda(m) for m in teachers]
  else:
   teachers = []
  num_ckpts = len(teachers)
  for i in range(num_ckpts, config.teacher.num_components):
   model = hydra.utils.instantiate(config.classifier, num_words=vocab_size)
   model = try_cuda(model)
   logger.save_obj(model.state_dict(), f'teacher_init_{i}.ckpt')
   print(f'==== training teacher model {(i + 1)} ====')
   teacher_loss = distillation.ClassifierTeacherLoss(model)
   (model, records) = train_loop(config, model, train_closure=supervised_epoch, train_loader=train_loader, train_kwargs=dict(loss_fn=teacher_loss), eval_closure=eval_epoch, eval_loader=test_loader, eval_kwargs=dict(loss_fn=teacher_loss, drop_synthetic_inputs=False), tb_logger=tb_logger, tb_prefix='teachers/teacher_{}/'.format(i))
   teachers.append(model)
   logger.add_table(f'teacher_{i}_train_metrics', records)
   logger.write_csv()
  for (i, model) in enumerate(teachers):
   logger.save_obj(model.state_dict(), f'teacher_{i}.ckpt')
  if (config.trainer.distill_teacher is False):
   return float('NaN')
  # --- Distillation phase: ensemble the teachers and train the student. ---
  print('==== ensembling teacher classifiers ====')
  teacher = models.ClassifierEnsemble(*teachers)
  distill_loader = hydra.utils.instantiate(config.distill_loader, loader=train_loader, teacher=teacher)
  teacher_train_metrics = eval_epoch(teacher, distill_loader, epoch=0, loss_fn=models.ensemble.ClassifierEnsembleLoss(teacher), drop_synthetic_inputs=False)
  teacher_test_metrics = eval_epoch(teacher, test_loader, epoch=0, loss_fn=models.ensemble.ClassifierEnsembleLoss(teacher), drop_synthetic_inputs=False)
  student = hydra.utils.instantiate(config.classifier, num_words=vocab_size)
  student = try_cuda(student)
  # Optionally initialize the student near a teacher's initial or final weights.
  if (config.teacher.ckpt_init.type == 'init'):
   assert (config.classifier.depth == config.teacher.depth)
   assert (config.teacher.num_components == 1)
   (init_teachers, init_fnames) = load_teachers(config, ckpt_pattern='*teacher_init_?.ckpt', num_words=vocab_size)
   print('initializing the student near the initial teacher weights')
   init_teachers = select_ckpts(init_teachers, config.trial_id, 1, ckpt_names=init_fnames)
   start_idx = (((config.trial_id * config.teacher.num_components) % len(init_teachers)) - len(init_teachers))
   stop_idx = (start_idx + 1)
   print(f'using checkpoints {[((len(init_teachers) + i) % len(init_teachers)) for i in range(start_idx, stop_idx)]}')
   student = interpolate_net(student, init_teachers[0].state_dict(), config.teacher.ckpt_init.loc_param, train_loader, config.trainer.freeze_bn)
  elif (config.teacher.ckpt_init.type == 'final'):
   assert (config.classifier.depth == config.teacher.depth)
   assert (config.teacher.num_components == 1)
   print('initializing the student near the final teacher weights')
   student = interpolate_net(student, teachers[0].state_dict(), config.teacher.ckpt_init.loc_param, train_loader, config.trainer.freeze_bn)
   # Scale down the LR proportionally to how close we start to the teacher.
   config.trainer.optimizer.lr = max((config.trainer.optimizer.lr * config.teacher.ckpt_init.loc_param), config.trainer.lr_scheduler.eta_min)
  logger.save_obj(student.state_dict(), 'student_init.ckpt')
  student_base_loss = hydra.utils.instantiate(config.loss.init)
  student_loss = distillation.ClassifierStudentLoss(student, student_base_loss, config.loss.alpha)
  print(f'==== distilling student classifier ====')
  (student, records) = train_loop(config, student, train_closure=distillation_epoch, train_loader=distill_loader, train_kwargs=dict(loss_fn=student_loss), eval_closure=eval_epoch, eval_loader=test_loader, eval_kwargs=dict(loss_fn=student_loss, teacher=teacher, drop_synthetic_inputs=False, with_cka=False), tb_logger=tb_logger, tb_prefix='student/')
  for r in records:
   r.update(dict(teacher_train_acc=teacher_train_metrics['test_acc'], teacher_test_acc=teacher_test_metrics['test_acc']))
  logger.add_table(f'student_train_metrics', records)
  logger.write_csv()
  logger.save_obj(student.state_dict(), f'student.ckpt')
  del train_loader, test_loader
  return ((1 - (records[(- 1)]['test_acc'] / 100.0)) if (len(records) > 0) else float('NaN'))
 except Exception:
  # Top-level boundary: log the traceback and signal failure to the sweeper.
  logging.error(traceback.format_exc())
  return float('NaN')
def tensor2map(var):
 """Convert a CxHxW class-score tensor into a color-coded PIL segmentation image."""
 class_map = np.argmax(var.data.cpu().numpy(), axis=0)
 palette = get_colors()
 rgb = np.ones(shape=(class_map.shape[0], class_map.shape[1], 3))
 # Paint each class region with its palette color.
 for cls in np.unique(class_map):
  rgb[(class_map == cls)] = palette[cls]
 return Image.fromarray(rgb.astype('uint8'))
class PythonStoreTest(TestCase):
 """Checks that a pure-Python c10d Store subclass passes the C++ test harness."""
 def setUp(self):
  super(PythonStoreTest, self).setUp()
 def test_set_get(self):
  # _test_python_store exercises set/get round-trips on the store from C++.
  c10d._test_python_store(MyPythonStore())
def get_metrics_list(sd):
 """Return the sorted metric names shared by every system summary in ``sd``.

 Asserts that all 'scores' dicts across all documents use the same metric set.
 """
 metric_tuples = {tuple(sorted(summ['scores'].keys())) for doc in sd.values() for summ in doc['sys_summs'].values()}
 assert (len(metric_tuples) == 1), (metric_tuples, 'all system summary score dicts should have the same set of all_metrics')
 return list(next(iter(metric_tuples)))
def register_Ns3EventGarbageCollector_methods(root_module, cls):
 """Register constructors and the Track method on the ns3::EventGarbageCollector binding."""
 # Default constructor.
 cls.add_constructor([])
 cls.add_method('Track', 'void', [param('ns3::EventId', 'event')])
 # Copy constructor.
 cls.add_constructor([param('ns3::EventGarbageCollector const &', 'arg0')])
def idx_list_converter(split_line, idx):
 """Select field(s) from split_line: an int yields a one-element list, an iterable of indices yields each field."""
 indices = [idx] if isinstance(idx, int) else idx
 return [split_line[i] for i in indices]
class CoxeterGroups(Category_singleton):
def super_categories(self):
return [GeneralizedCoxeterGroups()]
def additional_structure(self):
return None
Finite = LazyImport('sage.categories.finite_coxeter_groups', 'FiniteCoxeterGroups')
Algebras = LazyImport('sage.categories.coxeter_group_algebras', 'CoxeterGroupAlgebras')
class ParentMethods():
_method
def coxeter_matrix(self):
_method
def index_set(self):
return self.coxeter_matrix().index_set()
def coxeter_diagram(self):
return self.coxeter_matrix().coxeter_graph()
def coxeter_type(self):
return self.coxeter_matrix().coxeter_type()
  def braid_relations(self):
   """Return the braid relations of ``self`` as pairs of equivalent words.

   For each pair of indices i < j with Coxeter matrix entry m = M[i, j],
   the relation equates two alternating words of length m in i and j.
   """
   rels = []
   M = self.coxeter_matrix()
   I = self.index_set()
   for (ii, i) in enumerate(I):
    for j in I[(ii + 1):]:
     m = M[(i, j)]
     # rel = [i, j, i, j, ...] of length 2m; the relation's sides are its
     # halves, the second half reversed when m is even so both sides
     # alternate correctly.
     rel = ([i, j] * m)
     rels.append([rel[:m], (rel[m:] if (m % 2) else list(reversed(rel[m:])))])
   return rels
  def braid_group_as_finitely_presented_group(self):
   """Return the braid group of ``self`` as a finitely presented group.

   Builds a free group on generators S_i (one per index) and quotients by
   the braid relations, each rewritten as l * r^{-1} = 1.
   """
   from sage.groups.free_group import FreeGroup
   from sage.misc.misc_c import prod
   I = self.index_set()
   F = FreeGroup([('S%s' % i) for i in I])
   S = F.gens()
   rels = self.braid_relations()
   # Each relation [l, r] becomes the relator (prod of l) * (prod of r)^-1.
   return (F / [(prod((S[I.index(i)] for i in l)) * prod(((S[I.index(i)] ** (- 1)) for i in reversed(r)))) for (l, r) in rels])
  def braid_orbit_iter(self, word):
   """Iterate over the orbit of ``word`` under the braid relations.

   When the index set contains non-integer labels, words are relabelled to
   integer positions before calling BraidOrbit and relabelled back on yield.
   """
   word = list(word)
   from sage.combinat.root_system.braid_orbit import BraidOrbit
   braid_rels = self.braid_relations()
   I = self.index_set()
   from sage.rings.integer_ring import ZZ
   # BraidOrbit requires integer letters; check whether relabelling is needed.
   be_careful = any(((i not in ZZ) for i in I))
   if be_careful:
    Iinv = {i: j for (j, i) in enumerate(I)}
    word = [Iinv[i] for i in word]
    braid_rels = [[[Iinv[i] for i in l], [Iinv[i] for i in r]] for (l, r) in braid_rels]
   orb = BraidOrbit(word, braid_rels)
   if be_careful:
    # Map integer positions back to the original labels.
    for word in orb:
     (yield [I[i] for i in word])
   else:
    # NOTE(review): the loop variable shadows the index set I here; harmless
    # since I is no longer used, but worth renaming upstream.
    for I in orb:
     (yield list(I))
def braid_orbit(self, word):
return list(self.braid_orbit_iter(word))
def __iter__(self):
return iter(self.weak_order_ideal(predicate=ConstantFunction(True)))
  def _element_constructor_(self, x, **args):
   """Coerce ``x`` into ``self``.

   An element of another Coxeter group is converted via its reduced word;
   anything else is handed to the element class constructor.
   """
   P = parent(x)
   if (P in CoxeterGroups()):
    try:
     return self.from_reduced_word(x.reduced_word())
    # KeyError: the reduced word uses indices not in this group's index set.
    except KeyError:
     pass
   return self.element_class(self, x, **args)
  def weak_order_ideal(self, predicate, side='right', category=None):
   """Return the weak order ideal of elements satisfying ``predicate``.

   Enumerated breadth-first as a forest rooted at the identity; the
   first-descent condition in ``succ`` guarantees each element is
   generated exactly once.
   """
   from sage.sets.recursively_enumerated_set import RecursivelyEnumeratedSet_forest
   def succ(u):
    # Children of u: extensions by a simple reflection that increase length,
    # kept only when i is the canonical (first) descent of the new element.
    for i in u.descents(positive=True, side=side):
     u1 = u.apply_simple_reflection(i, side)
     if ((i == u1.first_descent(side=side)) and predicate(u1)):
      (yield u1)
   from sage.categories.finite_coxeter_groups import FiniteCoxeterGroups
   default_category = (FiniteEnumeratedSets() if (self in FiniteCoxeterGroups()) else EnumeratedSets())
   cat = default_category.or_subcategory(category)
   return RecursivelyEnumeratedSet_forest((self.one(),), succ, algorithm='breadth', category=cat)
_method
def coxeter_element(self):
return self.prod(self.simple_reflections())
_method
def standard_coxeter_elements(self):
if ((not self.is_irreducible()) or (not self.is_well_generated())):
raise ValueError('this method is available for irreducible, well-generated complex reflection groups')
from sage.combinat.permutation import Permutations
return {self.from_reduced_word(w) for w in Permutations(self.index_set())}
def grassmannian_elements(self, side='right'):
order_side = ('left' if (side == 'right') else 'right')
return self.weak_order_ideal(attrcall('is_grassmannian', side=side), side=order_side)
  def fully_commutative_elements(self):
   """Return the set of fully commutative elements of ``self``."""
   from sage.combinat.fully_commutative_elements import FullyCommutativeElements
   return FullyCommutativeElements(self)
def _test_reduced_word(self, **options):
tester = self._tester(**options)
s = self.simple_reflections()
for x in tester.some_elements():
red = x.reduced_word()
tester.assertEqual(self.from_reduced_word(red), x)
tester.assertEqual(self.prod((s[i] for i in red)), x)
  def simple_projection(self, i, side='right', length_increasing=True):
   """Return the simple projection pi_i (or its opposite) as a function on elements.

   ``i`` must belong to the index set (0 is also tolerated here).
   """
   if (not ((i in self.index_set()) or (i == 0))):
    raise ValueError(('%s is not 0 and not in the Dynkin node set %s' % (i, self.index_set())))
   return (lambda x: x.apply_simple_projection(i, side=side, length_increasing=length_increasing))
  def kazhdan_lusztig_cells(self, side='left'):
   """Return the set of Kazhdan--Lusztig cells of ``self`` (finite groups only).

   Iterates over the group, computing the cell of each element not yet
   covered; cells are returned as frozensets.
   """
   if (not self.coxeter_type().is_finite()):
    raise ValueError('the Coxeter group must be finite to compute Kazhdan--Lusztig cells')
   # Seed with the identity's singleton cell.
   identity = frozenset([self.one()])
   cells = {identity}
   for w in self:
    if (not any(((w in c) for c in cells))):
     cell = w.kazhdan_lusztig_cell(side=side)
     cells.add(frozenset(cell))
   return cells
_method
def simple_projections(self, side='right', length_increasing=True):
from sage.sets.family import Family
return Family(self.index_set(), (lambda i: self.simple_projection(i, side=side, length_increasing=length_increasing)))
def sign_representation(self, base_ring=None, side='twosided'):
if (base_ring is None):
from sage.rings.integer_ring import ZZ
base_ring = ZZ
from sage.modules.with_basis.representation import SignRepresentationCoxeterGroup
return SignRepresentationCoxeterGroup(self, base_ring)
  def demazure_product(self, Q):
   """Return the Demazure product of the word ``Q``, applied from the identity."""
   return self.one().apply_demazure_product(Q)
  def bruhat_interval(self, x, y):
   """Return the list of elements in the Bruhat interval [x, y].

   BFS downward from ``y`` through Bruhat lower covers, keeping only
   elements that are >= ``x``; layers are flattened into one list.
   """
   # Allow 1 as shorthand for the identity.
   if (x == 1):
    x = self.one()
   if (y == 1):
    y = self.one()
   if (x == y):
    return [x]
   ret = []
   if (not x.bruhat_le(y)):
    return ret
   ret.append([y])
   # Each iteration computes the next lower layer; stops when a layer is empty.
   while ret[(- 1)]:
    nextlayer = []
    for z in ret[(- 1)]:
     for t in z.bruhat_lower_covers():
      if (t not in nextlayer):
       if x.bruhat_le(t):
        nextlayer.append(t)
    ret.append(nextlayer)
   return flatten(ret)
  def bruhat_interval_poset(self, x, y, facade=False):
   """Return the Bruhat interval [x, y] as a poset.

   BFS downward from ``y``; ``d`` collects cover relations t -> z (t covered
   by z) restricted to elements >= ``x``, then a Poset is built from the
   resulting digraph.
   """
   # Allow 1 as shorthand for the identity.
   if (x == 1):
    x = self.one()
   if (y == 1):
    y = self.one()
   from sage.combinat.posets.posets import Poset
   if (x == y):
    return Poset([[x], []])
   if (not x.bruhat_le(y)):
    return Poset()
   curlayer = {y}
   d = {}
   while curlayer:
    nextlayer = set()
    for z in curlayer:
     for t in z.bruhat_lower_covers():
      if (not x.bruhat_le(t)):
       continue
      # Record the cover relation t < z.
      if (t in d):
       d[t].append(z)
      else:
       d[t] = [z]
      if (t not in nextlayer):
       nextlayer.add(t)
    curlayer = nextlayer
   from sage.graphs.digraph import DiGraph
   return Poset(DiGraph(d, format='dict_of_lists', data_structure='static_sparse'), cover_relations=True, facade=facade)
  def bruhat_graph(self, x=None, y=None, edge_labels=False):
   """Return the Bruhat graph on the interval [x, y] as a digraph.

   There is an edge u -> v (u of larger length listed first) whenever
   u * v^{-1} is a reflection; with ``edge_labels`` the reflection is
   attached to the edge. For infinite groups ``y`` must be given.
   """
   if ((x is None) or (x == 1)):
    x = self.one()
   if (y is None):
    if self.is_finite():
     y = self.long_element()
    else:
     raise TypeError('infinite groups must specify a maximal element')
   elif (y == 1):
    y = self.one()
   # Sort by decreasing length so u (earlier) always has length >= v.
   g = sorted(self.bruhat_interval(x, y), key=(lambda w: (- w.length())))
   d = []
   if self.is_finite():
    # Finite case: test membership in the precomputed reflection set.
    ref = self.reflections()
    for (i, u) in enumerate(g):
     for v in g[:i]:
      w = (u * v.inverse())
      if (w in ref):
       if edge_labels:
        d.append((u, v, w))
       else:
        d.append((u, v))
   else:
    # Infinite case: ask each quotient whether it is a reflection.
    for (i, u) in enumerate(g):
     for v in g[:i]:
      w = (u * v.inverse())
      if w.is_reflection():
       if edge_labels:
        d.append((u, v, w))
       else:
        d.append((u, v))
   from sage.graphs.digraph import DiGraph
   return DiGraph(d)
def canonical_representation(self):
    """Return the canonical (reflection) representation of ``self`` as a
    matrix group built from its Coxeter matrix."""
    from sage.groups.matrix_gps.coxeter_group import CoxeterMatrixGroup
    return CoxeterMatrixGroup(self.coxeter_matrix(), index_set=self.index_set())
def elements_of_length(self, n):
    """Return an iterator over the elements of ``self`` of length ``n``,
    obtained as depth-``n`` elements of the full right weak order ideal."""
    I = self.weak_order_ideal(ConstantFunction(True), side='right')
    return I.elements_of_depth_iterator(n)
def random_element_of_length(self, n):
    """Return a random element of length ``n``, built by appending ``n``
    simple reflections, each chosen uniformly among the right ascents of
    the current element."""
    from sage.misc.prandom import randint
    w = self.one()
    for _ in range(n):
        ascents = w.descents(positive=True)
        w = w.apply_simple_reflection_right(ascents[randint(0, (len(ascents) - 1))])
    return w
def _test_simple_projections(self, **options):
    """Run sanity checks on :meth:`simple_projections`.

    For both sides and each index ``i``, checks that the projections agree
    with ``apply_simple_projection``, that ``pi_i`` creates a descent at
    ``i`` while ``opi_i`` removes it, and that together they reach exactly
    ``{w, w s_i}``.
    """
    tester = self._tester(**options)
    for side in ['left', 'right']:
        pi = self.simple_projections(side=side)
        opi = self.simple_projections(side=side, length_increasing=False)
        for i in self.index_set():
            for w in tester.some_elements():
                tester.assertEqual(pi[i](w), w.apply_simple_projection(i, side=side))
                tester.assertEqual(pi[i](w), w.apply_simple_projection(i, side=side, length_increasing=True))
                tester.assertEqual(opi[i](w), w.apply_simple_projection(i, side=side, length_increasing=False))
                tester.assertTrue(pi[i](w).has_descent(i, side=side))
                tester.assertFalse(opi[i](w).has_descent(i, side=side))
                tester.assertEqual({pi[i](w), opi[i](w)}, {w, w.apply_simple_reflection(i, side=side)})
def _test_has_descent(self, **options):
    """Run sanity checks on :meth:`ElementMethods.has_descent`.

    Checks that the identity has only ascents, that ``s_i`` has exactly
    the descent ``i`` on both sides, and that descents of the products
    ``s_i s_j`` are consistent with whether ``s_i`` and ``s_j`` commute.
    """
    tester = self._tester(**options)
    s = self.simple_reflections()
    for i in self.index_set():
        tester.assertTrue((not self.one().has_descent(i)))
        tester.assertTrue((not self.one().has_descent(i, side='left')))
        tester.assertTrue((not self.one().has_descent(i, side='right')))
        tester.assertTrue(self.one().has_descent(i, positive=True))
        tester.assertTrue(self.one().has_descent(i, positive=True, side='left'))
        tester.assertTrue(self.one().has_descent(i, positive=True, side='right'))
        for j in self.index_set():
            tester.assertEqual(s[i].has_descent(j, side='left'), (i == j))
            tester.assertEqual(s[i].has_descent(j, side='right'), (i == j))
            tester.assertEqual(s[i].has_descent(j), (i == j))
            tester.assertEqual(s[i].has_descent(j, positive=True, side='left'), (i != j))
            tester.assertEqual(s[i].has_descent(j, positive=True, side='right'), (i != j))
            tester.assertEqual(s[i].has_descent(j, positive=True), (i != j))
            if (i == j):
                continue
            # u = s_i s_j and v = s_j s_i coincide exactly when s_i, s_j commute
            u = s[i].apply_simple_reflection_right(j)
            v = s[j].apply_simple_reflection_right(i)
            tester.assertTrue(u.has_descent(i, side='left'))
            tester.assertTrue(u.has_descent(j, side='right'))
            tester.assertEqual(u.has_descent(j, side='left'), (u == v))
            tester.assertEqual(u.has_descent(i, side='right'), (u == v))
def _test_descents(self, **options):
    """Run sanity checks on :meth:`ElementMethods.descents`: the identity
    has none, and ``s_i`` has exactly ``[i]`` on both sides."""
    tester = self._tester(**options)
    s = self.simple_reflections()
    tester.assertEqual(len(self.one().descents(side='right')), 0)
    tester.assertEqual(len(self.one().descents(side='left')), 0)
    for i in self.index_set():
        si = s[i]
        tester.assertEqual([i], si.descents(side='left'))
        tester.assertEqual([i], si.descents(side='right'))
        tester.assertNotIn(i, si.descents(positive=True, side='left'))
        tester.assertNotIn(i, si.descents(positive=True, side='right'))
def _test_coxeter_relations(self, **options):
    """Check the defining Coxeter relations: ``s_i^2 = 1`` and
    ``(s_i s_j)^{m_ij} = 1`` with no smaller positive power trivial."""
    tester = self._tester(**options)
    s = self.simple_reflections()
    one = self.one()
    for si in s:
        tester.assertEqual((si ** 2), one)
    # the Coxeter matrix may be unavailable (optional dependency); skip then
    try:
        cox_mat = self.coxeter_matrix()
    except ImportError:
        return
    I = cox_mat.index_set()
    for (ii, i) in enumerate(I):
        for j in I[(ii + 1):]:
            mij = cox_mat[(i, j)]
            if (mij == (- 1)):
                # -1 encodes an infinite bond: no relation to check
                continue
            l = (s[i] * s[j])
            tester.assertEqual((l ** mij), one, 'Coxeter relation fails')
            for p in range(1, mij):
                tester.assertNotEqual((l ** p), one, 'unexpected relation')
class ElementMethods():
def has_descent(self, i, side='right', positive=False):
    """Return whether ``i`` is a descent of ``self``.

    INPUT:

    - ``i`` -- an index of a simple reflection
    - ``side`` -- ``'right'`` (default) or ``'left'``
    - ``positive`` -- boolean (default: ``False``); if ``True``, test for
      an ascent instead (the answer is negated)
    """
    if (not isinstance(positive, bool)):
        # bug fix: the message previously interpolated the builtin ``bool``
        # instead of the offending value
        raise TypeError(('%s is not a boolean' % (positive,)))
    if (side == 'right'):
        return (self.has_right_descent(i) != positive)
    if (side != 'left'):
        raise ValueError(("%s is neither 'right' nor 'left'" % side))
    return (self.has_left_descent(i) != positive)
def has_right_descent(self, i):
    """Return whether ``i`` is a right descent of ``self``.

    Default implementation via the inverse; a concrete class must override
    at least one of ``has_left_descent`` / ``has_right_descent`` (otherwise
    the two defaults recurse into each other forever).
    """
    return (~ self).has_left_descent(i)
def has_left_descent(self, i):
    """Return whether ``i`` is a left descent of ``self``.

    Default implementation via the inverse; see ``has_right_descent``.
    """
    return (~ self).has_right_descent(i)
def first_descent(self, side='right', index_set=None, positive=False):
    """Return the first index in ``index_set`` (default: all) that is a
    descent (or ascent if ``positive``) of ``self``, or ``None``."""
    if (index_set is None):
        index_set = self.parent().index_set()
    return next((i for i in index_set
                 if self.has_descent(i, side=side, positive=positive)), None)
def descents(self, side='right', index_set=None, positive=False):
    """Return the list of indices in ``index_set`` (default: all) that are
    descents (or ascents if ``positive``) of ``self`` on ``side``."""
    if (index_set is None):
        index_set = self.parent().index_set()
    return [i for i in index_set if self.has_descent(i, side=side, positive=positive)]
def is_grassmannian(self, side='right') -> bool:
    """Return whether ``self`` has at most one descent on ``side``."""
    return (len(self.descents(side=side)) <= 1)
def is_fully_commutative(self) -> bool:
    """Return whether ``self`` is fully commutative, i.e. any reduced word
    for it can be obtained from any other using only commutation moves."""
    word = self.reduced_word()
    from sage.combinat.root_system.braid_orbit import is_fully_commutative as is_fully_comm
    group = self.parent()
    braid_rels = group.braid_relations()
    I = group.index_set()
    from sage.rings.integer_ring import ZZ
    # the helper expects integer letters; remap a non-integer index set
    be_careful = any(((i not in ZZ) for i in I))
    if be_careful:
        Iinv = {i: j for (j, i) in enumerate(I)}
        word = [Iinv[i] for i in word]
        braid_rels = [[[Iinv[i] for i in l], [Iinv[i] for i in r]] for (l, r) in braid_rels]
    return is_fully_comm(word, braid_rels)
def reduced_word_reverse_iterator(self):
    """Iterate over a reduced word for ``self`` from right to left, by
    repeatedly stripping the first right descent."""
    w = self
    i = w.first_descent()
    while (i is not None):
        w = w.apply_simple_reflection(i, 'right')
        (yield i)
        i = w.first_descent()
def reduced_word(self):
    """Return a reduced word for ``self`` as a list of indices of simple
    reflections, read left to right."""
    return list(self.reduced_word_reverse_iterator())[::-1]
def reduced_words_iter(self):
    """Iterate over all reduced words of ``self``, as the braid orbit of
    one reduced word."""
    return self.parent().braid_orbit_iter(self.reduced_word())
def reduced_words(self):
    """Return the list of all reduced words of ``self``."""
    return list(self.reduced_words_iter())
def support(self):
    """Return the set of indices appearing in a reduced word of ``self``."""
    return set(self.reduced_word())
def has_full_support(self):
    """Return whether every index of the group appears in a reduced word
    of ``self``."""
    return (self.support() == set(self.parent().index_set()))
def reduced_word_graph(self):
    """Return the graph of reduced words of ``self``.

    Vertices are the reduced words (as tuples); two words are joined by an
    edge labelled ``m`` when they differ by exactly one braid move
    ``aba... <-> bab...`` of length ``m = m(a, b)``.
    """
    R = self.reduced_words()
    from sage.graphs.graph import Graph
    if (len(R) == 1):
        return Graph({tuple(R[0]): []}, immutable=True)
    P = self.parent()
    edges = []
    for (i, x) in enumerate(R):
        x = tuple(x)
        for y in R[i:]:
            y = tuple(y)
            # first position where the two words differ
            j = 0
            while ((j < len(x)) and (x[j] == y[j])):
                j += 1
            if (j == len(x)):
                continue
            (a, b) = (x[j], y[j])
            m = P.coxeter_matrix()[(a, b)]
            # the words must differ by exactly one braid move of length m
            subword = ([a, b] * (m // 2))
            subword2 = ([b, a] * (m // 2))
            if (m % 2):
                subword.append(a)
                subword2.append(b)
            if ((x[j:(j + m)] != tuple(subword)) or (y[j:(j + m)] != tuple(subword2)) or (x[(j + m):] != y[(j + m):])):
                continue
            edges.append([x, y, m])
    G = Graph(edges, immutable=True, format='list_of_edges')
    colors = {2: 'blue', 3: 'red', 4: 'green'}
    G.set_latex_options(edge_labels=True, color_by_label=(lambda x: colors[x]))
    return G
def length(self):
    """Return the Coxeter length of ``self`` (length of any reduced word)."""
    return len(self.reduced_word())
def reflection_length(self):
    """Return the reflection length of ``self``; alias for
    :meth:`absolute_length`."""
    return self.absolute_length()
def absolute_length(self):
    """Return the absolute length of ``self``, computed as the rank of
    ``M - 1`` for the canonical matrix ``M`` (codimension of the fixed
    space)."""
    M = self.canonical_matrix()
    return (M - 1).image().dimension()
def absolute_le(self, other):
    """Return whether ``self`` precedes ``other`` in absolute order:
    the absolute lengths must add up along the interval."""
    if (self == other):
        return True
    len_self = self.absolute_length()
    len_other = other.absolute_length()
    if (len_self >= len_other):
        return False
    return ((len_self + (self.inverse() * other).absolute_length()) == len_other)
def absolute_covers(self):
    """Return the list of covers of ``self`` in absolute order, i.e. the
    products ``self * t`` over reflections ``t`` that increase the
    absolute length."""
    W = self.parent()
    return [(self * t) for t in W.reflections() if (self.absolute_length() < (self * t).absolute_length())]
def canonical_matrix(self):
    """Return the matrix of ``self`` in the canonical (reflection)
    representation, as the product of the simple-reflection matrices along
    a reduced word."""
    G = self.parent().canonical_representation()
    return G.prod((G.simple_reflection(i) for i in self.reduced_word())).matrix()
def coset_representative(self, index_set, side='right'):
    """Return the minimal length representative of the coset of ``self``
    modulo the parabolic subgroup generated by ``index_set``, by stripping
    descents in ``index_set`` until none remain."""
    w = self
    while True:
        i = w.first_descent(side=side, index_set=index_set)
        if (i is None):
            return w
        w = w.apply_simple_reflection(i, side=side)
def apply_simple_projection(self, i, side='right', length_increasing=True):
    """Apply the simple projection ``pi_i`` (or its length-decreasing
    counterpart when ``length_increasing=False``): multiply by ``s_i``
    only if that moves the length in the requested direction."""
    if self.has_descent(i, side=side, positive=length_increasing):
        return self.apply_simple_reflection(i, side=side)
    return self
def binary_factorizations(self, predicate=ConstantFunction(True)):
    """Return the set of pairs ``(u, v)`` with ``self = u * v``,
    ``l(self) = l(u) + l(v)`` and ``predicate(u)``.

    Built as a search forest rooted at ``(1, self)``: each step moves a
    left descent of ``v`` to the right end of ``u``.
    """
    from sage.sets.recursively_enumerated_set import RecursivelyEnumeratedSet_forest
    W = self.parent()
    if (not predicate(W.one())):
        from sage.sets.finite_enumerated_set import FiniteEnumeratedSet
        return FiniteEnumeratedSet([])
    def succ(u_v):
        (u, v) = u_v
        for i in v.descents(side='left'):
            u1 = u.apply_simple_reflection_right(i)
            # the first-descent condition makes each pair reachable once
            if ((i == u1.first_descent()) and predicate(u1)):
                (yield (u1, v.apply_simple_reflection_left(i)))
    return RecursivelyEnumeratedSet_forest(((W.one(), self),), succ, category=FiniteEnumeratedSets())
# NOTE(review): bare name below looks like a stripped decorator
# (``@_in_parent_method``) -- confirm against the original source
_in_parent_method
def bruhat_lower_covers(self):
    """Return the elements covered by ``self`` in Bruhat order.

    Recursive: strip the first right descent ``s``, take the lower covers
    of ``self * s`` that do not have ``s`` as a right descent, multiply
    them back by ``s``; ``self * s`` itself is always a lower cover.
    """
    desc = self.first_descent(side='right')
    if (desc is None):
        return []
    ww = self.apply_simple_reflection(desc, side='right')
    return ([u.apply_simple_reflection(desc, side='right') for u in ww.bruhat_lower_covers() if (not u.has_descent(desc, side='right'))] + [ww])
# NOTE(review): bare name below looks like a stripped decorator
_in_parent_method
def bruhat_upper_covers(self):
    """Return the sorted list of elements covering ``self`` in Bruhat order.

    For each index ``i``: if ``i`` is a right descent, lift the upper
    covers of ``self * s_i`` that do not have ``i`` as a descent;
    otherwise ``self * s_i`` itself is an upper cover.
    """
    Covers = set()
    for i in self.parent().index_set():
        if (i in self.descents(side='right')):
            Covers.update((x.apply_simple_reflection(i, side='right') for x in self.apply_simple_reflection(i, side='right').bruhat_upper_covers() if (i not in x.descents(side='right'))))
        else:
            Covers.add(self.apply_simple_reflection(i, side='right'))
    return sorted(Covers)
# NOTE(review): bare name below looks like a stripped decorator
_in_parent_method
def bruhat_lower_covers_reflections(self):
    """Return the pairs ``(u, t)`` where ``u`` is covered by ``self`` in
    Bruhat order and ``t`` is the reflection with ``u = self * t``.

    Same recursion as ``bruhat_lower_covers``, additionally conjugating
    each reflection by the stripped simple reflection.
    """
    i = self.first_descent(side='right')
    if (i is None):
        return []
    wi = self.apply_simple_reflection(i, side='right')
    return ([(u.apply_simple_reflection(i, side='right'), r.apply_conjugation_by_simple_reflection(i)) for (u, r) in wi.bruhat_lower_covers_reflections() if (not u.has_descent(i, side='right'))] + [(wi, self.parent().simple_reflection(i))])
def lower_cover_reflections(self, side='right'):
    """Return the reflections ``t`` such that ``self`` covers ``self * t``
    (or ``t * self`` for ``side='left'``) in Bruhat order."""
    w = (self.inverse() if (side == 'left') else self)
    return [refl for (_, refl) in w.bruhat_lower_covers_reflections()]
# NOTE(review): bare name below looks like a stripped decorator
_in_parent_method
def bruhat_upper_covers_reflections(self):
    """Return the sorted pairs ``(u, t)`` where ``u`` covers ``self`` in
    Bruhat order and ``t`` is the reflection with ``u = self * t``."""
    Covers = set()
    for i in self.parent().index_set():
        wi = self.apply_simple_reflection(i)
        if (i in self.descents()):
            Covers.update(((u.apply_simple_reflection(i), r.apply_conjugation_by_simple_reflection(i)) for (u, r) in wi.bruhat_upper_covers_reflections() if (i not in u.descents())))
        else:
            Covers.add((wi, self.parent().simple_reflection(i)))
    return sorted(Covers)
def cover_reflections(self, side='right'):
    """Return the reflections ``t`` such that ``self * t``
    (or ``t * self`` for ``side='left'``) covers ``self`` in Bruhat order."""
    w = (self.inverse() if (side == 'left') else self)
    return [refl for (_, refl) in w.bruhat_upper_covers_reflections()]
# NOTE(review): bare name below looks like a stripped decorator
_in_parent_method
def bruhat_le(self, other):
    """Return whether ``self <= other`` in Bruhat order.

    Recursion: strip a descent ``s`` of ``other`` and compare the
    down-projections; at the bottom compare for equality.
    """
    if (not have_same_parent(self, other)):
        raise TypeError(('%s and %s do not have the same parent' % (self, other)))
    desc = other.first_descent()
    if (desc is not None):
        return self.apply_simple_projection(desc, length_increasing=False).bruhat_le(other.apply_simple_reflection(desc))
    return (self == other)
def weak_le(self, other, side='right'):
    """Return whether ``self <= other`` in left/right weak order.

    Right weak order compares reduced-word *prefixes*, so the recursion
    strips common left descents (and symmetrically for left weak order).
    """
    if (not have_same_parent(self, other)):
        raise TypeError(f'{self} and {other} do not have the same parent')
    prefix_side = ('left' if (side == 'right') else 'right')
    while True:
        desc = self.first_descent(side=prefix_side)
        if (desc is None):
            # self reduced to the identity: every stripped letter matched
            return True
        if (not other.has_descent(desc, side=prefix_side)):
            return False
        self = self.apply_simple_reflection(desc, side=prefix_side)
        other = other.apply_simple_reflection(desc, side=prefix_side)
def weak_covers(self, side='right', index_set=None, positive=False):
    """Return the elements ``self * s_i`` over the descents (or ascents if
    ``positive``) ``i`` of ``self``: the lower (resp. upper) covers of
    ``self`` in weak order."""
    return [self.apply_simple_reflection(i, side=side) for i in self.descents(side=side, index_set=index_set, positive=positive)]
def coxeter_sorting_word(self, c):
    """Return the ``c``-sorting word of ``self``: cycle through the letters
    of the Coxeter word ``c`` and greedily strip each letter that is a left
    descent, until the element is fully reduced."""
    if hasattr(c, 'reduced_word'):
        c = c.reduced_word()
    elif (not isinstance(c, list)):
        c = list(c)
    n = self.parent().rank()
    w = self
    remaining = w.length()
    pos = 0
    word = []
    while (remaining > 0):
        s = c[pos]
        if w.has_left_descent(s):
            w = w.apply_simple_reflection_left(s)
            remaining -= 1
            word.append(s)
        pos = ((pos + 1) % n)
    return word
def is_coxeter_sortable(self, c, sorting_word=None):
    """Return whether ``self`` is ``c``-sortable, i.e. whether the
    supports of the blocks of its ``c``-sorting word are nested.

    ``sorting_word`` may be passed to avoid recomputing it.
    """
    if hasattr(c, 'reduced_word'):
        c = c.reduced_word()
    elif (not isinstance(c, list)):
        c = list(c)
    if (sorting_word is None):
        sorting_word = self.coxeter_sorting_word(c)
    n = len(c)
    # still_allowed[j]: whether letter c[j] may still appear in later blocks
    still_allowed = ([True] * n)
    matched = 0
    pos = 0
    while (matched < len(sorting_word)):
        if (c[pos] == sorting_word[matched]):
            matched += 1
            if (not still_allowed[pos]):
                return False
        else:
            still_allowed[pos] = False
        pos = ((pos + 1) % n)
    return True
def apply_demazure_product(self, element, side='right', length_increasing=True):
    """Return the Demazure product of ``self`` with ``element``.

    ``element`` may be a group element (its reduced word is used) or a
    list/tuple of indices.  Each letter is applied through the simple
    projection, so the length only moves in the requested direction.
    """
    if self.parent().is_parent_of(element):
        the_word = element.reduced_word()
    else:
        if isinstance(element, tuple):
            element = list(element)
        elif (not isinstance(element, list)):
            raise TypeError(f'Bad Coxeter group element input: {element}')
        I = self.parent().index_set()
        if (not all(((i in I) for i in element))):
            raise ValueError(('%s does not have all its members in the index set of the %s' % (element, self.parent())))
        # copy so reversing below does not mutate the caller's list
        the_word = copy(element)
    if (side == 'left'):
        the_word.reverse()
    for i in the_word:
        self = self.apply_simple_projection(i, side=side, length_increasing=length_increasing)
    return self
def min_demazure_product_greater(self, element):
    """Return the smallest element whose inverse weak order interval with
    ``self`` contains ``element``: apply each letter of ``element`` on the
    left only when it is a left descent of the current element."""
    if self.parent().is_parent_of(element):
        the_word = element.reduced_word()
    else:
        if (not isinstance(element, (tuple, list))):
            raise TypeError(('Bad Coxeter group element input: %s' % element))
        I = self.parent().index_set()
        if (not all(((i in I) for i in element))):
            raise ValueError(('%s does not have all its members in the index set of the %s' % (element, self.parent())))
        the_word = element
    for i in the_word:
        if self.has_descent(i, side='left'):
            self = self.apply_simple_reflection(i, side='left')
    return self
def deodhar_factor_element(self, w, index_set):
    """Return Deodhar's parabolic factor element for ``self <= w``, both
    assumed to be minimal coset representatives for ``index_set``.

    Implements Deodhar's recursion on a left descent of ``w``.
    """
    if (self != self.coset_representative(index_set)):
        raise ValueError(('%s is not of minimum length in its coset for the parabolic subgroup with index set %s' % (self.reduced_word(), index_set)))
    if (w != w.coset_representative(index_set)):
        raise ValueError(('%s is not of minimum length in its coset for the parabolic subgroup with index set %s' % (w.reduced_word(), index_set)))
    if (not self.bruhat_le(w)):
        raise ValueError(('Must have %s <= %s' % (self.reduced_word(), w.reduced_word())))
    if w.is_one():
        return w
    i = w.first_descent(side='left')
    sw = w.apply_simple_reflection(i, side='left')
    sv = self.apply_simple_reflection(i, side='left')
    if self.has_descent(i, side='left'):
        return sv.deodhar_factor_element(sw, index_set)
    dsp = self.deodhar_factor_element(sw, index_set)
    des = sv.first_descent(side='right', index_set=index_set)
    if (des is None):
        return dsp
    # NOTE(review): a right descent index is projected on the left here --
    # matches the recursion as implemented; confirm against the reference
    # before changing
    return dsp.apply_simple_projection(des, side='left')
def deodhar_lift_up(self, w, index_set):
    """Return the unique Bruhat-minimal element above ``self`` in the coset
    ``w W_J`` for the parabolic subgroup with index set ``index_set``."""
    vmin = self.coset_representative(index_set)
    wmin = w.coset_representative(index_set)
    if (not vmin.bruhat_le(wmin)):
        raise ValueError(('Must have %s <= %s mod the parabolic subgroup with index set %s' % (self.reduced_word(), w.reduced_word(), index_set)))
    # parabolic part of self: self = vmin * vJ
    vJ = (vmin.inverse() * self)
    dsp = vmin.deodhar_factor_element(wmin, index_set)
    return (wmin * vJ.min_demazure_product_greater(dsp))
def deodhar_lift_down(self, w, index_set):
    """Return the unique Bruhat-maximal element below ``self`` in the coset
    ``w W_J`` for the parabolic subgroup with index set ``index_set``."""
    vmin = self.coset_representative(index_set)
    wmin = w.coset_representative(index_set)
    if (not wmin.bruhat_le(vmin)):
        raise ValueError(('Must have %s <= %s mod the parabolic subgroup with index set %s' % (w.reduced_word(), self.reduced_word(), index_set)))
    # parabolic part of self: self = vmin * vJ
    vJ = (vmin.inverse() * self)
    dsp = wmin.deodhar_factor_element(vmin, index_set)
    return (wmin * dsp.apply_demazure_product(vJ))
# NOTE(review): bare name below looks like a stripped decorator
_in_parent_method
def inversions_as_reflections(self):
    """Return the set of (right) inversions of ``self`` as reflections.

    Recursive: ``s_i`` (for the first descent ``i``) is an inversion, and
    the remaining inversions are those of ``self * s_i`` conjugated by
    ``s_i``.
    """
    i = self.first_descent()
    if (i is None):
        return []
    wi = self.apply_simple_reflection(i)
    return ([self.parent().simple_reflection(i)] + [u.apply_conjugation_by_simple_reflection(i) for u in wi.inversions_as_reflections()])
def left_inversions_as_reflections(self):
    """Return the left inversions of ``self`` as reflections, via the
    (right) inversions of the inverse."""
    return self.inverse().inversions_as_reflections()
def lower_covers(self, side='right', index_set=None):
    """Return the lower covers of ``self`` in weak order (restricted to
    ``index_set`` if given)."""
    return self.weak_covers(side=side, index_set=index_set, positive=False)
def upper_covers(self, side='right', index_set=None):
    """Return the upper covers of ``self`` in weak order (restricted to
    ``index_set`` if given)."""
    return self.weak_covers(side=side, index_set=index_set, positive=True)
def kazhdan_lusztig_cell(self, side='left'):
    """Return the Kazhdan--Lusztig cell (``'left'``, ``'right'`` or
    ``'two-sided'``) containing ``self``.

    Computed by closing ``self`` under products of KL basis elements
    ``C'_s C'_w`` and/or ``C'_w C'_s`` and taking the strongly connected
    component of ``self`` in the resulting digraph.
    """
    from sage.algebras.iwahori_hecke_algebra import IwahoriHeckeAlgebra
    from sage.rings.polynomial.laurent_polynomial_ring import LaurentPolynomialRing
    from sage.rings.integer_ring import ZZ
    R = LaurentPolynomialRing(ZZ, 'v')
    v = R.gen(0)
    # Hecke algebra in the normalisation q = v^2, with KL basis C'
    H = IwahoriHeckeAlgebra(self.parent(), (v ** 2))
    Cp = H.Cp()
    w = self.parent()(self)
    (vertices, edges) = ({w}, set())
    queue = deque([w])
    while queue:
        x = queue.pop()
        cp_x = Cp(x)
        for s in self.parent().simple_reflections():
            cp_s = Cp(s)
            terms = []
            if ((side == 'left') or (side == 'two-sided')):
                terms.extend(list((cp_s * cp_x)))
            if ((side == 'right') or (side == 'two-sided')):
                terms.extend(list((cp_x * cp_s)))
            # one edge x -> y for each basis element in the product(s)
            for (y, _) in terms:
                if (y != x):
                    edges.add((x, y))
                    if (y not in vertices):
                        vertices.add(y)
                        queue.appendleft(y)
    from sage.graphs.digraph import DiGraph
    g = DiGraph([list(vertices), list(edges)])
    return set(g.strongly_connected_component_containing_vertex(w))
def hypotest(pdf, data):
    """Run a pyhf hypothesis test at signal strength ``mu = 1.0``,
    returning CLs together with tail probabilities, the expected CLs and
    the expected (Brazil band) set."""
    return pyhf.infer.hypotest(1.0, data, pdf, pdf.config.suggested_init(), pdf.config.suggested_bounds(), return_tail_probs=True, return_expected=True, return_expected_set=True)
def calculate_loss(rule_model, criterion, info, gt):
    """Compute the masked mean loss of ``rule_model`` predictions vs ``gt``.

    Returns ``(metrics, mean_oracle_success)`` where ``metrics`` holds the
    masked-average loss and the accuracy; entries with ``mask == 0`` are
    excluded from the loss average.
    """
    (pred, mask) = get_prediction(rule_model, info)
    # number of valid (unmasked) entries used to normalise the summed loss
    n_valid_entries = torch.sum((mask.view((- 1)) != 0))
    loss = criterion(pred, gt)
    loss = loss.masked_fill((mask == 0), 0)
    loss = (torch.sum(loss) / n_valid_entries)
    mean_highest_success_correct = get_accuracy(pred, gt, mask)
    # oracle rate: a row counts as success if any valid gt entry is nonzero
    # (assumes gt's last axis enumerates candidates -- TODO confirm)
    masked_gt = torch.sum(gt.masked_fill((mask == 0), 0), dim=(- 1))
    mean_oracle_success = masked_gt.masked_fill((masked_gt != 0), 1.0).mean()
    return ({'loss': loss, 'mean_highest_success_correct': mean_highest_success_correct}, mean_oracle_success)
class Solarize(DauphinTransform):
    """Augmentation that inverts all pixel values above a threshold derived
    from the transform's ``level``."""

    # admissible threshold range handed to ``categorize_value``
    value_range = (0, 256)

    def __init__(self, name=None, prob=1.0, level=0):
        super().__init__(name, prob, level)

    def transform(self, pil_img, label, **kwargs):
        threshold = categorize_value(self.level, self.value_range, 'float')
        return (ImageOps.solarize(pil_img, threshold), label)
def get_src_findex_by_pad(s, S, padding_mode, align_corners):
    """Map a (possibly out-of-range) source coordinate ``s`` into the valid
    range for a size-``S`` axis according to ``padding_mode``.

    - ``'zero'``: zero padding outside the input
    - ``'reflect'``: reflection padding; without ``align_corners`` the
      reflected coordinate is additionally clamped via repeat padding
    - ``'repeat'``: edge (clamp) padding

    Raises ``ValueError`` for an unknown ``padding_mode`` (previously the
    function silently fell through and returned ``None``).
    """
    if (padding_mode == 'zero'):
        return get_src_findex_with_zero_pad(s, S)
    elif (padding_mode == 'reflect'):
        if align_corners:
            return get_src_findex_with_reflect_pad(s, S, True)
        sf = get_src_findex_with_reflect_pad(s, S, False)
        return get_src_findex_with_repeat_pad(sf, S)
    elif (padding_mode == 'repeat'):
        return get_src_findex_with_repeat_pad(s, S)
    raise ValueError(('unknown padding_mode: %s' % padding_mode))
def save_deblur_checkpoints(file_path, epoch_idx, deblurnet, deblurnet_solver, Best_Img_PSNR, Best_Epoch):
    """Save the deblur network, its optimizer state and the best-PSNR
    bookkeeping to ``file_path`` via ``torch.save``."""
    print(('[INFO] %s Saving checkpoint to %s ...\n' % (dt.now(), file_path)))
    checkpoint = {'epoch_idx': epoch_idx, 'Best_Img_PSNR': Best_Img_PSNR, 'Best_Epoch': Best_Epoch, 'deblurnet_state_dict': deblurnet.state_dict(), 'deblurnet_solver_state_dict': deblurnet_solver.state_dict()}
    torch.save(checkpoint, file_path)
def compute_rhs(ui_hat, bh_hat):
    """Assemble the right-hand side of the spectral solver into ``bh_hat``.

    Transforms ``ui_hat`` to physical space, forms the outer product
    ``u_i u_j``, transforms back and projects its divergence onto the test
    space.  Relies on module-level work arrays/spaces (``ui``, ``uiuj``,
    ``W1``, ``v``, ...); ``bh_hat`` is overwritten and returned.
    """
    global ui, uiuj, uiuj_hat, V1, bh_hat0
    bh_hat.fill(0)
    ui = W1.backward(ui_hat, ui)
    uiuj = outer(ui, ui, uiuj)
    uiuj_hat = uiuj.forward(uiuj_hat)
    # project div(u u) into the first component of bh_hat (in place)
    bi_hat = bh_hat[0]
    bi_hat = inner(v, div(uiuj_hat), output_array=bi_hat)
    return bh_hat
def log_mel_filterbank_from_raw(raw_audio: Tensor, *, in_spatial_dim: Dim, out_dim: Dim, sampling_rate: int=16000, window_len: float=0.025, step_len: float=0.01, n_fft: Optional[int]=None, log_base: Union[(int, float)]=10) -> Tuple[(Tensor, Dim)]:
    """Compute log-mel filterbank features from raw audio samples.

    ``window_len`` and ``step_len`` are in seconds; ``n_fft`` defaults to
    the next power of two above the window size.  Returns the log-mel
    features and the resulting spatial (frames) dim.
    """
    if (raw_audio.feature_dim and (raw_audio.feature_dim.dimension == 1)):
        # drop a trailing singleton channel axis
        raw_audio = rf.squeeze(raw_audio, axis=raw_audio.feature_dim)
    window_num_frames = int((window_len * sampling_rate))
    step_num_frames = int((step_len * sampling_rate))
    if (not n_fft):
        n_fft = util_math.next_power_of_two(window_num_frames)
    (spectrogram, out_spatial_dim, in_dim_) = rf.stft(raw_audio, in_spatial_dim=in_spatial_dim, frame_step=step_num_frames, frame_length=window_num_frames, fft_length=n_fft)
    power_spectrogram = (rf.abs(spectrogram) ** 2.0)
    mel_fbank = mel_filterbank(power_spectrogram, in_dim=in_dim_, out_dim=out_dim, sampling_rate=sampling_rate)
    log_mel_fbank = rf.safe_log(mel_fbank, eps=1e-10)
    if (log_base != math.e):
        # change of base: log_b(x) = ln(x) / ln(b)
        log_mel_fbank = (log_mel_fbank * (1.0 / math.log(log_base)))
    return (log_mel_fbank, out_spatial_dim)
# NOTE(review): bare call below looks like a stripped decorator
# (presumably ``@registry.autoregister_params(name='unroll')``)
_params(name='unroll')
class UnrollCodeGen(TargetCodeGenerator):
    """Code generator that fully unrolls maps scheduled as ``Unrolled``.

    Every point of the map range is emitted as its own C++ scope with the
    map parameters declared as ``constexpr`` constants.
    """
    target_name = 'unroll'
    title = 'Unrolled'
    language = 'cpp'

    def __init__(self, frame_codegen: DaCeCodeGenerator, sdfg: dace.SDFG):
        self._frame = frame_codegen
        self._dispatcher = frame_codegen.dispatcher
        # (removed a dead local alias of the dispatcher)
        self._dispatcher.register_map_dispatcher(dace.ScheduleType.Unrolled, self)

    def get_generated_codeobjects(self):
        # all code is emitted inline at the call site; no separate objects
        return []

    def nsdfg_prepare_unroll(self, scope: ScopeSubgraphView, paramname: str, paramval: str):
        """Recursively rename nested SDFGs for one unrolled iteration and
        turn the unroll parameter into an SDFG constant.

        Returns a backup list of ``(node, unique_name, sdfg_name,
        symbol_mapping, constants_prop)`` tuples for
        :meth:`nsdfg_after_unroll`.
        """
        backup = []
        for node in scope.nodes():
            if isinstance(node, nd.NestedSDFG):
                backup.append((node, node.unique_name, node.sdfg.name, node.symbol_mapping, node.sdfg.constants_prop))
                # deep-copy mutable state so the backup stays pristine
                node.unique_name = copy.deepcopy(node.unique_name)
                node.sdfg.name = copy.deepcopy(node.sdfg.name)
                node.symbol_mapping = copy.deepcopy(node.symbol_mapping)
                node.sdfg.constants_prop = copy.deepcopy(node.sdfg.constants_prop)
                node.unique_name = f'{node.unique_name}_{paramname}{paramval}'
                node.sdfg.name = f'{node.sdfg.name}_{paramname}{paramval}'
                for nstate in node.sdfg.nodes():
                    backup.extend(self.nsdfg_prepare_unroll(nstate, paramname, paramval))
                if (paramname in node.symbol_mapping):
                    node.symbol_mapping.pop(paramname)
                node.sdfg.add_constant(paramname, int(paramval))
        return backup

    def nsdfg_after_unroll(self, backup: 'list[tuple[nd.NestedSDFG, str, str, dict, dict]]'):
        """Restore nested-SDFG names, symbols and constants from a backup
        produced by :meth:`nsdfg_prepare_unroll`.

        (Annotation fixed: the backup items are 5-tuples including the node.)
        """
        for (node, unique_name, name, symbols, constants) in backup:
            node.unique_name = unique_name
            node.sdfg.name = name
            node.symbol_mapping = symbols
            node.sdfg.constants_prop = constants

    def generate_scope(self, sdfg: dace.SDFG, scope: ScopeSubgraphView, state_id: int, function_stream: CodeIOStream, callsite_stream: CodeIOStream):
        """Emit one brace-delimited C++ scope per point of the map range,
        declaring the map parameters as ``constexpr`` constants."""
        entry_node: nd.MapEntry = scope.source_nodes()[0]
        # expand each range dimension into the explicit list of index values
        index_list = []
        for (begin, end, stride) in entry_node.map.range:
            l = []
            while (begin <= end):
                l.append(begin)
                begin += stride
            index_list.append(l)
        # snapshot the SDFG constants; restored after unrolling
        sdfgconsts = sdfg.constants_prop
        sdfg.constants_prop = copy.deepcopy(sdfg.constants_prop)
        mapsymboltypes = entry_node.new_symbols(sdfg, scope, [entry_node.map.params])
        for indices in product(*index_list):
            callsite_stream.write('{')
            # NOTE(review): only the first parameter's prepare backup is kept
            # and later restored -- TODO confirm multi-parameter maps restore
            # nested-SDFG state correctly
            nsdfg_unroll_info = None
            for (param, index) in zip(entry_node.map.params, indices):
                if (nsdfg_unroll_info is None):
                    nsdfg_unroll_info = self.nsdfg_prepare_unroll(scope, str(param), str(index))
                else:
                    self.nsdfg_prepare_unroll(scope, str(param), str(index))
                callsite_stream.write(f'''constexpr {mapsymboltypes[param]} {param} = {dace.codegen.common.sym2cpp(index)};
''', sdfg)
                sdfg.add_constant(param, int(index))
            callsite_stream.write('{')
            self._dispatcher.dispatch_subgraph(sdfg, scope, state_id, function_stream, callsite_stream, skip_entry_node=True, skip_exit_node=True)
            callsite_stream.write('}')
            callsite_stream.write('}')
            self.nsdfg_after_unroll(nsdfg_unroll_info)
        sdfg.constants_prop = sdfgconsts
def test_sample_missing_data():
    """A sample with an empty ``data`` list must be rejected as an invalid
    pyhf specification."""
    spec = {'channels': [{'name': 'channel', 'samples': [{'name': 'sample', 'data': [], 'modifiers': []}]}]}
    with pytest.raises(pyhf.exceptions.InvalidSpecification):
        pyhf.Model(spec)
class BaseOptions():
    """Base command-line options shared by the train/test entry points.

    Subclasses are expected to set ``self.isTrain`` and may extend
    :meth:`initialize` with additional arguments.
    """

    def __init__(self):
        # whether ``initialize`` has populated a parser yet
        self.initialized = False

    def initialize(self, parser):
        """Register the options common to all phases on ``parser``."""
        parser.add_argument('--root', type=str, default='datasets', help='path to dataset')
        parser.add_argument('--dataset', type=str, default='kitti', help='dataset name')
        parser.add_argument('--test_data_file', type=str, default='sval.list', help='validatation data list')
        parser.add_argument('--train_data_file', type=str, default='train.list', help='validatation data list')
        parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--model', type=str, default='sd', help='chooses which model to use, sd|...')
        parser.add_argument('--clip', action='store_true', help='clip the above part of the image')
        parser.add_argument('--channels', type=int, default=32, help='channels')
        parser.add_argument('--scale', type=float, default=80, help='scale')
        parser.add_argument('--knn', nargs='+', type=int, default=6, help='number of nearest-neighbour')
        parser.add_argument('--nsamples', nargs='+', type=int, default=10000, help='sampling ratio')
        parser.add_argument('--nThreads', default=8, type=int, help='# threads for loading data')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
        parser.add_argument('--no_augment', action='store_true', help='if specified, do not use data augmentation, e.g., randomly shifting gamma')
        parser.add_argument('--init_type', type=str, default='kaiming', help='network initialization [normal|xavier|kaiming|orthogonal]')
        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{which_model_netG}_size{loadSize}')
        self.initialized = True
        return parser

    def gather_options(self):
        """Build the parser (with model-specific options) and parse argv.

        Bug fix: previously a second call (``self.initialized`` True)
        referenced the local ``parser`` before assignment, raising a
        ``NameError``.  The fully-built parser is now cached in
        ``self.parser`` and reused.
        """
        if (not self.initialized):
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
            # peek at --model to add the model-specific options
            (opt, _) = parser.parse_known_args()
            model_option_setter = models.get_option_setter(opt.model)
            parser = model_option_setter(parser, self.isTrain)
            self.parser = parser
        else:
            parser = self.parser
        return parser.parse_args()

    def print_options(self, opt):
        """Pretty-print all options (flagging non-default values) and, in
        training mode, also dump them to ``<checkpoints>/<expr_name>/opt.txt``."""
        message = ''
        message += ' Options \n'
        for (k, v) in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if (v != default):
                comment = ('\t[default: %s]' % str(default))
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += ' End '
        print(message)
        if self.isTrain:
            expr_dir = os.path.join(opt.checkpoints_dir, opt.expr_name)
            util.mkdirs(expr_dir)
            file_name = os.path.join(expr_dir, 'opt.txt')
            with open(file_name, 'wt') as opt_file:
                opt_file.write(message)
                opt_file.write('\n')

    def parse(self):
        """Parse the options, derive ``expr_name``, select the GPU devices
        and return the resulting namespace (also stored in ``self.opt``)."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain
        opt.expr_name = ((opt.dataset + '_') + opt.model)
        if opt.suffix:
            # opt.suffix is non-empty here, so the separator is always added
            opt.expr_name = (opt.expr_name + ('_' + opt.suffix.format(**vars(opt))))
        if opt.isTrain:
            self.print_options(opt)
        # convert the comma-separated id string into a list of ints
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if (id >= 0):
                opt.gpu_ids.append(id)
        if (len(opt.gpu_ids) > 0):
            torch.cuda.set_device(opt.gpu_ids[0])
        self.opt = opt
        return self.opt
_module()
class HRNet(nn.Module):
blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}
def __init__(self, extra, in_channels=3, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=False, with_cp=False, zero_init_residual=False):
super(HRNet, self).__init__()
self.extra = extra
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
(self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, 64, postfix=1)
(self.norm2_name, norm2) = build_norm_layer(self.norm_cfg, 64, postfix=2)
self.conv1 = build_conv_layer(self.conv_cfg, in_channels, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(self.conv_cfg, 64, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.stage1_cfg = self.extra['stage1']
num_channels = self.stage1_cfg['num_channels'][0]
block_type = self.stage1_cfg['block']
num_blocks = self.stage1_cfg['num_blocks'][0]
block = self.blocks_dict[block_type]
stage1_out_channels = (num_channels * block.expansion)
self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
self.stage2_cfg = self.extra['stage2']
num_channels = self.stage2_cfg['num_channels']
block_type = self.stage2_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [(channel * block.expansion) for channel in num_channels]
self.transition1 = self._make_transition_layer([stage1_out_channels], num_channels)
(self.stage2, pre_stage_channels) = self._make_stage(self.stage2_cfg, num_channels)
self.stage3_cfg = self.extra['stage3']
num_channels = self.stage3_cfg['num_channels']
block_type = self.stage3_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [(channel * block.expansion) for channel in num_channels]
self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
(self.stage3, pre_stage_channels) = self._make_stage(self.stage3_cfg, num_channels)
self.stage4_cfg = self.extra['stage4']
num_channels = self.stage4_cfg['num_channels']
block_type = self.stage4_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [(channel * block.expansion) for channel in num_channels]
self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
(self.stage4, pre_stage_channels) = self._make_stage(self.stage4_cfg, num_channels)
def norm1(self):
return getattr(self, self.norm1_name)
def norm2(self):
return getattr(self, self.norm2_name)
def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if (i < num_branches_pre):
if (num_channels_cur_layer[i] != num_channels_pre_layer[i]):
transition_layers.append(nn.Sequential(build_conv_layer(self.conv_cfg, num_channels_pre_layer[i], num_channels_cur_layer[i], kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, num_channels_cur_layer[i])[1], nn.ReLU(inplace=True)))
else:
transition_layers.append(None)
else:
conv_downsamples = []
for j in range(((i + 1) - num_branches_pre)):
in_channels = num_channels_pre_layer[(- 1)]
out_channels = (num_channels_cur_layer[i] if (j == (i - num_branches_pre)) else in_channels)
conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, out_channels)[1], nn.ReLU(inplace=True)))
transition_layers.append(nn.Sequential(*conv_downsamples))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if ((stride != 1) or (inplanes != (planes * block.expansion))):
downsample = nn.Sequential(build_conv_layer(self.conv_cfg, inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, (planes * block.expansion))[1])
layers = []
layers.append(block(inplanes, planes, stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg))
inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(inplanes, planes, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, in_channels, multiscale_output=True):
num_modules = layer_config['num_modules']
num_branches = layer_config['num_branches']
num_blocks = layer_config['num_blocks']
num_channels = layer_config['num_channels']
block = self.blocks_dict[layer_config['block']]
hr_modules = []
for i in range(num_modules):
if ((not multiscale_output) and (i == (num_modules - 1))):
reset_multiscale_output = False
else:
reset_multiscale_output = True
hr_modules.append(HRModule(num_branches, block, num_blocks, in_channels, num_channels, reset_multiscale_output, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg))
return (nn.Sequential(*hr_modules), in_channels)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif (pretrained is None):
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
    """Run the stem, then stages 2-4, returning the multi-branch outputs."""
    # Stem: two stride conv/norm/relu blocks, then the layer1 bottlenecks.
    x = self.relu(self.norm1(self.conv1(x)))
    x = self.relu(self.norm2(self.conv2(x)))
    x = self.layer1(x)

    # Stage 2: fan the single stem tensor out to every branch.
    inputs = []
    for i in range(self.stage2_cfg['num_branches']):
        trans = self.transition1[i]
        inputs.append(trans(x) if trans is not None else x)
    outputs = self.stage2(inputs)

    # Stage 3: new branches are derived from the last (lowest-res) output.
    inputs = []
    for i in range(self.stage3_cfg['num_branches']):
        trans = self.transition2[i]
        inputs.append(trans(outputs[-1]) if trans is not None else outputs[i])
    outputs = self.stage3(inputs)

    # Stage 4: same pattern as stage 3.
    inputs = []
    for i in range(self.stage4_cfg['num_branches']):
        trans = self.transition3[i]
        inputs.append(trans(outputs[-1]) if trans is not None else outputs[i])
    outputs = self.stage4(inputs)
    return outputs
def train(self, mode=True):
    """Set train/eval mode; keep BatchNorm frozen when ``norm_eval`` is on."""
    super(HRNet, self).train(mode)
    if not (mode and self.norm_eval):
        return
    for module in self.modules():
        if isinstance(module, _BatchNorm):
            module.eval()
class Market1501(BaseImageDataset):
    """Market-1501 person re-identification dataset.

    Expects ``root/market1501`` to contain ``train``, ``val``, ``query`` and
    ``gallery`` directories of jpg images whose names encode identity and
    camera as ``<pid>_c<camid>...jpg`` (pid -1 marks junk images).
    """
    dataset_dir = 'market1501'
    # pid/camid are parsed from filenames like '0002_c1s1_000451_03.jpg'.
    # Compiled once here instead of once per method call.
    _pattern = re.compile(r'([-\d]+)_c(\d)')

    def __init__(self, root='/home/haoluo/data', verbose=True, **kwargs):
        super(Market1501, self).__init__()
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'train')
        self.val_dir = osp.join(self.dataset_dir, 'val')
        self.query_dir = osp.join(self.dataset_dir, 'query')
        self.gallery_dir = osp.join(self.dataset_dir, 'gallery')
        self._check_before_run()
        # train and val share one pid->label mapping (built from train).
        pid2label = self.get_pid2label(self.train_dir)
        train = self._process_dir(self.train_dir, pid2label=pid2label, relabel=True)
        val = self._process_dir(self.val_dir, pid2label=pid2label, relabel=True)
        query = self._process_dir(self.query_dir, relabel=False)
        gallery = self._process_dir(self.gallery_dir, relabel=False)
        if verbose:
            print('=> Market1501 loaded')
            self.print_dataset_statistics(train, query, gallery)
        self.train = train
        self.val = val
        self.query = query
        self.gallery = gallery
        (self.num_train_pids, self.num_train_imgs, self.num_train_cams) = self.get_imagedata_info(self.train)
        (self.num_val_pids, self.num_val_imgs, self.num_val_cams) = self.get_imagedata_info(self.val)
        (self.num_query_pids, self.num_query_imgs, self.num_query_cams) = self.get_imagedata_info(self.query)
        (self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams) = self.get_imagedata_info(self.gallery)

    def get_pid2label(self, dir_path):
        """Map every person id found under *dir_path* to a contiguous label."""
        pid_container = set()
        for img_path in glob.glob(osp.join(dir_path, '*.jpg')):
            pid, _ = map(int, self._pattern.search(img_path).groups())
            if pid == -1:
                continue  # junk images are ignored
            pid_container.add(pid)
        return {pid: label for label, pid in enumerate(sorted(pid_container))}

    def _check_before_run(self):
        """Raise RuntimeError if any required directory is missing."""
        # BUGFIX: val_dir is processed by __init__ but was never checked,
        # so a missing val directory silently produced an empty val split.
        for path in (self.dataset_dir, self.train_dir, self.val_dir,
                     self.query_dir, self.gallery_dir):
            if not osp.exists(path):
                raise RuntimeError("'{}' is not available".format(path))

    def _process_dir(self, dir_path, pid2label=None, relabel=False):
        """Return a list of (img_path, pid, camid) tuples for *dir_path*.

        camid is converted to 0-based; pid is remapped through *pid2label*
        when *relabel* is set.
        """
        dataset = []
        for img_path in glob.glob(osp.join(dir_path, '*.jpg')):
            pid, camid = map(int, self._pattern.search(img_path).groups())
            if pid == -1:
                continue  # junk images are ignored
            assert 0 <= pid <= 1501
            assert 1 <= camid <= 6
            camid -= 1  # camera ids become 0-based
            if relabel and pid2label is not None:
                pid = pid2label[pid]
            dataset.append((img_path, pid, camid))
        return dataset
# BUGFIX: the bare `.parametrize(...)` statements were syntactically invalid
# (truncated decorators); restored as `@pytest.mark.parametrize`, which the
# argument lists clearly belong to.
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('test', [True])
@pytest.mark.parametrize('graph_ref, graph_act', [(resnet_ref, small_bn_resnet)])
def test_fused_batch_normalization(seed, test, graph_ref, graph_act):
    """Fusing batch normalization must preserve graph structure and values."""
    from .graph_converter_test_utils import structure_tester, value_tester
    np.random.seed(seed)
    rng = np.random.RandomState(seed)
    x_data = rng.randn(batch_size, 3, 32, 32)
    x = nn.Variable.from_numpy_array(x_data)
    y_tgt = graph_act(x, test=test)
    # Apply the BN-fusion modifier to the acting graph.
    modifiers = []
    modifiers.append(GC.FusedBatchNormalizationModifier())
    y_act = GC.GraphConverter(modifiers).convert(y_tgt)
    y_ref = graph_ref(x, test=test, name='fused-bn-graph-ref')
    structure_tester(y_ref, y_act)
    # Loose tolerances: fusion reorders floating-point arithmetic.
    value_tester(y_tgt, y_act, rtol=0.06, atol=0.05)
# NOTE(review): this bare `_node_type()` call looks like a truncated
# decorator (e.g. `@optplan.register_node_type(...)`) — confirm against the
# original source before relying on node registration happening here.
_node_type()
class CompositeParametrization(optplan.Parametrization):
    """Schema node for a parametrization composed of child parametrizations.

    Fields:
        type: polymorphic-model discriminator, 'parametrization.composite'.
        param_list: references to the component parametrizations.
    """
    type = schema_utils.polymorphic_model_type('parametrization.composite')
    param_list = types.ListType(optplan.ReferenceType(optplan.Parametrization))
class BertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BERT tokenizer.

    Wraps a WordPiece backend and re-synchronizes the backend normalizer
    when ``do_lower_case`` / ``strip_accents`` differ from the serialized
    tokenizer state.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        # The serialized normalizer may disagree with the requested options
        # (e.g. a cased checkpoint loaded with do_lower_case=True); rebuild
        # it from its own state with the overridden settings.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if ((pre_tok_state.get('do_lower_case', do_lower_case) != do_lower_case) or (pre_tok_state.get('strip_accents', strip_accents) != strip_accents)):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop('type'))
            pre_tok_state['do_lower_case'] = do_lower_case
            pre_tok_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Return ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = ([self.cls_token_id] + token_ids_0) + [self.sep_token_id]
        # BUGFIX: was `if token_ids_1:`, which silently dropped the second
        # [SEP] for an empty-but-present pair and was inconsistent with
        # create_token_type_ids_from_sequences below.
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Return segment ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return (len(cls + token_ids_0 + sep) * [0]) + (len(token_ids_1 + sep) * [1])

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
def test_point_confusion_matrix(expected, observed):
    """point_confusion_matrix must reproduce the fixed reference counts."""
    want = np.array((3, 4, 2, 1))
    got = np.array(point_confusion_matrix(expected, observed))
    np.testing.assert_array_equal(got, want)
class TanhShrink(Module):
    """Legacy-nn module computing tanhshrink(x) = x - tanh(x)."""

    def __init__(self):
        super(TanhShrink, self).__init__()
        # Inner Tanh module provides both forward and backward pieces.
        self.tanh = Tanh()

    def updateOutput(self, input):
        """Forward: output = input - tanh(input)."""
        th = self.tanh.updateOutput(input)
        self.output.resize_as_(input).copy_(input)
        # Legacy add_(scalar, tensor) overload: adds (-1) * th in place.
        self.output.add_((- 1), th)
        return self.output

    def updateGradInput(self, input, gradOutput):
        """Backward: gradInput = gradOutput - d tanh/d input * gradOutput."""
        dth = self.tanh.updateGradInput(input, gradOutput)
        self.gradInput.resize_as_(input).copy_(gradOutput)
        self.gradInput.add_((- 1), dth)
        return self.gradInput
class BaseDataLoader():
    """Abstract base for data loaders; subclasses override the hooks below."""

    def __init__(self):
        pass

    def initialize(self):
        """Hook for subclass setup; the base implementation does nothing."""
        pass

    def load_data(self):
        """Return the loaded data; the base implementation returns None.

        BUGFIX: the original definition omitted ``self``, so calling
        ``instance.load_data()`` raised TypeError.
        """
        return None
class StatsTest(unittest.TestCase):
def setUp(self):
self._integers = list(range(1, 50))
self._floats = [float(x) for x in self._integers]
self._floats2 = [(float(x) + 2.31) for x in self._integers]
self._mixed = [(x if ((x % 2) == 0) else (float(x) + 4.5)) for x in self._integers]
def _assert(self, stats, mean_val, geo_mean, min_val, max_val, std_dev):
self.assertAlmostEqual(mean_val, stats.mean)
self.assertAlmostEqual(geo_mean, stats.geom_mean)
self.assertEqual(min_val, stats.min)
self.assertEqual(max_val, stats.max)
self.assertAlmostEqual(std_dev, stats.std_dev)
def test_123(self):
stats = StatisticProperties()
stats.add([1, 2, 3])
self._assert(stats, 2, 1., 1, 3, 0.)
stats = StatisticProperties()
stats.add([1.0, 2.0, 3.0])
self._assert(stats, 2, 1., 1.0, 3.0, 0.)
def test_1to49(self):
stats = StatisticProperties()
stats.add(self._integers)
self._assert(stats, 25, 19., 1, 49, 14.)
stats = StatisticProperties()
stats.add(self._floats)
self._assert(stats, 25, 19., 1.0, 49.0, 14.)
def test_shifted(self):
stats = StatisticProperties()
stats.add(self._floats2)
self._assert(stats, (25 + 2.31), 22., (1.0 + 2.31), (49.0 + 2.31), 14.)
def test_mixed(self):
stats = StatisticProperties()
stats.add(self._mixed)
self.assertAlmostEqual(27., stats.mean)
self._assert(stats, 27., 22., 2, 53.5, 14.) |
def se_resnet152(num_classes, loss='softmax', pretrained=True, **kwargs):
    """Build an SE-ResNet-152, optionally loading ImageNet weights."""
    model = SENet(
        num_classes=num_classes,
        loss=loss,
        block=SEResNetBottleneck,
        layers=[3, 8, 36, 3],
        groups=1,
        reduction=16,
        dropout_p=None,
        inplanes=64,
        input_3x3=False,
        downsample_kernel_size=1,
        downsample_padding=0,
        last_stride=2,
        fc_dims=None,
        **kwargs)
    if pretrained:
        url = pretrained_settings['se_resnet152']['imagenet']['url']
        init_pretrained_weights(model, url)
    return model
def create_dist(latent_flat):
    """Transpose a list of latent vectors into per-dimension value lists.

    The dimensionality is taken from the first element; each returned list
    collects one coordinate across all vectors.
    """
    num_dims = latent_flat[0].shape[0]
    per_dim = [[] for _ in range(num_dims)]
    for latent in tqdm(latent_flat):
        for dim in range(num_dims):
            per_dim[dim].append(latent[dim])
    return per_dim
def batch_all_triplet_loss(labels, embeddings, margin, squared=False):
    """Batch-all triplet loss over every valid (anchor, positive, negative).

    Args:
        labels: 1-D tensor of class labels, shape (batch_size,).
        embeddings: 2-D tensor of shape (batch_size, embed_dim).
        margin: margin enforced between positive and negative distances.
        squared: if True, use squared Euclidean distances.

    Returns:
        (triplet_loss, fraction_positive_triplets): mean loss over the
        triplets with non-zero loss, and the fraction of valid triplets
        that have non-zero loss.
    """
    pairwise_dist = _pairwise_distances(embeddings, squared=squared)
    # Broadcast to d(a, p) - d(a, n) + margin for every (a, p, n) triple.
    anchor_positive_dist = pairwise_dist.unsqueeze(2)
    anchor_negative_dist = pairwise_dist.unsqueeze(1)
    triplet_loss = (anchor_positive_dist - anchor_negative_dist) + margin
    # Zero out invalid triplets (distinct indices, matching label pattern).
    mask = _get_triplet_mask(labels)
    triplet_loss = mask.float() * triplet_loss
    # Hinge: easy triplets (negative loss) contribute nothing.
    triplet_loss[triplet_loss < 0] = 0
    valid_triplets = triplet_loss[triplet_loss > 1e-16]
    num_positive_triplets = valid_triplets.size(0)
    num_valid_triplets = mask.sum()
    fraction_positive_triplets = num_positive_triplets / (num_valid_triplets.float() + 1e-16)
    triplet_loss = triplet_loss.sum() / (num_positive_triplets + 1e-16)
    # BUGFIX: removed stray debug print of the return values; library code
    # should not write to stdout on every call.
    return (triplet_loss, fraction_positive_triplets)
class AnnotatedSkipQuantModel(torch.nn.Module):
    """Test model whose ``fc`` layer opts out of quantization.

    The submodule is wrapped for quantization, while ``fc.qconfig = None``
    tells the quantization passes to leave ``fc`` in float.
    """

    def __init__(self, qengine):
        super().__init__()
        self.qconfig = torch.quantization.get_default_qconfig(qengine)
        self.sub = QuantWrapper(InnerModule())
        self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
        # None qconfig => this module (and its children) is skipped.
        self.fc.qconfig = None

    def forward(self, x):
        return self.fc(self.sub(x))

    def fuse_modules(self):
        # Delegate fusion to the wrapped inner module.
        self.sub.module.fuse_modules()
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch with an orthogonality regularizer.

    The loss is cross-entropy plus ``args.r`` times a sum of orthogonality
    penalties over the downsample and conv1 weights of a (DataParallel-
    wrapped) ResNet-34-shaped model — note the hard-coded layer indices
    assume that exact architecture.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    # NOTE(review): these meter names are empty — upstream variants use
    # 'Acc@1' / 'Acc@5'; looks like truncated strings, confirm.
    top1 = AverageMeter('', ':6.2f')
    top5 = AverageMeter('', ':6.2f')
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, top1, top5], prefix='Epoch: [{}]'.format(epoch))
    model.train()
    end = time.time()
    for (i, (images, target)) in enumerate(train_loader):
        # Time spent waiting on the data loader.
        data_time.update((time.time() - end))
        if (args.gpu is not None):
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        output = model(images)
        # Orthogonality penalties: downsample convs of layers 2-4 ...
        diff = ((utils.orth_dist(model.module.layer2[0].downsample[0].weight) + utils.orth_dist(model.module.layer3[0].downsample[0].weight)) + utils.orth_dist(model.module.layer4[0].downsample[0].weight))
        # ... plus conv1 of every residual block, with the stride of the
        # first block of each stage (stride 2 for layers 2-4).
        diff += ((utils.deconv_orth_dist(model.module.layer1[0].conv1.weight, stride=1) + utils.deconv_orth_dist(model.module.layer1[1].conv1.weight, stride=1)) + utils.deconv_orth_dist(model.module.layer1[2].conv1.weight, stride=1))
        diff += (((utils.deconv_orth_dist(model.module.layer2[0].conv1.weight, stride=2) + utils.deconv_orth_dist(model.module.layer2[1].conv1.weight, stride=1)) + utils.deconv_orth_dist(model.module.layer2[2].conv1.weight, stride=1)) + utils.deconv_orth_dist(model.module.layer2[3].conv1.weight, stride=1))
        diff += (((((utils.deconv_orth_dist(model.module.layer3[0].conv1.weight, stride=2) + utils.deconv_orth_dist(model.module.layer3[1].conv1.weight, stride=1)) + utils.deconv_orth_dist(model.module.layer3[2].conv1.weight, stride=1)) + utils.deconv_orth_dist(model.module.layer3[3].conv1.weight, stride=1)) + utils.deconv_orth_dist(model.module.layer3[4].conv1.weight, stride=1)) + utils.deconv_orth_dist(model.module.layer3[5].conv1.weight, stride=1))
        diff += ((utils.deconv_orth_dist(model.module.layer4[0].conv1.weight, stride=2) + utils.deconv_orth_dist(model.module.layer4[1].conv1.weight, stride=1)) + utils.deconv_orth_dist(model.module.layer4[2].conv1.weight, stride=1))
        loss = (criterion(output, target) + (args.r * diff))
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        # Standard SGD step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            progress.display(i)
class ComputeHistogramForBlobs(NetModifier):
    """Caffe2 net modifier that logs normalized histograms of chosen blobs.

    For each blob it inserts AccumulateHistogram + NormalizeL1 ops and,
    optionally, a Print op every ``logging_frequency`` iterations; the
    normalized histogram can also be appended to the net's output record.
    """

    def __init__(self, blobs, logging_frequency, num_buckets=30, lower_bound=0.0, upper_bound=1.0, accumulate=False):
        self._blobs = blobs
        self._logging_frequency = logging_frequency
        # accumulate=True reports the running histogram over all iterations,
        # otherwise only the current iteration's histogram.
        self._accumulate = accumulate
        if self._accumulate:
            self._field_name_suffix = '_acc_normalized_hist'
        else:
            self._field_name_suffix = '_curr_normalized_hist'
        self._num_buckets = int(num_buckets)
        assert (self._num_buckets > 0), 'num_buckets need to be greater than 0, got {}'.format(num_buckets)
        self._lower_bound = float(lower_bound)
        self._upper_bound = float(upper_bound)

    def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None, modify_output_record=False):
        """Insert histogram ops for every configured blob into *net*."""
        for blob_name in self._blobs:
            blob = core.BlobReference(blob_name)
            assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
            # Histogram op expects float input.
            blob_float = net.Cast(blob, net.NextScopedBlob(prefix=(blob + '_float')), to=core.DataType.FLOAT)
            (curr_hist, acc_hist) = net.AccumulateHistogram([blob_float], [net.NextScopedBlob(prefix=(blob + '_curr_hist')), net.NextScopedBlob(prefix=(blob + '_acc_hist'))], num_buckets=self._num_buckets, lower_bound=self._lower_bound, upper_bound=self._upper_bound)
            if self._accumulate:
                hist = net.Cast(acc_hist, net.NextScopedBlob(prefix=(blob + '_cast_hist')), to=core.DataType.FLOAT)
            else:
                hist = net.Cast(curr_hist, net.NextScopedBlob(prefix=(blob + '_cast_hist')), to=core.DataType.FLOAT)
            # Normalize counts so the buckets sum to 1.
            normalized_hist = net.NormalizeL1(hist, net.NextScopedBlob(prefix=(blob + self._field_name_suffix)))
            if (self._logging_frequency >= 1):
                net.Print(normalized_hist, [], every_n=self._logging_frequency)
            if modify_output_record:
                output_field_name = (str(blob) + self._field_name_suffix)
                # num_buckets + 2: AccumulateHistogram adds under/overflow bins.
                output_scalar = schema.Scalar((np.float32, ((self._num_buckets + 2),)), normalized_hist)
                if (net.output_record() is None):
                    net.set_output_record(schema.Struct((output_field_name, output_scalar)))
                else:
                    net.AppendOutputRecordField(output_field_name, output_scalar)

    def field_name_suffix(self):
        """Return the suffix used for the histogram output field name."""
        return self._field_name_suffix
class Bernoulli(ExponentialFamily):
    """Bernoulli distribution parameterized by `probs` or `logits` (not both).

    Samples are 0 or 1, taking value 1 with probability ``probs``.

    BUGFIX: the ``logits``/``probs`` accessors were preceded by bare
    ``_property`` statements — mangled decorators — which left them as plain
    methods; e.g. ``entropy`` uses ``self.logits``/``self.probs`` as tensors
    and would have received bound methods. Restored as ``lazy_property``
    (computed once from the other parameterization and cached on the
    instance), and the standard read-only accessors as ``@property``,
    matching the torch.distributions API.
    """
    arg_constraints = {'probs': constraints.unit_interval}
    support = constraints.boolean
    has_enumerate_support = True
    _mean_carrier_measure = 0

    def __init__(self, probs=None, logits=None, validate_args=None):
        if (probs is None) == (logits is None):
            raise ValueError('Either `probs` or `logits` must be specified, but not both.')
        if probs is not None:
            is_scalar = isinstance(probs, Number)
            (self.probs,) = broadcast_all(probs)
        else:
            is_scalar = isinstance(logits, Number)
            (self.logits,) = broadcast_all(logits)
        # Whichever parameterization was given becomes the canonical one.
        self._param = self.probs if probs is not None else self.logits
        if is_scalar:
            batch_shape = torch.Size()
        else:
            batch_shape = self._param.size()
        super(Bernoulli, self).__init__(batch_shape, validate_args=validate_args)

    def _new(self, *args, **kwargs):
        return self._param.new(*args, **kwargs)

    @property
    def mean(self):
        return self.probs

    @property
    def variance(self):
        return self.probs * (1 - self.probs)

    @torch.distributions.utils.lazy_property
    def logits(self):
        # Derived from probs on first access, then cached on the instance.
        return probs_to_logits(self.probs, is_binary=True)

    @torch.distributions.utils.lazy_property
    def probs(self):
        # Derived from logits on first access, then cached on the instance.
        return logits_to_probs(self.logits, is_binary=True)

    @property
    def param_shape(self):
        return self._param.size()

    def sample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        with torch.no_grad():
            return torch.bernoulli(self.probs.expand(shape))

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        (logits, value) = broadcast_all(self.logits, value)
        return -binary_cross_entropy_with_logits(logits, value, reduction='none')

    def entropy(self):
        # BCE(logits, probs) equals the Bernoulli entropy.
        return binary_cross_entropy_with_logits(self.logits, self.probs, reduction='none')

    def enumerate_support(self):
        values = self._new((2,))
        torch.arange(2, out=values)
        values = values.view((-1,) + (1,) * len(self._batch_shape))
        values = values.expand((-1,) + self._batch_shape)
        return values

    @property
    def _natural_params(self):
        return (torch.log(self.probs / (1 - self.probs)),)

    def _log_normalizer(self, x):
        return torch.log(1 + torch.exp(x))
class Cifar10Data(Dataset):
    """CIFAR-10 dataset wrapper for the benchmark harness.

    NOTE: this is Python-2 era code (``xrange``, ``cPickle``) reading the
    original pickled CIFAR-10 batch files.
    """

    def __init__(self, data_dir=None):
        super(Cifar10Data, self).__init__('cifar10', 32, 32, data_dir=data_dir, queue_runner_required=True, num_classes=10)

    def read_data_files(self, subset='train'):
        """Load and concatenate the pickled batches for *subset*.

        Returns:
            (all_images, all_labels): float32 image matrix and label vector.
        """
        assert self.data_dir, 'Cannot call `read_data_files` when using synthetic data'
        if (subset == 'train'):
            # Training data is split across data_batch_1 .. data_batch_5.
            filenames = [os.path.join(self.data_dir, ('data_batch_%d' % i)) for i in xrange(1, 6)]
        elif (subset == 'validation'):
            filenames = [os.path.join(self.data_dir, 'test_batch')]
        else:
            raise ValueError(('Invalid data subset "%s"' % subset))
        inputs = []
        for filename in filenames:
            with gfile.Open(filename, 'r') as f:
                inputs.append(cPickle.load(f))
        all_images = np.concatenate([each_input['data'] for each_input in inputs]).astype(np.float32)
        all_labels = np.concatenate([each_input['labels'] for each_input in inputs])
        return (all_images, all_labels)

    def num_examples_per_epoch(self, subset='train'):
        """Return the fixed CIFAR-10 split sizes."""
        if (subset == 'train'):
            return 50000
        elif (subset == 'validation'):
            return 10000
        else:
            raise ValueError(('Invalid data subset "%s"' % subset))

    def get_image_preprocessor(self):
        """Pick the synthetic or real CIFAR-10 preprocessor."""
        if self.use_synthetic_gpu_images():
            return preprocessing.SyntheticImagePreprocessor
        else:
            return preprocessing.Cifar10ImagePreprocessor
class SequenceField(Field[DataArray]):
    """A ``Field`` whose contents form an ordered sequence of elements."""

    def sequence_length(self) -> int:
        """Return the number of elements in the sequence; subclasses override."""
        raise NotImplementedError
def test_clean_fix_missing(df_dates: pd.DataFrame) -> None:
    """clean_date must fill missing date/time parts per the fix_missing mode.

    'minimum' substitutes the smallest valid value for each missing
    component; 'empty' leaves missing components as '-' placeholders.
    The expected columns below are exact golden fixtures for the shared
    df_dates fixture (np.nan marks unparseable rows).
    """
    df_clean_minimum = clean_date(df_dates, 'date', fix_missing='minimum')
    df_clean_empty = clean_date(df_dates, 'date', fix_missing='empty')
    df_check_minimum = df_dates.copy()
    df_check_minimum['date_clean'] = ['1996-07-10 15:08:56', '2003-09-25 10:36:28', '2003-09-25 10:36:28', '2003-09-25 10:36:28', '2003-09-25 10:36:28', '2000-01-01 10:36:28', '2000-01-01 10:36:00', '2000-01-01 10:36:00', '2003-09-25 00:00:00', '2003-09-25 00:00:00', '2003-09-01 00:00:00', '2000-09-01 00:00:00', '2003-01-01 00:00:00', '2003-09-25 00:00:00', '2003-09-25 00:00:00', '2003-09-25 00:00:00', '2003-09-25 00:00:00', '2003-09-25 00:00:00', '2003-10-09 00:00:00', '2003-10-09 00:00:00', '2003-09-25 00:00:00', '2003-09-25 00:00:00', '2003-09-25 00:00:00', '2003-09-25 00:00:00', '2000-01-01 22:00:00', '2000-01-01 12:00:00', '2003-09-01 00:00:00', '2003-09-01 00:00:00', '2096-07-10 00:00:00', '1996-07-10 15:08:56', '1952-04-12 15:30:42', '1994-11-05 08:15:30', '2001-05-03 00:00:00', '1990-06-13 05:50:00', np.nan, np.nan, np.nan, np.nan]
    df_check_empty = df_dates.copy()
    df_check_empty['date_clean'] = ['1996-07-10 15:08:56', '2003-09-25 10:36:28', '2003-09-25 10:36:28', '2003-09-25 10:36:28', '2003-09-25 10:36:28', ' 10:36:28', ' 10:36:--', ' 10:36:--', '2003-09-25 --:--:--', '2003-09-25 --:--:--', '2003-09--- --:--:--', '-----09--- --:--:--', '2003------ --:--:--', '2003-09-25 --:--:--', '2003-09-25 --:--:--', '2003-09-25 --:--:--', '2003-09-25 --:--:--', '2003-09-25 --:--:--', '2003-10-09 --:--:--', '2003-10-09 --:--:--', '2003-09-25 --:--:--', '2003-09-25 --:--:--', '2003-09-25 --:--:--', '2003-09-25 --:--:--', ' 22:--:--', ' 12:00:--', '2003-09--- --:--:--', '2003-09--- --:--:--', '2096-07-10 --:--:--', '1996-07-10 15:08:56', '1952-04-12 15:30:42', '1994-11-05 08:15:30', '2001-05-03 --:--:--', '1990-06-13 05:50:--', np.nan, np.nan, np.nan, np.nan]
    assert df_clean_minimum.equals(df_check_minimum)
    assert df_clean_empty.equals(df_check_empty)
def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')):
    """Build a parser for infix expressions from a precedence table.

    Args:
        baseExpr: parser for the operand atoms.
        opList: list of (opExpr, arity, associativity[, parseAction])
            tuples, ordered from highest to lowest precedence; for
            arity 3 (ternary) opExpr is a pair of expressions.
        lpar/rpar: grouping-parenthesis parsers.

    Returns:
        A Forward expression matching the full precedence hierarchy.
    """
    ret = Forward()
    # An operand is an atom or a parenthesized full expression.
    lastExpr = (baseExpr | ((lpar + ret) + rpar))
    for (i, operDef) in enumerate(opList):
        # Pad the tuple so a missing parse action becomes None.
        (opExpr, arity, rightLeftAssoc, pa) = (operDef + (None,))[:4]
        # For ternary ops opExpr is a 2-tuple, hence the %s%s format.
        termName = (('%s term' % opExpr) if (arity < 3) else ('%s%s term' % opExpr))
        if (arity == 3):
            if ((opExpr is None) or (len(opExpr) != 2)):
                raise ValueError('if numterms=3, opExpr must be a tuple or list of two expressions')
            (opExpr1, opExpr2) = opExpr
        # Forward allows this precedence level to reference itself
        # (needed for right associativity).
        thisExpr = Forward().setName(termName)
        if (rightLeftAssoc == opAssoc.LEFT):
            if (arity == 1):
                # Postfix unary: operand followed by one or more operators.
                matchExpr = (FollowedBy((lastExpr + opExpr)) + Group((lastExpr + OneOrMore(opExpr))))
            elif (arity == 2):
                if (opExpr is not None):
                    matchExpr = (FollowedBy(((lastExpr + opExpr) + lastExpr)) + Group((lastExpr + OneOrMore((opExpr + lastExpr)))))
                else:
                    # None opExpr: juxtaposition (implicit operator).
                    matchExpr = (FollowedBy((lastExpr + lastExpr)) + Group((lastExpr + OneOrMore(lastExpr))))
            elif (arity == 3):
                matchExpr = (FollowedBy(((((lastExpr + opExpr1) + lastExpr) + opExpr2) + lastExpr)) + Group(((((lastExpr + opExpr1) + lastExpr) + opExpr2) + lastExpr)))
            else:
                raise ValueError('operator must be unary (1), binary (2), or ternary (3)')
        elif (rightLeftAssoc == opAssoc.RIGHT):
            if (arity == 1):
                # Prefix unary; make the operator optional for chaining.
                if (not isinstance(opExpr, Optional)):
                    opExpr = Optional(opExpr)
                matchExpr = (FollowedBy((opExpr.expr + thisExpr)) + Group((opExpr + thisExpr)))
            elif (arity == 2):
                if (opExpr is not None):
                    # Right recursion via thisExpr gives right associativity.
                    matchExpr = (FollowedBy(((lastExpr + opExpr) + thisExpr)) + Group((lastExpr + OneOrMore((opExpr + thisExpr)))))
                else:
                    matchExpr = (FollowedBy((lastExpr + thisExpr)) + Group((lastExpr + OneOrMore(thisExpr))))
            elif (arity == 3):
                matchExpr = (FollowedBy(((((lastExpr + opExpr1) + thisExpr) + opExpr2) + thisExpr)) + Group(((((lastExpr + opExpr1) + thisExpr) + opExpr2) + thisExpr)))
            else:
                raise ValueError('operator must be unary (1), binary (2), or ternary (3)')
        else:
            raise ValueError('operator must indicate right or left associativity')
        if pa:
            if isinstance(pa, (tuple, list)):
                matchExpr.setParseAction(*pa)
            else:
                matchExpr.setParseAction(pa)
        # This level matches either its own operator form or falls through
        # to the next-higher precedence level.
        thisExpr <<= (matchExpr.setName(termName) | lastExpr)
        lastExpr = thisExpr
    ret <<= lastExpr
    return ret
class MM_reg(atomic_reg):
    """Register layout for the 'MM' (matrix-multiply) atomic command.

    ``_fields_`` declares the hardware bit-field encoding (widths in bits);
    the plain ``int`` annotations below mirror those fields for static type
    checkers. ``rsvd*`` fields are reserved/padding bits.
    """
    OP_NAME = 'MM'
    _fields_ = [('cmd_short', ctypes.c_uint64, 1), ('op_code', ctypes.c_uint64, 16), ('cmd_id_dep', ctypes.c_uint64, 23), ('dbg_mode', ctypes.c_uint64, 1), ('tsk_typ', ctypes.c_uint64, 4), ('tsk_eu_typ', ctypes.c_uint64, 5), ('opt_rq', ctypes.c_uint64, 1), ('tsk_opd_num', ctypes.c_uint64, 2), ('rsvd1', ctypes.c_uint64, 2), ('opt_res0_sign', ctypes.c_uint64, 1), ('rsvd0', ctypes.c_uint64, 3), ('pwr_step', ctypes.c_uint64, 4), ('intr_en', ctypes.c_uint64, 1), ('opt_res_add', ctypes.c_uint64, 1), ('opt_relu', ctypes.c_uint64, 1), ('opt_left_tran', ctypes.c_uint64, 1), ('opt_opd4_const', ctypes.c_uint64, 1), ('opt_kernel_rotate', ctypes.c_uint64, 1), ('opt_opd0_sign', ctypes.c_uint64, 1), ('opt_opd1_sign', ctypes.c_uint64, 1), ('opt_opd2_sign', ctypes.c_uint64, 1), ('opt_res0_prec', ctypes.c_uint64, 3), ('opt_opd0_prec', ctypes.c_uint64, 3), ('opt_opd1_prec', ctypes.c_uint64, 3), ('opt_opd2_prec', ctypes.c_uint64, 3), ('opt_opd0_const', ctypes.c_uint64, 1), ('opt_opd1_const', ctypes.c_uint64, 1), ('opt_opd2_const', ctypes.c_uint64, 1), ('short_res0_str', ctypes.c_uint64, 3), ('short_opd0_str', ctypes.c_uint64, 3), ('short_opd1_str', ctypes.c_uint64, 3), ('short_opd2_str', ctypes.c_uint64, 3), ('opt_res_add_sign', ctypes.c_uint64, 1), ('rsvd2', ctypes.c_uint64, 25), ('sym_range', ctypes.c_uint64, 1), ('opt_opd3_const', ctypes.c_uint64, 1), ('opt_opd5_const', ctypes.c_uint64, 1), ('opd0_x_ins0', ctypes.c_uint64, 4), ('opd0_y_ins0', ctypes.c_uint64, 4), ('opd1_x_ins0', ctypes.c_uint64, 4), ('opd1_y_ins0', ctypes.c_uint64, 4), ('opd0_up_pad', ctypes.c_uint64, 4), ('opd0_dn_pad', ctypes.c_uint64, 4), ('opd0_lf_pad', ctypes.c_uint64, 4), ('opd0_rt_pad', ctypes.c_uint64, 4), ('res_op_x_str', ctypes.c_uint64, 4), ('res_op_y_str', ctypes.c_uint64, 4), ('res0_h_shift', ctypes.c_uint64, 4), ('res0_w_shift', ctypes.c_uint64, 4), ('opd0_h_shift', ctypes.c_uint64, 4), ('opd0_w_shift', ctypes.c_uint64, 4), ('opd1_h_shift', ctypes.c_uint64, 4), ('opd1_w_shift', ctypes.c_uint64, 4), 
    ('tsk_lane_num', ctypes.c_uint64, 64), ('res0_n', ctypes.c_uint64, 16), ('res0_c', ctypes.c_uint64, 16), ('res0_h', ctypes.c_uint64, 16), ('res0_w', ctypes.c_uint64, 16), ('opd0_n', ctypes.c_uint64, 16), ('opd0_c', ctypes.c_uint64, 16), ('opd0_h', ctypes.c_uint64, 16), ('opd0_w', ctypes.c_uint64, 16), ('opd1_n', ctypes.c_uint64, 16), ('opd1_c', ctypes.c_uint64, 16), ('opd1_h', ctypes.c_uint64, 16), ('opd1_w', ctypes.c_uint64, 16), ('res0_n_str', ctypes.c_uint64, 16), ('res0_c_str', ctypes.c_uint64, 16), ('opd0_n_str', ctypes.c_uint64, 16), ('opd0_c_str', ctypes.c_uint64, 16), ('opd1_n_str', ctypes.c_uint64, 16), ('opd1_c_str', ctypes.c_uint64, 16), ('opd2_n_str', ctypes.c_uint64, 16), ('opd2_c_str', ctypes.c_uint64, 16), ('res0_addr', ctypes.c_uint64, 32), ('opd0_addr', ctypes.c_uint64, 32), ('opd1_addr', ctypes.c_uint64, 32), ('opd2_addr', ctypes.c_uint64, 32), ('res0_h_str', ctypes.c_uint64, 32), ('res0_w_str', ctypes.c_uint64, 32), ('opd0_h_str', ctypes.c_uint64, 32), ('opd0_w_str', ctypes.c_uint64, 32), ('opd1_h_str', ctypes.c_uint64, 32), ('opd1_w_str', ctypes.c_uint64, 32), ('opd2_h_str', ctypes.c_uint64, 32), ('opd2_w_str', ctypes.c_uint64, 32), ('res1_addr', ctypes.c_uint64, 32), ('opd3_addr', ctypes.c_uint64, 32)]
    # --- command header bits ---
    cmd_short: int
    op_code: int
    cmd_id_dep: int
    dbg_mode: int
    tsk_typ: int
    tsk_eu_typ: int
    opt_rq: int
    tsk_opd_num: int
    rsvd1: int
    opt_res0_sign: int
    rsvd0: int
    pwr_step: int
    intr_en: int
    # --- operation option flags ---
    opt_res_add: int
    opt_relu: int
    opt_left_tran: int
    opt_opd4_const: int
    opt_kernel_rotate: int
    opt_opd0_sign: int
    opt_opd1_sign: int
    opt_opd2_sign: int
    opt_res0_prec: int
    opt_opd0_prec: int
    opt_opd1_prec: int
    opt_opd2_prec: int
    opt_opd0_const: int
    opt_opd1_const: int
    opt_opd2_const: int
    short_res0_str: int
    short_opd0_str: int
    short_opd1_str: int
    short_opd2_str: int
    opt_res_add_sign: int
    rsvd2: int
    sym_range: int
    opt_opd3_const: int
    opt_opd5_const: int
    # --- insertion / padding / shift parameters ---
    opd0_x_ins0: int
    opd0_y_ins0: int
    opd1_x_ins0: int
    opd1_y_ins0: int
    opd0_up_pad: int
    opd0_dn_pad: int
    opd0_lf_pad: int
    opd0_rt_pad: int
    res_op_x_str: int
    res_op_y_str: int
    res0_h_shift: int
    res0_w_shift: int
    opd0_h_shift: int
    opd0_w_shift: int
    opd1_h_shift: int
    opd1_w_shift: int
    tsk_lane_num: int
    # --- tensor shapes (n, c, h, w) for result and operands ---
    res0_n: int
    res0_c: int
    res0_h: int
    res0_w: int
    opd0_n: int
    opd0_c: int
    opd0_h: int
    opd0_w: int
    opd1_n: int
    opd1_c: int
    opd1_h: int
    opd1_w: int
    # --- strides and addresses ---
    res0_n_str: int
    res0_c_str: int
    opd0_n_str: int
    opd0_c_str: int
    opd1_n_str: int
    opd1_c_str: int
    opd2_n_str: int
    opd2_c_str: int
    res0_addr: int
    opd0_addr: int
    opd1_addr: int
    opd2_addr: int
    res0_h_str: int
    res0_w_str: int
    opd0_h_str: int
    opd0_w_str: int
    opd1_h_str: int
    opd1_w_str: int
    opd2_h_str: int
    opd2_w_str: int
    res1_addr: int
    opd3_addr: int
    # Total command length in bits.
    length: int = 1024
class CharadesProcessor():
    """Converts raw Charades-STA annotation lines into structured records."""

    def __init__(self):
        super(CharadesProcessor, self).__init__()
        self.idx_counter = 0  # running sample id across process_data calls

    def reset_idx_counter(self):
        """Restart sample ids from zero."""
        self.idx_counter = 0

    def process_data(self, data, charades, scope):
        """Parse annotation lines into dicts with clipped time spans.

        Each line has the form '<vid> <start> <end>##<sentence>'. The span
        is clipped to [0, video duration] and the sentence is lowercased
        and tokenized.
        """
        records = []
        for raw_line in tqdm(data, total=len(data), desc='process charades-sta {}'.format(scope)):
            stripped = raw_line.lstrip().rstrip()
            if not stripped:
                continue
            video_info, sentence = stripped.split('##')
            vid, start_time, end_time = video_info.split(' ')
            duration = float(charades[vid]['duration'])
            # Clip the annotated span into [0, duration].
            s_time = max(0.0, float(start_time))
            e_time = min(float(end_time), duration)
            tokens = word_tokenize(sentence.strip().lower(), language='english')
            records.append({'sample_id': self.idx_counter, 'vid': str(vid),
                            's_time': s_time, 'e_time': e_time,
                            'duration': duration, 'words': tokens})
            self.idx_counter += 1
        return records

    def convert(self, data_dir):
        """Load the raw files under *data_dir* and return (train, None, test)."""
        self.reset_idx_counter()
        if not os.path.exists(data_dir):
            raise ValueError('data dir {} does not exist'.format(data_dir))
        charades = load_json(os.path.join(data_dir, 'charades.json'))
        train_data = load_lines(os.path.join(data_dir, 'charades_sta_train.txt'))
        test_data = load_lines(os.path.join(data_dir, 'charades_sta_test.txt'))
        train_set = self.process_data(train_data, charades, scope='train')
        test_set = self.process_data(test_data, charades, scope='test')
        return (train_set, None, test_set)
def main(unused_args):
    """Entry point: rewrite/quantize the input GraphDef according to FLAGS.

    Returns 0 on success and -1 on argument errors (missing input file or
    unknown mode).
    """
    if (not gfile.Exists(FLAGS.input)):
        print((("Input graph file '" + FLAGS.input) + "' does not exist!"))
        return (- 1)
    known_modes = ['round', 'quantize', 'eightbit', 'weights', 'test', 'weights_rounded']
    # NOTE: this is a substring test against each known mode, kept as-is.
    if (not any(((FLAGS.mode in s) for s in known_modes))):
        print((((("mode is '" + FLAGS.mode) + "', not in ") + ', '.join(known_modes)) + '.'))
        return (- 1)
    tf_graph = graph_pb2.GraphDef()
    with gfile.Open(FLAGS.input, 'rb') as f:
        tf_graph.ParseFromString(f.read())
    graph = ops.Graph()
    with graph.as_default():
        importer.import_graph_def(tf_graph, input_map={}, name='')
    quantized_input_range = None
    if FLAGS.quantized_input:
        quantized_input_range = [FLAGS.quantized_input_min, FLAGS.quantized_input_max]
    fallback_quantization_range = None
    if ((FLAGS.quantized_fallback_min is not None) or (FLAGS.quantized_fallback_max is not None)):
        # Both fallback bounds must be supplied together.
        assert (FLAGS.quantized_fallback_min is not None)
        assert (FLAGS.quantized_fallback_max is not None)
        fallback_quantization_range = [FLAGS.quantized_fallback_min, FLAGS.quantized_fallback_max]
    rewriter = GraphRewriter(tf_graph, FLAGS.mode, quantized_input_range, fallback_quantization_range)
    output_graph = rewriter.rewrite(FLAGS.output_node_names.split(','))
    # BUGFIX: the output handle was never closed; use a context manager so
    # the file is flushed and closed even if the write raises.
    with gfile.FastGFile(FLAGS.output, 'wb') as f:
        f.write(output_graph.SerializeToString())
    return 0
class InputFeatures(object):
    """Container for one featurized document span.

    Pure data holder: attributes mirror the constructor arguments verbatim.
    """

    def __init__(self, unique_id, entities, example_index, doc_span_index, word_ids, word_segment_ids, word_attention_mask, placeholder_position_ids, entity_position_ids, labels):
        # Identification of the originating example / document span.
        self.unique_id = unique_id
        self.entities = entities
        self.example_index = example_index
        self.doc_span_index = doc_span_index
        # Word-level model inputs.
        self.word_ids = word_ids
        self.word_segment_ids = word_segment_ids
        self.word_attention_mask = word_attention_mask
        # Entity-level inputs and supervision targets.
        self.placeholder_position_ids = placeholder_position_ids
        self.entity_position_ids = entity_position_ids
        self.labels = labels
def entropy_surrogate(estimator, samples):
    """Build a scalar surrogate objective from estimated score gradients.

    The estimator's gradient of log q at *samples* is stop-gradiented, so
    autodiff through the returned scalar propagates that (negated) score
    onto *samples*.
    """
    grad_log_q = estimator.compute_gradients(samples)
    weight = tf.stop_gradient(-grad_log_q)
    return tf.reduce_mean(tf.reduce_sum(weight * samples, -1))
def compute_link_entropy(G, a, b):
    """Surprisal (-log2 probability) of a link between nodes *a* and *b*.

    Uses the endpoints' in-degrees and the total edge count m = nnz/2; the
    product term appears to be the probability that none of b's kb edge
    slots attaches to a, so 1 minus it is the link probability.
    """
    deg_a = int(G.indeg_vec[a])
    deg_b = int(G.indeg_vec[b])
    num_edges = G.csr.nnz / 2
    slots = np.arange(deg_b)
    # Elementwise (m - ka - j) / (m - j) for j = 0..kb-1, multiplied up.
    miss_prob = np.prod((num_edges - deg_a - slots) / (num_edges - slots))
    return -np.log2(1.0 - miss_prob)
def load_examples_agn(path):
    """Load AG-News rows from a CSV into scored-option examples.

    Each row becomes one example whose options pair the article text
    (title, description, and a 'The text is about' prompt) with each of
    the four topic hypotheses; the gold label is the 0-based class index.
    """
    topics = [' politics', ' sports', ' business', ' technology']
    # NOTE(review): hard-coded cluster-specific path to the label-synonym
    # file — unusable outside that environment; confirm before reuse.
    label_path = '/gscratch/zlab/swj0419/knnlm/data/label_word/datasets/agnews/label_names_kb.txt'
    label2synonym = load_label(label_path)
    examples = []
    with open(path) as fp:
        reader = csv.DictReader(fp)
        for row in reader:
            # CSV classes are 1-based; labels here are 0-based.
            label = (int(row['Class Index']) - 1)
            title = row['Title']
            summary = row['Description']
            # Multi-line prompt; continuation lines intentionally unindented
            # because the f-string content is whitespace-sensitive.
            premise = f'''{title}
{summary}
The text is about'''
            options = []
            for h in topics:
                o = {}
                o['premise'] = premise
                o['hypothesis'] = h
                o['uncond_premise'] = '\n The text is about'
                o['uncond_hypothesis'] = h
                options.append(o)
            label = label
            examples.append({'options': options, 'label': label, 'label2synonym': label2synonym, 'label_list': topics})
    return examples
class DLDeviceType(ctypes.c_int):
    """Device-type codes for the DLPack tensor-exchange ABI.

    Values (including the gaps at 5, 6 and 11) appear to mirror the
    ``DLDeviceType`` enum of an older dlpack.h revision (the kDLGPU naming
    predates kDLCUDA) — confirm against the vendored header.
    """
    kDLCPU = 1
    kDLGPU = 2          # CUDA GPU (newer headers call this kDLCUDA)
    kDLCPUPinned = 3    # CUDA pinned host memory
    kDLOpenCL = 4
    kDLVulkan = 7
    kDLMetal = 8
    kDLVPI = 9
    kDLROCM = 10
    kDLExtDev = 12      # reserved for extension devices
def test_add_call_for_rollback(method_mock, variable_reference_mock, default_test_case):
    """add_call_for must roll back partially-added statements on failure.

    The mocked add_method inserts three statements and then raises, so the
    factory must undo them, leaving only the pre-existing statement.
    """
    def side_effect(tc, f, p, callee=None):
        # Simulate a construction that adds statements before failing.
        tc.add_statement(stmt.IntPrimitiveStatement(tc, 5), position=p)
        tc.add_statement(stmt.IntPrimitiveStatement(tc, 5), position=p)
        tc.add_statement(stmt.IntPrimitiveStatement(tc, 5), position=p)
        raise ConstructionFailedException()
    int0 = stmt.IntPrimitiveStatement(default_test_case, 3)
    default_test_case.add_statement(int0)
    test_cluster = MagicMock(ModuleTestCluster)
    test_factory = tf.TestFactory(test_cluster)
    with mock.patch.object(test_factory, 'add_method') as add_field:
        add_field.side_effect = side_effect
        # add_call_for reports failure ...
        assert (not test_factory.add_call_for(default_test_case, variable_reference_mock, method_mock, 0))
        # ... and the test case is restored to its pre-call state.
        assert (default_test_case.statements == [int0])
def r_cond_without_not3(t):
    """Grammar rule: expand to the single un-negated condition token."""
    def expand(_k, _n):
        return ['rightIsClear']
    return [('cond_without_not', expand)]
def get_daily_ci_runs(token, num_runs=7):
    """Return the latest ``num_runs`` scheduled daily-CI workflow runs on ``main``.

    Queries the GitHub Actions REST API ("list workflow runs for a
    workflow"). ``token`` is optional; without it the unauthenticated rate
    limit applies.
    """
    headers = None
    if (token is not None):
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    # Numeric id of the daily CI workflow.
    workflow_id = '636036'
    # NOTE(review): the URL literal was truncated in this copy of the file;
    # reconstructed from the GitHub API route for listing workflow runs.
    url = f'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    url += f'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result = requests.get(url, headers=headers).json()
    return result['workflow_runs']
class ConstMember(Member):
    """A named constant member whose value string is parsed into a typed value."""

    def __init__(self, type_ref, name, value_str):
        super(ConstMember, self).__init__(type_ref, name)
        if not type_ref.is_const_type():
            raise TypeError("Constant '{}' from line {} must be one of {}. '{}' found.".format(name, type_ref.lineno, CONST_TYPES, type_ref.name))
        converter = CONST_TYPE_MAP[type_ref.name]
        try:
            self.value = converter(value_str)
        except ValueError:
            # Report which constant failed to parse, then re-raise.
            print('Error parsing const type {}.'.format(self.name))
            raise
        self.value_str = value_str

    def __repr__(self):
        return 'const {} {} = {};'.format(self.type_ref, self.name, self.value_str)
def svc_classify(x, y, search):
    """Mean 10-fold stratified CV accuracy of an SVC (optionally grid-searching C)."""
    splitter = StratifiedKFold(n_splits=10, shuffle=True, random_state=None)
    fold_scores = []
    for train_idx, test_idx in splitter.split(x, y):
        x_tr, x_te = x[train_idx], x[test_idx]
        y_tr, y_te = y[train_idx], y[test_idx]
        if search:
            # Inner 5-fold grid search over the regularization strength.
            grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]}
            model = GridSearchCV(SVC(), grid, cv=5, scoring='accuracy', verbose=0)
        else:
            model = SVC(C=10)
        model.fit(x_tr, y_tr)
        fold_scores.append(accuracy_score(y_te, model.predict(x_te)))
    return np.mean(fold_scores)
class DataIterator():
    """Mini-batch iterator over a user/seq/target pandas DataFrame.

    In 'train' mode each batch also carries negative samples drawn from
    items the user never clicked. One pass raises StopIteration, after
    which the data is reshuffled (if enabled) for the next epoch.
    """

    def __init__(self, mode, data, batch_size=128, neg_sample=1, all_items=None, items_usr_clicked=None, shuffle=True):
        self.mode = mode  # 'train' enables negative sampling
        self.data = data  # DataFrame with 'user', 'seq', 'target' columns
        self.datasize = data.shape[0]
        self.neg_count = neg_sample
        self.batch_size = batch_size
        self.item_usr_clicked = items_usr_clicked  # user -> clicked item ids
        self.all_items = all_items
        self.shuffle = shuffle
        self.seed = 0  # bumped every epoch so reshuffles differ
        self.idx = 0  # cursor into self.data
        # NOTE(review): round() under-counts when the remainder is less than
        # half a batch; math.ceil is the usual choice -- confirm intent.
        self.total_batch = round((self.datasize / float(self.batch_size)))

    def __iter__(self):
        return self

    def reset(self):
        """Rewind to the start of an epoch, reshuffling rows if enabled."""
        self.idx = 0
        if self.shuffle:
            self.data = self.data.sample(frac=1).reset_index(drop=True)
            self.seed = (self.seed + 1)
            random.seed(self.seed)

    def __next__(self):
        """Return the next (users, seqs, targets, negatives) batch."""
        if (self.idx >= self.datasize):
            self.reset()
            raise StopIteration
        nums = self.batch_size
        if ((self.datasize - self.idx) < self.batch_size):
            # NOTE(review): the final partial batch is truncated to a
            # multiple of 3 -- presumably a downstream model constraint;
            # confirm, as trailing rows can be silently dropped.
            nums = ((self.datasize - self.idx) - ((self.datasize - self.idx) % 3))
        cur = self.data.iloc[self.idx:(self.idx + nums)]
        batch_user = cur['user'].values
        batch_seq = list(cur['seq'].values)
        batch_pos = list(cur['target'].values)
        batch_neg = []
        if (self.mode == 'train'):
            for (u, seq) in zip(cur['user'], cur['seq']):
                user_item_set = (set(self.all_items) - set(self.item_usr_clicked[u]))
                # random.sample() rejects set populations on Python >= 3.11;
                # convert to a tuple first (matches what older Pythons did
                # internally when handed a set).
                batch_neg.append(random.sample(tuple(user_item_set), len(seq)))
        self.idx += self.batch_size
        return (batch_user, batch_seq, batch_pos, batch_neg)
def _findLine(comp, fileLines):
c = 0
found = []
for line in fileLines:
if (comp in line):
found.append(c)
c += 1
return found |
class SearchBPEtoWords(Job):
    """Sisyphus job that merges BPE subwords in search output back into words.

    NOTE(review): `self.sh` is assumed to substitute the `{script}`,
    `{search_output_bpe}` and `{out}` placeholders from the job's
    attributes (standard sisyphus Job.sh behavior) -- confirm.
    """
    def __init__(self, search_output_bpe, script=Path('scripts/search-bpe-to-words.py')):
        self.search_output_bpe = search_output_bpe  # path to BPE-level search output
        self.script = script  # converter script
        self.out = self.output_path('search_output.words')  # word-level result
    def run(self):
        # Placeholders are filled in by the sisyphus shell helper.
        self.sh('python3 {script} {search_output_bpe} --out {out}')
    def tasks(self):
        (yield Task('run', mini_task=True))
class KleeMinty(Benchmark):
    """asv benchmark: linprog methods on the Klee-Minty cube (simplex worst case)."""
    params = [methods, [3, 6, 9]]  # (method, options) pairs x problem dimension
    param_names = ['method', 'dimensions']
    def setup(self, meth, dims):
        # klee_minty supplies the LP data plus the known optimum (xf, obj).
        (self.c, self.A_ub, self.b_ub, self.xf, self.obj) = klee_minty(dims)
        self.fun = None  # filled in by time_klee_minty
    def time_klee_minty(self, meth, dims):
        (method, options) = meth
        res = linprog(c=self.c, A_ub=self.A_ub, b_ub=self.b_ub, method=method, options=options)
        self.fun = res.fun
        self.x = res.x
    def track_klee_minty(self, meth, prob):
        # Re-run the solve if asv invoked track_* without the timing pass.
        if (self.fun is None):
            self.time_klee_minty(meth, prob)
        self.abs_error = np.abs((self.fun - self.obj))
        self.rel_error = np.abs(((self.fun - self.obj) / self.obj))
        # Report whichever error metric is smaller.
        return min(self.abs_error, self.rel_error)
def test_montage_fill_gray():
    """montage() pads the missing tile of a 3-image stack with the fill value 0."""
    n_images, n_rows, n_cols = 3, 2, 3
    stack = np.arange(n_images * n_rows * n_cols, dtype=float)
    stack = stack.reshape(n_images, n_rows, n_cols)
    result = montage(stack, fill=0)
    expected = np.array([
        [0.0, 1.0, 2.0, 6.0, 7.0, 8.0],
        [3.0, 4.0, 5.0, 9.0, 10.0, 11.0],
        [12.0, 13.0, 14.0, 0.0, 0.0, 0.0],
        [15.0, 16.0, 17.0, 0.0, 0.0, 0.0],
    ])
    assert_array_equal(result, expected)
def cross_entropy_torch(x, y):
    """Mean cross-entropy of logits `x` (N, C) against integer targets `y` (N,).

    Reference implementation built from softmax + log; numerically matches
    F.cross_entropy(x, y) up to floating-point error.

    Fixes: `F.softmax` is given an explicit dim (the implicit-dim form is
    deprecated), and `torch.stack` replaces `torch.tensor(...)` over a list
    of tensors, which detached the autograd graph.
    """
    probs = [F.softmax(x[i], dim=-1) for i in range(len(x))]
    log_probs = torch.stack([torch.log(probs[i][y[i]]) for i in range(len(y))])
    return -torch.sum(log_probs) / len(y)
def parse_task(domain_pddl, task_pddl):
    """Combine a parsed PDDL domain and task into a single pddl.Task."""
    (domain_name, domain_requirements, types, type_dict, constants, predicates, predicate_dict, functions, actions, axioms) = parse_domain_pddl(domain_pddl)
    (task_name, task_domain_name, task_requirements, objects, init, goal, use_metric) = parse_task_pddl(task_pddl, type_dict, predicate_dict)
    assert (domain_name == task_domain_name)
    # Merge and deduplicate the requirement flags from both files.
    merged_reqs = sorted(set(domain_requirements.requirements + task_requirements.requirements))
    requirements = pddl.Requirements(merged_reqs)
    objects = constants + objects
    check_for_duplicates([o.name for o in objects], errmsg='error: duplicate object %r', finalmsg='please check :constants and :objects definitions')
    # Every object equals itself: seed the initial state with '=' atoms.
    init += [pddl.Atom('=', (obj.name, obj.name)) for obj in objects]
    return pddl.Task(domain_name, task_name, requirements, types, objects, predicates, functions, init, goal, actions, axioms, use_metric)
class Semantic_loss_functions(object):
    """Grab-bag of Keras/TensorFlow segmentation losses and metrics.

    Methods take dense (post-sigmoid) predictions unless noted.
    NOTE(review): several methods read module-level globals that are not
    visible in this chunk: `beta` (weighted_cross_entropyloss), `alpha` and
    `gamma` (focal_loss), and `smooth` (tversky_index). Confirm they are
    defined at module scope, otherwise those methods raise NameError.
    """
    def __init__(self):
        print('semantic loss functions initialized')
    # Soft Dice coefficient in [0, 1]; epsilon avoids 0/0 on empty masks.
    def dice_coef(self, y_true, y_pred):
        y_true_f = K.flatten(y_true)
        y_pred_f = K.flatten(y_pred)
        intersection = K.sum((y_true_f * y_pred_f))
        return (((2.0 * intersection) + K.epsilon()) / ((K.sum(y_true_f) + K.sum(y_pred_f)) + K.epsilon()))
    # Recall / true-positive rate on thresholded (rounded) predictions.
    def sensitivity(self, y_true, y_pred):
        true_positives = K.sum(K.round(K.clip((y_true * y_pred), 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        return (true_positives / (possible_positives + K.epsilon()))
    # True-negative rate on thresholded predictions.
    def specificity(self, y_true, y_pred):
        true_negatives = K.sum(K.round(K.clip(((1 - y_true) * (1 - y_pred)), 0, 1)))
        possible_negatives = K.sum(K.round(K.clip((1 - y_true), 0, 1)))
        return (true_negatives / (possible_negatives + K.epsilon()))
    # Inverse sigmoid: map probabilities back to logits (clipped for stability).
    def convert_to_logits(self, y_pred):
        y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon()))
        return tf.math.log((y_pred / (1 - y_pred)))
    # BCE with positive-class weight beta/(1-beta); `beta` is a module global.
    def weighted_cross_entropyloss(self, y_true, y_pred):
        y_pred = self.convert_to_logits(y_pred)
        pos_weight = (beta / (1 - beta))
        loss = tf.nn.weighted_cross_entropy_with_logits(logits=y_pred, targets=y_true, pos_weight=pos_weight)
        return tf.reduce_mean(loss)
    # Numerically-stable focal BCE term, expressed in logit space.
    def focal_loss_with_logits(self, logits, targets, alpha, gamma, y_pred):
        weight_a = ((alpha * ((1 - y_pred) ** gamma)) * targets)
        weight_b = (((1 - alpha) * (y_pred ** gamma)) * (1 - targets))
        return (((tf.math.log1p(tf.exp((- tf.abs(logits)))) + tf.nn.relu((- logits))) * (weight_a + weight_b)) + (logits * weight_b))
    # Focal loss; `alpha` and `gamma` are module globals here.
    def focal_loss(self, y_true, y_pred):
        y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), (1 - tf.keras.backend.epsilon()))
        logits = tf.math.log((y_pred / (1 - y_pred)))
        loss = self.focal_loss_with_logits(logits=logits, targets=y_true, alpha=alpha, gamma=gamma, y_pred=y_pred)
        return tf.reduce_mean(loss)
    # Sigmoid each element, then normalise along axis 0 (a softmax substitute).
    def depth_softmax(self, matrix):
        sigmoid = (lambda x: (1 / (1 + K.exp((- x)))))
        sigmoided_matrix = sigmoid(matrix)
        softmax_matrix = (sigmoided_matrix / K.sum(sigmoided_matrix, axis=0))
        return softmax_matrix
    # Dice score with additive smoothing of 1.0.
    def generalized_dice_coefficient(self, y_true, y_pred):
        smooth = 1.0
        y_true_f = K.flatten(y_true)
        y_pred_f = K.flatten(y_pred)
        intersection = K.sum((y_true_f * y_pred_f))
        score = (((2.0 * intersection) + smooth) / ((K.sum(y_true_f) + K.sum(y_pred_f)) + smooth))
        return score
    def dice_loss(self, y_true, y_pred):
        loss = (1 - self.generalized_dice_coefficient(y_true, y_pred))
        return loss
    # Average of binary cross-entropy and Dice loss.
    def bce_dice_loss(self, y_true, y_pred):
        loss = (binary_crossentropy(y_true, y_pred) + self.dice_loss(y_true, y_pred))
        return (loss / 2.0)
    # Smoothed (precision, recall) pair from soft confusion counts.
    def confusion(self, y_true, y_pred):
        smooth = 1
        y_pred_pos = K.clip(y_pred, 0, 1)
        y_pred_neg = (1 - y_pred_pos)
        y_pos = K.clip(y_true, 0, 1)
        y_neg = (1 - y_pos)
        tp = K.sum((y_pos * y_pred_pos))
        fp = K.sum((y_neg * y_pred_pos))
        fn = K.sum((y_pos * y_pred_neg))
        prec = ((tp + smooth) / ((tp + fp) + smooth))
        recall = ((tp + smooth) / ((tp + fn) + smooth))
        return (prec, recall)
    # Smoothed recall on rounded predictions.
    def true_positive(self, y_true, y_pred):
        smooth = 1
        y_pred_pos = K.round(K.clip(y_pred, 0, 1))
        y_pos = K.round(K.clip(y_true, 0, 1))
        tp = ((K.sum((y_pos * y_pred_pos)) + smooth) / (K.sum(y_pos) + smooth))
        return tp
    # Smoothed specificity on rounded predictions.
    def true_negative(self, y_true, y_pred):
        smooth = 1
        y_pred_pos = K.round(K.clip(y_pred, 0, 1))
        y_pred_neg = (1 - y_pred_pos)
        y_pos = K.round(K.clip(y_true, 0, 1))
        y_neg = (1 - y_pos)
        tn = ((K.sum((y_neg * y_pred_neg)) + smooth) / (K.sum(y_neg) + smooth))
        return tn
    # Tversky index with FN weight alpha=0.7 (FP weight 0.3).
    # NOTE(review): `smooth` here is NOT local -- unlike the other methods
    # it must exist as a module global; confirm it is defined.
    def tversky_index(self, y_true, y_pred):
        y_true_pos = K.flatten(y_true)
        y_pred_pos = K.flatten(y_pred)
        true_pos = K.sum((y_true_pos * y_pred_pos))
        false_neg = K.sum((y_true_pos * (1 - y_pred_pos)))
        false_pos = K.sum(((1 - y_true_pos) * y_pred_pos))
        alpha = 0.7
        return ((true_pos + smooth) / (((true_pos + (alpha * false_neg)) + ((1 - alpha) * false_pos)) + smooth))
    def tversky_loss(self, y_true, y_pred):
        return (1 - self.tversky_index(y_true, y_pred))
    # Focal Tversky loss with exponent gamma = 0.75.
    def focal_tversky(self, y_true, y_pred):
        pt_1 = self.tversky_index(y_true, y_pred)
        gamma = 0.75
        return K.pow((1 - pt_1), gamma)
    # log(cosh(dice_loss)) -- a smooth variant of the Dice loss.
    def log_cosh_dice_loss(self, y_true, y_pred):
        x = self.dice_loss(y_true, y_pred)
        return tf.math.log(((tf.exp(x) + tf.exp((- x))) / 2.0))
    # Soft Jaccard (IoU); unsmoothed, so two all-zero masks divide by zero.
    def jacard_similarity(self, y_true, y_pred):
        y_true_f = K.flatten(y_true)
        y_pred_f = K.flatten(y_pred)
        intersection = K.sum((y_true_f * y_pred_f))
        union = K.sum(((y_true_f + y_pred_f) - (y_true_f * y_pred_f)))
        return (intersection / union)
    def jacard_loss(self, y_true, y_pred):
        return (1 - self.jacard_similarity(y_true, y_pred))
    # Structural-similarity loss (1 - SSIM).
    def ssim_loss(self, y_true, y_pred):
        return (1 - tf.image.ssim(y_true, y_pred, max_val=1))
    # UNet3+ hybrid: focal + SSIM + Jaccard.
    def unet3p_hybrid_loss(self, y_true, y_pred):
        focal_loss = self.focal_loss(y_true, y_pred)
        ms_ssim_loss = self.ssim_loss(y_true, y_pred)
        jacard_loss = self.jacard_loss(y_true, y_pred)
        return ((focal_loss + ms_ssim_loss) + jacard_loss)
    # BASNet hybrid: BCE + SSIM + Jaccard.
    def basnet_hybrid_loss(self, y_true, y_pred):
        bce_loss = BinaryCrossentropy(from_logits=False)
        bce_loss = bce_loss(y_true, y_pred)
        ms_ssim_loss = self.ssim_loss(y_true, y_pred)
        jacard_loss = self.jacard_loss(y_true, y_pred)
        return ((bce_loss + ms_ssim_loss) + jacard_loss)
def haar_like_feature_coord(width, height, feature_type=None):
    """Coordinates and type labels of all Haar-like features in a window.

    Delegates to the per-type wrapper for each validated feature type and
    concatenates the results.
    """
    validated_types = _validate_feature_type(feature_type)
    per_type = [haar_like_feature_coord_wrapper(width, height, ft) for ft in validated_types]
    coords, types = zip(*per_type)
    return (np.concatenate(coords), np.hstack(types))
class ExperimentParameter():
    """Declaration of a configurable experiment parameter.

    NOTE(review): bare class-level annotations suggest this class was
    decorated with @dataclass in the original source (decorators appear
    stripped in this copy) -- confirm before relying on a generated
    __init__.
    """
    name: str  # parameter name used in configs
    default: Any  # value used when not overridden
    values: Any = None  # optional set of sweep values
    description: str = ''  # human-readable help text
def analyze(data_path):
    """Load a pickled (event matrix S, model) pair and print summary stats.

    `data_path` may be a gzip-compressed (.gz) or plain pickle file.
    Fix: the plain-file branch opened the pickle in text mode ('r'), which
    makes pickle.load fail on Python 3; it now opens in binary ('rb').
    """
    if data_path.endswith('.gz'):
        # gzip's 'r' mode is already binary.
        with gzip.open(data_path, 'r') as f:
            (S, true_model) = pickle.load(f)
    else:
        with open(data_path, 'rb') as f:
            (S, true_model) = pickle.load(f)
    print('True model:')
    print(true_model)
    T = float(S.shape[0])  # number of time bins
    N = S.sum(axis=0)  # per-process event counts
    print('lambda0: ', true_model.bias_model.lambda0.mean())
    print('Average event count: ', N.mean(), ' +- ', N.std())
    # Second line reports the per-bin event *rate* (count / T).
    print('Average event count: ', (N / T).mean(), ' +- ', (N / T).std())
def register_Ns3OlsrRoutingProtocol_methods(root_module, cls):
    """Register ns3::olsr::RoutingProtocol methods on the PyBindGen wrapper.

    Generated binding code: each add_method mirrors the C++ signature of
    the OLSR routing protocol class; do not hand-edit signatures without
    checking the ns-3 headers.
    """
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('SetMainInterface', 'void', [param('uint32_t', 'interface')])
    cls.add_method('Dump', 'void', [])
    cls.add_method('GetRoutingTableEntries', 'std::vector< ns3::olsr::RoutingTableEntry >', [], is_const=True)
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
    cls.add_method('GetInterfaceExclusions', 'std::set< unsigned int >', [], is_const=True)
    cls.add_method('SetInterfaceExclusions', 'void', [param('std::set< unsigned int >', 'exceptions')])
    cls.add_method('AddHostNetworkAssociation', 'void', [param('ns3::Ipv4Address', 'networkAddr'), param('ns3::Ipv4Mask', 'netmask')])
    cls.add_method('RemoveHostNetworkAssociation', 'void', [param('ns3::Ipv4Address', 'networkAddr'), param('ns3::Ipv4Mask', 'netmask')])
    cls.add_method('SetRoutingTableAssociation', 'void', [param('ns3::Ptr< ns3::Ipv4StaticRouting >', 'routingTable')])
    cls.add_method('GetRoutingTableAssociation', 'ns3::Ptr< ns3::Ipv4StaticRouting const >', [], is_const=True)
    cls.add_constructor([param('ns3::olsr::RoutingProtocol const &', 'arg0')])
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    # Ipv4RoutingProtocol virtual interface (private overrides).
    cls.add_method('RouteOutput', 'ns3::Ptr< ns3::Ipv4Route >', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')], visibility='private', is_virtual=True)
    cls.add_method('RouteInput', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')], visibility='private', is_virtual=True)
    cls.add_method('NotifyInterfaceUp', 'void', [param('uint32_t', 'interface')], visibility='private', is_virtual=True)
    cls.add_method('NotifyInterfaceDown', 'void', [param('uint32_t', 'interface')], visibility='private', is_virtual=True)
    cls.add_method('NotifyAddAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], visibility='private', is_virtual=True)
    cls.add_method('NotifyRemoveAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], visibility='private', is_virtual=True)
    cls.add_method('SetIpv4', 'void', [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')], visibility='private', is_virtual=True)
    cls.add_method('PrintRoutingTable', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::Unit::S')], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    return
def measure_exploitability(game: Union[(str, pyspiel.Game)], populations: Dict[(AgentID, Dict[(PolicyID, Policy)])], policy_mixture_dict: Dict[(AgentID, Dict[(PolicyID, float)])], use_observation: bool=False, use_cpp_br: bool=False):
    """Compute NashConv of a policy mixture via OpenSpiel's policy aggregator.

    Agent ids are assumed to be 'player_0', 'player_1', ... matching
    pyspiel player indices.
    NOTE(review): this mutates `populations` in place (dict values are
    replaced by lists) -- callers' dicts are modified; confirm intended.
    """
    if isinstance(game, str):
        game = pyspiel.load_game(game)
    weights = {}
    for (agent_id, population) in populations.items():
        pids = list(population.keys())
        populations[agent_id] = list(population.values())
        # Keep the mixture weights in the same order as the policies.
        policy_dist = policy_mixture_dict[agent_id]
        weights[agent_id] = [policy_dist[k] for k in pids]
    policies = [convert_to_os_policies(game, populations[f'player_{i}'], use_observation, player_ids=[i]) for i in range(game.num_players())]
    weights = [weights[f'player_{i}'] for i in range(game.num_players())]
    aggregator = policy_aggregator.PolicyAggregator(game)
    with warnings.catch_warnings(record=True) as w:
        aggr_policies = aggregator.aggregate(range(game.num_players()), policies=policies, weights=weights)
        # NOTE(review): relies on the private Warning attribute
        # `_category_name`; fragile across Python versions -- confirm.
        if ((len(w) == 1) and (w[0]._category_name == 'VisibleDeprecationWarning')):
            print('\x1b[93m [WARNING] A `VisibleDeprecateionWarning` is caused by the use of `np.copy` of an object. You can avoid it by modifying line240 of `open_spiel.python.algorithms.policy_aggregator.py` as `new_reaches = [e.copy() for e in my_reaches]`\x1b[0m')
    return nash_conv(game=game, policy=aggr_policies, return_only_nash_conv=False, use_cpp_br=use_cpp_br)
def _get_all_misuses(data_base_path: str) -> List[str]:
misuses = []
project_dirs = [join(data_base_path, subdir) for subdir in listdir(data_base_path) if isdir(join(data_base_path, subdir))]
for project_dir in project_dirs:
misuses_dir = join(project_dir, 'misuses')
if (not exists(misuses_dir)):
continue
misuse_ids = [subdir for subdir in listdir(misuses_dir) if isdir(join(misuses_dir, subdir))]
misuse_ids = ['{}.{}'.format(basename(project_dir), misuse) for misuse in misuse_ids]
misuses.extend(misuse_ids)
return misuses |
class ConvFeatureExtractionModel(nn.Module):
    """Eight-layer strided-conv encoder; each layer halves the spatial size."""

    def __init__(self, input_nc, num_decoders=5, inner_nc=128, num_additional_ids=0, smaller=False, num_masks=0):
        super(ConvFeatureExtractionModel, self).__init__()
        self.encoder = self.generate_encoder_layers(output_size=inner_nc, num_filters=num_additional_ids)

    def forward(self, x):
        return self.encoder(x)

    def generate_encoder_layers(self, output_size=128, num_filters=64):
        """Build the conv -> batchnorm -> LeakyReLU stack (no norm/act on the last conv)."""
        widths = [3, num_filters, num_filters * 2, num_filters * 4] + ([num_filters * 8] * 4) + [output_size]
        # Convs are created first, in order, so parameter initialization
        # consumes the RNG stream exactly as the hand-unrolled version did.
        convs = [nn.Conv2d(widths[i], widths[i + 1], 4, 2, 1) for i in range(8)]
        norms = [None] + [nn.BatchNorm2d(widths[i + 1]) for i in range(1, 7)] + [None]
        act = nn.LeakyReLU(0.2, True)
        layers = []
        for conv, norm in zip(convs, norms):
            layers.append(conv)
            if norm is not None:
                layers.append(norm)
            if conv is not convs[-1]:
                layers.append(act)
        return nn.Sequential(*layers)
class TeacherStudentKLLoss(BaseClassificationDistillationLoss):
    """Distillation loss: KL(teacher || student) over temperature-scaled logits."""
    # NOTE(review): the missing `self` parameter suggests this was decorated
    # with @staticmethod in the original source (decorators appear stripped
    # in this copy) -- confirm before calling it on an instance.
    def teacher_student_loss(teacher_logits, student_logits, temp):
        """Mean KL divergence between temperature-softened categoricals."""
        teacher_dist = Categorical(logits=(teacher_logits / temp))
        student_dist = Categorical(logits=(student_logits / temp))
        return kl_divergence(teacher_dist, student_dist).mean()
def parse_org_table(table_lines):
    """Parse an org-mode table into a list of row dicts keyed by the header row.

    Mutates `table_lines`: the separator row (index 1) is removed in place.
    """
    table_lines.pop(1)  # drop the |---+---| separator line
    rows = [[cell.strip() for cell in line[1:(-2)].split('|')] for line in table_lines]
    header = rows.pop(0)
    return [dict(zip(header, row)) for row in rows]
def compute_norm(x, axis, keepdims):
    """L2 norm of `x` along `axis` (square root of the sum of squares)."""
    squared_sum = tf.math.reduce_sum(x ** 2, axis=axis, keepdims=keepdims)
    return squared_sum ** 0.5
class GaussianDiffusion(nn.Module):
def __init__(self, model, *, image_size, timesteps=1000, sampling_timesteps=None, loss_type='l1', objective='pred_noise', beta_schedule='cosine', p2_loss_weight_gamma=0.0, p2_loss_weight_k=1, ddim_sampling_eta=1.0):
super().__init__()
self.model = model
self.channels = 3
self.self_condition = False
self.image_size = image_size
self.objective = objective
assert (objective in {'pred_noise', 'pred_x0'}), 'objective must be either pred_noise (predict noise) or pred_x0 (predict image start)'
if (beta_schedule == 'linear'):
betas = linear_beta_schedule(timesteps)
elif (beta_schedule == 'cosine'):
betas = cosine_beta_schedule(timesteps)
else:
raise ValueError(f'unknown beta schedule {beta_schedule}')
alphas = (1.0 - betas)
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:(- 1)], (1, 0), value=1.0)
(timesteps,) = betas.shape
self.num_timesteps = int(timesteps)
self.loss_type = loss_type
self.sampling_timesteps = default(sampling_timesteps, timesteps)
assert (self.sampling_timesteps <= timesteps)
self.is_ddim_sampling = (self.sampling_timesteps < timesteps)
self.ddim_sampling_eta = ddim_sampling_eta
register_buffer = (lambda name, val: self.register_buffer(name, val.to(torch.float32)))
register_buffer('betas', betas)
register_buffer('alphas_cumprod', alphas_cumprod)
register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt((1.0 - alphas_cumprod)))
register_buffer('log_one_minus_alphas_cumprod', torch.log((1.0 - alphas_cumprod)))
register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt((1.0 / alphas_cumprod)))
register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(((1.0 / alphas_cumprod) - 1)))
posterior_variance = ((betas * (1.0 - alphas_cumprod_prev)) / (1.0 - alphas_cumprod))
register_buffer('posterior_variance', posterior_variance)
register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min=1e-20)))
register_buffer('posterior_mean_coef1', ((betas * torch.sqrt(alphas_cumprod_prev)) / (1.0 - alphas_cumprod)))
register_buffer('posterior_mean_coef2', (((1.0 - alphas_cumprod_prev) * torch.sqrt(alphas)) / (1.0 - alphas_cumprod)))
register_buffer('p2_loss_weight', ((p2_loss_weight_k + (alphas_cumprod / (1 - alphas_cumprod))) ** (- p2_loss_weight_gamma)))
def predict_start_from_noise(self, x_t, t, noise):
return ((extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - (extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise))
def predict_noise_from_start(self, x_t, t, x0):
return (((extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - x0) / extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape))
def q_posterior(self, x_start, x_t, t):
posterior_mean = ((extract(self.posterior_mean_coef1, t, x_t.shape) * x_start) + (extract(self.posterior_mean_coef2, t, x_t.shape) * x_t))
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return (posterior_mean, posterior_variance, posterior_log_variance_clipped)
def model_predictions(self, x, t, x_self_cond=None, clip_x_start=False):
model_output = self.model(x, t, x_self_cond)
maybe_clip = (partial(torch.clamp, min=(- 1.0), max=1.0) if clip_x_start else identity)
if (self.objective == 'pred_noise'):
pred_noise = model_output
x_start = self.predict_start_from_noise(x, t, pred_noise)
x_start = maybe_clip(x_start)
elif (self.objective == 'pred_x0'):
x_start = model_output
x_start = maybe_clip(x_start)
pred_noise = self.predict_noise_from_start(x, t, x_start)
return ModelPrediction(pred_noise, x_start)
_grad()
def p_mean_variance(self, x, t, x_self_cond=None, clip_denoised=True):
preds = self.model_predictions(x, t, x_self_cond)
x_start = preds.pred_x_start
if clip_denoised:
x_start.clamp_((- 1.0), 1.0)
(model_mean, posterior_variance, posterior_log_variance) = self.q_posterior(x_start=x_start, x_t=x, t=t)
return (model_mean, posterior_variance, posterior_log_variance, x_start)
def p_sample(self, x, t: int, x_self_cond=None, clip_denoised=True, loss_fn=None):
(b, *_, device) = (*x.shape, x.device)
batched_times = torch.full((x.shape[0],), t, device=x.device, dtype=torch.long)
(model_mean, model_variance, model_log_variance, x_start) = self.p_mean_variance(x=x, t=batched_times, x_self_cond=x_self_cond, clip_denoised=clip_denoised)
noise = (torch.randn_like(x) if (t > 0) else 0.0)
if ((loss_fn is not None) and (t < 200)):
with torch.enable_grad():
x_in = x.detach().requires_grad_(True)
loss = loss_fn(x_in)
grad = torch.autograd.grad(loss, x_in)[0]
print(loss, grad.mean())
model_mean = (model_mean - ((grad * model_variance) * 0.5))
pred_img = (model_mean + ((0.5 * model_log_variance).exp() * noise))
else:
pred_img = (model_mean + ((0.5 * model_log_variance).exp() * noise))
return (pred_img, x_start)
def p_sample_loop(self, shape=None, loss_fn=None, img=None, start_t=None, condition=None):
if (start_t is None):
start_t = self.num_timesteps
if (img is None):
(batch, device) = (shape[0], self.betas.device)
img = torch.randn(shape, device=device)
x_start = None
for t in tqdm(reversed(range(0, start_t)), desc='sampling loop time step', total=self.num_timesteps):
self_cond = (condition if self.self_condition else None)
(img, x_start) = self.p_sample(img, t, self_cond, loss_fn=loss_fn)
img = unnormalize_to_zero_to_one(img)
return img
_grad()
def ddim_sample(self, shape, clip_denoised=True, sample_step=None, max_step=None, min_step=None, start_img=None, return_middle=False, condition=None, guid=None, middle_step=0, guid_step=700, style_enhance=0, filter_size=8):
(batch, device, total_timesteps, sampling_timesteps, eta, objective) = (shape[0], self.betas.device, self.num_timesteps, self.sampling_timesteps, self.ddim_sampling_eta, self.objective)
if (sample_step is not None):
sampling_timesteps = sample_step
if (max_step is None):
max_step = total_timesteps
if (min_step is None):
min_step = (- 1)
times = torch.linspace(min_step, (max_step - 1), steps=(sampling_timesteps + 1))
times = list(reversed(times.int().tolist()))
time_pairs = list(zip(times[:(- 1)], times[1:]))
if (start_img is not None):
if ((start_img.min() >= 0) and (start_img.max() <= 1)):
img = ((start_img * 2) - 1)
else:
img = start_img
batch = img.size(0)
else:
img = torch.randn(shape, device=device)
x_start = None
if (guid is not None):
filter_N = filter_size
shape = (img.size(0), 3, img.size((- 1)), img.size((- 1)))
shape_d = (img.size(0), 3, (img.size((- 1)) // filter_N), (img.size((- 1)) // filter_N))
down = Resizer(shape, (1 / filter_N)).cuda()
up = Resizer(shape_d, filter_N).cuda()
middle_img = None
self_cond = condition
for (iter, (time, time_next)) in enumerate(tqdm(time_pairs, desc='sampling loop time step')):
time_cond = torch.full((batch,), time, device=device, dtype=torch.long)
if (time_next >= middle_step):
(pred_noise, x_start, *_) = self.model_predictions(img, time_cond, self_cond, clip_x_start=clip_denoised)
else:
(pred_noise, x_start, *_) = self.model_predictions(img, time_cond, None, clip_x_start=clip_denoised)
if (time_next < 0):
img = x_start
continue
alpha = self.alphas_cumprod[time]
alpha_next = self.alphas_cumprod[time_next]
sigma = (eta * (((1 - (alpha / alpha_next)) * (1 - alpha_next)) / (1 - alpha)).sqrt())
c = ((1 - alpha_next) - (sigma ** 2)).sqrt()
noise = torch.randn_like(img)
img = (((x_start * alpha_next.sqrt()) + (c * pred_noise)) + (sigma * noise))
if ((time_next > guid_step) and (guid is not None)):
tmp_noise = self.p_losses(guid, (torch.ones(len(img)).long().to(device) * time_next), return_x=True)
(tmp_img, tmp_x_start) = self.p_sample(tmp_noise, time_next, self_cond, loss_fn=None)
for ii in range((style_enhance - 1)):
tmp_noise = self.p_losses(tmp_x_start, (torch.ones(len(img)).long().to(device) * time_next), return_x=True)
(tmp_img, tmp_x_start) = self.p_sample(tmp_noise, time_next, self_cond, loss_fn=None)
img = ((img - up(down(img))) + up(down(tmp_img)))
if (return_middle and ((iter % 5) == 0)):
if (middle_img is None):
middle_img = my_unnormalize_to_zero_to_one(x_start.clone())
else:
middle_img = torch.cat((middle_img, my_unnormalize_to_zero_to_one(x_start.clone())), dim=0)
if (min_step == (- 1)):
img = unnormalize_to_zero_to_one(img)
if return_middle:
return (img, middle_img)
return img
def sample(self, batch_size=16, loss_fn=None):
(image_size, channels) = (self.image_size, self.channels)
sample_fn = (self.p_sample_loop if (not self.is_ddim_sampling) else self.ddim_sample)
return sample_fn((batch_size, channels, image_size, image_size), loss_fn)
_grad()
def interpolate(self, x1, x2, t=None, lam=0.5):
(b, *_, device) = (*x1.shape, x1.device)
t = default(t, (self.num_timesteps - 1))
assert (x1.shape == x2.shape)
t_batched = torch.stack(([torch.tensor(t, device=device)] * b))
(xt1, xt2) = map((lambda x: self.q_sample(x, t=t_batched)), (x1, x2))
img = (((1 - lam) * xt1) + (lam * xt2))
for i in tqdm(reversed(range(0, t)), desc='interpolation sample time step', total=t):
img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long))
return img
def q_sample(self, x_start, t, noise=None):
noise = default(noise, (lambda : torch.randn_like(x_start)))
return ((extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + (extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise))
def loss_fn(self):
if (self.loss_type == 'l1'):
return F.l1_loss
elif (self.loss_type == 'l2'):
return F.mse_loss
else:
raise ValueError(f'invalid loss type {self.loss_type}')
def p_losses(self, x_start, t, noise=None, return_x=False, x_self_cond=None):
(b, c, h, w) = x_start.shape
noise = default(noise, (lambda : torch.randn_like(x_start)))
if ((x_start.min() >= 0) and (x_start.max() <= 1)):
x_start = ((x_start * 2) - 1)
x = self.q_sample(x_start=x_start, t=t, noise=noise)
if return_x:
return x
model_out = self.model(x, t, x_self_cond)
if (self.objective == 'pred_noise'):
target = noise
elif (self.objective == 'pred_x0'):
target = x_start
else:
raise ValueError(f'unknown objective {self.objective}')
loss = self.loss_fn(model_out, target, reduction='none')
loss = reduce(loss, 'b ... -> b (...)', 'mean')
loss = (loss * extract(self.p2_loss_weight, t, loss.shape))
return loss.mean()
def few_shot_p_losses(self, x_start, t, return_x, noise=None, x_self_cond=None, return_x0=False):
(b, c, h, w) = x_start.shape
noise = default(noise, (lambda : torch.randn_like(x_start)))
x = self.q_sample(x_start=x_start, t=t, noise=noise)
if return_x:
return x
model_out = self.model(x, t, x_self_cond)
if (self.objective == 'pred_noise'):
target = noise
elif (self.objective == 'pred_x0'):
target = x_start
else:
raise ValueError(f'unknown objective {self.objective}')
loss = self.loss_fn(model_out, target, reduction='none')
loss = reduce(loss, 'b ... -> b (...)', 'mean')
loss = (loss * extract(self.p2_loss_weight, t, loss.shape))
return (x, loss.mean((- 1)))
def forward(self, img, *args, **kwargs):
(b, c, h, w, device, img_size) = (*img.shape, img.device, self.image_size)
assert ((h == img_size) and (w == img_size)), f'height and width of image must be {img_size}'
t = torch.randint(0, self.num_timesteps, (b,), device=device).long()
img = normalize_to_neg_one_to_one(img)
return self.p_losses(img, t, *args, **kwargs)
def few_shot_forward(self, img, *args, step=0, max_step=800, t=None, return_x=False, **kwargs):
(b, c, h, w, device, img_size) = (*img.shape, img.device, self.image_size)
assert ((h == img_size) and (w == img_size)), f'height and width of image must be {img_size}'
if (t is None):
t = torch.randint(step, max_step, (b,), device=device).long()
img = normalize_to_neg_one_to_one(img)
return (t, self.few_shot_p_losses(img, t, return_x, *args, **kwargs))
def batch_p_mean_variance(self, x, t, x_self_cond=None, clip_denoised=True):
    """Predict x_0 from (x, t) and return the posterior mean/variance stats.

    Returns:
        (model_mean, posterior_variance, posterior_log_variance, x_start)
    """
    predictions = self.model_predictions(x, t, x_self_cond)
    x0 = predictions.pred_x_start
    if clip_denoised:
        # In-place clamp of the predicted clean image to the data range.
        x0.clamp_(-1.0, 1.0)
    mean, variance, log_variance = self.q_posterior(x_start=x0, x_t=x, t=t)
    return (mean, variance, log_variance, x0)
def batch_p_sample(self, x, t, x_self_cond=None, clip_denoised=True, loss_fn=None, return_model_mean=False):
    """Return the predicted x_0 (and optionally the posterior mean) for a batch.

    NOTE(review): despite the name this does not add sampling noise — it
    returns the denoised prediction only; confirm callers expect that.
    `loss_fn` is accepted for interface compatibility but unused here.
    """
    mean, _, log_variance, x0 = self.batch_p_mean_variance(
        x=x, t=t, x_self_cond=x_self_cond, clip_denoised=clip_denoised)
    if return_model_mean:
        return (x0, mean)
    return x0
class ABReLU_VGG(nn.Module):
    """VGG backbone (100-class head) using ABReLU activations.

    The layer layout is taken from the module-level `cfg` table keyed by
    `vgg_name`; 'M' entries become max-pool layers, integers become
    Conv-BN-ABReLU triples.
    """

    def __init__(self, vgg_name):
        super(ABReLU_VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 100)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        layers = []
        in_channels = 3
        for spec in cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(in_channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    ABReLU(),
                ])
                in_channels = spec
        # Trailing 1x1 average pool is a no-op spatially; kept for parity
        # with the reference architecture.
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
class _OpLinearTerm:
    """A dim expressed as an ordered sum of multiplicative terms (`_OpMultTerm`s).

    Canonicalizes arithmetic on :class:`Dim`: add/sub extend the term list
    (with constant folding and cancellation), mul/div distribute over all
    terms. An empty term list represents the constant 0.
    """

    @classmethod
    def from_dim(cls, dim: Dim) -> _OpLinearTerm:
        """Build a linear term holding the single dim ``dim``.

        Fix: restored the missing ``@classmethod`` decorator — this is invoked
        as ``_OpLinearTerm.from_dim(other)`` in ``extend_mul_div_`` below,
        which requires a bound ``cls``.
        """
        res = cls.zero()
        res.extend_add_sub_(dim, kind='add', right=True)
        return res

    @classmethod
    def zero(cls) -> _OpLinearTerm:
        """Return an empty linear term (the constant 0)."""
        return _OpLinearTerm([])

    def __init__(self, terms: List[_OpMultTerm]):
        # Ordered summands; the represented dim is their sum.
        self.terms = terms

    def __hash__(self):
        return hash(tuple(self.terms))

    def __eq__(self, other):
        if isinstance(other, _OpLinearTerm):
            return (self.terms == other.terms)
        return False

    def __ne__(self, other):
        return (not self.__eq__(other))

    def as_dim(self) -> Dim:
        """Collapse the summands back into a single :class:`Dim`."""
        if self.is_zero():
            return _make_constant_static_dim(0)
        if (len(self.terms) == 1):
            return self.terms[0].as_dim()
        res = self.terms[0].as_dim()
        for operand in self.terms[1:]:
            res = _math_get_dim_via_bin_op(res, operand.as_dim(), 'add')
        return res

    def __repr__(self):
        return ('Dim._OpLinearTerm(%r)' % (self.terms,))

    def is_zero(self):
        # No terms means the represented value is 0.
        return (not self.terms)

    def extend_add_sub_(self, other, kind, right):
        """In-place add/sub ``other`` at the right (or left) end, simplifying.

        Performs: dropping +/- 0, flattening nested sums, cancelling x + (-x),
        folding adjacent constants, and factoring ``a*d + b*d -> (a+b)*d``
        when the trailing factor matches.
        """
        assert (kind in {'add', 'sub'})
        other = self._make_dim(other, kind=kind)
        if (other.is_constant_static_dim() and (other.dimension == 0)):
            return  # adding/subtracting 0 is a no-op
        if (other.derived_from_op and (other.derived_from_op.kind == 'add')):
            # Flatten a nested sum, preserving left/right ordering.
            for other_ in (other.derived_from_op.inputs if right else reversed(other.derived_from_op.inputs)):
                self.extend_add_sub_(other_, kind=kind, right=right)
            return
        term = _OpMultTerm.from_dim(other)
        neg_term = term.negative()
        if (kind == 'sub'):
            (term, neg_term) = (neg_term, term)
        most_recent_term = (self.terms[((- 1) if right else 0)] if self.terms else None)
        if most_recent_term:
            if (most_recent_term == neg_term):
                # x + (-x) cancels the boundary term.
                self.terms.pop(((- 1) if right else 0))
                return
            if (most_recent_term.is_constant_static_dim() and term.is_constant_static_dim()):
                # Fold two adjacent constants into one.
                self.terms[((- 1) if right else 0)] = _OpMultTerm.from_dim(_make_constant_static_dim((most_recent_term.dimension + term.dimension), kind=other.kind))
                return
            if (most_recent_term.terms and term.terms and (most_recent_term.terms[(- 1)] == term.terms[(- 1)])):
                # Shared trailing factor: a*d + b*d -> (a + b) * d.
                a = _OpMultTerm.from_dim_factors(most_recent_term.terms[:(- 1)]).as_dim()
                b = _OpMultTerm.from_dim_factors(term.terms[:(- 1)]).as_dim()
                if (a.is_constant_static_dim() and (not b.is_constant_static_dim())):
                    a = a.dimension
                elif (b.is_constant_static_dim() and (not a.is_constant_static_dim())):
                    b = b.dimension
                res = _OpMultTerm.from_dim(((a + b) if right else (b + a)))
                res.extend_mul_div_(term.terms[(- 1)], kind='mul', right=True)
                self.terms[((- 1) if right else 0)] = res
                return
        if right:
            self.terms.append(term)
        else:
            self.terms.insert(0, term)

    def extend_mul_div_(self, other, kind, right):
        """In-place multiply/divide all terms by ``other``.

        A right-multiplication that cannot be simplified term-wise is rewritten
        as a left-multiplication on the collapsed dim. Division falls back to a
        single opaque div term when any summand is not divisible.
        """
        assert (kind in {'mul', 'floordiv', 'truediv', 'ceildiv'})
        other = self._make_dim(other, kind=kind)
        if ((kind == 'mul') and right):
            if (not all((term.can_simplify(other, kind=kind, right=right) for term in self.terms))):
                # Swap operand order: (sum) * other  ->  other * (sum-as-dim).
                (self.terms, other) = (_OpLinearTerm.from_dim(other).terms, self.as_dim())
                right = False
        if (other.is_constant_static_dim() and (other.dimension == 1)):
            return  # multiplying/dividing by 1 is a no-op
        if (kind.endswith('div') and (len(self.terms) >= 2)):
            if any(((not term.divisible(other, right=right)) for term in self.terms)):
                # Not term-wise divisible: keep a single opaque division term.
                self.terms = [_OpMultTerm.from_dim(_OpMultTerm.new_div_dim(self.as_dim(), other, kind=kind, right=right))]
                return
        for term in self.terms:
            term.extend_mul_div_(other, kind=kind, right=right)

    def _make_dim(self, other, kind):
        """Coerce ``other`` (int or Dim) into a base Dim; raise on other types."""
        if isinstance(other, int):
            base_tag = self.representative_tag()
            return _make_constant_static_dim(other, kind=(base_tag.kind if base_tag else None))
        elif isinstance(other, _d.Dim):
            return other.get_same_base()
        else:
            raise TypeError(('%s %s %s invalid for type %s' % (self, kind, other, type(other))))

    def representative_tag(self):
        """Return a representative tag over all factors of all terms, if any."""
        terms = [_representative_tag(term.terms) for term in self.terms]
        return _representative_tag([term for term in terms if term])
def _map_module(root: T.nn.Module, func: Callable[[T.nn.Module, str], T.nn.Module], patt: Pattern, path: str) -> T.nn.Module:
    """Depth-first module rewriter.

    Recurses into `root`'s children (paths joined with '/'), replaces any
    child whose mapped result differs, then applies `func` to `root` itself
    when its path matches `patt`. The root path '' is treated as '/'.
    """
    for child_name, child in root.named_children():
        replacement = _map_module(child, func, patt, f'{path}/{child_name}')
        if replacement != child:
            setattr(root, child_name, replacement)
    effective_path = path or '/'
    if patt.match(effective_path):
        root = func(root, effective_path)
    return root
class ADULT(data.Dataset):
    """UCI Adult income dataset with a sensitive attribute.

    Loads `root`/adult.csv via `load_dataset`, then deterministically splits
    into 70% train / 10% val / 20% test (random_state=1 for both splits).
    Items are dicts with keys 'data', 'labels', 'sensible_attribute'.
    """

    def __init__(self, root='data/adult', split='train', sensible_attribute='gender', **kwargs):
        assert (split in ['train', 'val', 'test'])
        csv_path = os.path.join(root, 'adult.csv')
        features, labels, sens = load_dataset(csv_path, sensible_attribute)
        features = torch.from_numpy(features).float()
        labels = torch.from_numpy(labels).long()
        sens = torch.from_numpy(sens).long()
        # First carve off 20% test, then 12.5% of the remainder as val
        # (0.8 * 0.125 = 10% of the full data).
        x_tr, x_te, y_tr, y_te, s_tr, s_te = train_test_split(
            features, labels, sens, test_size=0.2, random_state=1)
        x_tr, x_va, y_tr, y_va, s_tr, s_va = train_test_split(
            x_tr, y_tr, s_tr, test_size=0.125, random_state=1)
        partitions = {
            'train': (x_tr, y_tr, s_tr),
            'val': (x_va, y_va, s_va),
            'test': (x_te, y_te, s_te),
        }
        self.x, self.y, self.s = partitions[split]
        print('loaded {} instances for split {}. y positives={}, {} positives={}'.format(len(self.y), split, sum(self.y), sensible_attribute, sum(self.s)))

    def __len__(self):
        return len(self.x)

    def __getitem__(self, index):
        return dict(data=self.x[index], labels=self.y[index], sensible_attribute=self.s[index])

    def task_names(self):
        # Single-task dataset: no named sub-tasks.
        return None
def create_pipeline_configuration(DEBUG=False, batch_size=1):
    """Build the static 2-stage pipeline-parallel partition config for BertForQuestionAnswering.

    The hard-coded table describes, per stage, its partition class, the
    tensors flowing in/out (with shape/dtype/grad metadata), and the device
    placement (CPU when DEBUG, else cuda:0/cuda:1). After construction, every
    batched tensor's shape is rewritten so that dimension `batch_dim` equals
    `batch_size` (the literal shapes below were recorded with batch size 1).

    :param DEBUG: if True, place both stages on CPU instead of cuda:0/cuda:1.
    :param batch_size: batch size substituted into every batched shape.
    :return: the fully resolved configuration dict.
    """
    # Auto-generated partition metadata; do not hand-edit the literal below.
    config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Dropout, Softmax, Tanh, Embedding, Linear, LayerNorm), 'model_inputs': {'attention_mask': {'shape': torch.Size([1, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {'shape': torch.Size([1, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'token_type_ids': {'shape': torch.Size([1, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}}, 'model_outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([1, 384, 2]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 1}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([1, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([1, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'token_type_ids': {'shape': torch.Size([1, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/Tensor::__mul___12': {'shape': torch.Size([1, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [1]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([1, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/torch.nn.functional::gelu_1211': {'shape': torch.Size([1, 384, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 1}, 1: {'stage_cls': Partition1, 'inputs': {'BertForQuestionAnswering/BertModel[bert]/Tensor::__mul___12': {'shape': torch.Size([1, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([1, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/torch.nn.functional::gelu_1211': {'shape': torch.Size([1, 384, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([1, 384, 2]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 0}}}
    batch_dim = config['batch_dim']
    # Substitute the requested batch size into every batched model I/O shape.
    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        if d['is_batched']:
            shape = d['shape']
            d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    # Same substitution for every per-stage input/output shape.
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            if d['is_batched']:
                shape = d['shape']
                d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    return config
class UpSampling(nn.Module):
    """FPN-style fusion: project, 2x nearest-upsample `x`, and concat with projected `y`.

    Both inputs are reduced to `c3` channels with 1x1 convs, so the output
    has 2*c3 channels along dim 1.
    """

    def __init__(self, c1, c2, c3):
        super(UpSampling, self).__init__()
        self.conv1 = Conv(c1, c3, 1, 1)
        self.upsampling = nn.UpsamplingNearest2d(scale_factor=2)
        self.conv2 = Conv(c2, c3, 1, 1)

    def forward(self, x, y):
        upsampled = self.upsampling(self.conv1(x))
        lateral = self.conv2(y)
        return torch.cat([upsampled, lateral], dim=1)
def test_angles_4():
    """Regression test: backbone gamma/alpha angles for two residues.

    Writes the computed angles to outdir/angles_04.test.dat and compares the
    file against the reference in refdir via `comp`.
    """
    resi = ['RG_69_0', 'RU_37_0']
    angles = ['gamma', 'alpha']
    (angles_b, rr) = bb.backbone_angles(fname1, topology=fname, residues=resi, angles=angles)
    stri = ''
    for p in range(angles_b.shape[0]):
        for k in range(angles_b.shape[2]):
            stri += (' %10.4f %10.4f ' % (angles_b[(p, 0, k)], angles_b[(p, 1, k)]))
        stri += '\n'
    # Fix: use a context manager so the handle is closed even if write() raises.
    with open(('%s/angles_04.test.dat' % outdir), 'w') as fh:
        fh.write(stri)
    comp(('%s/angles_04.test.dat' % refdir))
class ResNet(nn.Module):
    """ResNet supporting CIFAR-style (depth = 6n + 2, 32..256 channels) and
    ImageNet-style (standard depths 10..200) configurations.

    :param dataset: dataset name; names starting with 'cifar' select the
        narrow CIFAR layout.
    :param depth: network depth (6n+2 for CIFAR; one of 10/18/34/50/101/152/200 otherwise).
    :param num_classes: size of the final classification layer.
    :param norm_type: normalization variant passed through to the blocks.
    :param size: input resolution hint used to pick the stem ('small'/'mid'/'large').
    :param nch: number of input channels.
    """

    def __init__(self, dataset, depth, num_classes, norm_type='batch', size=(- 1), nch=3):
        super(ResNet, self).__init__()
        self.dataset = dataset
        self.norm_type = norm_type
        # Choose the stem variant from the dataset name / input resolution.
        if self.dataset.startswith('cifar') or (0 < size <= 64):
            self.net_size = 'small'
        elif 64 < size <= 128:
            self.net_size = 'mid'
        else:
            self.net_size = 'large'
        if self.dataset.startswith('cifar'):
            self.inplanes = 32
            n = int((depth - 2) / 6)  # BasicBlocks per stage (depth = 6n + 2)
            block = BasicBlock
            self.layer0 = IntroBlock(self.net_size, self.inplanes, norm_type, nch=nch)
            self.layer1 = self._make_layer(block, 32, n, stride=1)
            self.layer2 = self._make_layer(block, 64, n, stride=2)
            self.layer3 = self._make_layer(block, 128, n, stride=2)
            self.layer4 = self._make_layer(block, 256, n, stride=2)
            # NOTE(review): self.avgpool is unused in forward(), which pools
            # via F.avg_pool2d on the actual spatial size; kept for state-dict
            # compatibility.
            self.avgpool = nn.AvgPool2d(4)
            self.fc = nn.Linear((256 * block.expansion), num_classes)
        else:
            blocks = {10: BasicBlock, 18: BasicBlock, 34: BasicBlock, 50: Bottleneck, 101: Bottleneck, 152: Bottleneck, 200: Bottleneck}
            layers = {10: [1, 1, 1, 1], 18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3]}
            # Fix: `assert layers[depth]` raised KeyError (not AssertionError
            # with the intended message) for unsupported depths.
            assert depth in layers, 'invalid depth for ResNet (depth should be one of 10, 18, 34, 50, 101, 152, and 200)'
            self.inplanes = 64
            self.layer0 = IntroBlock(self.net_size, self.inplanes, norm_type, nch=nch)
            self.layer1 = self._make_layer(blocks[depth], 64, layers[depth][0])
            self.layer2 = self._make_layer(blocks[depth], 128, layers[depth][1], stride=2)
            self.layer3 = self._make_layer(blocks[depth], 256, layers[depth][2], stride=2)
            self.layer4 = self._make_layer(blocks[depth], 512, layers[depth][3], stride=2)
            self.avgpool = nn.AvgPool2d(7)
            self.fc = nn.Linear((512 * blocks[depth].expansion), num_classes)
        # He-style initialization for convs; unit/zero init for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; the first may downsample/project."""
        downsample = None
        if (stride != 1) or (self.inplanes != (planes * block.expansion)):
            # Projection shortcut when the spatial size or channel count changes.
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), normalization((planes * block.expansion), self.norm_type))
        layers = []
        layers.append(block(self.inplanes, planes, norm_type=self.norm_type, stride=stride, downsample=downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, norm_type=self.norm_type))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Global average pool over whatever spatial size remains.
        x = F.avg_pool2d(x, x.shape[(- 1)])
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x

    def get_feature(self, x, idx_from, idx_to=(- 1)):
        """Return intermediate features with indices idx_from..idx_to (inclusive).

        Feature indices: 0..4 = layer0..layer4 outputs, 5 = flattened pooled
        features, 6 = logits. idx_to == -1 means "just idx_from".
        (Refactored from six copy-pasted check blocks into a loop.)
        """
        if idx_to == (- 1):
            idx_to = idx_from
        features = []
        # Run stages one at a time, stopping as soon as enough features exist.
        for stage in (self.layer0, self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
            features.append(x)
            if idx_to < len(features):
                return features[idx_from:(idx_to + 1)]
        x = F.avg_pool2d(x, x.shape[(- 1)])
        x = x.view(x.size(0), (- 1))
        features.append(x)
        if idx_to < len(features):
            return features[idx_from:(idx_to + 1)]
        x = self.fc(x)
        features.append(x)
        return features[idx_from:(idx_to + 1)]
def check_var_expect(distfn, arg, m, v, msg):
    """Check E[X^2] == v + m^2 for `distfn` with shape args `arg`.

    `m` and `v` are the expected mean and variance; distributions named in
    the looser-tolerance set are checked with a relaxed rtol. Infinite
    variance is skipped (nothing to check).
    """
    loose_tolerance_dists = {'rv_histogram_instance', 'ksone'}
    tol_kwargs = {'rtol': 5e-06} if msg in loose_tolerance_dists else {}
    if not np.isfinite(v):
        return
    second_moment = distfn.expect(lambda x: x * x, arg)
    npt.assert_allclose(second_moment, v + m * m, **tol_kwargs)
def generating_function_of_integral_points(polyhedron, split=False, result_as_tuple=None, name=None, names=None, **kwds):
    """Return the generating function of the integral points of ``polyhedron``.

    :param polyhedron: a polyhedron over ZZ or QQ, contained in the
        nonnegative orthant.
    :param split: ``False`` for a direct computation; ``True`` to split the
        orthant by all coordinate orderings (one part per permutation); or an
        explicit list/tuple of polyhedra to intersect with.
    :param result_as_tuple: whether to return a tuple of factorizations
        (one per part); defaults to the value of ``split``.
    :param name: variable name for the generating function (default ``'y'``).
    :param names: alternative way to pass exactly one variable name; must
        agree with ``name`` if both are given.
    :param kwds: forwarded to ``_generating_function_of_integral_points_``.
    :raises TypeError: if the base ring is not ZZ or QQ.
    :raises NotImplementedError: for polyhedra with negative coordinates, or
        when ``names`` has more than one entry.
    :raises ValueError: on unpackable results or splitting in dimension <= 1.
    """
    import logging
    logger = logging.getLogger(__name__)
    from sage.combinat.permutation import Permutations
    from sage.geometry.polyhedron.constructor import Polyhedron
    from sage.rings.integer_ring import ZZ
    from sage.rings.rational_field import QQ
    from sage.structure.category_object import normalize_names
    # By default, return a tuple exactly when the computation is split.
    if (result_as_tuple is None):
        result_as_tuple = split
    if polyhedron.is_empty():
        # Empty polyhedron: the generating function is 0 (empty factorization
        # with unit 0).
        from sage.structure.factorization import Factorization
        result = Factorization([], unit=0)
        if result_as_tuple:
            return (result,)
        else:
            return result
    if (polyhedron.base_ring() not in (ZZ, QQ)):
        raise TypeError('base ring {} of the polyhedron not ZZ or QQ'.format(polyhedron.base_ring()))
    d = polyhedron.ambient_dim()
    # Inequalities x_i >= 0 for each coordinate: the nonnegative orthant.
    nonnegative_orthant = Polyhedron(ieqs=[(((dd * (0,)) + (1,)) + ((d - dd) * (0,))) for dd in range(1, (d + 1))])
    if ((polyhedron & nonnegative_orthant) != polyhedron):
        raise NotImplementedError('cannot compute the generating function of polyhedra with negative coordinates')
    logger.info('%s', polyhedron)
    # Normalize the variable name: 'names' must contain exactly one name and,
    # if 'name' is also given, they must coincide.
    if (names is not None):
        names = normalize_names((- 1), names)
        if (len(names) != 1):
            raise NotImplementedError('exactly one variable name has to be provided')
        if ((name is not None) and (name != names[0])):
            raise ValueError("keyword argument 'name' cannot be combined with 'names'")
        name = names[0]
    if (name is None):
        name = 'y'
    if (split is False):
        # Direct (unsplit) computation.
        result = _generating_function_of_integral_points_(polyhedron, name=name, **kwds)
        if result_as_tuple:
            return result
        else:
            if (len(result) != 1):
                raise ValueError("cannot unpack result (set 'result_as_tuple=True')")
            return result[0]
    if (d <= 1):
        raise ValueError('cannot do splitting with only dimension {}'.format(d))
    parts = None
    if (split is True):
        # Split the orthant into one cone per ordering b_{pi(1)} <= ... of
        # the coordinates (strict on descents to avoid double counting).
        def polyhedron_from_permutation(pi):
            def ieq(a, b):
                # Inequality encoding x_a <= x_b (strict when a >= b).
                return (((0 if (a < b) else (- 1)),) + tuple(((1 if (i == b) else ((- 1) if (i == a) else 0)) for i in range(1, (d + 1)))))
            def ieq_repr_rhs(a, b):
                # Human-readable right-hand side for logging.
                return ((' <= ' if (a < b) else ' < ') + 'b{}'.format((b - 1)))
            def ieqs_repr_lhs(pi):
                return 'b{}'.format((pi[0] - 1))
            (ieqs, repr_rhss) = zip(*[(ieq(a, b), ieq_repr_rhs(a, b)) for (a, b) in zip(pi[:(- 1)], pi[1:])])
            return (Polyhedron(ieqs=ieqs), (ieqs_repr_lhs(pi) + ''.join(repr_rhss)))
        split = (polyhedron_from_permutation(pi) for pi in Permutations(d))
        parts = ZZ(d).factorial()
    else:
        # Caller-provided splitting polyhedra.
        if isinstance(split, (list, tuple)):
            parts = len(split)
        split = ((ph, ph.Hrepresentation_str(**Hrepresentation_str_options)) for ph in split)
    result = []
    for (part, (split_polyhedron, pi_log)) in enumerate(split):
        if (parts is None):
            parts_log = str((part + 1))
        else:
            parts_log = '{}/{}'.format((part + 1), parts)
        logger.info('(%s) split polyhedron by %s', parts_log, pi_log)
        result.append(_generating_function_of_integral_points_((polyhedron & split_polyhedron), name=name, **kwds))
    if (not result_as_tuple):
        raise ValueError("cannot unpack result(unset 'result_as_tuple=False')")
    return sum(result, ())
def _train(config):
    """Train a (multi-GPU, TF1-style) QA model according to ``config``.

    Loads train/dev data, builds the embedding matrix, constructs the models
    and trainer, then runs the training loop with periodic summary logging,
    checkpointing, and dev-set evaluation.
    """
    data_filter = get_squad_data_filter(config)
    train_data = read_data(config, 'train', config.load, data_filter=data_filter)
    dev_data = read_data(config, config.dev_name, True, data_filter=None)
    update_config(config, [train_data, dev_data])
    _config_debug(config)
    # Build the word-embedding matrix: known words get their pretrained
    # vector, unknown ids get a random Gaussian vector.
    word2vec_dict = (train_data.shared['lower_word2vec'] if config.lower_word else train_data.shared['word2vec'])
    word2idx_dict = train_data.shared['word2idx']
    idx2vec_dict = {word2idx_dict[word]: vec for (word, vec) in word2vec_dict.items() if (word in word2idx_dict)}
    emb_mat = np.array([(idx2vec_dict[idx] if (idx in idx2vec_dict) else np.random.multivariate_normal(np.zeros(config.word_emb_size), np.eye(config.word_emb_size))) for idx in range(config.word_vocab_size)])
    config.emb_mat = emb_mat
    pprint(config.__flags, indent=2)
    models = get_multi_gpu_models(config)
    model = models[0]
    trainer = MultiGPUTrainer(config, models)
    # Pick the evaluator matching the model type.
    # NOTE(review): an unrecognized config.model_name leaves ThisEvaluator
    # unbound and raises NameError below — consider an explicit else-raise.
    if (config.model_name == 'basic'):
        ThisEvaluator = MultiGPUF1Evaluator
    elif (config.model_name in ['basic-class', 'basic-generate', 'baseline']):
        ThisEvaluator = MultiGPUClassificationAccuracyEvaluator
    elif (config.model_name == 'span-gen'):
        ThisEvaluator = UnionEvaluator
    evaluator = ThisEvaluator(config, models, tensor_dict=(model.tensor_dict if config.vis else None))
    graph_handler = GraphHandler(config, model)
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    graph_handler.initialize(sess)
    # Total number of training steps (explicit override or epochs * steps/epoch).
    num_steps = (config.num_steps or (int(math.ceil((train_data.num_examples / (config.batch_size * config.num_gpus)))) * config.num_epochs))
    global_step = 0
    for batches in tqdm(train_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=num_steps, shuffle=True, cluster=config.cluster), total=num_steps):
        global_step = (sess.run(model.global_step) + 1)
        get_summary = ((global_step % config.log_period) == 0)
        (loss, summary, train_op) = trainer.step(sess, batches, get_summary=get_summary)
        if get_summary:
            graph_handler.add_summary(summary, global_step)
        if ((global_step % config.save_period) == 0):
            graph_handler.save(sess, global_step=global_step)
        if (not config.eval):
            continue
        if ((global_step % config.eval_period) == 0):
            # NOTE(review): this rebinds num_steps (originally the total
            # training steps) to the number of dev batches; harmless here
            # because the training generator was already created, but fragile.
            num_steps = math.ceil((dev_data.num_examples / (config.batch_size * config.num_gpus)))
            if (0 < config.val_num_batches < num_steps):
                num_steps = config.val_num_batches
            # Leftover disabled train-set evaluation (kept verbatim as a
            # string statement; it is never executed).
            ' \n train_batches = tqdm(train_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=num_steps), total=num_steps)\n e_train = evaluator.get_evaluation_from_batches(\n sess, train_batches\n )\n graph_handler.add_summaries(e_train.summaries, global_step)\n '
            e_dev = evaluator.get_evaluation_from_batches(sess, tqdm(dev_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=num_steps), total=num_steps))
            graph_handler.add_summaries(e_dev.summaries, global_step)
            if config.dump_eval:
                graph_handler.dump_eval(e_dev)
            if config.dump_answer:
                graph_handler.dump_answer(e_dev)
    # Final checkpoint unless the last step already saved one.
    if ((global_step % config.save_period) != 0):
        graph_handler.save(sess, global_step=global_step)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.