def test_arraytype_record_1():
text = str(ak.Array([{'x': 1, 'y': 1.1}, {'x': 2, 'y': 2.2}, {'x': 3, 'y': 3.3}], with_name='Thingy').type)
parsedtype = ak.types.from_datashape(text, highlevel=True)
assert isinstance(parsedtype, ak.types.ArrayType)
    assert (str(parsedtype) == text)
class Learner(BaseLearner):
def __init__(self, args):
super().__init__(args)
self._network = SimpleVitNet(args, True)
self.args = args
def after_task(self):
self._known_classes = self._total_classes
def replace_fc(self, trainloader, model, args):
model = model.eval()
embedding_list = []
label_list = []
with torch.no_grad():
for (i, batch) in enumerate(trainloader):
(_, data, label) = batch
data = data.to(self._device)
label = label.to(self._device)
embedding = model.backbone(data)
embedding_list.append(embedding.cpu())
label_list.append(label.cpu())
embedding_list = torch.cat(embedding_list, dim=0)
label_list = torch.cat(label_list, dim=0)
class_list = np.unique(self.train_dataset.labels)
proto_list = []
for class_index in class_list:
data_index = (label_list == class_index).nonzero().squeeze((- 1))
embedding = embedding_list[data_index]
proto = embedding.mean(0)
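            # Class prototype: the mean backbone embedding over the class's
            # samples, written directly into the fc weights so the head acts
            # as a nearest-prototype classifier.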
self._network.fc.weight.data[class_index] = proto
return model
def incremental_train(self, data_manager):
self._cur_task += 1
self._total_classes = (self._known_classes + data_manager.get_task_size(self._cur_task))
self._network.update_fc(self._total_classes)
logging.info('Learning on {}-{}'.format(self._known_classes, self._total_classes))
train_dataset = data_manager.get_dataset(np.arange(self._known_classes, self._total_classes), source='train', mode='train')
self.train_dataset = train_dataset
self.data_manager = data_manager
self.train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_dataset = data_manager.get_dataset(np.arange(0, self._total_classes), source='test', mode='test')
self.test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
train_dataset_for_protonet = data_manager.get_dataset(np.arange(self._known_classes, self._total_classes), source='train', mode='test')
self.train_loader_for_protonet = DataLoader(train_dataset_for_protonet, batch_size=batch_size, shuffle=True, num_workers=num_workers)
if (len(self._multiple_gpus) > 1):
print('Multiple GPUs')
self._network = nn.DataParallel(self._network, self._multiple_gpus)
self._train(self.train_loader, self.test_loader, self.train_loader_for_protonet)
if (len(self._multiple_gpus) > 1):
self._network = self._network.module
def _train(self, train_loader, test_loader, train_loader_for_protonet):
self._network.to(self._device)
        self.replace_fc(train_loader_for_protonet, self._network, None)
class Beta(Dirichlet):
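    # A Beta(tau1, tau0) represented as a 2-component Dirichlet with
    # concentration (tau1, tau0); index 0 corresponds to p, index 1 to 1 - p.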
def __init__(self, tau1, tau0):
tau1 = np.atleast_1d(tau1)
tau0 = np.atleast_1d(tau0)
gamma = np.concatenate((tau1[(..., None)], tau0[(..., None)]), axis=(- 1))
super(Beta, self).__init__(gamma)
def log_probability(self, p):
x = np.concatenate((p[(..., None)], (1 - p[(..., None)])), axis=(- 1))
return super(Beta, self).log_probability(x)
def expected_p(self):
E_g = self.expected_g()
return E_g[(..., 0)]
def expected_log_p(self):
E_logg = self.expected_log_g()
return E_logg[(..., 0)]
def expected_log_notp(self):
E_logg = self.expected_log_g()
return E_logg[(..., 1)]
def negentropy(self, E_ln_p=None, E_ln_notp=None):
if ((E_ln_p is not None) and (E_ln_notp is not None)):
E_ln_g = np.concatenate((E_ln_p[(..., None)], E_ln_notp[(..., None)]), axis=(- 1))
else:
E_ln_g = None
        return super(Beta, self).negentropy(E_ln_g=E_ln_g)
def train_defender():
model = get_model(args.model_tgt, args.dataset, args.pretrained)
model = model.to(args.device)
(train_loader, test_loader) = get_dataset(args.dataset, args.batch_size, augment=True)
savedir = '{}/{}/{}/'.format(args.logdir, args.dataset, args.model_tgt)
if (not os.path.exists(savedir)):
os.makedirs(savedir)
savepath = (savedir + 'T.pt')
sch = None
if (args.opt == 'sgd'):
opt = optim.SGD(model.parameters(), lr=args.lr_tgt, momentum=0.9, weight_decay=0.0005)
sch = optim.lr_scheduler.CosineAnnealingLR(opt, args.epochs, last_epoch=(- 1))
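        # Cosine annealing over the full run; note a schedule is only attached
        # for SGD, so the Adam branch below trains at a fixed learning rate.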
elif (args.opt == 'adam'):
opt = optim.Adam(model.parameters(), lr=args.lr_tgt)
else:
sys.exit('Invalid optimizer {}'.format(args.opt))
for epoch in range(args.epochs):
(train_loss, train_acc) = train_epoch(model, args.device, train_loader, opt, args)
(test_loss, test_acc) = test(model, args.device, test_loader)
print('Epoch: {} Loss: {:.4f} Train Acc: {:.2f}% Test Acc: {:.2f}%\n'.format((epoch + 1), train_loss, train_acc, test_acc))
wandb.log({'Train Acc': train_acc, 'Test Acc': test_acc, 'Train Loss': train_loss})
if sch:
sch.step()
    torch.save(model.state_dict(), savepath)
def test_reshape(backend):
tb = pyhf.tensorlib
    assert (tb.tolist(tb.reshape(tb.ones((1, 2, 3)), ((- 1),))) == [1, 1, 1, 1, 1, 1])
def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0, conv_type='subm', norm_fn=None):
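    # 'subm' builds a submanifold sparse conv (preserves the active-voxel
    # pattern), 'spconv' a regular sparse conv (can stride/downsample), and
    # 'inverseconv' an inverse conv that undoes a matching 'spconv' via its
    # shared indice_key.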
if (conv_type == 'subm'):
conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key)
elif (conv_type == 'spconv'):
conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=False, indice_key=indice_key)
elif (conv_type == 'inverseconv'):
conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False)
else:
raise NotImplementedError
m = spconv.SparseSequential(conv, norm_fn(out_channels), nn.ReLU(True))
    return m
class ImageProcessingMixin(metaclass=DummyObject):
_backends = ['vision']
def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
class VolumeSimilarity(ConfusionMatrixMetric):
def __init__(self, metric: str='VOLSMTY'):
super().__init__(metric)
def calculate(self):
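        # Volume similarity from the confusion matrix:
        # VS = 1 - |fn - fp| / (2*tp + fn + fp),
        # equivalently 1 - |V_pred - V_ref| / (V_pred + V_ref).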
tp = self.confusion_matrix.tp
fp = self.confusion_matrix.fp
fn = self.confusion_matrix.fn
if (((tp + fn) + fp) == 0):
warnings.warn('Unable to compute volume similarity due to division by zero, returning -inf', NotComputableMetricWarning)
return float('-inf')
        return (1 - (abs((fn - fp)) / (((2 * tp) + fn) + fp)))
def flowread(flow_path, quantize=False, concat_axis=0, *args, **kwargs):
if quantize:
assert (concat_axis in [0, 1])
cat_flow = cv2.imread(flow_path, cv2.IMREAD_UNCHANGED)
if (cat_flow.ndim != 2):
raise IOError(f'{flow_path} is not a valid quantized flow file, its dimension is {cat_flow.ndim}.')
assert ((cat_flow.shape[concat_axis] % 2) == 0)
(dx, dy) = np.split(cat_flow, 2, axis=concat_axis)
flow = dequantize_flow(dx, dy, *args, **kwargs)
else:
with open(flow_path, 'rb') as f:
try:
header = f.read(4).decode('utf-8')
except Exception:
raise IOError(f'Invalid flow file: {flow_path}')
else:
if (header != 'PIEH'):
raise IOError(f'Invalid flow file: {flow_path}, header does not contain PIEH')
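                # 'PIEH' is the magic string of the Middlebury .flo format:
                # 4-byte magic, int32 width, int32 height, then w*h*2 float32
                # values (per-pixel dx, dy).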
w = np.fromfile(f, np.int32, 1).squeeze()
h = np.fromfile(f, np.int32, 1).squeeze()
flow = np.fromfile(f, np.float32, ((w * h) * 2)).reshape((h, w, 2))
    return flow.astype(np.float32)
class _NoneConstraint(_Constraint):
def is_satisfied_by(self, val):
return (val is None)
def __str__(self):
        return 'None'
def audiohandler(extension, data):
if (extension not in ['flac', 'mp3', 'sox', 'wav', 'm4a', 'ogg', 'wma']):
return None
try:
import torchaudio
except ImportError as e:
        raise ModuleNotFoundError('Package `torchaudio` is required to be installed for default audio file loader. Please use `pip install torchaudio` or `conda install torchaudio -c pytorch` to install the package') from e
with tempfile.TemporaryDirectory() as dirname:
fname = os.path.join(dirname, f'file.{extension}')
with open(fname, 'wb') as stream:
stream.write(data)
        return torchaudio.load(fname)
def customized_ccompiler(plat=None, compiler=None):
c = ccompiler.new_compiler(plat=plat, compiler=compiler)
c.customize('')
    return c
def subscribeContext(subscribeCtxEle, BrokerURL):
    # NOTE: the JSON-LD @context URL inside the Link header was lost in
    # extraction; only fragments of the header value survive below.
    headers = {'Accept': 'application/ld+json', 'Content-Type': 'application/json', 'Link': '< rel=" type="application/ld+json"'}
response = requests.post((BrokerURL + '/ngsi-ld/v1/subscriptions/'), data=json.dumps(subscribeCtxEle), headers=headers)
if (response.status_code == 201):
return response.status_code
else:
        return ''
def add_edge_pref(graph, k=3):
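    # Preferential pairing of the k lowest-degree nodes. Note that this only
    # records candidate edges in info['added']; it never modifies `graph`, so
    # the caller is expected to add the returned edges itself.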
info = defaultdict(list)
deg = dict(graph.degree)
edges_tried = set()
for _ in range(k):
u = min(deg, key=deg.get)
u_d = (deg[u] + 1)
deg.pop(u)
v = min(deg, key=deg.get)
deg[v] += 1
deg[u] = u_d
if (((u, v) not in edges_tried) and ((v, u) not in edges_tried)):
info['added'].append((u, v))
edges_tried.update([(u, v), (v, u)])
    return info
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--train_data_file', default=None, type=str, required=True, help='The input training data file (a text file).')
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--eval_data_file', default=None, type=str, help='An optional input evaluation data file to evaluate the perplexity on (a text file).')
parser.add_argument('--test_data_file', default=None, type=str, help='An optional input evaluation data file to evaluate the perplexity on (a text file).')
parser.add_argument('--model_type', default='bert', type=str, help='The model architecture to be fine-tuned.')
parser.add_argument('--model_name_or_path', default=None, type=str, help='The model checkpoint for weights initialization.')
parser.add_argument('--base_model', default=None, type=str, help='Base Model')
    parser.add_argument('--csv_store_path', default=None, type=str, help='Path to store the result CSV file.')
parser.add_argument('--mlm', action='store_true', help='Train with masked-language modeling loss instead of language modeling.')
parser.add_argument('--mlm_probability', type=float, default=0.15, help='Ratio of tokens to mask for masked language modeling loss')
parser.add_argument('--number_labels', type=int, help='The number of labels.')
parser.add_argument('--config_name', default='', type=str, help='Optional pretrained config name or path if not the same as model_name_or_path')
parser.add_argument('--tokenizer_name', default='', type=str, help='Optional pretrained tokenizer name or path if not the same as model_name_or_path')
parser.add_argument('--data_flow_length', default=64, type=int, help='Optional Data Flow input sequence length after tokenization.')
parser.add_argument('--code_length', default=256, type=int, help='Optional Code input sequence length after tokenization.')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--use_ga', action='store_true', help='Whether to run the GA attack.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--do_test', action='store_true', help='Whether to run eval on the test set.')
parser.add_argument('--eval_batch_size', default=4, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument('--cache_dir', default='', type=str, help='Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)')
args = parser.parse_args()
args.device = torch.device('cuda')
set_seed(args.seed)
args.start_epoch = 0
args.start_step = 0
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
if (os.path.exists(checkpoint_last) and os.listdir(checkpoint_last)):
args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
args.config_name = os.path.join(checkpoint_last, 'config.json')
idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
with open(idx_file, encoding='utf-8') as idxf:
args.start_epoch = (int(idxf.readlines()[0].strip()) + 1)
step_file = os.path.join(checkpoint_last, 'step_file.txt')
if os.path.exists(step_file):
with open(step_file, encoding='utf-8') as stepf:
args.start_step = int(stepf.readlines()[0].strip())
logger.info('reload model from {}, resume from {} epoch'.format(checkpoint_last, args.start_epoch))
(config_class, model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained((args.config_name if args.config_name else args.model_name_or_path), cache_dir=(args.cache_dir if args.cache_dir else None))
config.num_labels = args.number_labels
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, do_lower_case=False, cache_dir=(args.cache_dir if args.cache_dir else None))
if args.model_name_or_path:
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config, cache_dir=(args.cache_dir if args.cache_dir else None))
else:
model = model_class(config)
model = Model(model, config, tokenizer, args)
checkpoint_prefix = 'checkpoint-best-acc/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir))
model.to(args.device)
codebert_mlm = RobertaForMaskedLM.from_pretrained(args.base_model)
tokenizer_mlm = RobertaTokenizer.from_pretrained(args.base_model)
codebert_mlm.to('cuda')
eval_dataset = TextDataset(tokenizer, args, args.eval_data_file)
file_type = args.eval_data_file.split('/')[(- 1)].split('.')[0]
folder = '/'.join(args.eval_data_file.split('/')[:(- 1)])
codes_file_path = os.path.join(folder, '{}_subs.jsonl'.format(file_type))
print(codes_file_path)
source_codes = []
substs = []
with open(codes_file_path) as rf:
for line in rf:
item = json.loads(line.strip())
            # NOTE: the second .replace() is a no-op as written; its first
            # argument was likely a non-ASCII quote character lost in extraction.
            source_codes.append(item['code'].replace('\\n', '\n').replace('"', '"'))
substs.append(item['substitutes'])
assert (len(source_codes) == len(eval_dataset) == len(substs))
success_attack = 0
total_cnt = 0
recoder = Recorder(args.csv_store_path)
query_times = 0
attacker = Attacker(args, model, tokenizer, codebert_mlm, tokenizer_mlm, use_bpe=1, threshold_pred_score=0)
start_time = time.time()
for (index, example) in enumerate(eval_dataset):
example_start_time = time.time()
code = source_codes[index]
subs = substs[index]
(code, prog_length, adv_code, true_label, orig_label, temp_label, is_success, variable_names, names_to_importance_score, nb_changed_var, nb_changed_pos, replaced_words) = attacker.greedy_attack(example, code, subs)
attack_type = 'Greedy'
if ((is_success == (- 1)) and args.use_ga):
(code, prog_length, adv_code, true_label, orig_label, temp_label, is_success, variable_names, names_to_importance_score, nb_changed_var, nb_changed_pos, replaced_words) = attacker.ga_attack(example, code, subs, initial_replace=replaced_words)
attack_type = 'GA'
example_end_time = ((time.time() - example_start_time) / 60)
print('Example time cost: ', round(example_end_time, 2), 'min')
print('ALL examples time cost: ', round(((time.time() - start_time) / 60), 2), 'min')
score_info = ''
if (names_to_importance_score is not None):
for key in names_to_importance_score.keys():
score_info += (((key + ':') + str(names_to_importance_score[key])) + ',')
replace_info = ''
if (replaced_words is not None):
for key in replaced_words.keys():
replace_info += (((key + ':') + replaced_words[key]) + ',')
print('Query times in this attack: ', (model.query - query_times))
print('All Query times: ', model.query)
recoder.write(index, code, prog_length, adv_code, true_label, orig_label, temp_label, is_success, variable_names, score_info, nb_changed_var, nb_changed_pos, replace_info, attack_type, (model.query - query_times), example_end_time)
query_times = model.query
if (is_success >= (- 1)):
total_cnt += 1
if (is_success == 1):
success_attack += 1
if (total_cnt == 0):
continue
print('Success rate: ', ((1.0 * success_attack) / total_cnt))
print('Successful items count: ', success_attack)
print('Total count: ', total_cnt)
print('Index: ', index)
        print()
def mean_squared_error(image0, image1):
check_shape_equality(image0, image1)
(image0, image1) = _as_floats(image0, image1)
    return np.mean(((image0 - image1) ** 2), dtype=np.float64)
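# A quick sanity check (illustrative values, not from the source):
# mean_squared_error(np.zeros((4, 4)), np.ones((4, 4))) -> 1.0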
def main():
    # NOTE: the original demo sentences (Chinese text) were lost in extraction,
    # leaving mostly empty strings below; substitute real sentences to see output.
    error_sentences = ['', '', '', ' _ ,', ',', ',', '', '', '', '']
m_kenlm = Corrector()
m_macbert = MacBertCorrector()
for line in error_sentences:
r = m_kenlm.correct(line)
print('kenlm: {}'.format(r))
r = m_macbert.correct(line)
print('macbert: {}'.format(r))
        print()
def to_bool(s, fallback=None):
if (not s):
return fallback
s = s.lower()
if (s in ['1', 'true', 'yes', 'y']):
return True
if (s in ['0', 'false', 'no', 'n']):
return False
    return fallback
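# Minimal usage sketch for to_bool (behaviour follows directly from the code
# above; unrecognized or empty inputs return `fallback`):
assert to_bool('YES') is True
assert to_bool('0') is False
assert to_bool('maybe', fallback=False) is False
assert to_bool(None) is None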
def main(args):
if args.seed:
random.seed(args.seed)
for line in sys.stdin:
constraints = []
def add_constraint(constraint):
constraints.append(constraint)
source = line.rstrip()
if ('\t' in line):
(source, target) = line.split('\t')
if args.add_sos:
target = f'<s> {target}'
if args.add_eos:
target = f'{target} </s>'
if (len(target.split()) >= args.len):
words = [target]
num = args.number
choices = {}
for i in range(num):
if (len(words) == 0):
break
segmentno = random.choice(range(len(words)))
segment = words.pop(segmentno)
tokens = segment.split()
phrase_index = random.choice(range(len(tokens)))
choice = ' '.join(tokens[phrase_index:min(len(tokens), (phrase_index + args.len))])
for j in range(phrase_index, min(len(tokens), (phrase_index + args.len))):
tokens.pop(phrase_index)
if (phrase_index > 0):
words.append(' '.join(tokens[0:phrase_index]))
if ((phrase_index + 1) < len(tokens)):
words.append(' '.join(tokens[phrase_index:]))
choices[target.find(choice)] = choice
target = target.replace(choice, (' ' * len(choice)), 1)
for key in sorted(choices.keys()):
add_constraint(choices[key])
        print(source, *constraints, sep='\t')
class InceptionModule(nn.Module):
def __init__(self, in_channels, out_channels: Tuple[(int, int, int, int, int, int)], name: str) -> None:
super().__init__()
self.b0 = Unit3D(in_channels=in_channels, output_channels=out_channels[0], kernel_shape=(1, 1, 1), padding=0, name=(name + '/Branch_0/Conv3d_0a_1x1'))
self.b1a = Unit3D(in_channels=in_channels, output_channels=out_channels[1], kernel_shape=(1, 1, 1), padding=0, name=(name + '/Branch_1/Conv3d_0a_1x1'))
self.b1b = Unit3D(in_channels=out_channels[1], output_channels=out_channels[2], kernel_shape=(3, 3, 3), name=(name + '/Branch_1/Conv3d_0b_3x3'))
self.b2a = Unit3D(in_channels=in_channels, output_channels=out_channels[3], kernel_shape=(1, 1, 1), padding=0, name=(name + '/Branch_2/Conv3d_0a_1x1'))
self.b2b = Unit3D(in_channels=out_channels[3], output_channels=out_channels[4], kernel_shape=(3, 3, 3), name=(name + '/Branch_2/Conv3d_0b_3x3'))
self.b3a = MaxPool3dSamePadding(kernel_size=[3, 3, 3], stride=(1, 1, 1), padding=0)
self.b3b = Unit3D(in_channels=in_channels, output_channels=out_channels[5], kernel_shape=(1, 1, 1), padding=0, name=(name + '/Branch_3/Conv3d_0b_1x1'))
self.name = name
def forward(self, x: torch.Tensor) -> torch.Tensor:
b0 = self.b0(x)
b1 = self.b1b(self.b1a(x))
b2 = self.b2b(self.b2a(x))
b3 = self.b3b(self.b3a(x))
        return torch.cat((b0, b1, b2, b3), dim=1)
class LinearTempDecay():
def __init__(self, t_max: int, rel_start_decay: float=0.2, start_b: int=20, end_b: int=2):
self.t_max = t_max
self.start_decay = (rel_start_decay * t_max)
self.start_b = start_b
self.end_b = end_b
def __call__(self, t: int) -> float:
is_before_start_decay = tf.cast((t < self.start_decay), tf.float32)
rel_t = ((t - self.start_decay) / (self.t_max - self.start_decay))
        return ((self.start_b * is_before_start_decay) + ((1 - is_before_start_decay) * (self.end_b + ((self.start_b - self.end_b) * tf.math.maximum(0.0, (1 - rel_t))))))
class SampleDataLoader(ClassDataLoader):
def __init__(self, data, batch_size):
dataset = self.shuffle_dataset(data)
self.dataset = dataset
self.batch_size = batch_size
self.num_iters = math.ceil((len(self.dataset) / self.batch_size))
def shuffle_dataset(self, data):
data = list(chain(*data.values()))
random.shuffle(data)
return data
def gather_batch(self, i):
start = (self.batch_size * i)
end = (self.batch_size * (i + 1))
batch = self.dataset[start:end]
        return batch
def get_pqsource(prob_label):
    prob2tuples = {
        'sg5': (density.IsotropicNormal(np.zeros(5), 1), data.DSIsotropicNormal(np.zeros(5), 1)),
        'gmd5': (density.IsotropicNormal(np.zeros(5), 1), data.DSIsotropicNormal(np.hstack((0.2, np.zeros(4))), 1)),
        'gmd1': (density.IsotropicNormal(np.zeros(1), 1), data.DSIsotropicNormal(np.ones(1) * 0.2, 1)),
        'gmd100': (density.IsotropicNormal(np.zeros(100), 1), data.DSIsotropicNormal(np.hstack((1, np.zeros(99))), 1)),
        'gvd5': (density.Normal(np.zeros(5), np.eye(5)), data.DSNormal(np.zeros(5), np.diag(np.hstack((2, np.ones(4)))))),
        'gvd10': (density.Normal(np.zeros(10), np.eye(10)), data.DSNormal(np.zeros(10), np.diag(np.hstack((2, np.ones(9)))))),
        'gbrbm_dx50_dh10_v0': gaussbern_rbm_tuple(0, dx=50, dh=10, n=sample_size),
        'gbrbm_dx5_dh3_v0': gaussbern_rbm_tuple(0, dx=5, dh=3, n=sample_size),
        'gbrbm_dx50_dh10_v1em3': gaussbern_rbm_tuple(0.001, dx=50, dh=10, n=sample_size),
        'gbrbm_dx5_dh3_v5em3': gaussbern_rbm_tuple(0.005, dx=5, dh=3, n=sample_size),
        'gmm_d1': (density.IsoGaussianMixture(np.array([[0], [3.0]]), np.array([1, 0.01])), data.DSIsoGaussianMixture(np.array([[-3.0], [0]]), np.array([0.01, 1]))),
        'g_vs_gmm_d5': (density.IsotropicNormal(np.zeros(5), 1), data.DSIsoGaussianMixture(np.vstack((np.hstack((0.0, np.zeros(4))), np.zeros(5))), np.array([0.0001, 1]), pmix=[0.1, 0.9])),
        'g_vs_gmm_d2': (density.IsotropicNormal(np.zeros(2), 1), data.DSIsoGaussianMixture(np.vstack((np.hstack((0.0, np.zeros(1))), np.zeros(2))), np.array([0.01, 1]), pmix=[0.1, 0.9])),
        'g_vs_gmm_d1': (density.IsotropicNormal(np.zeros(1), 1), data.DSIsoGaussianMixture(np.array([[0.0], [0]]), np.array([0.01, 1]), pmix=[0.1, 0.9])),
    }
if (prob_label not in prob2tuples):
        raise ValueError('Unknown problem label. Need to be one of %s' % list(prob2tuples.keys()))
    return prob2tuples[prob_label]
def GaussianIntegers(names='I', latex_name='i'):
from sage.rings.complex_double import CDF
from sage.rings.number_field.number_field import NumberField
f = ZZ['x']([1, 0, 1])
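    # [1, 0, 1] are the coefficients of x^2 + 1; adjoining its root i to Q
    # gives Q(i), whose ring of integers is the Gaussian integers Z[i].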
nf = NumberField(f, names, embedding=CDF(0, 1), latex_name=latex_name)
    return nf.ring_of_integers()
def color_segmap(sample_seg, color_map):
sample_seg = torch.argmax(sample_seg, dim=1)
sample_mask = torch.zeros((sample_seg.shape[0], sample_seg.shape[1], sample_seg.shape[2], 3), dtype=torch.float)
for key in color_map:
sample_mask[(sample_seg == key)] = torch.tensor(color_map[key], dtype=torch.float)
sample_mask = sample_mask.permute(0, 3, 1, 2)
    return sample_mask
class DMA_reverse_reg(atomic_reg):
OP_NAME = 'DMA_reverse'
    _fields_ = [
        ('intr_en', ctypes.c_uint64, 1), ('stride_enable', ctypes.c_uint64, 1), ('nchw_copy', ctypes.c_uint64, 1), ('cmd_short', ctypes.c_uint64, 1), ('reversed', ctypes.c_uint64, 1),
        ('reserved', ctypes.c_uint64, 4), ('reserved', ctypes.c_uint64, 20), ('Reserved', ctypes.c_uint64, 3),
        ('cmd_type', ctypes.c_uint64, 4), ('cmd_special_function', ctypes.c_uint64, 3), ('fill_constant_en', ctypes.c_uint64, 1), ('src_data_format', ctypes.c_uint64, 3),
        ('reserved', ctypes.c_uint64, 21), ('cmd_id_dep', ctypes.c_uint64, 24), ('reserved', ctypes.c_uint64, 8), ('constant_value', ctypes.c_uint64, 32),
        ('src_nstride', ctypes.c_uint64, 32), ('src_cstride', ctypes.c_uint64, 32), ('src_hstride', ctypes.c_uint64, 32), ('src_wstride', ctypes.c_uint64, 32),
        ('dst_nstride', ctypes.c_uint64, 32), ('dst_cstride', ctypes.c_uint64, 32), ('dst_hstride', ctypes.c_uint64, 32), ('dst_wstride', ctypes.c_uint64, 32),
        ('src_nsize', ctypes.c_uint64, 16), ('src_csize', ctypes.c_uint64, 16), ('src_hsize', ctypes.c_uint64, 16), ('src_wsize', ctypes.c_uint64, 16),
        ('dst_nsize', ctypes.c_uint64, 16), ('dst_csize', ctypes.c_uint64, 16), ('dst_hsize', ctypes.c_uint64, 16), ('dst_wsize', ctypes.c_uint64, 16),
        ('src_start_addr_l32', ctypes.c_uint64, 32), ('src_start_addr_h8', ctypes.c_uint64, 8), ('reserved', ctypes.c_uint64, 24),
        ('dst_start_addr_l32', ctypes.c_uint64, 32), ('dst_start_addr_h8', ctypes.c_uint64, 8), ('reserved', ctypes.c_uint64, 24),
        ('Reserved', ctypes.c_uint64, 32), ('Reserved', ctypes.c_uint64, 32),
        ('localmem_mask_l32', ctypes.c_uint64, 32), ('localmem_mask_h32', ctypes.c_uint64, 32),
    ]
intr_en: int
stride_enable: int
nchw_copy: int
cmd_short: int
reversed: int
reserved: int
reserved: int
Reserved: int
cmd_type: int
cmd_special_function: int
fill_constant_en: int
src_data_format: int
reserved: int
cmd_id_dep: int
reserved: int
constant_value: int
src_nstride: int
src_cstride: int
src_hstride: int
src_wstride: int
dst_nstride: int
dst_cstride: int
dst_hstride: int
dst_wstride: int
src_nsize: int
src_csize: int
src_hsize: int
src_wsize: int
dst_nsize: int
dst_csize: int
dst_hsize: int
dst_wsize: int
src_start_addr_l32: int
src_start_addr_h8: int
reserved: int
dst_start_addr_l32: int
dst_start_addr_h8: int
reserved: int
Reserved: int
Reserved: int
localmem_mask_l32: int
localmem_mask_h32: int
    length: int = 768
class GlobalGenerator(nn.Module):
    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, padding_type='reflect', conv_kind='default', activation=nn.ReLU(True), up_norm_layer=nn.BatchNorm2d, affine=None, up_activation=nn.ReLU(True), dilated_blocks_n=0, dilated_blocks_n_start=0, dilated_blocks_n_middle=0, add_out_act=True, max_features=1024, is_resblock_depthwise=False, ffc_positions=None, ffc_kwargs=None, dilation=1, second_dilation=None, dilation_block_kind='simple', multidilation_kwargs=None):
        assert n_blocks >= 0
        super().__init__()
        # Use None instead of mutable {} defaults to avoid shared-state bugs.
        ffc_kwargs = ffc_kwargs if ffc_kwargs is not None else {}
        multidilation_kwargs = multidilation_kwargs if multidilation_kwargs is not None else {}
        conv_layer = get_conv_block_ctor(conv_kind)
norm_layer = get_norm_layer(norm_layer)
if (affine is not None):
norm_layer = partial(norm_layer, affine=affine)
up_norm_layer = get_norm_layer(up_norm_layer)
if (affine is not None):
up_norm_layer = partial(up_norm_layer, affine=affine)
if (ffc_positions is not None):
ffc_positions = collections.Counter(ffc_positions)
model = [nn.ReflectionPad2d(3), conv_layer(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
identity = Identity()
for i in range(n_downsampling):
mult = (2 ** i)
model += [conv_layer(min(max_features, (ngf * mult)), min(max_features, ((ngf * mult) * 2)), kernel_size=3, stride=2, padding=1), norm_layer(min(max_features, ((ngf * mult) * 2))), activation]
mult = (2 ** n_downsampling)
feats_num_bottleneck = min(max_features, (ngf * mult))
dilated_block_kwargs = dict(dim=feats_num_bottleneck, padding_type=padding_type, activation=activation, norm_layer=norm_layer)
if (dilation_block_kind == 'simple'):
dilated_block_kwargs['conv_kind'] = conv_kind
elif (dilation_block_kind == 'multi'):
dilated_block_kwargs['conv_layer'] = functools.partial(get_conv_block_ctor('multidilated'), **multidilation_kwargs)
if ((dilated_blocks_n_start is not None) and (dilated_blocks_n_start > 0)):
model += make_dil_blocks(dilated_blocks_n_start, dilation_block_kind, dilated_block_kwargs)
for i in range(n_blocks):
if ((i == (n_blocks // 2)) and (dilated_blocks_n_middle is not None) and (dilated_blocks_n_middle > 0)):
model += make_dil_blocks(dilated_blocks_n_middle, dilation_block_kind, dilated_block_kwargs)
if ((ffc_positions is not None) and (i in ffc_positions)):
for _ in range(ffc_positions[i]):
model += [FFCResnetBlock(feats_num_bottleneck, padding_type, norm_layer, activation_layer=nn.ReLU, inline=True, **ffc_kwargs)]
if is_resblock_depthwise:
resblock_groups = feats_num_bottleneck
else:
resblock_groups = 1
model += [ResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation=activation, norm_layer=norm_layer, conv_kind=conv_kind, groups=resblock_groups, dilation=dilation, second_dilation=second_dilation)]
if ((dilated_blocks_n is not None) and (dilated_blocks_n > 0)):
model += make_dil_blocks(dilated_blocks_n, dilation_block_kind, dilated_block_kwargs)
for i in range(n_downsampling):
mult = (2 ** (n_downsampling - i))
model += [nn.ConvTranspose2d(min(max_features, (ngf * mult)), min(max_features, int(((ngf * mult) / 2))), kernel_size=3, stride=2, padding=1, output_padding=1), up_norm_layer(min(max_features, int(((ngf * mult) / 2)))), up_activation]
model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
if add_out_act:
model.append(get_activation(('tanh' if (add_out_act is True) else add_out_act)))
self.model = nn.Sequential(*model)
def forward(self, input):
        return self.model(input)
# NOTE: a decorator preceded this class in the source but was garbled in
# extraction; only the trailing `_model()` of its name survived.
class EigenValue(SimOutput):
type = goos.ModelNameType('output.eigen_value')
    bloch_vector = goos.types.FloatType()
class TensorGroup(EasyDict):
def __init__(self, **kwargs):
keys = list(kwargs.keys())
values = list(kwargs.values())
assert (len(keys) == len(values))
assert all((isinstance(key, str) for key in keys)), f'Wrong types for keys: {keys}'
assert all(((isinstance(t, torch.Tensor) or isinstance(t, TensorGroup)) for t in values)), f'Wrong types for values: {dict(zip(keys, [type(v) for v in values]))}'
assert all(((len(t) == len(values[0])) for t in values)), f'Wrong shapes: {dict(zip(keys, [v.shape for v in values]))}'
assert all(((t.device == values[0].device) for t in values)), f'All tensor should be on the same device, but got: {dict(zip(keys, [v.device for v in values]))}'
assert all(((not (field in keys)) for field in ['_length', 'shape']))
self._length = values[0].shape[0]
self.shape = [len(self), None]
super(TensorGroup, self).__init__(**kwargs)
def __len__(self) -> int:
return self._length
def __getitem__(self, item: Any):
if isinstance(item, str):
return super(TensorGroup, self).__getitem__(item)
else:
return TensorGroup(**{k: v[item] for (k, v) in self.items()})
def items(self) -> List[Tuple[(str, Union[(torch.Tensor, 'TensorGroup')])]]:
return [(k, v) for (k, v) in super(TensorGroup, self).items() if (not (k in ('_length', 'shape')))]
def keys(self) -> List[str]:
return [k for (k, _) in self.items()]
def values(self) -> List[Union[(torch.Tensor, 'TensorGroup')]]:
return [v for (_, v) in self.items()]
def split(self, group_size: int) -> List['TensorGroup']:
result = []
for group_idx in range((((len(self) + group_size) - 1) // group_size)):
result.append(self[(group_idx * group_size):((group_idx + 1) * group_size)])
return result
def max(self) -> torch.Tensor:
return torch.stack([v.max() for (k, v) in self.items()]).max()
def reduce_mean(self) -> torch.Tensor:
return (self.sum() / self.numel())
def sum(self) -> torch.Tensor:
return torch.stack([v.sum() for (k, v) in self.items()]).sum()
def numel(self) -> int:
return sum([v.numel() for (k, v) in self.items()])
def to(self, *args, **kwargs) -> 'TensorGroup':
return TensorGroup(**{k: v.to(*args, **kwargs) for (k, v) in self.items()})
def clone(self) -> 'TensorGroup':
return TensorGroup(**{k: v.clone() for (k, v) in self.items()})
def repeat_interleave(self, *args, **kwargs) -> 'TensorGroup':
return TensorGroup(**{k: v.repeat_interleave(*args, **kwargs) for (k, v) in self.items()})
def __add__(self, other: Any) -> 'TensorGroup':
if isinstance(other, TensorGroup):
return TensorGroup(**{k: (v + other[k]) for (k, v) in self.items()})
else:
return TensorGroup(**{k: (v + other) for (k, v) in self.items()})
def __radd__(self, other) -> 'TensorGroup':
return self.__add__(other)
def __sub__(self, other: Any) -> 'TensorGroup':
if isinstance(other, TensorGroup):
return TensorGroup(**{k: (v - other[k]) for (k, v) in self.items()})
else:
return TensorGroup(**{k: (v - other) for (k, v) in self.items()})
def __pow__(self, other: Any) -> 'TensorGroup':
if isinstance(other, TensorGroup):
return TensorGroup(**{k: (v ** other[k]) for (k, v) in self.items()})
else:
return TensorGroup(**{k: (v ** other) for (k, v) in self.items()})
def __mul__(self, other: Any) -> 'TensorGroup':
if isinstance(other, TensorGroup):
return TensorGroup(**{k: (v * other[k]) for (k, v) in self.items()})
else:
return TensorGroup(**{k: (v * other) for (k, v) in self.items()})
def __rmul__(self, other: Any) -> 'TensorGroup':
return self.__mul__(other)
def float(self) -> 'TensorGroup':
return TensorGroup(**{k: v.float() for (k, v) in self.items()})
def device(self):
return next(iter(self.items()))[1].device
def shapes(self) -> List[torch.Size]:
return [v.shape for v in self.values()]
def detach(self) -> 'TensorGroup':
return TensorGroup(**{k: v.detach() for (k, v) in self.items()})
def reshape_each(self, reshaper: Callable) -> 'TensorGroup':
return TensorGroup(**{k: v.reshape(reshaper(v)) for (k, v) in self.items()})
def cpu(self) -> 'TensorGroup':
return TensorGroup(**{k: v.cpu() for (k, v) in self.items()})
def clamp(self, *args, **kwargs) -> 'TensorGroup':
return TensorGroup(**{k: v.clamp(*args, **kwargs) for (k, v) in self.items()})
def permute(self, *args, **kwargs) -> 'TensorGroup':
return TensorGroup(**{k: v.permute(*args, **kwargs) for (k, v) in self.items()})
def mean(self, *args, **kwargs) -> 'TensorGroup':
return TensorGroup(**{k: v.mean(*args, **kwargs) for (k, v) in self.items()})
    @staticmethod
    def cat(tgroups: List['TensorGroup'], dim: int=0):
keys_set = set(tgroups[0].keys())
        assert all(set(tg.keys()) == keys_set for tg in tgroups), f'Keys should be the same: {[list(tg.keys()) for tg in tgroups]}'
        return TensorGroup(**{k: torch.cat([tg[k] for tg in tgroups], dim=dim) for k in keys_set})
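# A minimal usage sketch (names are illustrative, not from the source):
# g = TensorGroup(a=torch.zeros(4, 3), b=torch.ones(4))
# len(g) == 4; g[:2] slices every member tensor; (g + 1).sum() reduces over all.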
def BK_pieces(max_letter):
forbidden_border_labels = [('%s(%s)' % (i, j)) for i in range(1, (max_letter + 1)) for j in range(1, i)]
pieces = PuzzlePieces(forbidden_border_labels)
for i in range(1, (max_letter + 1)):
piece = DeltaPiece(('%s' % i), ('%s' % i), ('%s' % i))
pieces.add_piece(piece, rotations=60)
for j in range(1, i):
piece = DeltaPiece(north_west=('%s' % i), north_east=('%s' % j), south=('%s(%s)' % (i, j)))
pieces.add_piece(piece, rotations=60)
    return pieces
@pytest.fixture(params=DDPG_PARAMS)
def ddpg_critic_param(request):
param = request.param
    return (CriticDRR(state_repr_dim=param['state_repr_dim'], action_emb_dim=param['action_emb_dim'], hidden_dim=param['hidden_dim'], heads_num=param['heads_num'], heads_q=param['heads_q']), param)
class MakeParsingFrontend():
def __init__(self, parser_type, lexer_type):
self.parser_type = parser_type
self.lexer_type = lexer_type
def __call__(self, lexer_conf, parser_conf, options):
assert isinstance(lexer_conf, LexerConf)
assert isinstance(parser_conf, ParserConf)
parser_conf.parser_type = self.parser_type
lexer_conf.lexer_type = self.lexer_type
return ParsingFrontend(lexer_conf, parser_conf, options)
def deserialize(self, data, memo, lexer_conf, callbacks, options):
parser_conf = ParserConf.deserialize(data['parser_conf'], memo)
parser = LALR_Parser.deserialize(data['parser'], memo, callbacks, options.debug)
parser_conf.callbacks = callbacks
        return ParsingFrontend(lexer_conf, parser_conf, options, parser=parser)
def test_adaptive_padding():
for padding in ('same', 'corner'):
kernel_size = 16
stride = 16
dilation = 1
input = torch.rand(1, 1, 15, 17)
pool = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
out = pool(input)
assert ((out.shape[2], out.shape[3]) == (16, 32))
input = torch.rand(1, 1, 16, 17)
out = pool(input)
assert ((out.shape[2], out.shape[3]) == (16, 32))
kernel_size = (2, 2)
stride = (2, 2)
dilation = (1, 1)
adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
input = torch.rand(1, 1, 11, 13)
out = adap_pad(input)
assert ((out.shape[2], out.shape[3]) == (12, 14))
kernel_size = (2, 2)
stride = (10, 10)
dilation = (1, 1)
adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
input = torch.rand(1, 1, 10, 13)
out = adap_pad(input)
assert ((out.shape[2], out.shape[3]) == (10, 13))
kernel_size = (11, 11)
adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
input = torch.rand(1, 1, 11, 13)
out = adap_pad(input)
assert ((out.shape[2], out.shape[3]) == (21, 21))
input = torch.rand(1, 1, 11, 13)
stride = (3, 4)
kernel_size = (4, 5)
dilation = (2, 2)
adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
dilation_out = adap_pad(input)
assert ((dilation_out.shape[2], dilation_out.shape[3]) == (16, 21))
kernel_size = (7, 9)
dilation = (1, 1)
adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
kernel79_out = adap_pad(input)
assert ((kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21))
assert (kernel79_out.shape == dilation_out.shape)
with pytest.raises(AssertionError):
        AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=1)
def validate_email(x: Union[(str, pd.Series)]) -> Union[(bool, pd.Series)]:
if isinstance(x, pd.Series):
return x.apply(_check_email, clean=False)
    return _check_email(x, False)
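# validate_email dispatches on input type: a pandas Series is validated
# element-wise via _check_email, while a single string returns one bool.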
class GCNConv(MessagePassing):
def __init__(self, emb_dim):
super(GCNConv, self).__init__(aggr='add')
self.linear = torch.nn.Linear(emb_dim, emb_dim)
self.root_emb = torch.nn.Embedding(1, emb_dim)
self.edge_encoder = torch.nn.Linear(2, emb_dim)
def forward(self, x, edge_index, edge_attr):
x = self.linear(x)
edge_embedding = self.edge_encoder(edge_attr)
(row, col) = edge_index
deg = (degree(row, x.size(0), dtype=x.dtype) + 1)
deg_inv_sqrt = deg.pow((- 0.5))
deg_inv_sqrt[(deg_inv_sqrt == float('inf'))] = 0
norm = (deg_inv_sqrt[row] * deg_inv_sqrt[col])
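        # Symmetric GCN normalization: each message is scaled by
        # deg(i)^-1/2 * deg(j)^-1/2 (degrees include the +1 self-loop term);
        # the root_emb term below supplies the self-connection contribution.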
return (self.propagate(edge_index, x=x, edge_attr=edge_embedding, norm=norm) + ((F.relu((x + self.root_emb.weight)) * 1.0) / deg.view((- 1), 1)))
def message(self, x_j, edge_attr, norm):
return (norm.view((- 1), 1) * F.relu((x_j + edge_attr)))
def update(self, aggr_out):
        return aggr_out
class PcieMemoryArray():
def __getitem__(self, v):
pass
def __setitem__(self, k, v):
        pass
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input_json_file', type=str, required=True, help='A json file that contains the input samples.')
parser.add_argument('--shared_knowledge_file', type=str, default=None, help='A file that contains all the background knowledge for all samples in the input json file.')
parser.add_argument('--run_factual', action='store_true', help='Run factualness evaluation.')
parser.add_argument('--run_safety', action='store_true', help='Run safety evaluation.')
parser.add_argument('--run_constraint', action='store_true', help='Run constraint evaluation.')
parser.add_argument('--run_prompthelper', action='store_true', help='Run prompt helper.')
parser.add_argument('--run_explanation', action='store_true', help='Run explanation.')
parser.add_argument('--factual_method', type=str, default='openai/gpt-3.5-turbo', help='The model used for factualness evaluation.')
parser.add_argument('--safety_method', type=str, default='Salesforce/safety-flan-t5-base', help='The model used for safety evaluation.')
parser.add_argument('--constraint_method', type=str, default='openai/gpt-3.5-turbo', help='The model used for constraint evaluation.')
parser.add_argument('--prompthelper_method', type=str, default='openai/gpt-3.5-turbo/#critique_revision', help='The model used for prompt helper.')
parser.add_argument('--prompthelper_only_better', action='store_true', help='Only use the prompt helper if it improves the trust score.')
parser.add_argument('--explanation_method', type=str, default='openai/gpt-3.5-turbo', help='The model used for explanation.')
parser.add_argument('--output_path', type=str, default='./output', help='The path to save the output file.')
parser.add_argument('--batch_size', type=int, default=16, help='batch size for local model inference')
parser.add_argument('--use_cuda', action='store_true', help='Use GPU for local model inference.')
parser.add_argument('--gpu_device', type=int, default=0)
    return parser.parse_args()
def construct_decoders(loc: str, t: str, hidden_dim: int, nb_dims: int, name: str):
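    # Builds one tuple of linear heads per (location, type) combination; the
    # tuple length grows with how many embeddings (node/edge/graph, plus pair
    # scoring for pointer types) must be projected before decoding.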
linear = functools.partial(hk.Linear, name=f'{name}_dec_linear')
if (loc == _Location.NODE):
if (t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]):
decoders = (linear(1),)
elif (t == _Type.CATEGORICAL):
decoders = (linear(nb_dims),)
elif (t in [_Type.POINTER, _Type.PERMUTATION_POINTER]):
decoders = (linear(hidden_dim), linear(hidden_dim), linear(hidden_dim), linear(1))
else:
raise ValueError(f'Invalid Type {t}')
elif (loc == _Location.EDGE):
if (t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]):
decoders = (linear(1), linear(1), linear(1))
elif (t == _Type.CATEGORICAL):
decoders = (linear(nb_dims), linear(nb_dims), linear(nb_dims))
elif (t == _Type.POINTER):
decoders = (linear(hidden_dim), linear(hidden_dim), linear(hidden_dim), linear(hidden_dim), linear(1))
else:
raise ValueError(f'Invalid Type {t}')
elif (loc == _Location.GRAPH):
if (t in [_Type.SCALAR, _Type.MASK, _Type.MASK_ONE]):
decoders = (linear(1), linear(1))
elif (t == _Type.CATEGORICAL):
decoders = (linear(nb_dims), linear(nb_dims))
elif (t == _Type.POINTER):
decoders = (linear(1), linear(1), linear(1))
else:
raise ValueError(f'Invalid Type {t}')
else:
raise ValueError(f'Invalid Location {loc}')
    return decoders
class Trainer(object):
def __init__(self, args, task, model, criterion, dummy_batch):
if (not torch.cuda.is_available()):
raise NotImplementedError('Training on CPU is not supported')
self.args = args
self.task = task
self.criterion = criterion.cuda()
if args.fp16:
self._model = model.half().cuda()
else:
self._model = model.cuda()
self._dummy_batch = dummy_batch
self._num_updates = 0
self._optim_history = None
self._optimizer = None
self._wrapped_model = None
self.init_meters(args)
self.no_sample_size_normalization = args.no_sample_size_normalization
def init_meters(self, args):
self.meters = OrderedDict()
self.meters['train_loss'] = AverageMeter()
self.meters['train_nll_loss'] = AverageMeter()
self.meters['valid_loss'] = AverageMeter()
self.meters['valid_nll_loss'] = AverageMeter()
self.meters['wps'] = TimeMeter()
self.meters['ups'] = TimeMeter()
self.meters['wpb'] = AverageMeter()
self.meters['bsz'] = AverageMeter()
self.meters['gnorm'] = AverageMeter()
self.meters['clip'] = AverageMeter()
self.meters['oom'] = AverageMeter()
if args.fp16:
self.meters['loss_scale'] = AverageMeter()
self.meters['wall'] = TimeMeter()
self.meters['train_wall'] = StopwatchMeter()
if hasattr(self.task, 'extra_meters'):
self.meters['task'] = self.task.extra_meters()
    @property
    def model(self):
if (self._wrapped_model is None):
if (self.args.distributed_world_size > 1):
self._wrapped_model = models.DistributedFairseqModel(self.args, self._model)
else:
self._wrapped_model = self._model
return self._wrapped_model
    @property
    def optimizer(self):
if (self._optimizer is None):
self._build_optimizer()
return self._optimizer
def _build_optimizer(self):
if self.args.fp16:
if (torch.cuda.get_device_capability(0)[0] < 7):
print('| WARNING: your device does NOT support faster training with --fp16, please switch to FP32 which is likely to be faster')
params = list(filter((lambda p: p.requires_grad), self.model.parameters()))
self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params)
else:
if (torch.cuda.get_device_capability(0)[0] >= 7):
print('| NOTICE: your device may support faster training with --fp16')
self._optimizer = optim.build_optimizer(self.args, self.model.parameters())
self.lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self._optimizer)
def save_checkpoint(self, filename, extra_state):
if distributed_utils.is_master(self.args):
extra_state['train_meters'] = self.meters
utils.save_state(filename, self.args, self.get_model(), self.criterion, self.optimizer, self.lr_scheduler, self._num_updates, self._optim_history, extra_state)
def load_checkpoint(self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None):
(extra_state, self._optim_history, last_optim_state) = utils.load_model_state(filename, self.get_model())
if ((last_optim_state is not None) and (not reset_optimizer)):
self._build_optimizer()
last_optim = self._optim_history[(- 1)]
assert (last_optim['criterion_name'] == self.criterion.__class__.__name__), 'criterion does not match; please reset the optimizer (--reset-optimizer)'
assert (last_optim['optimizer_name'] == self.optimizer.__class__.__name__), 'optimizer does not match; please reset the optimizer (--reset-optimizer)'
if (not reset_lr_scheduler):
self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state'])
self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
self._num_updates = last_optim['num_updates']
else:
self._build_optimizer()
if ((extra_state is not None) and ('train_meters' in extra_state)):
self.meters.update(extra_state['train_meters'])
del extra_state['train_meters']
for meter in self.meters.values():
if isinstance(meter, TimeMeter):
meter.reset()
return extra_state
def train_step(self, samples, dummy_batch=False):
seed = (self.args.seed + self.get_num_updates())
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self.model.train()
self.zero_grad()
if (not dummy_batch):
self.meters['train_wall'].start()
(logging_outputs, sample_sizes, ooms) = ([], [], 0)
for (i, sample) in enumerate(samples):
sample = self._prepare_sample(sample)
if (sample is None):
sample = self._prepare_sample(self._dummy_batch)
ignore_grad = True
else:
ignore_grad = False
try:
(loss, sample_size, logging_output) = self.task.get_loss(self.model, self.criterion, sample)
if ignore_grad:
loss *= 0
if (self.args.distributed_world_size > 1):
if (i < (len(samples) - 1)):
self.model.need_reduction = False
else:
self.model.need_reduction = True
self.optimizer.backward(loss)
if (not ignore_grad):
logging_outputs.append(logging_output)
sample_sizes.append(sample_size)
except RuntimeError as e:
if ('out of memory' in str(e)):
print('| WARNING: ran out of memory, skipping batch')
ooms += 1
self.zero_grad()
else:
raise e
if dummy_batch:
return None
if (self.args.distributed_world_size > 1):
(logging_outputs, sample_sizes, ooms) = zip(*distributed_utils.all_gather_list([logging_outputs, sample_sizes, ooms]))
logging_outputs = list(chain.from_iterable(logging_outputs))
sample_sizes = list(chain.from_iterable(sample_sizes))
ooms = sum(ooms)
if (ooms == self.args.distributed_world_size):
print('| WARNING: OOM in all workers, skipping update')
self.zero_grad()
return None
logging_output = self.criterion._aggregate_logging_outputs(logging_outputs)
sample_size = self.criterion.__class__.grad_denom(sample_sizes)
if (not all(((k in logging_output) for k in ['ntokens', 'nsentences']))):
raise Exception('Please update the {}.aggregate_logging_outputs() method to return ntokens and nsentences'.format(self.criterion.__class__.__name__))
try:
if self.no_sample_size_normalization:
self.optimizer.multiply_grads(self.args.distributed_world_size)
else:
self.optimizer.multiply_grads((self.args.distributed_world_size / float(sample_size)))
grad_norm = self.optimizer.clip_grad_norm(self.args.clip_norm)
self.optimizer.step()
self._num_updates += 1
self.lr_scheduler.step_update(self._num_updates)
ntokens = logging_output.get('ntokens', 0)
nsentences = logging_output.get('nsentences', 0)
self.meters['wps'].update(ntokens)
self.meters['ups'].update(1.0)
self.meters['wpb'].update(ntokens)
self.meters['bsz'].update(nsentences)
self.meters['gnorm'].update(grad_norm)
self.meters['clip'].update((1.0 if ((grad_norm > self.args.clip_norm) and (self.args.clip_norm > 0)) else 0.0))
self.meters['oom'].update(ooms)
self.meters['train_loss'].update(logging_output.get('loss', 0), sample_size)
if ('nll_loss' in logging_output):
self.meters['train_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens)
except OverflowError as e:
print(('| WARNING: overflow detected, ' + str(e)))
self.zero_grad()
logging_output = None
if self.args.fp16:
self.meters['loss_scale'].reset()
self.meters['loss_scale'].update(self.optimizer.scaler.loss_scale)
self.meters['train_wall'].stop()
return logging_output
def valid_step(self, sample, raise_oom=False):
with torch.no_grad():
self.model.eval()
sample = self._prepare_sample(sample)
if (sample is None):
sample = self._prepare_sample(self._dummy_batch)
ignore_results = True
else:
ignore_results = False
try:
(_loss, sample_size, logging_output) = self.task.get_loss(self.model, self.criterion, sample, is_valid=True)
except RuntimeError as e:
if (('out of memory' in str(e)) and (not raise_oom)):
print('| WARNING: ran out of memory, retrying batch')
for p in self.model.parameters():
if (p.grad is not None):
del p.grad
torch.cuda.empty_cache()
return self.valid_step(sample, raise_oom=True)
else:
raise e
if ignore_results:
(logging_output, sample_size) = ({}, 0)
if (self.args.distributed_world_size > 1):
(logging_output, sample_size) = zip(*distributed_utils.all_gather_list([logging_output, sample_size]))
logging_output = list(logging_output)
sample_size = list(sample_size)
else:
logging_output = [logging_output]
sample_size = [sample_size]
if hasattr(self.task, 'aggregate_extra_metrics'):
extra_metrics = self.task.aggregate_extra_metrics(logging_output)
else:
extra_metrics = None
logging_output = self.criterion._aggregate_logging_outputs(logging_output)
sample_size = self.criterion.__class__.grad_denom(sample_size)
if (extra_metrics is not None):
logging_output['extra_metrics'] = extra_metrics
ntokens = logging_output.get('ntokens', 0)
self.meters['valid_loss'].update(logging_output.get('loss', 0), sample_size)
if ('nll_loss' in logging_output):
self.meters['valid_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens)
if ('extra_metrics' in logging_output):
for (n, m) in self.meters['task'].items():
m.update(*logging_output['extra_metrics'][n])
return logging_output
def dummy_train_step(self, dummy_batch):
self.train_step(dummy_batch, dummy_batch=True)
self.zero_grad()
def zero_grad(self):
self.optimizer.zero_grad()
def lr_step(self, epoch, val_loss=None):
return self.lr_scheduler.step(epoch, val_loss)
def lr_step_update(self, num_updates):
return self.lr_scheduler.step_update(num_updates)
def get_lr(self):
return self.optimizer.get_lr()
def get_model(self):
return self._model
def get_meter(self, name):
if (name not in self.meters):
return None
return self.meters[name]
def get_num_updates(self):
return self._num_updates
def _prepare_sample(self, sample):
if ((sample is None) or (len(sample) == 0)):
return None
        return utils.move_to_cuda(sample)
class DyRepMemory(torch.nn.Module):
def __init__(self, num_nodes: int, raw_msg_dim: int, memory_dim: int, time_dim: int, message_module: Callable, aggregator_module: Callable, memory_updater_type: str, use_src_emb_in_msg: bool=False, use_dst_emb_in_msg: bool=False):
super().__init__()
self.num_nodes = num_nodes
self.raw_msg_dim = raw_msg_dim
self.memory_dim = memory_dim
self.time_dim = time_dim
self.msg_s_module = message_module
self.msg_d_module = copy.deepcopy(message_module)
self.aggr_module = aggregator_module
self.time_enc = TimeEncoder(time_dim)
        assert memory_updater_type in ['gru', 'rnn'], 'Memory updater can be either `rnn` or `gru`.'
if (memory_updater_type == 'gru'):
self.memory_updater = GRUCell(message_module.out_channels, memory_dim)
elif (memory_updater_type == 'rnn'):
self.memory_updater = RNNCell(message_module.out_channels, memory_dim)
else:
raise ValueError("Undefined memory updater!!! Memory updater can be either 'gru' or 'rnn'.")
self.use_src_emb_in_msg = use_src_emb_in_msg
self.use_dst_emb_in_msg = use_dst_emb_in_msg
self.register_buffer('memory', torch.empty(num_nodes, memory_dim))
last_update = torch.empty(self.num_nodes, dtype=torch.long)
self.register_buffer('last_update', last_update)
self.register_buffer('_assoc', torch.empty(num_nodes, dtype=torch.long))
self.msg_s_store = {}
self.msg_d_store = {}
self.reset_parameters()
    @property
    def device(self) -> torch.device:
return self.time_enc.lin.weight.device
def reset_parameters(self):
if hasattr(self.msg_s_module, 'reset_parameters'):
self.msg_s_module.reset_parameters()
if hasattr(self.msg_d_module, 'reset_parameters'):
self.msg_d_module.reset_parameters()
if hasattr(self.aggr_module, 'reset_parameters'):
self.aggr_module.reset_parameters()
self.time_enc.reset_parameters()
self.memory_updater.reset_parameters()
self.reset_state()
def reset_state(self):
zeros(self.memory)
zeros(self.last_update)
self._reset_message_store()
def detach(self):
self.memory.detach_()
def forward(self, n_id: Tensor) -> Tuple[(Tensor, Tensor)]:
if self.training:
(memory, last_update) = self._get_updated_memory(n_id)
else:
(memory, last_update) = (self.memory[n_id], self.last_update[n_id])
return (memory, last_update)
def update_state(self, src: Tensor, dst: Tensor, t: Tensor, raw_msg: Tensor, embeddings: Tensor=None, assoc: Tensor=None):
n_id = torch.cat([src, dst]).unique()
if self.training:
self._update_memory(n_id, embeddings, assoc)
self._update_msg_store(src, dst, t, raw_msg, self.msg_s_store)
self._update_msg_store(dst, src, t, raw_msg, self.msg_d_store)
else:
self._update_msg_store(src, dst, t, raw_msg, self.msg_s_store)
self._update_msg_store(dst, src, t, raw_msg, self.msg_d_store)
self._update_memory(n_id, embeddings, assoc)
def _reset_message_store(self):
i = self.memory.new_empty((0,), device=self.device, dtype=torch.long)
msg = self.memory.new_empty((0, self.raw_msg_dim), device=self.device)
self.msg_s_store = {j: (i, i, i, msg) for j in range(self.num_nodes)}
self.msg_d_store = {j: (i, i, i, msg) for j in range(self.num_nodes)}
def _update_memory(self, n_id: Tensor, embeddings: Tensor=None, assoc: Tensor=None):
(memory, last_update) = self._get_updated_memory(n_id, embeddings, assoc)
self.memory[n_id] = memory
self.last_update[n_id] = last_update
def _get_updated_memory(self, n_id: Tensor, embeddings: Tensor=None, assoc: Tensor=None) -> Tuple[(Tensor, Tensor)]:
self._assoc[n_id] = torch.arange(n_id.size(0), device=n_id.device)
(msg_s, t_s, src_s, dst_s) = self._compute_msg(n_id, self.msg_s_store, self.msg_s_module, embeddings, assoc)
(msg_d, t_d, src_d, dst_d) = self._compute_msg(n_id, self.msg_d_store, self.msg_d_module, embeddings, assoc)
idx = torch.cat([src_s, src_d], dim=0)
msg = torch.cat([msg_s, msg_d], dim=0)
t = torch.cat([t_s, t_d], dim=0)
aggr = self.aggr_module(msg, self._assoc[idx], t, n_id.size(0))
memory = self.memory_updater(aggr, self.memory[n_id])
dim_size = self.last_update.size(0)
last_update = scatter(t, idx, 0, dim_size, reduce='max')[n_id]
return (memory, last_update)
def _update_msg_store(self, src: Tensor, dst: Tensor, t: Tensor, raw_msg: Tensor, msg_store: TGNMessageStoreType):
(n_id, perm) = src.sort()
(n_id, count) = n_id.unique_consecutive(return_counts=True)
for (i, idx) in zip(n_id.tolist(), perm.split(count.tolist())):
msg_store[i] = (src[idx], dst[idx], t[idx], raw_msg[idx])
def _compute_msg(self, n_id: Tensor, msg_store: TGNMessageStoreType, msg_module: Callable, embeddings: Tensor=None, assoc: Tensor=None):
data = [msg_store[i] for i in n_id.tolist()]
(src, dst, t, raw_msg) = list(zip(*data))
src = torch.cat(src, dim=0)
dst = torch.cat(dst, dim=0)
t = torch.cat(t, dim=0)
raw_msg = torch.cat(raw_msg, dim=0)
t_rel = (t - self.last_update[src])
t_enc = self.time_enc(t_rel.to(raw_msg.dtype))
source_memory = self.memory[src]
        if self.use_src_emb_in_msg and (embeddings is not None):
if (src.size(0) > 0):
(curr_src, curr_src_idx) = ([], [])
for (s_idx, s) in enumerate(src):
if (s in n_id):
curr_src.append(s.item())
curr_src_idx.append(s_idx)
source_memory[curr_src_idx] = embeddings[assoc[curr_src]]
destination_memory = self.memory[dst]
        if self.use_dst_emb_in_msg and (embeddings is not None):
if (dst.size(0) > 0):
(curr_dst, curr_dst_idx) = ([], [])
for (d_idx, d) in enumerate(dst):
if (d in n_id):
curr_dst.append(d.item())
curr_dst_idx.append(d_idx)
destination_memory[curr_dst_idx] = embeddings[assoc[curr_dst]]
msg = msg_module(source_memory, destination_memory, raw_msg, t_enc)
return (msg, t, src, dst)
def train(self, mode: bool=True):
if (self.training and (not mode)):
self._update_memory(torch.arange(self.num_nodes, device=self.memory.device))
self._reset_message_store()
        super().train(mode)
def real_image3d():
img = imread(os.path.join(_root_dir(), 'data', 'img3d.tif'))
mask = imread(os.path.join(_root_dir(), 'data', 'mask3d.tif'))
    return (img, mask)
class ChamferDistanceL1(torch.nn.Module):
def __init__(self, ignore_zeros=False):
super().__init__()
self.ignore_zeros = ignore_zeros
def forward(self, xyz1, xyz2):
batch_size = xyz1.size(0)
if ((batch_size == 1) and self.ignore_zeros):
non_zeros1 = torch.sum(xyz1, dim=2).ne(0)
non_zeros2 = torch.sum(xyz2, dim=2).ne(0)
xyz1 = xyz1[non_zeros1].unsqueeze(dim=0)
xyz2 = xyz2[non_zeros2].unsqueeze(dim=0)
(dist1, dist2) = ChamferFunction.apply(xyz1, xyz2)
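        # ChamferFunction returns squared nearest-neighbour distances in both
        # directions; the square roots below give the L1 (non-squared) variant.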
dist1 = torch.sqrt(dist1)
dist2 = torch.sqrt(dist2)
        return ((torch.mean(dist1) + torch.mean(dist2)) / 2)
class OptimizerGroupWrapper():
def __init__(self, optimizers, max_optimization_epochs=1, minibatch_size=None):
self._optimizers = optimizers
self._max_optimization_epochs = max_optimization_epochs
self._minibatch_size = minibatch_size
def get_minibatch(self, data, max_optimization_epochs=None):
batch_dataset = DictBatchDataset(data, self._minibatch_size)
if (max_optimization_epochs is None):
max_optimization_epochs = self._max_optimization_epochs
for _ in range(max_optimization_epochs):
for dataset in batch_dataset.iterate():
(yield dataset)
def zero_grad(self, keys=None):
if (keys is None):
keys = self._optimizers.keys()
for key in keys:
self._optimizers[key].zero_grad()
def step(self, keys=None, **closure):
if (keys is None):
keys = self._optimizers.keys()
for key in keys:
self._optimizers[key].step(**closure)
def target_parameters(self, keys=None):
if (keys is None):
keys = self._optimizers.keys()
for key in keys:
for pg in self._optimizers[key].param_groups:
for p in pg['params']:
(yield p) |
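# Hedged usage sketch for OptimizerGroupWrapper above: two torch optimizers
# are wrapped under string keys and stepped selectively. Only the
# zero_grad/step plumbing is exercised, so any torch modules will do;
# get_minibatch additionally assumes the DictBatchDataset helper.
import torch

policy = torch.nn.Linear(4, 2)
vf = torch.nn.Linear(4, 1)
opts = OptimizerGroupWrapper({'policy': torch.optim.Adam(policy.parameters(), lr=0.001), 'vf': torch.optim.Adam(vf.parameters(), lr=0.001)})
loss = policy(torch.rand(8, 4)).sum() + vf(torch.rand(8, 4)).sum()
opts.zero_grad()            # zeroes every wrapped optimizer
loss.backward()
opts.step(keys=['policy'])  # update only the policy parameters |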
class GlobalFeatureExtractor(nn.Module):
def __init__(self, in_channels=64, block_channels=(64, 96, 128), out_channels=128, expand_ratio=6, num_blocks=(3, 3, 3), strides=(2, 2, 1), pool_scales=(1, 2, 3, 6), conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), align_corners=False):
super(GlobalFeatureExtractor, self).__init__()
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
assert (len(block_channels) == len(num_blocks) == 3)
self.bottleneck1 = self._make_layer(in_channels, block_channels[0], num_blocks[0], strides[0], expand_ratio)
self.bottleneck2 = self._make_layer(block_channels[0], block_channels[1], num_blocks[1], strides[1], expand_ratio)
self.bottleneck3 = self._make_layer(block_channels[1], block_channels[2], num_blocks[2], strides[2], expand_ratio)
self.ppm = PPM(pool_scales, block_channels[2], (block_channels[2] // 4), conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, align_corners=align_corners)
self.out = ConvModule((block_channels[2] * 2), out_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
def _make_layer(self, in_channels, out_channels, blocks, stride=1, expand_ratio=6):
layers = [InvertedResidual(in_channels, out_channels, stride, expand_ratio, norm_cfg=self.norm_cfg)]
for i in range(1, blocks):
layers.append(InvertedResidual(out_channels, out_channels, 1, expand_ratio, norm_cfg=self.norm_cfg))
return nn.Sequential(*layers)
def forward(self, x):
x = self.bottleneck1(x)
x = self.bottleneck2(x)
x = self.bottleneck3(x)
x = torch.cat([x, *self.ppm(x)], dim=1)
x = self.out(x)
return x |
class cv_colors(Enum):
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
PURPLE = (247, 44, 200)
ORANGE = (44, 162, 247)
MINT = (239, 255, 66)
YELLOW = (2, 255, 250) |
class Associahedron_class_base():
def __new__(typ, parent=None, Vrep=None, Hrep=None, cartan_type=None, **kwds):
if (cartan_type or ((parent is None) and (Vrep is None) and (Hrep is None))):
return super().__new__(typ, parent, Vrep, Hrep, **kwds)
else:
mro = typ.mro()
for typ1 in mro:
if (typ1 in ancestors_of_associahedron):
return typ1(parent, Vrep, Hrep, **kwds)
raise ValueError('could not determine a parent class')
def __init__(self, parent, Vrep, Hrep, cartan_type=None, **kwds):
if cartan_type:
self._cartan_type = cartan_type
super().__init__(parent, Vrep, Hrep, **kwds)
else:
raise ValueError('associahedron must be initialized with cartan type')
def _repr_(self):
msg = 'Generalized associahedron of type {} with {} vertices'
return msg.format(self._cartan_type, self.n_vertices())
def cartan_type(self):
return self._cartan_type
def vertices_in_root_space(self):
root_space = self._cartan_type.root_system().root_space()
return tuple((root_space.from_vector(vector(V)) for V in self.vertex_generator())) |
class ConvSecondMomentNet(torch.nn.Module):
def __init__(self):
super(ConvSecondMomentNet, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 1, kernel_size=1, stride=1)
self.conv1 = conv_weight_change(self.conv1)
self.bn = torch.nn.BatchNorm2d(1)
self.bn = bn_weight_change(self.bn)
def forward(self, inp):
x = self.conv1(inp)
x = self.bn(x)
x = torch.relu(x)
return (x + inp) |
class CgpInfoConvSet(object):
def __init__(self, rows=30, cols=40, level_back=40, min_active_num=8, max_active_num=50):
self.input_num = 1
self.func_type = ['ConvBlock32_3', 'ConvBlock32_5', 'ConvBlock64_3', 'ConvBlock64_5', 'ConvBlock128_3', 'ConvBlock128_5', 'pool_max', 'pool_ave', 'concat', 'sum']
self.func_in_num = [1, 1, 1, 1, 1, 1, 1, 1, 2, 2]
self.out_num = 1
self.out_type = ['full']
self.out_in_num = [1]
self.rows = rows
self.cols = cols
self.node_num = (rows * cols)
self.level_back = level_back
self.min_active_num = min_active_num
self.max_active_num = max_active_num
self.func_type_num = len(self.func_type)
self.out_type_num = len(self.out_type)
self.max_in_num = np.max([np.max(self.func_in_num), np.max(self.out_in_num)]) |
class JNDDataset(BaseDataset):
def initialize(self, dataroot, load_size=64):
self.root = dataroot
self.load_size = load_size
self.dir_p0 = os.path.join(self.root, 'p0')
self.p0_paths = make_dataset(self.dir_p0)
self.p0_paths = sorted(self.p0_paths)
self.dir_p1 = os.path.join(self.root, 'p1')
self.p1_paths = make_dataset(self.dir_p1)
self.p1_paths = sorted(self.p1_paths)
transform_list = []
transform_list.append(transforms.Resize(load_size))
transform_list += [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
self.transform = transforms.Compose(transform_list)
self.dir_S = os.path.join(self.root, 'same')
self.same_paths = make_dataset(self.dir_S, mode='np')
self.same_paths = sorted(self.same_paths)
def __getitem__(self, index):
p0_path = self.p0_paths[index]
p0_img_ = Image.open(p0_path).convert('RGB')
p0_img = self.transform(p0_img_)
p1_path = self.p1_paths[index]
p1_img_ = Image.open(p1_path).convert('RGB')
p1_img = self.transform(p1_img_)
same_path = self.same_paths[index]
same_img = np.load(same_path).reshape((1, 1, 1))
same_img = torch.FloatTensor(same_img)
return {'p0': p0_img, 'p1': p1_img, 'same': same_img, 'p0_path': p0_path, 'p1_path': p1_path, 'same_path': same_path}
def __len__(self):
return len(self.p0_paths) |
def test_mean_agg_zero_neighbours():
agg = MeanAggregator(4, bias=False, act=(lambda x: x), kernel_initializer='ones')
inp1 = keras.Input(shape=(1, 2))
inp2 = keras.Input(shape=(1, 0, 2))
out = agg([inp1, inp2])
model = keras.Model(inputs=[inp1, inp2], outputs=out)
x1 = np.array([[[1, 1]]])
x2 = np.zeros((1, 1, 0, 2))
actual = model.predict([x1, x2])
expected = np.array([[[2, 2, 2, 2]]])
assert (expected == pytest.approx(actual)) |
class BasicBlock1d(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, kernel_size=[3, 3], downsample=None):
super().__init__()
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
self.conv1 = conv(inplanes, planes, stride=stride, kernel_size=kernel_size[0])
self.bn1 = nn.BatchNorm1d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv(planes, planes, kernel_size=kernel_size[1])
self.bn2 = nn.BatchNorm1d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = (x if (self.downsample is None) else self.downsample(x))
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x += residual
x = self.relu(x)
return x |
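# Hedged usage sketch: `conv` above is assumed to be a padded 1-d convolution
# helper along the lines of the stub below; with stride=1 and no downsample
# the block is shape-preserving, so the residual addition is valid.
import torch
import torch.nn as nn

def conv(in_planes, out_planes, kernel_size=3, stride=1):
    return nn.Conv1d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False)

block = BasicBlock1d(inplanes=64, planes=64)
x = torch.rand(8, 64, 1000)  # (batch, channels, signal length)
print(block(x).shape)        # torch.Size([8, 64, 1000]) |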
def im_detect_bbox_aug(model, images, device):
boxlists_ts = []
for _ in range(len(images)):
boxlists_ts.append([])
def add_preds_t(boxlists_t):
for (i, boxlist_t) in enumerate(boxlists_t):
if (len(boxlists_ts[i]) == 0):
boxlists_ts[i].append(boxlist_t)
else:
boxlists_ts[i].append(boxlist_t.resize(boxlists_ts[i][0].size))
boxlists_i = im_detect_bbox(model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device)
add_preds_t(boxlists_i)
if cfg.TEST.BBOX_AUG.H_FLIP:
boxlists_hf = im_detect_bbox_hflip(model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device)
add_preds_t(boxlists_hf)
for scale in cfg.TEST.BBOX_AUG.SCALES:
max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
boxlists_scl = im_detect_bbox_scale(model, images, scale, max_size, device)
add_preds_t(boxlists_scl)
if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
boxlists_scl_hf = im_detect_bbox_scale(model, images, scale, max_size, device, hflip=True)
add_preds_t(boxlists_scl_hf)
boxlists = []
for (i, boxlist_ts) in enumerate(boxlists_ts):
bbox = torch.cat([boxlist_t.bbox for boxlist_t in boxlist_ts])
scores = torch.cat([boxlist_t.get_field('scores') for boxlist_t in boxlist_ts])
labels = torch.cat([boxlist_t.get_field('labels') for boxlist_t in boxlist_ts])
boxlist = BoxList(bbox, boxlist_ts[0].size, boxlist_ts[0].mode)
boxlist.add_field('scores', scores)
boxlist.add_field('labels', labels)
boxlists.append(boxlist)
post_processor = make_fcos_postprocessor(cfg)
results = post_processor.select_over_all_levels(boxlists)
return results |
def validate_rst_syntax(text, name, dots=True):
if (text is None):
if dots:
output_dot('E')
return (False, f'ERROR: {name}: no documentation')
ok_unknown_items = set(['mod', 'currentmodule', 'autosummary', 'data', 'legacy', 'obj', 'versionadded', 'versionchanged', 'module', 'class', 'meth', 'ref', 'func', 'toctree', 'moduleauthor', 'deprecated', 'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'])
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ('http://foo', name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(text, token, settings_overrides=dict(halt_level=5, traceback=True, default_reference_context='title-reference', default_role='emphasis', link_base='', resolve_name=resolve, stylesheet_path='', raw_enabled=0, file_insertion_enabled=0, warning_stream=error_stream))
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ''
for error in errors:
lines = error.splitlines()
if (not lines):
continue
m = re.match('.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if (m.group(1) in ok_unknown_items):
continue
m = re.match('.*Error in "math" directive:.*unknown option: "label"', ' '.join(lines), re.S)
if m:
continue
output += ((((name + lines[0]) + '::\n ') + '\n '.join(lines[1:]).rstrip()) + '\n')
success = False
if (not success):
output += ((' ' + ('-' * 72)) + '\n')
for (lineno, line) in enumerate(text.splitlines()):
output += (' %-4d %s\n' % ((lineno + 1), line))
output += ((' ' + ('-' * 72)) + '\n\n')
if dots:
output_dot(('.' if success else 'F'))
return (success, output) |
class LSTMTrain(object):
def __init__(self, model, iteration, learning_rate, paths_between_pairs, positive_label, all_variables, all_user, all_movie):
super(LSTMTrain, self).__init__()
self.model = model
self.iteration = iteration
self.learning_rate = learning_rate
self.paths_between_pairs = paths_between_pairs
self.positive_label = positive_label
self.all_variables = all_variables
self.all_user = all_user
self.all_movie = all_movie
def dump_post_embedding(self):
embedding_dict = {}
node_list = (self.all_user + self.all_movie)
for node in node_list:
node_id = torch.LongTensor([int(self.all_variables[node])])
node_id = Variable(node_id)
if torch.cuda.is_available():
node_id = node_id.cuda()
node_embedding = self.model.embedding(node_id).squeeze().cpu().data.numpy()
if (node not in embedding_dict):
embedding_dict.update({node: node_embedding})
return embedding_dict
def train(self):
criterion = nn.BCELoss()
optimizer = optim.SGD(self.model.parameters(), lr=self.learning_rate)
for epoch in range(self.iteration):
running_loss = 0.0
data_size = len(self.paths_between_pairs)
label = Variable(torch.Tensor())
for pair in self.paths_between_pairs:
paths_between_one_pair = self.paths_between_pairs[pair]
paths_between_one_pair_size = len(paths_between_one_pair)
paths_between_one_pair_id = []
for path in paths_between_one_pair:
path_id = [self.all_variables[x] for x in path]
paths_between_one_pair_id.append(path_id)
paths_between_one_pair_id = np.array(paths_between_one_pair_id)
paths_between_one_pair_id = Variable(torch.LongTensor(paths_between_one_pair_id))
if torch.cuda.is_available():
paths_between_one_pair_id = paths_between_one_pair_id.cuda()
out = self.model(paths_between_one_pair_id)
out = out.squeeze()
if (pair in self.positive_label):
label = Variable(torch.Tensor([1]))
else:
label = Variable(torch.Tensor([0]))
loss = criterion(out.cpu(), label)
running_loss += (loss.item() * label.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(((('epoch[' + str(epoch)) + ']: loss is ') + str(running_loss)))
return self.dump_post_embedding() |
def add_csc_loss(model, cpg_blob='cpg', cls_prob_blob='cls_prob', rois_pred_blob='rois_pred', rois_blob='rois', loss_weight=1.0, csc_layer='CSC', prefix='', **kwargs):
csc_func = getattr(model.net, csc_layer)
csc_args = {}
csc_args['tau'] = cfg.WSL.CPG_TAU
csc_args['max_iter'] = cfg.WSL.CSC_MAX_ITER
csc_args['fg_threshold'] = cfg.WSL.CSC_FG_THRESHOLD
csc_args['mass_threshold'] = cfg.WSL.CSC_MASS_THRESHOLD
csc_args['density_threshold'] = cfg.WSL.CSC_DENSITY_THRESHOLD
csc_args.update(kwargs)
(csc, labels_oh_pos, labels_oh_neg) = csc_func([cpg_blob, 'labels_oh', cls_prob_blob, rois_blob], [(prefix + 'csc'), (prefix + 'labels_oh_pos'), (prefix + 'labels_oh_neg')], **csc_args)
model.net.CSCConstraint([rois_pred_blob, csc], [(prefix + 'rois_pred_pos'), (prefix + 'csc_pos')], polar=True)
model.net.CSCConstraint([rois_pred_blob, csc], [(prefix + 'rois_pred_neg'), (prefix + 'csc_neg')], polar=False)
add_cls_pred((prefix + 'rois_pred_pos'), (prefix + 'cls_prob_pos'), model)
add_cls_pred((prefix + 'rois_pred_neg'), (prefix + 'cls_prob_neg'), model)
weight = None
add_cross_entropy_loss(model, (prefix + 'cls_prob_pos'), (prefix + 'labels_oh_pos'), (prefix + 'cross_entropy_pos'), cpg=cpg_blob, weight=weight)
add_cross_entropy_loss(model, (prefix + 'cls_prob_neg'), (prefix + 'labels_oh_neg'), (prefix + 'cross_entropy_neg'), cpg=cpg_blob, weight=weight)
loss_cls_pos = model.net.AveragedLoss([(prefix + 'cross_entropy_pos')], [(prefix + 'loss_cls_pos')])
loss_cls_neg = model.net.AveragedLoss([(prefix + 'cross_entropy_neg')], [(prefix + 'loss_cls_neg')])
loss_gradients = get_loss_gradients_weighted(model, [loss_cls_pos, loss_cls_neg], loss_weight)
model.Accuracy([(prefix + 'cls_prob_pos'), 'labels_int32'], (prefix + 'accuracy_cls_pos'))
model.AddLosses([(prefix + 'loss_cls_pos'), (prefix + 'loss_cls_neg')])
model.AddMetrics([(prefix + 'accuracy_cls_pos')])
return loss_gradients |
def launch_ps(task, config):
ps_info = config.resource_info['ps'][task]
_prepare_ps(ps_info)
cmd = _get_launch_ps_cmd(task, config)
env = _get_ps_env(ps_info, config)
if (config.redirect_path is not None):
(stdout, stderr) = _create_log_files(config.redirect_path, 'ps', task)
logfiles = [stdout, stderr]
else:
(stdout, stderr) = (None, None)
logfiles = []
try:
python_venv = os.environ['VIRTUAL_ENV']
except KeyError:
python_venv = None
return (remote_exec(cmd, ps_info['hostname'], stdout, stderr, env, python_venv=python_venv), logfiles) |
@wrap_experiment
def multi_env_ppo(ctxt=None, seed=1):
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env1 = GarageEnv(normalize(gym.make('Adventure-ram-v4')))
env2 = GarageEnv(normalize(gym.make('Alien-ram-v4')))
env = MultiEnvWrapper([env1, env2])
policy = CategoricalMLPPolicy(env_spec=env.spec, hidden_nonlinearity=tf.nn.tanh)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = PPO(env_spec=env.spec, policy=policy, baseline=baseline, max_path_length=100, discount=0.99, gae_lambda=0.95, lr_clip_range=0.2, policy_ent_coeff=0.0, optimizer_args=dict(batch_size=32, max_epochs=10, learning_rate=0.001))
runner.setup(algo, env)
runner.train(n_epochs=120, batch_size=2048, plot=False) |
def maybe_zero_3(param, ignore_status=False, name=None):
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
if hasattr(param, 'ds_id'):
if (param.ds_status == ZeroParamStatus.NOT_AVAILABLE):
if (not ignore_status):
logging.warning(f'{name}: param.ds_status != ZeroParamStatus.NOT_AVAILABLE: {param.ds_status}')
with zero.GatheredParameters([param]):
param = param.data.detach().cpu().clone()
else:
param = param.detach().cpu().clone()
return param |
def get_score(submission_folder='../env'):
submission_path = os.path.join(submission_folder, 'submission.csv')
solution = pd.read_csv(os.path.join(os.path.dirname(__file__), 'answer.csv'))[DIMENSIONS].to_numpy()
submission = pd.read_csv(submission_path)[DIMENSIONS].to_numpy()
metrics = compute_metrics_for_regression(solution, submission)
return np.mean(list(metrics.values())) |
class LocalQueue():
ops = 0
stored = 0
uid = 0
empty = 0
def __init__(self, name='unnamed'):
self.items = []
self.name = name
self.uid = LocalQueue.uid
LocalQueue.uid += 1
def put(self, item, block=True):
LocalQueue.ops += 1
LocalQueue.stored += 1
self.items.append(item)
def get(self, block=True, timeout=0):
LocalQueue.ops += 1
if (not len(self.items)):
LocalQueue.empty += 1
raise Exception('LocalQueue is empty')
LocalQueue.stored -= 1
return self.items.pop() |
def train_step():
model.train()
model.zero_grad()
(data, label, op) = rules(args.batch_size, args.seq_len, args.gt_rules, 2, args.search_version, args.data_seed)
data = torch.Tensor(data).to(device)
label = torch.Tensor(label).to(device)
op = torch.Tensor(op).to(device)
(out, score) = model(data, op)
loss = criterion(out, label)
loss.backward()
optimizer.step()
return loss |
def _recurse_unknown_any(layout: ak.contents.EmptyArray, type_: ak.types.Type) -> ak.contents.Content:
type_form = ak.forms.from_type(type_)
return type_form.length_zero_array(highlevel=False).copy(parameters=type_._parameters) |
def cantor_reduction(a, b, f, h, genus):
assert (a.degree() < ((2 * genus) + 1))
assert (b.degree() < a.degree())
k = ((f - (h * b)) - (b ** 2))
if ((2 * a.degree()) == k.degree()):
g1 = a.degree()
x = a.parent().gen()
r = (((x ** 2) + (h[g1] * x)) - f[(2 * g1)]).roots()[0][0]
b = (b + (r * ((x ** g1) - ((x ** g1) % a))))
k = ((f - (h * b)) - (b ** 2))
assert ((k % a) == 0)
a = (k // a).monic()
b = ((- (b + h)) % a)
if (a.degree() > genus):
return cantor_reduction(a, b, f, h, genus)
return (a, b) |
class MaxPool3dSamePadding(nn.MaxPool3d):
def compute_pad(self, dim, s):
if ((s % self.stride[dim]) == 0):
return max((self.kernel_size[dim] - self.stride[dim]), 0)
else:
return max((self.kernel_size[dim] - (s % self.stride[dim])), 0)
def forward(self, x):
(batch, channel, t, h, w) = x.size()
out_t = np.ceil((float(t) / float(self.stride[0])))
out_h = np.ceil((float(h) / float(self.stride[1])))
out_w = np.ceil((float(w) / float(self.stride[2])))
pad_t = self.compute_pad(0, t)
pad_h = self.compute_pad(1, h)
pad_w = self.compute_pad(2, w)
pad_t_f = (pad_t // 2)
pad_t_b = (pad_t - pad_t_f)
pad_h_f = (pad_h // 2)
pad_h_b = (pad_h - pad_h_f)
pad_w_f = (pad_w // 2)
pad_w_b = (pad_w - pad_w_f)
pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b)
x = F.pad(x, pad)
return super(MaxPool3dSamePadding, self).forward(x) |
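# Hedged shape check for MaxPool3dSamePadding above (relying on the snippet's
# own numpy/F imports): with "same" padding each output size equals
# ceil(input / stride), matching the out_t/out_h/out_w computed in forward().
import torch

pool = MaxPool3dSamePadding(kernel_size=(3, 3, 3), stride=(2, 2, 2))
x = torch.rand(1, 1, 7, 9, 11)
print(pool(x).shape)  # torch.Size([1, 1, 4, 5, 6]) == ceil(7/2), ceil(9/2), ceil(11/2) |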
class IntRange(IntParamType):
name = 'integer range'
def __init__(self, min=None, max=None, clamp=False):
self.min = min
self.max = max
self.clamp = clamp
def convert(self, value, param, ctx):
rv = IntParamType.convert(self, value, param, ctx)
if self.clamp:
if ((self.min is not None) and (rv < self.min)):
return self.min
if ((self.max is not None) and (rv > self.max)):
return self.max
if (((self.min is not None) and (rv < self.min)) or ((self.max is not None) and (rv > self.max))):
if (self.min is None):
self.fail('{} is bigger than the maximum valid value {}.'.format(rv, self.max), param, ctx)
elif (self.max is None):
self.fail('{} is smaller than the minimum valid value {}.'.format(rv, self.min), param, ctx)
else:
self.fail('{} is not in the valid range of {} to {}.'.format(rv, self.min, self.max), param, ctx)
return rv
def __repr__(self):
return 'IntRange({}, {})'.format(self.min, self.max) |
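# Hedged usage sketch, assuming IntParamType is click's integer type (so this
# class mirrors click.IntRange). With clamp=True, out-of-range values are
# clipped to the nearest bound instead of triggering self.fail().
r = IntRange(0, 10, clamp=True)
print(r.convert('15', None, None))  # 10, clamped to max
print(r.convert('-3', None, None))  # 0, clamped to min
strict = IntRange(0, 10)
print(strict.convert('5', None, None))  # 5; '15' would raise via self.fail() |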
def odometry_residual(pose_a: sf.Pose2, pose_b: sf.Pose2, dist: sf.Scalar, epsilon: sf.Scalar) -> sf.V1:
return sf.V1(((pose_b.t - pose_a.t).norm(epsilon=epsilon) - dist)) |
@SEM_SEG_HEADS_REGISTRY.register()
class TransformerEncoderPixelDecoder(BasePixelDecoder):
def __init__(self, input_shape: Dict[(str, ShapeSpec)], *, transformer_dropout: float, transformer_nheads: int, transformer_dim_feedforward: int, transformer_enc_layers: int, transformer_pre_norm: bool, conv_dim: int, mask_dim: int, norm: Optional[Union[(str, Callable)]]=None):
super().__init__(input_shape, conv_dim=conv_dim, mask_dim=mask_dim, norm=norm)
input_shape = sorted(input_shape.items(), key=(lambda x: x[1].stride))
self.in_features = [k for (k, v) in input_shape]
feature_strides = [v.stride for (k, v) in input_shape]
feature_channels = [v.channels for (k, v) in input_shape]
in_channels = feature_channels[(len(self.in_features) - 1)]
self.input_proj = Conv2d(in_channels, conv_dim, kernel_size=1)
weight_init.c2_xavier_fill(self.input_proj)
self.transformer = TransformerEncoderOnly(d_model=conv_dim, dropout=transformer_dropout, nhead=transformer_nheads, dim_feedforward=transformer_dim_feedforward, num_encoder_layers=transformer_enc_layers, normalize_before=transformer_pre_norm)
N_steps = (conv_dim // 2)
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
use_bias = (norm == '')
output_norm = get_norm(norm, conv_dim)
output_conv = Conv2d(conv_dim, conv_dim, kernel_size=3, stride=1, padding=1, bias=use_bias, norm=output_norm, activation=F.relu)
weight_init.c2_xavier_fill(output_conv)
delattr(self, 'layer_{}'.format(len(self.in_features)))
self.add_module('layer_{}'.format(len(self.in_features)), output_conv)
self.output_convs[0] = output_conv
@classmethod
def from_config(cls, cfg, input_shape: Dict[(str, ShapeSpec)]):
ret = super().from_config(cfg, input_shape)
ret['transformer_dropout'] = cfg.MODEL.M2FP.DROPOUT
ret['transformer_nheads'] = cfg.MODEL.M2FP.NHEADS
ret['transformer_dim_feedforward'] = cfg.MODEL.M2FP.DIM_FEEDFORWARD
ret['transformer_enc_layers'] = cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS
ret['transformer_pre_norm'] = cfg.MODEL.M2FP.PRE_NORM
return ret
def forward_features(self, features):
multi_scale_features = []
num_cur_levels = 0
for (idx, f) in enumerate(self.in_features[::(- 1)]):
x = features[f]
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
if (lateral_conv is None):
transformer = self.input_proj(x)
pos = self.pe_layer(x)
transformer = self.transformer(transformer, None, pos)
y = output_conv(transformer)
transformer_encoder_features = transformer
else:
cur_fpn = lateral_conv(x)
y = (cur_fpn + F.interpolate(y, size=cur_fpn.shape[(- 2):], mode='nearest'))
y = output_conv(y)
if (num_cur_levels < self.maskformer_num_feature_levels):
multi_scale_features.append(y)
num_cur_levels += 1
return (self.mask_features(y), transformer_encoder_features, multi_scale_features)
def forward(self, features, targets=None):
logger = logging.getLogger(__name__)
logger.warning('Calling forward() may cause unpredicted behavior of PixelDecoder module.')
return self.forward_features(features) |
def loss_example(pred, true):
if (cfg.model.loss_fun == 'smoothl1'):
l1_loss = nn.SmoothL1Loss()
loss = l1_loss(pred, true)
return (loss, pred) |
@pytest.mark.core
@pytest.mark.parametrize('feature_source', ['user_id', 'item_id', ['item_id', 'user_id']])
@pytest.mark.usefixtures('full_pandas_dataset')
def test_get_encoder(full_pandas_dataset, feature_source):
encoder = DatasetLabelEncoder()
user_item_features = ['user_id', 'item_id']
dataset_for_fit = Dataset(feature_schema=get_features(full_pandas_dataset).subset(user_item_features), interactions=full_pandas_dataset['interactions'])
encoder.fit(dataset_for_fit)
encoder_from_get = encoder.get_encoder(feature_source)
assert (encoder_from_get is not None)
assert isinstance(encoder_from_get, LabelEncoder) |
def is_shuffle(stage: str) -> bool:
is_sh = {'train': True, 'val': False, 'test': False}
return is_sh[stage] |
class Timex3Tagger(Tagger):
def __init__(self, normalizer=None, stopwords=None):
default_sw = {'may'}
self.stopwords = (default_sw if (not stopwords) else stopwords)
self.normalizer = normalizer
self.tag_name = 'TIMEX3'
self._init()
def _matches(self, matchers, doc, ngrams, group=0):
matches = {}
for (i, sent) in enumerate(doc.sentences):
matches[i] = {}
for (j, rgx) in enumerate(matchers):
for match in re.finditer(rgx, sent.text, re.I):
span = match.span(group)
(start, end) = span
tspan = Span(char_start=start, char_end=(end - 1), sentence=sent)
matches[i][(start, (end - 1), ((end - 1) - start))] = tspan
mask = {}
for key in sorted(matches[i], key=(lambda x: x[(- 1)]), reverse=True):
is_longest = True
(start, end, length) = key
tspan = matches[i][key]
for j in range(start, end):
if (j not in mask):
mask[j] = tspan
else:
is_longest = False
if is_longest:
ignore_span = False
for entity_name in doc.annotations[sent.i]:
for span in doc.annotations[sent.i][entity_name]:
if (span and self._is_overlapping(span, tspan)):
ignore_span = True
break
if (not ignore_span):
(yield (i, tspan))
def _is_overlapping(self, a, b):
if ((a.abs_char_start >= b.abs_char_start) and (a.abs_char_start <= b.abs_char_end)):
return True
if ((a.abs_char_end >= b.abs_char_start) and (a.abs_char_end <= b.abs_char_end)):
return True
if ((b.abs_char_start >= a.abs_char_start) and (b.abs_char_start <= a.abs_char_end)):
return True
if ((b.abs_char_end >= a.abs_char_start) and (b.abs_char_end <= a.abs_char_end)):
return True
return False
def tag(self, document, ngrams=6):
matches = defaultdict(list)
for (sidx, match) in self._matches(self.matchers[self.tag_name], document, None, group=0):
if (match.get_span().lower() in self.stopwords):
continue
matches[sidx].append(match)
print(match)
if self.normalizer:
self.normalizer.normalize(matches)
for sidx in matches:
document.annotations[sidx].update({self.tag_name: matches[sidx]})
def _init(self):
self.matchers = {self.tag_name: regexes} |
class BinanceGetRealTimePrice(VirtualFunctionTool):
name = 'BinanceGetRealTimePrice'
summary = 'Retrieve real-time price information for a specified cryptocurrency pair.'
parameters: List[ArgParameter] = [{'name': 'pair', 'type': 'string', 'description': "The cryptocurrency pair to retrieve real-time price information for, for example, 'BTCUSD', 'USDBTC', 'ETHUSD', etc.", 'required': True}]
returns: List[ArgReturn] = [{'name': 'price', 'type': 'number', 'description': 'Price of first currency in a pair in units of the second currency.'}]
exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': "The 'pair' is not valid."}] |
def write_outputs(image_paths, ocr_responses, output_folder, json_out):
if (not os.path.exists(output_folder)):
os.makedirs(output_folder)
for (img, ocr) in zip(image_paths, ocr_responses):
(filename, _) = os.path.splitext(img.split('/')[(- 1)])
if json_out:
with open(''.join([output_folder, '/', filename, '.json']), 'w') as out:
json.dump(ocr, out, separators=(',', ': '), ensure_ascii=False, indent=4)
else:
with open(''.join([output_folder, '/', filename, '.txt']), 'w') as out:
out.write(ocr) |
def to_standard(p, key=None):
ev_dict = evaluation_dict(p)
ordered_alphabet = sorted(ev_dict, key=key)
offset = 0
for k in ordered_alphabet:
temp = ev_dict[k]
ev_dict[k] = offset
offset += temp
result = []
for l in p:
ev_dict[l] += 1
result.append(ev_dict[l])
return Permutations(len(result))(result) |
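# Worked example for to_standard above (hedged: evaluation_dict is assumed to
# map each letter of p to its multiplicity, and Permutations is Sage's
# constructor). Each letter is replaced by its rank, ties broken left to
# right, giving the standardization of the word:
#   p = [3, 1, 4]        -> counts {1: 1, 3: 1, 4: 1}
#   offsets after loop   -> {1: 0, 3: 1, 4: 2}
#   reading p: 3 -> 2, 1 -> 1, 4 -> 3
#   to_standard([3, 1, 4]) == Permutations(3)([2, 1, 3]) |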
def register_Ns3SnrTag_methods(root_module, cls):
cls.add_constructor([param('ns3::SnrTag const &', 'arg0')])
cls.add_constructor([])
cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
cls.add_method('Get', 'double', [], is_const=True)
cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
cls.add_method('Set', 'void', [param('double', 'snr')])
return |
@run_once
def sympy_init():
from sympy import Add
if (Add._sage_ == _sympysage_add):
return
from sympy import Mul, Pow, Symbol, Subs
from sympy.core.function import Function, AppliedUndef, Derivative
from sympy.core.numbers import Float, Integer, Rational, Infinity, NegativeInfinity, ComplexInfinity, Exp1, Pi, GoldenRatio, EulerGamma, Catalan, ImaginaryUnit
from sympy.core.numbers import NaN as sympy_nan
from sympy.core.relational import Relational
from sympy.functions.combinatorial.factorials import RisingFactorial, FallingFactorial
from sympy.functions.elementary.complexes import re, im, Abs
from sympy.functions.elementary.exponential import LambertW
from sympy.functions.elementary.integers import ceiling
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.error_functions import fresnels, fresnelc
from sympy.functions.special.bessel import besselj, bessely, besseli, besselk
from sympy.functions.special.delta_functions import DiracDelta, Heaviside
from sympy.functions.special.error_functions import expint
from sympy.functions.special.elliptic_integrals import elliptic_k
from sympy.functions.special.gamma_functions import loggamma, polygamma
from sympy.functions.special.hyper import hyper
from sympy.functions.special.spherical_harmonics import Ynm
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.logic.boolalg import BooleanTrue, BooleanFalse
from sympy.integrals.integrals import Integral
from sympy.polys import Poly
from sympy.polys.domains.integerring import IntegerRing
from sympy.polys.domains.rationalfield import RationalField
from sympy.polys.domains.polynomialring import PolynomialRing
from sympy.polys.rootoftools import CRootOf
from sympy.polys.rootisolation import RealInterval, ComplexInterval
from sympy.series.order import Order
from sympy.matrices import ImmutableMatrix, ImmutableSparseMatrix, Matrix, SparseMatrix
Float._sage_ = _sympysage_float
Integer._sage_ = _sympysage_integer
Rational._sage_ = _sympysage_rational
RealInterval._sage_ = _sympysage_real_interval
ComplexInterval._sage_ = _sympysage_complex_interval
IntegerRing._sage_ = _sympysage_integer_ring
RationalField._sage_ = _sympysage_rational_field
PolynomialRing._sage_ = _sympysage_polynomial_ring
Poly._sage_ = _sympysage_polynomial
Infinity._sage_ = _sympysage_pinfty
NegativeInfinity._sage_ = _sympysage_ninfty
ComplexInfinity._sage_ = _sympysage_uinfty
sympy_nan._sage_ = _sympysage_nan
ImmutableMatrix._sage_ = _sympysage_matrix
ImmutableSparseMatrix._sage_ = _sympysage_matrix
Matrix._sage_ = _sympysage_matrix
SparseMatrix._sage_ = _sympysage_matrix
Relational._sage_ = _sympysage_relational
Exp1._sage_ = _sympysage_e
Pi._sage_ = _sympysage_pi
GoldenRatio._sage_ = _sympysage_golden_ratio
EulerGamma._sage_ = _sympysage_eulerg
Catalan._sage_ = _sympysage_catalan
ImaginaryUnit._sage_ = _sympysage_i
Add._sage_ = _sympysage_add
Mul._sage_ = _sympysage_mul
Pow._sage_ = _sympysage_pow
Symbol._sage_ = _sympysage_symbol
Subs._sage_ = _sympysage_Subs
Function._sage_ = _sympysage_function
AppliedUndef._sage_ = _sympysage_function
import sympy.core.function
sympy.core.function._undef_sage_helper = UndefSageHelper()
Integral._sage_ = _sympysage_integral
Derivative._sage_ = _sympysage_derivative
Order._sage_ = _sympysage_order
LambertW._sage_ = _sympysage_lambertw
RisingFactorial._sage_ = _sympysage_rf
FallingFactorial._sage_ = _sympysage_ff
loggamma._sage_ = _sympysage_lgamma
polygamma._sage_ = _sympysage_polygamma
DiracDelta._sage_ = _sympysage_dirac_delta
Heaviside._sage_ = _sympysage_heaviside
expint._sage_ = _sympysage_expint
hyper._sage_ = _sympysage_hyp
elliptic_k._sage_ = _sympysage_elliptic_k
KroneckerDelta._sage_ = _sympysage_kronecker_delta
Piecewise._sage_ = _sympysage_piecewise
fresnels._sage_ = _sympysage_fresnels
fresnelc._sage_ = _sympysage_fresnelc
besselj._sage_ = _sympysage_besselj
bessely._sage_ = _sympysage_bessely
besseli._sage_ = _sympysage_besseli
besselk._sage_ = _sympysage_besselk
Ynm._sage_ = _sympysage_ynm
re._sage_ = _sympysage_re
im._sage_ = _sympysage_im
Abs._sage_ = _sympysage_abs
CRootOf._sage_ = _sympysage_crootof
BooleanFalse._sage_ = _sympysage_false
BooleanTrue._sage_ = _sympysage_true
ceiling._sage_ = _sympysage_ceiling |
def gcno_files_exist(cargs):
found_code_coverage_support = False
for (root, dirs, files) in os.walk(cargs.code_dir):
for filename in files:
if (filename[(- 5):] == '.gcno'):
found_code_coverage_support = True
if (not found_code_coverage_support):
print(("[*] Could not find any *.gcno files in --code-dir '%s', is code coverage ('-fprofile-arcs -ftest-coverage') compiled in?" % cargs.code_dir))
return False
return True |
def read_serialized_data_from_files(paths: List[str]) -> List:
results = []
for (i, path) in enumerate(paths):
with open(path, 'rb') as reader:
logger.info('Reading file %s', path)
data = pickle.load(reader)
results.extend(data)
logger.info('Aggregated data size: {}'.format(len(results)))
logger.info('Total data size: {}'.format(len(results)))
return results |
class TestLoader(unittest.TestCase):
def setUp(self):
self.data_home = (tempfile.gettempdir() + '/data')
def test_netset(self):
clear_data_home(self.data_home)
try:
graph = load_netset('stub', self.data_home)
except:
warnings.warn('Could not reach the NetSet collection. Corresponding test has not been performed.', RuntimeWarning)
return
n = 2
self.assertEqual(graph.adjacency.shape, (n, n))
self.assertEqual(len(graph.names), n)
clear_data_home(self.data_home)
def test_invalid_netset(self):
try:
with self.assertRaises(ValueError):
load_netset('junk', self.data_home)
except:
warnings.warn('Could not reach the NetSet collection. Corresponding test has not been performed.', RuntimeWarning)
return
load_netset()
def test_konect(self):
try:
with TimeOut(2):
data = load_konect('moreno_crime', self.data_home)
except (TimeoutError, RuntimeError):
warnings.warn('Could not reach Konect. Corresponding test has not been performed.', RuntimeWarning)
return
self.assertEqual(data.biadjacency.shape[0], 829)
self.assertEqual(data.name.shape[0], 829)
data = load_konect('moreno_crime', self.data_home)
self.assertEqual(data.biadjacency.shape[0], 829)
try:
with TimeOut(2):
data = load_konect('ego-facebook', self.data_home)
except (TimeoutError, RuntimeError):
warnings.warn('Could not reach Konect. Corresponding test has not been performed.', RuntimeWarning)
return
self.assertEqual(data.adjacency.shape[0], 2888)
clear_data_home(self.data_home)
def test_invalid_konect(self):
try:
with TimeOut(4):
with self.assertRaises(ValueError):
load_konect('junk', self.data_home)
with self.assertRaises(ValueError):
load_konect('', self.data_home)
except (TimeoutError, RuntimeError):
warnings.warn('Could not reach Konect. Corresponding test has not been performed.', RuntimeWarning)
return
def test_save_load(self):
data = house()
save((self.data_home + '/house'), data)
loaded_data = load((self.data_home + '/house'))
self.assertTrue(np.allclose(data.data, loaded_data.adjacency.data))
data = star_wars()
save((self.data_home + '/star_wars'), data)
loaded_data = load((self.data_home + '/star_wars'))
self.assertTrue(np.allclose(data.data, loaded_data.biadjacency.data))
data = star_wars(metadata=True)
save((self.data_home + '/star_wars'), data)
loaded_data = load((self.data_home + '/star_wars'))
self.assertTrue(np.allclose(data.biadjacency.data, loaded_data.biadjacency.data))
self.assertEqual(data.names_col[0], loaded_data.names_col[0]) |
def sentence_preprocess(phrase):
replacements = {'½': 'half', '—': '-', '™': '', '¢': 'cent', 'ç': 'c', 'û': 'u', 'é': 'e', '°': ' degree', 'è': 'e', '…': ''}
phrase = phrase.lstrip(' ').rstrip(' ')
for (k, v) in replacements.items():
phrase = phrase.replace(k, v)
return phrase.lower().translate(str.maketrans('', '', string.punctuation)) |
def __relay_host_torrc_defaults(relay):
includes = [TORRC_RELAY_FILENAME, __relay_to_torrc_default_include(relay)]
rate = max(BW_RATE_MIN, relay['bandwidth_rate'])
burst = max(BW_RATE_MIN, relay['bandwidth_burst'])
return {'includes': includes, 'bandwidth_rate': rate, 'bandwidth_burst': burst} |
def list_datasets():
return [EMOPIADataset, EssenFolkSongDatabase, HaydnOp20Dataset, HymnalDataset, HymnalTuneDataset, JSBChoralesDataset, LakhMIDIAlignedDataset, LakhMIDIDataset, LakhMIDIMatchedDataset, MAESTRODatasetV1, MAESTRODatasetV2, MAESTRODatasetV3, Music21Dataset, MusicNetDataset, NESMusicDatabase, NottinghamDatabase, WikifoniaDataset] |
def get_profile(user_id):
user = UserOper.get_user_by_uid(user_id)
if user:
storage.info('user {id} has already crawled'.format(id=user_id))
SeedidsOper.set_seed_crawled(user_id, 1)
is_crawled = 1
else:
user = get_url_from_web(user_id)
if (user is not None):
SeedidsOper.set_seed_crawled(user_id, 1)
else:
SeedidsOper.set_seed_crawled(user_id, 2)
is_crawled = 0
return (user, is_crawled) |
class SchwartzHearstLabelingFunction(LabelingFunction):
def __init__(self, name: str, dictionary: Set[str], label: int, stopwords: Set[str]=None):
super().__init__(name, label)
self._index = {}
self.dictionary = dictionary
self.stopwords = (set() if (not stopwords) else stopwords)
def _doc_term_forms(self, doc):
if (doc.name in self._index):
return self._index[doc.name]
abbrv_map = collections.defaultdict(set)
for sent in doc.sentences:
for i in get_parenthetical_short_forms(sent):
short_form = sent.words[i]
long_form = extract_long_form(i, sent)
if (not long_form):
continue
abbrv_map[short_form].add(long_form)
term_labels = {}
for sf in abbrv_map:
label = None
for term in abbrv_map[sf]:
if ((term.text in self.dictionary) or (term.text.lower() in self.dictionary)):
label = self.label
break
if label:
term_labels[sf] = label
for term in abbrv_map[sf]:
term_labels[term.text.lower()] = label
self._index[doc.name] = term_labels
return self._index[doc.name]
def __call__(self, sentence):
doc_term_dict = self._doc_term_forms(sentence.document)
m = apply_matcher(sentence.words, sentence.char_offsets, doc_term_dict, max_ngrams=5, split_on=None, longest_match_only=False)
if (not m):
return {}
L = {}
for ((char_start, char_end), term) in m:
if (term in self.stopwords):
continue
(start, end) = get_word_index_span((char_start, (char_end - 1)), sentence)
for i in range(start, (end + 1)):
L[i] = self.label
return L |
def read_best_info(path):
with open(path, 'r') as bi_file:
next(bi_file)
headers = next(bi_file).split(',')
values = next(bi_file).split(',')
best_metric_info = {}
best_metric_info['metrics'] = {}
best_metric_info['metrics'][''] = float(values[headers.index('')])
best_metric_info['epoch'] = int(values[headers.index('Epoch')])
best_metric_info['batch_number'] = int(values[headers.index('batch_number')])
best_metric_info[''] = '-'
if (values[headers.index('')] != '-'):
best_metric_info[''] = int(values[headers.index('')])
return best_metric_info |
def starts_with(s, prefix, ignore_case=False):
if is_str(prefix):
prefix = [prefix]
prefix = list(prefix)
if ignore_case:
for (idx, pre) in enumerate(prefix):
prefix[idx] = to_lowercase(pre)
s = to_lowercase(s)
prefix = tuple(prefix)
return s.startswith(prefix) |
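# Hedged usage sketch for starts_with above; is_str and to_lowercase are
# assumed helpers, so minimal stand-ins are provided to keep this runnable.
def is_str(x):
    return isinstance(x, str)

def to_lowercase(x):
    return x.lower()

print(starts_with('Foobar', 'foo', ignore_case=True))  # True
print(starts_with('Foobar', ['bar', 'Foo']))           # True, any prefix may match
print(starts_with('Foobar', 'foo'))                    # False, case-sensitive by default |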
def _check_graph(sgv, graph):
if (not isinstance(sgv, SubGraphView)):
raise TypeError('Expected a SubGraphView, got: {}'.format(type(graph)))
if ((graph is None) or (not sgv.graph)):
return sgv
if (not isinstance(graph, tf_ops.Graph)):
raise TypeError('Expected a tf.Graph, got: {}'.format(type(graph)))
if (sgv.graph is not graph):
raise ValueError('Graph mismatch.')
return sgv |
def main(config_path: str):
config_dict = read_json(config_path)
num_cross_val = config_dict['num_cross_val']
SEEDS = config_dict['seeds']
to_dump = {'config': config_dict}
to_dump['stats'] = {}
for SEED in tqdm(SEEDS):
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(SEED)
to_dump['stats'][SEED] = {split: {} for split in ['train', 'val', 'test']}
for i in range(num_cross_val):
config_dict['data_loader']['args']['test_cross_val'] = i
config = ConfigParser(config_dict)
train(config)
logs = train_to_dump(config, checkpoint=os.path.join(config.save_dir, 'model_best.pth'))
to_dump['stats'][SEED]['train'].update({i: logs['train']})
to_dump['stats'][SEED]['val'].update({i: logs['val']})
log = test(config, checkpoint=os.path.join(config.save_dir, 'model_best.pth'))
to_dump['stats'][SEED]['test'].update({i: log})
pprint(to_dump['stats'][SEED])
for split in ['train', 'val', 'test']:
for metric in (['loss'] + config['metrics']):
to_dump['stats'][f'{split}_{metric}_mean'] = np.nanmean([to_dump['stats'][SEED][split][i][metric] for SEED in SEEDS for i in range(num_cross_val)])
to_dump['stats'][f'{split}_{metric}_std'] = np.nanstd([to_dump['stats'][SEED][split][i][metric] for SEED in SEEDS for i in range(num_cross_val)])
filepath = os.path.join(config_dict['trainer']['save_dir'], (datetime.now().strftime('%m%d_%H%M%S') + '_cross-val-results.json'))
write_json(to_dump, filepath) |
def get_ancestral_step(sigma_from, sigma_to, eta=1.0):
if (not eta):
return (sigma_to, 0.0)
sigma_up = min(sigma_to, (eta * ((((sigma_to ** 2) * ((sigma_from ** 2) - (sigma_to ** 2))) / (sigma_from ** 2)) ** 0.5)))
sigma_down = (((sigma_to ** 2) - (sigma_up ** 2)) ** 0.5)
return (sigma_down, sigma_up) |
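# Hedged usage sketch: how sigma_down/sigma_up are typically consumed in an
# Euler-ancestral sampling step (in the style of k-diffusion). `denoised`
# stands in for a model prediction; only the noise bookkeeping is shown.
import torch

sigma, sigma_next = 1.0, 0.5
(sigma_down, sigma_up) = get_ancestral_step(sigma, sigma_next)
x = torch.randn(4)
denoised = torch.zeros_like(x)          # placeholder model output
d = (x - denoised) / sigma              # derivative estimate
x = x + d * (sigma_down - sigma)        # deterministic step down to sigma_down
x = x + torch.randn_like(x) * sigma_up  # re-noise so the marginal matches sigma_next |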
def test_unknown_length_regularization():
layout = ak.to_layout([1, 2, 3, 4, 5, 6]).to_typetracer(forget_length=False)
assert (layout[unknown_length:].length == unknown_length)
assert (layout[:unknown_length].length == unknown_length)
assert (layout[::unknown_length].length == unknown_length) |
class TestNetworks(unittest.TestCase):
@parameterized.parameters(itertools.product(helpers.DEBUG_DATASETS))
def test_featurizer(self, dataset_name):
batch_size = 8
hparams = hparams_registry.default_hparams('ERM', dataset_name)
dataset = datasets.get_dataset_class(dataset_name)('', [], hparams)
input_ = helpers.make_minibatches(dataset, batch_size)[0][0]
input_shape = dataset.input_shape
algorithm = networks.Featurizer(input_shape, hparams).cuda()
output = algorithm(input_)
self.assertEqual(list(output.shape), [batch_size, algorithm.n_outputs]) |
def _gamma(theta, mu):
concentration = theta
rate = (theta / mu)
gamma_d = Gamma(concentration=concentration, rate=rate)
return gamma_d |
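# Hedged sanity check for _gamma above: with concentration=theta and
# rate=theta/mu the Gamma mean is mu and the variance is mu**2/theta, the
# usual (theta, mu) parameterization used in negative-binomial mixtures.
import torch
from torch.distributions import Gamma

(theta, mu) = (torch.tensor(2.0), torch.tensor(3.0))
d = _gamma(theta, mu)
print(d.mean)      # tensor(3.) == mu
print(d.variance)  # tensor(4.5000) == mu**2 / theta |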
def reverse_dict_value_list(dict_of_list):
return {v: k for (k, vals) in dict_of_list.items() for v in vals} |
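# Usage example for reverse_dict_value_list above: invert a one-to-many
# mapping into a many-to-one lookup. Values must be unique across the lists,
# otherwise later keys silently overwrite earlier ones.
mapping = {'vowel': ['a', 'e'], 'consonant': ['b', 'c']}
print(reverse_dict_value_list(mapping))
# {'a': 'vowel', 'e': 'vowel', 'b': 'consonant', 'c': 'consonant'} |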
def main():
args = parse_args()
data = load_data(args.data_path)
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
download(data, output_dir) |
@resolve_paths
def parse_args(args=None, namespace=None):
parser = argparse.ArgumentParser(description='Extract frames from videos.')
parser.add_argument('-i', '--in_dir', type=pathlib.Path, help='input directory')
parser.add_argument('-o', '--out_dir', type=pathlib.Path, help='output directory')
parser.add_argument('-f', '--fps', default=1, type=int, help='frames per second')
parser.add_argument('-s', '--skip_existing', default=False, action='store_true', help='whether to skip existing outputs')
parser.add_argument('-e', '--ignore_exceptions', default=False, action='store_true', help='whether to ignore all exceptions')
parser.add_argument('-j', '--jobs', default=1, type=int, help='number of jobs')
parser.add_argument('-q', '--quiet', action='store_true', help='show warnings only')
return parser.parse_args(args=args, namespace=namespace) |
def get_precision_recall(G, G_gt):
(p, r, f1, t) = (0.0, 0.0, 0.0, 0.0)
for i in G:
(pi, ri, f1i) = _get_precision_recall_single(G[i], G_gt[i])
p += pi
r += ri
f1 += f1i
t += 1.0
precision = (p / t)
recall = (r / t)
f1 = (f1 / t)
return (precision, recall, f1) |
def log_env_info():
logging.info('Collecting environment information...')
env_info = torch.utils.collect_env.get_pretty_env_info()
logging.info(f'{env_info}') |
def inorder(node):
tags = (str(node.dep_) + ' ')
if node.lefts:
for n in node.lefts:
tags += inorder(n)
if node.rights:
for n in node.rights:
tags += inorder(n)
return tags |
def search_benchmark(pattern: str):
regexp = re.compile(pattern)
for (name, route) in BENCHMARKS.items():
if regexp.search(name):
(yield (name, route)) |
class TestFlowIncludeExclude(FLSpec):
include_exclude_error_list = []
def start(self):
print((f'{bcolors.OKBLUE}Testing FederatedFlow - Starting Test for Include and Exclude ' + f'Attributes {bcolors.ENDC}'))
self.collaborators = self.runtime.collaborators
self.exclude_agg_to_agg = 10
self.include_agg_to_agg = 100
self.next(self.test_include_exclude_agg_to_agg, exclude=['exclude_agg_to_agg'])
def test_include_exclude_agg_to_agg(self):
if ((hasattr(self, 'include_agg_to_agg') is True) and (hasattr(self, 'exclude_agg_to_agg') is False)):
print((f'{bcolors.OKGREEN} ... Exclude test passed in test_include_exclude_agg_to_agg ' + f'{bcolors.ENDC}'))
else:
TestFlowIncludeExclude.include_exclude_error_list.append('test_include_exclude_agg_to_agg')
print((f'{bcolors.FAIL} ... Exclude test failed in test_incude_exclude_agg_to_agg ' + f'{bcolors.ENDC}'))
self.include_agg_to_collab = 100
self.exclude_agg_to_collab = 78
self.next(self.test_include_exclude_agg_to_collab, foreach='collaborators', include=['include_agg_to_collab', 'collaborators'])
def test_include_exclude_agg_to_collab(self):
if ((hasattr(self, 'include_agg_to_agg') is False) and (hasattr(self, 'exclude_agg_to_agg') is False) and (hasattr(self, 'exclude_agg_to_collab') is False) and (hasattr(self, 'include_agg_to_collab') is True)):
print((f'{bcolors.OKGREEN} ... Include test passed in test_include_exclude_agg_to_collab ' + f'{bcolors.ENDC}'))
else:
TestFlowIncludeExclude.include_exclude_error_list.append('test_incude_exclude_agg_to_collab')
print((f'{bcolors.FAIL} ... Include test failed in test_include_exclude_agg_to_collab ' + f'{bcolors.ENDC}'))
self.exclude_collab_to_collab = 10
self.include_collab_to_collab = 44
self.next(self.test_include_exclude_collab_to_collab, exclude=['exclude_collab_to_collab'])
def test_include_exclude_collab_to_collab(self):
if ((hasattr(self, 'include_agg_to_agg') is False) and (hasattr(self, 'include_agg_to_collab') is True) and (hasattr(self, 'include_collab_to_collab') is True) and (hasattr(self, 'exclude_agg_to_agg') is False) and (hasattr(self, 'exclude_agg_to_collab') is False) and (hasattr(self, 'exclude_collab_to_collab') is False)):
print((f'{bcolors.OKGREEN} ... Exclude test passed in ' + f'test_include_exclude_collab_to_collab {bcolors.ENDC}'))
else:
TestFlowIncludeExclude.include_exclude_error_list.append('test_incude_exclude_collab_to_collab')
print((f'{bcolors.FAIL} ... Exclude test failed in test_include_exclude_collab_to_collab ' + f'{bcolors.ENDC}'))
self.exclude_collab_to_agg = 20
self.include_collab_to_agg = 56
self.next(self.join, include=['include_collab_to_agg'])
def join(self, inputs):
validate = ((hasattr(self, 'include_agg_to_agg') is True) and (hasattr(self, 'include_agg_to_collab') is True) and (hasattr(self, 'exclude_agg_to_collab') is True) and (hasattr(self, 'exclude_agg_to_agg') is False))
for input in inputs:
validation = (validate and ((hasattr(input, 'include_collab_to_collab') is False) and (hasattr(input, 'exclude_collab_to_collab') is False) and (hasattr(input, 'exclude_collab_to_agg') is False) and (hasattr(input, 'include_collab_to_agg') is True)))
if validation:
print(f'{bcolors.OKGREEN} ... Include and Exclude tests passed in join {bcolors.ENDC}')
else:
TestFlowIncludeExclude.include_exclude_error_list.append('join')
print(f'{bcolors.FAIL} ... Include and Exclude tests failed in join {bcolors.ENDC}')
print(f'''
{bcolors.UNDERLINE} Include and exclude attributes test summary: {bcolors.ENDC}
''')
if TestFlowIncludeExclude.include_exclude_error_list:
validated_include_exclude_variables = ','.join(TestFlowIncludeExclude.include_exclude_error_list)
print((f'{bcolors.FAIL} ...Test case failed for {validated_include_exclude_variables} ' + f'{bcolors.ENDC}'))
self.next(self.end)
def end(self):
print((f'{bcolors.OKBLUE}Testing FederatedFlow - Ending Test for Include and Exclude ' + f'Attributes {bcolors.ENDC}'))
if TestFlowIncludeExclude.include_exclude_error_list:
raise AssertionError(f'''{bcolors.FAIL}
...Test case failed ... {bcolors.ENDC}''') |