code stringlengths 101 5.91M |
|---|
class Token(object):
    """One word of a sentence plus its dependency-parse bookkeeping."""

    def __init__(self, word, pos, idx):
        # Surface form, part-of-speech tag, and position within the sentence.
        self.word = word
        self.pos = pos
        self.idx = idx
        # Dependency links; left unset until a parser fills them in.
        self.dep = None
        self.parent = None
        self.children = []
def cli_main(modify_parser=None):
    """Parse training arguments and launch training.

    Dispatches to one of three modes: multi-process spawn on this machine,
    a single (externally launched) distributed worker, or plain
    single-process training.
    """
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser, modify_parser=modify_parser)

    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)

    if args.distributed_init_method is not None:
        # An init method is known: run distributed training.
        if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
            spawn_start_rank = args.distributed_rank
            args.distributed_rank = None  # each spawned child derives its own rank
            torch.multiprocessing.spawn(
                fn=distributed_main,
                args=(args, spawn_start_rank),
                nprocs=torch.cuda.device_count(),
            )
        else:
            distributed_main(args.device_id, args)
    elif args.distributed_world_size > 1:
        # No init method given: fall back to a random localhost TCP port.
        assert args.distributed_world_size <= torch.cuda.device_count()
        port = random.randint(10000, 20000)
        args.distributed_init_method = f'tcp://localhost:{port}'
        args.distributed_rank = None  # assigned inside each spawned process
        if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
            logger.info('NOTE: you may get faster training with: --ddp-backend=no_c10d')
        torch.multiprocessing.spawn(
            fn=distributed_main,
            args=(args,),
            nprocs=args.distributed_world_size,
        )
    else:
        main(args)
class TestSVNProjectCheckout():
    """Integration tests for SVNProjectCheckout against a local file:// SVN fixture repo."""
    def setup(self):
        # Fresh temp dir per test; the fixture repo 'test_svn' lives next to this file.
        self.temp_dir = mkdtemp(prefix='mubench-checkout-svn_')
        self.svn_url = Path(join(dirname(realpath(__file__)), 'test_svn')).as_uri()
        self.checkouts_dir = join(self.temp_dir, 'checkouts')
        self.uut = SVNProjectCheckout('-project-', '-version-', self.svn_url, '1', self.checkouts_dir)
    def teardown(self):
        remove_tree(self.temp_dir)
    def test_not_exists(self):
        assert (not self.uut.exists())
    def test_create_checks_repo_out(self):
        # Checkouts land under <checkouts_dir>/<project>/<version>/checkout.
        self.uut.create(0)
        assert exists(join(self.checkouts_dir, '-project-', '-version-', 'checkout'))
    def test_exists(self):
        self.uut.create(0)
        assert self.uut.exists()
    def test_not_exists_no_svn_checkout(self):
        # An empty checkout directory alone must not count as an existing checkout.
        os.makedirs(self.uut.checkout_dir)
        assert (not self.uut.exists())
    def test_delete(self):
        self.uut.create(0)
        self.uut.delete()
        assert (not exists(self.uut.checkout_dir))
    def test_multiple_versions(self):
        # Two versions of the same project must coexist under checkouts_dir.
        checkout_version2 = SVNProjectCheckout(self.uut.name, 'other-version', self.svn_url, '1', self.checkouts_dir)
        self.uut.create(0)
        checkout_version2.create(0)
        assert checkout_version2.exists()
    def test_to_string(self):
        assert_equals('svn:{}'.format(self.svn_url), str(self.uut)) |
class AutoFeatureExtractor():
    """Factory that returns the feature-extractor class matching a checkpoint.

    Not meant to be constructed directly; use
    `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)`.
    """

    def __init__(self):
        raise EnvironmentError('AutoFeatureExtractor is designed to be instantiated using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')

    # Fix: `from_pretrained` takes `cls` but was not marked @classmethod, and the
    # bare `_list_option_in_docstrings(...)` call that sat in the class body was a
    # decorator application that lost its `@`; both are restored here.
    @classmethod
    @_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the right feature extractor for `pretrained_model_name_or_path`.

        Resolution order: `feature_extractor_type` / `auto_map` in the
        extractor's own config dict, then the model config's attributes, then
        the config-class -> extractor mapping. Raises ValueError when nothing
        matches.
        """
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', False)
        kwargs['_from_auto'] = True
        (config_dict, _) = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if ('AutoFeatureExtractor' in config_dict.get('auto_map', {})):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
        if ((feature_extractor_class is None) and (feature_extractor_auto_map is None)):
            # Fall back to the model config for the extractor type.
            if (not isinstance(config, PretrainedConfig)):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if (hasattr(config, 'auto_map') and ('AutoFeatureExtractor' in config.auto_map)):
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
        if (feature_extractor_class is not None):
            if (feature_extractor_auto_map is not None):
                # Loading code from the hub requires explicit user opt-in.
                if (not trust_remote_code):
                    raise ValueError(f'Loading {pretrained_model_name_or_path} requires you to execute the feature extractor file in that repo on your local machine. Make sure you have read the code there to avoid malicious use, then set the option `trust_remote_code=True` to remove this error.')
                feature_extractor_class = get_class_from_dynamic_module(feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            else:
                feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif (type(config) in FEATURE_EXTRACTOR_MAPPING):
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        raise ValueError(f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a `feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following `model_type` keys in its {CONFIG_NAME}: {', '.join((c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys()))}")

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new (config class, feature extractor class) pair."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
def plot_vs(pred_json, save_dir_i, base_json=None):
    """Plot ground-truth vs. predicted summary selections as two bar strips
    and save the figure as '2_vs.jpg' in `save_dir_i`.

    `pred_json` carries 'top_pred' / 'gt' (selected cell indices) and 'shots'
    (total number of cells). `base_json` is accepted but unused here.

    NOTE(review): relies on module-level globals (clip_len, color1_dark,
    color2_dark, font_prop1, font_prop3); each shot appears to span 5 seconds
    (see the `end_time` computation) — confirm against the caller.
    """
    pred_saliency = np.array(pred_json['top_pred'])
    gt_saliency = np.array(pred_json['gt'])
    total_cells = pred_json['shots']
    (t_min, t_max) = (0, total_cells)
    x = np.arange(t_min, t_max, clip_len)
    x = x[:len(pred_saliency)]
    # White background; selected cells are painted with the dark palette colors.
    colors1 = (['white'] * total_cells)
    colors2 = (['white'] * total_cells)
    for idx in gt_saliency:
        colors1[idx] = color1_dark
    for idx in pred_saliency:
        colors2[idx] = color2_dark
    # Two stacked strips: ground truth on top, prediction below.
    (fig, ax) = plt.subplots(2, 1, figsize=(50, 2), gridspec_kw={'height_ratios': [1, 1], 'hspace': 0.05})
    bars1 = ax[0].bar(range(total_cells), np.ones(total_cells), color=colors1, width=2, label='GT Summary ')
    ax[0].axis('off')
    rect1 = patches.Rectangle((0, 0), total_cells, 1, linewidth=1, edgecolor='black', facecolor='none')
    ax[0].add_patch(rect1)
    legend = ax[0].legend(loc='upper right', prop=font_prop3, handlelength=0)
    for text in legend.get_texts():
        text.set_color(color1_dark)
    bars2 = ax[1].bar(range(total_cells), np.ones(total_cells), color=colors2, width=2, label="UniVTG's Summary ")
    ax[1].axis('off')
    rect2 = patches.Rectangle((0, 0), total_cells, 1, linewidth=1, edgecolor='black', facecolor='none')
    ax[1].add_patch(rect2)
    legend = ax[1].legend(loc='upper right', prop=font_prop3, handlelength=0)
    for text in legend.get_texts():
        text.set_color(color2_dark)
    ax[0].set_xlim(left=0, right=total_cells)
    ax[1].set_xlim(left=0, right=total_cells)
    # Timestamp labels at both ends of the bottom strip, slightly inset.
    offset = (pred_json['shots'] * 0.01)
    start_time = convert_seconds_to_hms(0)
    ax[1].text(offset, (- 0.3), start_time, va='center', ha='center', color='black', fontproperties=font_prop1)
    end_time = convert_seconds_to_hms((pred_json['shots'] * 5))
    ax[1].text((pred_json['shots'] - offset), (- 0.3), end_time, va='center', ha='center', color='black', fontproperties=font_prop1)
    plt.savefig(os.path.join(save_dir_i, '2_vs.jpg'), bbox_inches='tight', pad_inches=0.2, dpi=100)
    return |
class BaseOptions(object):
    """Shared command-line options for train/test scripts.

    Subclasses are expected to extend `initialize` and to set `self.is_train`
    before `parse()` is called — it is read in `parse` and `_save`
    (NOTE(review): `is_train` is not defined in this class; confirm subclasses
    always provide it).
    """

    def __init__(self):
        self._parser = argparse.ArgumentParser()
        self._initialized = False

    def initialize(self):
        """Register all common command-line arguments."""
        # Data / checkpoint locations.
        self._parser.add_argument('--checkpoints_dir', type=str, default='./outputs/checkpoints/', help='models are saved here')
        self._parser.add_argument('--data_dir', type=str, default='/p300/datasets/iPER', help='path to dataset')
        self._parser.add_argument('--dataset_mode', type=str, default='iPER', help='chooses dataset to be used')
        self._parser.add_argument('--train_ids_file', type=str, default='train.txt', help='file containing train ids')
        self._parser.add_argument('--test_ids_file', type=str, default='val.txt', help='file containing test ids')
        self._parser.add_argument('--images_folder', type=str, default='images_HD', help='images folder')
        self._parser.add_argument('--smpls_folder', type=str, default='smpls', help='smpls folder')
        # Pretrained assets (SMPL / HMR / face models, UV mapping).
        self._parser.add_argument('--map_name', type=str, default='uv_seg', help='mapping function')
        self._parser.add_argument('--part_info', type=str, default='assets/pretrains/smpl_part_info.json', help='smpl part info path.')
        self._parser.add_argument('--uv_mapping', type=str, default='assets/pretrains/mapper.txt', help='uv mapping.')
        self._parser.add_argument('--hmr_model', type=str, default='assets/pretrains/hmr_tf2pt.pth', help='pretrained hmr model path.')
        self._parser.add_argument('--smpl_model', type=str, default='assets/pretrains/smpl_model.pkl', help='pretrained smpl model path.')
        self._parser.add_argument('--face_model', type=str, default='assets/pretrains/sphere20a_.pth', help='pretrained face model path.')
        # Checkpoint resume behaviour.
        self._parser.add_argument('--load_epoch', type=int, default=(- 1), help='which epoch to load? set to -1 to use latest cached model')
        self._parser.add_argument('--load_path', type=str, default='./outputs/checkpoints/lwb_imper_fashion_place/net_epoch_30_id_G.pth', help='pretrained model path')
        # Batching / model-shape hyper-parameters.
        self._parser.add_argument('--batch_size', type=int, default=4, help='input batch size')
        self._parser.add_argument('--time_step', type=int, default=10, help='time step size')
        self._parser.add_argument('--tex_size', type=int, default=3, help='input tex size')
        self._parser.add_argument('--image_size', type=int, default=256, help='input image size')
        self._parser.add_argument('--repeat_num', type=int, default=6, help='number of residual blocks.')
        self._parser.add_argument('--cond_nc', type=int, default=3, help='# of conditions')
        self._parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        self._parser.add_argument('--model', type=str, default='impersonator', help='model to run')
        self._parser.add_argument('--name', type=str, default='running', help='name of the experiment. It decides where to store samples and models')
        self._parser.add_argument('--gen_name', type=str, default='impersonator', help='chooses generator to be used, resnet or unet')
        self._parser.add_argument('--norm_type', type=str, default='instance', help='choose use what norm layer in discriminator')
        # Dataloading / misc flags.
        self._parser.add_argument('--n_threads_test', default=2, type=int, help='# threads for loading data')
        self._parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        self._parser.add_argument('--do_saturate_mask', action='store_true', default=False, help='do use mask_fake for mask_cyc')
        self._parser.add_argument('--bg_replace', action='store_true', default=False, help='replace original pixels or not')
        self._parser.add_argument('--debug', action='store_true', default=False, help='debug or not')
        self._initialized = True

    def set_zero_thread_for_Win(self):
        """Force dataloader worker counts to 0 on Windows (no fork support)."""
        import platform
        if (platform.system() == 'Windows'):
            if ('n_threads_test' in self._opt.__dict__):
                self._opt.__setattr__('n_threads_test', 0)
            if ('n_threads_train' in self._opt.__dict__):
                self._opt.__setattr__('n_threads_train', 0)

    def parse(self):
        """Parse CLI args, post-process them, print and persist the result."""
        if (not self._initialized):
            self.initialize()
        self._opt = self._parser.parse_args()
        self.set_zero_thread_for_Win()
        self._opt.is_train = self.is_train
        self._set_and_check_load_epoch()
        self._get_set_gpus()
        args = vars(self._opt)
        self._print(args)
        self._save(args)
        return self._opt

    def _set_and_check_load_epoch(self):
        """Resolve load_epoch == -1 to the newest saved epoch; validate explicit ones."""
        models_dir = os.path.join(self._opt.checkpoints_dir, self._opt.name)
        if os.path.exists(models_dir):
            if (self._opt.load_epoch == (- 1)):
                load_epoch = 0
                for file in os.listdir(models_dir):
                    if file.startswith('net_epoch_'):
                        # Checkpoint files look like net_epoch_<N>_id_<...>.pth.
                        load_epoch = max(load_epoch, int(file.split('_')[2]))
                self._opt.load_epoch = load_epoch
            else:
                found = False
                for file in os.listdir(models_dir):
                    if file.startswith('net_epoch_'):
                        found = (int(file.split('_')[2]) == self._opt.load_epoch)
                        if found:
                            break
                assert found, ('Model for epoch %i not found' % self._opt.load_epoch)
        else:
            assert (self._opt.load_epoch < 1), ('Model for epoch %i not found' % self._opt.load_epoch)
            self._opt.load_epoch = 0

    def _get_set_gpus(self):
        """Expose the selected GPUs to CUDA via environment variables."""
        # Fix: the variable CUDA reads is CUDA_DEVICE_ORDER; the original set
        # the misspelled CUDA_DEVICES_ORDER, which CUDA silently ignores.
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
        if (len(self._opt.gpu_ids) > 0):
            os.environ['CUDA_VISIBLE_DEVICES'] = self._opt.gpu_ids
        else:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    def _print(self, args):
        """Print every parsed option as 'key: value'."""
        print(' Options ')
        for (k, v) in sorted(args.items()):
            print(('%s: %s' % (str(k), str(v))))
        print(' End ')

    def _save(self, args):
        """Write the parsed options to opt_train.txt / opt_test.txt in the experiment dir."""
        expr_dir = os.path.join(self._opt.checkpoints_dir, self._opt.name)
        print(expr_dir)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, ('opt_%s.txt' % ('train' if self.is_train else 'test')))
        with open(file_name, 'wt') as opt_file:
            opt_file.write(' Options \n')
            for (k, v) in sorted(args.items()):
                opt_file.write(('%s: %s\n' % (str(k), str(v))))
            opt_file.write(' End \n')
def stream_file_list(filenames, buffer_count=20, batch_size=10, chunk_size=1, shuffle=True):
    """Yield np.stack'ed batches drawn round-robin from up to `buffer_count`
    concurrently open file streams.

    Unreadable files (IOError/EOFError) are silently skipped. A batch is only
    emitted when all collected items share a shape; otherwise the partial
    batch is dropped and collection restarts.

    NOTE(review): relies on `load_file`, `threaded` and `stream_array`
    defined elsewhere in this module — `threaded` presumably prefetches in a
    background thread (queue_size=20); confirm its semantics there.
    """
    filenames = filenames.copy()  # avoid shuffling the caller's list in place
    if shuffle:
        random.shuffle(filenames)
    def _loaded_files():
        # Lazily load each file, tagged with its position in `filenames`.
        for (i, fname) in enumerate(filenames):
            (yield (i, load_file(fname)))
    loaded_files = threaded(_loaded_files(), queue_size=20)
    result = []
    streams = []
    total_files = len(filenames)
    curr_file_idx = (- 1)
    def make_stream():
        # Pull the next loaded file and wrap it in an item stream.
        (i, filedata) = next(loaded_files)
        stream = stream_array(filedata, shuffle=shuffle, chunk_size=chunk_size)
        return (i, stream)
    # Prime the buffer with up to `buffer_count` streams.
    while ((len(streams) < buffer_count) and ((curr_file_idx + 1) < total_files)):
        try:
            (curr_file_idx, stream) = make_stream()
            streams.append(stream)
        except IOError:
            pass
        except EOFError:
            pass
    while ((len(streams) > 0) or ((curr_file_idx + 1) < total_files)):
        i = 0
        # Round-robin over the open streams until a full batch is collected.
        while ((len(result) < batch_size) and (len(streams) > 0)):
            try:
                i = (i % len(streams))
                next_item = next(streams[i])
                i = ((i + 1) % len(streams))
                result.append(next_item)
            except StopIteration:
                # Stream exhausted: replace it with the next readable file,
                # or drop it from the rotation when no files remain.
                stream = None
                while ((curr_file_idx + 1) < total_files):
                    try:
                        (curr_file_idx, stream) = make_stream()
                        break
                    except IOError:
                        pass
                    except EOFError:
                        pass
                if (stream is not None):
                    streams[i] = stream
                else:
                    streams = (streams[:i] + streams[(i + 1):])
        if (len(result) > 0):
            # Only emit shape-consistent batches; mixed shapes are discarded.
            if all(((x.shape == result[0].shape) for x in result)):
                (yield np.stack(result))
            result = []
        if shuffle:
            random.shuffle(streams) |
def _transform(parsed_date_data: ParsedDate, parsed_output_format_data: ParsedTargetFormat, output_format: str, output_timezone: str) -> str:
    """Render `parsed_date_data` into `output_format` by substituting every
    recognized token (year, month, day, h/m/s, weekday, timezone) in turn.

    An empty `output_timezone` means "keep the parsed timezone"; otherwise the
    date is first shifted into the requested zone. Substitution order matters
    (e.g. month vs. minute tokens) and is preserved from the original.
    """
    transformed = deepcopy(output_format)
    if output_timezone != '':
        parsed_date_data = _change_timezone(parsed_date_data, output_timezone)
    ymd_tokens = parsed_output_format_data.ymd_token
    hms_tokens = parsed_output_format_data.hms_token
    ymd_values = parsed_date_data.ymd
    hms_values = parsed_date_data.hms
    transformed = _transform_year(transformed, ymd_tokens['year_token'], ymd_values['year'])
    transformed = _transform_day(transformed, ymd_tokens['day_token'], ymd_values['day'])
    transformed = _transform_hms(transformed, str(hms_tokens['hour_token']), bool(hms_tokens['ispm']), hms_values['hour'])
    transformed = _transform_hms(transformed, str(hms_tokens['minute_token']), False, hms_values['minute'])
    transformed = _transform_hms(transformed, str(hms_tokens['second_token']), False, hms_values['second'])
    transformed = _transform_month(transformed, ymd_tokens['month_token'], ymd_values['month'])
    transformed = _transform_weekday(transformed, parsed_output_format_data.weekday_token, parsed_date_data.weekday)
    tz = parsed_date_data.tzinfo
    transformed = _transform_timezone(transformed, parsed_output_format_data.timezone_token, str(tz['timezone']), str(tz['utc_add']), int(tz['utc_offset_hours']), int(tz['utc_offset_minutes']))
    return transformed
# NOTE(review): `_gloo()` appears to be a truncated decorator (likely
# `@requires_gloo()` from the original PyTorch test suite) that lost its `@`
# and prefix — confirm before treating it as a deliberate module-level call.
_gloo()
class ReducerTest(TestCase):
    """Tests for `dist.Reducer` on a single-process Gloo process group.

    NOTE(review): depends on `ReducerModule`, `dist`, `c10d` and `TestCase`
    defined/imported elsewhere in this file.
    """
    def setUp(self):
        # One-rank Gloo process group backed by a temp-file store.
        self.file = tempfile.NamedTemporaryFile(delete=False)
        self.store = c10d.FileStore(self.file.name, 1)
        self.process_group = c10d.ProcessGroupGloo(self.store, 0, 1)
    def test_single_dtype_single_bucket(self):
        # All parameters in one bucket is legal when dtypes are uniform.
        model = ReducerModule()
        parameters = list(model.parameters())
        buckets = [list(range(len(parameters)))]
        dist.Reducer([parameters], buckets, self.process_group)
    def _create_mixed_precision_model(self):
        # Float model with one double submodule -> two dtypes among parameters.
        model = ReducerModule()
        model.float()
        model.fc1.double()
        return model
    def test_multi_dtype_single_bucket(self):
        # Mixing dtypes inside a single bucket must be rejected.
        model = self._create_mixed_precision_model()
        with self.assertRaises(RuntimeError):
            parameters = [list(model.parameters())]
            buckets = [list(range(len(parameters[0])))]
            dist.Reducer(parameters, buckets, self.process_group)
    def test_multi_dtype_multi_bucket(self):
        model = self._create_mixed_precision_model()
        parameters = [list(model.parameters())]
        # One bucket per dtype (groupby assumes same-dtype params are contiguous).
        group_by_dtype = groupby(range(len(parameters[0])), key=(lambda i: parameters[0][i].dtype))
        buckets = [list(indices) for (_, indices) in group_by_dtype]
        dist.Reducer(parameters, buckets, self.process_group)
    def _create_reducer_for_models(self, models, find_unused_parameters=False):
        # Build a Reducer with per-dtype buckets for the given model replicas.
        parameters = [list(model.parameters()) for model in models]
        group_by_dtype = groupby(range(len(parameters[0])), key=(lambda i: parameters[0][i].dtype))
        buckets = [list(indices) for (_, indices) in group_by_dtype]
        return dist.Reducer(parameters, buckets, self.process_group, find_unused_parameters=find_unused_parameters)
    def test_forward_backward_single_replica(self):
        batch_size = 10
        model = self._create_mixed_precision_model()
        reducer = self._create_reducer_for_models([model])
        loss = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.double)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
        output = loss(model(input), target)
        reducer.prepare_for_backward(output)
        output.backward()
    def test_forward_backward_multi_replica(self):
        batch_size = 10
        num_replicas = 2
        models = [self._create_mixed_precision_model() for _ in range(num_replicas)]
        reducer = self._create_reducer_for_models(models)
        loss = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.double).chunk(num_replicas)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
        outputs = [models[i](input[i]) for i in range(num_replicas)]
        output = loss(torch.cat(outputs), target)
        reducer.prepare_for_backward(output)
        output.backward()
        # After reduction every replica must hold identical gradients.
        for parameters in zip(*[model.parameters() for model in models]):
            for parameter in parameters:
                self.assertEqual(parameters[0].grad, parameter.grad)
    def test_forward_backward_unused_parameters(self):
        batch_size = 10
        model = self._create_mixed_precision_model()
        reducer = self._create_reducer_for_models([model], find_unused_parameters=True)
        loss = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.double)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
        # fc3 is skipped in the forward pass, so its grad must stay unset.
        output = loss(model(input, use_fc3=False), target)
        self.assertEqual(None, model.fc3.weight.grad)
        reducer.prepare_for_backward(output)
        output.backward()
        self.assertEqual(None, model.fc3.weight.grad)
    def test_forward_backward_optimizer(self):
        batch_size = 10
        model = self._create_mixed_precision_model()
        reducer = self._create_reducer_for_models([model], find_unused_parameters=True)
        loss = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters())
        for i in range(3):
            input = torch.rand([batch_size, 2], dtype=torch.double)
            target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
            optimizer.zero_grad()
            # First iteration skips fc3 to exercise the unused-parameter path.
            output = loss(model(input, use_fc3=(i > 0)), target)
            reducer.prepare_for_backward(output)
            output.backward()
            optimizer.step()
    def test_ddp_comm_hook_multiple_replica_check(self):
        # Registering a comm hook is unsupported with multiple module replicas.
        num_replicas = 2
        models = [self._create_mixed_precision_model() for _ in range(num_replicas)]
        reducer = self._create_reducer_for_models(models)
        def dummy_hook(state, bucket):
            fut = torch.futures.Future()
            fut.set_result(bucket.get_tensors())
            return fut
        with self.assertRaisesRegex(RuntimeError, 'Communication hook does not support single-process multiple-device mode.'):
            dist._register_comm_hook(reducer, None, dummy_hook) |
class TimesformerConfig(PretrainedConfig):
    """Configuration for a TimeSformer video-transformer model.

    Every keyword argument is exposed unchanged as an attribute of the same
    name; remaining kwargs are forwarded to `PretrainedConfig`.
    """

    model_type = 'timesformer'

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, attention_type='divided_space_time', drop_path_rate=0, **kwargs):
        super().__init__(**kwargs)
        # Mirror every architecture hyper-parameter onto the instance.
        for attr_name, attr_value in (
            ('image_size', image_size),
            ('patch_size', patch_size),
            ('num_channels', num_channels),
            ('num_frames', num_frames),
            ('hidden_size', hidden_size),
            ('num_hidden_layers', num_hidden_layers),
            ('num_attention_heads', num_attention_heads),
            ('intermediate_size', intermediate_size),
            ('hidden_act', hidden_act),
            ('hidden_dropout_prob', hidden_dropout_prob),
            ('attention_probs_dropout_prob', attention_probs_dropout_prob),
            ('initializer_range', initializer_range),
            ('layer_norm_eps', layer_norm_eps),
            ('qkv_bias', qkv_bias),
            ('attention_type', attention_type),
            ('drop_path_rate', drop_path_rate),
        ):
            setattr(self, attr_name, attr_value)
def Eva(n_neighbors, min_dist, log_file):
    """Refit the fingerprint UMAP embedding with the given hyper-parameters,
    train a MultiLabelEstimator on it, and log the resulting AUCs.

    Appends one CSV line (min_dist, n_neighbors, valid loss, valid/train AUC,
    best epoch, test AUC) to `log_file` and returns
    [valid_best_auc, train_best_auc, best_epoch].

    NOTE(review): relies on module-level globals (loadmap, mp2, X2,
    train/valid/test_idx, trainY/validY/testY, MultiLabelEstimator) —
    defined elsewhere in this script.
    """
    # (Fix: dropped the original `min_dist = min_dist` / `n_neighbors = n_neighbors`
    # self-assignments — they were no-ops.)
    print({'min_dist': min_dist, 'n_neighbors': n_neighbors})
    mp_new = loadmap('../fingerprint.mp')
    mp_new.fit(method='umap', min_dist=min_dist, n_neighbors=n_neighbors)
    X_new = mp2.rearrangement(X2, mp_new)
    trainX = X_new[train_idx]
    validX = X_new[valid_idx]
    testX = X_new[test_idx]
    clf = MultiLabelEstimator(n_outputs=1, fmap_shape1=trainX.shape[1:], batch_size=128, dense_layers=[128, 32], gpuid='0', patience=1000000, monitor='val_auc', epochs=200)
    clf.fit(trainX, trainY, validX, validY)
    best_epoch = clf._performance.best_epoch
    train_aucs = clf._performance.evaluate(trainX, trainY)
    valid_aucs = clf._performance.evaluate(validX, validY)
    test_aucs = clf._performance.evaluate(testX, testY)
    # Mean AUC across tasks, ignoring NaNs from degenerate tasks.
    train_best_auc = np.nanmean(train_aucs)
    valid_best_auc = np.nanmean(valid_aucs)
    test_auc = np.nanmean(test_aucs)
    dfx = pd.DataFrame(clf._performance.history)
    valid_best_loss = dfx[(dfx.epoch == clf._performance.best_epoch)].val_loss.iloc[0]
    with open(log_file, 'a') as f:
        f.write((','.join([str(min_dist), str(n_neighbors), str(valid_best_loss), str(valid_best_auc), str(train_best_auc), str(best_epoch), str(test_auc)]) + '\n'))
    return [valid_best_auc, train_best_auc, best_epoch]
def test_tfidf_vectorizer_setters():
    """Params set on TfidfVectorizer after fitting only reach the inner
    `_tfidf` transformer on the next `fit` call."""
    (norm, use_idf, smooth_idf, sublinear_tf) = ('l2', False, False, False)
    tv = TfidfVectorizer(norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf)
    tv.fit(JUNK_FOOD_DOCS)
    # After fitting, the inner transformer mirrors the constructor params.
    assert (tv._tfidf.norm == norm)
    assert (tv._tfidf.use_idf == use_idf)
    assert (tv._tfidf.smooth_idf == smooth_idf)
    assert (tv._tfidf.sublinear_tf == sublinear_tf)
    tv.norm = 'l1'
    tv.use_idf = True
    tv.smooth_idf = True
    tv.sublinear_tf = True
    # Mutating the vectorizer does NOT touch the already-fitted transformer...
    assert (tv._tfidf.norm == norm)
    assert (tv._tfidf.use_idf == use_idf)
    assert (tv._tfidf.smooth_idf == smooth_idf)
    assert (tv._tfidf.sublinear_tf == sublinear_tf)
    # ...until fit is called again, which re-syncs the params.
    tv.fit(JUNK_FOOD_DOCS)
    assert (tv._tfidf.norm == tv.norm)
    assert (tv._tfidf.use_idf == tv.use_idf)
    assert (tv._tfidf.smooth_idf == tv.smooth_idf)
    assert (tv._tfidf.sublinear_tf == tv.sublinear_tf) |
def main():
    """Entry point for (pre-)training: assemble config, logger, model and
    datasets from CLI args, then hand off to `train_model_eval`.

    NOTE(review): relies on helpers imported elsewhere in this file
    (parse_args, Config, traverse_replace, init_dist, get_root_logger,
    collect_env, set_random_seed, build_model, build_dataset,
    train_model_eval, __version__).
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # CLI arguments override values from the config file.
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    if (args.resume_from is not None):
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    print('work_dir:', args.work_dir)
    print('use_mox:', args.use_mox)
    # Disable memcached-backed loading when the `mc` client is unavailable.
    if (importlib.util.find_spec('mc') is None):
        traverse_replace(cfg, 'memcached', False)
    if (args.launcher == 'none'):
        distributed = False
        assert (cfg.model.type not in ['DeepCluster', 'MOCO', 'SimCLR', 'ODC', 'NPID']), '{} does not support non-dist training.'.format(cfg.model.type)
    else:
        distributed = True
        if (args.launcher == 'slurm'):
            cfg.dist_params['port'] = args.port
        init_dist(args.launcher, **cfg.dist_params)
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # Per-run log file named by start timestamp.
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, 'train_{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level, use_mox=args.use_mox)
    meta = dict()
    # Record the environment for reproducibility.
    env_info_dict = collect_env()
    env_info = '\n'.join(['{}: {}'.format(k, v) for (k, v) in env_info_dict.items()])
    dash_line = (('-' * 60) + '\n')
    logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
    meta['env_info'] = env_info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('Config:\n{}'.format(cfg.text))
    if (args.seed is not None):
        logger.info('Set random seed to {}, deterministic: {}'.format(args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
        cfg.seed = args.seed
        meta['seed'] = args.seed
    model = build_model(cfg.model)
    datasets = [build_dataset(cfg.data.train)]
    assert (len(cfg.workflow) == 1), 'Validation is called by hook.'
    if (cfg.checkpoint_config is not None):
        # Stash version/config in checkpoints so they are self-describing.
        cfg.checkpoint_config.meta = dict(openselfsup_version=__version__, config=cfg.text)
    train_model_eval(model, datasets, cfg, distributed=distributed, timestamp=timestamp, meta=meta) |
class InfoGraph(nn.Module):
    """InfoGraph unsupervised graph-representation model: maximizes mutual
    information between graph-level and node-level encodings, optionally with
    an adversarial prior-matching term.

    NOTE(review): relies on module-level globals (`args`, `dataset_num_features`,
    `device`, `local_global_loss_`, `Encoder`, `FF`, `PriorDiscriminator`).
    """

    def __init__(self, hidden_dim, num_gc_layers, alpha=0.5, beta=1.0, gamma=0.1):
        super(InfoGraph, self).__init__()
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        self.prior = args.prior
        # Embedding is the concatenation of all graph-conv layer outputs.
        # (Fix: dropped the unused `mi_units` alias from the original.)
        self.embedding_dim = (hidden_dim * num_gc_layers)
        self.encoder = Encoder(dataset_num_features, hidden_dim, num_gc_layers)
        self.local_d = FF(self.embedding_dim)
        self.global_d = FF(self.embedding_dim)
        if self.prior:
            self.prior_d = PriorDiscriminator(self.embedding_dim)
        self.init_emb()

    def init_emb(self):
        """Xavier-initialize every Linear layer's weights; zero its bias."""
        # (Fix: dropped the original's unused `initrange` computation.)
        for m in self.modules():
            if isinstance(m, nn.Linear):
                torch.nn.init.xavier_uniform_(m.weight.data)
                if (m.bias is not None):
                    m.bias.data.fill_(0.0)

    def forward(self, x, edge_index, batch, num_graphs):
        """Return the InfoGraph loss (local-global MI loss + optional prior term)."""
        if (x is None):
            # Featureless graphs get a constant scalar feature per node.
            x = torch.ones(batch.shape[0]).to(device)
        (y, M) = self.encoder(x, edge_index, batch)
        g_enc = self.global_d(y)
        l_enc = self.local_d(M)
        # (Fix: dropped the unused `mode = 'fd'` local from the original.)
        measure = 'JSD'
        local_global_loss = local_global_loss_(l_enc, g_enc, edge_index, batch, measure)
        if self.prior:
            # Adversarial prior matching against uniform noise.
            prior = torch.rand_like(y)
            term_a = torch.log(self.prior_d(prior)).mean()
            term_b = torch.log((1.0 - self.prior_d(y))).mean()
            PRIOR = ((- (term_a + term_b)) * self.gamma)
        else:
            PRIOR = 0
        return (local_global_loss + PRIOR)
class RoCBertConfig(PretrainedConfig):
    """Configuration for a RoCBert model.

    Alongside the standard BERT hyper-parameters, RoCBert carries
    pronunciation- and shape-embedding settings. Every keyword argument is
    exposed as an attribute of the same name; `pad_token_id` and remaining
    kwargs are forwarded to `PretrainedConfig`.
    """

    model_type = 'roc_bert'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type='absolute', classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs):
        # Mirror every hyper-parameter onto the instance before delegating to
        # the base class (which consumes pad_token_id and **kwargs), matching
        # the original assignment order.
        for attr_name, attr_value in (
            ('vocab_size', vocab_size),
            ('max_position_embeddings', max_position_embeddings),
            ('hidden_size', hidden_size),
            ('num_hidden_layers', num_hidden_layers),
            ('num_attention_heads', num_attention_heads),
            ('intermediate_size', intermediate_size),
            ('hidden_act', hidden_act),
            ('hidden_dropout_prob', hidden_dropout_prob),
            ('attention_probs_dropout_prob', attention_probs_dropout_prob),
            ('initializer_range', initializer_range),
            ('type_vocab_size', type_vocab_size),
            ('layer_norm_eps', layer_norm_eps),
            ('use_cache', use_cache),
            ('enable_pronunciation', enable_pronunciation),
            ('enable_shape', enable_shape),
            ('pronunciation_embed_dim', pronunciation_embed_dim),
            ('pronunciation_vocab_size', pronunciation_vocab_size),
            ('shape_embed_dim', shape_embed_dim),
            ('shape_vocab_size', shape_vocab_size),
            ('concat_input', concat_input),
            ('position_embedding_type', position_embedding_type),
            ('classifier_dropout', classifier_dropout),
        ):
            setattr(self, attr_name, attr_value)
        super().__init__(pad_token_id=pad_token_id, **kwargs)
class MixtureGroupNorm(nn.Module):
    """Group normalization with a learned mixture of k affine transforms.

    A per-sample attention vector over the k components selects the affine
    (weight, bias) applied after a parameter-free group norm.
    """
    __constants__ = ['num_groups', 'num_channels', 'k', 'eps', 'weight', 'bias']

    def __init__(self, num_channels, num_groups, k, eps=1e-05):
        super(MixtureGroupNorm, self).__init__()
        self.num_groups = num_groups
        self.num_channels = num_channels
        self.k = k
        self.eps = eps
        self.affine = True
        # k candidate affine transforms; the attention network mixes them.
        self.weight_ = nn.Parameter(torch.Tensor(k, num_channels))
        self.bias_ = nn.Parameter(torch.Tensor(k, num_channels))
        # Plain `weight`/`bias` are registered as None so F.group_norm runs
        # without its own affine step.
        self.register_parameter('weight', None)
        self.register_parameter('bias', None)
        self.attention_weights = AttentionWeights(num_channels, k, norm='gn', groups=1)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.normal_(self.weight_, 1, 0.1)
        nn.init.normal_(self.bias_, 0, 0.1)

    def forward(self, x):
        # Normalize without affine, then apply the attention-mixed affine.
        output = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
        size = output.size()
        y = self.attention_weights(x)
        # Fix: the original read `weight = (y self.weight_)` with the operator
        # dropped; a matrix product (N, k) @ (k, C) is the only
        # shape-consistent reading given the unsqueeze/expand that follows.
        weight = (y @ self.weight_)
        bias = (y @ self.bias_)
        weight = weight.unsqueeze((- 1)).unsqueeze((- 1)).expand(size)
        bias = bias.unsqueeze((- 1)).unsqueeze((- 1)).expand(size)
        return ((weight * output) + bias)

    def extra_repr(self):
        return '{num_groups}, {num_channels}, eps={eps}, affine={affine}'.format(**self.__dict__)
def clean_module_name(name):
    """Map flat distribution prefixes onto their dotted package paths.

    'sciann_applications...' -> 'sciann.applications...' and
    'sciann_preprocessing...' -> 'sciann.preprocessing...'; any other name is
    returned unchanged.
    """
    for flat_prefix, dotted_prefix in (
        ('sciann_applications', 'sciann.applications'),
        ('sciann_preprocessing', 'sciann.preprocessing'),
    ):
        if name.startswith(flat_prefix):
            name = name.replace(flat_prefix, dotted_prefix)
    return name
class Communication():
def __init__(self, vehicle_type, vehicle_id):
self.vehicle_type = vehicle_type
self.vehicle_id = vehicle_id
self.local_pose = None
self.current_yaw = 0
self.current_state = None
self.target_motion = PositionTarget()
self.arm_state = False
self.hover_flag = 0
self.coordinate_frame = 1
self.motion_type = 0
self.flight_mode = None
self.plane_mission = None
self.local_pose_sub = rospy.Subscriber((((self.vehicle_type + '_') + self.vehicle_id) + '/mavros/local_position/pose'), PoseStamped, self.local_pose_callback, queue_size=1)
self.cmd_pose_flu_sub = rospy.Subscriber((((('/xtdrone/' + self.vehicle_type) + '_') + self.vehicle_id) + '/cmd_pose_flu'), Pose, self.cmd_pose_flu_callback, queue_size=1)
self.cmd_pose_enu_sub = rospy.Subscriber((((('/xtdrone/' + self.vehicle_type) + '_') + self.vehicle_id) + '/cmd_pose_enu'), Pose, self.cmd_pose_enu_callback, queue_size=1)
self.cmd_vel_flu_sub = rospy.Subscriber((((('/xtdrone/' + self.vehicle_type) + '_') + self.vehicle_id) + '/cmd_vel_flu'), Twist, self.cmd_vel_flu_callback, queue_size=1)
self.cmd_vel_enu_sub = rospy.Subscriber((((('/xtdrone/' + self.vehicle_type) + '_') + self.vehicle_id) + '/cmd_vel_enu'), Twist, self.cmd_vel_enu_callback, queue_size=1)
self.cmd_accel_flu_sub = rospy.Subscriber((((('/xtdrone/' + self.vehicle_type) + '_') + self.vehicle_id) + '/cmd_accel_flu'), Twist, self.cmd_accel_flu_callback, queue_size=1)
self.cmd_accel_enu_sub = rospy.Subscriber((((('/xtdrone/' + self.vehicle_type) + '_') + self.vehicle_id) + '/cmd_accel_enu'), Twist, self.cmd_accel_enu_callback, queue_size=1)
self.cmd_sub = rospy.Subscriber((((('/xtdrone/' + self.vehicle_type) + '_') + self.vehicle_id) + '/cmd'), String, self.cmd_callback, queue_size=3)
self.target_motion_pub = rospy.Publisher((((self.vehicle_type + '_') + self.vehicle_id) + '/mavros/setpoint_raw/local'), PositionTarget, queue_size=1)
self.armService = rospy.ServiceProxy((((self.vehicle_type + '_') + self.vehicle_id) + '/mavros/cmd/arming'), CommandBool)
self.flightModeService = rospy.ServiceProxy((((self.vehicle_type + '_') + self.vehicle_id) + '/mavros/set_mode'), SetMode)
self.transition = rospy.ServiceProxy((((self.vehicle_type + '_') + self.vehicle_id) + '/mavros/cmd/vtol_transition'), CommandVtolTransition)
self.transition_state = 'multirotor'
print(self.transition_state)
print(self.transition(state=3))
print('communication initialized')
def start(self):
rospy.init_node((((self.vehicle_type + '_') + self.vehicle_id) + '_communication'))
rate = rospy.Rate(30)
while (not rospy.is_shutdown()):
self.target_motion_pub.publish(self.target_motion)
rate.sleep()
def local_pose_callback(self, msg):
self.local_pose = msg.pose
self.current_yaw = self.q2yaw(msg.pose.orientation)
def q2yaw(self, q):
if isinstance(q, Quaternion):
rotate_z_rad = q.yaw_pitch_roll[0]
else:
q_ = Quaternion(q.w, q.x, q.y, q.z)
rotate_z_rad = q_.yaw_pitch_roll[0]
return rotate_z_rad
def construct_target(self, x=0, y=0, z=0, vx=0, vy=0, vz=0, afx=0, afy=0, afz=0, yaw=0, yaw_rate=0):
    """Build a mavros PositionTarget message from setpoint components.

    When self.coordinate_frame != 1 the horizontal components are remapped
    (x, y) -> (-y, x); presumably this converts between the body/offset
    frame and the local frame conventions — TODO confirm against mavros.
    type_mask tells PX4 which fields to honour: in 'plane' transition state
    it encodes the fixed-wing mission (takeoff/land/loiter/idle), otherwise
    it is driven by self.motion_type (0=position, 1=velocity, 2=acceleration).
    """
    target_raw_pose = PositionTarget()
    target_raw_pose.coordinate_frame = self.coordinate_frame
    if (self.coordinate_frame == 1):
        # local frame: use components as-is
        target_raw_pose.position.x = x
        target_raw_pose.position.y = y
        target_raw_pose.position.z = z
    else:
        # body/offset frame: swap axes, negating y
        target_raw_pose.position.x = (- y)
        target_raw_pose.position.y = x
        target_raw_pose.position.z = z
    if (self.transition_state == 'plane'):
        # NOTE(review): these magic masks (4096/8192/12288/16384) appear to be
        # PX4 fixed-wing mission selectors — confirm against PX4 docs.
        if (self.plane_mission == 'takeoff'):
            target_raw_pose.type_mask = 4096
        elif (self.plane_mission == 'land'):
            target_raw_pose.type_mask = 8192
        elif (self.plane_mission == 'loiter'):
            target_raw_pose.type_mask = 12288
        else:
            target_raw_pose.type_mask = 16384
    else:
        if (self.coordinate_frame == 1):
            target_raw_pose.velocity.x = vx
            target_raw_pose.velocity.y = vy
            target_raw_pose.velocity.z = vz
            target_raw_pose.acceleration_or_force.x = afx
            target_raw_pose.acceleration_or_force.y = afy
            target_raw_pose.acceleration_or_force.z = afz
        else:
            # same axis remap for velocity/acceleration in non-local frames
            target_raw_pose.velocity.x = (- vy)
            target_raw_pose.velocity.y = vx
            target_raw_pose.velocity.z = vz
            target_raw_pose.acceleration_or_force.x = (- afy)
            target_raw_pose.acceleration_or_force.y = afx
            target_raw_pose.acceleration_or_force.z = afz
        target_raw_pose.yaw = yaw
        target_raw_pose.yaw_rate = yaw_rate
        # ignore the field groups not selected by motion_type
        if (self.motion_type == 0):
            target_raw_pose.type_mask = ((((((PositionTarget.IGNORE_VX + PositionTarget.IGNORE_VY) + PositionTarget.IGNORE_VZ) + PositionTarget.IGNORE_AFX) + PositionTarget.IGNORE_AFY) + PositionTarget.IGNORE_AFZ) + PositionTarget.IGNORE_YAW_RATE)
        if (self.motion_type == 1):
            target_raw_pose.type_mask = ((((((PositionTarget.IGNORE_PX + PositionTarget.IGNORE_PY) + PositionTarget.IGNORE_PZ) + PositionTarget.IGNORE_AFX) + PositionTarget.IGNORE_AFY) + PositionTarget.IGNORE_AFZ) + PositionTarget.IGNORE_YAW)
        if (self.motion_type == 2):
            target_raw_pose.type_mask = ((((((PositionTarget.IGNORE_PX + PositionTarget.IGNORE_PY) + PositionTarget.IGNORE_PZ) + PositionTarget.IGNORE_VX) + PositionTarget.IGNORE_VY) + PositionTarget.IGNORE_VZ) + PositionTarget.IGNORE_YAW)
    return target_raw_pose
def cmd_pose_flu_callback(self, msg):
    # Pose setpoint in the body (FLU) frame.
    # NOTE(review): 9 presumably matches PositionTarget.FRAME_BODY_OFFSET_NED — confirm.
    self.coordinate_frame = 9
    yaw = self.q2yaw(msg.orientation)
    self.target_motion = self.construct_target(x=msg.position.x, y=msg.position.y, z=msg.position.z, yaw=yaw)
def cmd_pose_enu_callback(self, msg):
    # Pose setpoint in the local (ENU) frame (coordinate_frame 1 = local frame).
    self.coordinate_frame = 1
    yaw = self.q2yaw(msg.orientation)
    self.target_motion = self.construct_target(x=msg.position.x, y=msg.position.y, z=msg.position.z, yaw=yaw)
def cmd_vel_flu_callback(self, msg):
    """Velocity setpoint in the body (FLU) frame; ignored while hovering."""
    # Any command above the deadband kicks the vehicle out of hover first.
    self.hover_state_transition(msg.linear.x, msg.linear.y, msg.linear.z, msg.angular.z)
    if (self.hover_flag == 0):
        # NOTE(review): 8 presumably matches PositionTarget.FRAME_BODY_NED — confirm.
        self.coordinate_frame = 8
        self.motion_type = 1  # velocity control
        self.target_motion = self.construct_target(vx=msg.linear.x, vy=msg.linear.y, vz=msg.linear.z, yaw_rate=msg.angular.z)
def cmd_vel_enu_callback(self, msg):
    """Velocity setpoint in the local (ENU) frame; ignored while hovering."""
    self.hover_state_transition(msg.linear.x, msg.linear.y, msg.linear.z, msg.angular.z)
    if (self.hover_flag == 0):
        self.coordinate_frame = 1  # local frame
        self.motion_type = 1  # velocity control
        self.target_motion = self.construct_target(vx=msg.linear.x, vy=msg.linear.y, vz=msg.linear.z, yaw_rate=msg.angular.z)
def cmd_accel_flu_callback(self, msg):
    """Acceleration setpoint in the body (FLU) frame; ignored while hovering.

    Bug fix: construct_target has no ax/ay/az parameters — the previous
    keywords raised TypeError whenever an acceleration command arrived.
    The acceleration fields are named afx/afy/afz (see construct_target).
    """
    self.hover_state_transition(msg.linear.x, msg.linear.y, msg.linear.z, msg.angular.z)
    if (self.hover_flag == 0):
        self.coordinate_frame = 8
        self.motion_type = 2  # acceleration control
        self.target_motion = self.construct_target(afx=msg.linear.x, afy=msg.linear.y, afz=msg.linear.z, yaw_rate=msg.angular.z)
def cmd_accel_enu_callback(self, msg):
    """Acceleration setpoint in the local (ENU) frame; ignored while hovering.

    Bug fix: construct_target has no ax/ay/az parameters — the previous
    keywords raised TypeError; the acceleration fields are afx/afy/afz.
    """
    self.hover_state_transition(msg.linear.x, msg.linear.y, msg.linear.z, msg.angular.z)
    if (self.hover_flag == 0):
        self.coordinate_frame = 1
        self.motion_type = 2  # acceleration control
        self.target_motion = self.construct_target(afx=msg.linear.x, afy=msg.linear.y, afz=msg.linear.z, yaw_rate=msg.angular.z)
def hover_state_transition(self, x, y, z, w):
    """Leave hover as soon as any commanded component exceeds the deadband."""
    deadband = 0.005
    if max(abs(x), abs(y), abs(z), abs(w)) > deadband:
        self.hover_flag = 0
        self.flight_mode = 'OFFBOARD'
def cmd_callback(self, msg):
    """Dispatch string commands from the /cmd topic.

    Handles ARM/DISARM, VTOL transitions ('multirotor'/'plane'),
    fixed-wing mission keywords ('loiter'/'idle'); any other non-empty
    string is treated as a MAVROS flight-mode name.
    """
    if (msg.data == ''):
        return
    elif (msg.data == 'ARM'):
        self.arm_state = self.arm()
        print(((((self.vehicle_type + '_') + self.vehicle_id) + ': Armed ') + str(self.arm_state)))
    elif (msg.data == 'DISARM'):
        # a successful disarm means arm_state becomes False
        self.arm_state = (not self.disarm())
        print(((((self.vehicle_type + '_') + self.vehicle_id) + ': Armed ') + str(self.arm_state)))
    elif (msg.data == 'multirotor'):
        self.transition_state = msg.data
        print(((((self.vehicle_type + '_') + self.vehicle_id) + ':') + msg.data))
        # NOTE(review): state=3 presumably MAV_VTOL_STATE_MC — confirm against mavros
        print(self.transition(state=3))
    elif (msg.data == 'plane'):
        self.transition_state = msg.data
        print(((((self.vehicle_type + '_') + self.vehicle_id) + ':') + msg.data))
        # NOTE(review): state=4 presumably MAV_VTOL_STATE_FW — confirm against mavros
        print(self.transition(state=4))
    elif (msg.data in ['loiter', 'idle']):
        self.plane_mission = msg.data
        print(((((self.vehicle_type + '_') + self.vehicle_id) + ':') + self.plane_mission))
    else:
        self.flight_mode = msg.data
        self.flight_mode_switch()
def arm(self):
    """Request arming through the MAVROS arming service; True on success."""
    if self.armService(True):
        return True
    print(self.vehicle_type + '_' + self.vehicle_id + ': arming failed!')
    return False
def disarm(self):
    """Request disarming through the MAVROS arming service; True on success."""
    if self.armService(False):
        return True
    print(self.vehicle_type + '_' + self.vehicle_id + ': disarming failed!')
    return False
def hover(self):
    # Hold the current position and yaw: switch to position control (motion_type 0)
    # in the local frame and target the last cached local pose.
    self.coordinate_frame = 1
    self.motion_type = 0
    self.target_motion = self.construct_target(x=self.local_pose.position.x, y=self.local_pose.position.y, z=self.local_pose.position.z, yaw=self.current_yaw)
def flight_mode_switch(self):
    """Apply self.flight_mode.

    'HOVER' is handled locally (hold position); any other mode string is
    forwarded to the MAVROS set_mode service.

    NOTE(review): the 'HOVER' branch has no explicit return (yields None
    rather than True/False) — confirm callers do not rely on the bool.
    """
    if (self.flight_mode == 'HOVER'):
        self.hover_flag = 1
        self.hover()
        print((((self.vehicle_type + '_') + self.vehicle_id) + ': Hover'))
    elif self.flightModeService(custom_mode=self.flight_mode):
        self.hover_flag = 0
        print(((((self.vehicle_type + '_') + self.vehicle_id) + ': ') + self.flight_mode))
        return True
    else:
        print((((((self.vehicle_type + '_') + self.vehicle_id) + ': ') + self.flight_mode) + 'Failed'))
        return False
def load_net_config(path):
    """Read a network config file and split it at the 'net_type' line.

    The file contains config lines followed by a ``net_type: <name>`` line;
    everything before that line is concatenated (stripped, no separators)
    into ``net_config``.

    Bug fix: the original looped on ``f.readline()`` and only stopped on a
    'net_type' match — a file without one made ``readline()`` return ''
    forever, spinning in an infinite loop (and leaving ``net_type``
    unbound). Iterate the file instead and raise if the marker is missing.

    Returns:
        (net_config, net_type) tuple of strings.

    Raises:
        ValueError: if no 'net_type' line is present.
    """
    net_config = ''
    net_type = None
    with open(path, 'r') as f:
        for raw_line in f:
            line = raw_line.strip()
            if 'net_type' in line:
                net_type = line.split(': ')[-1]
                break
            net_config += line
    if net_type is None:
        raise ValueError(f"no 'net_type' entry found in {path}")
    return (net_config, net_type)
class TestExog(unittest.TestCase):
    """Integration test: ForecasterEnsemble with and without exogenous data.

    Trains a model-selector ensemble on the Walmart mini dataset, forecasts
    the holdout, and checks that a save/load round-trip reproduces the
    forecast exactly.
    """

    def test_exog_ensemble(self):
        self._test_exog_ensemble(automl=False)

    def test_exog_automl_ensemble(self):
        self._test_exog_ensemble(automl=True)

    def _test_exog_ensemble(self, automl: bool):
        """Shared body; `automl` switches between base and AutoML model sets."""
        print(('-' * 80))
        logger.info((f'''TestExog.test_exog{('_automl' if automl else '')}_ensemble
''' + ('-' * 80)))
        csv = os.path.join(rootdir, 'data', 'walmart', 'walmart_mini.csv')
        index_cols = ['Store', 'Dept']
        target = ['Weekly_Sales']
        # trainval mask from the dataset metadata splits train vs. test rows
        (ts, md) = CustomDataset(rootdir=csv, test_frac=0.25, index_cols=index_cols)[0]
        train = TimeSeries.from_pd(ts.loc[(md.trainval, target)])
        test = TimeSeries.from_pd(ts.loc[((~ md.trainval), target)])
        # exogenous regressors: markdown promotions and holiday indicator columns
        exog = TimeSeries.from_pd(ts[[c for c in ts.columns if (('MarkDown' in c) or ('Holiday' in c))]])
        if automl:
            models = [AutoProphet(AutoProphetConfig()), AutoSarima(AutoSarimaConfig(maxiter=10))]
        else:
            models = [Prophet(ProphetConfig()), Arima(ArimaConfig(order=(4, 1, 2))), ETS(ETSConfig())]
        # run the whole pipeline once without and once with exogenous data
        for ex in [None, exog]:
            logger.info(('With exogenous data...' if (ex is not None) else 'No exogenous data...'))
            model = ForecasterEnsemble(config=ForecasterEnsembleConfig(combiner=ModelSelector(metric=ForecastMetric.sMAPE), models=models))
            model.train(train_data=train, train_config=EnsembleTrainConfig(valid_frac=0.5), exog_data=ex)
            val_results = [(type(m).__name__, v) for (m, v) in zip(model.models, model.combiner.metric_values)]
            logger.info(f"Validation {model.combiner.metric.name}: {', '.join((f'{m}={v:.2f}' for (m, v) in val_results))}")
            (pred, _) = model.forecast(time_stamps=test.time_stamps, exog_data=ex)
            smape = ForecastMetric.sMAPE.value(test, pred)
            logger.info(f'''Ensemble test sMAPE = {smape:.2f}
''')
            # save/load round-trip must reproduce the forecast exactly
            name = ((('automl' if automl else 'base') + '_') + ('no_exog' if (ex is None) else 'exog'))
            model.save(os.path.join(rootdir, 'tmp', 'exog', name))
            loaded_model = ModelFactory.load('ForecasterEnsemble', os.path.join(rootdir, 'tmp', 'exog', name))
            (loaded_pred, _) = loaded_model.forecast(time_stamps=test.time_stamps, exog_data=ex)
            self.assertListEqual(list(pred), list(loaded_pred))
def test(cfg):
    """Run multi-view testing of a video model according to `cfg`.

    Initializes distributed training state and RNG seeds, builds the model,
    loads the test checkpoint, constructs the test loader and the matching
    meter (AVA detection vs. classification), then delegates to perform_test.
    """
    du.init_distributed_training(cfg)
    # fix seeds for reproducible evaluation
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    logging.setup_logging(cfg.OUTPUT_DIR)
    logger.info('Test with config:')
    logger.info(cfg)
    model = build_model(cfg)
    if (du.is_master_proc() and cfg.LOG_MODEL_INFO):
        misc.log_model_info(model, cfg, use_train_input=False)
    cu.load_test_checkpoint(cfg, model)
    test_loader = loader.construct_loader(cfg, 'test')
    logger.info('Testing model for {} iterations'.format(len(test_loader)))
    if cfg.DETECTION.ENABLE:
        # detection requires one clip per GPU (or a pure-CPU run)
        assert ((cfg.NUM_GPUS == cfg.TEST.BATCH_SIZE) or (cfg.NUM_GPUS == 0))
        test_meter = AVAMeter(len(test_loader), cfg, mode='test')
    else:
        # classification: videos must divide evenly into ensemble views x spatial crops
        assert ((test_loader.dataset.num_videos % (cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS)) == 0)
        test_meter = TestMeter((test_loader.dataset.num_videos // (cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS)), (cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS), cfg.MODEL.NUM_CLASSES, len(test_loader), cfg.DATA.MULTI_LABEL, cfg.DATA.ENSEMBLE_METHOD)
    # tensorboard only on the master process
    if (cfg.TENSORBOARD.ENABLE and du.is_master_proc((cfg.NUM_GPUS * cfg.NUM_SHARDS))):
        writer = tb.TensorboardWriter(cfg)
    else:
        writer = None
    test_meter = perform_test(test_loader, model, test_meter, cfg, writer)
    if (writer is not None):
        writer.close()
def overview(target, data):
    """Write the README overview section for one Vitis example.

    Emits an anchor linking back to the example directory (relative to the
    'Vitis_Accel_Examples' repo root found in the current working directory),
    the cleaned title, description, optional info/performance/key-concepts/
    keywords sections, and the tail of an optional details.md.

    Args:
        target: writable text stream (e.g. an open file).
        data: parsed example description dict ('name', 'description', and
            optional 'more_info', 'perf_fields'/'performance',
            'key_concepts', 'keywords' keys).

    Raises:
        ValueError: if cwd is not inside a 'Vitis_Accel_Examples' tree.
    """
    # Relative link from the repo root back down to this example's directory.
    target.write('<a href="./')
    dir_parts = os.getcwd().split('/')
    root_idx = dir_parts.index('Vitis_Accel_Examples')
    depth = len(dir_parts) - root_idx - 1
    target.write('../' * depth)
    for part in dir_parts[root_idx + 1:]:
        target.write(part)
        target.write('/')
    target.write('">')
    # Strip the language/flow tags from the title.
    title = data['name']
    for tag in ('(C)', '(CL)', '(RTL)', '(HLS C/C++ Kernel)'):
        title = title.replace(tag, '')
    target.write(title)
    target.write('</a>')
    target.write('\n')
    target.write('\n\n')
    target.write('\n'.join(data['description']))
    target.write('\n\n')
    if ('more_info' in data):
        target.write('\n'.join(data['more_info']))
        target.write('\n\n')
    if ('perf_fields' in data):
        # Markdown table: header row, dashed separator, then one row per result.
        target.write('### PERFORMANCE\n')
        fields = data['perf_fields']
        target.write('|'.join(fields))
        target.write('\n')
        target.write('-----|' * (len(fields) - 1))
        target.write('-----\n')
        for row in data['performance']:
            target.write('|'.join(row[:len(fields)]))
            target.write('\n')
    if ('key_concepts' in data):
        target.write('***KEY CONCEPTS:*** ')
        target.write(', '.join(data['key_concepts']))
        target.write('\n\n')
    if ('keywords' in data):
        target.write('***KEYWORDS:*** ')
        target.write(', '.join(data['keywords']))
        target.write('\n\n')
    # Append details.md (skipping its first two lines, i.e. its own title).
    if ('details.md' in os.listdir('./')):
        with open('details.md', 'r') as fin:
            for (i, line) in enumerate(fin):
                if (i >= 2):
                    target.write(line)
        target.write('\n')
def cli_optimize_on_call(sdfg: 'SDFG'):
    """Hook invoked when an SDFG is called: run the interactive optimizer on it.

    Returns the optimized SDFG produced by SDFGOptimizer.optimize().
    """
    from dace.transformation.optimizer import SDFGOptimizer
    return SDFGOptimizer(sdfg).optimize()
class SpectrumTemplates(metaclass=ABCMeta):
    """Abstract base for galaxy spectrum template models.

    Subclasses provide ``self.wavelength`` and ``self.templates``; the two
    magnitude methods combine templates with coefficients via ``mag_ab``.
    """

    def __init__(self):
        # abstract: concrete template sets must supply their own constructor
        raise NotImplementedError

    def absolute_magnitudes(self, coefficients, filters, stellar_mass=None):
        """Absolute AB magnitudes of the template mixture in the given filters.

        When a stellar mass is supplied, magnitudes are shifted by the mass
        modulus -2.5 log10(M).
        """
        offset = -2.5 * np.log10(stellar_mass) if stellar_mass is not None else 0
        mags = mag_ab(self.wavelength, self.templates, filters, coefficients=coefficients)
        return (mags.T + offset).T

    def apparent_magnitudes(self, coefficients, redshift, filters, cosmology, *, stellar_mass=None, resolution=1000):
        """Apparent AB magnitudes at the given redshift.

        Applies the cosmology's distance modulus and (optionally) the mass
        modulus; ``resolution`` controls the interpolation of mag_ab.
        """
        offset = -2.5 * np.log10(stellar_mass) if stellar_mass is not None else 0
        distmod = cosmology.distmod(redshift).value
        mags = mag_ab(self.wavelength, self.templates, filters, redshift=redshift, coefficients=coefficients, distmod=distmod, interpolate=resolution)
        return (mags.T + offset).T
def load_wav_to_torch(full_path, sr=None):
    """Load an audio file as a float32 torch tensor scaled to int16 range.

    Samples are clipped to [-1, 1] before scaling by 32768.
    Returns (FloatTensor, sample_rate).
    """
    samples, sample_rate = librosa.load(full_path, sr=sr)
    samples = np.clip(samples, -1, 1) * 32768.0
    return (torch.FloatTensor(samples.astype(np.float32)), sample_rate)
def test_var_args_aot():
    """AOT compilation must reject *args parameters with a SyntaxError.

    NOTE(review): arg_aot is a plain function here yet .compile() is
    called on it — a @dace.program decorator was likely lost from this
    copy; confirm against the dace test suite.
    """
    with pytest.raises(SyntaxError):
        def arg_aot(*args: dace.float64[20]):
            return (args[0] + args[1])
        arg_aot.compile()
def test_floor2vector():
    """floor2vector should snap each matrix entry down onto the grid and
    report the grid index (-1 for values above the last grid point)."""
    values = np.array([[1.1, 2, 3], [4.2, -3, 1]])
    grid = [0, 1, 1.5, 3]
    snapped, grid_index = cubic_utils.floor2vector(values, grid)
    np.testing.assert_array_almost_equal(snapped, [1, 1.5, 1.5, 3, 1.5, 1])
    np.testing.assert_array_almost_equal(grid_index, [1, 2, 2, -1, 2, 1])
@pytest.mark.parametrize('with_timestamp', [False, True])
def test_d3rlpy_logger(with_timestamp: bool) -> None:
    """End-to-end check of D3RLPyLogger against the stub adapter.

    Bug fix: the decorator was corrupted to a bare ``.parametrize(...)``
    (a SyntaxError); restored ``@pytest.mark.parametrize``.
    """
    logger = D3RLPyLogger(StubLoggerAdapterFactory(), 'test', with_timestamp)
    adapter = logger.adapter
    assert isinstance(adapter, StubLoggerAdapter)
    # with_timestamp appends a timestamp suffix to the experiment name
    if with_timestamp:
        assert (adapter.experiment_name != 'test')
    else:
        assert (adapter.experiment_name == 'test')
    assert (not adapter.is_write_params_called)
    logger.add_params({'test': 1})
    assert adapter.is_write_params_called
    logger.add_metric('test', 1)
    with logger.measure_time('test'):
        pass
    # metric writes are deferred until commit()
    assert (not adapter.is_before_write_metric_called)
    assert (not adapter.is_write_metric_called)
    assert (not adapter.is_after_write_metric_called)
    metrics = logger.commit(1, 1)
    assert ('test' in metrics)
    assert ('time_test' in metrics)
    assert adapter.is_before_write_metric_called
    assert adapter.is_write_metric_called
    assert adapter.is_after_write_metric_called
    assert (not adapter.is_save_model_called)
    logger.save_model(1, StubAlgo())
    assert adapter.is_save_model_called
    assert (not adapter.is_close_called)
    logger.close()
    assert adapter.is_close_called
class LossNet(nn.Module):
    """Loss-prediction head: maps four intermediate feature maps to one scalar.

    Each feature level goes through global average pooling and a ReLU-FC
    embedding; the concatenated embeddings feed a final linear layer.
    """

    def __init__(self, feature_sizes=[32, 16, 8, 4], num_channels=[64, 128, 256, 512], interm_dim=128):
        super(LossNet, self).__init__()
        # one (GAP, FC) branch per feature level
        self.GAP1 = nn.AvgPool2d(feature_sizes[0])
        self.GAP2 = nn.AvgPool2d(feature_sizes[1])
        self.GAP3 = nn.AvgPool2d(feature_sizes[2])
        self.GAP4 = nn.AvgPool2d(feature_sizes[3])
        self.FC1 = nn.Linear(num_channels[0], interm_dim)
        self.FC2 = nn.Linear(num_channels[1], interm_dim)
        self.FC3 = nn.Linear(num_channels[2], interm_dim)
        self.FC4 = nn.Linear(num_channels[3], interm_dim)
        self.linear = nn.Linear(4 * interm_dim, 1)

    def forward(self, features):
        """features: sequence of four feature maps, coarsest channel count last."""
        branches = ((self.GAP1, self.FC1), (self.GAP2, self.FC2), (self.GAP3, self.FC3), (self.GAP4, self.FC4))
        embeddings = []
        for feat, (gap, fc) in zip(features, branches):
            pooled = gap(feat)
            flat = pooled.view(pooled.size(0), -1)
            embeddings.append(F.relu(fc(flat)))
        return self.linear(torch.cat(embeddings, 1))
def get_dataloaders(args, epic_ds=None, featuresloader=None):
    """Build the DataLoaders appropriate for args.mode.

    Training mode returns train/validation/eval loaders; validation mode
    returns validation/eval; test mode returns the split(s) for the
    EPIC-Kitchens version in args.ek_version.

    Bug fix: an unsupported ek_version in test mode previously fell
    through with `dls` unbound (UnboundLocalError); it now raises an
    explicit error.

    Raises:
        Exception: on an unsupported mode or ek_version.
    """
    dss = get_datasets(args, epic_ds=epic_ds, featuresloader=featuresloader)
    dl_args = {'batch_size': args.batch_size, 'pin_memory': True, 'num_workers': args.num_workers, 'drop_last': False}
    if (args.mode in ['train', 'training']):
        # only the training split is shuffled
        dls = {'train': DataLoader(dss['train'], shuffle=True, **dl_args), 'validation': DataLoader(dss['validation'], shuffle=False, **dl_args), 'eval': DataLoader(dss['eval'], shuffle=False, **dl_args)}
    elif (args.mode in ['validate', 'validation', 'validating']):
        dls = {'validation': DataLoader(dss['validation'], shuffle=False, **dl_args), 'eval': DataLoader(dss['eval'], shuffle=False, **dl_args)}
    elif (args.mode == 'test'):
        if (args.ek_version == 'ek55'):
            # EPIC-Kitchens-55 has two held-out test splits (seen/unseen kitchens)
            dls = {'test_s1': DataLoader(dss['test_s1'], shuffle=False, **dl_args), 'test_s2': DataLoader(dss['test_s2'], shuffle=False, **dl_args)}
        elif (args.ek_version == 'ek100'):
            dls = {'test': DataLoader(dss['test'], shuffle=False, **dl_args)}
        else:
            raise Exception(f'Error. EPIC-Kitchens version "{args.ek_version}" not supported.')
    else:
        raise Exception(f'Error. Mode "{args.mode}" not supported.')
    return dls
class JointTotalVariation(BaseSimilarityMeasure):
    """Joint total variation coupling term for multi-model inversion.

    phi(m) = sum( sqrt( W @ sum_i (G m_i)^2 + eps * vol^2 ) )

    NOTE(review): this copy had lost every matrix-product operator
    (``G m`` etc. were syntax errors) and the property decorators
    (``_map.setter``); both were reconstructed — verify against upstream
    SimPEG's ``JointTotalVariation``.
    """

    def __init__(self, mesh, wire_map, eps=1e-08, **kwargs):
        super().__init__(mesh, wire_map=wire_map, **kwargs)
        self.set_weights(volume=self.regularization_mesh.vol)
        # eps keeps the square root differentiable where the gradient vanishes
        self.eps = eps
        self._G = self.regularization_mesh.cell_gradient

    @property
    def W(self):
        """Squared-weights times face-to-cell averaging operator (lazy)."""
        if (getattr(self, '_W', None) is None):
            weights = np.prod(list(self._weights.values()), axis=0)
            self._W = (sp.diags(weights ** 2) * self.regularization_mesh.average_face_to_cell)
        return self._W

    @property
    def wire_map(self):
        """Wires that split the stacked model vector into physical models."""
        return self._wire_map

    @wire_map.setter
    def wire_map(self, wires):
        n = self.regularization_mesh.nC
        for (_, mapping) in wires.maps:
            map_n = mapping.shape[0]
            if (n != map_n):
                raise ValueError(f'All mapping outputs must match the number of cells in the regularization mesh! Got {n} and {map_n}')
        self._wire_map = wires

    def __call__(self, model):
        """Evaluate the joint TV functional at the stacked model vector."""
        W = self.W
        G = self._G
        v2 = self.regularization_mesh.vol ** 2
        g2 = 0
        for m in (self.wire_map * model):
            g_m = G @ m
            g2 = g2 + g_m ** 2
        sq = np.sqrt(W @ g2 + self.eps * v2)
        return np.sum(sq)

    def deriv(self, model):
        """Gradient of __call__ with respect to the stacked model."""
        W = self.W
        G = self._G
        g2 = 0
        gs = []
        v2 = self.regularization_mesh.vol ** 2
        for m in (self.wire_map * model):
            g_mi = G @ m
            g2 = g2 + g_mi ** 2
            gs.append(g_mi)
        sq = np.sqrt(W @ g2 + self.eps * v2)
        mid = W.T @ (1 / sq)
        ps = [G.T @ (mid * g_mi) for g_mi in gs]
        return np.concatenate(ps)

    def deriv2(self, model, v=None):
        """Hessian of __call__, or the Hessian-vector product when v is given."""
        W = self.W
        G = self._G
        v2 = self.regularization_mesh.vol ** 2
        gs = []
        g2 = 0
        for m in (self.wire_map * model):
            g_m = G @ m
            g2 = g2 + g_m ** 2
            gs.append(g_m)
        sq = np.sqrt(W @ g2 + self.eps * v2)
        mid = W.T @ (1 / sq)
        if (v is not None):
            g_vs = []
            tmp_sum = 0
            for (vi, g_i) in zip((self.wire_map * v), gs):
                g_vi = G @ vi
                tmp_sum = tmp_sum + W.T @ ((W @ (g_i * g_vi)) / sq ** 3)
                g_vs.append(g_vi)
            ps = []
            for (g_vi, g_i) in zip(g_vs, gs):
                ps.append(G.T @ ((mid * g_vi) - (g_i * tmp_sum)))
            return np.concatenate(ps)
        else:
            # explicit Hessian: block-diagonal part minus the rank-coupled part
            Pieces = []
            Diags = []
            SQ = sp.diags(sq ** (-1.5))
            diag_block = G.T @ sp.diags(mid) @ G
            for g_mi in gs:
                Pieces.append(SQ @ W @ sp.diags(g_mi) @ G)
                Diags.append(diag_block)
            Row = sp.hstack(Pieces, format='csr')
            Diag = sp.block_diag(Diags, format='csr')
            return Diag - Row.T @ Row
def random_shuffle_forever(batch_size, data, *other_data):
    """Endless generator of random batches.

    Each step samples ``batch_size`` distinct indices (without replacement
    within the step) and yields the corresponding slices of every array.
    Yields a single array when only ``data`` was given, else a list.
    """
    arrays = [data, *other_data]
    pool = np.arange(len(data))
    while True:
        picked = np.random.choice(pool, batch_size, replace=False)
        sampled = [arr[picked] for arr in arrays]
        yield sampled[0] if len(sampled) == 1 else sampled
def register_Ns3CallbackImpl__Void_Double_Double_Ns3Mac48Address_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Generated pybindgen registration: bind the (double, double, Mac48Address)
    callback-impl specialization's constructors and methods. Do not edit by hand."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImpl< void, double, double, ns3::Mac48Address, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # operator() is exposed to Python as __call__
    cls.add_method('operator()', 'void', [param('double', 'arg0'), param('double', 'arg1'), param('ns3::Mac48Address', 'arg2')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
def register_types_ns3_dsr(module):
    """Generated pybindgen registration: declare all enums, classes and
    container instantiations of the ns-3 DSR routing module. Do not edit by hand."""
    root_module = module.get_root()
    # enums
    module.add_enum('LinkStates', ['PROBABLE', 'QUESTIONABLE'])
    module.add_enum('DsrMessageType', ['DSR_CONTROL_PACKET', 'DSR_DATA_PACKET'])
    module.add_enum('ErrorType', ['NODE_UNREACHABLE', 'FLOW_STATE_NOT_SUPPORTED', 'OPTION_NOT_SUPPORTED'])
    # classes (with their ns-3 base classes where applicable)
    module.add_class('BlackList')
    module.add_class('DsrErrorBuffEntry')
    module.add_class('DsrErrorBuffer')
    module.add_class('DsrFsHeader', parent=root_module['ns3::Header'])
    module.add_class('DsrGraReply', parent=root_module['ns3::Object'])
    module.add_class('DsrLinkStab')
    module.add_class('DsrMaintainBuffEntry')
    module.add_class('DsrMaintainBuffer')
    module.add_class('DsrNetworkQueue', parent=root_module['ns3::Object'])
    module.add_class('DsrNetworkQueueEntry')
    module.add_class('DsrNodeStab')
    module.add_class('DsrOptionField')
    module.add_class('DsrOptionHeader', parent=root_module['ns3::Header'])
    module.add_class('Alignment', outer_class=root_module['ns3::dsr::DsrOptionHeader'])
    module.add_class('DsrOptionPad1Header', parent=root_module['ns3::dsr::DsrOptionHeader'])
    module.add_class('DsrOptionPadnHeader', parent=root_module['ns3::dsr::DsrOptionHeader'])
    module.add_class('DsrOptionRerrHeader', parent=root_module['ns3::dsr::DsrOptionHeader'])
    module.add_class('DsrOptionRerrUnreachHeader', parent=root_module['ns3::dsr::DsrOptionRerrHeader'])
    module.add_class('DsrOptionRerrUnsupportHeader', parent=root_module['ns3::dsr::DsrOptionRerrHeader'])
    module.add_class('DsrOptionRrepHeader', parent=root_module['ns3::dsr::DsrOptionHeader'])
    module.add_class('DsrOptionRreqHeader', parent=root_module['ns3::dsr::DsrOptionHeader'])
    module.add_class('DsrOptionSRHeader', parent=root_module['ns3::dsr::DsrOptionHeader'])
    module.add_class('DsrOptions', parent=root_module['ns3::Object'])
    module.add_class('DsrPassiveBuffEntry')
    module.add_class('DsrPassiveBuffer', parent=root_module['ns3::Object'])
    module.add_class('DsrReceivedRreqEntry')
    module.add_class('DsrRouteCache', parent=root_module['ns3::Object'])
    module.add_class('Neighbor', outer_class=root_module['ns3::dsr::DsrRouteCache'])
    module.add_class('DsrRouteCacheEntry')
    module.add_class('DsrRouting', parent=root_module['ns3::IpL4Protocol'])
    module.add_class('DsrRoutingHeader', parent=[root_module['ns3::dsr::DsrFsHeader'], root_module['ns3::dsr::DsrOptionField']])
    module.add_class('DsrRreqTable', parent=root_module['ns3::Object'])
    module.add_class('DsrSendBuffEntry')
    module.add_class('DsrSendBuffer')
    module.add_class('GraReplyEntry')
    module.add_class('Link')
    module.add_class('LinkKey')
    module.add_class('NetworkKey')
    module.add_class('PassiveKey')
    module.add_class('RreqTableEntry')
    module.add_class('DsrOptionAck', parent=root_module['ns3::dsr::DsrOptions'])
    module.add_class('DsrOptionAckHeader', parent=root_module['ns3::dsr::DsrOptionHeader'])
    module.add_class('DsrOptionAckReq', parent=root_module['ns3::dsr::DsrOptions'])
    module.add_class('DsrOptionAckReqHeader', parent=root_module['ns3::dsr::DsrOptionHeader'])
    module.add_class('DsrOptionPad1', parent=root_module['ns3::dsr::DsrOptions'])
    module.add_class('DsrOptionPadn', parent=root_module['ns3::dsr::DsrOptions'])
    module.add_class('DsrOptionRerr', parent=root_module['ns3::dsr::DsrOptions'])
    module.add_class('DsrOptionRrep', parent=root_module['ns3::dsr::DsrOptions'])
    module.add_class('DsrOptionRreq', parent=root_module['ns3::dsr::DsrOptions'])
    module.add_class('DsrOptionSR', parent=root_module['ns3::dsr::DsrOptions'])
    # STL container instantiations used by the DSR API
    module.add_container('std::vector< ns3::dsr::DsrErrorBuffEntry >', 'ns3::dsr::DsrErrorBuffEntry', container_type=u'vector')
    module.add_container('std::vector< ns3::dsr::DsrNetworkQueueEntry >', 'ns3::dsr::DsrNetworkQueueEntry', container_type=u'vector')
    module.add_container('std::vector< ns3::Ipv4Address >', 'ns3::Ipv4Address', container_type=u'vector')
    module.add_container('std::list< ns3::dsr::DsrRouteCacheEntry >', 'ns3::dsr::DsrRouteCacheEntry', container_type=u'list')
    module.add_container('std::list< std::vector< ns3::Ipv4Address > >', 'std::vector< ns3::Ipv4Address >', container_type=u'list')
    module.add_container('ns3::dsr::DsrRouteCacheEntry::IP_VECTOR', 'ns3::Ipv4Address', container_type=u'vector')
    module.add_container('std::vector< ns3::dsr::DsrRouteCache::Neighbor >', 'ns3::dsr::DsrRouteCache::Neighbor', container_type=u'vector')
    module.add_container('std::vector< ns3::Ptr< ns3::ArpCache > >', 'ns3::Ptr< ns3::ArpCache >', container_type=u'vector')
    module.add_container('std::vector< std::string >', 'std::string', container_type=u'vector')
    module.add_container('std::vector< ns3::dsr::DsrSendBuffEntry >', 'ns3::dsr::DsrSendBuffEntry', container_type=u'vector')
class Rouge(object):
    """ROUGE-L scorer for captioning-style evaluation (single hypothesis,
    multiple references)."""

    def __init__(self):
        # beta weights recall over precision in the LCS-based F-score
        self.beta = 1.2

    def calc_score(self, candidate, refs):
        """ROUGE-L of one candidate sentence against its references.

        candidate: list with exactly one hypothesis string.
        refs: non-empty list of reference strings.
        """
        assert (len(candidate) == 1)
        assert (len(refs) > 0)
        cand_tokens = candidate[0].split(' ')
        precisions = []
        recalls = []
        for ref in refs:
            ref_tokens = ref.split(' ')
            lcs_len = my_lcs(ref_tokens, cand_tokens)
            precisions.append(lcs_len / float(len(cand_tokens)))
            recalls.append(lcs_len / float(len(ref_tokens)))
        prec_max = max(precisions)
        rec_max = max(recalls)
        if prec_max != 0 and rec_max != 0:
            # beta-weighted F-measure over the best precision/recall pair
            return (((1 + (self.beta ** 2)) * prec_max) * rec_max) / float((rec_max + ((self.beta ** 2) * prec_max)))
        return 0.0

    def compute_score(self, gts, res):
        """Average ROUGE-L over a corpus keyed by example id.

        Returns (mean score, per-example score array).
        """
        scores = []
        for key in sorted(gts.keys()):
            hypo = res[key]
            ref = gts[key]
            scores.append(self.calc_score(hypo, ref))
            assert (type(hypo) is list)
            assert (len(hypo) == 1)
            assert (type(ref) is list)
            assert (len(ref) > 0)
        per_example = np.array(scores)
        return (np.mean(per_example), per_example)

    def method(self):
        return 'Rouge'
def checking_feature_entry():
    """Report what fraction of cached GrailQA features fit under each
    candidate max-length budget."""
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    features = torch.load('feature_cache/grail_mini_bert_32')
    encoded_lens = sanity_check_feature(tokenizer, features)
    for limit in [16, 32, 64, 96, 128, 160, 192, 256]:
        covered = sum(length <= limit for length in encoded_lens) / len(encoded_lens)
        print(limit, covered)
def line_graph_forbidden_subgraphs():
    """Return the list of the nine forbidden subgraphs of a line graph.

    The first is the claw K_{1,3}; the rest are given as explicit adjacency
    dictionaries. These exact graphs characterize line graphs (Beineke's
    theorem); do not alter the dictionaries.
    """
    from sage.graphs.graph import Graph
    from sage.graphs.generators.basic import ClawGraph
    graphs = [ClawGraph()]
    graphs.append(Graph({0: [1, 2, 3], 1: [2, 3], 4: [2], 5: [3]}))
    graphs.append(Graph({0: [1, 2, 3, 4], 1: [2, 3, 4], 3: [4], 2: [5]}))
    graphs.append(Graph({0: [1, 2, 3], 1: [2, 3], 4: [2, 3]}))
    graphs.append(Graph({0: [1, 2, 3], 1: [2, 3], 4: [2], 5: [3, 4]}))
    graphs.append(Graph({0: [1, 2, 3, 4], 1: [2, 3, 4], 3: [4], 5: [2, 0, 1]}))
    graphs.append(Graph({5: [0, 1, 2, 3, 4], 0: [1, 4], 2: [1, 3], 3: [4]}))
    graphs.append(Graph({1: [0, 2, 3, 4], 3: [0, 4], 2: [4, 5], 4: [5]}))
    graphs.append(Graph({0: [1, 2, 3], 1: [2, 3, 4], 2: [3, 4], 3: [4]}))
    return graphs
def ua_check_converted_mot():
    """Ensure UA-DETRAC MOT-format annotations exist for training.

    Reads the module-level `config` dict; if training on UA-DETRAC and the
    converted 'DETRAC-Train-Annotations-MOT' folder is missing, converts it
    from the original MAT annotations.
    """
    phase = config['phase']
    dataset_name = config['dataset_name']
    if ((phase == 'train') and (dataset_name == 'UA-DETRAC')):
        ua_root = config['dataset_path']
        if (not os.path.exists(os.path.join(config['dataset_path'], 'DETRAC-Train-Annotations-MOT'))):
            warnings.warn('cannot find {} in the dataset directory, try to fixing ...'.format('DETRAC-Train-Annotations-MOT'))
            # one-time conversion from the upstream MAT annotation format
            from dataset.tools import ConvertMat2UA
            ConvertMat2UA.run(mat_folder=os.path.join(ua_root, 'DETRAC-Train-Annotations-MAT'), save_folder=os.path.join(ua_root, 'DETRAC-Train-Annotations-MOT'))
@pytest.mark.parametrize('value, result', [(['foo', 'bar'], False), ({'foo', 'bar'}, False), ({'foo': 'bar'}, True), (('foo', 'bar'), False)])
def test_is_dict(value, result):
    """is_dict must be True only for the dict type, not list/set/tuple.

    Bug fix: the decorator was corrupted to a bare ``.parametrize(...)``
    (a SyntaxError); restored ``@pytest.mark.parametrize``.
    """
    assert (is_dict(type(value)) == result)
class FreeModuleCoBasis(Basis_abstract):
    """Dual basis of a basis of a free module of finite rank.

    Builds the tuple of linear forms (v^i) with v^i(e_j) = delta^i_j for the
    given basis (e_j).
    """

    def __init__(self, basis, symbol, latex_symbol=None, indices=None, latex_indices=None):
        self._basis = basis
        Basis_abstract.__init__(self, basis._fmodule, symbol, latex_symbol, indices, latex_indices)
        vl = list()
        fmodule = self._fmodule
        ring_one = fmodule._ring.one()
        # v^i is the linear form whose only nonzero component on `basis` is the i-th
        for i in fmodule.irange():
            v = fmodule.linear_form()
            v.set_comp(basis)[i] = ring_one
            vl.append(v)
        self._vec = tuple(vl)
        # dual-basis symbols carry their index as a superscript
        self.set_name(symbol, latex_symbol=latex_symbol, indices=indices, latex_indices=latex_indices, index_position='up')

    def _test_iter_len(self, **options):
        """TestSuite helper: iteration yields len(self) elements of the dual module."""
        tester = self._tester(**options)
        g = iter(self)
        b = list(g)
        for x in b:
            tester.assertIn(x, self.free_module().dual())
        tester.assertEqual(len(b), len(self))
        tester.assertEqual(len(b), self.free_module().rank())

    def _repr_(self):
        return 'Dual basis {} on the {}'.format(self._name, self._fmodule)
def compute_f1_score(gt, pred):
    """Macro-averaged F1 between ground-truth class ids and per-class scores.

    gt: tensor of integer class labels.
    pred: tensor of shape (N, num_classes); argmax over axis 1 gives the
    predicted class.
    """
    true_labels = gt.cpu().detach().numpy()
    predicted_labels = np.argmax(pred.cpu().detach().numpy(), axis=1)
    return f1_score(true_labels, predicted_labels, average='macro')
def mkdir_ifnotexists(directory):
    """Create `directory` (single level, like os.mkdir) if it does not exist.

    Uses EAFP instead of exists-then-mkdir, which removes the race where
    another process creates the directory between the check and the call.
    A missing parent still raises FileNotFoundError, as before.
    """
    try:
        os.mkdir(directory)
    except FileExistsError:
        pass
def build_param(ctx, py_arg):
    """Convert a Python AST argument node into a TorchScript Param.

    Annotated arguments are rejected; the parameter is given a TensorType
    spanning the argument's source range.
    """
    if getattr(py_arg, 'annotation', None) is not None:
        raise ValueError("Compiled functions don't support annotations")
    # argument-name attribute differs between Python 2 and 3 AST nodes
    if PY2:
        name = py_arg.id
    else:
        name = py_arg.arg
    rng = ctx.make_range(py_arg.lineno, py_arg.col_offset, py_arg.col_offset + len(name))
    return Param(TensorType(rng), Ident(rng, name))
def deprecate_method(method, old_name, removal_version=None, future_warn=False, error=False):
    """Wrap `method` so calls through its old name warn (or raise).

    Parameters
    ----------
    method : callable
        The replacement method/function.
    old_name : str
        The deprecated name; automatically prefixed with the owning class
        (taken from method.__qualname__) when one is detectable.
    removal_version : str, optional
        Version in which the old name will be removed (mentioned in the message).
    future_warn : bool
        Emit FutureWarning instead of DeprecationWarning.
    error : bool
        Raise NotImplementedError instead of warning (old name already removed).

    Returns
    -------
    callable
        Wrapper that warns/raises, then delegates to `method`.
    """
    new_name = method.__qualname__
    split_name = new_name.split('.')
    if len(split_name) > 1:
        # qualify the old name with the class so the message reads Class.old_name
        old_name = f'{split_name[0]}.{old_name}'
    if error:
        message = f'{old_name} has been removed, please use {new_name}.'
    else:
        message = f'{old_name} has been deprecated, please use {new_name}.'
        if removal_version is not None:
            message += f' It will be removed in version {removal_version} of SimPEG.'
        else:
            message += ' It will be removed in a future version of SimPEG.'

    def new_method(*args, **kwargs):
        if future_warn:
            warnings.warn(message, FutureWarning, stacklevel=2)
        elif error:
            raise NotImplementedError(message)
        else:
            warnings.warn(message, DeprecationWarning, stacklevel=2)
        return method(*args, **kwargs)

    # keep introspection/tracebacks useful for the deprecated alias
    new_method.__name__ = old_name.split('.')[-1]
    new_method.__doc__ = f'`{old_name}` has been deprecated. See `{new_name}` for documentation'
    return new_method
def BaulieuIII_calc(TP, FP, FN, TN):
    """Baulieu III distance from confusion-matrix counts.

    Computes (n^2 - 4(TP*TN - FP*FN)) / (2 n^2) with n the total count.
    Returns the string 'None' on any failure (e.g. all-zero counts), per
    the surrounding library's error convention.
    """
    try:
        n = TP + FP + FN + TN
        numerator = n * n - 4 * (TP * TN - FP * FN)
        return numerator / (2 * n * n)
    except Exception:
        return 'None'
def check_disjoint(a, b):
    """Return True iff formulas `a` and `b` cannot both be satisfied.

    Uses a fresh finite-domain solver; unsatisfiability of their
    conjunction means the two are disjoint.
    """
    solver = fd_solver()
    solver.add(a)
    solver.add(b)
    return solver.check() == unsat
def print_network(net):
    """Print a model's structure followed by its total parameter count."""
    total_params = sum(p.numel() for p in net.parameters())
    print('Network', net)
    print('Total number of parameters: %d' % total_params)
def SQLiteFileLock(*args, **kwds):
    """Factory for a SQLite-backed file lock.

    The import is deferred so the sqlite backend is only required when this
    lock type is actually used; all arguments are forwarded to the helper.
    """
    from . import sqlitelockfile
    return _fl_helper(sqlitelockfile.SQLiteLockFile, 'lockfile.sqlitelockfile', *args, **kwds)
def training_stopping_msg(best_val):
    """Announce early stopping, reporting the best validation accuracy seen."""
    message = '\nStopping training, validation accuracy not improving after {:.2f}\n'.format(best_val)
    print(message, flush=True)
def img_random_flip(image, choice):
    """Mirror the image horizontally and negate the steering value to match.

    choice is a (steering, throttle) pair; throttle is unchanged by the flip.
    Returns (flipped_image, [new_steering, throttle]).
    """
    flipped = cv2.flip(image, 1)
    steering, throttle = choice[0], choice[1]
    return (flipped, [-steering, throttle])
class RandomFlip(object):
    """Jointly mirror an image and its label map left-right with probability 0.5.

    Flipping both together keeps pixel-wise labels aligned with the image.
    """

    def __call__(self, rgb_img, label_img):
        if random.random() >= 0.5:
            return (rgb_img, label_img)
        return (rgb_img.transpose(Image.FLIP_LEFT_RIGHT), label_img.transpose(Image.FLIP_LEFT_RIGHT))
class ChunkCacheBuilder():
def __init__(self, broker_ref, cache_dir: str, source: ShardedDataset[T], processor: BatchProcessor[T], rows_per_chunk: int):
    """Set up per-shard status tracking and launch the writer/reader actors.

    Shards are partitioned into at most len(ray.nodes()) groups; each group
    gets one writer actor, one batch-processor queue, and one alternating
    reader actor. With zero shards the cache is finished immediately.
    """
    logging.basicConfig(level=logging.INFO)
    self.broker_ref = broker_ref
    self.shard_status: Dict[(str, _ShardStatus)] = dict()
    # round-robin order in which finished chunks are flushed to the broker
    self._current_round_robin = []
    self.source = source
    self._metrics = InProgressCacheMetrics()
    self_ref = current_actor_handle()
    if (len(source.shard_names) == 0):
        logger.warning('No shards to index?!?')
        self._finish()
    else:
        logger.info(f'Starting cache build for {len(source.shard_names)} shards')
        self._shard_writers = []
        self._shard_readers = []
        self._processor_actors = []
        for shard_name in source.shard_names:
            self.shard_status[shard_name] = _ShardStatus()
        num_shards = len(source.shard_names)
        def priority_fn(shard_idx, chunk_idx):
            # earlier chunks across all shards are processed before later ones
            return ((chunk_idx * num_shards) + shard_idx)
        # one shard group per cluster node, but never more groups than shards
        num_shard_groups = max(min(len(ray.nodes()), num_shards), 1)
        shard_groups: list[list[str]] = [[] for _ in range(num_shard_groups)]
        for (i, shard_name) in enumerate(source.shard_names):
            self._current_round_robin.append(shard_name)
            shard_groups[(i % num_shard_groups)].append(shard_name)
        for shard_group in shard_groups:
            writer = _GroupShardWriterWorker.remote(self_ref, cache_dir, shard_group)
            self._shard_writers.append(writer)
            processor_actor = _BatchProcessorQueue.remote(processor)
            self._processor_actors.append(processor_actor)
            reader = _alternating_shard_reader.remote(self_ref, writer, source, shard_group, priority_fn, processor_actor, processor.batch_size, rows_per_chunk)
            self._shard_readers.append(reader)
def new_chunk(self, shard_name: str, *chunks: ChunkMetadata):
self.shard_status[shard_name].current_buffer.extend(chunks)
self._attempt_to_flush_buffers()
self._metrics.chunks_finished += len(chunks)
for chunk in chunks:
self._metrics.rows_finished += chunk.num_rows
for (field, count) in chunk.field_counts.items():
self._metrics.field_counts[field] = (self._metrics.field_counts.get(field, 0) + count)
if (len(chunks) > 0):
ray.get(self.broker_ref._new_metrics.remote(self._metrics))
def shard_finished(self, shard_name: str, expected_num_chunks: int):
shard_status = self.shard_status[shard_name]
shard_status.expected_num_chunks = expected_num_chunks
self._attempt_to_flush_buffers()
self._metrics.shards_finished += 1
ray.get(self.broker_ref._new_metrics.remote(self._metrics))
if self._all_shards_done():
assert (len(self._current_round_robin) == 0)
self._finish()
def _all_shards_done(self):
return all((status.is_finished_and_buffer_empty for status in self.shard_status.values()))
def shard_failed(self, shard_name: str, error: ExceptionInfo):
ray.get(self.broker_ref._writer_exception.remote(shard_name, error))
def other_failed(self, error: ExceptionInfo):
ray.get(self.broker_ref._writer_exception.remote(None, error))
def _attempt_to_flush_buffers(self):
chunks_to_send = []
while (len(self._current_round_robin) > 0):
name = self._current_round_robin[0]
status = self.shard_status[name]
if status.is_finished_and_buffer_empty:
self._current_round_robin.pop(0)
logger.debug(f'Shard {name} is finished, removing from round robin')
continue
next_chunk = status.pop_chunk_to_send()
if (next_chunk is not None):
logger.debug(f'Sending chunk from {name}')
self._current_round_robin.pop(0)
self._current_round_robin.append(name)
chunks_to_send.append(next_chunk)
continue
else:
logger.debug(f'Shard {name} has no chunks to send and is not known to be finished')
break
if (len(chunks_to_send) > 0):
logger.debug(f'Sending {len(chunks_to_send)} chunks to broker')
ray.get(self.broker_ref._append_chunks.remote(*chunks_to_send))
def _finish(self):
self._metrics.is_finished = True
ray.get(self.broker_ref._new_metrics.remote(self._metrics))
ray.get(self.broker_ref._finalize.remote()) |
@register_task('audio_pretraining')
class AudioPretrainingTask(LegacyFairseqTask):
    """Fairseq task for pretraining on raw audio, optionally with labels (CTC).

    BUGFIX: the @register_task / @staticmethod / @classmethod / @property
    decorators had been stripped.  Without @property in particular,
    load_dataset's `self.target_dictionary.pad()` would fail because
    `target_dictionary` would be a bound method, not a Dictionary.
    """

    @staticmethod
    def add_args(parser):
        """Add task-level command-line arguments."""
        parser.add_argument('data', help='path to data directory')
        parser.add_argument('--sample-rate', default=16000, type=int, help='target sample rate. audio files will be up/down sampled to this rate')
        parser.add_argument('--normalize', action='store_true', help='if set, normalizes input to have 0 mean and unit variance')
        parser.add_argument('--max-sample-size', default=None, type=int, help='max sample size to crop to for batching. default = min sample length')
        parser.add_argument('--min-sample-size', default=None, type=int, help='min sample size to crop to for batching. default = same as --max-sample-size')
        parser.add_argument('--enable-padding', action='store_true', help='pad shorter samples instead of cropping')
        parser.add_argument('--labels', type=str, default=None, help='extension of the label file to load, if any')

    def __init__(self, args, source_dictionary=None):
        super().__init__(args)
        self._target_dictionary = None
        self._source_dictionary = source_dictionary
        # CTC criteria must not append the target sequence to the input.
        self.is_ctc = (args.criterion == 'ctc')

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Instantiate the task from parsed command-line arguments."""
        return cls(args)

    def load_dataset(self, split, **kwargs):
        """Load a split from '<data>/<split>.tsv'; wrap with targets if labels are set."""
        manifest = os.path.join(self.args.data, '{}.tsv'.format(split))
        # NOTE(review): min_sample_size is intentionally fed from
        # args.max_sample_size (matches upstream behavior) -- confirm before
        # "fixing" it to args.min_sample_size.
        self.datasets[split] = FileAudioDataset(manifest, sample_rate=self.args.sample_rate, max_sample_size=self.args.max_sample_size, min_sample_size=self.args.max_sample_size, min_length=self.args.min_sample_size, pad=((self.args.labels is not None) or self.args.enable_padding), normalize=self.args.normalize)
        if self.args.labels:
            dict_path = os.path.join(self.args.data, f'dict.{self.args.labels}.txt')
            self._target_dictionary = Dictionary.load(dict_path)
            label_path = os.path.join(self.args.data, f'{split}.{self.args.labels}')
            labels = []
            with open(label_path, 'r') as f:
                for line in f:
                    labels.append(line)
            process_label = LabelEncoder(self.target_dictionary)
            self.datasets[split] = AddTargetDataset(self.datasets[split], labels, pad=self.target_dictionary.pad(), eos=self.target_dictionary.eos(), batch_targets=True, process_label=process_label, add_to_input=(not self.is_ctc))

    @property
    def source_dictionary(self):
        """Dictionary for input tokens (None for raw-audio pretraining)."""
        return self._source_dictionary

    @property
    def target_dictionary(self):
        """Dictionary for output labels (None until a labeled split is loaded)."""
        return self._target_dictionary

    def max_positions(self):
        """Maximum (source, target) lengths supported by the task."""
        return (sys.maxsize, sys.maxsize)

    def filter_indices_by_size(self, indices, dataset, max_positions=None, ignore_invalid_inputs=False):
        # Size filtering is handled by the dataset itself; keep all indices.
        return indices
class FeatureFusionBlock_custom(nn.Module):
    """Fusion block: optional residual merge of a second input, residual
    refinement, 2x bilinear upsampling, and a 1x1 output projection."""

    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
        super(FeatureFusionBlock_custom, self).__init__()
        self.deconv = deconv
        self.align_corners = align_corners
        self.groups = 1
        self.expand = expand
        # Halve the channel count on the way out when expansion is requested.
        out_features = (features // 2) if (self.expand == True) else features
        self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
        self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
        self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
        # FloatFunctional keeps the add quantization-friendly.
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, *xs):
        """Fuse one or two feature maps and upsample the result by 2x."""
        fused = xs[0]
        if (len(xs) == 2):
            fused = self.skip_add.add(fused, self.resConfUnit1(xs[1]))
        fused = self.resConfUnit2(fused)
        fused = nn.functional.interpolate(fused, scale_factor=2, mode='bilinear', align_corners=self.align_corners)
        return self.out_conv(fused)
class MetaNetDefTest(unittest.TestCase):
    """Sanity checks for the MetaNetDef protobuf helpers."""

    def test_minimal(self):
        # Constructing a NetsMap entry directly should not raise.
        metanet_pb2.NetsMap(key='test_key', value=caffe2_pb2.NetDef())

    def test_adding_net(self):
        meta = metanet_pb2.MetaNetDef()
        meta.nets.add(key='test_key', value=caffe2_pb2.NetDef())

    def test_replace_blobs(self):
        meta = metanet_pb2.MetaNetDef()
        name = 'Test'
        first = ['AA']
        second = ['BB']
        replacement = ['CC']
        # Adding appends; a second add extends the stored list.
        pred_utils.AddBlobs(meta, name, first)
        self.assertEqual(first, pred_utils.GetBlobs(meta, name))
        pred_utils.AddBlobs(meta, name, second)
        self.assertEqual((first + second), pred_utils.GetBlobs(meta, name))
        # Replace discards everything added before.
        pred_utils.ReplaceBlobs(meta, name, replacement)
        self.assertEqual(replacement, pred_utils.GetBlobs(meta, name))
class ExplainStateEvolution(MessagePassing):
    """Message-passing scheduler that prints each message as it flows through the DAG."""

    def __init__(self, model, keys=[], print_incoming=True, print_outcoming=True):
        model.init_second_moments()
        super().__init__(model, message_keys=['a'])
        self.keys = keys
        self.print_incoming = print_incoming
        self.print_outcoming = print_outcoming

    def _evolve(self, node, message, evolve):
        """Optionally print the incoming message, apply *evolve*, print the result."""
        if self.print_incoming:
            print(f'{node}: incoming message')
            print(info_message(message, self.keys))
        result = evolve(message)
        if self.print_outcoming:
            print(f'{node}: outgoing message')
            print(info_message(result, self.keys))
        return result

    def forward(self, node, message):
        return self._evolve(node, message, node.forward_state_evolution)

    def backward(self, node, message):
        return self._evolve(node, message, node.backward_state_evolution)

    def update(self, variable, message):
        return dict(v=variable.posterior_v(message))

    def run(self, n_iter=1, initializer=None):
        """Initialize the message DAG and run n_iter forward/backward sweeps."""
        initializer = (initializer or ConstantInit(a=0, b=0))
        logger.info(f'init message dag with {initializer}')
        self.init_message_dag(initializer)
        for _ in range(n_iter):
            for (banner, sweep) in (('FORWARD PASS', self.forward_message), ('BACKWARD PASS', self.backward_message)):
                print(banner)
                print(('-' * len(banner)))
                sweep()
class DepthCompletion():
    """Batch-runs depth completion over the cropped KITTI validation set.

    Pairs each RGB image with its sparse velodyne depth map, runs
    design_depth_map.create_map on the pair, displays every intermediate
    stage in tiled OpenCV windows, and saves outputs for evaluation.
    """
    def __init__(self):
        # NOTE(review): backslash-separated paths assume Windows -- confirm target OS.
        self.main_img_path = os.path.expanduser('dataset\\kitti_validation_cropped\\image')
        self.input_depth_dir = os.path.expanduser('dataset\\kitti_validation_cropped\\velodyne_raw')
        # Preview window size as (width, height).
        self.img_size = (450, 130)
    def save_for_evaluation(self, sufficient_depth, img_name):
        """Save the raw completed depth map for the KITTI evaluation tooling."""
        path = 'outputs/kitti/depth_for_evaluation/'
        cv2.imwrite((path + img_name), sufficient_depth)
    def save_final_outputs(self, img, img_name):
        """Save a JET-colormapped visualization of the completed depth map."""
        path = 'outputs/kitti/final_output/'
        img = cv2.applyColorMap(np.uint8(((img / np.amax(img)) * 255)), cv2.COLORMAP_JET)
        cv2.imwrite((path + img_name), img)
    def process(self):
        """Run the full pipeline over every image pair, then print metrics."""
        main_img_pathes = os.listdir(self.main_img_path)
        main_image_list = []
        for item in main_img_pathes:
            main_image_list.append(cv2.imread(((self.main_img_path + '/') + item)))
        img_pathes = os.listdir(self.input_depth_dir)
        image_list = []
        for item in img_pathes:
            image_list.append(cv2.imread(((self.input_depth_dir + '/') + item), cv2.IMREAD_ANYDEPTH))
        num_images = len(image_list)
        # NOTE(review): pairing relies on os.listdir ordering matching across
        # the two directories -- confirm filenames correspond one-to-one.
        for i in range(num_images):
            depth_image = image_list[i]
            main_image = main_image_list[i]
            # Presumably /255.0 matches design_depth_map's expected depth
            # scaling -- confirm against create_map's contract.
            projected_depths = np.float32((depth_image / 255.0))
            (final_depths, process_dict) = design_depth_map.create_map(main_image, projected_depths, show_process=True)
            self.show_result(process_dict, main_image)
            self.save_for_evaluation(process_dict['s9_depths_out'], img_pathes[i])
            self.save_final_outputs(process_dict['s9_depths_out'], img_pathes[i])
        import metrics
        metrics.print_metrics()
    def show_image(self, window_name, image, size_wh=None, location_xy=None):
        """Display *image* in a named window, optionally resized and positioned."""
        if (size_wh is not None):
            cv2.namedWindow(window_name, (cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL))
            cv2.resizeWindow(window_name, *size_wh)
        else:
            cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
        if (location_xy is not None):
            cv2.moveWindow(window_name, *location_xy)
        cv2.imshow(window_name, image)
    def show_result(self, process_dict, main_image):
        """Tile every pipeline stage on screen, wrapping rows at max_x pixels."""
        x_offset = self.img_size[0]
        y_offset = self.img_size[1]
        x_padding = 0
        y_padding = 28
        x_start = 0
        y_start = 100
        img_x = x_start
        img_y = y_start
        max_x = 1500
        row_idx = 0
        for (key, value) in process_dict.items():
            if (key == 'main_image'):
                # The RGB input is shown as-is (no colormap).
                image_jet = main_image
                self.show_image(key, image_jet, self.img_size, (img_x, img_y))
                img_x += (x_offset + x_padding)
                # Wrap to the next row when the next window would cross max_x.
                if (((img_x + x_offset) + x_padding) > max_x):
                    img_x = x_start
                    row_idx += 1
                    img_y = (y_start + (row_idx * (y_offset + y_padding)))
            else:
                image_jet = cv2.applyColorMap(np.uint8(((value / np.amax(value)) * 255)), cv2.COLORMAP_JET)
                self.show_image(key, image_jet, self.img_size, (img_x, img_y))
                img_x += (x_offset + x_padding)
                if (((img_x + x_offset) + x_padding) > max_x):
                    img_x = x_start
                    row_idx += 1
                    img_y = (y_start + (row_idx * (y_offset + y_padding)))
        cv2.waitKey(delay=1)
def mlp(input, out_dim, name, is_train, reuse, norm=None, activation=None, dtype=tf.float32, bias=True):
    """Single fully-connected layer with optional activation and normalization.

    Creates variables 'w' and 'b' inside variable scope *name*.  Note the
    bias is always added regardless of the *bias* flag (kept for interface
    compatibility with callers).
    """
    with tf.variable_scope(name, reuse=reuse):
        in_dim = input.get_shape()[1]
        weight = tf.get_variable('w', [in_dim, out_dim], dtype, tf.random_normal_initializer(0.0, 0.02))
        result = tf.matmul(input, weight)
        offset = tf.get_variable('b', [out_dim], initializer=tf.constant_initializer(0.0))
        result = (result + offset)
        result = _activation(result, activation)
        return _norm(result, is_train, reuse, norm)
# BUGFIX: the decorator lines had lost their '@pytest.mark' prefix, leaving
# syntactically invalid bare '.parametrize(...)' lines; restored.
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('including_pad', [True, False])
@pytest.mark.parametrize('ignore_border', [True, False])
@pytest.mark.parametrize('channel_last', [False, True])
@pytest.mark.parametrize('inshape, kernel, stride, pad', [((4, 6), (2, 2), (2, 1), (1, 0)), ((2, 4, 6), (2, 2), (2, 1), (1, 0)), ((2, 2, 4, 6), (2, 2), (2, 1), (1, 0)), ((2, 2, 2, 4, 6), (2, 2), (1, 2), (0, 1))])
def test_average_pooling_2d(seed, inshape, kernel, stride, pad, ignore_border, channel_last, including_pad, ctx, func_name):
    """Check NNabla's average_pooling against the NumPy reference implementation."""
    from nbla_test_utils import function_tester
    if (channel_last and (not func_name.endswith('Cudnn'))):
        pytest.skip('Channel last is only supported in Cudnn so far')
    if channel_last:
        # Permute the input shape so data is generated channel-first, then
        # transposed by the tester.
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple((inshape[i] for i in t.inv_axes))
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [kernel, stride, ignore_border, pad, channel_last, including_pad]
    function_tester(rng, F.average_pooling, ref_average_pooling, inputs=inputs, func_args=func_args, func_name=func_name, ctx=ctx, atol_f=1e-06, atol_b=0.01)
class PreActBlock(nn.Module):
    """Pre-activation ResNet basic block (BN -> ReLU -> conv) with dropout.

    A 1x1 convolution shortcut is created only when the stride or channel
    count changes; otherwise the identity connection is used.

    BUGFIX: the drop_rate argument was accepted but ignored -- nn.Dropout was
    hard-coded to p=0.2.  It is now wired to drop_rate; the default (0.2)
    preserves the previous behavior for existing callers.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, drop_rate=0.2):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        if ((stride != 1) or (in_planes != (self.expansion * planes))):
            # Projection shortcut to match spatial size / channel count.
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, (self.expansion * planes), kernel_size=1, stride=stride, bias=False))
        self.drop = nn.Dropout(p=drop_rate)

    def forward(self, x):
        out = F.relu(self.bn1(x))
        # The projection shortcut (when present) branches off the
        # pre-activated input; otherwise the raw input is used.
        shortcut = (self.shortcut(out) if hasattr(self, 'shortcut') else x)
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        out += shortcut
        return self.drop(out)
class Issue111Test(ReBenchTestCase):
    """Regression tests for issue #111: data-point counts and means under warmup."""

    def setUp(self):
        super(Issue111Test, self).setUp()
        self._set_path(__file__)

    def _make_configurator(self, exp_name):
        """Build a fresh DataStore/Configurator pair for *exp_name*."""
        store = DataStore(self.ui)
        config = Configurator(load_config((self._path + '/issue_111.conf')), store, self.ui, exp_name=exp_name, data_file=self._tmp_file)
        return (store, config)

    def _run_and_verify(self, exp_name, expected_points, expected_mean):
        """Execute the experiment, then reload persisted data and re-verify."""
        (store, config) = self._make_configurator(exp_name)
        runs = config.get_runs()
        store.load_data(runs, False)
        self._assert_runs(config, 1, 0, 0)
        Executor(runs, False, self.ui).execute()
        self._assert_runs(config, 1, expected_points, 1)
        self.assertEqual(runs.pop().get_mean_of_totals(), expected_mean)
        # Second pass: the persisted data file must reproduce the same numbers.
        (store, config) = self._make_configurator(exp_name)
        runs = config.get_runs()
        store.load_data(runs, False)
        self._assert_runs(config, 1, expected_points, 1)
        self.assertEqual(runs.pop().get_mean_of_totals(), expected_mean)

    def test_invocation_and_mean_with_warmup_2(self):
        self._run_and_verify('test-warmup-2', 7, 10)

    def test_invocation_and_mean_with_warmup_0(self):
        self._run_and_verify('test-warmup-0', 9, 230)
def extract_mosei(args, dim):
    """Extract acoustic features for every .flac file in the MOSEI corpus.

    Features of type args.feature_type ('mel' | 'linear' | 'fbank') are
    computed in parallel and saved under '<output_path>/<feature_type><dim>'.
    If that directory already exists the user is prompted interactively
    before it is wiped; answering anything but 'Y' exits the process.
    """
    assert os.path.exists(args.flac_path), f'{args.flac_path} not exists'
    todo = list(Path(args.flac_path).glob('*.flac'))
    print(len(todo), 'audio files found in MOSEI')
    assert (args.feature_type in ['mel', 'linear', 'fbank']), 'Feature type unsupported'
    if (not os.path.exists(args.output_path)):
        os.makedirs(args.output_path)
    npy_dir = os.path.join(args.output_path, (str(args.feature_type) + str(dim)))
    for target_dir in [npy_dir]:
        if os.path.exists(target_dir):
            # Ask before destroying a previous extraction run.
            decision = input(f'{target_dir} already exists. Remove it? [Y/N]: ')
            if (decision.upper() == 'Y'):
                print(f'Removing {target_dir}')
                shutil.rmtree(target_dir)
            else:
                print('Abort')
                exit(0)
        os.makedirs(target_dir)
    print('Extracting acoustic feature...', flush=True)
    # NOTE(review): the save name strips the directory via str(file).split('/'),
    # which assumes POSIX path separators -- confirm behavior on Windows.
    tr_x = Parallel(n_jobs=args.n_jobs)((delayed(extract_feature)(str(file), feature=args.feature_type, cmvn=args.apply_cmvn, save_feature=os.path.join(npy_dir, str(file).split('/')[(- 1)].replace('.flac', ''))) for file in tqdm(todo)))
# NOTE(review): the decorator was garbled in the source ('_numpy_output()');
# restored as DaCe's compare_numpy_output test helper, which compiles the
# function as a dace program and compares it against plain NumPy -- confirm
# against the upstream test suite.
@compare_numpy_output()
def test_flip_3d_axis02(A: dace.int32[(10, 5, 7)]):
    """Flip a 3-D array along axes 0 and 2."""
    return np.flip(A, axis=(0, 2))
def imread(img_path):
    """Read an image file and return it in RGB channel order.

    OpenCV loads images as BGR, so a color conversion is applied.
    """
    bgr = cv2.imread(img_path)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
def __scale_width(img, target_size, crop_size, method=Image.BICUBIC):
    """Resize *img* so its width equals *target_size*, preserving aspect ratio
    while never letting the height fall below *crop_size*.

    Returns the image unchanged when it already satisfies both constraints.
    """
    (width, height) = img.size
    if ((width == target_size) and (height >= crop_size)):
        return img
    scaled_height = int(max(((target_size * height) / width), crop_size))
    return img.resize((target_size, scaled_height), method)
def resnet101(num_classes, loss='softmax', pretrained=True, **kwargs):
    """Build a ResNet-101 (Bottleneck x [3, 4, 23, 3]), optionally loading
    ImageNet-pretrained weights."""
    network = ResNet(num_classes=num_classes, loss=loss, block=Bottleneck, layers=[3, 4, 23, 3], last_stride=2, fc_dims=None, dropout_p=None, **kwargs)
    if pretrained:
        init_pretrained_weights(network, model_urls['resnet101'])
    return network
class DeepV3Plus(nn.Module):
    """DeepLabV3+ segmentation network with feature-stylization (FS) training
    support and optional contrastive / style / self-regularization losses.

    BUGFIX: two `raise '<string>'` statements raised bare strings, which is a
    TypeError in Python 3 and would mask the intended message; they now raise
    ValueError with the same text.
    """
    def __init__(self, num_classes, trunk='resnet-50', criterion=None, criterion_aux=None, cont_proj_head=0, wild_cont_dict_size=0, variant='D16', skip='m1', skip_num=48, args=None):
        super(DeepV3Plus, self).__init__()
        self.args = args
        self.criterion = criterion
        self.criterion_aux = criterion_aux
        self.criterion_kl = nn.KLDivLoss(reduction='batchmean').cuda()
        self.cont_proj_head = cont_proj_head
        if (wild_cont_dict_size > 0):
            if (cont_proj_head > 0):
                # Dictionary of wild-content embeddings held as a normalized
                # (dim, size) buffer with a ring-buffer write pointer.
                self.cont_dict = {}
                self.cont_dict['size'] = wild_cont_dict_size
                self.cont_dict['dim'] = self.cont_proj_head
                self.register_buffer('wild_cont_dict', torch.randn(self.cont_dict['dim'], self.cont_dict['size']))
                self.wild_cont_dict = nn.functional.normalize(self.wild_cont_dict, p=2, dim=0)
                self.register_buffer('wild_cont_dict_ptr', torch.zeros(1, dtype=torch.long))
                self.cont_dict['wild'] = self.wild_cont_dict.cuda()
                self.cont_dict['wild_ptr'] = self.wild_cont_dict_ptr
            else:
                # BUGFIX: was `raise '<str>'` (TypeError in Python 3).
                raise ValueError('dimension of wild-content dictionary is zero')
        self.variant = variant
        self.trunk = trunk
        channel_1st = 3
        channel_2nd = 64
        channel_3rd = 256
        channel_4th = 512
        prev_final_channel = 1024
        final_channel = 2048
        if (trunk == 'resnet-50'):
            resnet = Resnet.resnet50(fs_layer=self.args.fs_layer)
            resnet.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)
        else:
            raise ValueError('Not a valid network arch')
        self.layer0 = resnet.layer0
        (self.layer1, self.layer2, self.layer3, self.layer4) = (resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4)
        if (self.variant == 'D16'):
            os = 16
            # Dilate layer4 convolutions instead of striding, so the output
            # stride stays at 16.
            for (n, m) in self.layer4.named_modules():
                if ('conv2' in n):
                    (m.dilation, m.padding, m.stride) = ((2, 2), (2, 2), (1, 1))
                elif ('downsample.0' in n):
                    m.stride = (1, 1)
        else:
            # BUGFIX: was `raise '<str>'` (TypeError in Python 3).
            raise ValueError('unknown deepv3 variant: {}'.format(self.variant))
        self.output_stride = os
        self.aspp = _AtrousSpatialPyramidPoolingModule(final_channel, 256, output_stride=os)
        self.bot_fine = nn.Sequential(nn.Conv2d(channel_3rd, 48, kernel_size=1, bias=False), Norm2d(48), nn.ReLU(inplace=True))
        self.bot_aspp = nn.Sequential(nn.Conv2d(1280, 256, kernel_size=1, bias=False), Norm2d(256), nn.ReLU(inplace=True))
        self.final1 = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, padding=1, bias=False), Norm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False), Norm2d(256), nn.ReLU(inplace=True))
        self.final2 = nn.Sequential(nn.Conv2d(256, num_classes, kernel_size=1, bias=True))
        # Auxiliary (deep supervision) head off layer3's output.
        self.dsn = nn.Sequential(nn.Conv2d(prev_final_channel, 512, kernel_size=3, stride=1, padding=1), Norm2d(512), nn.ReLU(inplace=True), nn.Dropout2d(0.1), nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True))
        initialize_weights(self.dsn)
        initialize_weights(self.aspp)
        initialize_weights(self.bot_aspp)
        initialize_weights(self.bot_fine)
        initialize_weights(self.final1)
        initialize_weights(self.final2)
        if (self.cont_proj_head > 0):
            # Projection head used by the contrastive (content-extension) loss.
            self.proj = nn.Sequential(nn.Linear(256, 256, bias=True), nn.ReLU(inplace=False), nn.Linear(256, self.cont_proj_head, bias=True))
            initialize_weights(self.proj)
        self.eps = 1e-05
        self.whitening = False
        # NOTE(review): these channel lists are currently unused -- presumably
        # vestigial from another variant; kept for reference.
        if (trunk == 'resnet-50'):
            in_channel_list = [0, 0, 64, 256, 512, 1024, 2048]
            out_channel_list = [0, 0, 32, 128, 256, 512, 1024]
        else:
            raise ValueError('Not a valid network arch')
    def forward(self, x, gts=None, aux_gts=None, x_w=None, apply_fs=False):
        """Run segmentation; during training with apply_fs, also process the
        wild image x_w and the stylized stream to compute the extra losses.

        Returns the predicted logits in eval mode, or a list of losses in
        training mode.  (`self.training & apply_fs` uses bitwise-and on bools,
        which is equivalent to `and` here.)
        """
        x_size = x.size()
        x = self.layer0[0](x)
        if (self.training & apply_fs):
            with torch.no_grad():
                x_w = self.layer0[0](x_w)
        x = self.layer0[1](x)
        if (self.training & apply_fs):
            # Stylized stream: content from x, style from x_w (FS layer).
            x_sw = self.layer0[1](x, x_w)
            with torch.no_grad():
                x_w = self.layer0[1](x_w)
        x = self.layer0[2](x)
        x = self.layer0[3](x)
        if (self.training & apply_fs):
            with torch.no_grad():
                x_w = self.layer0[2](x_w)
                x_w = self.layer0[3](x_w)
            x_sw = self.layer0[2](x_sw)
            x_sw = self.layer0[3](x_sw)
        if (self.training & apply_fs):
            x_tuple = self.layer1([x, x_w, x_sw])
            low_level = x_tuple[0]
            low_level_w = x_tuple[1]
            low_level_sw = x_tuple[2]
        else:
            x_tuple = self.layer1([x])
            low_level = x_tuple[0]
        x_tuple = self.layer2(x_tuple)
        x_tuple = self.layer3(x_tuple)
        aux_out = x_tuple[0]
        if (self.training & apply_fs):
            aux_out_w = x_tuple[1]
            aux_out_sw = x_tuple[2]
        x_tuple = self.layer4(x_tuple)
        x = x_tuple[0]
        if (self.training & apply_fs):
            x_w = x_tuple[1]
            x_sw = x_tuple[2]
        x = self.aspp(x)
        dec0_up = self.bot_aspp(x)
        dec0_fine = self.bot_fine(low_level)
        dec0_up = Upsample(dec0_up, low_level.size()[2:])
        dec0 = [dec0_fine, dec0_up]
        dec0 = torch.cat(dec0, 1)
        dec1 = self.final1(dec0)
        dec2 = self.final2(dec1)
        main_out = Upsample(dec2, x_size[2:])
        if self.training:
            loss_orig = self.criterion(main_out, gts)
            aux_out = self.dsn(aux_out)
            if (aux_gts.dim() == 1):
                aux_gts = gts
            aux_gts = aux_gts.unsqueeze(1).float()
            aux_gts = nn.functional.interpolate(aux_gts, size=aux_out.shape[2:], mode='nearest')
            aux_gts = aux_gts.squeeze(1).long()
            loss_orig_aux = self.criterion_aux(aux_out, aux_gts)
            return_loss = [loss_orig, loss_orig_aux]
            if apply_fs:
                # Decode the stylized stream with gradients...
                x_sw = self.aspp(x_sw)
                dec0_up_sw = self.bot_aspp(x_sw)
                dec0_fine_sw = self.bot_fine(low_level_sw)
                dec0_up_sw = Upsample(dec0_up_sw, low_level_sw.size()[2:])
                dec0_sw = [dec0_fine_sw, dec0_up_sw]
                dec0_sw = torch.cat(dec0_sw, 1)
                dec1_sw = self.final1(dec0_sw)
                dec2_sw = self.final2(dec1_sw)
                main_out_sw = Upsample(dec2_sw, x_size[2:])
                # ...and the wild stream without gradients.
                with torch.no_grad():
                    x_w = self.aspp(x_w)
                    dec0_up_w = self.bot_aspp(x_w)
                    dec0_fine_w = self.bot_fine(low_level_w)
                    dec0_up_w = Upsample(dec0_up_w, low_level_w.size()[2:])
                    dec0_w = [dec0_fine_w, dec0_up_w]
                    dec0_w = torch.cat(dec0_w, 1)
                    dec1_w = self.final1(dec0_w)
                    dec2_w = self.final2(dec1_w)
                    main_out_w = Upsample(dec2_w, x_size[2:])
                if self.args.use_cel:
                    # Content-extension (contrastive) loss on projected features.
                    assert (self.cont_proj_head > 0)
                    proj2 = self.proj(dec1.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
                    proj2_sw = self.proj(dec1_sw.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
                    with torch.no_grad():
                        proj2_w = self.proj(dec1_w.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
                    loss_cel = get_content_extension_loss(proj2, proj2_sw, proj2_w, gts, self.cont_dict)
                    return_loss.append(loss_cel)
                if self.args.use_sel:
                    # Supervised losses on the stylized stream.
                    loss_sel = self.criterion(main_out_sw, gts)
                    aux_out_sw = self.dsn(aux_out_sw)
                    loss_sel_aux = self.criterion_aux(aux_out_sw, aux_gts)
                    return_loss.append(loss_sel)
                    return_loss.append(loss_sel_aux)
                if self.args.use_scr:
                    # Semantic-consistency regularization: KL between stylized
                    # and original predictions, normalized per element.
                    loss_scr = torch.clamp((self.criterion_kl(nn.functional.log_softmax(main_out_sw, dim=1), nn.functional.softmax(main_out, dim=1)) / torch.prod(torch.tensor(main_out.shape[1:]))), min=0)
                    loss_scr_aux = torch.clamp((self.criterion_kl(nn.functional.log_softmax(aux_out_sw, dim=1), nn.functional.softmax(aux_out, dim=1)) / torch.prod(torch.tensor(aux_out.shape[1:]))), min=0)
                    return_loss.append(loss_scr)
                    return_loss.append(loss_scr_aux)
            return return_loss
        else:
            return main_out
class Trainer():
    """Training/inference harness for Zero-DCE low-light image enhancement."""
    def __init__(self):
        self.dataloader = None
        self.model = None
        self.color_loss = None
        self.exposure_loss = None
        self.illumination_smoothing_loss = None
        self.spatial_consistency_loss = None
        self.optimizer = None
    def build_dataloader(self, image_path, image_size=256, batch_size=8, num_workers=4):
        """Create the shuffled low-light training dataloader."""
        dataset = LowLightDataset(image_files=image_path, image_size=image_size)
        self.dataloader = data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True)
    def build_model(self, pretrain_weights=None):
        """Instantiate DCENet on the GPU, optionally loading pretrained weights."""
        self.model = DCENet().cuda()
        self.model.apply(weights_init)
        if (pretrain_weights is not None):
            self.load_weights(pretrain_weights)
    def compile(self, pretrain_weights=None, learning_rate=0.0001, weight_decay=0.0001):
        """Build the model, the four Zero-DCE losses, and the Adam optimizer."""
        self.build_model(pretrain_weights=pretrain_weights)
        self.color_loss = ColorConstancyLoss().cuda()
        self.spatial_consistency_loss = SpatialConsistancyLoss().cuda()
        self.exposure_loss = ExposureLoss(patch_size=16, mean_val=0.6).cuda()
        self.illumination_smoothing_loss = IlluminationSmoothnessLoss().cuda()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    def _train_step(self, image_lowlight):
        """Run one optimization step and return the scalar loss value."""
        image_lowlight = image_lowlight.cuda()
        (enhanced_image_1, enhanced_image, A) = self.model(image_lowlight)
        # Weighted sum of the four Zero-DCE losses (200/1/5/10 weights).
        loss_tv = (200 * self.illumination_smoothing_loss(A))
        loss_spa = torch.mean(self.spatial_consistency_loss(enhanced_image, image_lowlight))
        loss_col = (5 * torch.mean(self.color_loss(enhanced_image)))
        loss_exp = (10 * torch.mean(self.exposure_loss(enhanced_image)))
        loss = (((loss_tv + loss_spa) + loss_col) + loss_exp)
        self.optimizer.zero_grad()
        loss.backward()
        # NOTE(review): clip_grad_norm is deprecated (removed in recent
        # PyTorch) in favor of clip_grad_norm_ -- confirm torch version.
        torch.nn.utils.clip_grad_norm(self.model.parameters(), 0.1)
        self.optimizer.step()
        return loss.item()
    def save_model(self, path):
        """Persist the model's state dict to *path*."""
        torch.save(self.model.state_dict(), path)
    def _log_step(self, loss, epoch, iteration):
        """Log the loss to wandb and checkpoint every 20th epoch."""
        wandb.log({'Loss': loss})
        if ((epoch % 20) == 0):
            self.save_model(os.path.join('./checkpoints/', 'model_{}_{}.pth'.format(epoch, iteration)))
    def train(self, epochs=200, log_frequency=100, notebook=True):
        """Main training loop.

        NOTE(review): assigning `tqdm` inside the `if notebook:` branch makes
        it a function-local name, so calling train(notebook=False) raises
        UnboundLocalError unless shadowing is avoided -- this looks like a
        bug; confirm intended usage is notebook-only.
        """
        wandb.watch(self.model)
        self.model.train()
        if notebook:
            from tqdm.notebook import tqdm as tqdm_notebook
            tqdm = tqdm_notebook
        for epoch in range(1, (epochs + 1)):
            print('Epoch {}/{}'.format(epoch, epochs))
            for (iteration, image_lowlight) in tqdm(enumerate(self.dataloader)):
                loss = self._train_step(image_lowlight)
                if ((iteration % log_frequency) == 0):
                    self._log_step(loss, epoch, iteration)
    def load_weights(self, weights_path):
        """Load a state dict from disk into the current model."""
        self.model.load_state_dict(torch.load(weights_path))
    def infer_cpu(self, image_path, image_resize_factor=None):
        """Enhance one image on the CPU; returns (original PIL image, HWC array).

        NOTE(review): if image_resize_factor is None, `image` is never bound
        and the np.asarray call below raises -- callers appear to always pass
        a factor; confirm.
        """
        with torch.no_grad():
            image_lowlight = Image.open(image_path)
            (width, height) = image_lowlight.size
            if (image_resize_factor is not None):
                image = image_lowlight.resize(((width // image_resize_factor), (height // image_resize_factor)), Image.ANTIALIAS)
            lowlight = (np.asarray(image) / 255.0)
            lowlight = torch.from_numpy(lowlight).float()
            lowlight = lowlight.permute(2, 0, 1)
            lowlight = lowlight.unsqueeze(0)
            model = self.model.cpu()
            (_, enhanced, _) = model(lowlight)
            enhanced = enhanced.squeeze().permute(1, 2, 0)
            return (image_lowlight, enhanced.numpy())
    def infer_gpu(self, image_path, image_resize_factor=None):
        """Enhance one image on the GPU; returns (original PIL image, HWC array).

        Same possibly-unbound `image` caveat as infer_cpu.
        """
        with torch.no_grad():
            image_lowlight = Image.open(image_path)
            (width, height) = image_lowlight.size
            if (image_resize_factor is not None):
                image = image_lowlight.resize(((width // image_resize_factor), (height // image_resize_factor)), Image.ANTIALIAS)
            lowlight = (np.asarray(image) / 255.0)
            lowlight = torch.from_numpy(lowlight).float()
            lowlight = lowlight.permute(2, 0, 1)
            lowlight = lowlight.cuda().unsqueeze(0)
            (_, enhanced, _) = self.model(lowlight)
            enhanced = enhanced.squeeze().permute(1, 2, 0)
            return (image_lowlight, enhanced.cpu().numpy())
class ODEfunc(nn.Module):
    """Wraps a dynamics network for use with a continuous-normalizing-flow solver.

    Alongside the state derivative it returns the negative approximate
    divergence (via divergence_approx), so integrating the augmented system
    tracks the change in log-density.
    """
    def __init__(self, diffeq):
        super(ODEfunc, self).__init__()
        self.diffeq = diffeq
        # Stochastic (Hutchinson-style) divergence estimator.
        self.divergence_fn = divergence_approx
        # Counts solver evaluations of forward().
        self.register_buffer('_num_evals', torch.tensor(0.0))
    def before_odeint(self, e=None):
        """Reset the eval counter and optionally fix the probe noise *e*."""
        self._e = e
        self._num_evals.fill_(0)
    def forward(self, t, states):
        """Evaluate the augmented dynamics at time *t*.

        *states* is (y, logp) or (y, logp, c) with an optional conditioning
        tensor c; the returned derivative for c is zero.
        """
        y = states[0]
        # Broadcast scalar t to a (batch, 1) column, detached from the solver graph.
        t = (torch.ones(y.size(0), 1).to(y) * t.clone().detach().requires_grad_(True).type_as(y))
        self._num_evals += 1
        for state in states:
            state.requires_grad_(True)
        if (self._e is None):
            # Sample the divergence probe noise once per solver call.
            self._e = torch.randn_like(y, requires_grad=True).to(y)
        # Gradients must be enabled even under torch.no_grad(), since the
        # divergence estimate differentiates dy w.r.t. y.
        with torch.set_grad_enabled(True):
            if (len(states) == 3):
                c = states[2]
                tc = torch.cat([t, c.view(y.size(0), (- 1))], dim=1)
                dy = self.diffeq(tc, y)
                divergence = self.divergence_fn(dy, y, e=self._e).unsqueeze((- 1))
                return (dy, (- divergence), torch.zeros_like(c).requires_grad_(True))
            elif (len(states) == 2):
                dy = self.diffeq(t, y)
                divergence = self.divergence_fn(dy, y, e=self._e).view((- 1), 1)
                return (dy, (- divergence))
            else:
                assert 0, '`len(states)` should be 2 or 3'
def _to2d(coors):
if (coors.shape[1] == 1):
coors = nm.c_[(coors, nm.zeros_like(coors))]
return coors |
class FlaxGPTJModel(metaclass=DummyObject):
    """Import-time placeholder for the real FlaxGPTJModel.

    Instantiation delegates to requires_backends, which presumably raises a
    helpful error when the 'flax' backend is not installed -- confirm against
    the utils module that defines it.
    """
    # Backends that must be available for the real class to be usable.
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def download_extract(url, root, filename, md5):
    """Download *url* into *root* as *filename* (verifying *md5*) and untar it there.

    NOTE(review): tar.extractall() without a member filter is vulnerable to
    path traversal if the archive is untrusted; consider
    extractall(path=root, filter='data') on Python >= 3.12 (see the CPython
    tarfile docs).
    """
    download_url(url, root, filename, md5)
    with tarfile.open(os.path.join(root, filename), 'r') as tar:
        tar.extractall(path=root)
('pyscipopt')
# NOTE(review): the line above appears to be a truncated guard or decorator
# (possibly `pytest.importorskip('pyscipopt')`) -- confirm against upstream
# before relying on this test module.
class TestSCIPBackend(GenericBackendTests):
    """Runs the generic MIP backend test suite against the SCIP solver."""
    def backend(self) -> GenericBackend:
        # Presumably consumed as a fixture by GenericBackendTests -- confirm.
        return MixedIntegerLinearProgram(solver='SCIP').get_backend()
# NOTE(review): the decorators were garbled in the source ('_utils.test(...)'
# and '_func'); restored as taichi's test harness decorators -- confirm the
# exact decorator names against the upstream taichi test suite.
@test_utils.test(arch=[ti.cpu, ti.cuda])
def test_function_without_return():
    """A ti function with no return annotation can still mutate a field."""
    x = ti.field(ti.i32, shape=())

    @ti.experimental.real_func
    def foo(val: ti.i32):
        x[None] += val

    @ti.kernel
    def run():
        foo(40)
        foo(2)

    x[None] = 0
    run()
    assert (x[None] == 42)
def main(args):
    """Build a struc2vec multigraph PyG data object from an edge list and labels.

    Reads node labels (and, for disassortative datasets, per-node features)
    from args.nodelabels_file, builds the struc2vec layers, assembles the
    multigraph, and saves it to args.output_file with torch.save.

    BUGFIX: np.float was removed in NumPy 1.24 -- replaced with np.float64
    (the exact type the alias referred to).  The label file is now opened via
    a context manager so the handle is always closed.
    """
    logging.basicConfig(filename='struc2vec.log', filemode='w', level=logging.DEBUG, format='%(asctime)s %(message)s')
    G = read_graph(args.edgelist_file)
    build_struc_layers(G, args.OPT1, args.OPT2, args.OPT3, args.until_layer, args.workers)
    with open(args.nodelabels_file, 'r') as fin:
        if args.disassortative:
            # Tab-separated lines: node_id \t comma-separated features \t label
            # (first line is a header and is skipped).
            tmp = fin.readlines()[1:]
            d = {}
            for l in tmp:
                n_id = int(l.split('\t')[0])
                n_f = list(map(int, l.split('\t')[1].split(',')))
                n_l = int(l.split('\t')[2].split('\n')[0])
                d[n_id] = (n_f, n_l)
            y = []
            nfs = []
            for n in sorted(d):
                if (args.dataset == 'film'):
                    # 'film' stores feature indices; expand to a 932-dim multi-hot vector.
                    features = np.zeros(932, dtype=np.float64)
                    features[d[n][0]] = 1.0
                    nfs.append(features)
                else:
                    nfs.append(d[n][0])
                y.append(d[n][1])
            y = torch.LongTensor(y)
            x = torch.LongTensor(nfs)
            networkx_graph = nx.read_edgelist(args.edgelist_file, nodetype=int, comments='node', delimiter='\t')
        else:
            # Assortative datasets: labels are one Python-list-formatted line.
            x = None
            tmp = fin.readlines()[0]
            y = tmp.strip('][').split(', ')
            y = list(map(int, y))
            y = torch.LongTensor(y)
            networkx_graph = nx.read_edgelist(args.edgelist_file, nodetype=int)
    data = build_multigraph_from_layers(networkx_graph, y, x)
    print(data)
    try:
        os.makedirs(os.path.dirname(args.output_file))
    except OSError:
        # The output directory already exists.
        pass
    torch.save(data, args.output_file)
    print('pyg data saved')
class Data():
    """Builds the train/test dataloaders for the dataset named by args.data_train."""

    def __init__(self, args):
        # Pinned host memory only helps when batches are moved to a GPU.
        loader_kwargs = {'num_workers': args.n_threads, 'pin_memory': (not args.cpu)}
        dataset_module = import_module(('data.' + args.data_train.lower()))
        (self.loader_train, self.loader_test) = dataset_module.get_loader(args, loader_kwargs)
class Git(VersionControl):
    """Version-control backend for Git repositories (pip-style API)."""

    name = 'git'
    dirname = '.git'
    repo_name = 'clone'
    # BUG FIX: this tuple was corrupted (unterminated string literals made
    # the module unparseable); restored the canonical scheme list.
    schemes = ('git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file')
    # Prevent the user's environment from redirecting git to another repo.
    unset_environ = ('GIT_DIR', 'GIT_WORK_TREE')
    default_arg_rev = 'HEAD'

    # BUG FIX: the decorator was stripped; without it, calling
    # self.get_base_rev_args(rev) would pass the instance as `rev`.
    @staticmethod
    def get_base_rev_args(rev):
        return [rev]

    def is_immutable_rev_checkout(self, url, dest):
        """Return True iff *dest* is pinned to an immutable revision of *url*."""
        (_, rev_options) = self.get_url_rev_options(hide_url(url))
        if not rev_options.rev:
            return False
        if not self.is_commit_id_equal(dest, rev_options.rev):
            # The requested rev was not a literal commit id of the checkout.
            return False
        # A rev that resolves via show-ref is a branch or tag, i.e. mutable.
        is_tag_or_branch = bool(self.get_revision_sha(dest, rev_options.rev)[0])
        return not is_tag_or_branch

    def get_git_version(self):
        """Return the installed git version as a parsed version object."""
        VERSION_PFX = 'git version '
        version = self.run_command(['version'])
        if version.startswith(VERSION_PFX):
            version = version[len(VERSION_PFX):].split()[0]
        else:
            version = ''
        # Keep at most three components (e.g. "2.33.0.windows.2" -> "2.33.0").
        version = '.'.join(version.split('.')[:3])
        return parse_version(version)

    # BUG FIX: restored @classmethod (method is written against `cls`).
    @classmethod
    def get_current_branch(cls, location):
        """Return the current branch name, or None on a detached HEAD."""
        # symbolic-ref exits with 1 (and prints nothing) on a detached HEAD,
        # hence the extra_ok_returncodes.
        args = ['symbolic-ref', '-q', 'HEAD']
        output = cls.run_command(args, extra_ok_returncodes=(1,), cwd=location)
        ref = output.strip()
        if ref.startswith('refs/heads/'):
            return ref[len('refs/heads/'):]
        return None

    def export(self, location, url):
        """Export the repo at *url* into *location* (without .git metadata)."""
        if not location.endswith('/'):
            location = location + '/'
        with TempDirectory(kind='export') as temp_dir:
            self.unpack(temp_dir.path, url=url)
            self.run_command(['checkout-index', '-a', '-f', '--prefix', location], cwd=temp_dir.path)

    @classmethod
    def get_revision_sha(cls, dest, rev):
        """Return (sha, is_branch) for *rev*, or (None, False) if unknown."""
        output = ''
        try:
            # Pass rev to pre-filter the list of refs.
            output = cls.run_command(['show-ref', rev], cwd=dest)
        except SubProcessError:
            # show-ref exits non-zero when the ref does not exist.
            pass
        refs = {}
        for line in output.strip().splitlines():
            try:
                (sha, ref) = line.split()
            except ValueError:
                raise ValueError('unexpected show-ref line: {!r}'.format(line))
            refs[ref] = sha
        branch_ref = 'refs/remotes/origin/{}'.format(rev)
        tag_ref = 'refs/tags/{}'.format(rev)
        sha = refs.get(branch_ref)
        if sha is not None:
            return (sha, True)
        sha = refs.get(tag_ref)
        return (sha, False)

    @classmethod
    def resolve_revision(cls, dest, url, rev_options):
        """Resolve the requested rev to a concrete sha where possible."""
        rev = rev_options.arg_rev
        assert rev is not None
        (sha, is_branch) = cls.get_revision_sha(dest, rev)
        if sha is not None:
            rev_options = rev_options.make_new(sha)
            rev_options.branch_name = rev if is_branch else None
            return rev_options
        # Not a branch or tag: could be a (short) sha or an arbitrary ref.
        if not looks_like_hash(rev):
            logger.warning("Did not find branch or tag '%s', assuming revision or ref.", rev)
        if not rev.startswith('refs/'):
            return rev_options
        # Refs like refs/pull/123/head must be fetched explicitly.
        cls.run_command(make_command('fetch', '-q', url, rev_options.to_args()), cwd=dest)
        sha = cls.get_revision(dest, rev='FETCH_HEAD')
        rev_options = rev_options.make_new(sha)
        return rev_options

    @classmethod
    def is_commit_id_equal(cls, dest, name):
        """Return True iff the checkout at *dest* is exactly commit *name*."""
        if not name:
            # A None or empty name can never match a commit id.
            return False
        return cls.get_revision(dest) == name

    def fetch_new(self, dest, url, rev_options):
        """Clone *url* into *dest* and check out the requested revision."""
        rev_display = rev_options.to_display()
        logger.info('Cloning %s%s to %s', url, rev_display, display_path(dest))
        self.run_command(make_command('clone', '-q', url, dest))
        if rev_options.rev:
            rev_options = self.resolve_revision(dest, url, rev_options)
            branch_name = getattr(rev_options, 'branch_name', None)
            if branch_name is None:
                # Only do a checkout if the current commit id doesn't match
                # the requested revision.
                if not self.is_commit_id_equal(dest, rev_options.rev):
                    cmd_args = make_command('checkout', '-q', rev_options.to_args())
                    self.run_command(cmd_args, cwd=dest)
            elif self.get_current_branch(dest) != branch_name:
                # Checkout the requested branch with a local tracking branch.
                track_branch = 'origin/{}'.format(branch_name)
                cmd_args = ['checkout', '-b', branch_name, '--track', track_branch]
                self.run_command(cmd_args, cwd=dest)
        self.update_submodules(dest)

    def switch(self, dest, url, rev_options):
        """Repoint *dest* at *url* and check out the requested revision."""
        self.run_command(make_command('config', 'remote.origin.url', url), cwd=dest)
        cmd_args = make_command('checkout', '-q', rev_options.to_args())
        self.run_command(cmd_args, cwd=dest)
        self.update_submodules(dest)

    def update(self, dest, url, rev_options):
        """Fetch and hard-reset *dest* to the requested revision."""
        # `fetch --tags` was added in git 1.9.0; fall back for older gits.
        if self.get_git_version() >= parse_version('1.9.0'):
            self.run_command(['fetch', '-q', '--tags'], cwd=dest)
        else:
            self.run_command(['fetch', '-q'], cwd=dest)
        rev_options = self.resolve_revision(dest, url, rev_options)
        cmd_args = make_command('reset', '--hard', '-q', rev_options.to_args())
        self.run_command(cmd_args, cwd=dest)
        self.update_submodules(dest)

    @classmethod
    def get_remote_url(cls, location):
        """Return the URL of the 'origin' remote (or the first remote)."""
        # --get-regexp returns 1 when there are no matching remotes.
        stdout = cls.run_command(['config', '--get-regexp', 'remote\\..*\\.url'], extra_ok_returncodes=(1,), cwd=location)
        remotes = stdout.splitlines()
        try:
            found_remote = remotes[0]
        except IndexError:
            raise RemoteNotFoundError
        for remote in remotes:
            if remote.startswith('remote.origin.url '):
                found_remote = remote
                break
        url = found_remote.split(' ')[1]
        return url.strip()

    @classmethod
    def get_revision(cls, location, rev=None):
        """Return the full sha of *rev* (HEAD by default) at *location*."""
        if rev is None:
            rev = 'HEAD'
        current_rev = cls.run_command(['rev-parse', rev], cwd=location)
        return current_rev.strip()

    @classmethod
    def get_subdirectory(cls, location):
        """Return the relative path from the repo root to *location*'s setup."""
        # git-dir may be relative to the cwd the command ran in.
        git_dir = cls.run_command(['rev-parse', '--git-dir'], cwd=location).strip()
        if not os.path.isabs(git_dir):
            git_dir = os.path.join(location, git_dir)
        repo_root = os.path.abspath(os.path.join(git_dir, '..'))
        return find_path_to_setup_from_repo_root(location, repo_root)

    @classmethod
    def get_url_rev_and_auth(cls, url):
        """Normalize git+file / scp-style URLs before delegating to the base."""
        (scheme, netloc, path, query, fragment) = urlsplit(url)
        if scheme.endswith('file'):
            # Convert the path portion to a proper local path.
            initial_slashes = path[:(-len(path.lstrip('/')))]
            newpath = initial_slashes + urllib_request.url2pathname(path).replace('\\', '/').lstrip('/')
            url = urlunsplit((scheme, netloc, newpath, query, fragment))
            after_plus = scheme.find('+') + 1
            url = scheme[:after_plus] + urlunsplit((scheme[after_plus:], netloc, newpath, query, fragment))
        if '://' not in url:
            # scp-style shorthand (git+user@host:path): temporarily add an
            # ssh:// scheme so the base class can parse it, then strip it.
            assert 'file:' not in url
            url = url.replace('git+', 'git+ssh://')
            (url, rev, user_pass) = super(Git, cls).get_url_rev_and_auth(url)
            url = url.replace('ssh://', '')
        else:
            (url, rev, user_pass) = super(Git, cls).get_url_rev_and_auth(url)
        return (url, rev, user_pass)

    @classmethod
    def update_submodules(cls, location):
        """Initialize/update submodules, if the repo declares any."""
        if not os.path.exists(os.path.join(location, '.gitmodules')):
            return
        cls.run_command(['submodule', 'update', '--init', '--recursive', '-q'], cwd=location)

    @classmethod
    def get_repository_root(cls, location):
        """Return the toplevel of the repo containing *location*, or None."""
        loc = super(Git, cls).get_repository_root(location)
        if loc:
            return loc
        try:
            r = cls.run_command(['rev-parse', '--show-toplevel'], cwd=location, log_failed_cmd=False)
        except BadCommand:
            logger.debug('could not determine if %s is under git control because git is not available', location)
            return None
        except SubProcessError:
            return None
        return os.path.normpath(r.rstrip('\r\n'))
class KerasDDPGAgent(KerasAgent):
    """DDPG agent with small fully-connected actor/critic networks."""

    def __init__(self, observation_space, action_space, filename='KerasDDPGAgent.h5f'):
        nb_actions = action_space.shape[0]
        obs_shape = (1,) + observation_space.shape

        # Actor: three 32-unit ReLU layers, actions squashed with a sigmoid.
        actor = Sequential()
        actor.add(Flatten(input_shape=obs_shape))
        for _ in range(3):
            actor.add(Dense(32))
            actor.add(Activation('relu'))
        actor.add(Dense(nb_actions))
        actor.add(Activation('sigmoid'))
        print(actor.summary())

        # Critic: Q(s, a) -> scalar, three 64-unit ReLU layers on the
        # concatenation of the action and the flattened observation.
        action_input = Input(shape=(nb_actions,), name='action_input')
        observation_input = Input(shape=obs_shape, name='observation_input')
        x = concatenate([action_input, Flatten()(observation_input)])
        for _ in range(3):
            x = Dense(64)(x)
            x = Activation('relu')(x)
        x = Dense(1)(x)
        x = Activation('linear')(x)
        critic = Model(inputs=[action_input, observation_input], outputs=x)
        print(critic.summary())

        memory = SequentialMemory(limit=100000, window_length=1)
        random_process = OrnsteinUhlenbeckProcess(theta=0.15, mu=0.0, sigma=0.2, size=nb_actions)
        self.agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic,
                               critic_action_input=action_input, memory=memory,
                               nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
                               random_process=random_process, gamma=0.99,
                               target_model_update=0.001, delta_clip=1.0)
        self.agent.compile(Adam(lr=0.001, clipnorm=1.0), metrics=['mae'])
        self.filename = filename
def distance(str1, str2):
    """Levenshtein edit distance between *str1* and *str2* (dynamic programming)."""
    rows = len(str2) + 1
    cols = len(str1) + 1
    table = np.zeros([rows, cols], dtype=int)
    # Transforming to/from the empty string costs one edit per character.
    for r in range(1, rows):
        table[r, 0] = r
    for c in range(1, cols):
        table[0, c] = c
    for r in range(1, rows):
        for c in range(1, cols):
            # Substitution is free when the characters already match.
            sub_cost = 0 if str1[c - 1] == str2[r - 1] else 1
            table[r, c] = min(table[r - 1, c] + 1,
                              table[r, c - 1] + 1,
                              table[r - 1, c - 1] + sub_cost)
    return table[len(str2), len(str1)]
# BUG FIX: a stray bare `_method` token preceded this function (residue of a
# stripped decorator — presumably Sage's @matrix_method); a bare name at
# module level raises NameError at import time, so it has been removed.
def random_diagonalizable_matrix(parent, eigenvalues=None, dimensions=None):
    """Return a random diagonalizable matrix over QQ in *parent*.

    Either supply both ``eigenvalues`` and ``dimensions`` (matching lists of
    integer eigenvalues and eigenspace dimensions summing to the matrix
    size), or neither, in which case random integer eigenvalues in [-10, 10]
    are generated.

    Raises TypeError for a non-square parent or non-integer eigenvalues, and
    ValueError for inconsistent eigenvalue/dimension data.
    """
    from sage.misc.prandom import randint
    size = parent.nrows()
    if parent.nrows() != parent.ncols():
        raise TypeError('a diagonalizable matrix must be square.')
    if eigenvalues is not None and dimensions is None:
        raise ValueError('the list of eigenvalues must have a list of dimensions corresponding to each eigenvalue.')
    if eigenvalues is None and dimensions is not None:
        raise ValueError('the list of dimensions must have a list of corresponding eigenvalues.')
    if eigenvalues is None and dimensions is None:
        # Generate random eigenvalues and derive multiplicities from the
        # number of repeats.
        values = []
        for eigen_index in range(size):
            eigenvalue = randint(-10, 10)
            values.append(eigenvalue)
        values.sort()
        dimensions = []
        eigenvalues = []
        for eigenvalue in range(size):
            if values[eigenvalue] not in eigenvalues:
                eigenvalues.append(values[eigenvalue])
        for dimension in range(len(eigenvalues)):
            dimensions.append(values.count(eigenvalues[dimension]))
    # Validate the (possibly user-supplied) eigenvalue/dimension data.
    size_check = 0
    for check in range(len(dimensions)):
        size_check = size_check + dimensions[check]
    if not all(x in ZZ for x in eigenvalues):
        raise TypeError('eigenvalues must be integers.')
    if size != size_check:
        raise ValueError('the size of the matrix must equal the sum of the dimensions.')
    if min(dimensions) < 1:
        raise ValueError('eigenspaces must have a dimension of at least 1.')
    if len(eigenvalues) != len(dimensions):
        raise ValueError('each eigenvalue must have a corresponding dimension and each dimension a corresponding eigenvalue.')
    # Sort eigenvalues by ascending eigenspace dimension.
    dimensions_sort = sorted(zip(dimensions, eigenvalues))
    dimensions = [x[0] for x in dimensions_sort]
    eigenvalues = [x[1] for x in dimensions_sort]
    # Build the diagonal matrix of eigenvalues, one block per eigenvalue.
    diagonal_matrix = matrix(QQ, size)
    up_bound = 0
    low_bound = 0
    for row_index in range(len(dimensions)):
        up_bound = up_bound + dimensions[row_index]
        for entry in range(low_bound, up_bound):
            diagonal_matrix[entry, entry] = eigenvalues[row_index]
        low_bound = low_bound + dimensions[row_index]
    # Start from the identity and plant extra 1s marking each eigenspace.
    eigenvector_matrix = matrix(QQ, size, size, 1)
    upper_limit = 0
    lower_limit = 0
    for dimension_index in range(len(dimensions) - 1):
        upper_limit = upper_limit + dimensions[dimension_index]
        lowest_index_row_with_one = size - dimensions[dimension_index]
        for eigen_ones in range(lower_limit, upper_limit):
            eigenvector_matrix[lowest_index_row_with_one, eigen_ones] = 1
            lowest_index_row_with_one += 1
        lower_limit = lower_limit + dimensions[dimension_index]
    dimension_check = []
    for i in range(len(dimensions)):
        for k in range(dimensions[i]):
            dimension_check.append(dimensions[i])
    # Mix in random column operations below the planted 1s.
    for dimension_multiplicity in range(max(dimensions), min(dimensions), -1):
        highest_one_row = size - dimension_multiplicity
        highest_one_column = 0
        while eigenvector_matrix[highest_one_row, highest_one_column] == 0:
            highest_one_column += 1
        for bottom_entry_filler in range(len(dimension_check)):
            if (dimension_check[bottom_entry_filler] < dimension_multiplicity
                    and eigenvector_matrix[highest_one_row, bottom_entry_filler] == 0):
                eigenvector_matrix.add_multiple_of_column(bottom_entry_filler, highest_one_column, randint(-4, 4))
    # Random row operations keep the result similar to the diagonal matrix.
    for row in range(size - max(dimensions), size):
        for upper_row in range(size - max(dimensions)):
            eigenvector_matrix.add_multiple_of_row(upper_row, row, randint(-4, 4))
    # Conjugating the diagonal matrix preserves its eigenvalues/dimensions.
    return (eigenvector_matrix * diagonal_matrix) * eigenvector_matrix.inverse()
def max_val_accuracy(stats):
    """Summarise the epoch with the best validation accuracy.

    *stats* maps 'val_acc', 'train_acc', 'val_loss', 'train_loss' to
    per-epoch lists of equal length. Returns a dict with the index of the
    best val_acc epoch and the four metrics at that epoch. Ties resolve to
    the earliest epoch (list.index semantics).
    """
    val_acc = stats['val_acc']
    # Compute the max once and reuse it for both the value and its index
    # (the original scanned the list twice).
    max_val_acc = max(val_acc)
    max_val_acc_idx = val_acc.index(max_val_acc)
    return {
        'epoch': max_val_acc_idx,
        'max_val_acc': max_val_acc,
        'train_acc': stats['train_acc'][max_val_acc_idx],
        'val_loss': stats['val_loss'][max_val_acc_idx],
        'train_loss': stats['train_loss'][max_val_acc_idx],
    }
def clean_up_gcda() -> None:
    """Delete every .gcda coverage file reported by get_gcda_files()."""
    for gcda_path in get_gcda_files():
        remove_file(gcda_path)
class ScalarMix(torch.nn.Module):
    """Learned softmax-weighted average over a stack of same-shaped tensors."""

    def __init__(self, mix_dim: int):
        super().__init__()
        # One unnormalised mixing weight per tensor; softmax-ed in forward().
        self.scalars = torch.nn.Parameter(torch.zeros(mix_dim))

    def __repr__(self):
        return f'{self.__class__.__name__}(mix_dim={self.scalars.size(0)})'

    def forward(self, tensors: Union[(torch.FloatTensor, List[torch.FloatTensor])]):
        # Accept either a pre-stacked tensor (mix dim first) or a list/tuple.
        stacked = torch.stack(tensors) if isinstance(tensors, (list, tuple)) else tensors
        weights = torch.nn.functional.softmax(self.scalars, dim=0)
        # Reshape weights to (-1, 1, 1, ...) so they broadcast across every
        # trailing dimension of the stacked input.
        broadcast_shape = (-1,) + (1,) * (stacked.dim() - 1)
        return (stacked * weights.view(*broadcast_shape)).sum(dim=0)
# BUG FIX: the two decorator lines had lost their '@pytest.mark' prefix
# (a line starting with '.parametrize(' is a syntax error); restored.
@pytest.mark.parametrize('bisecting_strategy', ['biggest_inertia', 'largest_cluster'])
@pytest.mark.parametrize('init', ['k-means++', 'random'])
def test_three_clusters(bisecting_strategy, init):
    """BisectingKMeans should recover three well-separated clusters."""
    X = np.array([[1, 1], [10, 1], [3, 1], [10, 0], [2, 1], [10, 2], [10, 8], [10, 9], [10, 10]])
    bisect_means = BisectingKMeans(n_clusters=3, random_state=0, bisecting_strategy=bisecting_strategy, init=init)
    bisect_means.fit(X)
    expected_centers = [[2, 1], [10, 1], [10, 9]]
    expected_labels = [0, 1, 0, 1, 0, 1, 2, 2, 2]
    # Sort both center lists so the comparison ignores cluster ordering;
    # v_measure_score is likewise permutation-invariant for labels.
    assert_allclose(sorted(expected_centers), sorted(bisect_means.cluster_centers_.tolist()))
    assert_allclose(v_measure_score(expected_labels, bisect_means.labels_), 1.0)
class Renderbuffer(object):
    """Owns a single OpenGL renderbuffer object (DSA-style creation)."""

    def __init__(self, internalformat, W, H):
        # glCreateRenderbuffers writes the new GL name into this array.
        self.__handle = np.empty(1, dtype=np.uint32)
        glCreateRenderbuffers(len(self.__handle), self.__handle)
        glNamedRenderbufferStorage(self.__handle[0], internalformat, W, H)

    def delete(self):
        """Release the GL renderbuffer object."""
        glDeleteRenderbuffers(1, self.__handle)

    def id(self):
        """Return the raw GL renderbuffer name."""
        return self.__handle[0]
def test_multi_stage():
    """Batcher should coalesce calls across jobs that run differing iteration counts."""
    batch_size = 32
    # Wrap the layer in a Mock so the number of batched calls is observable.
    linear = unittest.mock.Mock(wraps=torch.nn.Linear(8, 8))
    inp = torch.autograd.Variable(torch.rand(batch_size, 8))
    batcher = torch_batcher.TorchBatcher()

    async def process(item, iters):
        for _ in range(iters):
            item = await batcher(linear, item)
        return item

    # Job i runs (i + 1) // 4 iterations, so iteration counts range 0..8.
    jobs = [process(inp[i], (i + 1) // 4) for i in range(batch_size)]
    results = batcher.run(jobs)
    assert linear.call_count == (32 // 4)
    # Re-run each job sequentially and compare against the batched result.
    for i in range(batch_size):
        expected = inp[i:(i + 1)]
        for _ in range((i + 1) // 4):
            expected = linear(expected)
        expected = expected.squeeze(0)
        assert results[i].data.numpy() == pytest.approx(expected.data.numpy(), abs=1e-06)
def compress_to_zip(dir_to_compress: os.PathLike, delete: bool=False):
    """Create `<dir_to_compress>.zip` next to the directory.

    The archive contains the directory itself as its top-level entry.
    When *delete* is true, the original directory is removed afterwards.
    """
    parent_dir = os.path.dirname(dir_to_compress)
    leaf_name = os.path.basename(dir_to_compress)
    shutil.make_archive(dir_to_compress, 'zip', root_dir=parent_dir, base_dir=leaf_name)
    if delete:
        shutil.rmtree(dir_to_compress)
def extract_warnings(artifact_dir, targets):
    """Union the warnings extracted from every artifact in *artifact_dir*.

    *artifact_dir* is scanned non-recursively; *targets* is forwarded
    unchanged to extract_warnings_from_single_artifact(). Returns a set.
    """
    selected_warnings = set()
    # NOTE(review): `from_gh` is neither a parameter nor a local here — it
    # must be a module-level flag, otherwise this raises NameError whenever a
    # directory entry does not end in '.zip'. Confirm it exists at module scope.
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith('.zip') or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
def make_plots(all_logdirs, legend=None, xaxis=None, values=None, count=False, font_scale=1.5, smooth=1, select=None, exclude=None, estimator='mean'):
    """Load every experiment under *all_logdirs* and draw one figure per value."""
    data = get_all_datasets(all_logdirs, legend, select, exclude)
    if not isinstance(values, list):
        values = [values]
    # 'Condition2' separates individual runs; 'Condition1' groups seeds.
    condition = 'Condition2' if count else 'Condition1'
    # Resolve the named numpy aggregator (e.g. 'mean' -> np.mean) up front.
    estimator = getattr(np, estimator)
    for value in values:
        plt.figure()
        plot_data(data, xaxis=xaxis, value=value, condition=condition, smooth=smooth, estimator=estimator)
    plt.show()
class ResidualBlock(nn.Module):
    """Two convolutional blocks with an identity skip connection."""

    def __init__(self, kernel_size=3, n_channels=64):
        super(ResidualBlock, self).__init__()
        # First block applies a PReLU non-linearity; the second has none.
        self.conv_block1 = ConvolutionalBlock(in_channels=n_channels, out_channels=n_channels,
                                              kernel_size=kernel_size, batch_norm=True,
                                              activation='PReLu')
        self.conv_block2 = ConvolutionalBlock(in_channels=n_channels, out_channels=n_channels,
                                              kernel_size=kernel_size, batch_norm=True,
                                              activation=None)

    def forward(self, input):
        # Pass through both blocks, then add back the untouched input.
        transformed = self.conv_block2(self.conv_block1(input))
        return transformed + input
class LogParser():
    """Facade over a pluggable log-parsing algorithm (fit / parse / persist)."""

    def __init__(self, config: object):
        # Look up the algorithm and its config class by (lower-cased) name.
        name = config.parsing_algorithm.lower()
        config_class = factory.get_config_class('parsing', name)
        algorithm_class = factory.get_algorithm_class('parsing', name)
        self.parser = algorithm_class(config.parsing_algo_params if config.parsing_algo_params else config_class())

    def fit(self, loglines: pd.Series):
        """Train the underlying parser on raw loglines."""
        self.parser.fit(loglines)

    def parse(self, loglines: pd.Series) -> pd.DataFrame:
        """Parse loglines into (logline, parsed_logline, parameter_list) columns."""
        if self.parser is None:
            raise RuntimeError('Parser is None.')
        parsed_loglines = self.parser.parse(loglines)
        # BUG FIX: was `is not`, which compares object identity; equal strings
        # need not be the same object, so use equality instead.
        if loglines.name != constants.LOGLINE_NAME:
            loglines.name = constants.LOGLINE_NAME
        parsed_loglines.name = constants.PARSED_LOGLINE_NAME
        parsed_result = pd.concat([loglines, parsed_loglines], axis=1)
        parsed_result[constants.PARAMETER_LIST_NAME] = parsed_result.apply(self.get_parameter_list, axis=1)
        return parsed_result

    def fit_parse(self, loglines: pd.Series) -> pd.DataFrame:
        """Fit (best-effort) and then parse *loglines*."""
        try:
            self.fit(loglines)
        except RuntimeError:
            # BUG FIX: was `logging.ERROR(...)` — ERROR is an int level
            # constant, not a callable; use the module-level error() function.
            logging.error('Cannot train parser')
        return self.parse(loglines)

    def save(self, out_path):
        """Pickle the trained parser to *out_path*, creating parent dirs."""
        if not exists(dirname(out_path)):
            try:
                os.makedirs(dirname(out_path))
            except OSError as exc:
                # BUG FIX: original compared against `exc.errno.EEXIST` — an
                # int has no EEXIST attribute; use the errno module constant.
                import errno
                if exc.errno != errno.EEXIST:
                    raise RuntimeError('{} is not a valid output directory!'.format(out_path))
        # `with` closes the file; the original's extra f.close() was redundant.
        with open(out_path, 'wb') as f:
            pickle.dump(self.parser, f)

    def load(self, model_path):
        """Restore a pickled parser from *model_path*."""
        with open(model_path, 'rb') as f:
            self.parser = pickle.load(f)

    # BUG FIX: this was declared as an instance method taking only `row`, so
    # the bound call `self.get_parameter_list` used in parse() would receive
    # `self` as `row` and then fail on the extra argument from DataFrame.apply;
    # @staticmethod keeps both the call site and the (row) signature working.
    @staticmethod
    def get_parameter_list(row):
        """Return whitespace tokens of row.logline absent from row.parsed_logline."""
        parameter_list = []
        if (not isinstance(row.logline, str)) or (not isinstance(row.parsed_logline, str)):
            return parameter_list
        ll = row.logline.split()
        for t in ll:
            t = t.strip()
            # Skip empties and tokens the template already contains.
            if (not t) or (t in row.parsed_logline):
                continue
            parameter_list.append(t)
        return parameter_list
class ExtendedAffineWeylGroup_Class(UniqueRepresentation, Parent):
def __init__(self, cartan_type, general_linear, **print_options):
if (not cartan_type.is_affine()):
raise ValueError(('%s is not affine' % cartan_type))
self._cartan_type = cartan_type
self._prefixt = 't'
self._prefixf = 'pi'
self._prefixcl = None
self._prefixaf = None
self._print_tuple = False
if (general_linear is True):
self._general_linear = True
self._n = (self._cartan_type.n + 1)
else:
self._general_linear = False
for option in print_options:
if (option == 'translation'):
self._prefixt = print_options['translation']
elif (option == 'fundamental'):
self._prefixf = print_options['fundamental']
elif (option == 'print_tuple'):
self._print_tuple = print_options['print_tuple']
elif (option == 'affine'):
self._prefixaf = print_options['affine']
elif (option == 'classical'):
self._prefixcl = print_options['classical']
else:
raise ValueError(('Print option %s is unrecognized' % option))
if self._prefixaf:
if (not self._prefixcl):
if self._prefixaf.islower():
self._prefixcl = self._prefixaf.upper()
else:
self._prefixcl = self._prefixaf.lower()
elif self._prefixcl:
if self._prefixcl.islower():
self._prefixaf = self._prefixcl.upper()
else:
self._prefixaf = self._prefixcl.lower()
else:
self._prefixaf = 'S'
self._prefixcl = 's'
self._ct0 = cartan_type.classical()
self._R0 = self._ct0.root_system()
self._I0 = self._ct0.index_set()
self._ct0v = self._ct0.dual()
self._R0v = self._ct0v.root_system()
self._a0check = self._cartan_type.acheck()[self._cartan_type.special_node()]
if self._cartan_type.is_untwisted_affine():
self._type = 'untwisted'
elif self._cartan_type.dual().is_untwisted_affine():
self._type = 'dual_untwisted'
elif (self._a0check == 1):
self._type = 'special_extra_short'
else:
self._type = 'special_extra_long'
self._untwisted = (self._type in ('untwisted', 'special_extra_long'))
self._fundamental_group = FundamentalGroupOfExtendedAffineWeylGroup(cartan_type, prefix=self._prefixf, general_linear=self._general_linear)
if self._untwisted:
if self._general_linear:
self._lattice = self._R0.ambient_space()
self._simpleR0 = self._lattice.simple_roots()
else:
self._lattice = self._R0.coweight_lattice()
self._basis_name = 'Lambdacheck'
self._simpleR0 = self._R0.root_lattice().simple_roots()
self._basis = self._lattice.fundamental_weights()
if (self._type == 'special_extra_long'):
self._special_root = self._R0.coroot_lattice().highest_root()
node_adjacent_to_special = self._cartan_type.dynkin_diagram().neighbors(self._cartan_type.special_node())[0]
self._special_translation = self._lattice.fundamental_weight(node_adjacent_to_special)
else:
self._special_root = self._R0.root_lattice().highest_root().associated_coroot()
self._special_translation = self._special_root
self._special_translation_covector = self._special_root.associated_coroot()
if self._general_linear:
self._dual_lattice = self._lattice
else:
self._dual_lattice = self._R0v.weight_lattice()
self._dual_basis_name = 'Lambda'
self._dual_basis = self._dual_lattice.fundamental_weights()
else:
self._lattice = self._R0.weight_lattice()
self._basis = self._lattice.fundamental_weights()
self._basis_name = 'Lambda'
self._simpleR0 = self._R0.coroot_lattice().simple_roots()
if (self._type == 'special_extra_short'):
self._special_root = self._R0.root_lattice().highest_root()
node_adjacent_to_special = self._cartan_type.dynkin_diagram().neighbors(self._cartan_type.special_node())[0]
self._special_translation = self._lattice.fundamental_weight(node_adjacent_to_special)
self._special_translation_covector = (2 * self._special_root.associated_coroot())
else:
self._special_root = self._R0.coroot_lattice().highest_root().associated_coroot()
self._special_translation = self._special_root
self._special_translation_covector = self._special_root.associated_coroot()
self._dual_lattice = self._lattice
self._dual_basis = self._basis
self._dual_basis_name = 'Lambda'
self._W0 = WeylGroup(self._lattice, prefix=self._prefixcl)
self._W = WeylGroup(self._cartan_type.root_system().root_lattice(), prefix=self._prefixaf)
self._special_reflection = self._W0.from_reduced_word(self._special_root.associated_reflection())
if self._general_linear:
self._special_root = self._special_root.to_ambient()
self._special_translation = self._special_root
self._special_translation_covector = self._special_root
self._W0v = WeylGroup(self._dual_lattice, prefix=self._prefixcl)
self._exp_lattice = GroupExp()(self._lattice)
self._exp_dual_lattice = GroupExp()(self._dual_lattice)
self._extended = True
Parent.__init__(self, category=Groups().WithRealizations().Infinite())
PW0 = self.PW0()
W0P = self.W0P()
WF = self.WF()
FW = self.FW()
PvW0 = self.PvW0()
W0Pv = self.W0Pv()
W0P_to_PW0 = SetMorphism(Hom(W0P, PW0, Groups()), (lambda x: PW0(x.to_opposite())))
W0P_to_PW0.register_as_coercion()
PW0_to_W0P = SetMorphism(Hom(PW0, W0P, Groups()), (lambda x: W0P(x.to_opposite())))
PW0_to_W0P.register_as_coercion()
FW_to_WF = SetMorphism(Hom(FW, WF, Groups()), (lambda x: WF(x.to_opposite())))
FW_to_WF.register_as_coercion()
WF_to_FW = SetMorphism(Hom(WF, FW, Groups()), (lambda x: FW(x.to_opposite())))
WF_to_FW.register_as_coercion()
PW0_to_WF = SetMorphism(Hom(PW0, WF, Groups()), self.PW0_to_WF_func)
PW0_to_WF.register_as_coercion()
WF_to_PW0 = SetMorphism(Hom(WF, PW0, Groups()), self.WF_to_PW0_func)
WF_to_PW0.register_as_coercion()
PvW0_to_W0Pv = SetMorphism(Hom(PvW0, W0Pv, Groups()), (lambda x: W0Pv(x.to_opposite())))
PvW0_to_W0Pv.register_as_coercion()
W0Pv_to_PvW0 = SetMorphism(Hom(W0Pv, PvW0, Groups()), (lambda x: PvW0(x.to_opposite())))
W0Pv_to_PvW0.register_as_coercion()
if self._general_linear:
PW0_to_PvW0 = SetMorphism(Hom(PW0, PvW0, Groups()), (lambda x: PvW0((x.cartesian_projection(0), x.cartesian_projection(1)))))
PvW0_to_PW0 = SetMorphism(Hom(PvW0, PW0, Groups()), (lambda x: PW0((x.cartesian_projection(0), x.cartesian_projection(1)))))
W0P_to_W0Pv = SetMorphism(Hom(W0P, W0Pv, Groups()), (lambda x: W0Pv((x.cartesian_projection(0), x.cartesian_projection(1)))))
W0Pv_to_W0P = SetMorphism(Hom(W0Pv, W0P, Groups()), (lambda x: W0P((x.cartesian_projection(0), x.cartesian_projection(1)))))
elif self._untwisted:
PW0_to_PvW0 = SetMorphism(Hom(PW0, PvW0, Groups()), (lambda x: PvW0((self.exp_dual_lattice()(x.cartesian_projection(0).value.to_dual_type_cospace()), self.dual_classical_weyl().from_reduced_word(x.cartesian_projection(1).reduced_word())))))
PvW0_to_PW0 = SetMorphism(Hom(PvW0, PW0, Groups()), (lambda x: PW0((self.exp_lattice()(x.cartesian_projection(0).value.to_dual_type_cospace()), self.classical_weyl().from_reduced_word(x.cartesian_projection(1).reduced_word())))))
W0P_to_W0Pv = SetMorphism(Hom(W0P, W0Pv, Groups()), (lambda x: W0Pv((self.dual_classical_weyl().from_reduced_word(x.cartesian_projection(0).reduced_word()), self.exp_dual_lattice()(x.cartesian_projection(1).value.to_dual_type_cospace())))))
W0Pv_to_W0P = SetMorphism(Hom(W0Pv, W0P, Groups()), (lambda x: W0P((self.classical_weyl().from_reduced_word(x.cartesian_projection(0).reduced_word()), self.exp_lattice()(x.cartesian_projection(1).value.to_dual_type_cospace())))))
else:
PW0_to_PvW0 = SetMorphism(Hom(PW0, PvW0, Groups()), (lambda x: PvW0((x.cartesian_projection(0), self.dual_classical_weyl().from_reduced_word(x.cartesian_projection(1).reduced_word())))))
PvW0_to_PW0 = SetMorphism(Hom(PvW0, PW0, Groups()), (lambda x: PW0((x.cartesian_projection(0), self.classical_weyl().from_reduced_word(x.cartesian_projection(1).reduced_word())))))
W0P_to_W0Pv = SetMorphism(Hom(W0P, W0Pv, Groups()), (lambda x: W0Pv((self.dual_classical_weyl().from_reduced_word(x.cartesian_projection(0).reduced_word()), x.cartesian_projection(1)))))
W0Pv_to_W0P = SetMorphism(Hom(W0Pv, W0P, Groups()), (lambda x: W0P((self.classical_weyl().from_reduced_word(x.cartesian_projection(0).reduced_word()), x.cartesian_projection(1)))))
PW0_to_PvW0.register_as_coercion()
PvW0_to_PW0.register_as_coercion()
W0P_to_W0Pv.register_as_coercion()
W0Pv_to_W0P.register_as_coercion()
P_to_PW0 = SetMorphism(Hom(self.lattice(), PW0, Sets()), PW0.from_translation)
P_to_PW0.register_as_coercion()
P_to_W0P = SetMorphism(Hom(self.lattice(), W0P, Sets()), W0P.from_translation)
P_to_W0P.register_as_coercion()
Pv_to_PvW0 = SetMorphism(Hom(self.dual_lattice(), PvW0, Sets()), PvW0.from_dual_translation)
Pv_to_PvW0.register_as_coercion()
Pv_to_W0Pv = SetMorphism(Hom(self.dual_lattice(), W0Pv, Sets()), W0Pv.from_dual_translation)
Pv_to_W0Pv.register_as_coercion()
W0_to_PW0 = SetMorphism(Hom(self.classical_weyl(), PW0, Groups()), PW0.from_classical_weyl)
W0_to_PW0.register_as_coercion()
W0_to_W0P = SetMorphism(Hom(self.classical_weyl(), W0P, Groups()), W0P.from_classical_weyl)
W0_to_W0P.register_as_coercion()
W0v_to_PvW0 = SetMorphism(Hom(self.dual_classical_weyl(), PvW0, Groups()), PvW0.from_dual_classical_weyl)
W0v_to_PvW0.register_as_coercion()
W0v_to_W0Pv = SetMorphism(Hom(self.dual_classical_weyl(), W0Pv, Groups()), W0Pv.from_dual_classical_weyl)
W0v_to_W0Pv.register_as_coercion()
F_to_WF = SetMorphism(Hom(self.fundamental_group(), WF, Groups()), WF.from_fundamental)
F_to_WF.register_as_coercion()
F_to_FW = SetMorphism(Hom(self.fundamental_group(), FW, Groups()), FW.from_fundamental)
F_to_FW.register_as_coercion()
W_to_WF = SetMorphism(Hom(self.affine_weyl(), WF, Groups()), WF.from_affine_weyl)
W_to_WF.register_as_coercion()
W_to_FW = SetMorphism(Hom(self.affine_weyl(), FW, Groups()), FW.from_affine_weyl)
W_to_FW.register_as_coercion()
def PW0(self):
return self.ExtendedAffineWeylGroupPW0()
def W0P(self):
return self.ExtendedAffineWeylGroupW0P()
def WF(self):
return self.ExtendedAffineWeylGroupWF()
def FW(self):
return self.ExtendedAffineWeylGroupFW()
def PvW0(self):
return self.ExtendedAffineWeylGroupPvW0()
def W0Pv(self):
return self.ExtendedAffineWeylGroupW0Pv()
def cartan_type(self):
return self._cartan_type
def _repr_(self):
if self._general_linear:
return ('Extended affine Weyl group of GL(%s)' % self._n)
return ('Extended affine Weyl group of type %s' % self.cartan_type())
def fundamental_group(self):
return self._fundamental_group
def lattice(self):
return self._lattice
def exp_lattice(self):
return self._exp_lattice
def lattice_basis(self):
return self._basis
def dual_lattice(self):
return self._dual_lattice
def exp_dual_lattice(self):
return self._exp_dual_lattice
def dual_lattice_basis(self):
return self._dual_basis
def classical_weyl(self):
return self._W0
def dual_classical_weyl(self):
return self._W0v
def affine_weyl(self):
return self._W
def classical_weyl_to_affine(self, w):
return self.affine_weyl().from_reduced_word(w.reduced_word())
def dual_classical_weyl_to_affine(self, w):
return self.affine_weyl().from_reduced_word(w.reduced_word())
def a_realization(self):
return self.PW0()
def group_generators(self):
return self.a_realization().group_generators()
_method
def PW0_to_WF_func(self, x):
i = x.first_descent(side='left')
if (i is None):
t = x.to_translation_left()
if self._general_linear:
ispecial = ZZ.sum([t[j] for j in t.support()])
elif (t == self.lattice().zero()):
ispecial = 0
else:
supp = t.support()
assert (len(supp) == 1)
ispecial = supp[0]
return self.WF().from_fundamental(self.fundamental_group()(ispecial))
return self.PW0_to_WF_func(x.apply_simple_reflection(i, side='left')).apply_simple_reflection(i, side='left')
_method
def WF_to_PW0_func(self, x):
w = x.to_affine_weyl_left()
f = x.to_fundamental_group()
i = w.first_descent(side='left')
if (i is not None):
return self.WF_to_PW0_func(x.apply_simple_reflection(i, side='left')).apply_simple_reflection(i, side='left')
PW0 = self.PW0()
ispecial = f.value()
W = self.classical_weyl()
if self._general_linear:
r = ZZ(Mod(ispecial, self._n))
weight = self.lattice().from_vector(vector(([ZZ(((ispecial - r) / self._n))] * self._n)))
if (r != ZZ(0)):
weight = (weight + self.lattice_basis()[r])
wo = W.from_reduced_word(self.fundamental_group().reduced_word(r))
else:
wo = W.one()
elif (ispecial == 0):
weight = self.lattice().zero()
wo = W.one()
else:
weight = self.lattice_basis()[ispecial]
wo = W.from_reduced_word(self.fundamental_group().reduced_word(ispecial))
return PW0((weight, wo))
class Realizations(Category_realization_of_parent):
def super_categories(self):
return [Groups().Realizations()]
class ParentMethods():
_method
def from_fundamental(self, x):
WF = self.realization_of().WF()
return self(WF.from_fundamental(x))
def from_translation(self, la):
PW0 = self.realization_of().PW0()
return self(PW0.from_translation(la))
def from_dual_translation(self, la):
return self(self.realization_of().PvW0().from_dual_translation(la))
_method
def simple_reflections(self):
def simple_reflection(self, i):
return self.simple_reflections()[i]
def from_classical_weyl(self, w):
PW0 = self.realization_of().PW0()
return self(PW0.from_classical_weyl(w))
def from_dual_classical_weyl(self, w):
return self(self.realization_of().PvW0().from_dual_classical_weyl(w))
def from_affine_weyl(self, w):
WF = self.realization_of().WF()
return self(WF.from_affine_weyl(w))
def from_reduced_word(self, word):
return self.from_affine_weyl(self.realization_of().affine_weyl().from_reduced_word(word))
class ElementMethods():
_method
def has_descent(self, i, side='right', positive=False):
def first_descent(self, side='right', positive=False, index_set=None):
if (index_set is None):
index_set = self.parent().realization_of().cartan_type().index_set()
for i in index_set:
if self.has_descent(i, side=side, positive=positive):
return i
return None
def apply_simple_reflection(self, i, side='right'):
s = self.parent().simple_reflection(i)
if (side == 'right'):
return (self * s)
else:
return (s * self)
def apply_simple_projection(self, i, side='right', length_increasing=True):
if self.has_descent(i, side=side, positive=length_increasing):
return self.apply_simple_reflection(i, side=side)
return self
def to_fundamental_group(self):
WF = self.parent().realization_of().WF()
return WF(self).to_fundamental_group()
def to_classical_weyl(self):
PW0 = self.parent().realization_of().PW0()
return PW0(self).to_classical_weyl()
def to_dual_classical_weyl(self):
PvW0 = self.parent().realization_of().PvW0()
return PvW0(self).to_dual_classical_weyl()
def to_affine_weyl_left(self):
WF = self.parent().realization_of().WF()
return WF(self).to_affine_weyl_left()
def to_affine_weyl_right(self):
FW = self.parent().realization_of().FW()
return FW(self).to_affine_weyl_right()
def to_translation_left(self):
PW0 = self.parent().realization_of().PW0()
return PW0(self).to_translation_left()
def to_translation_right(self):
W0P = self.parent().realization_of().W0P()
return W0P(self).to_translation_right()
def to_dual_translation_left(self):
PvW0 = self.parent().realization_of().PvW0()
return PvW0(self).to_dual_translation_left()
def to_dual_translation_right(self):
W0Pv = self.parent().realization_of().W0Pv()
return W0Pv(self).to_dual_translation_right()
def length(self):
return self.to_affine_weyl_left().length()
def coset_representative(self, index_set, side='right'):
while True:
i = self.first_descent(index_set=index_set, side=side)
if (i is None):
return self
self = self.apply_simple_reflection(i, side=side)
def is_grassmannian(self, index_set, side='right'):
return (self == self.coset_representative(index_set=index_set, side=side))
def to_affine_grassmannian(self):
return self.coset_representative(index_set=self.parent().realization_of().cartan_type().classical().index_set())
def is_affine_grassmannian(self):
return (self == self.to_affine_grassmannian())
def bruhat_le(self, x):
WF = self.parent().realization_of().WF()
return WF(self).bruhat_le(WF(x))
def is_translation(self):
w = self.to_classical_weyl()
return (w == w.parent().one())
def action(self, la):
PW0 = self.parent().realization_of().PW0()
return PW0(self).action(la)
def dual_action(self, la):
PvW0 = self.parent().realization_of().PvW0()
return PvW0(self).dual_action(la)
def action_on_affine_roots(self, beta):
E = self.parent().realization_of()
assert (beta in RootSystem(E.cartan_type()).root_lattice())
return E.FW()(self).action_on_affine_roots(beta)
def face_data(self, i):
    """Return ``(gamma[0], classical(gamma))`` where ``gamma`` is the image of
    the ``i``-th simple affine root under ``self``: the affine level together
    with the classical projection."""
    affine_root_lattice = self.parent().realization_of().cartan_type().root_system().root_lattice()
    gamma = self.action_on_affine_roots(affine_root_lattice.simple_root(i))
    return (gamma[0], affine_root_lattice.classical()(gamma))
def alcove_walk_signs(self):
    """Return ``(g, rw, signs)`` for the alcove walk of ``self``.

    ``g`` is the fundamental-group factor, ``rw`` a reduced word of the affine
    Weyl factor, and ``signs`` a list with one entry (+1 or -1) per letter of
    ``rw``: +1 when the face root at that step is positive, -1 otherwise.
    """
    We = self.parent()
    gw = We.realization_of().FW()(self)
    g = gw.cartesian_projection(0)
    w = gw.cartesian_projection(1)
    rw = w.reduced_word()
    u_curr = We.from_fundamental(g.value())
    signs = []
    for i in rw:
        (_, beta) = u_curr.face_data(i)
        # append() instead of the original quadratic `signs = signs + [...]`.
        signs.append(1 if beta.is_positive_root() else -1)
        u_curr = u_curr * We.simple_reflection(i)
    return (g, rw, signs)
class ExtendedAffineWeylGroupPW0Element(GroupSemidirectProduct.Element):
    # Element in the "PW0" realization: a pair (t_la, w) with the translation
    # lattice element la on the left and the classical Weyl element w on the right.

    def has_descent(self, i, side='right', positive=False):
        """Return whether ``self`` has a descent at the affine index ``i``.

        Right descents are reduced to left descents of the inverse; with
        ``positive=True`` the answer is negated.
        """
        E = self.parent().realization_of()
        if (side == 'right'):
            # Right descents of x are the left descents of x^{-1}.
            self = (~ self)
        if positive:
            return (not self.has_descent(i, side='left'))
        la = self.cartesian_projection(0).value
        w = self.cartesian_projection(1)
        if (i == 0):
            # Affine node: pair la with the special translation covector,
            # scaled by a0-check; the threshold is +1.
            ip = (la.scalar(E._special_translation_covector) * E._a0check)
            if (ip > 1):
                return True
            if (ip < 1):
                return False
            # Borderline case: decided by the sign of w^{-1} applied to the
            # special root.
            return E._special_root.weyl_action(w, inverse=True).is_positive_root()
        # Classical node: pair la with the i-th element of _simpleR0.
        ip = la.scalar(E._simpleR0[i])
        if (ip < 0):
            return True
        if (ip > 0):
            return False
        # Pairing is zero: fall back to the classical Weyl descent of w.
        return w.has_descent(i, side='left')

    def action(self, la):
        """Act on the lattice element ``la``: Weyl part acts, then the
        translation part adds."""
        w = self.cartesian_projection(1)
        assert (la in w.parent().domain())
        return (self.cartesian_projection(0).value + w.action(la))

    def to_translation_left(self):
        """Return the translation lattice element (left component)."""
        return self.cartesian_projection(0).value

    def to_classical_weyl(self):
        """Return the classical Weyl factor (right component)."""
        return self.cartesian_projection(1)
class ExtendedAffineWeylGroupPW0(GroupSemidirectProduct, BindableClass):
    """Realization "PW0": semidirect product of the (exponentiated)
    translation lattice with the classical Weyl group, translations written
    on the left."""

    def __init__(self, E):
        def twist(w, l):
            # The classical Weyl group acts on an exp-lattice element through
            # its action on the underlying lattice element.
            return E.exp_lattice()(w.action(l.value))
        GroupSemidirectProduct.__init__(self, E.exp_lattice(), E.classical_weyl(), twist=twist, act_to_right=False, prefix0=E._prefixt, print_tuple=E._print_tuple, category=E.Realizations())
        self._style = 'PW0'

    def _repr_(self):
        return self.realization_of()._repr_() + ' realized by ' + super()._repr_()

    def from_translation(self, la):
        """Embed the lattice element ``la`` as the pure translation t_la."""
        E = self.realization_of()
        return self((E.exp_lattice()(la), self.cartesian_factors()[1].one()))

    # NOTE(review): the stray bare `_method` tokens in the original are
    # mangled `@cached_method` decorators (the Sage convention for these
    # accessors); a bare `_method` expression would raise NameError at class
    # creation, so they are restored here.
    @cached_method
    def S0(self):
        """Return the affine simple reflection s_0: the special translation
        paired with the special reflection."""
        E = self.realization_of()
        return self((E.exp_lattice()(E.lattice()(E._special_translation)), E._special_reflection))

    @cached_method
    def simple_reflection(self, i):
        """Return the ``i``-th affine simple reflection."""
        if i == 0:
            return self.S0()
        E = self.realization_of()
        return self.from_classical_weyl(E.classical_weyl().simple_reflection(i))

    @cached_method
    def simple_reflections(self):
        """Family of the affine simple reflections, indexed by the affine index set."""
        return Family(self.realization_of().cartan_type().index_set(), self.simple_reflection)

    def from_classical_weyl(self, w):
        """Embed the classical Weyl element ``w`` (trivial translation part)."""
        return self((self.cartesian_factors()[0].one(), w))
class ExtendedAffineWeylGroupW0PElement(GroupSemidirectProduct.Element):
    # Element in the "W0P" realization: a pair (w, t_la) with the classical
    # Weyl element w on the left and the translation t_la on the right.

    def has_descent(self, i, side='right', positive=False):
        """Return whether ``self`` has a descent at the affine index ``i``.

        Left descents are reduced to right descents of the inverse; with
        ``positive=True`` the answer is negated.
        """
        E = self.parent().realization_of()
        if (side == 'left'):
            # Left descents of x are the right descents of x^{-1}.
            self = (~ self)
        if positive:
            return (not self.has_descent(i, side='right'))
        w = self.cartesian_projection(0)
        la = self.cartesian_projection(1).value
        if (i == 0):
            # Affine node: same pairing as in the PW0 rule, but the
            # threshold here is -1 (translation sits on the other side).
            ip = (la.scalar(E._special_translation_covector) * E._a0check)
            if (ip < (- 1)):
                return True
            if (ip > (- 1)):
                return False
            # Borderline case: decided by the sign of w on the special root.
            return E._special_root.weyl_action(w).is_positive_root()
        # Classical node: note the inequality directions are mirrored
        # relative to the PW0 rule.
        ip = la.scalar(E._simpleR0[i])
        if (ip > 0):
            return True
        if (ip < 0):
            return False
        return w.has_descent(i, side='right')

    def to_classical_weyl(self):
        """Return the classical Weyl factor (left component)."""
        return self.cartesian_projection(0)

    def to_translation_right(self):
        """Return the translation lattice element (right component)."""
        return self.cartesian_projection(1).value
class ExtendedAffineWeylGroupW0P(GroupSemidirectProduct, BindableClass):
    """Realization "W0P": classical Weyl group times translations, with the
    translations written on the right."""

    def __init__(self, E):
        def twist(w, l):
            return E.exp_lattice()(w.action(l.value))
        GroupSemidirectProduct.__init__(self, E.classical_weyl(), E.exp_lattice(), twist=twist, act_to_right=True, prefix1=E._prefixt, print_tuple=E._print_tuple, category=E.Realizations())
        self._style = 'W0P'

    def _repr_(self):
        return self.realization_of()._repr_() + ' realized by ' + super()._repr_()

    def S0(self):
        """Return the affine simple reflection s_0: the special reflection
        paired with the inverse of the special translation."""
        E = self.realization_of()
        return self((E._special_reflection, E.exp_lattice()(E.lattice()(-E._special_translation))))

    def simple_reflection(self, i):
        """Return the ``i``-th affine simple reflection."""
        if i == 0:
            return self.S0()
        E = self.realization_of()
        return self.from_classical_weyl(E.classical_weyl().simple_reflection(i))

    # NOTE(review): the bare `_method` token in the original is a mangled
    # `@cached_method` decorator (it would raise NameError as written).
    @cached_method
    def simple_reflections(self):
        """Family of the affine simple reflections, indexed by the affine index set."""
        return Family(self.realization_of().cartan_type().index_set(), self.simple_reflection)

    def from_classical_weyl(self, w):
        """Embed the classical Weyl element ``w`` (trivial translation part)."""
        return self((w, self.cartesian_factors()[1].one()))

    def from_translation(self, la):
        """Embed the lattice element ``la`` as the pure translation t_la."""
        return self((self.cartesian_factors()[0].one(), self.realization_of().exp_lattice()(la)))
class ExtendedAffineWeylGroupWFElement(GroupSemidirectProduct.Element):
    """Element in the "WF" style: affine Weyl part on the left,
    fundamental-group part on the right."""

    def has_descent(self, i, side='right', positive=False):
        """Return whether ``self`` has a descent at the affine index ``i``."""
        elt = ~self if side == 'right' else self
        if positive:
            return not elt.has_descent(i, side='left')
        return elt.cartesian_projection(0).has_descent(i, side='left')

    def to_fundamental_group(self):
        """Return the fundamental-group factor (right component)."""
        return self.cartesian_projection(1)

    def to_affine_weyl_left(self):
        """Return the affine Weyl factor (left component)."""
        return self.cartesian_projection(0)

    def bruhat_le(self, x):
        """Bruhat comparison: requires equal fundamental-group parts and
        Bruhat-comparable affine Weyl parts."""
        same_component = self.cartesian_projection(1) == x.cartesian_projection(1)
        return same_component and self.cartesian_projection(0).bruhat_le(x.cartesian_projection(0))
class ExtendedAffineWeylGroupWF(GroupSemidirectProduct, BindableClass):
    """Realization "WF": affine Weyl group extended by the fundamental group,
    fundamental group written on the right."""

    def __init__(self, E):
        def twist(g, w):
            # The fundamental group acts on the affine Weyl group.
            return g.act_on_affine_weyl(w)
        GroupSemidirectProduct.__init__(self, E.affine_weyl(), E.fundamental_group(), twist=twist, act_to_right=False, print_tuple=E._print_tuple, category=E.Realizations())
        self._style = 'WF'

    def _repr_(self):
        return self.realization_of()._repr_() + ' realized by ' + super()._repr_()

    def from_affine_weyl(self, w):
        """Embed the affine Weyl element ``w`` (trivial fundamental part)."""
        return self((w, self.cartesian_factors()[1].one()))

    # NOTE(review): the bare `_method` tokens in the original are mangled
    # `@cached_method` decorators (they would raise NameError as written).
    @cached_method
    def simple_reflections(self):
        """Family of the affine simple reflections, embedded from the affine Weyl group."""
        E = self.realization_of()
        W = E.affine_weyl()
        return Family(E.cartan_type().index_set(), (lambda i: self.from_affine_weyl(W.simple_reflection(i))))

    @cached_method
    def from_fundamental(self, f):
        """Embed the fundamental-group element ``f`` (trivial affine Weyl part)."""
        return self((self.cartesian_factors()[0].one(), f))
class ExtendedAffineWeylGroupFWElement(GroupSemidirectProduct.Element):
    """Element in the "FW" style: fundamental-group part on the left, affine
    Weyl part on the right."""

    def has_descent(self, i, side='right', positive=False):
        """Return whether ``self`` has a descent at the affine index ``i``."""
        elt = ~self if side == 'left' else self
        if positive:
            return not elt.has_descent(i, side='right')
        return elt.cartesian_projection(1).has_descent(i, side='right')

    def to_fundamental_group(self):
        """Return the fundamental-group factor (left component)."""
        return self.cartesian_projection(0)

    def to_affine_weyl_right(self):
        """Return the affine Weyl factor (right component)."""
        return self.cartesian_projection(1)

    def action_on_affine_roots(self, beta):
        """Act on the affine root ``beta``: the affine Weyl part acts first,
        then the fundamental-group automorphism is applied."""
        fundamental = self.cartesian_projection(0)
        affine = self.cartesian_projection(1)
        return fundamental.act_on_affine_lattice(affine.action(beta))
class ExtendedAffineWeylGroupFW(GroupSemidirectProduct, BindableClass):
    """Realization "FW": fundamental group on the left, affine Weyl group on
    the right."""

    def __init__(self, E):
        def twist(g, w):
            # The fundamental group acts on the affine Weyl group.
            return g.act_on_affine_weyl(w)
        GroupSemidirectProduct.__init__(self, E.fundamental_group(), E.affine_weyl(), twist=twist, act_to_right=True, print_tuple=E._print_tuple, category=E.Realizations())
        self._style = 'FW'

    def _repr_(self):
        return self.realization_of()._repr_() + ' realized by ' + super()._repr_()

    # NOTE(review): the bare `_method` tokens in the original are mangled
    # `@cached_method` decorators (they would raise NameError as written).
    @cached_method
    def simple_reflections(self):
        """Family of the affine simple reflections, embedded from the affine Weyl group."""
        E = self.realization_of()
        W = E.affine_weyl()
        return Family(E.cartan_type().index_set(), (lambda i: self.from_affine_weyl(W.simple_reflection(i))))

    def from_affine_weyl(self, w):
        """Embed the affine Weyl element ``w`` (trivial fundamental part)."""
        return self((self.cartesian_factors()[0].one(), w))

    @cached_method
    def from_fundamental(self, f):
        """Embed the fundamental-group element ``f`` (trivial affine Weyl part)."""
        return self((f, self.cartesian_factors()[1].one()))
class ExtendedAffineWeylGroupPvW0Element(GroupSemidirectProduct.Element):
    """Element in the "PvW0" style: dual translation on the left, dual
    classical Weyl element on the right."""

    def has_descent(self, i, side='right', positive=False):
        """Return whether ``self`` has a descent at the affine index ``i``
        (delegated to the "PW0" realization)."""
        pw0 = self.parent().realization_of().PW0()
        return pw0(self).has_descent(i, side=side, positive=positive)

    def dual_action(self, la):
        """Act on the dual-lattice element ``la``: the Weyl factor acts, then
        the dual translation adds."""
        weyl = self.cartesian_projection(1)
        assert la in weyl.parent().domain()
        return self.cartesian_projection(0).value + weyl.action(la)

    def to_dual_translation_left(self):
        """Return the dual translation lattice element (left component)."""
        return self.cartesian_projection(0).value

    def to_dual_classical_weyl(self):
        """Return the dual classical Weyl factor (right component)."""
        return self.cartesian_projection(1)

    def is_translation(self):
        """Return whether the dual classical Weyl factor is trivial."""
        weyl = self.to_dual_classical_weyl()
        return weyl == weyl.parent().one()
class ExtendedAffineWeylGroupPvW0(GroupSemidirectProduct, BindableClass):
    """Realization "PvW0": dual translations on the left, dual classical Weyl
    group on the right."""

    def __init__(self, E):
        def twist(w, l):
            return E.exp_dual_lattice()(w.action(l.value))
        GroupSemidirectProduct.__init__(self, E.exp_dual_lattice(), E.dual_classical_weyl(), twist=twist, act_to_right=False, prefix0=E._prefixt, print_tuple=E._print_tuple, category=E.Realizations())
        self._style = 'PvW0'

    def _repr_(self):
        return self.realization_of()._repr_() + ' realized by ' + super()._repr_()

    def from_dual_translation(self, la):
        """Embed the dual lattice element ``la`` as a pure dual translation."""
        E = self.realization_of()
        return self((E.exp_dual_lattice()(la), self.cartesian_factors()[1].one()))

    # NOTE(review): the bare `_method` token in the original is a mangled
    # `@cached_method` decorator (it would raise NameError as written).
    @cached_method
    def simple_reflections(self):
        """Family of the affine simple reflections, pulled over from "PW0"."""
        E = self.realization_of()
        return Family(E.cartan_type().index_set(), (lambda i: self(E.PW0().simple_reflection(i))))

    def from_dual_classical_weyl(self, w):
        """Embed ``w`` with trivial dual translation part."""
        return self((self.cartesian_factors()[0].one(), w))
class ExtendedAffineWeylGroupW0PvElement(GroupSemidirectProduct.Element):
    """Element in the "W0Pv" style: dual classical Weyl element on the left,
    dual translation on the right."""

    def dual_action(self, la):
        """Act on the dual-lattice element ``la``: the dual translation is
        added first, then the Weyl factor acts."""
        weyl = self.cartesian_projection(0)
        assert la in weyl.parent().domain()
        return weyl.action(self.cartesian_projection(1).value + la)

    def has_descent(self, i, side='right', positive=False):
        """Return whether ``self`` has a descent at the affine index ``i``
        (delegated to the "W0P" realization)."""
        w0p = self.parent().realization_of().W0P()
        return w0p(self).has_descent(i, side=side, positive=positive)

    def to_dual_translation_right(self):
        """Return the dual translation lattice element (right component)."""
        return self.cartesian_projection(1).value

    def to_dual_classical_weyl(self):
        """Return the dual classical Weyl factor (left component)."""
        return self.cartesian_projection(0)

    def is_translation(self):
        """Return whether the dual classical Weyl factor is trivial."""
        weyl = self.to_dual_classical_weyl()
        return weyl == weyl.parent().one()
class ExtendedAffineWeylGroupW0Pv(GroupSemidirectProduct, BindableClass):
    """Realization "W0Pv": dual classical Weyl group on the left, dual
    translations on the right."""

    def __init__(self, E):
        def twist(w, l):
            return E.exp_dual_lattice()(w.action(l.value))
        GroupSemidirectProduct.__init__(self, E.dual_classical_weyl(), E.exp_dual_lattice(), twist=twist, act_to_right=True, prefix1=E._prefixt, print_tuple=E._print_tuple, category=E.Realizations())
        self._style = 'W0Pv'

    def _repr_(self):
        return self.realization_of()._repr_() + ' realized by ' + super()._repr_()

    def from_dual_translation(self, la):
        """Embed the dual lattice element ``la`` as a pure dual translation."""
        E = self.realization_of()
        return self((self.cartesian_factors()[0].one(), E.exp_dual_lattice()(la)))

    # NOTE(review): the bare `_method` token in the original is a mangled
    # `@cached_method` decorator (it would raise NameError as written).
    @cached_method
    def simple_reflections(self):
        """Family of the affine simple reflections, pulled over from "PW0"."""
        E = self.realization_of()
        return Family(E.cartan_type().index_set(), (lambda i: self(E.PW0().simple_reflection(i))))

    def from_dual_classical_weyl(self, w):
        """Embed ``w`` with trivial dual translation part."""
        return self((w, self.cartesian_factors()[1].one()))
class ScipyOptimizer():
def __init__(self, parameters, method, maxiter, callback=(lambda *args: None), **kwargs):
self.kwargs = kwargs
self.parameters = list(parameters)
self.method = method
self.maxiter = maxiter
self.callback = callback
self.param_groups = []
def step(self, closure):
def fun(x, *args):
with torch.no_grad():
offset = 0
for p in self.parameters:
n = 1
if (len(p.shape) > 0):
n = reduce((lambda x, y: (x * y)), p.shape)
slice = x[offset:(offset + n)]
p.copy_(torch.tensor(slice).reshape(p.shape))
offset += n
return float(closure())
def jac(x, *args):
j = []
for p in self.parameters:
j.append(p.grad.flatten().detach().numpy())
return np.concatenate(j)
xs = []
for p in self.parameters:
xs.append(p.flatten().detach().numpy())
x0 = np.concatenate(xs)
options = self.kwargs.copy()
options['maxiter'] = self.maxiter
minimize(fun, x0, method=self.method, callback=self.callback, jac=jac, options=options)
def zero_grad(self):
for p in self.parameters:
if (p.grad is not None):
if (p.grad.grad_fn is not None):
p.grad.detach_()
else:
p.grad.requires_grad_(False)
p.grad.zero_() |
# NOTE(review): the original lines began with bare `.parametrize(` -- a
# syntax error; these are mangled `@pytest.mark.parametrize` decorators,
# restored here.
@pytest.mark.parametrize('inspecs', pairwise_inspecs_params())
@pytest.mark.parametrize('op', ['add2', 'sub2', 'mul2', 'div2', 'pow2', 'maximum2', 'minimum2'])
def test_pairwise_arithmetic(inspecs, op, nnabla_opts):
    """Benchmark each pairwise arithmetic function of ``F`` over every
    pairwise input spec and write the results with the configured writer."""
    func = getattr(F, op)
    fb = FunctionBenchmark(func, inspecs, [], {}, nnabla_opts.ext, nnabla_opts.ext_kwargs)
    fb.benchmark()
    fb.write(writer=nnabla_opts.function_benchmark_writer)
def finiteCheck(parameters):
    """Zero out every non-finite (inf, -inf, nan) gradient entry, in-place.

    Accepts a single tensor or an iterable of tensors; entries whose ``grad``
    is ``None`` are skipped.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    for p in parameters:
        if p.grad is None:
            continue
        grad = p.grad.data
        # One fused isfinite mask instead of the original separate
        # isinf/isnan passes; same effect (non-finite -> 0).
        grad[~torch.isfinite(grad)] = 0
class ChannelLastModifier(FunctionModifier):
    # Graph modifier that rewrites an NCHW network into channel-last (NHWC)
    # layout: the graph inputs are transposed once up front, and each
    # layout-sensitive function is re-created with channel-last arguments and
    # transposed parameters.

    def __init__(self, inputs, inputs_cl=None):
        """``inputs``: original NCHW input variables of the graph.
        ``inputs_cl``: optional pre-built NHWC replacement inputs; built
        automatically from ``inputs`` when ``None``."""
        super(ChannelLastModifier, self).__init__()
        self._inputs = inputs
        self._inputs_cl = inputs_cl
        self._prepare_inputs(inputs, inputs_cl)

    def _prepare_inputs(self, inputs, inputs_cl=None):
        """Create (or reuse) NHWC twins of ``inputs`` and map each input's
        consuming function onto its channel-last replacement."""
        if (inputs_cl is None):
            inputs_cl = []
            for inp in inputs:
                # Assumes 4-D NCHW inputs -- TODO confirm against callers.
                (b, c, h, w) = inp.shape
                x = nn.Variable([b, h, w, c])
                # Transpose the data NCHW -> NHWC.
                x.d = inp.d.copy().transpose([0, 2, 3, 1])
                inputs_cl.append(x)
        self.inputs_cl = inputs_cl
        for (inp, inp_cl) in zip(inputs, inputs_cl):
            # Redirect the first consumer of each original input to its twin.
            f = inp.function_references[0]
            self.init_map_func_inputs(f, [inp_cl])

    def connect(self, fname, inputs, args):
        """Re-create function ``fname`` on the (already converted) channel-last
        ``inputs`` and return the new output variable."""
        if (fname in ['Convolution', 'Deconvolution']):
            args['channel_last'] = True
            x = inputs[0]
            w = inputs[1]
            b = (inputs[2] if (len(inputs) == 3) else None)
            scope = self.get_parameter_scope(w)
            with nn.parameter_scope(scope):
                # Re-create the weights with axes moved to channel-last order.
                wd = w.d.copy().transpose(0, 2, 3, 1)
                w = nn.parameter.get_parameter_or_create('W_cl', wd.shape, wd)
            # NOTE(review): F.convolution is used even when fname is
            # 'Deconvolution' -- this looks like a bug; verify intent.
            o = F.convolution(x, w, b, **args)
        elif (fname == 'BatchNormalization'):
            x = inputs[0]
            beta = inputs[1]
            gamma = inputs[2]
            mean = inputs[3]
            var = inputs[4]
            # Normalize over the last axis in channel-last layout.
            args['axes'] = [(len(x.shape) - 1)]
            # Drop args F.batch_normalization does not accept.
            if ('no_scale' in args):
                del args['no_scale']
            if ('no_bias' in args):
                del args['no_bias']
            scope = self.get_parameter_scope(beta)
            with nn.parameter_scope(scope):
                # Move all batch-norm statistics/affine params to channel-last.
                beta_d = beta.d.copy().transpose(0, 2, 3, 1)
                gamma_d = gamma.d.copy().transpose(0, 2, 3, 1)
                mean_d = mean.d.copy().transpose(0, 2, 3, 1)
                var_d = var.d.copy().transpose(0, 2, 3, 1)
                beta = nn.parameter.get_parameter_or_create('beta_cl', beta_d.shape, beta_d, beta.need_grad)
                gamma = nn.parameter.get_parameter_or_create('gamma_cl', gamma_d.shape, gamma_d, gamma.need_grad)
                mean = nn.parameter.get_parameter_or_create('mean_cl', mean_d.shape, mean_d, mean.need_grad)
                var = nn.parameter.get_parameter_or_create('var_cl', var_d.shape, var_d, var.need_grad)
            o = F.batch_normalization(x, beta, gamma, mean, var, **args)
        elif (fname in ['MaxPooling', 'AveragePooling', 'SumPooling']):
            # Poolings take a channel_last flag directly.
            args['channel_last'] = True
            o = self._call_function(fname, inputs, args)
        elif (fname in ['Concatenate']):
            # Concatenate along the (now last) channel axis.
            args['axis'] = (len(inputs[0].shape) - 1)
            o = self._call_function(fname, inputs, args)
        elif (fname == 'Affine'):
            x = inputs[0]
            # Re-order the flattened affine weights to match NHWC input order.
            (_, h_s, w_s, c_s) = inputs[0].shape
            (_, b_s) = inputs[1].shape
            wd = inputs[1].d.copy()
            wd = np.reshape(wd, (c_s, h_s, w_s, b_s))
            wd = np.transpose(wd, (1, 2, 0, 3))
            wd = np.reshape(wd, ((- 1), b_s))
            w = nn.parameter.get_parameter_or_create('w_cl', wd.shape, wd, False)
            b = (inputs[2] if (len(inputs) == 3) else None)
            o = F.affine(x, w, b, **args)
        else:
            # Layout-insensitive function: re-create it unchanged.
            o = self._call_function(fname, inputs, args)
        return o

    def modify(self, f, inputs):
        """FunctionModifier hook: return a replacement output for the
        layout-sensitive functions; for anything else the implicit ``None``
        keeps the default behavior.
        (NOTE(review): 'Unpooling' is listed here but has no dedicated branch
        in ``connect`` -- it falls into the generic re-create path.)"""
        fname = f.info.type_name
        args = f.info.args
        if (fname in ['Convolution', 'Deconvolution', 'BatchNormalization', 'MaxPooling', 'AveragePooling', 'SumPooling', 'Unpooling', 'Concatenate', 'Affine']):
            o = self.connect(fname, inputs, args)
            return o

    def __finish__(self):
        # Rebuild the channel-last inputs so the modifier can be applied again.
        self._prepare_inputs(self._inputs, self._inputs_cl)
def similarity_constrained_penalized_logp_atomrings(smiles, name, threshold, fp_type='ECFP4'):
    """Build a goal-directed benchmark: maximize penalized logP while staying
    Tanimoto-similar (>= ``threshold``) to the starting molecule ``smiles``."""
    objective = RdkitScoringFunction(descriptor=(lambda mol: _penalized_logp_atomrings(mol)))
    # Offset so the starting molecule scores zero on the objective.
    start_offset = -objective.score(smiles)
    similarity = TanimotoScoringFunction(target=smiles, fp_type=fp_type)
    thresholded = ThresholdedImprovementScoringFunction(
        objective=objective,
        constraint=similarity,
        threshold=threshold,
        offset=start_offset,
    )
    thresholded.corrupt_score = -1000.0
    return GoalDirectedBenchmark(
        name=f'{name} {threshold:.1f} Similarity Constrained Penalized logP',
        objective=thresholded,
        contribution_specification=uniform_specification(1),
    )
def byetenet_residual_block(input_, dilation, layer_no, residual_channels, filter_width, causal=True, train=True):
    """ByteNet residual block: LN -> ReLU -> 1x1 conv -> LN -> ReLU ->
    dilated conv -> LN -> ReLU -> 1x1 conv, plus the skip connection.

    ``causal=True`` builds a decoder block, otherwise an encoder block.
    (NOTE(review): 'byetenet' is a typo for 'bytenet'; the name is kept so
    existing callers keep working.)
    """
    kind = 'decoder' if causal else 'encoder'
    scope_name = 'bytenet_{}_layer_{}_{}'.format(kind, layer_no, dilation)
    with tf.variable_scope(scope_name):
        hidden = layer_normalization(input_, name='ln1', trainable=train)
        hidden = conv1d(tf.nn.relu(hidden), residual_channels, name='conv1d_1')
        hidden = layer_normalization(hidden, name='ln2', trainable=train)
        hidden = conv1d(tf.nn.relu(hidden), residual_channels, dilation, filter_width, causal=causal, name='dilated_conv')
        hidden = layer_normalization(hidden, name='ln3', trainable=train)
        hidden = conv1d(tf.nn.relu(hidden), 2 * residual_channels, name='conv1d_2')
        return input_ + hidden
def infer_dataset_impl(path):
    """Guess which on-disk dataset format lives at ``path``.

    Returns one of ``'raw'``, ``'cached'``, ``'mmap'``, ``'fasta'`` or
    ``None`` when the format cannot be determined.
    """
    if IndexedRawTextDataset.exists(path):
        return 'raw'
    if IndexedDataset.exists(path):
        # Disambiguate cached vs mmap via the index file's magic header.
        with open(index_file_path(path), 'rb') as f:
            magic = f.read(8)
        if magic == IndexedDataset._HDR_MAGIC:
            return 'cached'
        if magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
            return 'mmap'
        return None
    if FastaDataset.exists(path):
        return 'fasta'
    return None
class Partition4(nn.Module):
    # Auto-generated pipeline-parallel partition: hosts the tail of `Net`
    # (h2_layer -> bn3 -> output_layer) on a single device (default cuda:4).

    # Scopes of the original layers owned by this partition, in execution order.
    LAYER_SCOPES = ['Net/Linear[h2_layer]', 'Net/BatchNorm1d[bn3]', 'Net/Linear[output_layer]']
    # Free (layer-less) parameters/buffers owned by this partition; none here.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:4'):
        """``layers``/``tensors``: scope -> module/tensor maps for the whole
        model; this partition registers only the entries it owns."""
        super().__init__()
        # Register owned layers under stable generated names l_0, l_1, ...
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register owned free tensors as parameters (p_i) or buffers (b_i).
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Structure descriptor consumed by `unflatten` to rebuild the inputs.
        self.input_structure = [1]
        # Map from generated names back to the original layer names.
        self.lookup = {'l_0': 'h2_layer', 'l_1': 'bn3', 'l_2': 'output_layer'}
        self.to(self.device)

    def forward(self, *args):
        # Rebuild the single expected input tensor from the flattened args.
        x0 = unflatten(args, self.input_structure)[0]
        t_0 = torch.nn.functional.leaky_relu(x0, negative_slope=0.01, inplace=False)
        t_0 = self.l_0(t_0)
        t_0 = self.l_1(t_0)
        t_0 = torch.nn.functional.leaky_relu(t_0, negative_slope=0.01, inplace=False)
        t_0 = self.l_2(t_0)
        # Partitions return a tuple of outputs.
        return (t_0,)

    # The remaining methods delegate to module-level helper functions defined
    # elsewhere in the generated file.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, state):
        return load_state_dict(self, state)

    def named_parameters(self, recurse=True):
        return named_parameters(self, recurse=recurse)

    def named_buffers(self, recurse=True):
        return named_buffers(self, recurse=recurse)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.
# (NOTE(review): stray dataset-viewer boilerplate captured during extraction;
# converted to comments so the module stays parseable.)