code stringlengths 101 5.91M |
|---|
class DDIMSchedulerOutput(BaseOutput):
    """Output container returned by a DDIM scheduler step."""

    # Sample for the previous timestep; fed back into the denoising loop.
    prev_sample: torch.FloatTensor
    # Model's prediction of the fully denoised sample, when available.
    # NOTE(review): semantics inferred from field names — confirm against the
    # scheduler's step() implementation.
    pred_original_sample: Optional[torch.FloatTensor] = None
def _sum_clones_gradients(clone_grads):
    """Sum per-clone gradients for each shared variable.

    `clone_grads` is a list (one entry per clone) of (gradient, variable)
    lists; every clone must reference the same variables in the same order.
    Returns a single list of (summed_gradient, variable) pairs, skipping
    variables for which no clone produced a gradient.
    """
    summed = []
    for per_clone in zip(*clone_grads):
        shared_var = per_clone[0][1]
        grads = []
        for g, v in per_clone:
            # All clones must agree on the variable at this position.
            assert v == shared_var
            if g is not None:
                grads.append(g)
        if not grads:
            continue
        if len(grads) == 1:
            total = grads[0]
        else:
            total = tf.add_n(grads, name=shared_var.op.name + '/sum_grads')
        summed.append((total, shared_var))
    return summed
def main():
    """Score an MLPerf accuracy log against reference transcripts (WER)."""
    args = get_args()
    # Character vocabulary: space, a-z, apostrophe.
    labels = [' '] + [chr(c) for c in range(ord('a'), ord('z') + 1)] + ["'"]
    manifest = Manifest(args.dataset_dir, [args.manifest], labels, len(labels), normalize=True, max_duration=15.0)
    with open(os.path.join(args.log_dir, 'mlperf_log_accuracy.json')) as fh:
        results = json.load(fh)
    # Decode each hex-encoded prediction and pair it with its reference text.
    hypotheses = [array.array(dtype_map[args.output_dtype], bytes.fromhex(r['data'])).tolist() for r in results]
    references = [manifest[r['qsl_idx']]['transcript'] for r in results]
    references = __gather_predictions([references], labels=labels)
    hypotheses = __gather_predictions([hypotheses], labels=labels)
    wer = process_evaluation_epoch(dict(predictions=hypotheses, transcripts=references))
    print('Word Error Rate: {:}%, accuracy={:}%'.format(wer * 100, (1 - wer) * 100))
def run(args):
    """Train sequentially over `args.tasks` tasks (continual learning loop).

    After the final epoch of each task, evaluates on every task seen so far,
    logs metrics, optionally logs the Hessian eigenspectrum for the current
    task, and saves checkpoints.
    """
    (acc_db, loss_db, hessian_eig_db) = init_experiment(args)
    print('Loading {} tasks for {}'.format(args.tasks, args.dataset))
    tasks = get_benchmark_data_loader(args)(args.tasks, args.batch_size)
    print('loaded all tasks!')
    model = get_benchmark_model(args)
    criterion = nn.CrossEntropyLoss().to(DEVICE)
    # Global step counter incremented once per epoch (x-axis for logging).
    time = 0
    for current_task_id in range(1, (args.tasks + 1)):
        print(' TASK {} / {} '.format(current_task_id, args.tasks))
        train_loader = tasks[current_task_id]['train']
        # Learning rate decays geometrically per task, floored at 5e-5.
        lr = max((args.lr * (args.gamma ** current_task_id)), 5e-05)
        for epoch in range(1, (args.epochs_per_task + 1)):
            # Fresh optimizer each epoch so the per-task lr takes effect.
            optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.8)
            train_single_epoch(model, optimizer, train_loader, criterion, current_task_id)
            time += 1
            for prev_task_id in range(1, (current_task_id + 1)):
                # Evaluation only happens after the last epoch of the task.
                if (epoch == args.epochs_per_task):
                    model = model.to(DEVICE)
                    val_loader = tasks[prev_task_id]['test']
                    metrics = eval_single_epoch(model, val_loader, criterion, prev_task_id)
                    (acc_db, loss_db) = log_metrics(metrics, time, prev_task_id, acc_db, loss_db)
                    # Hessian spectrum only for the just-finished task.
                    if ((prev_task_id == current_task_id) and args.compute_eigenspectrum):
                        hessian_eig_db = log_hessian(model, val_loader, time, prev_task_id, hessian_eig_db)
                    save_checkpoint(model, time)
    end_experiment(args, acc_db, loss_db, hessian_eig_db)
class TemplateHitFeaturizer():
    """Turns template search hits into model features, filtered by date.

    Hits are prefiltered (release-date cutoff, obsolete PDBs, optional strict
    error checking), sorted by `sum_probs`, and at most `max_hits` of them are
    featurized against structures found in `mmcif_dir`.
    """

    def __init__(self, mmcif_dir: str, max_template_date: str, max_hits: int, kalign_binary_path: str, release_dates_path: Optional[str]=None, obsolete_pdbs_path: Optional[str]=None, strict_error_check: bool=False, _shuffle_top_k_prefiltered: Optional[int]=None, _zero_center_positions: bool=True):
        # Directory of mmCIF structure files; must contain at least one *.cif.
        self._mmcif_dir = mmcif_dir
        if (not glob.glob(os.path.join(self._mmcif_dir, '*.cif'))):
            logging.error('Could not find CIFs in %s', self._mmcif_dir)
            raise ValueError(f'Could not find CIFs in {self._mmcif_dir}')
        try:
            self._max_template_date = datetime.datetime.strptime(max_template_date, '%Y-%m-%d')
        except ValueError:
            raise ValueError('max_template_date must be set and have format YYYY-MM-DD.')
        self.max_hits = max_hits
        self._kalign_binary_path = kalign_binary_path
        self._strict_error_check = strict_error_check
        if release_dates_path:
            logging.info('Using precomputed release dates %s.', release_dates_path)
            self._release_dates = _parse_release_dates(release_dates_path)
        else:
            self._release_dates = {}
        if obsolete_pdbs_path:
            logging.info('Using precomputed obsolete pdbs %s.', obsolete_pdbs_path)
            self._obsolete_pdbs = _parse_obsolete(obsolete_pdbs_path)
        else:
            self._obsolete_pdbs = {}
        # Optionally shuffle the top-k prefiltered hits before selection.
        self._shuffle_top_k_prefiltered = _shuffle_top_k_prefiltered
        self._zero_center_positions = _zero_center_positions

    def get_templates(self, query_sequence: str, query_pdb_code: Optional[str], query_release_date: Optional[datetime.datetime], hits: Sequence[parsers.TemplateHit]) -> TemplateSearchResult:
        """Featurize up to `self.max_hits` valid hits for the query.

        Returns a TemplateSearchResult whose features dict maps each name in
        TEMPLATE_FEATURES to a stacked per-hit array (or an empty array when
        no hit survives), together with collected errors and warnings.
        """
        logging.info('Searching for template for: %s', query_pdb_code)
        template_features = {}
        for template_feature_name in TEMPLATE_FEATURES:
            template_features[template_feature_name] = []
        # Effective cutoff: 60 days before the query's own release date, if
        # that is earlier than the configured maximum template date.
        template_cutoff_date = self._max_template_date
        if query_release_date:
            delta = datetime.timedelta(days=60)
            if ((query_release_date - delta) < template_cutoff_date):
                template_cutoff_date = (query_release_date - delta)
            assert (template_cutoff_date < query_release_date)
        assert (template_cutoff_date <= self._max_template_date)
        num_hits = 0
        errors = []
        warnings = []
        filtered = []
        for hit in hits:
            prefilter_result = _prefilter_hit(query_sequence=query_sequence, query_pdb_code=query_pdb_code, hit=hit, max_template_date=template_cutoff_date, release_dates=self._release_dates, obsolete_pdbs=self._obsolete_pdbs, strict_error_check=self._strict_error_check)
            if prefilter_result.error:
                errors.append(prefilter_result.error)
            if prefilter_result.warning:
                warnings.append(prefilter_result.warning)
            if prefilter_result.valid:
                filtered.append(hit)
        # Highest-confidence hits first; optionally permute the top k.
        filtered = list(sorted(filtered, key=(lambda x: x.sum_probs), reverse=True))
        idx = list(range(len(filtered)))
        if self._shuffle_top_k_prefiltered:
            stk = self._shuffle_top_k_prefiltered
            idx[:stk] = np.random.permutation(idx[:stk])
        for i in idx:
            if (num_hits >= self.max_hits):
                break
            hit = filtered[i]
            result = _process_single_hit(query_sequence=query_sequence, query_pdb_code=query_pdb_code, hit=hit, mmcif_dir=self._mmcif_dir, max_template_date=template_cutoff_date, release_dates=self._release_dates, obsolete_pdbs=self._obsolete_pdbs, strict_error_check=self._strict_error_check, kalign_binary_path=self._kalign_binary_path, _zero_center_positions=self._zero_center_positions)
            if result.error:
                errors.append(result.error)
            if result.warning:
                warnings.append(result.warning)
            if (result.features is None):
                # Invalid hits are skipped but do not count toward max_hits.
                logging.info('Skipped invalid hit %s, error: %s, warning: %s', hit.name, result.error, result.warning)
            else:
                num_hits += 1
                for k in template_features:
                    template_features[k].append(result.features[k])
        for name in template_features:
            if (num_hits > 0):
                template_features[name] = np.stack(template_features[name], axis=0).astype(TEMPLATE_FEATURES[name])
            else:
                template_features[name] = np.array([], dtype=TEMPLATE_FEATURES[name])
        return TemplateSearchResult(features=template_features, errors=errors, warnings=warnings)
def main(_):
    """Entry point: download and convert the dataset named by --dataset_name."""
    if not FLAGS.dataset_name:
        raise ValueError('You must supply the dataset name with --dataset_name')
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    # Dispatch table instead of an if/elif chain; unknown names still raise.
    converters = {
        'cifar10': download_and_convert_cifar10.run,
        'flowers': download_and_convert_flowers.run,
        'mnist': download_and_convert_mnist.run,
    }
    converter = converters.get(FLAGS.dataset_name)
    if converter is None:
        raise ValueError(('dataset_name [%s] was not recognized.' % FLAGS.dataset_name))
    converter(FLAGS.dataset_dir)
class MultipleMetrics(object):
    """Bundle several metrics behind one callable.

    Accepts metric classes (instantiated on the fly) or instances. Calling
    the bundle evaluates every metric and returns a dict keyed by
    `prefix + metric name`.
    """

    def __init__(self, metrics: List[Union[(Metric, object)]], prefix: str=''):
        # Instantiate any metric passed as a class rather than an instance.
        self._metrics = [m() if isinstance(m, type) else m for m in metrics]
        self.prefix = prefix

    def reset(self):
        """Reset the internal state of every wrapped metric."""
        for m in self._metrics:
            m.reset()

    def __call__(self, y_pred: Tensor, y_true: Tensor) -> Dict:
        """Evaluate all metrics on a batch and return {name: value}."""
        logs = {}
        for m in self._metrics:
            if isinstance(m, Metric):
                logs[self.prefix + m._name] = m(y_pred, y_true)
            elif isinstance(m, TorchMetric):
                # torchmetrics API: accumulate, then compute.
                m.update(y_pred, y_true.int())
                logs[self.prefix + type(m).__name__] = m.compute().detach().cpu().numpy()
        return logs
class ChatCompletionRequest(BaseModel):
    """Request body for an OpenAI-style chat-completions endpoint."""

    # Identifier of the model to run the request against.
    model: str
    # Conversation messages, in order.
    messages: List[ChatMessage]
    # Optional sampling temperature.
    temperature: Optional[float] = None
    # Optional nucleus-sampling cutoff.
    top_p: Optional[float] = None
    # Optional cap on the generated length.
    max_length: Optional[int] = None
    # Whether to stream the response incrementally.
    stream: Optional[bool] = False
def report_num_trainable_parameters(model: torch.nn.Module) -> int:
    """Log per-tensor and total counts of trainable parameters.

    Args:
        model: Any ``nn.Module``; only parameters with ``requires_grad`` count.

    Returns:
        Total number of trainable parameters as a plain Python int.

    Fixes: the original evaluated ``np.prod(list(p.size()))`` twice per tensor
    and returned a numpy scalar, contradicting the ``-> int`` annotation;
    ``p.numel()`` computes the same value once and yields an int.
    """
    assert isinstance(model, torch.nn.Module), 'Argument must be nn.Module'
    num_parameters = 0
    for (name, p) in model.named_parameters():
        if p.requires_grad:
            count = p.numel()
            num_parameters += count
            logger.info('{}: {}'.format(name, count))
    # Integer division: reports whole millions (0M for models under 1M params).
    logger.info('Number of parameters: {}M'.format((num_parameters // (10 ** 6))))
    return num_parameters
def object_detect(args):
    """Run object detection over one image (or a whole folder) and dump JSON.

    Uses `args.img_path` when given, otherwise every file under
    `args.data_root/args.img_dir`. Results go to
    `<args.output>/<image stem>/object_detection`.
    """
    mp.set_start_method('spawn', force=True)
    logger = logging.getLogger()
    cfg = setup_cfg(args)
    demo = UnifiedVisualizationDemo(cfg)
    if args.img_path:
        paths = [args.img_path]
    else:
        folder = os.path.join(args.data_root, args.img_dir)
        paths = [os.path.join(folder, name) for name in os.listdir(folder)]
    if not paths:
        return
    for path in tqdm.tqdm(paths, disable=not args.output):
        img = read_image(path, format='BGR')
        start_time = time.time()
        predictions, visualized_output = demo.run_on_image(img)
        img_name = os.path.basename(path)
        output_dir = os.path.join(args.output, img_name.split('.')[0], 'object_detection')
        visualize_to_json(predictions, demo.metadata.thing_classes, img_name, output_dir)
        if 'instances' in predictions:
            status = 'detected {} instances'.format(len(predictions['instances']))
        else:
            status = 'finished'
        logger.info('{}: {} in {:.2f}s'.format(path, status, time.time() - start_time))
def _isint(string):
    """Return True if `string` is an int, or text convertible to an int."""
    if type(string) is int:
        return True
    stringlike = isinstance(string, _binary_type) or isinstance(string, _text_type)
    return stringlike and _isconvertible(int, string)
class DummyDataset(Dataset):
    """Minimal dataset over in-memory arrays (or file paths) with a transform."""

    def __init__(self, images, labels, trsf, use_path=False):
        assert len(images) == len(labels), 'Data size error!'
        self.images = images
        self.labels = labels
        self.trsf = trsf
        # When True, `images` holds file paths loaded lazily via pil_loader.
        self.use_path = use_path

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        if self.use_path:
            raw = pil_loader(self.images[idx])
        else:
            raw = Image.fromarray(self.images[idx])
        return (idx, self.trsf(raw), self.labels[idx])
def test_digits_sqrt_sample_sparse():
    """Sparse digits + sample optimizer must reproduce the known reference results."""
    selector = FeatureBasedSelection(100, 'sqrt', optimizer='sample', random_state=0)
    selector.fit(X_digits_sparse)
    assert_array_equal(selector.ranking, digits_sqrt_sample_ranking)
    assert_array_almost_equal(selector.gains, digits_sqrt_sample_gains, 4)
    assert_array_almost_equal(selector.subset, X_digits_sparse[selector.ranking].toarray())
class MetaSpecProp(SpecProp):
    """Spec propagation that executes each node on `device`, then offloads
    results (and modules) back to CPU and aggressively frees device memory
    to bound the peak footprint during tracing.
    """

    def __init__(self, module, device='cpu'):
        # Device used to actually execute each node during propagation.
        self._device = device
        super().__init__(module)
        self.fake_module = None
        self.fake_mode = None

    def call_module(self, target, args, kwargs):
        """Run a submodule on the device, then move it and its result to CPU."""
        assert isinstance(target, str)
        submod = self.fetch_attr(target)
        # Materialize the (possibly meta) submodule on the execution device.
        reload_meta_module(submod, device=self._device, delete_ckpt_name=False)
        (args, kwargs) = move_args_kwargs_to_device(args, kwargs, self._device)
        with torch.no_grad():
            res = submod(*args, **kwargs)
        orig_module = submod
        orig_res = res
        submod = submod.to('cpu')
        (args, kwargs) = move_args_kwargs_to_device(args, kwargs, 'cpu')
        (res, _) = move_args_kwargs_to_device(res, {}, 'cpu')
        # Drop device-side references before collecting so the allocator can
        # actually release the memory.
        del orig_res
        del orig_module
        del args
        del kwargs
        gc.collect()
        torch.cuda.empty_cache()
        return res

    def call_function(self, target, args, kwargs):
        """Run a free function on the device and offload its result to CPU."""
        assert (not isinstance(target, str))
        (args, kwargs) = move_args_kwargs_to_device(args, kwargs, self._device)
        with torch.no_grad():
            res = target(*args, **kwargs)
        (args, kwargs) = move_args_kwargs_to_device(args, kwargs, 'cpu')
        orig_res = res
        (res, _) = move_args_kwargs_to_device(res, {}, 'cpu')
        del orig_res
        del args
        del kwargs
        gc.collect()
        torch.cuda.empty_cache()
        return res

    def call_method(self, target, args, kwargs):
        """Run a method on the device and offload its result to CPU."""
        assert isinstance(target, str)
        # BUG FIX: unpack *after* moving to the device. Previously `self_obj`
        # and `args_tail` were taken from the pre-move tuple, so only kwargs
        # actually ended up on `self._device`.
        (args, kwargs) = move_args_kwargs_to_device(args, kwargs, self._device)
        (self_obj, *args_tail) = args
        with torch.no_grad():
            res = getattr(self_obj, target)(*args_tail, **kwargs)
        (args, kwargs) = move_args_kwargs_to_device(args, kwargs, 'cpu')
        orig_res = res
        (res, _) = move_args_kwargs_to_device(res, {}, 'cpu')
        del orig_res
        del args
        del kwargs
        gc.collect()
        torch.cuda.empty_cache()
        return res
def download_azure(directory=None, raw_data=False):
    """Download a blob archive from Azure into `directory` and unpack it.

    Downloads the 'raw' or 'train' prefix, extracts `<prefix>.tar.gz` in
    place, and returns `directory`.
    """
    logger.info(f'downloading data into {directory}')
    downloader = BlobFileDownloader(directory)
    prefix = 'raw' if raw_data else 'train'
    downloader.download_blobs_in_container(prefix=prefix)
    logger.info('Extracting files...')
    archive_path = os.path.join(directory, f'{prefix}.tar.gz')
    # NOTE(review): extractall on a downloaded archive is vulnerable to path
    # traversal if the blob store is not fully trusted — consider the
    # `filter='data'` option (Python 3.12+).
    with tarfile.open(archive_path, mode='r:*') as tf:
        tf.extractall(path=directory)
    return directory
def remove_weight_norm(module, name='weight'):
    """Strip the BoundedWeightNorm pre-hook called `name` from `module`.

    Returns the module on success; raises ValueError when no matching
    weight-norm hook is registered.
    """
    for hook_id, hook in module._forward_pre_hooks.items():
        if isinstance(hook, BoundedWeightNorm) and hook.name == name:
            hook.remove(module)
            del module._forward_pre_hooks[hook_id]
            return module
    raise ValueError("weight_norm of '{}' not found in {}".format(name, module))
class DropBlock2d(nn.Module):
    """Block-wise spatial dropout for 2D feature maps.

    Acts as the identity in eval mode or when `drop_prob` is falsy; otherwise
    delegates to the fast or reference implementation depending on `fast`.
    """

    def __init__(self, drop_prob=0.1, block_size=7, gamma_scale=1.0, with_noise=False, inplace=False, batchwise=False, fast=True):
        super(DropBlock2d, self).__init__()
        self.drop_prob = drop_prob
        self.gamma_scale = gamma_scale
        self.block_size = block_size
        self.with_noise = with_noise
        self.inplace = inplace
        self.batchwise = batchwise
        self.fast = fast

    def forward(self, x):
        # Identity at eval time or with zero drop probability.
        if not self.training or not self.drop_prob:
            return x
        impl = drop_block_fast_2d if self.fast else drop_block_2d
        return impl(x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
class TestClassifier(unittest.TestCase):
    """Sanity checks for the CBA classifier wrapper."""

    def test_inspect(self):
        # inspect() should yield a DataFrame with one row per rule plus the
        # default rule, whose antecedent is the empty set string '{}'.
        cba = CBA()
        test_dataframe = pd.read_csv(dataset_file, sep=';')
        transactions = TransactionDB.from_DataFrame(test_dataframe)
        cba.fit(transactions)
        clf = cba.clf
        inspect_df = clf.inspect()
        self.assertEqual(type(inspect_df), pd.DataFrame)
        self.assertEqual(len(inspect_df), (len(clf.rules) + 1))
        self.assertEqual(inspect_df['lhs'].iloc[(- 1)], '{}')

    def test_default_rule_correct(self):
        # The default rule must pick one of the class labels, and its
        # support/confidence must be valid probabilities, for both models.
        cba = CBA(support=0.9)
        cba_m2 = CBA(support=0.9)
        header1 = ['A', 'B', 'Y']
        rows1 = [[1, 1, 0], [0, 0, 1]]
        transactions = TransactionDB(rows1, header1)
        cba.fit(transactions)
        cba_m2.fit(transactions)
        default_class = cba.clf.default_class
        default_class_m2 = cba_m2.clf.default_class
        self.assertTrue((default_class in ['0', '1']))
        self.assertTrue((default_class_m2 in ['0', '1']))
        default_class_support = cba.clf.default_class_support
        default_class_confidence = cba.clf.default_class_confidence
        default_class_support_m2 = cba_m2.clf.default_class_support
        default_class_confidence_m2 = cba_m2.clf.default_class_confidence
        self.assertTrue((0 <= default_class_support <= 1))
        self.assertTrue((0 <= default_class_support_m2 <= 1))
        self.assertTrue((0 <= default_class_confidence <= 1))
        self.assertTrue((0 <= default_class_confidence_m2 <= 1))

    def test_predict_probablity(self):
        # Smoke test only: checks predict_probability_all runs on a fitted
        # model. NOTE(review): no assertion on `probs` — consider adding one.
        header1 = ['A', 'B', 'Y']
        rows1 = [[1, 1, 0], [1, 1, 0], [1, 1, 1], [0, 0, 0], [0, 0, 1], [0, 0, 1]]
        transactions = TransactionDB(rows1, header1)
        cba = CBA()
        cba.fit(transactions)
        probs = cba.clf.predict_probability_all(transactions)
def adjust_shape(placeholder, data):
    """Reshape `data` to fit `placeholder`'s shape, using -1 for unknown dims.

    Non-array, non-list inputs are returned untouched; lists are converted to
    numpy arrays first. Compatibility is checked via `_check_shape`.
    """
    if not isinstance(data, (np.ndarray, list)):
        return data
    if isinstance(data, list):
        data = np.array(data)
    target_shape = [dim or -1 for dim in placeholder.shape.as_list()]
    assert _check_shape(target_shape, data.shape), 'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, target_shape)
    return np.reshape(data, target_shape)
def backprop(dataset, model, optimizer):
    """Run one optimization pass over `dataset` and return the mean loss.

    Each element of `dataset` is (features, target); features are converted to
    a float tensor before the forward pass.

    Fixes: the original accumulated `loss` directly, which kept every
    iteration's autograd graph alive for the whole epoch (memory leak);
    accumulating `loss.detach()` keeps the same returned value without
    retaining the graphs.
    """
    total = 0
    for sample in dataset:
        feature = torch.tensor(sample[0], dtype=torch.float)
        y_pred = model(feature)
        y_true = sample[1]
        optimizer.zero_grad()
        loss = custom_loss(y_pred, y_true, model.name)
        loss.backward()
        optimizer.step()
        total += loss.detach()
    return total / len(dataset)
def assert_type_bin_pack_state(state: State) -> None:
    """Chex-validate a bin-pack State: int32 geometry fields, boolean masks."""
    int_leaves = (state.container, state.ems, state.items, state.items_location, state.sorted_ems_indexes)
    bool_leaves = (state.ems_mask, state.items_mask, state.items_placed, state.action_mask)
    jax.tree_util.tree_map(lambda leaf: chex.assert_type(leaf, jnp.int32), int_leaves)
    jax.tree_util.tree_map(lambda leaf: chex.assert_type(leaf, bool), bool_leaves)
def batch_shuffle(x):
    """Globally shuffle a batch across distributed workers.

    Gathers every rank's batch, applies a single permutation (broadcast from
    rank 0 so all workers agree), and returns this rank's shard of the
    shuffled data together with the unshuffle indices for this rank's
    original slots.
    NOTE(review): assumes `concat_all_gather` returns (list of per-rank
    tensors, list of per-rank batch sizes) — confirm against its definition.
    """
    batch_size_this = x.shape[0]
    (all_xs, batch_size_all) = concat_all_gather(x)
    all_xs_concat = torch.cat(all_xs, dim=0)
    total_bs = sum(batch_size_all)
    rank = dist.get_rank()
    assert (batch_size_all[rank] == batch_size_this)
    # Half-open slot range [start, stop) owned by this rank in the gather.
    idx_range = (sum(batch_size_all[:rank]), sum(batch_size_all[:(rank + 1)]))
    idx_shuffle = torch.randperm(total_bs, device=x.device)
    # Every rank must use rank 0's permutation, not its own.
    dist.broadcast(idx_shuffle, src=0)
    idx_unshuffle = torch.argsort(idx_shuffle)
    # Split the permutation evenly; trailing ranks may receive an empty shard.
    splits = torch.split(idx_shuffle, math.ceil((total_bs / dist.get_world_size())))
    if (len(splits) > rank):
        idx_this = splits[rank]
    else:
        idx_this = idx_shuffle.new_zeros([0])
    return (all_xs_concat[idx_this], idx_unshuffle[idx_range[0]:idx_range[1]])
class PyTorchMultiTargetInferSentModelModule(torch.nn.Module):
    """InferSent-style sentence-pair classifier with two task-specific heads.

    Premise and hypothesis are embedded, encoded by a shared bidirectional
    LSTM, max-pooled, and combined as [u, v, |u - v|, u * v]; a shared MLP
    layer feeds either the 'source' or 'target' head, selected via
    `self.mode`.
    """

    def __init__(self, W_emb, max_len, rnn_size=300, hidden_size=300, dropout=0.2, regularization=1e-06, trainable_embeddings=False, learning_rate=0.001, pool_type='max', use_umls_attention=False, **kwargs):
        super(PyTorchMultiTargetInferSentModelModule, self).__init__()
        self.rnn_size = rnn_size
        self.hidden_size = hidden_size
        self.pool_type = pool_type
        self.dropout = dropout
        # NOTE(review): regularization / learning_rate / use_umls_attention are
        # stored but not used in this class; presumably consumed by a wrapper.
        self.regularization = regularization
        self.trainable_embeddings = trainable_embeddings
        self.learning_rate = learning_rate
        self.use_umls_attention = use_umls_attention
        # Pretrained embedding matrix: (vocab_size, embedding_dim).
        self.W_emb = W_emb
        self.embedding_dim = W_emb.shape[1]
        self.vocab_size = W_emb.shape[0]
        self.embed = torch.nn.Embedding(self.vocab_size, self.embedding_dim)
        self.embed.weight.data.copy_(torch.from_numpy(W_emb))
        if (not self.trainable_embeddings):
            # Freeze embeddings unless explicitly made trainable.
            self.embed.weight.requires_grad = False
        self.encoder = LSTMEncoder(self.embedding_dim, self.rnn_size, bidirectional=True, return_sequence=True)
        # Shared trunk input: 4 combined views of the 2*rnn_size encodings.
        self.classifier_shared = torch.nn.Sequential(torch.nn.Dropout(p=self.dropout), torch.nn.Linear(((self.rnn_size * 2) * 4), self.hidden_size), torch.nn.Tanh())
        self.classifier_source = torch.nn.Sequential(torch.nn.Dropout(p=self.dropout), torch.nn.Linear(self.hidden_size, self.hidden_size), torch.nn.Tanh(), torch.nn.Dropout(p=self.dropout), torch.nn.Linear(self.hidden_size, len(LABELS)))
        self.classifier_target = torch.nn.Sequential(torch.nn.Dropout(p=self.dropout), torch.nn.Linear(self.hidden_size, self.hidden_size), torch.nn.Tanh(), torch.nn.Dropout(p=self.dropout), torch.nn.Linear(self.hidden_size, len(LABELS)))
        # Must be set to 'source' or 'target' before calling forward().
        self.mode = None

    def forward(self, premise, hypothesis, *args):
        """Classify a (premise, hypothesis) pair using the head chosen by `self.mode`."""
        premise_len = get_sequence_length(premise)
        hypothesis_len = get_sequence_length(hypothesis)
        premise = self.embed(premise)
        hypothesis = self.embed(hypothesis)
        premise = self.encoder(premise, premise_len)
        hypothesis = self.encoder(hypothesis, hypothesis_len)
        if (self.pool_type == 'max'):
            # Max-pool over the sequence dimension.
            premise = torch.max(premise, dim=1, keepdim=False)[0]
            hypothesis = torch.max(hypothesis, dim=1, keepdim=False)[0]
        else:
            raise ValueError('Pool type {} is not supported'.format(self.pool_type))
        # InferSent feature combination: [u, v, |u-v|, u*v].
        features = torch.cat([premise, hypothesis, torch.abs((premise - hypothesis)), (premise * hypothesis)], dim=(- 1))
        features_shared = self.classifier_shared(features)
        if (self.mode == 'source'):
            pred = self.classifier_source(features_shared)
        elif (self.mode == 'target'):
            pred = self.classifier_target(features_shared)
        else:
            raise ValueError('You must set the `mode` for a multi-target model')
        return pred
class M4COCRVQADataset(M4CTextVQADataset):
    """M4C dataset for OCR-VQA; identical to the TextVQA dataset except for its name."""

    def __init__(self, dataset_type, imdb_file_index, config, *args, **kwargs):
        super().__init__(dataset_type, imdb_file_index, config, *args, **kwargs)
        # Override the registry name set by the parent.
        self._name = 'm4c_ocrvqa'
class HAIKU(AbstractTask):
    """Haiku generation task over the CT0 manual data.

    Maps each example's `src` field to its `tgt` field; evaluated with ROUGE.
    """

    name = 'haiku'
    metric = [metrics.calculate_rouge]
    metric_names = ['rouge']
    split_to_data_split = {'train': 'train', 'validation': 'validation'}

    # Split name -> on-disk JSON file. The 'validation' split is backed by the
    # test file, mirroring the original behaviour.
    _SPLIT_FILES = {
        'train': './data/manual/ct0_data/haiku/haiku/do_nothing.train.json',
        'validation': './data/manual/ct0_data/haiku/haiku/do_nothing.test.json',
    }

    def load_dataset(self, split: str):
        """Return the requested split as a `datasets.Dataset`, or None.

        Fixes: annotation corrected from `int` — callers pass the split name
        ('train' / 'validation'). Only the requested file is read now, where
        the original always read both files regardless of `split`.
        """
        path = self._SPLIT_FILES.get(split)
        if path is None:
            return None
        with open(path, 'r') as f:
            data = json.load(f)
        return datasets.Dataset.from_dict(data)

    def preprocessor(self, example, add_prefix=True):
        """Package one raw example into the task's source/target schema."""
        return {'source': example['src'], 'target': example['tgt'], 'task': self.name, 'extra_fields': {}}
def squeeze_if_one(arr):
    """Drop a trailing singleton axis from `arr`, if present; otherwise return `arr` unchanged."""
    return np.squeeze(arr, axis=-1) if arr.shape[-1] == 1 else arr
def exp_param_defaults(exp_params):
    """Fill missing experiment parameters with their defaults, in place.

    Existing keys in `exp_params` are never overwritten. Returns the same
    dict for convenience.

    Fixes: the original called `defaults.iteritems()`, which is Python 2 only
    and raises AttributeError on Python 3; `.items()` plus `setdefault` is the
    equivalent, version-safe merge. (The algorithm strings below are code
    snippets evaluated elsewhere; they are kept verbatim.)
    """
    defaults = dict(
        subset_algos=False,
        error_metric=None,
        compute_quantum_fixed=False,
        score_dir=None,
        slowdown_factor=1,
        plot=False,
        movie=False,
        use_db=False,
        strategy='stack-meta',
        super_fast_subset=1000,
        super_fast_timeout=np.inf,
        one_shot_timeout=0.333,
        anytime_timeout=1,
        use_data_subsets=True,
        super_fast_learners="[\n ('LR-100-subset', CrossValidationAgent, dict(learner=LogisticRegression,\n learner_kwargs=dict(C=100),\n agent=OneShotLearnerAgent,\n agent_kwargs=dict(),\n feature_subset=10))\n ]",
        one_shot_algos="[\n ('LR-0.01', CrossValidationAgent, dict(learner=LogisticRegression,\n learner_kwargs=dict(C=0.01),\n agent=OneShotLearnerAgent,\n agent_kwargs=dict())),\n ('LR-100', CrossValidationAgent, dict(learner=LogisticRegression,\n learner_kwargs=dict(C=100),\n agent=OneShotLearnerAgent,\n agent_kwargs=dict())),\n ('KNN-1', CrossValidationAgent, dict(learner=KNeighborsClassifier,\n learner_kwargs=dict(n_neighbors=1),\n agent=OneShotLearnerAgent,\n agent_kwargs=dict())),\n ('KNN-5', CrossValidationAgent, dict(learner=KNeighborsClassifier,\n learner_kwargs=dict(n_neighbors=3),\n agent=OneShotLearnerAgent,\n agent_kwargs=dict())),\n ('KNN-25', CrossValidationAgent, dict(learner=KNeighborsClassifier,\n learner_kwargs=dict(n_neighbors=9),\n agent=OneShotLearnerAgent,\n agent_kwargs=dict())),\n ('GNB', CrossValidationAgent, dict(learner=GaussianNB,\n learner_kwargs=dict(),\n agent=OneShotLearnerAgent,\n agent_kwargs=dict())),\n ('DTC-1', CrossValidationAgent, dict(learner=DecisionTreeClassifier,\n learner_kwargs=dict(min_samples_leaf=1),\n agent=OneShotLearnerAgent,\n agent_kwargs=dict())),\n ('DTC-5', CrossValidationAgent, dict(learner=DecisionTreeClassifier,\n learner_kwargs=dict(min_samples_leaf=9),\n agent=OneShotLearnerAgent,\n agent_kwargs=dict())),\n ('DTC-25', CrossValidationAgent, dict(learner=DecisionTreeClassifier,\n learner_kwargs=dict(min_samples_leaf=27),\n agent=OneShotLearnerAgent,\n agent_kwargs=dict())),\n ('LR-l1-1', CrossValidationAgent, dict(learner=LogisticRegression,\n learner_kwargs=dict(C=1, penalty='l1'),\n agent=OneShotLearnerAgent,\n agent_kwargs=dict()))\n ]",
        anytime_algos="[\n ('RF-1', CrossValidationAgent, dict(learner=WarmLearner,\n learner_kwargs=dict(base_model=RandomForestClassifier,\n base_model_kwargs=dict(min_samples_leaf=1,\n n_estimators=1)),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('RF-54', CrossValidationAgent, dict(learner=WarmLearner,\n learner_kwargs=dict(base_model=RandomForestClassifier,\n base_model_kwargs=dict(min_samples_leaf=54,\n n_estimators=1)),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('GBM-1', CrossValidationAgent, dict(learner=GradientBoostingClassifier,\n learner_kwargs=dict(min_samples_leaf=1, warm_start=True,\n n_estimators=1),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('GBM-54', CrossValidationAgent, dict(learner=GradientBoostingClassifier,\n learner_kwargs=dict(min_samples_leaf=54, warm_start=True,\n n_estimators=1),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('GBM-1-5', CrossValidationAgent, dict(learner=GradientBoostingClassifier,\n learner_kwargs=dict(min_samples_leaf=1, warm_start=True,\n n_estimators=1, max_depth=5),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('GBM-54-5', CrossValidationAgent, dict(learner=GradientBoostingClassifier,\n learner_kwargs=dict(min_samples_leaf=54, warm_start=True,\n n_estimators=1, max_depth=5),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('RF-3', CrossValidationAgent, dict(learner=WarmLearner,\n learner_kwargs=dict(base_model=RandomForestClassifier,\n base_model_kwargs=dict(min_samples_leaf=3,\n n_estimators=1)),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('RF-27', CrossValidationAgent, dict(learner=WarmLearner,\n learner_kwargs=dict(base_model=RandomForestClassifier,\n base_model_kwargs=dict(min_samples_leaf=27,\n n_estimators=1)),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('GBM-3', CrossValidationAgent, dict(learner=GradientBoostingClassifier,\n learner_kwargs=dict(min_samples_leaf=3, warm_start=True,\n n_estimators=1),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('GBM-27', CrossValidationAgent, dict(learner=GradientBoostingClassifier,\n learner_kwargs=dict(min_samples_leaf=27, warm_start=True,\n n_estimators=1),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('GBM-3-5', CrossValidationAgent, dict(learner=GradientBoostingClassifier,\n learner_kwargs=dict(min_samples_leaf=3, warm_start=True,\n n_estimators=1, max_depth=5),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('GBM-27-5', CrossValidationAgent, dict(learner=GradientBoostingClassifier,\n learner_kwargs=dict(min_samples_leaf=27, warm_start=True,\n n_estimators=1, max_depth=5),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('RF-9', CrossValidationAgent, dict(learner=WarmLearner,\n learner_kwargs=dict(base_model=RandomForestClassifier,\n base_model_kwargs=dict(min_samples_leaf=9,\n n_estimators=1)),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('GBM-9', CrossValidationAgent, dict(learner=GradientBoostingClassifier,\n learner_kwargs=dict(min_samples_leaf=9, warm_start=True,\n n_estimators=1),\n agent_kwargs=dict(time_quantum=self.compute_quantum))),\n ('GBM-9-5',CrossValidationAgent, dict(learner=GradientBoostingClassifier,\n learner_kwargs=dict(min_samples_leaf=9, warm_start=True,\n n_estimators=1, max_depth=5),\n agent_kwargs=dict(time_quantum=self.compute_quantum)))\n ]",
    )
    for key, value in defaults.items():
        exp_params.setdefault(key, value)
    return exp_params
class SumOfLosses(Loss):
    """Loss combinator: evaluates two losses on the same inputs and adds them."""

    def __init__(self, l1, l2):
        super().__init__(name='{} + {}'.format(l1.__name__, l2.__name__))
        self.l1 = l1
        self.l2 = l2

    def __call__(self, *inputs):
        first = self.l1.forward(*inputs)
        second = self.l2.forward(*inputs)
        return first + second
class PCQM4MEvaluator():
    """MAE evaluator and submission writer for the PCQM4M regression task."""

    def __init__(self):
        pass

    def eval(self, input_dict):
        """Return {'mae': ...} for 1-D y_pred/y_true (both numpy or both torch)."""
        assert 'y_pred' in input_dict
        assert 'y_true' in input_dict
        y_pred = input_dict['y_pred']
        y_true = input_dict['y_true']
        both_numpy = isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray)
        both_torch = isinstance(y_true, torch.Tensor) and isinstance(y_pred, torch.Tensor)
        assert both_numpy or both_torch
        assert y_true.shape == y_pred.shape
        assert len(y_true.shape) == 1
        if isinstance(y_true, torch.Tensor):
            return {'mae': torch.mean(torch.abs(y_pred - y_true)).cpu().item()}
        return {'mae': float(np.mean(np.absolute(y_pred - y_true)))}

    def save_test_submission(self, input_dict, dir_path):
        """Write y_pred (shape (377423,)) as float32 to <dir_path>/y_pred_pcqm4m.npz."""
        assert 'y_pred' in input_dict
        y_pred = input_dict['y_pred']
        if not osp.exists(dir_path):
            os.makedirs(dir_path)
        filename = osp.join(dir_path, 'y_pred_pcqm4m')
        assert isinstance(filename, str)
        assert isinstance(y_pred, (np.ndarray, torch.Tensor))
        # Submission size is fixed by the benchmark's test set.
        assert y_pred.shape == (377423,)
        if isinstance(y_pred, torch.Tensor):
            y_pred = y_pred.numpy()
        y_pred = y_pred.astype(np.float32)
        np.savez_compressed(filename, y_pred=y_pred)
def init_randomizer(base_seed=1234):
    """Create the module-wide MultiDimParallelRandomizer singleton.

    Args:
        base_seed: Seed passed to the randomizer.

    Raises:
        RuntimeError: if the singleton was already initialized. (The original
            used `assert`, which is silently stripped under `python -O`, so
            double-initialization would go undetected.)
    """
    global _MDPRInstance
    if _MDPRInstance is not None:
        raise RuntimeError('Repeatedly initializing multiple dimension parallel randomizer.')
    _MDPRInstance = MultiDimParallelRandomizer(base_seed)
def get_fcn8sd(backbone, num_classes, aux=False, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Build an FCN-8s(d) segmentation network, optionally loading pretrained weights.

    Args:
        backbone: Feature-extractor backbone module/name passed to FCN8sd.
        num_classes: Number of segmentation classes.
        aux: Whether to enable the auxiliary head.
        model_name: Pretrained-weights key; required when `pretrained` is True.
        pretrained: Load pretrained weights via the model store.
        root: Local directory for downloaded weights.

    Raises:
        ValueError: if `pretrained` is True but `model_name` is empty/None.
    """
    net = FCN8sd(backbone=backbone, num_classes=num_classes, aux=aux, **kwargs)
    if pretrained:
        # `not model_name` covers both None and the empty string — the
        # original's extra `is None` check was redundant.
        if not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
class NautilusBound():
def compute(cls, points, log_l, log_l_min, log_v_target, enlarge_per_dim=1.1, n_points_min=None, split_threshold=100, n_networks=4, neural_network_kwargs={}, pool=None, rng=None):
bound = cls()
bound.n_dim = points.shape[1]
bound.neural_bounds = []
multi_ellipsoid = Union.compute(points[(log_l >= log_l_min)], enlarge_per_dim=enlarge_per_dim, n_points_min=n_points_min, bound_class=Ellipsoid, rng=rng)
while multi_ellipsoid.split(allow_overlap=False):
pass
for ellipsoid in multi_ellipsoid.bounds:
select = ellipsoid.contains(points)
bound.neural_bounds.append(NeuralBound.compute(points[select], log_l[select], log_l_min, enlarge_per_dim=enlarge_per_dim, n_networks=n_networks, neural_network_kwargs=neural_network_kwargs, pool=pool, rng=rng))
bound.outer_bound = Union.compute(points[(log_l >= log_l_min)], enlarge_per_dim=enlarge_per_dim, n_points_min=n_points_min, bound_class=UnitCubeEllipsoidMixture, rng=rng)
while ((bound.outer_bound.volume() - log_v_target) > np.log((split_threshold * (enlarge_per_dim ** points.shape[1])))):
if (not bound.outer_bound.split()):
break
while ((bound.outer_bound.volume() - log_v_target) > np.log((split_threshold * (enlarge_per_dim ** points.shape[1])))):
if (not bound.outer_bound.trim()):
break
if (rng is None):
bound.rng = np.random.default_rng()
else:
bound.rng = rng
bound.points = np.zeros((0, points.shape[1]))
bound.n_sample = 0
bound.n_reject = 0
return bound
def contains(self, points):
in_bound = self.outer_bound.contains(points)
if (len(self.neural_bounds) > 0):
in_bound = (in_bound & np.any([bound.contains(points) for bound in self.neural_bounds], axis=0))
return in_bound
_limits.wrap(limits=1)
def _reset_and_sample(self, n_points=100, rng=None):
self.reset(rng=rng)
self.sample(n_points=n_points, return_points=False)
return self
def sample(self, n_points=100, return_points=True, pool=None):
if (len(self.points) < n_points):
if (pool is None):
while (len(self.points) < n_points):
n_sample = 1000
points = self.outer_bound.sample(n_sample)
in_bound = np.any([bound.contains(points) for bound in self.neural_bounds], axis=0)
points = points[in_bound]
self.points = np.vstack([self.points, points])
self.n_sample += n_sample
self.n_reject += (n_sample - len(points))
else:
n_jobs = None
for attr in ['_processes', '_max_workers', 'size']:
if hasattr(pool, attr):
n_jobs = getattr(pool, attr)
break
if (n_jobs is None):
raise ValueError('Cannot determine size of pool.')
n_points_per_job = ((max((n_points - len(self.points)), 10000) // n_jobs) + 1)
func = partial(self._reset_and_sample, n_points_per_job)
rngs = [np.random.default_rng(seed) for seed in np.random.SeedSequence(self.rng.integers(((2 ** 32) - 1))).spawn(n_jobs)]
bounds = pool.map(func, rngs)
for bound in bounds:
self.points = np.vstack([self.points, bound.points])
self.n_sample += bound.n_sample
self.n_reject += bound.n_reject
self.outer_bound.n_sample += bound.outer_bound.n_sample
self.outer_bound.n_reject += bound.outer_bound.n_reject
if return_points:
points = self.points[:n_points]
self.points = self.points[n_points:]
return points
def volume(self):
if (self.n_sample == 0):
self.sample(return_points=False)
return (self.outer_bound.volume() + np.log((1.0 - (self.n_reject / self.n_sample))))
def number_of_networks_and_ellipsoids(self):
    """Return (n_networks, n_ellipsoids) currently backing this bound.

    Networks are counted from the first neural bound's emulator (all
    neural bounds share the same emulator layout); an outer-bound
    member counts as an ellipsoid when any of its dimensions is not a
    plain cube dimension.
    """
    emulator = self.neural_bounds[0].emulator
    if emulator is None:
        n_networks = 0
    else:
        n_networks = len(self.neural_bounds) * len(emulator.neural_networks)
    n_ellipsoids = sum(
        np.any(~b.dim_cube) for b in self.outer_bound.bounds)
    return (n_networks, n_ellipsoids)
def write(self, group):
    """Serialize the bound into an HDF5 group.

    Writes scalar metadata as attributes, each neural bound and the
    outer bound into sub-groups, and the cached points as a resizable
    dataset.
    """
    for key, value in [('type', 'NautilusBound'),
                       ('n_dim', self.n_dim),
                       ('n_neural_bounds', len(self.neural_bounds))]:
        group.attrs[key] = value
    for i, neural_bound in enumerate(self.neural_bounds):
        subgroup = group.create_group('neural_bound_{}'.format(i))
        neural_bound.write(subgroup)
    self.outer_bound.write(group.create_group('outer_bound'))
    # maxshape with None rows lets `update` grow the dataset later.
    group.create_dataset('points', data=self.points,
                         maxshape=(None, self.n_dim))
    group.attrs['n_sample'] = self.n_sample
    group.attrs['n_reject'] = self.n_reject
def update(self, group):
    """Refresh the mutable parts of an already-written HDF5 group.

    Only the sampling counters, the outer bound, and the point cache
    change after `write`; the dataset is resized to the current cache
    shape before being overwritten.
    """
    for key in ('n_sample', 'n_reject'):
        group.attrs[key] = getattr(self, key)
    self.outer_bound.update(group['outer_bound'])
    dataset = group['points']
    dataset.resize(self.points.shape)
    dataset[...] = self.points
def read(cls, group, rng=None):
    """Reconstruct a bound from an HDF5 group produced by `write`.

    NOTE(review): takes `cls` and calls `cls()` — reads like a
    `@classmethod` whose decorator was stripped during extraction;
    confirm against the original source.
    """
    bound = cls()
    bound.rng = np.random.default_rng() if rng is None else rng
    bound.n_dim = group.attrs['n_dim']
    # Neural bounds were written as numbered sub-groups; read until the
    # sequence runs out.
    bound.neural_bounds = []
    i = 0
    while True:
        key = 'neural_bound_{}'.format(i)
        if key not in group:
            break
        bound.neural_bounds.append(
            NeuralBound.read(group[key], rng=bound.rng))
        i += 1
    bound.outer_bound = Union.read(group['outer_bound'], rng=rng)
    bound.points = np.array(group['points'])
    bound.n_sample = group.attrs['n_sample']
    bound.n_reject = group.attrs['n_reject']
    return bound
def reset(self, rng=None):
    """Discard cached points and sampling statistics.

    Parameters
    ----------
    rng : numpy.random.Generator, optional
        If given, also becomes this bound's new random generator; the
        outer bound is reset with it either way.
    """
    self.points = np.zeros((0, self.n_dim))
    self.n_sample = 0
    self.n_reject = 0
    self.outer_bound.reset(rng)
    if rng is not None:
        self.rng = rng
class TestCollectResults(unittest.TestCase):
    """Tests for the collect_results reporting helpers."""

    def test_format_mean(self):
        """format_mean renders 'mean +/- err' in percent; latex flag swaps the separator."""
        self.assertEqual(collect_results.format_mean([0.1, 0.2, 0.3], False)[2], '20.0 +/- 4.7')
        self.assertEqual(collect_results.format_mean([0.1, 0.2, 0.3], True)[2], '20.0 $\\pm$ 4.7')

    def test_print_table_non_latex(self):
        """print_table writes a plain-text table to stdout."""
        # Redirect stdout for the duration of the call.
        temp_out = io.StringIO()
        sys.stdout = temp_out
        table = [['1', '2'], ['3', '4']]
        collect_results.print_table(table, 'Header text', ['R1', 'R2'], ['C1', 'C2'], colwidth=10, latex=False)
        sys.stdout = sys.__stdout__
        self.assertEqual(temp_out.getvalue(), textwrap.dedent('\n Header text\n C1 C2 \n R1 1 2 \n R2 3 4 \n '))

    def test_print_table_latex(self):
        """print_table emits a LaTeX tabular when latex=True."""
        temp_out = io.StringIO()
        sys.stdout = temp_out
        table = [['1', '2'], ['3', '4']]
        collect_results.print_table(table, 'Header text', ['R1', 'R2'], ['C1', 'C2'], colwidth=10, latex=True)
        sys.stdout = sys.__stdout__
        self.assertEqual(temp_out.getvalue(), textwrap.dedent('\n \\begin{center}\n \\adjustbox{max width=\\textwidth}{%\n \\begin{tabular}{lcc}\n \\toprule\n \\textbf{C1 & \\textbf{C2 \\\\\n \\midrule\n R1 & 1 & 2 \\\\\n R2 & 3 & 4 \\\\\n \\bottomrule\n \\end{tabular}}\n \\end{center}\n '))

    def test_get_grouped_records(self):
        # TODO: not yet implemented.
        pass

    def test_print_results_tables(self):
        # TODO: not yet implemented.
        pass

    def test_load_records(self):
        # TODO: not yet implemented.
        pass

    def test_end_to_end(self):
        """Run the collect_results script as a subprocess and diff against a checked-in fixture."""
        result = subprocess.run('python -m domainbed.scripts.collect_results --input_dir=domainbed/misc/test_sweep_data', shell=True, stdout=subprocess.PIPE)
        with open('domainbed/misc/test_sweep_results.txt', 'r') as f:
            ground_truth = f.read()
        self.assertEqual(result.stdout.decode('utf8'), ground_truth)
def TranslateY(img, v, max_v, bias=0):
    """Translate a PIL image vertically by a random fraction of its height.

    The magnitude comes from `_float_parameter(v, max_v)` plus `bias`,
    and its sign is flipped with probability 0.5 so shifts go both up
    and down.
    """
    magnitude = _float_parameter(v, max_v) + bias
    if random.random() < 0.5:
        magnitude = -magnitude
    # Convert the fractional shift into whole pixels of image height.
    offset = int(magnitude * img.size[1])
    # Affine coefficients (a, b, c, d, e, f); f is the y translation.
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, offset))
class TestRecurrentEncoder(TensorTestCase):
    """Tests for RecurrentEncoder: sizes, RNN type, dropout, freezing and a seeded forward pass."""

    def setUp(self):
        self.emb_size = 10
        self.num_layers = 3
        self.hidden_size = 7
        # Fixed seed so the hard-coded forward-pass targets below reproduce.
        seed = 42
        torch.manual_seed(seed)

    def test_recurrent_encoder_size(self):
        """output_size doubles when the encoder is bidirectional."""
        for bidirectional in [True, False]:
            directional_factor = (2 if bidirectional else 1)
            encoder = RecurrentEncoder(hidden_size=self.hidden_size, emb_size=self.emb_size, num_layers=self.num_layers, bidirectional=bidirectional)
            self.assertEqual(encoder.rnn.hidden_size, self.hidden_size)
            self.assertEqual(encoder.output_size, (self.hidden_size * directional_factor))
            self.assertEqual(encoder.rnn.bidirectional, bidirectional)

    def test_recurrent_encoder_type(self):
        """The rnn_type string selects the matching torch RNN class."""
        valid_rnn_types = {'gru': GRU, 'lstm': LSTM}
        for (name, obj) in valid_rnn_types.items():
            encoder = RecurrentEncoder(rnn_type=name)
            self.assertEqual(type(encoder.rnn), obj)

    def test_recurrent_input_dropout(self):
        """Embedding dropout is active in train mode and disabled in eval mode."""
        drop_prob = 0.5
        encoder = RecurrentEncoder(dropout=drop_prob, emb_dropout=drop_prob)
        input_tensor = torch.Tensor([2, 3, 1, (- 1)])
        encoder.train()
        dropped = encoder.emb_dropout(input=input_tensor)
        encoder.eval()
        no_drop = encoder.emb_dropout(input=input_tensor)
        # NOTE(review): abs().sum() is always >= 0, so this assertion can
        # never fail — it likely does not test what was intended.
        self.assertGreaterEqual((no_drop - (drop_prob * dropped)).abs().sum(), 0)
        drop_prob = 1.0
        encoder = RecurrentEncoder(dropout=drop_prob, emb_dropout=drop_prob)
        # p=1.0 in train mode zeroes everything.
        all_dropped = encoder.emb_dropout(input=input_tensor)
        self.assertEqual(all_dropped.sum(), 0)
        encoder.eval()
        none_dropped = encoder.emb_dropout(input=input_tensor)
        # `no_drop` still refers to the first encoder's eval-mode output.
        self.assertTensorEqual(no_drop, none_dropped)
        self.assertTensorEqual((no_drop - all_dropped), no_drop)

    def test_recurrent_freeze(self):
        """freeze=True leaves no trainable parameters."""
        encoder = RecurrentEncoder(freeze=True)
        for (n, p) in encoder.named_parameters():
            self.assertFalse(p.requires_grad)

    def test_recurrent_forward(self):
        """A seeded forward pass reproduces recorded reference outputs."""
        time_dim = 4
        batch_size = 2
        bidirectional = True
        directions = (2 if bidirectional else 1)
        encoder = RecurrentEncoder(emb_size=self.emb_size, num_layers=self.num_layers, hidden_size=self.hidden_size, bidirectional=bidirectional)
        x = torch.rand(size=(batch_size, time_dim, self.emb_size))
        x_length = torch.Tensor(([time_dim] * batch_size)).int()
        mask = torch.ones_like(x)
        (output, hidden) = encoder(embed_src=x, src_length=x_length, mask=mask)
        self.assertEqual(output.shape, torch.Size([batch_size, time_dim, (directions * self.hidden_size)]))
        self.assertEqual(hidden.shape, torch.Size([batch_size, (directions * self.hidden_size)]))
        # Regression targets recorded from a known-good run under seed 42.
        hidden_target = torch.Tensor([[0.1323, 0.0125, 0.29, (- 0.0725), (- 0.0102), (- 0.4405), 0.1226, (- 0.3333), (- 0.3186), (- 0.2411), 0.179, 0.1281, 0.0739, (- 0.0536)], [0.1431, 0.0085, 0.2828, (- 0.0933), (- 0.0139), (- 0.4525), 0.0946, (- 0.3279), (- 0.3001), (- 0.2223), 0.2023, 0.0708, 0.0131, (- 0.0124)]])
        output_target = torch.Tensor([[[[0.0041, 0.0324, 0.0846, (- 0.0056), 0.0353, (- 0.2528), 0.0289, (- 0.3333), (- 0.3186), (- 0.2411), 0.179, 0.1281, 0.0739, (- 0.0536)], [0.0159, 0.0248, 0.1496, (- 0.0176), 0.0457, (- 0.3839), 0.078, (- 0.3137), (- 0.2731), (- 0.231), 0.1866, 0.0758, 0.0366, (- 0.0069)], [0.0656, 0.0168, 0.2182, (- 0.0391), 0.0214, (- 0.4389), 0.11, (- 0.2625), (- 0.197), (- 0.2249), 0.1374, 0.0337, 0.0139, 0.0284], [0.1323, 0.0125, 0.29, (- 0.0725), (- 0.0102), (- 0.4405), 0.1226, (- 0.1649), (- 0.1023), (- 0.1823), 0.0712, 0.0039, (- 0.0228), 0.0444]], [[0.0296, 0.0254, 0.1007, (- 0.0225), 0.0207, (- 0.2612), 0.0061, (- 0.3279), (- 0.3001), (- 0.2223), 0.2023, 0.0708, 0.0131, (- 0.0124)], [0.0306, 0.0096, 0.1566, (- 0.0386), 0.0387, (- 0.3958), 0.0556, (- 0.3034), (- 0.2701), (- 0.2165), 0.2061, 0.0364, (- 0.0012), 0.0184], [0.0842, 0.0075, 0.2181, (- 0.0696), 0.0121, (- 0.4389), 0.0874, (- 0.2432), (- 0.1979), (- 0.2168), 0.1519, 0.0066, (- 0.008), 0.0485], [0.1431, 0.0085, 0.2828, (- 0.0933), (- 0.0139), (- 0.4525), 0.0946, (- 0.1608), (- 0.114), (- 0.1646), 0.0796, (- 0.0202), (- 0.0207), 0.0379]]]])
        self.assertTensorAlmostEqual(hidden_target, hidden)
        self.assertTensorAlmostEqual(output_target, output)
# NOTE(review): the original line here was the bare expression `_model`,
# almost certainly a `@register_model` decorator mangled during
# extraction (as written it raised NameError at import time and the
# model was never registered); restored below — confirm against the
# original source.
@register_model
def res2net50_26w_4s(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Construct a Res2Net-50 model (26 width, 4 scales).

    Parameters
    ----------
    pretrained : bool
        If True, load the pretrained weights named by the default config.
    num_classes : int
        Number of classifier outputs.
    in_chans : int
        Number of input image channels.
    """
    default_cfg = default_cfgs['res2net50_26w_4s']
    # scale=4 gives the '4s' hierarchical residual splits in each block.
    res2net_block_args = dict(scale=4)
    model = ResNet(Bottle2neck, [3, 4, 6, 3], base_width=26, num_classes=num_classes, in_chans=in_chans, block_args=res2net_block_args, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def preprocess_for_train(image, labels, bboxes, xs, ys, out_shape, data_format='NHWC', scope='ssd_preprocessing_train'):
    """Distort an image and its boxes/keypoints for SSD-style training.

    Applies (in order): optional random 90-degree rotation, optional
    random expansion, a distorted bounding-box crop, resize to
    `out_shape`, optional filtering of boxes by shorter side, random
    color distortion, and mean subtraction. Box/point coordinates are
    assumed normalized to [0, 1] — TODO confirm against callers.

    Returns
    -------
    (image, labels, bboxes, xs, ys) after preprocessing; `image` is
    transposed to CHW when data_format == 'NCHW'.
    """
    fast_mode = False
    with tf.name_scope(scope, 'ssd_preprocessing_train', [image, labels, bboxes]):
        if (image.get_shape().ndims != 3):
            raise ValueError('Input must be of size [height, width, C>0]')
        if USE_ROTATION:
            # Rotate by 90 degrees with probability config.rotation_prob.
            rnd = tf.random_uniform((), minval=0, maxval=1)
            def rotate():
                return tf_image.random_rotate90(image, bboxes, xs, ys)
            def no_rotate():
                return (image, bboxes, xs, ys)
            (image, bboxes, xs, ys) = tf.cond(tf.less(rnd, config.rotation_prob), rotate, no_rotate)
        if (MAX_EXPAND_SCALE > 1):
            # Pad the image out to a randomly scaled canvas with
            # probability config.expand_prob (zoom-out augmentation).
            rnd2 = tf.random_uniform((), minval=0, maxval=1)
            def expand():
                scale = tf.random_uniform([], minval=1.0, maxval=MAX_EXPAND_SCALE, dtype=tf.float32)
                image_shape = tf.cast(tf.shape(image), dtype=tf.float32)
                (image_h, image_w) = (image_shape[0], image_shape[1])
                target_h = tf.cast((image_h * scale), dtype=tf.int32)
                target_w = tf.cast((image_w * scale), dtype=tf.int32)
                tf.logging.info('expanded')
                return tf_image.resize_image_bboxes_with_crop_or_pad(image, bboxes, xs, ys, target_h, target_w)
            def no_expand():
                return (image, bboxes, xs, ys)
            (image, bboxes, xs, ys) = tf.cond(tf.less(rnd2, config.expand_prob), expand, no_expand)
        if (image.dtype != tf.float32):
            # Work in float32 [0, 1] for the color/crop ops below.
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        dst_image = image
        (dst_image, labels, bboxes, xs, ys, distort_bbox) = distorted_bounding_box_crop(image, labels, bboxes, xs, ys, min_object_covered=MIN_OBJECT_COVERED, aspect_ratio_range=CROP_ASPECT_RATIO_RANGE, area_range=AREA_RANGE)
        dst_image = tf_image.resize_image(dst_image, out_shape, method=tf.image.ResizeMethod.BILINEAR, align_corners=False)
        tf_summary_image(dst_image, bboxes, 'image_shape_distorted')
        if USING_SHORTER_SIDE_FILTERING:
            # Convert to pixel units, mark too-small/too-large boxes as
            # ignore, then convert back to normalized coordinates.
            xs = (xs * out_shape[1])
            ys = (ys * out_shape[0])
            (labels, bboxes, xs, ys) = tfe.bboxes_filter_by_shorter_side(labels, bboxes, xs, ys, min_height=MIN_SHORTER_SIDE, max_height=MAX_SHORTER_SIDE, assign_value=LABEL_IGNORE)
            xs = (xs / out_shape[1])
            ys = (ys / out_shape[0])
        # Randomly pick one of 4 color-distortion orderings.
        dst_image = apply_with_random_selector(dst_image, (lambda x, ordering: distort_color(x, ordering, fast_mode)), num_cases=4)
        tf_summary_image(dst_image, bboxes, 'image_color_distorted')
        # Back to [0, 255] and subtract the per-channel ImageNet means.
        image = (dst_image * 255.0)
        image = tf_image_whitened(image, [_R_MEAN, _G_MEAN, _B_MEAN])
        if (data_format == 'NCHW'):
            image = tf.transpose(image, perm=(2, 0, 1))
        return (image, labels, bboxes, xs, ys)
def get_api_response(model, tokenizer, content: str, max_tokens=None):
    """Send `content` to the OpenAI chat API and return the reply text.

    The system prompt is chosen from the module-level `lang_opt` global
    ('en', 'zh1' or 'zh2').

    NOTE(review): the `model` and `tokenizer` parameters are unused —
    the request hardcodes 'gpt-3.5-turbo'; confirm whether `model`
    should be forwarded instead.
    NOTE(review): the 'zh1'/'zh2' prompt strings look garbled (likely
    lost non-ASCII text during extraction) — confirm against the
    original source; left byte-identical here.
    """
    if ('en' == lang_opt):
        system_role_content = 'You are a helpful and creative assistant for writing novel.'
    elif ('zh1' == lang_opt):
        system_role_content = 'You are a helpful and creative assistant for writing novel. You are must always in Chinese.,'
    elif ('zh2' == lang_opt):
        system_role_content = ','
    else:
        raise Exception(f'not supported language: {lang_opt}')
    # temperature=0.5 trades determinism for some creative variety.
    response = openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=[{'role': 'system', 'content': system_role_content}, {'role': 'user', 'content': content}], temperature=0.5, max_tokens=max_tokens)
    return response['choices'][0]['message']['content']
class Derivative(PdeNode):
    """PDE node encoding the residual of dT/dp = S.

    T names the dependent variable, p the variable differentiated
    against, and S an optional source term (string -> unknown function,
    number -> constant). The equation key records T, p and — when S is
    a function — S as well.
    """

    def __init__(self, T: Union[str, Symbol, float, int], p: Union[str, Symbol], S: Union[str, Symbol, float, int]=0.0, dim=3, time=True):
        super().__init__()
        self.T = T
        self.S = S
        self.dim = dim
        self.time = time
        x, y, z = symbols('x y z')
        t = Symbol('t')
        input_variables = {'x': x, 'y': y, 'z': z, 't': t}
        # Drop the spatial coordinates that do not apply to this
        # dimensionality, and time when the problem is steady.
        if self.dim == 1:
            del input_variables['y'], input_variables['z']
        elif self.dim == 2:
            del input_variables['z']
        if not self.time:
            del input_variables['t']
        # Promote the source term: a name becomes an unknown function of
        # the input variables, a plain number becomes a sympy Number.
        # (Exact type checks kept deliberately, e.g. bools stay untouched.)
        if type(S) is str:
            S = Function(S)(*input_variables)
        elif type(S) in (float, int):
            S = Number(S)
        if isinstance(p, str):
            p = Symbol(p)
        T = Function(T)(*input_variables)
        residual = T.diff(p) - S
        if isinstance(S, Function):
            key = (((('derivative_' + self.T) + ':') + str(p)) + '_') + str(self.S)
        else:
            key = (('derivative_' + self.T) + ':') + str(p)
        self.equations = {key: residual}
        self.make_nodes()
def list_python_files_in_repository():
    """Recursively collect Python source files under the current directory.

    Directories whose path contains 'templates' are skipped entirely.

    Returns
    -------
    list of str
        Relative paths (starting with '.') of every '*.py' file found.
    """
    source_code_files = []
    for path, _subdirs, files in os.walk('.'):
        if 'templates' in path:
            continue
        for name in files:
            # BUG FIX: use endswith('.py') instead of the old
            # `('.py' in name) and ('.pyc' not in name)`, which wrongly
            # matched files such as 'x.pyx' or 'a.py.bak'.
            if name.endswith('.py'):
                source_code_files.append(os.path.join(path, name))
    return source_code_files
def _iterate_marked(cfg, config_mods):
    """Yield (path, entry) pairs for each flattened config item.

    Path-change markers become PathEntry records and everything else
    becomes ConfigEntry records, each annotated with whether the value
    was added, modified or type-changed, plus any docstring.
    """
    for path, value in iterate_flattened_separately(cfg, ['__doc__']):
        key = path.rpartition('.')[2]
        added = path in config_mods.added
        modified = path in config_mods.modified
        typechanged = config_mods.typechanged.get(path)
        doc = config_mods.docs.get(path)
        if value is PATHCHANGE:
            yield path, PathEntry(key=key, added=added, modified=modified,
                                  typechanged=typechanged, doc=doc)
        else:
            yield path, ConfigEntry(key=key, value=value, added=added,
                                    modified=modified,
                                    typechanged=typechanged, doc=doc)
def main(_):
    """Train and/or evaluate a PTB/enwik8 language model (TF1 queue-runner style).

    Fixes applied in this revision:
    * the 'Test' graph section had been commented out into a string
      literal, leaving `mtest` undefined at evaluation time — restored;
    * `f.open(...)` on an undefined `f` when restoring the previous
      validation score — replaced with a `with open(...)` block;
    * the previous validation score was parsed with int() although it is
      written with '%f' — now parsed with float();
    * `counter_val` is initialized before the epoch loop so the enwik8
      branch cannot hit an unbound local.
    """
    if not FLAGS.data_path:
        raise ValueError('Must set --data_path to PTB data directory')
    if not os.path.exists(os.path.dirname(FLAGS.save_path)):
        try:
            os.makedirs(os.path.dirname(FLAGS.save_path))
        except OSError as exc:
            # Tolerate a concurrent mkdir; re-raise anything else.
            if exc.errno != errno.EEXIST:
                raise
    config = configs.get_config(FLAGS.model)
    eval_config = configs.get_config(FLAGS.model)
    valid_config = configs.get_config(FLAGS.model)
    print(config.batch_size)
    eval_config.batch_size = 1
    valid_config.batch_size = 20
    raw_data = reader.ptb_raw_data((FLAGS.data_path + config.dataset) + '/')
    (train_data, valid_data, test_data, _) = raw_data
    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
        with tf.name_scope('Train'):
            train_input = PTBInput(config=config, data=train_data, name='TrainInput')
            with tf.variable_scope('Model', reuse=None, initializer=initializer):
                m = PTBModel(is_training=True, config=config, input_=train_input)
        with tf.name_scope('Valid'):
            valid_input = PTBInput(config=config, data=valid_data, name='ValidInput')
            with tf.variable_scope('Model', reuse=True, initializer=initializer):
                mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
        # BUG FIX: this section was previously commented out as a string,
        # so `mtest` below raised NameError.
        with tf.name_scope('Test'):
            test_input = PTBInput(config=eval_config, data=test_data, name='TestInput')
            with tf.variable_scope('Model', reuse=True, initializer=initializer):
                mtest = PTBModel(is_training=False, config=eval_config, input_=test_input)
        saver = tf.train.Saver(tf.trainable_variables())
        configz = tf.ConfigProto()
        configz.gpu_options.allow_growth = True
        with tf.Session(config=configz) as session:
            session.run(tf.global_variables_initializer())
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=session, coord=coord)
            if FLAGS.restore == 'True':
                saver.restore(session, FLAGS.save_path + 'model.ckpt')
            if FLAGS.mode == 'train':
                previous_val = 9999
                if FLAGS.restore == 'True':
                    # BUG FIX: was `f.open(...)` on an undefined `f`, and
                    # parsed the '%f'-formatted value with int().
                    with open(FLAGS.save_path + 'train-and-valid.txt', 'r') as f:
                        x = f.readlines()[2]
                    x = x.rstrip()
                    x = x.split(' ')
                    previous_val = float(x[1])
                    print('previous validation is %f\n' % previous_val)
                counter_val = 0
                for i in range(config.max_max_epoch):
                    lr_decay = config.lr_decay ** max((i + 1) - config.max_epoch, 0.0)
                    m.assign_lr(session, config.learning_rate * lr_decay)
                    print('Epoch: %d Learning rate: %.3f' % ((i + 1), session.run(m.lr)))
                    train_perplexity = run_epoch(session, m, eval_op=m.train_op, verbose=True)
                    print('Epoch: %d Train BPC: %.4f' % ((i + 1), train_perplexity))
                    valid_perplexity = run_epoch(session, mvalid)
                    print('Epoch: %d Valid BPC: %.4f' % ((i + 1), valid_perplexity))
                    sys.stdout.flush()
                    if i == 180:
                        config.learning_rate *= 0.1
                    if valid_perplexity < previous_val:
                        # New best model: checkpoint it and record scores.
                        print('Storing weights')
                        saver.save(session, FLAGS.save_path + 'model.ckpt')
                        with open(FLAGS.save_path + 'train-and-valid.txt', 'w') as f:
                            f.write('Epoch %d\nTrain %f\nValid %f\n' % (i, train_perplexity, valid_perplexity))
                        previous_val = valid_perplexity
                        counter_val = 0
                    elif config.dataset == 'enwik8':
                        # For enwik8: two non-improving epochs in a row
                        # trigger a learning-rate cut.
                        counter_val += 1
                        if counter_val == 2:
                            config.learning_rate *= 0.1
                            counter_val = 0
                # Evaluate the best checkpoint, not the final weights.
                print('Loading best weights')
                saver.restore(session, FLAGS.save_path + 'model.ckpt')
            test_perplexity = run_epoch(session, mtest)
            print('Test Perplexity: %.4f' % test_perplexity)
            with open(FLAGS.save_path + 'test.txt', 'w') as f:
                f.write('Test %f\n' % test_perplexity)
            sys.stdout.flush()
            coord.request_stop()
            coord.join(threads)
def embed_all(inputs, count, size):
    """Embed each input tensor and mean-pool over its token axis.

    All inputs share one set of embedding weights: the variable scope
    switches to reuse mode after the first iteration, so subsequent
    `net.embed` calls pick up the same variables.
    """
    pooled = []
    with tf.variable_scope('embed_all') as scope:
        for tensor in inputs:
            embedded, _ = net.embed(tensor, count, size)
            pooled.append(tf.reduce_mean(embedded, axis=-2))
            scope.reuse_variables()
    return pooled
def common_arg_parser():
    """Build the argument parser shared by the RL training scripts."""
    # (flag, add_argument kwargs) — kept in the original declaration
    # order so --help output is unchanged.
    argument_specs = [
        ('--env', dict(help='environment ID', type=str, default='Reacher-v2')),
        ('--env_type', dict(help='type of environment, used when the environment type cannot be automatically determined', type=str)),
        ('--seed', dict(help='RNG seed', type=int, default=None)),
        ('--network', dict(help='network type mlp', default='mlp')),
        ('--num_epoch', dict(help='number of epochs to train', type=int, default=50)),
        ('--num_env', dict(help='Number of environment copies being run', default=1, type=int)),
        ('--save_path', dict(help='Path to save trained model to', default=None, type=str)),
        ('--load_path', dict(help='Path to load trained model to', default=None, type=str)),
        ('--log_path', dict(help='Directory to save learning curve data.', default=None, type=str)),
        ('--save_buffer', dict(help='If save the buffer or not', action='store_true')),
        ('--load_buffer', dict(help='If to load the offline buffer', action='store_true')),
        ('--load_model', dict(help='If to load the saved model', action='store_true')),
        ('--play', dict(default=False, action='store_true')),
        ('--play_no_training', dict(default=False, action='store_true')),
        ('--mode', dict(help='mode of algorithms "dynamic", "supervised"', default=None, type=str)),
        ('--su_method', dict(help='method for supervised learning', default='', type=str)),
        ('--offline_train', dict(help='If training offline or not', default=False, action='store_true')),
    ]
    parser = arg_parser()
    for flag, kwargs in argument_specs:
        parser.add_argument(flag, **kwargs)
    return parser
# NOTE(review): the original first line was the bare expression
# `(autouse=True)` — a SyntaxError — almost certainly a
# `@pytest.fixture(autouse=True)` decorator whose head was stripped
# during extraction; restored — confirm against the original source.
@pytest.fixture(autouse=True)
def _requests_prevent_head(monkeypatch: MonkeyPatch, thrower: Callable, logging_side_effect: Callable) -> MagicMock:
    """Autouse fixture that replaces `requests.head` with a logging mock.

    The mock logs the call (via `logging_side_effect`) and then raises
    (via `thrower`), so no test can reach the network with a real HEAD
    request; the mock is returned for call-count assertions.
    """
    mock = MagicMock(side_effect=logging_side_effect('requests.head', after=thrower))
    monkeypatch.setattr(requests, 'head', mock)
    return mock
def _aspect_preserving_resize(image, resize_min):
    """Bilinearly resize `image` so its smaller side equals `resize_min`.

    The target height/width come from `_smallest_size_at_least`, which
    scales both dimensions by the same factor, preserving aspect ratio.
    """
    input_shape = tf.shape(input=image)
    height = input_shape[0]
    width = input_shape[1]
    new_height, new_width = _smallest_size_at_least(height, width, resize_min)
    return tf.image.resize(image, [new_height, new_width],
                           method=tf.image.ResizeMethod.BILINEAR)
class MeanAggregator(nn.Module):
    """GraphSAGE mean aggregator: averages feature vectors of (sampled) neighbors."""

    def __init__(self, features, cuda=False, gcn=False):
        """
        features -- callable mapping a LongTensor of node ids to a feature matrix
        cuda     -- if True, move the mask and feature lookups to GPU
        gcn      -- if True, include each node itself in its neighborhood (GCN-style)
        """
        super(MeanAggregator, self).__init__()
        self.features = features
        self.cuda = cuda
        self.gcn = gcn

    def forward(self, nodes, to_neighs, num_sample=10):
        """Return mean-aggregated neighbor features, one row per entry of `nodes`.

        nodes      -- batch of node ids (indexable; used only when gcn=True)
        to_neighs  -- per-node neighbor id collections (assumed to be sets —
                      TODO confirm; see NOTE below)
        num_sample -- max neighbors to keep per node; None keeps all
        """
        # Local aliases (micro-optimization kept from the original).
        _set = set
        if (not (num_sample is None)):
            _sample = random.sample
            # Down-sample only nodes that have >= num_sample neighbors.
            # NOTE(review): random.sample on a set raises TypeError on
            # Python >= 3.11 — confirm to_neighs element type.
            samp_neighs = [(_set(_sample(to_neigh, num_sample)) if (len(to_neigh) >= num_sample) else to_neigh) for to_neigh in to_neighs]
        else:
            samp_neighs = to_neighs
        if self.gcn:
            # Add self-loops so each node contributes its own features.
            samp_neighs = [samp_neigh.union(set([int(nodes[i])])) for (i, samp_neigh) in enumerate(samp_neighs)]
        unique_nodes_list = list(set.union(*samp_neighs))
        unique_nodes = {n: i for (i, n) in enumerate(unique_nodes_list)}
        # Dense 0/1 membership mask: rows = batch nodes, columns = the
        # unique neighbor ids appearing anywhere in the batch.
        mask = Variable(torch.zeros(len(samp_neighs), len(unique_nodes)))
        column_indices = [unique_nodes[n] for samp_neigh in samp_neighs for n in samp_neigh]
        row_indices = [i for i in range(len(samp_neighs)) for j in range(len(samp_neighs[i]))]
        mask[(row_indices, column_indices)] = 1
        if self.cuda:
            mask = mask.cuda()
        # Row-normalize so the matmul below computes a mean, not a sum.
        num_neigh = mask.sum(1, keepdim=True)
        mask = mask.div(num_neigh)
        if self.cuda:
            embed_matrix = self.features(torch.LongTensor(unique_nodes_list).cuda())
        else:
            embed_matrix = self.features(torch.LongTensor(unique_nodes_list))
        to_feats = mask.mm(embed_matrix)
        return to_feats
# NOTE(review): the two lines preceding this test were the bare
# expressions `(inp1=arrays(...), inp2=arrays(...))` and
# `(max_examples=500)` — SyntaxErrors as written — i.e. hypothesis
# decorators whose heads were stripped during extraction; restored —
# confirm against the original source. `np.float` (an alias of builtin
# float, removed in NumPy 1.24) is replaced by the equivalent
# `np.float64` dtype.
@hypothesis.given(inp1=arrays(shape=(3, 10), dtype=np.float64, elements=hypothesis.strategies.floats((- 1000), 1000)), inp2=arrays(shape=(3, 10), dtype=np.float64, elements=hypothesis.strategies.floats((- 1000), 1000)))
@hypothesis.settings(max_examples=500)
def test_logsumexp2_numerical_stability(inp1, inp2):
    """logsumexp2 must match tf.reduce_logsumexp and dominate the elementwise max."""
    t1_ = tf.constant(inp1)
    t2_ = tf.constant(inp2)
    expected = tf.reduce_logsumexp(tf.stack((t1_, t2_)), axis=0)
    t1 = tf.constant(inp1)
    t2 = tf.constant(inp2)
    result = logsumexp2(t1, t2)
    assert np.allclose(result, expected)
    t1__ = tf.constant(inp1)
    t2__ = tf.constant(inp2)
    max_ = tf.math.maximum(t1__, t2__)
    # logsumexp(a, b) >= max(a, b) holds elementwise.
    assert tf.reduce_all((max_ <= result))
# NOTE(review): the leading `.parametrize(...)` line was a SyntaxError —
# a `@pytest.mark.parametrize` decorator whose head was stripped during
# extraction; restored — confirm against the original source.
@pytest.mark.parametrize('metric_name, sklearn_metric, torch_metric', [('MulticlassAccuracy', accuracy_score, Accuracy(task='multiclass', num_classes=3, average='micro')), ('MulticlassPrecision', precision_score, Precision(task='multiclass', num_classes=3, average='macro')), ('MulticlassRecall', recall_score, Recall(task='multiclass', num_classes=3, average='macro')), ('MulticlassF1Score', f1_score, F1Score(task='multiclass', num_classes=3, average='macro')), ('MulticlassFBetaScore', f2_score_multi, FBetaScore(beta=3.0, task='multiclass', num_classes=3, average='macro'))])
def test_muticlass_metrics(metric_name, sklearn_metric, torch_metric):
    """Each torchmetrics multiclass metric agrees with its sklearn counterpart."""
    if (metric_name == 'MulticlassAccuracy'):
        # Micro accuracy takes no averaging argument on the sklearn side.
        sk_res = sklearn_metric(y_true_multi_np, y_pred_muli_np.argmax(axis=1))
    else:
        sk_res = sklearn_metric(y_true_multi_np, y_pred_muli_np.argmax(axis=1), average='macro')
    wd_metric = MultipleMetrics(metrics=[torch_metric])
    wd_logs = wd_metric(y_pred_multi_pt, y_true_multi_pt)
    wd_res = wd_logs[metric_name]
    assert np.isclose(sk_res, wd_res, atol=0.01)
def make_dir(dirname):
    """Create `dirname` (with parents); an already-existing directory is fine.

    Raises
    ------
    Exception
        If creation fails for any reason other than the directory
        already existing; the original OSError is chained as the cause.
    """
    try:
        os.makedirs(dirname)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            pass
        else:
            # Chain the OSError so the real cause survives in tracebacks.
            raise Exception('Unable to create directory: ' + dirname) from exc
def get_each_ood_task_log_path(args):
    """Build (and create the directory for) one OOD task's JSON log path.

    The directory name encodes whether this is a test run ('_test') or a
    full run ('_full'), the checkpoint basename, and the in-distribution
    dataset name; the file itself is named after the task.
    """
    suffix = '_test' if args['test'] else '_full'
    save_dir = os.path.join(args['OOD_each_task_result_output_dir'] + suffix,
                            os.path.basename(args['checkpoint']),
                            args['ID_name'])
    mkdir(save_dir)
    save_path = os.path.join(save_dir, ('ood_' + args['task_name']) + '.json')
    return save_path
class GPT2Tokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for GPT-2.

    Text is split with a regex pre-tokenizer, each piece's UTF-8 bytes
    are mapped to printable unicode characters, and learned BPE merges
    are then applied. Spaces are treated as part of the following token,
    so the same word encodes differently at the start of a sentence.
    """

    # Class-level maps describing vocab file names and pretrained configs.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, errors='replace', unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, **kwargs):
        """Load the vocabulary and BPE merge table from disk.

        vocab_file  -- JSON mapping token string -> id
        merges_file -- text file with one BPE merge rule per line
        errors      -- decode error policy for byte -> unicode conversion
        add_prefix_space -- if True, prepend a space before tokenizing
        """
        # Wrap special tokens so surrounding spaces are not stripped.
        bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token)
        eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token)
        unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token)
        super().__init__(errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.errors = errors
        # Reversible byte <-> printable-unicode mapping.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # Skip the '#version' header line and the trailing empty line.
            bpe_merges = merges_handle.read().split('\n')[1:(- 1)]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        # Earlier merges get lower ranks (= higher priority).
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        # Memoizes bpe() results per token string.
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Pre-tokenizer: contractions, letter runs, digit runs, other
        # symbols, and whitespace (kept with the following token).
        self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")

    # NOTE(review): reads like a `@property` whose decorator was stripped
    # during extraction — confirm against the original source.
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to one pre-token and return space-joined subwords."""
        if (token in self.cache):
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if (not pairs):
            return token
        # Repeatedly merge the lowest-ranked (highest-priority) pair
        # until no known merge remains or one symbol is left.
        while True:
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    # No further occurrence of `first`: keep the rest as-is.
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    # Adjacent (first, second): merge into one symbol.
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize text: regex split, byte->unicode map, then BPE."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            # Map each UTF-8 byte to its printable stand-in so BPE never
            # sees raw control bytes.
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string (None if unknown)."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and undo the byte->unicode mapping to recover text."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocab JSON and merges file into `save_directory`.

        Returns the (vocab_file, merge_file) paths, or None (with an
        error logged) when the target is not a directory.
        """
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            # Write merges in rank order; warn if ranks are not contiguous.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word tokenizes like mid-sentence words."""
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space):
            text = (' ' + text)
        return (text, kwargs)

    def _build_conversation_input_ids(self, conversation: 'Conversation') -> List[int]:
        """Concatenate conversation turns (each ending in EOS), keeping only the trailing context."""
        input_ids = []
        for (is_user, text) in conversation.iter_texts():
            input_ids.extend((self.encode(text, add_special_tokens=False) + [self.eos_token_id]))
        if (len(input_ids) > self.model_max_length):
            # Truncate from the left: keep the most recent tokens.
            input_ids = input_ids[(- self.model_max_length):]
        return input_ids
class KeepName:
    """Callable wrapper pairing a transform's output with its input name.

    Useful in data pipelines where the file name must travel alongside
    the transformed sample.
    """

    def __init__(self, transform):
        # The wrapped transform; invoked with the file name on call.
        self.transform = transform

    def __call__(self, file_name):
        transformed = self.transform(file_name)
        return (file_name, transformed)
def _get_image_size(img):
    """Return the (width, height) of a PIL image or an image tensor.

    For tensors with more than two dims the last two dims are taken as
    (H, W) and reversed to match PIL's (W, H) convention.

    Raises
    ------
    TypeError
        If `img` is neither a PIL image nor a suitable tensor.
    """
    if TF._is_pil_image(img):
        return img.size
    if isinstance(img, torch.Tensor) and img.dim() > 2:
        return img.shape[-2:][::-1]
    raise TypeError('Unexpected type {}'.format(type(img)))
def sample(things: List, sample_size: int=None) -> List:
    """Return a random subset of `things` of at most `sample_size` items.

    When `sample_size` is None or covers the whole input, `things` is
    returned as-is (the same object, not a copy); otherwise a uniform
    random sample without replacement is drawn.
    """
    if sample_size is None:
        chosen_size = len(things)
    else:
        chosen_size = min(sample_size, len(things))
    if chosen_size == len(things):
        return things
    return random.sample(things, chosen_size)
class Observation(NamedTuple):
    """Immutable environment observation container (chex array fields).

    Field semantics are inferred from the names — TODO confirm against
    the environment that produces these.
    """
    # Spatial coordinates of entities/cities in the problem instance.
    coordinates: chex.Array
    # Current position (scalar index or id).
    position: chex.Numeric
    # Sequence of positions visited so far.
    trajectory: chex.Array
    # Boolean/0-1 mask of currently legal actions.
    action_mask: chex.Array
class HermitianFilter(Filter):
    """Frequency-domain filter for quaternion-valued signals of length N.

    Parameters K (gain), eta and mu shape the response; each may be a
    scalar or a length-N vector. The upper half of each parameter
    spectrum is forced to mirror the lower half (conjugate-rotated for
    mu), which is what makes the filter "hermitian" in the quaternion
    sense — see the class name; exact symmetry convention assumed from
    the code, confirm against the accompanying paper/docs.
    """

    def __init__(self, N, K, eta, mu, dt=1.0):
        Filter.__init__(self, N, dt=dt)
        # Every parameter must be scalar or broadcast to the N bins.
        for param in [K, eta, mu]:
            if ((np.size(param) != 1) and (np.size(param) != N)):
                raise ValueError('Parameters should be either scalar or of size N')
        if (np.size(K) == 1):
            Kvec = (np.ones(N) * K)
        else:
            Kvec = K
        if (np.size(eta) == 1):
            etavec = (np.ones(N) * eta)
        else:
            etavec = eta
        if (np.size(mu) == 1):
            # Scalar mu: broadcast its unit quaternion to every bin.
            muvec = ((np.ones(N) * mu) / np.abs(mu))
        else:
            # Vector mu: normalize each nonzero entry to a unit
            # quaternion; zero entries stay zero.
            muvec = np.zeros(N, dtype='quaternion')
            muvec[(np.abs(mu) > 0)] = (mu[(np.abs(mu) > 0)] / np.abs(mu)[(np.abs(mu) > 0)])
        qi = quaternion.x
        # Mirror bins 1..N/2-1 into the upper half of the spectrum;
        # mu additionally gets conjugated and rotated by the i axis.
        Kvec[((N // 2) + 1):] = Kvec[1:(N // 2)][::(- 1)]
        etavec[((N // 2) + 1):] = etavec[1:(N // 2)][::(- 1)]
        muvec[((N // 2) + 1):] = (((- qi) * np.conj(muvec[1:(N // 2)][::(- 1)])) * qi)
        # DC and Nyquist bins take the average of their neighbors.
        muvec[0] = (0.5 * (muvec[1] + muvec[(- 1)]))
        muvec[(N // 2)] = (0.5 * (muvec[((N // 2) + 1)] + muvec[((N // 2) - 1)]))
        self.K = Kvec
        self.eta = etavec
        self.mu = muvec

    def output(self, x):
        """Apply the filter to signal `x` via the quaternion FFT.

        Raises ValueError when `x` does not match the filter length.
        """
        if (np.size(x) != self.N):
            raise ValueError('Size of input array should be the same as the constructed filter')
        X = qfft.Qfft(x)
        qj = quaternion.y
        # Y = K * (X - eta * mu * X * j): mixes the spectrum with its
        # mu-weighted j-involution, scaled by the gain K.
        Y = (self.K * (X - ((self.eta * (self.mu * X)) * qj)))
        y = qfft.iQfft(Y)
        return y
class FFN(nn.Module):
    """Feed-forward block: (num_fcs - 1) expanding Linear+act+dropout
    stages followed by a projection back to `embed_dims`, with an
    optional residual connection.
    """

    def __init__(self, embed_dims, feedforward_channels, num_fcs=2, act_cfg=dict(type='ReLU', inplace=True), dropout=0.0, add_residual=True):
        super(FFN, self).__init__()
        assert (num_fcs >= 2), f'num_fcs should be no less than 2. got {num_fcs}.'
        self.embed_dims = embed_dims
        self.feedforward_channels = feedforward_channels
        self.num_fcs = num_fcs
        self.act_cfg = act_cfg
        self.dropout = dropout
        self.activate = build_activation_layer(act_cfg)
        stages = nn.ModuleList()
        width = embed_dims
        for _ in range(num_fcs - 1):
            stage = nn.Sequential(
                Linear(width, feedforward_channels),
                self.activate,
                nn.Dropout(dropout))
            stages.append(stage)
            width = feedforward_channels
        stages.append(Linear(feedforward_channels, embed_dims))
        self.layers = nn.Sequential(*stages)
        # NOTE: overwrites the float stored above, so __repr__ shows the
        # Dropout module rather than the rate (behavior preserved).
        self.dropout = nn.Dropout(dropout)
        self.add_residual = add_residual

    def forward(self, x, residual=None):
        """Run the FFN; add `residual` (default: the input) if enabled."""
        out = self.layers(x)
        if not self.add_residual:
            return out
        shortcut = x if residual is None else residual
        return shortcut + self.dropout(out)

    def __repr__(self):
        pieces = [
            f'(embed_dims={self.embed_dims}, ',
            f'feedforward_channels={self.feedforward_channels}, ',
            f'num_fcs={self.num_fcs}, ',
            f'act_cfg={self.act_cfg}, ',
            f'dropout={self.dropout}, ',
            f'add_residual={self.add_residual})',
        ]
        return self.__class__.__name__ + ''.join(pieces)
class CompositeAudioWaveformTransform(CompositeAudioTransform):
    """Chain of waveform-level audio transforms applied in sequence."""

    # NOTE(review): reads like a `@classmethod` whose decorator was
    # stripped during extraction (`cls` is also forwarded explicitly to
    # the parent helper) — confirm against the original source.
    def from_config_dict(cls, config=None):
        """Build the composite from a config dict via the parent factory."""
        return super()._from_config_dict(cls, 'waveform', get_audio_waveform_transform, CompositeAudioWaveformTransform, config)

    def __call__(self, x, sample_rate):
        """Apply each transform in order, threading (x, sample_rate) through."""
        for t in self.transforms:
            (x, sample_rate) = t(x, sample_rate)
        return (x, sample_rate)
class StreamingPlot():
    """Live matplotlib view of a Pareto-front approximation.

    Call `plot` once to draw the initial front (plus optional reference
    point(s)/front), then `update` repeatedly to refresh the scatter
    data in place.
    """

    def __init__(self, plot_title: str='Pareto front approximation', reference_front: List[S]=None, reference_point: list=None, axis_labels: list=None):
        self.plot_title = plot_title
        self.axis_labels = axis_labels
        # Normalize a single reference point into a list of points.
        if (reference_point and (not isinstance(reference_point[0], list))):
            reference_point = [reference_point]
        self.reference_point = reference_point
        self.reference_front = reference_front
        self.dimension = None
        # Older matplotlib versions warn about interactive GUI usage.
        import warnings
        warnings.filterwarnings('ignore', '.*GUI is implemented.*')
        (self.fig, self.ax) = plt.subplots()
        # Scatter handle for the front; set by `plot`.
        self.sc = None
        self.axis = None

    def plot(self, front):
        """Draw the initial front, reference point(s) and reference front."""
        (points, dimension) = Plot.get_points(front)
        self.create_layout(dimension)
        if self.reference_point:
            # Red stars mark the reference point(s).
            for point in self.reference_point:
                (self.scp,) = self.ax.plot(*[[p] for p in point], c='r', ls='None', marker='*', markersize=3)
        if self.reference_front:
            # Black stars mark the known reference front.
            (rpoints, _) = Plot.get_points(self.reference_front)
            (self.scf,) = self.ax.plot(*[rpoints[column].tolist() for column in rpoints.columns.values], c='k', ls='None', marker='*', markersize=1)
        (self.sc,) = self.ax.plot(*[points[column].tolist() for column in points.columns.values], ls='None', marker='o', markersize=4)
        plt.show(block=False)

    def update(self, front: List[S], reference_point: list=None) -> None:
        """Replace the plotted data with a new front; `plot` must run first."""
        if (self.sc is None):
            raise Exception('Figure is none')
        (points, dimension) = Plot.get_points(front)
        self.sc.set_data(points[0], points[1])
        if (dimension == 3):
            self.sc.set_3d_properties(points[2])
        if reference_point:
            self.scp.set_data([p[0] for p in reference_point], [p[1] for p in reference_point])
        # Rescale the axes to the new data and flush the GUI event loop.
        self.ax.relim()
        self.ax.autoscale_view(True, True, True)
        try:
            self.fig.canvas.flush_events()
        except KeyboardInterrupt:
            pass
        pause(0.01)

    def create_layout(self, dimension: int) -> None:
        """Configure the figure for a 2-D or 3-D plot; reject other dims."""
        logger.info('Creating figure layout')
        self.fig.canvas.manager.set_window_title(self.plot_title)
        self.fig.suptitle(self.plot_title, fontsize=16)
        if (dimension == 2):
            # Clean 2-D look: hide top/right spines, ticks on the outside.
            self.ax.spines['top'].set_visible(False)
            self.ax.spines['right'].set_visible(False)
            self.ax.get_xaxis().tick_bottom()
            self.ax.get_yaxis().tick_left()
        elif (dimension == 3):
            self.ax = Axes3D(self.fig)
            self.ax.autoscale(enable=True, axis='both')
        else:
            raise Exception('Dimension must be either 2 or 3')
        self.ax.set_autoscale_on(True)
        self.ax.autoscale_view(True, True, True)
        self.ax.grid(color='#f0f0f5', linestyle='-', linewidth=0.5, alpha=0.5)
class UNet(nn.Module):
    """U-Net style encoder/decoder with skip connections.

    The decoder output is L2-normalized, so this variant produces a
    per-pixel embedding rather than class logits.
    """

    def __init__(self, n_channels, n_classes, bilinear=True):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        # Encoder path (channel widths 64 -> 512).
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 512)
        # Decoder path; input widths include the concatenated skip features.
        self.up1 = Up(1024, 256, bilinear)
        self.up2 = Up(512, 128, bilinear)
        self.up3 = Up(256, 64, bilinear)
        self.up4 = Up(128, 64, bilinear)
        # NOTE(review): outc is constructed but never applied in forward()
        # below — confirm this is intentional (forward returns 64-ch features).
        self.outc = OutConv(64, n_classes)

    def forward(self, x):
        enc0 = self.inc(x)
        enc1 = self.down1(enc0)
        enc2 = self.down2(enc1)
        enc3 = self.down3(enc2)
        bottleneck = self.down4(enc3)
        dec = self.up1(bottleneck, enc3)
        dec = self.up2(dec, enc2)
        dec = self.up3(dec, enc1)
        dec = self.up4(dec, enc0)
        return F.normalize(dec)
class TestFileIO(unittest.TestCase):
_tmpdir: Optional[str] = None
_tmpfile: Optional[str] = None
_tmpfile_contents = 'Hello, World'
def setUpClass(cls) -> None:
cls._tmpdir = tempfile.mkdtemp()
with open(os.path.join(cls._tmpdir, 'test.txt'), 'w') as f:
cls._tmpfile = f.name
f.write(cls._tmpfile_contents)
f.flush()
def tearDownClass(cls) -> None:
if (cls._tmpdir is not None):
shutil.rmtree(cls._tmpdir)
def test_file_io(self):
from fairseq.file_io import PathManager
with PathManager.open(os.path.join(self._tmpdir, 'test.txt'), 'r') as f:
s = f.read()
self.assertEqual(s, self._tmpfile_contents)
def test_file_io_oss(self):
sys.modules['iopath'] = MagicMock()
from fairseq.file_io import PathManager
with PathManager.open(os.path.join(self._tmpdir, 'test.txt'), 'r') as f:
s = f.read()
self.assertEqual(s, self._tmpfile_contents)
def test_file_io_async(self):
try:
from fairseq.file_io import IOPathManager, PathManager
_asyncfile = os.path.join(self._tmpdir, 'async.txt')
f = PathManager.opena(_asyncfile, 'wb')
f.close()
finally:
self.assertTrue(PathManager.async_close()) |
def _fuse_mha(graph: Graph):
    """Fuse each Matmul -> Softmax -> Matmul chain into one MultiHeadAttention node.

    Runs pattern_mapping over `graph`, then for every fused node rewires its
    input tensors and attributes from the original Q*K matmul
    (ret_old_nodes[idx][0]) and the attention*V matmul (ret_old_nodes[idx][1]).
    Mutates `graph` in place.
    """
    pattern = {'patterns': {'in': [[(0, 'Matmul'), (1, 'Softmax'), (2, 'Matmul')]], 'out': [[(0, 'MultiHeadAttention')]]}, 'search_mode': 'op_type', 'node_names': {0: 0}, 'input_tensors': {0: [[{0: [0]}], [[0], 1]]}, 'output_tensors': {0: [[{2: [0]}, {2: [1]}, {2: [2]}], [[0, 1, 2], 3]]}, 'returns': [0, 2]}
    (graph, new_node_names, ret_old_nodes) = pattern_mapping('fuse_mha', pattern, graph)
    if (len(new_node_names) != 0):
        for idx in range(len(new_node_names)):
            new_node_idx = graph.get_node_id(new_node_names[idx][0])
            # qkmatmul: the Q x K matmul; avmatmul: the attention x V matmul.
            qkmatmul = ret_old_nodes[idx][0]
            avmatmul = ret_old_nodes[idx][1]
            graph.nodes[new_node_idx].input_tensors = qkmatmul.input_tensors
            # If the third QK input is produced by a PaddingSequence node,
            # flatten that node's destination shape for the fused kernel.
            if ((len(qkmatmul.input_tensors) > 2) and qkmatmul.input_tensors[2].source_op and (graph.get_node_by_name(qkmatmul.input_tensors[2].source_op[0]).op_type == 'PaddingSequence')):
                graph.get_node_by_name(qkmatmul.input_tensors[2].source_op[0]).attr = {'dst_shape': '-1,-1'}
            # Insert V after Q/K, then append avmatmul inputs 4 and 5
            # (presumably quantization scales/zero-points — TODO confirm layout).
            graph.nodes[new_node_idx].input_tensors.insert(2, avmatmul.input_tensors[1])
            graph.nodes[new_node_idx].input_tensors.append(avmatmul.input_tensors[4])
            graph.nodes[new_node_idx].input_tensors.append(avmatmul.input_tensors[5])
            if ('reshape_dims' in avmatmul.attr):
                graph.nodes[new_node_idx].input_tensors.append(avmatmul.input_tensors[(- 1)])
            # Carry the AV matmul's attributes over, renaming perms per operand.
            graph.nodes[new_node_idx].attr = avmatmul.attr
            graph.nodes[new_node_idx].attr['V_perm'] = avmatmul.attr.pop('src1_perm')
            graph.nodes[new_node_idx].attr['K_perm'] = qkmatmul.attr['src1_perm']
            graph.nodes[new_node_idx].attr['Q_perm'] = qkmatmul.attr['src0_perm']
            if ('output_scale' in qkmatmul.attr):
                graph.nodes[new_node_idx].attr['output_scale'] = qkmatmul.attr['output_scale']
            graph.nodes[new_node_idx].attr['per_token'] = True
def train_all_epochs(opt, model, optimizer, train_sampler, train_loader, criterion, val_loader, num_train_samples=None, no_acc_eval=False, save_all_ranks=False, training_status_info=None, save_params=True):
    """Run the full training loop from opt.start_epoch to opt.epochs.

    Tracks best top-1/top-5 accuracy, logs timing/throughput each epoch and
    periodically saves 'latest' and 'best' checkpoints. Returns the (possibly
    freshly created) training_status_info dict.

    NOTE: the misspelled keys 'training_elasped_time' / 'validation_elasped_time'
    are load-bearing — they are persisted in checkpoints, so do not rename.
    """
    timer_start = time.time()
    if (training_status_info is None):
        training_status_info = {}
        training_status_info['best_acc1'] = 0
        training_status_info['best_acc5'] = 0
        training_status_info['best_acc1_at_epoch'] = 0
        training_status_info['best_acc5_at_epoch'] = 0
        training_status_info['training_elasped_time'] = 0
        training_status_info['validation_elasped_time'] = 0
    if (num_train_samples is None):
        num_train_samples = len(train_loader)
    for epoch in range(opt.start_epoch, opt.epochs):
        logging.info('--- Start training epoch {}'.format(epoch))
        # Reshuffle distributed sampler so each epoch sees a new ordering.
        if (train_sampler is not None):
            train_sampler.set_epoch(epoch)
        training_timer_start = time.time()
        train_one_epoch_info = train_one_epoch(train_loader, model, criterion, optimizer, epoch, opt, num_train_samples, no_acc_eval=no_acc_eval)
        training_status_info['training_elasped_time'] += (time.time() - training_timer_start)
        if (val_loader is not None):
            validation_timer_start = time.time()
            validate_info = validate(val_loader, model, criterion, opt, epoch=epoch)
            training_status_info['validation_elasped_time'] += (time.time() - validation_timer_start)
            acc1 = validate_info['top1_acc']
            acc5 = validate_info['top5_acc']
        else:
            # No validation loader: accuracies stay 0, so no 'best' updates.
            acc1 = 0
            acc5 = 0
        is_best_acc1 = (acc1 > training_status_info['best_acc1'])
        is_best_acc5 = (acc5 > training_status_info['best_acc5'])
        training_status_info['best_acc1'] = max(acc1, training_status_info['best_acc1'])
        training_status_info['best_acc5'] = max(acc5, training_status_info['best_acc5'])
        if is_best_acc1:
            training_status_info['best_acc1_at_epoch'] = epoch
        if is_best_acc5:
            training_status_info['best_acc5_at_epoch'] = epoch
        # Wall-clock progress estimate based on epochs completed so far.
        elasped_hour = ((time.time() - timer_start) / 3600)
        remaining_hour = ((((time.time() - timer_start) / float(((epoch - opt.start_epoch) + 1))) * (opt.epochs - epoch)) / 3600)
        logging.info('--- Epoch={}, Elasped hour={:8.4g}, Remaining hour={:8.4g}, Training Speed={:4g}, best_acc1={:4g}, best_acc1_at_epoch={}, best_acc5={}, best_acc5_at_epoch={}'.format(epoch, elasped_hour, remaining_hour, ((num_train_samples * (epoch + 1)) / float((training_status_info['training_elasped_time'] + 1e-08))), training_status_info['best_acc1'], training_status_info['best_acc1_at_epoch'], training_status_info['best_acc5'], training_status_info['best_acc5_at_epoch']))
        # Save the 'latest' checkpoint every save_freq epochs and at the end;
        # only rank 0 saves unless save_all_ranks is set.
        if (save_params and ((opt.rank == 0) or save_all_ranks) and ((((epoch + 1) % opt.save_freq) == 0) or ((epoch + 1) == opt.epochs))):
            checkpoint_filename = os.path.join(opt.save_dir, 'latest-params_rank{}.pth'.format(opt.rank))
            save_checkpoint(checkpoint_filename, {'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'top1_acc': acc1, 'top5_acc': acc5, 'training_status_info': training_status_info})
        # Separately save a 'best' checkpoint whenever top-1 improves.
        if (save_params and is_best_acc1 and ((opt.rank == 0) or save_all_ranks)):
            checkpoint_filename = os.path.join(opt.save_dir, 'best-params_rank{}.pth'.format(opt.rank))
            save_checkpoint(checkpoint_filename, {'epoch': epoch, 'state_dict': model.state_dict(), 'top1_acc': acc1, 'top5_acc': acc5, 'training_status_info': training_status_info})
        pass
    return training_status_info
class AutoModelForAudioClassification(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed: any
    instantiation raises via requires_backends. Likely auto-generated dummy —
    keep its shape in sync with the other dummy objects."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def create_exp_dir(dir_path, scripts_to_save=None, debug=False):
    """Create an experiment directory (optionally archiving scripts) and return a logger.

    In debug mode nothing is created and a no-op logging partial is returned.
    Otherwise returns a file logger writing to <dir_path>/log.txt.
    """
    if debug:
        print('Debug Mode : no experiment dir created')
        # NOTE(review): `logging` here is presumably a same-file helper
        # function (it shadows the stdlib module name) — confirm.
        return functools.partial(logging, log_path=None, log_=False)
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(dir_path, exist_ok=True)
    print('Experiment dir : {}'.format(dir_path))
    if (scripts_to_save is not None):
        script_path = os.path.join(dir_path, 'scripts')
        os.makedirs(script_path, exist_ok=True)
        for script in scripts_to_save:
            # Reuse script_path instead of re-joining 'scripts' each time.
            dst_file = os.path.join(script_path, os.path.basename(script))
            shutil.copyfile(script, dst_file)
    return get_logger(log_path=os.path.join(dir_path, 'log.txt'))
def test_convnext_learning_rate_decay_optimizer_constructor():
    """Check LearningRateDecayOptimizerConstructor on ConvNeXt for both decay types."""
    model = ConvNeXtExampleModel()
    optimizer_cfg = dict(type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05)
    cases = (
        ('stage_wise', stage_wise_gt_lst),
        ('layer_wise', layer_wise_gt_lst),
    )
    for decay_type, gt_lst in cases:
        paramwise_cfg = dict(decay_rate=decay_rate, decay_type=decay_type, num_layers=6)
        constructor = LearningRateDecayOptimizerConstructor(optimizer_cfg, paramwise_cfg)
        check_convnext_adamw_optimizer(constructor(model), gt_lst)
def activ_dispatch(activ, norm=None):
    """Map an activation name ('none'/'relu'/'lrelu', case-insensitive) to its
    layer constructor. `norm` is accepted for interface compatibility but unused.
    Raises KeyError for unknown names."""
    table = {
        'none': nn.Identity,
        'relu': nn.ReLU,
        'lrelu': partial(nn.LeakyReLU, negative_slope=0.2),
    }
    return table[activ.lower()]
def get_sources_from_sys_modules(globs, base_path):
    """Resolve sources for all currently loaded modules relative to base_path.

    Note: `globs` is part of the call signature but not used here.
    """
    loaded_modules = iterate_sys_modules()
    return get_sources_from_modules(loaded_modules, base_path)
def assemble_circuits(circuits, run_config, qobj_id, qobj_header):
    """Assemble a list of circuits into a QasmQobj ready for a backend.

    For each circuit: builds qubit/clbit label tables, converts every
    instruction to its Qobj form, and lowers classically-controlled
    instructions into bfunc + conditional pairs. The qobj config records the
    max qubit/memory-slot counts across all experiments.
    """
    qobj_config = QasmQobjConfig()
    if run_config:
        qobj_config = QasmQobjConfig(**run_config.to_dict())
    experiments = []
    max_n_qubits = 0
    max_memory_slots = 0
    for circuit in circuits:
        n_qubits = 0
        memory_slots = 0
        # Label tables map (register name, index) pairs to flat indices.
        qubit_labels = []
        clbit_labels = []
        qreg_sizes = []
        creg_sizes = []
        for qreg in circuit.qregs:
            qreg_sizes.append([qreg.name, qreg.size])
            for j in range(qreg.size):
                qubit_labels.append([qreg.name, j])
            n_qubits += qreg.size
        for creg in circuit.cregs:
            creg_sizes.append([creg.name, creg.size])
            for j in range(creg.size):
                clbit_labels.append([creg.name, j])
            memory_slots += creg.size
        header = QobjExperimentHeader(qubit_labels=qubit_labels, n_qubits=n_qubits, qreg_sizes=qreg_sizes, clbit_labels=clbit_labels, memory_slots=memory_slots, creg_sizes=creg_sizes, name=circuit.name)
        config = QasmQobjExperimentConfig(n_qubits=n_qubits, memory_slots=memory_slots)
        # Conditional experiments need measure results routed to registers too.
        is_conditional_experiment = any((op.control for (op, qargs, cargs) in circuit.data))
        max_conditional_idx = 0
        instructions = []
        for op_context in circuit.data:
            instruction = op_context[0].assemble()
            qargs = op_context[1]
            cargs = op_context[2]
            if qargs:
                qubit_indices = [qubit_labels.index([qubit.register.name, qubit.index]) for qubit in qargs]
                instruction.qubits = qubit_indices
            if cargs:
                clbit_indices = [clbit_labels.index([clbit.register.name, clbit.index]) for clbit in cargs]
                instruction.memory = clbit_indices
                # In conditional experiments, measure results also go to the
                # register so later bfuncs can read them.
                if ((instruction.name == 'measure') and is_conditional_experiment):
                    instruction.register = clbit_indices
            # Lower a classical control into a bfunc comparison feeding a
            # conditional register slot placed after the real memory slots.
            if hasattr(instruction, '_control'):
                (ctrl_reg, ctrl_val) = instruction._control
                mask = 0
                val = 0
                # Build the bit mask/value over the controlling register's bits.
                for clbit in clbit_labels:
                    if (clbit[0] == ctrl_reg.name):
                        mask |= (1 << clbit_labels.index(clbit))
                        val |= (((ctrl_val >> clbit[1]) & 1) << clbit_labels.index(clbit))
                conditional_reg_idx = (memory_slots + max_conditional_idx)
                conversion_bfunc = QasmQobjInstruction(name='bfunc', mask=('0x%X' % mask), relation='==', val=('0x%X' % val), register=conditional_reg_idx)
                instructions.append(conversion_bfunc)
                instruction.conditional = conditional_reg_idx
                max_conditional_idx += 1
                # The raw _control attribute must not leak into the Qobj.
                del instruction._control
            instructions.append(instruction)
        experiments.append(QasmQobjExperiment(instructions=instructions, header=header, config=config))
        if (n_qubits > max_n_qubits):
            max_n_qubits = n_qubits
        if (memory_slots > max_memory_slots):
            max_memory_slots = memory_slots
    qobj_config.memory_slots = max_memory_slots
    qobj_config.n_qubits = max_n_qubits
    return QasmQobj(qobj_id=qobj_id, config=qobj_config, experiments=experiments, header=qobj_header)
class CEGAT(MessagePassing):
    """Multi-layer GAT stack with optional BatchNorm between hidden layers.

    Args:
        in_dim, hid_dim, out_dim: input/hidden/output feature sizes.
        num_layers: total number of GATConv layers (expects >= 2).
        heads / output_heads: attention heads for hidden / final layers.
        dropout: dropout probability applied between layers.
        Normalization: 'bn' for BatchNorm1d, anything else for Identity.
    """

    def __init__(self, in_dim, hid_dim, out_dim, num_layers, heads, output_heads, dropout, Normalization='bn'):
        super(CEGAT, self).__init__()
        self.convs = nn.ModuleList()
        # One normalization per hidden layer; the final layer has none.
        self.normalizations = nn.ModuleList()
        if (Normalization == 'bn'):
            # NOTE(review): the first GATConv presumably outputs heads*hid_dim
            # (concat) while BatchNorm1d is sized hid_dim — confirm against
            # the GATConv defaults used in this project.
            self.convs.append(GATConv(in_dim, hid_dim, heads))
            self.normalizations.append(nn.BatchNorm1d(hid_dim))
            for _ in range((num_layers - 2)):
                self.convs.append(GATConv((heads * hid_dim), hid_dim))
                self.normalizations.append(nn.BatchNorm1d(hid_dim))
            self.convs.append(GATConv((heads * hid_dim), out_dim, heads=output_heads, concat=False))
        else:
            self.convs.append(GATConv(in_dim, hid_dim, heads))
            self.normalizations.append(nn.Identity())
            for _ in range((num_layers - 2)):
                self.convs.append(GATConv((hid_dim * heads), hid_dim))
                self.normalizations.append(nn.Identity())
            self.convs.append(GATConv((hid_dim * heads), out_dim, heads=output_heads, concat=False))
        self.dropout = dropout

    def reset_parameters(self):
        """Re-initialize all conv layers and any stateful normalizations."""
        for layer in self.convs:
            layer.reset_parameters()
        for normalization in self.normalizations:
            # BUGFIX: the original compared class names with `is`
            # (normalization.__class__.__name__ is 'Identity'), whose result is
            # interpreter-dependent string interning; use isinstance instead.
            if not isinstance(normalization, nn.Identity):
                normalization.reset_parameters()

    def forward(self, data):
        """Apply conv -> ReLU -> norm -> dropout per hidden layer, then the final conv."""
        (x, edge_index, norm) = (data.x, data.edge_index, data.norm)
        for (i, conv) in enumerate(self.convs[:(- 1)]):
            x = conv(x, edge_index)
            x = F.relu(x, inplace=True)
            x = self.normalizations[i](x)
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.convs[(- 1)](x, edge_index)
        return x
class FlaxFeedForward(nn.Module):
    """Transformer feed-forward block: GEGLU projection followed by a dense
    output layer back to `dim` features."""
    dim: int
    dropout: float = 0.0
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # NOTE: the submodule names net_0/net_2 look chosen to mirror an
        # nn.Sequential layout (presumably for checkpoint compatibility) —
        # do not rename them.
        self.net_0 = FlaxGEGLU(self.dim, self.dropout, self.dtype)
        self.net_2 = nn.Dense(self.dim, dtype=self.dtype)

    def __call__(self, hidden_states, deterministic=True):
        projected = self.net_0(hidden_states, deterministic=deterministic)
        return self.net_2(projected)
def check_plot_figsize(figsize):
    """Validate that `figsize` is a 2-tuple of numbers and return it unchanged.

    Raises TypeError for non-tuples and ValueError for wrong length or
    non-numeric elements.
    """
    if not isinstance(figsize, tuple):
        raise TypeError(f"'figsize' must be a tuple with 2 elements, got {type(figsize)}." + PLOT_FIGSIZE_INFO)
    if len(figsize) != 2:
        raise ValueError(f"'figsize' must be a tuple with 2 elements, got {len(figsize)} elements." + PLOT_FIGSIZE_INFO)
    for element in figsize:
        if isinstance(element, numbers.Number):
            continue
        raise ValueError(f"Elements of 'figsize' must be a `int` or `float`, got {type(element)}." + PLOT_FIGSIZE_INFO)
    return figsize
class Total_Yngve_Depth(object):
    """Mean total Yngve depth across a collection of sentence objects."""

    def __init__(self, sentence_objs):
        self.sentence_objs = sentence_objs

    def handle(self):
        """Average total_yngve_depth over all sentences' Yngve trees."""
        depth_sum = sum(total_yngve_depth(so.yngve_tree_root) for so in self.sentence_objs)
        return depth_sum / len(self.sentence_objs)
class Mixed(torch.nn.Module):
    """I3D Inception-style mixed block: four parallel branches concatenated on
    the channel dimension. `out_channels` is a 6-element spec."""

    def __init__(self, in_channels, out_channels):
        super(Mixed, self).__init__()
        # Branch 0: plain 1x1x1 conv.
        self.branch_0 = Unit3Dpy(in_channels, out_channels[0], kernel_size=(1, 1, 1))
        # Branch 1: 1x1x1 reduction followed by a 3x3x3 conv.
        self.branch_1 = torch.nn.Sequential(
            Unit3Dpy(in_channels, out_channels[1], kernel_size=(1, 1, 1)),
            Unit3Dpy(out_channels[1], out_channels[2], kernel_size=(3, 3, 3)),
        )
        # Branch 2: 1x1x1 reduction followed by a 3x3x3 conv.
        self.branch_2 = torch.nn.Sequential(
            Unit3Dpy(in_channels, out_channels[3], kernel_size=(1, 1, 1)),
            Unit3Dpy(out_channels[3], out_channels[4], kernel_size=(3, 3, 3)),
        )
        # Branch 3: TF-style 'SAME' max-pool followed by a 1x1x1 conv.
        self.branch_3 = torch.nn.Sequential(
            MaxPool3dTFPadding(kernel_size=(3, 3, 3), stride=(1, 1, 1), padding='SAME'),
            Unit3Dpy(in_channels, out_channels[5], kernel_size=(1, 1, 1)),
        )

    def forward(self, inp):
        branches = (self.branch_0(inp), self.branch_1(inp), self.branch_2(inp), self.branch_3(inp))
        return torch.cat(branches, 1)
def get_auto_estimator(backend='torch'):
    """Build an AutoTCN estimator configured for the given backend.

    Keras-family backends use the string loss 'mse'; torch uses nn.MSELoss.
    """
    loss = 'mse' if backend.startswith('keras') else torch.nn.MSELoss()
    settings = dict(
        input_feature_num=input_feature_dim,
        output_target_num=output_feature_dim,
        past_seq_len=past_seq_len,
        future_seq_len=future_seq_len,
        optimizer='Adam',
        loss=loss,
        metric='mse',
        backend=backend,
        hidden_units=8,
        num_channels=[16] * 2,
        levels=hp.randint(1, 3),
        kernel_size=hp.choice([2, 3]),
        lr=hp.choice([0.001, 0.003, 0.01]),
        dropout=hp.uniform(0.1, 0.2),
        logs_dir='/tmp/auto_tcn',
        cpus_per_trial=2,
        name='auto_tcn',
    )
    return AutoTCN(**settings)
def plot_roc(thr_unc_lst, thr_pred_lst, res_dct, metric, fname_out):
    """Plot an ROC-style (TPR vs FDR) curve per uncertainty threshold and save it.

    res_dct['tpr'] / res_dct['fdr'] are indexed [uncertainty thr][prediction thr];
    each cell is averaged with nanmean. Also logs the AUC and the prediction
    threshold maximizing Youden-style TPR - FDR per curve.
    """
    plt.figure(figsize=(10, 10))
    for (i_unc, thr_unc) in enumerate(thr_unc_lst):
        logger.info(f'Unc Thr: {thr_unc}')
        tpr_vals = np.array([np.nanmean(res_dct['tpr'][i_unc][i_pred]) for i_pred in range(len(thr_pred_lst))])
        fdr_vals = np.array([np.nanmean(res_dct['fdr'][i_unc][i_pred]) for i_pred in range(len(thr_pred_lst))])
        auc_ = auc_homemade(fdr_vals, tpr_vals, True)
        # Operating point maximizing TPR - FDR for this uncertainty threshold.
        optimal_idx = np.argmax((tpr_vals - fdr_vals))
        optimal_threshold = thr_pred_lst[optimal_idx]
        logger.info(f'AUC: {auc_}, Optimal Pred Thr: {optimal_threshold}')
        plt.scatter(fdr_vals, tpr_vals, label='Unc thr={0:0.2f} (area = {1:0.2f})'.format(thr_unc, auc_), s=22)
    # Diagonal chance line.
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Detection Rate')
    plt.ylabel('True Positive Rate')
    plt.title(('ROC - ' + metric))
    plt.legend(loc='lower right')
    plt.savefig(fname_out, bbox_inches='tight', pad_inches=0)
    plt.close()
# BUGFIX: line above was the bare name `_request`, a mangled `@s3_request`
# decorator — as written the module raised NameError at import and s3_get was
# never wrapped. Restored the decorator (presumably it maps S3 errors to
# friendlier exceptions — confirm against the s3_request definition).
@s3_request
def s3_get(url, temp_file):
    """Download the object at an S3 URL into the given open file object."""
    import boto3
    s3_resource = boto3.resource('s3')
    (bucket_name, s3_path) = split_s3_path(url)
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
class Track():
    """A single tracked target whose state is managed by an external Kalman filter.

    Lifecycle: Tentative until `_n_init` hits, then Confirmed; Deleted when
    missed while tentative or unmatched for more than `_max_age` steps.
    The state head of `mean` is (center x, center y, aspect ratio, height).
    """

    def __init__(self, mean, covariance, track_id, n_init, max_age, feature=None):
        self.mean = mean
        self.covariance = covariance
        self.track_id = track_id
        self.hits = 1
        self.age = 1
        self.time_since_update = 0
        self.state = TrackState.Tentative
        self.features = [] if feature is None else [feature]
        self._n_init = n_init
        self._max_age = max_age

    def to_tlwh(self):
        """Bounding box as (top-left x, top-left y, width, height)."""
        box = self.mean[:4].copy()
        box[2] = box[2] * box[3]            # aspect * height -> width
        box[:2] = box[:2] - (box[2:] / 2)   # center -> top-left corner
        return box

    def to_tlbr(self):
        """Bounding box as (min x, min y, max x, max y)."""
        box = self.to_tlwh()
        box[2:] = box[:2] + box[2:]
        return box

    def predict(self, kf):
        """Propagate the state one step with Kalman filter `kf`."""
        self.mean, self.covariance = kf.predict(self.mean, self.covariance)
        self.age += 1
        self.time_since_update += 1

    def update(self, kf, detection):
        """Fold a matched detection into the state estimate."""
        self.mean, self.covariance = kf.update(self.mean, self.covariance, detection.to_xyah())
        self.features.append(detection.feature)
        self.hits += 1
        self.time_since_update = 0
        promoted = self.state == TrackState.Tentative and self.hits >= self._n_init
        if promoted:
            self.state = TrackState.Confirmed

    def mark_missed(self):
        """Handle an unmatched frame: delete if still tentative or too stale."""
        if self.state == TrackState.Tentative or self.time_since_update > self._max_age:
            self.state = TrackState.Deleted

    def is_tentative(self):
        return self.state == TrackState.Tentative

    def is_confirmed(self):
        return self.state == TrackState.Confirmed

    def is_deleted(self):
        return self.state == TrackState.Deleted
# BUGFIX: the original line was a bare `_registry(...)` call — a mangled
# `@dataset_registry(...)` decorator. As written it raised NameError at import
# and the class was never registered; restored the decorator.
@dataset_registry(dataset_type='ImageRecord', framework='tensorflow, tensorflow_itex', dataset_format='')
class TensorflowImageRecord(IterableDataset):
    """ImageNet-style TFRecord dataset that yields a tf.data pipeline.

    `__new__` returns the tf.data.Dataset itself (not a class instance):
    shards matching '<name>-<shard>-of-<total>' under `root` are interleaved,
    parsed/decoded, optionally transformed, and prefetched.
    """

    def __new__(cls, root, transform=None, filter=None):
        from tensorflow.python.platform import gfile
        # Shard files follow the "<name>-<shard>-of-<total>" convention.
        glob_pattern = os.path.join(root, '*-*-of-*')
        file_names = gfile.Glob(glob_pattern)
        if not file_names:
            raise ValueError('Found no files in --root matching: {}'.format(glob_pattern))
        from tensorflow.python.data.experimental import parallel_interleave
        from neural_compressor.data.transforms.imagenet_transform import ParseDecodeImagenet
        ds = tf.data.TFRecordDataset.list_files(file_names, shuffle=False)
        ds = ds.apply(parallel_interleave(tf.data.TFRecordDataset, cycle_length=len(file_names)))
        # ParseDecodeImagenet must run before any user transforms.
        if transform is not None:
            transform.transform_list.insert(0, ParseDecodeImagenet())
        else:
            transform = ParseDecodeImagenet()
        ds = ds.map(transform, num_parallel_calls=None)
        ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
        return ds
class LabeledExamplePlotter():
    """Matplotlib visualizations (waveform and spectrograms) for a LabeledExample."""
    def __init__(self, example: LabeledExample):
        self.example = example
    def _plot_audio(self, audio: ndarray) -> None:
        # Shared helper: plot a 1-D audio signal against sample index.
        plt.title(str(self))
        plt.xlabel('time / samples (sample rate {}Hz)'.format(self.example.sample_rate))
        plt.ylabel('y')
        plt.plot(audio)
        plt.show()
    def show_spectrogram(self, type: SpectrogramType=SpectrogramType.power_level):
        """Render the spectrogram in an interactive window."""
        self.prepare_spectrogram_plot(type)
        plt.show()
    def save_spectrogram(self, target_directory: Path, type: SpectrogramType=SpectrogramType.power_level, frequency_scale: SpectrogramFrequencyScale=SpectrogramFrequencyScale.linear) -> Path:
        """Save the spectrogram as a PNG and return the written path."""
        self.prepare_spectrogram_plot(type, frequency_scale)
        path = Path(target_directory, '{}_{}{}_spectrogram.png'.format(self.example.id, ('mel_' if (frequency_scale == SpectrogramFrequencyScale.mel) else ''), type.value.replace(' ', '_')))
        plt.savefig(str(path))
        return path
    def plot_raw_audio(self) -> None:
        """Plot the untouched audio samples."""
        self._plot_audio(self.example.get_raw_audio())
    def prepare_spectrogram_plot(self, type: SpectrogramType=SpectrogramType.power_level, frequency_scale: SpectrogramFrequencyScale=SpectrogramFrequencyScale.linear) -> None:
        """Build (but do not show) the spectrogram figure on the current pyplot state."""
        spectrogram = self.example.spectrogram(type, frequency_scale=frequency_scale)
        (figure, axes) = plt.subplots(1, 1)
        use_mel = (frequency_scale == SpectrogramFrequencyScale.mel)
        plt.title('\n'.join(wrap('{0}{1} spectrogram for {2}'.format(('mel ' if use_mel else ''), type.value, str(self)), width=100)))
        plt.xlabel('time (data every {}ms)'.format(round((1000 / self.example.time_step_rate()))))
        plt.ylabel('frequency (data evenly distributed on {} scale, {} total)'.format(frequency_scale.value, self.example.frequency_count_from_spectrogram(spectrogram)))
        mel_frequencies = self.example.mel_frequencies()
        # Image extent: seconds on x; mel or Hz on y depending on the scale.
        plt.imshow(spectrogram, cmap='gist_heat', origin='lower', aspect='auto', extent=[0, self.example.duration_in_s, (librosa.hz_to_mel(mel_frequencies[0])[0] if use_mel else 0), (librosa.hz_to_mel(mel_frequencies[(- 1)])[0] if use_mel else self.example.highest_detectable_frequency())])
        plt.colorbar(label='{} ({})'.format(type.value, ('in{} dB, not aligned to a particular base level'.format((' something similar to' if use_mel else '')) if (type == SpectrogramType.power_level) else 'only proportional to physical scale')))
        class ScalarFormatterWithUnit(ScalarFormatter):
            # Tick formatter that appends a fixed unit suffix (e.g. 's', 'Hz').
            def __init__(self, unit: str):
                super().__init__()
                self.unit = unit
            def __call__(self, x, pos=None) -> str:
                return (super().__call__(x, pos) + self.unit)
        axes.xaxis.set_major_formatter(ScalarFormatterWithUnit('s'))
        # Mel axis shows both mel and the equivalent Hz value per tick.
        axes.yaxis.set_major_formatter((FuncFormatter((lambda value, pos: '{}mel = {}Hz'.format(int(value), int(librosa.mel_to_hz(value)[0])))) if use_mel else ScalarFormatterWithUnit('Hz')))
        figure.set_size_inches(19.2, 10.8)
    def plot_reconstructed_audio_from_spectrogram(self) -> None:
        """Plot the audio reconstructed from the spectrogram (round-trip check)."""
        self._plot_audio(self.example.reconstructed_audio_from_spectrogram())
    def save_reconstructed_audio_from_spectrogram(self, target_directory: Path) -> None:
        """Write the reconstructed audio to a wav file named with the STFT params."""
        librosa.output.write_wav(str(Path(target_directory, '{}_window{}_hop{}.wav'.format(self.example.id, self.example.fourier_window_length, self.example.hop_length))), self.example.reconstructed_audio_from_spectrogram(), sr=self.example.sample_rate)
    def save_spectrograms_of_all_types(self, target_directory: Path) -> None:
        """Save one PNG per (spectrogram type, frequency scale) combination."""
        for type in SpectrogramType:
            for frequency_scale in SpectrogramFrequencyScale:
                self.save_spectrogram(target_directory=target_directory, type=type, frequency_scale=frequency_scale)
def loss_unsup_data(im1_0, im2_0, flow_f5, flow_f4, flow_f3, flow_f2, flow_f1, flow_f0, flow_b5, flow_b4, flow_b3, flow_b2, flow_b1, flow_b0, cbn, vmap_f, vmap_b, w5, w4, w3, w2, w1, occt):
    """Multi-scale occlusion-aware unsupervised data loss between two frames.

    Builds 5-level Gaussian pyramids of both images, evaluates the weighted
    data term at every scale, and returns (total loss, finest-scale term),
    both divided by the total valid-pixel count.
    """
    # Gaussian pyramids: level 0 is the input image, levels 1..5 are
    # progressively smoothed copies.
    pyr1 = [im1_0]
    pyr2 = [im2_0]
    for level in range(1, 6):
        pyr1.append(gaussian_smooth.gauss_conv(pyr1[-1], size=5, nsig=3, name='pyr{}_1'.format(level)))
        pyr2.append(gaussian_smooth.gauss_conv(pyr2[-1], size=5, nsig=3, name='pyr{}_2'.format(level)))
    flows_f = (flow_f0, flow_f1, flow_f2, flow_f3, flow_f4, flow_f5)
    flows_b = (flow_b0, flow_b1, flow_b2, flow_b3, flow_b4, flow_b5)
    scale_weights = (1.0, w1 * 1.0, w2 * 2.0, w3 * 4.0, w4 * 16.0, w5 * 64.0)
    # Data terms created coarsest-first, matching the original op creation order.
    dt = {}
    for level in (5, 4, 3, 2, 1, 0):
        term = get_data_occ(pyr1[level], pyr2[level], flows_f[level], flows_b[level], name='data_term_{}'.format(level), cbn=cbn, occt=occt, vmap_f=vmap_f, vmap_b=vmap_b)
        dt[level] = tf.multiply(term, tf.constant(scale_weights[level]))
    # Accumulate with the exact same addition order as the original code.
    loss = tf.add(dt[1], tf.add(tf.add(tf.add(dt[5], dt[4]), dt[3]), dt[2]))
    loss = tf.add(loss, dt[0])
    divide_by = tf.add(tf.reduce_sum(vmap_f), tf.reduce_sum(vmap_b))
    return (tf.divide(loss, divide_by), tf.divide(dt[0], divide_by))
def image_stats(image, mask=None):
    """Per-channel mean and std of a 3-channel (L*a*b*) image.

    If `mask` is given, only the masked pixels contribute; the mask is
    flattened and used to index the flattened channels.
    Returns (lMean, lStd, aMean, aStd, bMean, bStd).
    """
    channels = cv2.split(image)
    if mask is not None:
        flat_mask = mask.reshape(-1)
        channels = [c.reshape(-1)[flat_mask] for c in channels]
    l, a, b = channels
    return (l.mean(), l.std(), a.mean(), a.std(), b.mean(), b.std())
def prepare_dataset(sentences, word_to_id, char_to_id, tag_to_id, ctc_pred_dict, lower=True):
    """Convert tagged sentences into index-based training examples.

    Each sentence becomes a dict of word/char/cap/tag indices plus
    segmentation, CTC predictions, and handcrafted features.
    """
    def normalize(token):
        # Lowercase lookup key when `lower` is enabled.
        return token.lower() if lower else token

    hands = hand_features_to_idx(sentences)
    data = []
    for idx, sentence in enumerate(sentences):
        str_words = [token[0] for token in sentence]
        words = [word_to_id[normalize(w) if normalize(w) in word_to_id else '<UNK>'] for w in str_words]
        chars = [[char_to_id[c] for c in w if c in char_to_id] for w in str_words]
        caps = [cap_feature(w) for w in str_words]
        tags = [tag_to_id[token[-1]] for token in sentence]
        data.append({
            'str_words': str_words,
            'words': words,
            'chars': chars,
            'caps': caps,
            'tags': tags,
            'seg_pred': seg_pred_to_idx(sentence),
            'ctc_pred': ctc_pred_to_idx(sentence, ctc_pred_dict),
            'handcrafted': hands[idx],
        })
    return data
class PairwiseRankingLoss(nn.Module):
    """Margin-based bidirectional hinge ranking loss for image/sentence pairs."""

    def __init__(self, margin):
        super(PairwiseRankingLoss, self).__init__()
        self.margin = margin

    def forward(self, anchor1, anchor2, img_sentc, sent_imgc):
        """Sum the hinge costs of both retrieval directions."""
        sent_cost = torch.clamp(self.margin - anchor1 + img_sentc, min=0.0).sum()
        img_cost = torch.clamp(self.margin - anchor2 + sent_imgc, min=0.0).sum()
        return sent_cost + img_cost
class RandomVerticalFlip(object):
    """Flip an image top-to-bottom with probability 0.5."""

    def __call__(self, img):
        should_flip = random.random() < 0.5
        return F.vflip(img) if should_flip else img
class BasicGRUCell(tf.contrib.rnn.RNNCell):
    """GRU cell with optional layer normalization on the gate pre-activations."""

    def __init__(self, num_units, activation=tf.tanh, layer_norm=False):
        self._num_units = num_units
        self._activation = activation
        self._layer_norm = layer_norm

    # BUGFIX: the RNNCell contract requires state_size/output_size to be
    # properties; without @property, TF code reading cell.state_size got a
    # bound method instead of an integer.
    @property
    def state_size(self):
        return self._num_units

    @property
    def output_size(self):
        return self._num_units

    def __call__(self, inputs, state, scope=None):
        """One GRU step: returns (output, new_state), which are the same tensor."""
        with tf.variable_scope((scope or type(self).__name__)):
            with tf.variable_scope('gates'):
                # Reset and update gates computed from one fused linear layer;
                # bias starts at 1.0 so gates begin mostly open.
                concat = rnn_ops.linear([inputs, state], (2 * self._num_units), True, bias_start=1.0)
                (r, u) = tf.split(value=concat, num_or_size_splits=2, axis=1)
                if self._layer_norm:
                    r = rnn_ops.layer_norm(r, name='r')
                    u = rnn_ops.layer_norm(u, name='u')
                r = tf.sigmoid(r)
                u = tf.sigmoid(u)
            with tf.variable_scope('candidate'):
                c = self._activation(rnn_ops.linear([inputs, (r * state)], self._num_units, True))
            new_h = ((u * state) + ((1 - u) * c))
        return (new_h, new_h)

    def trainable_initial_state(self, batch_size):
        """Create a learnable initial hidden state of shape [batch_size, num_units]."""
        with tf.variable_scope('initial_h'):
            initial_h = rnn_ops.create_initial_state(batch_size, self._num_units)
        return initial_h
class Conv2d(AbstractFourierBasis):
    """Random Fourier feature basis realized as a 2-D convolution.

    Maps images to `num_bases` cosine features: scale * cos(conv(x) + b),
    with filters/biases sampled lazily from the kernel on first call.
    """

    def __init__(self, kernel: kernels.Conv2d, num_bases: int, filters: tf.Tensor=None, biases: tf.Tensor=None, name: str=None):
        super().__init__(name=name, kernel=kernel, num_bases=num_bases)
        self._filters = filters
        self._biases = biases

    def __call__(self, x: TensorType) -> tf.Tensor:
        self._maybe_initialize(x)
        if (isinstance(x, InducingVariables) or (len(x.shape) == 4)):
            conv = self.convolve(x)
        elif (len(x.shape) > 4):
            # Collapse leading batch dims to a single 4-D batch, convolve,
            # then restore the original leading shape.
            x_4d = tf.reshape(x, ([(- 1)] + list(x.shape[(- 3):])))
            conv = self.convolve(x_4d)
            conv = tf.reshape(conv, (list(x.shape[:(- 3)]) + list(conv.shape[1:])))
        else:
            raise NotImplementedError
        return (self.output_scale * tf.cos((conv + self.biases)))

    def convolve(self, x: TensorType) -> tf.Tensor:
        """Apply the basis filters to images or inducing images."""
        if isinstance(x, inducing_variables.InducingImages):
            return tf.nn.conv2d(input=x.as_images, filters=self.filters, strides=(1, 1, 1, 1), padding='VALID')
        return self.kernel.convolve(input=x, filters=self.filters)

    def initialize(self, x, dtype: Any=None):
        """Sample biases and filter weights from the kernel's spectral density."""
        if isinstance(x, inducing_variables.InducingImages):
            x = x.as_images
        if (dtype is None):
            dtype = x.dtype
        self._biases = bias_initializer(self.kernel.kernel, self.num_bases, dtype=dtype)
        patch_size = ((self.kernel.channels_in * self.kernel.patch_shape[0]) * self.kernel.patch_shape[1])
        weights = weight_initializer(self.kernel.kernel, patch_size, batch_shape=[self.num_bases], dtype=dtype)
        shape = (self.kernel.patch_shape + [self.kernel.channels_in, self.num_bases])
        self._filters = tf.reshape(move_axis(weights, (- 1), 0), shape)

    # BUGFIX: filters/biases/output_scale are consumed as plain attributes in
    # __call__ and convolve (e.g. `self.output_scale * ...`), so they must be
    # properties — the @property decorators were missing.
    @property
    def filters(self):
        """Filters rescaled by the kernel's inverse lengthscales, or None if uninitialized."""
        if (self._filters is None):
            return None
        shape = (list(self.kernel.patch_shape) + [self.kernel.channels_in, 1])
        inv_ls = tf.math.reciprocal(self.kernel.kernel.lengthscales)
        if self.kernel.kernel.ard:
            coeffs = tf.reshape(inv_ls, shape)
        else:
            coeffs = tf.fill(shape, inv_ls)
        return (coeffs * self._filters)

    @property
    def biases(self):
        return self._biases

    @property
    def output_scale(self):
        # Standard RFF normalization: sqrt(2 * variance / num_bases).
        return tf.sqrt(((2 * self.kernel.kernel.variance) / self.num_bases))
def main():
    """Distributed (DDP) training entry point: train, select best checkpoints,
    then evaluate the selected checkpoint(s) on the test set and report."""
    (args, args_dict) = parse_input(eval=False)
    log_device(args)
    model = get_model(args)
    model.cuda(args.c_cudaid)
    model = DDP(model, device_ids=[args.c_cudaid])
    best_state_dict = deepcopy(model.state_dict())
    (optimizer, lr_scheduler) = get_optimizer(args, model)
    loss = get_loss(args)
    # Some tasks use a frozen pretrained classifier as an auxiliary model.
    inter_classifier = None
    if (args.task in [constants.F_CL, constants.NEGEV]):
        inter_classifier = get_pretrainde_classifier(args)
        inter_classifier.cuda(args.c_cudaid)
    trainer: Trainer = Trainer(args=args, model=model, optimizer=optimizer, lr_scheduler=lr_scheduler, loss=loss, classifier=inter_classifier)
    # Epoch 0: evaluate the untrained model to seed model selection.
    DLLogger.log(fmsg('Start epoch 0 ...'))
    trainer.evaluate(epoch=0, split=constants.VALIDSET)
    trainer.model_selection(epoch=0)
    if args.is_master:
        trainer.print_performances()
        trainer.report(epoch=0, split=constants.VALIDSET)
    DLLogger.log(fmsg('Epoch 0 done.'))
    for epoch in range(trainer.args.max_epochs):
        # Barriers keep all ranks in lockstep around train/eval/checkpointing.
        dist.barrier()
        zepoch = (epoch + 1)
        DLLogger.log(fmsg('Start epoch {} ...'.format(zepoch)))
        train_performance = trainer.train(split=constants.TRAINSET, epoch=zepoch)
        trainer.evaluate(zepoch, split=constants.VALIDSET)
        trainer.model_selection(epoch=zepoch)
        if args.is_master:
            trainer.report_train(train_performance, zepoch)
            trainer.print_performances()
            trainer.report(zepoch, split=constants.VALIDSET)
            DLLogger.log(fmsg('Epoch {} done.'.format(zepoch)))
        trainer.adjust_learning_rate()
        DLLogger.flush()
    if args.is_master:
        trainer.save_checkpoints()
    dist.barrier()
    trainer.save_best_epoch()
    trainer.capture_perf_meters()
    DLLogger.log(fmsg('Final epoch evaluation on test set ...'))
    # Choose which checkpoint(s) to evaluate: classification-best and/or
    # localization-best, depending on the task and label availability.
    if (args.task != constants.SEG):
        chpts = [constants.BEST_CL]
        if args.localization_avail:
            chpts = ([constants.BEST_LOC] + chpts)
    else:
        chpts = [constants.BEST_LOC]
    use_argmax = False
    for eval_checkpoint_type in chpts:
        t0 = dt.datetime.now()
        if (eval_checkpoint_type == constants.BEST_LOC):
            epoch = trainer.args.best_loc_epoch
        elif (eval_checkpoint_type == constants.BEST_CL):
            epoch = trainer.args.best_cl_epoch
        else:
            raise NotImplementedError
        DLLogger.log(fmsg('EVAL TEST SET. CHECKPOINT: {}. ARGMAX: {}'.format(eval_checkpoint_type, use_argmax)))
        trainer.load_checkpoint(checkpoint_type=eval_checkpoint_type)
        trainer.evaluate(epoch, split=constants.TESTSET, checkpoint_type=eval_checkpoint_type, fcam_argmax=use_argmax)
        if args.is_master:
            trainer.print_performances(checkpoint_type=eval_checkpoint_type)
            trainer.report(epoch, split=constants.TESTSET, checkpoint_type=eval_checkpoint_type)
            trainer.save_performances(epoch=epoch, checkpoint_type=eval_checkpoint_type)
        # Restore meters captured before test-time evaluation.
        trainer.switch_perf_meter_to_captured()
        tagargmax = f'Argmax: {use_argmax}'
        DLLogger.log('EVAL time TESTSET - CHECKPOINT {} {}: {}'.format(eval_checkpoint_type, tagargmax, (dt.datetime.now() - t0)))
        DLLogger.flush()
    dist.barrier()
    if args.is_master:
        trainer.save_args()
        trainer.plot_perfs_meter()
    bye(trainer.args)
def convert_result_list(outputs):
    """Flatten an arbitrarily nested structure of tensors into a flat list.

    A bare ``torch.Tensor`` becomes a one-element list; anything else is
    treated as an iterable whose elements are flattened recursively, in
    left-to-right order.
    """
    if isinstance(outputs, torch.Tensor):
        return [outputs]
    flattened = []
    for element in outputs:
        flattened.extend(convert_result_list(element))
    return flattened
def base_lm_architecture(args):
    """Populate ``args`` in place with the base language-model hyper-parameter
    defaults, leaving any attribute the caller already set untouched.

    If ``decoder_final_norm`` is present, it is translated into the internal
    ``no_decoder_final_norm`` flag before the defaults are applied.
    """
    if hasattr(args, 'decoder_final_norm'):
        args.no_decoder_final_norm = not args.decoder_final_norm
    # Applied in order; later entries may not depend on these.
    for attr, default in (
        ('dropout', 0.1),
        ('attention_dropout', 0.0),
        ('decoder_embed_dim', 512),
        ('decoder_ffn_embed_dim', 2048),
        ('decoder_layers', 6),
        ('decoder_cross_layers', 6),
        ('decoder_attention_heads', 8),
        ('decoder_learned_pos', False),
        ('activation_fn', 'relu'),
        ('decoder_layerdrop', 0),
        ('decoder_layers_to_keep', None),
        ('quant_noise_pq', 0),
        ('quant_noise_pq_block_size', 8),
        ('quant_noise_scalar', 0),
        ('add_bos_token', False),
        ('no_token_positional_embeddings', False),
        ('share_decoder_input_output_embed', False),
    ):
        setattr(args, attr, getattr(args, attr, default))
    # These two default to the (possibly user-supplied) embedding dim above.
    args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
    args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
    # Always force pre-norm decoder blocks; not user-overridable here.
    args.decoder_normalize_before = True
    for attr, default in (
        ('no_decoder_final_norm', False),
        ('no_scale_embedding', False),
        ('layernorm_embedding', False),
        ('checkpoint_activations', False),
        ('offload_activations', False),
    ):
        setattr(args, attr, getattr(args, attr, default))
    # Activation offloading is implemented on top of checkpointing.
    if args.offload_activations:
        args.checkpoint_activations = True
def process_line(line):
    """Parse one manifest line of the form ``'<image_name> <label>'``.

    The line is stripped of surrounding whitespace and split on a single
    space (exactly two fields expected); the label is converted to ``int``.
    Returns an ``(image_name, label)`` tuple.
    """
    image_name, raw_label = line.strip().split(' ')
    return (image_name, int(raw_label))
class BasicBlock(nn.Module):
    """Pre-activation residual block (WideResNet style).

    Layout: BN -> ReLU -> 3x3 conv -> BN -> ReLU -> [dropout] -> 3x3 conv,
    added to a shortcut. When the input/output channel counts match, the
    shortcut is the raw input; otherwise a 1x1 strided convolution projects
    the *pre-activated* input onto the output shape.
    """

    def __init__(self, in_planes, out_planes, stride, droprate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.droprate = droprate
        self.equalInOut = in_planes == out_planes
        # Projection shortcut only when the shapes differ; identity otherwise.
        if self.equalInOut:
            self.convShortcut = None
        else:
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                          stride=stride, padding=0, bias=False)

    def forward(self, x):
        # Shared pre-activation feeds conv1 and, when projecting, the shortcut.
        preact = self.relu1(self.bn1(x))
        residual = x if self.equalInOut else self.convShortcut(preact)
        out = self.relu2(self.bn2(self.conv1(preact)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(residual, out)
class QuantizeWrapper(QuantizeWrapperBase):
    """Wraps a Keras layer so its kernel and inputs are fake-quantized.

    ``build`` allocates min/max calibration variables for the wrapped layer's
    weights (weighted layers only) and for each input that should be
    quantized; ``call`` runs the weights/inputs through ``FakeQuantize``
    before delegating to the wrapped layer's own ``call``.
    """

    def __init__(self, layer, **kwargs):
        # ``kwargs['axis']`` optionally overrides the per-channel
        # quantization axis (default: last axis).
        super().__init__(layer, **kwargs)
        self.kernel = 'kernel'
        self.kernel_weights = None
        self.channel_axis = kwargs.get('axis', (- 1))
        # DepthwiseConv2D stores its weights under a different attribute name
        # and its channel dimension sits at axis 2 of the kernel tensor.
        if (self._layer_class == 'DepthwiseConv2D'):
            self.kernel = 'depthwise_kernel'
            self.channel_axis = 2
        # Multi-input layers quantize only a subset of their inputs;
        # ``query_input_index`` (from the base class) fills ``self.index``.
        if (self._layer_class in layer_wise_config['multiple_inputs_layers']):
            self.query_input_index()

    def build(self, input_shape):
        """Create min/max range variables for the kernel and selected inputs."""
        super().build(input_shape)
        if (self._layer_class in layer_wise_config['weighted_layers']):
            self.kernel_weights = getattr(self.layer, self.kernel)
            # One (min, max) pair per output channel along ``channel_axis``.
            (weight_min, weight_max) = self._init_min_max_variables(name=self.kernel_weights.name.split(':')[0], shape=self.kernel_weights.shape[self.channel_axis])
            self.weight_range = {'min_var': weight_min, 'max_var': weight_max}
            self._trainable_weights.append(self.kernel_weights)
        # A plain TensorShape means a single input; a list/tuple of shapes
        # means the layer takes several inputs.
        num_input = 1
        if (not isinstance(input_shape, tf.TensorShape)):
            num_input = len(input_shape)
        self.query_input_index()
        # Default to quantizing every input when no subset was selected.
        if (not self.index):
            self.index = [i for i in range(num_input)]
        if (num_input == 1):
            # Single scalar (per-tensor) range for the lone input.
            (inputs_min, inputs_max) = self._init_min_max_variables(name=(self.layer.name + '_input{}'.format(0)), shape=None)
            self.inputs_range = {'min_var': inputs_min, 'max_var': inputs_max}
        else:
            # One (possibly empty) range dict per input; only the indices in
            # ``self.index`` get real min/max variables.
            self.inputs_range = []
            for i in range(num_input):
                self.inputs_range.append({})
                if (i in self.index):
                    (inputs_min, inputs_max) = self._init_min_max_variables(name=(self.layer.name + '_input{}'.format(i)), shape=None)
                    self.inputs_range[i] = {'min_var': inputs_min, 'max_var': inputs_max}

    def call(self, inputs, training=None):
        """Fake-quantize weights/inputs, then invoke the wrapped layer."""
        if (training is None):
            training = tf.keras.backend.learning_phase()
        if (self._layer_class in layer_wise_config['weighted_layers']):
            weight_quantizer = FakeQuantize(per_channel=True, channel_axis=self.channel_axis)
            quantized_weight = weight_quantizer(self.kernel_weights, self.weight_range, training)
            # Swap the layer's kernel attribute for its quantized version so
            # the wrapped ``call`` below uses the fake-quantized weights.
            setattr(self.layer, self.kernel, quantized_weight)
        quantized_inputs = inputs
        inputs_quantizer = FakeQuantize(per_channel=False, channel_axis=self.channel_axis)
        if (not isinstance(quantized_inputs, tf.Tensor)):
            # Multi-input case: quantize only the selected input indices.
            for i in range(len(quantized_inputs)):
                if (i in self.index):
                    quantized_inputs[i] = inputs_quantizer(inputs[i], self.inputs_range[i], training)
        else:
            quantized_inputs = inputs_quantizer(inputs, self.inputs_range, training)
        # Forward ``training`` only when the wrapped layer's call accepts it.
        args = tf_inspect.getfullargspec(self.layer.call).args
        if ('training' in args):
            outputs = self.layer.call(quantized_inputs, training=training)
        else:
            outputs = self.layer.call(quantized_inputs)
        return outputs
class MLP(nn.Module):
    """Multi-layer perceptron with optional BatchNorm on the hidden layers.

    With batch norms the per-hidden-layer order is
    Linear -> ReLU(inplace) -> BatchNorm -> Dropout; without them it is
    Dropout -> Linear -> ReLU, plus one extra dropout before the output
    projection. ``forward`` accepts either a graph object exposing
    ``data.graph['node_feat']`` or, when ``input_tensor=True``, a plain
    tensor.
    """

    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 dropout=0.5, is_bns=True):
        super(MLP, self).__init__()
        self.lins = nn.ModuleList()
        self.is_bns = is_bns
        if is_bns:
            self.bns = nn.ModuleList()
        if num_layers == 1:
            # Degenerate case: a single projection, no hidden layers.
            self.lins.append(nn.Linear(in_channels, out_channels))
        else:
            self.lins.append(nn.Linear(in_channels, hidden_channels))
            if is_bns:
                self.bns.append(nn.BatchNorm1d(hidden_channels))
            for _ in range(num_layers - 2):
                self.lins.append(nn.Linear(hidden_channels, hidden_channels))
                if is_bns:
                    self.bns.append(nn.BatchNorm1d(hidden_channels))
            self.lins.append(nn.Linear(hidden_channels, out_channels))
        self.dropout = dropout

    def reset_parameters(self):
        """Re-initialize every linear (and batch-norm) layer."""
        for linear in self.lins:
            linear.reset_parameters()
        if self.is_bns:
            for norm in self.bns:
                norm.reset_parameters()

    def forward(self, data, input_tensor=False):
        x = data if input_tensor else data.graph['node_feat']
        hidden, head = self.lins[:-1], self.lins[-1]
        if self.is_bns:
            for idx, linear in enumerate(hidden):
                x = linear(x)
                x = F.relu(x, inplace=True)
                x = self.bns[idx](x)
                x = F.dropout(x, p=self.dropout, training=self.training)
            return head(x)
        for linear in hidden:
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = F.relu(linear(x))
        x = F.dropout(x, p=self.dropout, training=self.training)
        return head(x)
def build_train_meta(args):
    """Build and save the train/avail/valid split metadata for font training.

    Reads the full trainset dictionary (``<saving_dir>/meta/trainset_dict.json``)
    plus the seen/unseen unicode lists, holds out every font found in
    ``args.val_font_dir`` from the training fonts, and writes the resulting
    split to ``<saving_dir>/meta/train.json``.

    Fix vs. the previous version: font and unicode selections were built via
    raw ``set`` difference/intersection, which made the ordering in the
    emitted JSON nondeterministic across runs. Selections now filter the
    original lists, preserving their order, so output files are reproducible.
    """
    train_meta_root = os.path.join(args.saving_dir, 'meta')
    save_path = os.path.join(train_meta_root, 'train.json')
    meta_file = os.path.join(train_meta_root, 'trainset_dict.json')
    with open(meta_file, 'r') as f_in:
        original_meta = json.load(f_in)
    with open(args.seen_unis_file) as f:
        seen_unis = json.load(f)
    with open(args.unseen_unis_file) as f:
        unseen_unis = json.load(f)
    all_style_fonts = list(original_meta.keys())
    # Every font file present in the validation font dir is held out.
    unseen_ttf_dir = args.val_font_dir
    unseen_style_fonts = [os.path.basename(x) for x in glob.glob(unseen_ttf_dir + '/*')]
    unseen_set = set(unseen_style_fonts)
    # Order-preserving difference: keep the original meta ordering.
    train_style_fonts = [f for f in all_style_fonts if f not in unseen_set]
    train_dict = {'train': {}, 'avail': {}, 'valid': {}}
    seen_set = set(seen_unis)
    for style_font in train_style_fonts:
        # Train only on glyphs both available in this font and "seen";
        # order-preserving intersection keeps per-font ordering stable.
        avail_unicodes = original_meta[style_font]
        train_dict['train'][style_font] = [u for u in avail_unicodes if u in seen_set]
    for style_font in all_style_fonts:
        train_dict['avail'][style_font] = original_meta[style_font]
    print('all_style_fonts:', len(all_style_fonts))
    print('train_style_fonts:', len(train_dict['train']))
    print('val_style_fonts:', len(unseen_style_fonts))
    print('seen_unicodes: ', len(seen_unis))
    print('unseen_unicodes: ', len(unseen_unis))
    train_dict['valid'] = {'seen_fonts': list(train_dict['train'].keys()),
                           'unseen_fonts': unseen_style_fonts,
                           'seen_unis': seen_unis,
                           'unseen_unis': unseen_unis}
    with open(save_path, 'w') as fout:
        json.dump(train_dict, fout, ensure_ascii=False, indent=4)
def collect_trajectory(agent, reward):
    """Route the agent's latest (observation, action, reward) transition.

    Negative reward: the pending replay buffer is discarded. Positive reward:
    the transition is buffered and then the whole buffer is flushed into the
    agent's permanent transition store. Zero reward: the transition is only
    buffered, pending a later positive/negative outcome.

    NOTE(review): reconstructed from whitespace-mangled source; the final
    ``else`` is read as the ``reward == 0`` branch of the if/elif chain (the
    only coherent interpretation) — confirm against the original file.
    """
    if reward < 0:
        # Failure: throw away the whole buffered trajectory.
        agent.replay_buffer.clear()
    elif reward > 0:
        agent.replay_buffer.add(agent._last_observation, agent.action, reward, False)
        # Success: commit every buffered step to the agent's transition store.
        # NOTE(review): ``reward`` is rebound by the unpacking below, so each
        # stored step keeps its own buffered reward — confirm this is intended.
        while agent.replay_buffer.size() > 0:
            experience = agent.replay_buffer.get_sample()
            (state, action, reward, _) = experience
            agent._store_transition(state, action, reward, False)
    else:
        # Neutral outcome: keep buffering until the trajectory resolves.
        agent.replay_buffer.add(agent._last_observation, agent.action, reward, False)
def rotate(molecule, angle, axis, fix_com=False):
    """Rotate the first conformer of ``molecule`` about ``axis`` by ``angle``.

    The coordinates are copied into an ASE ``Atoms`` object built from dummy
    carbon atoms purely to reuse ``Atoms.rotate``; element identity does not
    affect the geometric transform. The conformer is updated in place and the
    same molecule object is returned.

    Parameters:
        molecule: RDKit Mol with at least one conformer (modified in place).
        angle: rotation angle — presumably degrees, per ASE's convention;
            TODO confirm.
        axis: axis spec accepted by ``ase.Atoms.rotate`` (e.g. 'x'/'y'/'z'
            or a 3-vector).
        fix_com: if True, rotate about the conformer's centroid; otherwise
            about the coordinate origin.
    """
    c = molecule.GetConformers()[0]
    d = np.array(c.GetPositions())
    ori_mean = np.mean(d, 0, keepdims=True)
    if fix_com:
        # Shift to the centroid so the rotation leaves the center fixed.
        d = (d - ori_mean)
    atoms = []
    for i in range(len(d)):
        atoms.append(Atom('C', d[i]))
    atoms = Atoms(atoms)
    atoms.rotate(angle, axis)
    new_d = atoms.get_positions()
    if fix_com:
        # Undo the centroid shift.
        new_d += ori_mean
    for i in range(molecule.GetNumAtoms()):
        c.SetAtomPosition(i, new_d[i])
    return molecule
class GateConfigSchema(BaseSchema):
    """Schema for a single gate entry in a backend configuration.

    Each gate declares its name, symbolic parameter names, and an OpenQASM
    definition; the remaining fields are optional.
    """
    # Gate name (e.g. 'u1', 'cx').
    name = String(required=True)
    # Names of the gate's symbolic parameters.
    parameters = List(String(), required=True)
    # OpenQASM snippet defining the gate.
    qasm_def = String(required=True)
    # Qubit groups the gate may act on; each entry lists at least one qubit.
    coupling_map = List(List(Integer(), validate=Length(min=1)), validate=Length(min=1))
    # Per-coupling latency bits, each constrained to 0/1 — presumably one
    # flag per channel; confirm against the backend spec.
    latency_map = List(List(Integer(validate=OneOf([0, 1])), validate=Length(min=1)), validate=Length(min=1))
    # Whether the gate supports conditional execution.
    conditional = Boolean()
    # Human-readable description of the gate.
    description = String()
class CompositeTask(abstract_task.AbstractTask):
    """Task that aggregates several sub-tasks into one.

    Rewards are summed across sub-tasks, and an episode resets as soon as
    any sub-task requests a reset or ``timeout_steps`` is reached.
    """

    def __init__(self, *tasks, timeout_steps=np.inf):
        self._tasks = tasks
        self._timeout_steps = timeout_steps

    def reset(self, state, meta_state):
        """Propagate the reset to every sub-task."""
        for sub_task in self._tasks:
            sub_task.reset(state, meta_state)

    def reward(self, state, meta_state, step_count):
        """Return (summed sub-task reward, whether any reset was requested)."""
        total_reward = 0
        # Timing out forces a reset regardless of the sub-tasks.
        should_reset = step_count >= self._timeout_steps
        for sub_task in self._tasks:
            sub_reward, sub_should_reset = sub_task.reward(state, meta_state, step_count)
            total_reward += sub_reward
            should_reset = should_reset or sub_should_reset
        return (total_reward, should_reset)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.