code |
|---|
class NDCG(BaseMetric):
def __init__(self, recommendations, config, params, eval_objects):
super().__init__(recommendations, config, params, eval_objects)
self._cutoff = self._evaluation_objects.cutoff
self._relevance = self._evaluation_objects.relevance.discounted_relevance
self._rel_threshold = self._evaluation_objects.relevance._rel_threshold
@staticmethod
def name():
return 'nDCG'
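# nDCG@k = DCG@k / IDCG@k, with DCG@k = sum_{r<k} gain(item_r) * discount(r); assuming the usual
# convention, logarithmic_ranking_discount(r) = 1 / log2(r + 2) for 0-based rank r.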
def compute_idcg(self, user, cutoff: int) -> float:
gains: t.List = sorted(list(self._relevance.get_user_rel_gains(user).values()))
n: int = min(len(gains), cutoff)
m: int = len(gains)
return sum(map((lambda g, r: (gains[((m - r) - 1)] * self._relevance.logarithmic_ranking_discount(r))), gains, range(n)))
def compute_user_ndcg(self, user_recommendations: t.List, user, cutoff: int) -> float:
idcg: float = self.compute_idcg(user, cutoff)
dcg: float = sum([(self._relevance.get_rel(user, x) * self._relevance.logarithmic_ranking_discount(r)) for (r, x) in enumerate([item for (item, _) in user_recommendations]) if (r < cutoff)])
return ((dcg / idcg) if (dcg > 0) else 0)
def __user_ndcg(self, user_recommendations: t.List, user, cutoff: int):
ndcg: float = self.compute_user_ndcg(user_recommendations[:cutoff], user, cutoff)
return ndcg
def eval_user_metric(self):
return {u: self.__user_ndcg(u_r, u, self._cutoff) for (u, u_r) in self._recommendations.items() if len(self._relevance.get_user_rel(u))} |
def read_tfrecord(example, train):
features = {'image': tf.io.FixedLenFeature([], tf.string), 'class': tf.io.FixedLenFeature([], tf.int64)}
example = tf.io.parse_single_example(example, features)
image = tf.image.decode_jpeg(example['image'], channels=3)
image = (tf.cast(image, tf.float32) / 255.0)
if train:
image = crop_resize.inception_crop(image)
image = tf.image.random_flip_left_right(image)
else:
image = tf.image.central_crop(image, central_fraction=0.875)
image = tf.image.resize(image, (RESIZE, RESIZE))
image = tf.reshape(image, (RESIZE, RESIZE, 3))
class_label = tf.cast(example['class'], tf.int32)
return (image, class_label) |
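A minimal sketch of how a parse function like read_tfrecord is typically wired into a tf.data input pipeline (TF2 assumed); the file pattern, shuffle buffer, and batch size below are illustrative assumptions, not part of the original code.
import functools
import tensorflow as tf

def make_dataset(file_pattern, train, batch_size=32):
    # list TFRecord shards, decode/augment with read_tfrecord, then batch and prefetch
    files = tf.data.Dataset.list_files(file_pattern, shuffle=train)
    ds = tf.data.TFRecordDataset(files)
    ds = ds.map(functools.partial(read_tfrecord, train=train),
                num_parallel_calls=tf.data.AUTOTUNE)
    if train:
        ds = ds.shuffle(2048)
    return ds.batch(batch_size).prefetch(tf.data.AUTOTUNE)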
def make_examples(DATA_DIR, train_file, predict_file, evaluate, version_2_with_negative):
processor = (SquadV2Processor() if version_2_with_negative else SquadV1Processor())
if evaluate:
examples = processor.get_dev_examples(DATA_DIR, filename=predict_file)
else:
examples = processor.get_train_examples(DATA_DIR, filename=train_file)
return examples |
def register_Ns3LteEnbMac_methods(root_module, cls):
cls.add_constructor([param('ns3::LteEnbMac const &', 'arg0')])
cls.add_constructor([])
cls.add_method('DoDispose', 'void', [], is_virtual=True)
cls.add_method('DoReceivePhyPdu', 'void', [param('ns3::Ptr< ns3::Packet >', 'p')])
cls.add_method('GetFfMacCschedSapUser', 'ns3::FfMacCschedSapUser *', [])
cls.add_method('GetFfMacSchedSapUser', 'ns3::FfMacSchedSapUser *', [])
cls.add_method('GetLteEnbCmacSapProvider', 'ns3::LteEnbCmacSapProvider *', [])
cls.add_method('GetLteEnbPhySapUser', 'ns3::LteEnbPhySapUser *', [])
cls.add_method('GetLteMacSapProvider', 'ns3::LteMacSapProvider *', [])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('SetFfMacCschedSapProvider', 'void', [param('ns3::FfMacCschedSapProvider *', 's')])
cls.add_method('SetFfMacSchedSapProvider', 'void', [param('ns3::FfMacSchedSapProvider *', 's')])
cls.add_method('SetLteEnbCmacSapUser', 'void', [param('ns3::LteEnbCmacSapUser *', 's')])
cls.add_method('SetLteEnbPhySapProvider', 'void', [param('ns3::LteEnbPhySapProvider *', 's')])
cls.add_method('SetLteMacSapUser', 'void', [param('ns3::LteMacSapUser *', 's')])
return |
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim, max_size=int(1000000.0)):
self.max_size = max_size
self.ptr = 0
self.size = 0
self.state = np.zeros((max_size, state_dim))
self.action = np.zeros((max_size, action_dim))
self.next_state = np.zeros((max_size, state_dim))
self.reward = np.zeros((max_size, 1))
self.not_done = np.zeros((max_size, 1))
self.device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
def add(self, state, action, next_state, reward, done):
self.state[self.ptr] = state
self.action[self.ptr] = action
self.next_state[self.ptr] = next_state
self.reward[self.ptr] = reward
self.not_done[self.ptr] = (1.0 - done)
self.ptr = ((self.ptr + 1) % self.max_size)
self.size = min((self.size + 1), self.max_size)
def sample(self, batch_size):
ind = np.random.randint(self.size, size=batch_size)
return (torch.FloatTensor(self.state[ind]).to(self.device), torch.FloatTensor(self.action[ind]).to(self.device), torch.FloatTensor(self.next_state[ind]).to(self.device), torch.FloatTensor(self.reward[ind]).to(self.device), torch.FloatTensor(self.not_done[ind]).to(self.device)) |
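A minimal usage sketch for the replay buffer above; the state/action dimensions and the dummy transition are illustrative assumptions.
import numpy as np

buffer = ReplayBuffer(state_dim=17, action_dim=6)
# store one (dummy) transition; not_done is stored as 1 - done internally
buffer.add(state=np.zeros(17), action=np.zeros(6), next_state=np.zeros(17), reward=1.0, done=False)
# draw a training batch as torch tensors on the buffer's device
states, actions, next_states, rewards, not_dones = buffer.sample(batch_size=1)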
class ArgoCSVDataset(torch.utils.data.Dataset):
def __init__(self, input_folder, input_preprocessed, args):
self.input_preprocessed = input_preprocessed
self.args = args
if args.use_preprocessed:
with open(input_preprocessed, 'rb') as f:
self.data = pickle.load(f)
else:
self.files = sorted(glob.glob(f'{input_folder}/*.csv'))
if (args.reduce_dataset_size > 0):
self.files = self.files[:args.reduce_dataset_size]
self.argo_reader = ArgoDataExtractor(args)
def __getitem__(self, idx):
if self.args.use_preprocessed:
return self.data[idx]
else:
return self.argo_reader.extract_data(self.files[idx])
def __len__(self):
if self.args.use_preprocessed:
return len(self.data)
else:
return len(self.files) |
def convert_mxnet_to_torch(filename):
import mxnet
save_dict = mxnet.nd.load(filename)
renamed_dict = dict()
bn_param_mx_pt = {'beta': 'bias', 'gamma': 'weight', 'mean': 'running_mean', 'var': 'running_var'}
for (k, v) in save_dict.items():
v = torch.from_numpy(v.asnumpy())
toks = k.split('_')
if ('conv1a' in toks[0]):
renamed_dict['conv1a.weight'] = v
elif ('linear1000' in toks[0]):
pass
elif ('branch' in toks[1]):
pt_name = []
if (toks[0][(- 1)] != 'a'):
pt_name.append(((('b' + toks[0][(- 3)]) + '_') + toks[0][(- 1)]))
else:
pt_name.append(('b' + toks[0][(- 2)]))
if ('res' in toks[0]):
layer_type = 'conv'
last_name = 'weight'
else:
layer_type = 'bn'
last_name = bn_param_mx_pt[toks[(- 1)]]
pt_name.append(((layer_type + '_') + toks[1]))
pt_name.append(last_name)
torch_name = '.'.join(pt_name)
renamed_dict[torch_name] = v
else:
last_name = bn_param_mx_pt[toks[(- 1)]]
renamed_dict[('bn7.' + last_name)] = v
return renamed_dict |
def main(_argv):
if FLAGS.config_path:
with gfile.GFile(FLAGS.config_path) as config_file:
config_flags = yaml.load(config_file)
for (flag_key, flag_value) in config_flags.items():
setattr(FLAGS, flag_key, flag_value)
if isinstance(FLAGS.tasks, string_types):
FLAGS.tasks = _maybe_load_yaml(FLAGS.tasks)
if isinstance(FLAGS.input_pipeline, string_types):
FLAGS.input_pipeline = _maybe_load_yaml(FLAGS.input_pipeline)
input_pipeline_infer = input_pipeline.make_input_pipeline_from_def(FLAGS.input_pipeline, mode=tf.contrib.learn.ModeKeys.INFER, shuffle=False, num_epochs=1)
train_options = training_utils.TrainOptions.load(FLAGS.model_dir)
model_cls = (locate(train_options.model_class) or getattr(models, train_options.model_class))
model_params = train_options.model_params
model_params = _deep_merge_dict(model_params, _maybe_load_yaml(FLAGS.model_params))
model = model_cls(params=model_params, mode=tf.contrib.learn.ModeKeys.INFER)
hooks = []
for tdict in FLAGS.tasks:
if (not ('params' in tdict)):
tdict['params'] = {}
task_cls = (locate(tdict['class']) or getattr(tasks, tdict['class']))
task = task_cls(tdict['params'])
hooks.append(task)
(predictions, _, _) = create_inference_graph(model=model, input_pipeline=input_pipeline_infer, batch_size=FLAGS.batch_size)
saver = tf.train.Saver()
checkpoint_path = FLAGS.checkpoint_path
if (not checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)
def session_init_op(_scaffold, sess):
saver.restore(sess, checkpoint_path)
tf.logging.info('Restored model from %s', checkpoint_path)
scaffold = tf.train.Scaffold(init_fn=session_init_op)
session_creator = tf.train.ChiefSessionCreator(scaffold=scaffold)
with tf.train.MonitoredSession(session_creator=session_creator, hooks=hooks) as sess:
while (not sess.should_stop()):
sess.run([]) |
class BaseOptions():
def __init__(self):
self.initialized = False
def initialize(self, parser):
parser.add_argument('--dataroot', type=str, default='.', help='path to images (should have subfolders train, test etc)')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--loadSize', type=int, default=512, help='scale images to this size')
parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='selects model to use for netD')
parser.add_argument('--netG', type=str, default='unet_256', help='selects model to use for netG')
parser.add_argument('--nnG', type=int, default=9, help='specify nblock for resnet_nblocks, ndown for unet for unet_ndown')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [aligned | single]')
parser.add_argument('--model', type=str, default='apdrawing_gan', help='chooses which model to use. [apdrawing_gan | test]')
parser.add_argument('--use_local', action='store_true', help='use local part network')
parser.add_argument('--comb_op', type=int, default=1, help='use min-pooling(1) or max-pooling(0) for overlapping regions')
parser.add_argument('--lm_dir', type=str, default='dataset/landmark/ALL', help='path to facial landmarks')
parser.add_argument('--bg_dir', type=str, default='dataset/mask/ALL', help='path to background masks')
parser.add_argument('--soft_border', type=int, default=0, help='use mask with soft border')
parser.add_argument('--EYE_H', type=int, default=40, help='EYE_H')
parser.add_argument('--EYE_W', type=int, default=56, help='EYE_W')
parser.add_argument('--NOSE_H', type=int, default=48, help='NOSE_H')
parser.add_argument('--NOSE_W', type=int, default=48, help='NOSE_W')
parser.add_argument('--MOUTH_H', type=int, default=40, help='MOUTH_H')
parser.add_argument('--MOUTH_W', type=int, default=64, help='MOUTH_W')
parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--auxiliary_root', type=str, default='auxiliary', help='auxiliary model folder')
parser.add_argument('--norm', type=str, default='batch', help='instance normalization or batch normalization')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
parser.add_argument('--display_server', type=str, default='http://localhost', help='visdom server of the web display')
parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}')
self.initialized = True
return parser
def gather_options(self):
if (not self.initialized):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
(opt, _) = parser.parse_known_args()
if UseTest:
opt.model = 'test'
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
(opt, _) = parser.parse_known_args()
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
message = ''
message += ' Options \n'
for (k, v) in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if (v != default):
comment = ('\t[default: %s]' % str(default))
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += ' End '
print(message)
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
opt = self.gather_options()
if UseTest:
opt.use_local = True
if opt.use_local:
opt.loadSize = opt.fineSize
opt.isTrain = self.isTrain
if opt.suffix:
suffix = (('_' + opt.suffix.format(**vars(opt))) if (opt.suffix != '') else '')
opt.name = (opt.name + suffix)
self.print_options(opt)
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if (id >= 0):
opt.gpu_ids.append(id)
if (len(opt.gpu_ids) > 0):
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt |
def set_random_seed(seed):
if (seed >= 0):
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed) |
def evaluate(args, model, tokenizer, prefix=''):
eval_task_names = (('mnli', 'mnli-mm') if (args.task_name == 'mnli') else (args.task_name,))
eval_outputs_dirs = ((args.output_dir, (args.output_dir + '-MM')) if (args.task_name == 'mnli') else (args.output_dir,))
results = {}
for (eval_task, eval_output_dir) in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if ((not os.path.exists(eval_output_dir)) and (args.local_rank in [(- 1), 0])):
os.makedirs(eval_output_dir)
args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
if ((args.n_gpu > 1) and (not isinstance(model, torch.nn.DataParallel))):
model = torch.nn.DataParallel(model)
logger.info('***** Running evaluation {} *****'.format(prefix))
logger.info(' Num examples = %d', len(eval_dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc='Evaluating'):
model.eval()
batch = tuple((t.to(args.device) for t in batch))
with torch.no_grad():
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if (args.model_type != 'distilbert'):
inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet', 'albert']) else None)
outputs = model(**inputs)
(tmp_eval_loss, logits) = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if (preds is None):
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = (eval_loss / nb_eval_steps)
if (args.output_mode == 'classification'):
preds = np.argmax(preds, axis=1)
elif (args.output_mode == 'regression'):
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
output_preds_file = os.path.join(args.data_dir, prefix, 'preds.json')
json.dump([max(x, 0) for x in preds.tolist()], open(output_preds_file, 'w'), indent=4)
output_eval_file = os.path.join(args.data_dir, prefix, 'eval_results.txt')
with open(output_eval_file, 'w') as writer:
logger.info('***** Eval results {} *****'.format(prefix))
for key in sorted(result.keys()):
logger.info(' %s = %s', key, str(result[key]))
writer.write(('%s = %s\n' % (key, str(result[key]))))
return results |
def get_mnist_datasets(train_transform, test_transform, train_classes=range(6), open_set_classes=range(6, 10), balance_open_set_eval=False, split_train_val=True, seed=0):
np.random.seed(seed)
train_dataset_whole = CustomMNIST(root=mnist_root, transform=train_transform, train=True)
train_dataset_whole = subsample_classes(train_dataset_whole, include_classes=train_classes)
(train_dataset_split, val_dataset_split) = get_train_val_split(train_dataset_whole)
val_dataset_split.transform = test_transform
test_dataset_known = CustomMNIST(root=mnist_root, transform=test_transform, train=False)
test_dataset_known = subsample_classes(test_dataset_known, include_classes=train_classes)
test_dataset_unknown = CustomMNIST(root=mnist_root, transform=test_transform, train=False)
test_dataset_unknown = subsample_classes(test_dataset_unknown, include_classes=open_set_classes)
if balance_open_set_eval:
(test_dataset_known, test_dataset_unknown) = get_equal_len_datasets(test_dataset_known, test_dataset_unknown)
train_dataset = (train_dataset_split if split_train_val else train_dataset_whole)
val_dataset = (val_dataset_split if split_train_val else test_dataset_known)
all_datasets = {'train': train_dataset, 'val': val_dataset, 'test_known': test_dataset_known, 'test_unknown': test_dataset_unknown}
return all_datasets |
def cot(all_potential_countries) -> operations.GraphOfOperations:
operations_graph = operations.GraphOfOperations()
operations_graph.append_operation(operations.Generate(1, 1))
operations_graph.append_operation(operations.Score(1, False, partial(num_errors, all_potential_countries)))
operations_graph.append_operation(operations.GroundTruth(test_keyword_counting))
return operations_graph |
class TVoid(object):
thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_snap.TVoid_swiginit(self, _snap.new_TVoid(*args))
def Save(self, arg2):
return _snap.TVoid_Save(self, arg2)
def __eq__(self, arg2):
return _snap.TVoid___eq__(self, arg2)
def __lt__(self, arg2):
return _snap.TVoid___lt__(self, arg2)
def GetMemUsed(self):
return _snap.TVoid_GetMemUsed(self)
__swig_destroy__ = _snap.delete_TVoid |
class AugmenterValidationScoresEvaluator(AugmenterEvaluatorBase):
def __init__(self, validation_scorers_dict: Dict[(str, ValidationScorerBase)], namespace='', run_logger=None):
super().__init__(namespace, run_logger)
self.validation_scorers_dict = validation_scorers_dict
def evaluate(self, augmenter, epoch):
self.save_results_dict({name: val_scorer(augmenter) for (name, val_scorer) in self.validation_scorers_dict.items()}, 'metric') |
@pytest.mark.mujoco
def test_mtsac_inverted_double_pendulum():
env_names = ['InvertedDoublePendulum-v2', 'InvertedDoublePendulum-v2']
task_envs = [GarageEnv(env_name=name) for name in env_names]
env = MultiEnvWrapper(task_envs, sample_strategy=round_robin_strategy)
test_envs = MultiEnvWrapper(task_envs, sample_strategy=round_robin_strategy)
deterministic.set_seed(0)
runner = LocalRunner(snapshot_config=snapshot_config)
policy = TanhGaussianMLPPolicy(env_spec=env.spec, hidden_sizes=[32, 32], hidden_nonlinearity=torch.nn.ReLU, output_nonlinearity=None, min_std=np.exp((- 20.0)), max_std=np.exp(2.0))
qf1 = ContinuousMLPQFunction(env_spec=env.spec, hidden_sizes=[32, 32], hidden_nonlinearity=F.relu)
qf2 = ContinuousMLPQFunction(env_spec=env.spec, hidden_sizes=[32, 32], hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1000000.0))
num_tasks = 2
buffer_batch_size = 128
mtsac = MTSAC(policy=policy, qf1=qf1, qf2=qf2, gradient_steps_per_itr=100, max_path_length=100, eval_env=test_envs, env_spec=env.spec, num_tasks=num_tasks, steps_per_epoch=5, replay_buffer=replay_buffer, min_buffer_size=1000.0, target_update_tau=0.005, discount=0.99, buffer_batch_size=buffer_batch_size)
runner.setup(mtsac, env, sampler_cls=LocalSampler)
ret = runner.train(n_epochs=8, batch_size=128, plot=False)
assert (ret > 0) |
def get_survey(data_type, rx_type, tx_type):
n_spacings = np.logspace(0, 2, 3)
y = np.zeros_like(n_spacings)
z = np.full_like(n_spacings, (- 1.0))
a_locations = np.column_stack((((- 1.5) * n_spacings), y, z))
b_locations = np.column_stack(((1.5 * n_spacings), y, z))
m_locations = np.column_stack((((- 0.5) * n_spacings), y, z))
n_locations = np.column_stack(((0.5 * n_spacings), y, z))
sources = []
if (data_type == 'both'):
data_type = ['volt', 'apparent_resistivity']
else:
data_type = [data_type]
if (rx_type == 'both'):
rx_type = ['p', 'd']
else:
rx_type = [rx_type]
if (tx_type == 'both'):
tx_type = ['p', 'd']
else:
tx_type = [tx_type]
for (a, b, m, n) in zip(a_locations, b_locations, m_locations, n_locations):
rx = []
for dtype in data_type:
for rxt in rx_type:
if (rxt == 'p'):
rx.append(dc.receivers.Pole(m, data_type=dtype))
else:
rx.append(dc.receivers.Dipole(locations=[m, n], data_type=dtype))
for txt in tx_type:
if (txt == 'p'):
sources.append(dc.sources.Pole(rx, a))
else:
sources.append(dc.sources.Dipole(rx, location=[a, b]))
return dc.Survey(sources) |
def _get_trajectory_dataset_fn(stack_size: int, trajectory_length: int=1) -> Callable[([tf.data.Dataset], tf.data.Dataset)]:
batch_fn = _BatchToTransition().create_transitions
def make_trajectory_dataset(episode: tf.data.Dataset) -> tf.data.Dataset:
timesteps: tf.data.Dataset = episode[rlds.STEPS]
batched_steps = rlds.transformations.batch(timesteps, size=stack_size, shift=1, drop_remainder=True)
transitions = batched_steps.map(batch_fn)
if (trajectory_length > 1):
transitions = transitions.repeat(2)
transitions = transitions.skip(tf.random.uniform([], 0, trajectory_length, dtype=tf.int64))
trajectory = transitions.batch(trajectory_length, drop_remainder=True)
else:
trajectory = transitions
return trajectory
return make_trajectory_dataset |
def baseline_detaset_find_examples_fn(search_funcs=None, **kwargs):
search_funcs.heuristic_fn = (lambda *args, **lambda_kwargs: 0)
results = dataset_find_adversarial_examples(search_funcs=search_funcs, **kwargs)
return results |
class expectedAlertNondeterministic():
def __init__(self, caller_name, device_type=None, fn_has_device_arg=True):
self.device_type = device_type
self.error_message = (caller_name + ' does not have a deterministic implementation, but you set')
self.fn_has_device_arg = fn_has_device_arg
def __call__(self, fn):
@wraps(fn)
def efail_fn(slf, device, *args, **kwargs):
if ((self.device_type is None) or (self.device_type == slf.device_type)):
deterministic_restore = torch.is_deterministic()
torch.set_deterministic(True)
try:
if self.fn_has_device_arg:
fn(slf, device, *args, **kwargs)
else:
fn(slf, *args, **kwargs)
except RuntimeError as e:
torch.set_deterministic(deterministic_restore)
if (self.error_message not in str(e)):
slf.fail((((('expected non-deterministic error message to start with "' + self.error_message) + '" but got this instead: "') + str(e)) + '"'))
return
else:
torch.set_deterministic(deterministic_restore)
slf.fail('expected a non-deterministic error, but it was not raised')
if self.fn_has_device_arg:
return fn(slf, device, *args, **kwargs)
else:
return fn(slf, *args, **kwargs)
@wraps(fn)
def efail_fn_no_device(slf, *args, **kwargs):
return efail_fn(slf, None, *args, **kwargs)
if self.fn_has_device_arg:
return efail_fn
else:
return efail_fn_no_device |
def test_soft_voting_no_proba(create_X_y):
from sklearn.linear_model import Perceptron
(X, y) = create_X_y
clf = Perceptron()
clf.fit(X, y)
with pytest.raises(ValueError):
DESMI([clf, clf, clf, clf], voting='soft').fit(X, y) |
def threshold(input, threshold, value, inplace=False):
if inplace:
return torch._C._nn.threshold_(input, threshold, value)
return torch._C._nn.threshold(input, threshold, value) |
class GLPNFeatureExtractor(GLPNImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use GLPNImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs) |
def ct_tokenizer(nlp):
prefix_re = re.compile('^([\\["\'()*+-?/<>#%]+|[><][=])+')
suffix_re = re.compile('([\\]"\'),-.:;*]|\'s)$')
infix_re = re.compile('[%(),-./;=?]+')
tokenizer = Tokenizer(nlp.vocab, prefix_search=prefix_re.search, suffix_search=suffix_re.search, infix_finditer=infix_re.finditer, token_match=token_match)
special_cases = [fp.format(os.path.dirname(__file__)) for fp in ['{}/specialist_special_cases.txt', '{}/special_cases.txt']]
add_special_cases(tokenizer, special_cases)
return tokenizer |
def build_norm_layer(cfg, num_features, postfix=''):
assert (isinstance(cfg, dict) and ('type' in cfg))
cfg_ = cfg.copy()
layer_type = cfg_.pop('type')
if (layer_type not in norm_cfg):
raise KeyError('Unrecognized norm type {}'.format(layer_type))
else:
(abbr, norm_layer) = norm_cfg[layer_type]
if (norm_layer is None):
raise NotImplementedError
assert isinstance(postfix, (int, str))
name = (abbr + str(postfix))
requires_grad = cfg_.pop('requires_grad', True)
cfg_.setdefault('eps', 1e-05)
if (layer_type != 'GN'):
layer = norm_layer(num_features, **cfg_)
else:
assert ('num_groups' in cfg_)
layer = norm_layer(num_channels=num_features, **cfg_)
for param in layer.parameters():
param.requires_grad = requires_grad
return (name, layer) |
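A short usage sketch (not from the original source), assuming an mmcv-style norm_cfg registry that maps 'BN' to ('bn', nn.BatchNorm2d):
name, layer = build_norm_layer(dict(type='BN', requires_grad=True), num_features=64, postfix=1)
# name == 'bn1'; layer is nn.BatchNorm2d(64, eps=1e-05) with all parameters set to requires_grad=True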
def TF2FLRD(filenames, batchsize=10, buffersize=100, fetchbatch=2, shuffle=True, parse=_parse_, oneshot=False):
fetchsize = (batchsize * fetchbatch)
train_dataset = tf.data.TFRecordDataset(filenames=filenames)
train_dataset = train_dataset.prefetch(fetchsize)
train_dataset = train_dataset.map(parse)
if shuffle:
train_dataset = train_dataset.shuffle(buffersize)
train_dataset = train_dataset.batch(batchsize)
if oneshot:
return train_dataset.make_one_shot_iterator()
else:
return train_dataset.make_initializable_iterator() |
class VarLSTM(VarRNNBase):
def __init__(self, *args, **kwargs):
super(VarLSTM, self).__init__(*args, mode='LSTM', Cell=nn.LSTMCell, **kwargs)
def forward(self, x, hx=None):
return super(VarLSTM, self).forward(x, hx) |
class FeatureAlphaDropout(_DropoutNd):
def forward(self, input: Tensor) -> Tensor:
return F.feature_alpha_dropout(input, self.p, self.training) |
def add_noise(images, mean=0, std=0.1):
normal_dst = Normal(mean, std)
noise = normal_dst.sample(images.shape)
noisy_image = (noise + images)
return noisy_image |
def get_random_string(length: int) -> str:
letters = string.ascii_lowercase
result_str = ''.join((random.choice(letters) for _ in range(length)))
return result_str |
def init_cnn(m):
if (getattr(m, 'bias', None) is not None):
nn.init.constant_(m.bias, 0)
if isinstance(m, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(m.weight)
for l in m.children():
init_cnn(l) |
def linear_synthetic_policy_continuous(context: np.ndarray) -> np.ndarray:
check_array(array=context, name='context', expected_dim=2)
return context.mean(1) |
@pytest.mark.parametrize('nuclide_name', ['Ni-56', 'Fe-52', 'Cr-48'])
def test_decay_energy_chain(gamma_ray_simulation_state, atomic_dataset, nuclide_name):
nuclide = rd.Nuclide(nuclide_name)
isotopic_mass_fractions = gamma_ray_simulation_state.composition.isotopic_mass_fraction
composition = gamma_ray_simulation_state.composition
cell_masses = composition.calculate_cell_masses(gamma_ray_simulation_state.geometry.volume)
iso_dict = create_isotope_dicts(isotopic_mass_fractions, cell_masses)
inventories_dict = create_inventories_dict(iso_dict)
gamma_ray_lines = atomic_dataset.decay_radiation_data
(Z, A) = (nuclide.Z, nuclide.A)
total_decays = calculate_total_decays(inventories_dict, (1.0 * u.s))
(average_energies, _, _) = calculate_average_energies(isotopic_mass_fractions, gamma_ray_lines)
decay_chain_energy = decay_chain_energies(average_energies, total_decays)
expected = (total_decays[0][(Z, A)][nuclide_name] * average_energies[nuclide_name])
actual = decay_chain_energy[0][(Z, A)][nuclide_name]
npt.assert_almost_equal(expected, actual) |
class CC_head(nn.Module):
def __init__(self, indim, outdim, scale_cls=10.0, learn_scale=True, normalize=True):
super().__init__()
self.L = weight_norm(nn.Linear(indim, outdim, bias=False), name='weight', dim=0)
self.scale_cls = nn.Parameter(torch.FloatTensor(1).fill_(scale_cls), requires_grad=learn_scale)
self.normalize = normalize
def forward(self, features):
if (features.dim() == 4):
if self.normalize:
features = F.normalize(features, p=2, dim=1, eps=1e-12)
features = F.adaptive_avg_pool2d(features, 1).squeeze_((- 1)).squeeze_((- 1))
assert (features.dim() == 2)
x_normalized = F.normalize(features, p=2, dim=1, eps=1e-12)
self.L.weight.data = F.normalize(self.L.weight.data, p=2, dim=1, eps=1e-12)
cos_dist = self.L(x_normalized)
classification_scores = (self.scale_cls * cos_dist)
return classification_scores |
def index(request):
response = HttpResponse()
response.set_cookie('cookie', 'value')
return response |
def map_aa_idx_to_tok_set(esm_sampler_fixture):
return set((esm_sampler_fixture.model.alphabet.get_tok(idx) for idx in esm_sampler_fixture.valid_aa_idx)) |
def signal_name(sig):
if (sig == SIGHUP):
return 'hangup'
if (sig == SIGINT):
return 'interrupt'
if (sig == SIGQUIT):
return 'quit'
if (sig == SIGILL):
return 'illegal instruction'
if (sig == SIGABRT):
return 'abort'
if (sig == SIGFPE):
return 'floating point exception'
if (sig == SIGKILL):
return 'kill signal'
if (sig == SIGSEGV):
return 'segmentation fault'
if (sig == SIGPIPE):
return 'broken pipe'
if (sig == SIGALRM):
return 'alarm'
if (sig == SIGTERM):
return 'terminate'
if (sig == SIGBUS):
return 'bus error'
return ('signal %s' % sig) |
def compute_rouge_approximation(pred_summary, groundtruth):
pred_counts = Counter()
for sent in pred_summary:
pred_counts.update([k for k in sent.split() if (k not in string.punctuation)])
ref_counts = {}
for (i, summary) in enumerate(groundtruth):
ref_counts[i] = Counter()
for sent in summary:
ref_counts[i].update(Counter([k for k in sent.split() if (k not in string.punctuation)]))
match = 0
for tok in pred_counts:
match += sum([min(pred_counts[tok], ref_counts[x][tok]) for x in ref_counts.keys()])
prec_denom = (len(ref_counts.keys()) * sum(pred_counts.values()))
if (prec_denom == 0):
precision = 0
else:
precision = (match / prec_denom)
recall_denom = sum([sum(ref_counts[x].values()) for x in ref_counts])
if (recall_denom == 0):
recall = 0
else:
recall = (match / recall_denom)
if ((precision + recall) == 0):
return 0
else:
return (((2 * precision) * recall) / (precision + recall)) |
def masked_mse_loss(scaler, null_val):
def loss(preds, labels):
if scaler:
preds = scaler.inverse_transform(preds)
labels = scaler.inverse_transform(labels)
return masked_mse_tf(preds=preds, labels=labels, null_val=null_val)
return loss |
def main():
for att in (0, 1):
steps = list(range(100, 2600, 100))
logdir = os.path.join('logdirs/-nl2code-hearthstone-fef2c5b', 'att{}'.format(att))
for step in steps:
if (not os.path.exists(os.path.join(logdir, 'model_checkpoint-{:08d}'.format(step)))):
continue
if os.path.exists(os.path.join(logdir, 'infer-val-step{:05d}-bs1.jsonl'.format(step))):
continue
infer_command = ('python infer.py --config configs/hearthstone/nl2code.jsonnet --logdir {logdir} --output __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl ' + '--step {step} --section val --beam-size 1').format(logdir=logdir, step=step)
print(infer_command) |
class MLP(tf.keras.layers.Layer):
def __init__(self, num_layers, hidden_dim, output_dim):
super(MLP, self).__init__()
self.linear_or_not = True
self.num_layers = num_layers
if (num_layers < 1):
raise ValueError('number of layers should be positive!')
elif (num_layers == 1):
self.linear = Linear_model(output_dim=output_dim)
else:
self.linear_or_not = False
self.multi = Multi_model(layers=num_layers, hidden_dim=hidden_dim, output_dim=output_dim)
def call(self, input_features):
if self.linear_or_not:
return self.linear(input_features)
else:
return self.multi(input_features) |
def get_start_end_idx(video_size, clip_size, clip_idx, num_clips, use_offset=False):
delta = max((video_size - clip_size), 0)
if (clip_idx == (- 1)):
start_idx = random.uniform(0, delta)
elif use_offset:
if (num_clips == 1):
start_idx = math.floor((delta / 2))
else:
start_idx = (clip_idx * math.floor((delta / (num_clips - 1))))
else:
start_idx = ((delta * clip_idx) / num_clips)
end_idx = ((start_idx + clip_size) - 1)
return (start_idx, end_idx) |
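A quick worked example of the deterministic (non-random) branch, using illustrative numbers that are not from the original source:
start, end = get_start_end_idx(video_size=300, clip_size=64, clip_idx=3, num_clips=10)
# delta = 300 - 64 = 236, so start = 236 * 3 / 10 = 70.8 and end = 70.8 + 64 - 1 = 133.8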
def test_gpflow_reparam_sampler_returns_reparam_sampler_with_correct_samples() -> None:
num_samples = 20000
sampler = _QuadraticPredictor().reparam_sampler(num_samples)
samples = sampler.sample(tf.constant([[2.5]], gpflow.default_float()))
assert (samples.shape == [num_samples, 1, 1])
sample_mean = tf.reduce_mean(samples, axis=0)
sample_variance = tf.reduce_mean(((samples - sample_mean) ** 2))
linear_error = (1 / tf.sqrt(tf.cast(num_samples, tf.float32)))
npt.assert_allclose(sample_mean, [[6.25]], rtol=linear_error)
npt.assert_allclose(sample_variance, 1.0, rtol=(2 * linear_error)) |
def cnn(input_var, filters, strides, name, padding, hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer()):
with tf.compat.v1.variable_scope(name):
h = input_var
for (index, (filter_iter, stride)) in enumerate(zip(filters, strides)):
_stride = [1, stride, stride, 1]
h = _conv(h, 'h{}'.format(index), filter_iter[1], filter_iter[0], _stride, hidden_w_init, hidden_b_init, padding)
if (hidden_nonlinearity is not None):
h = hidden_nonlinearity(h)
dim = tf.reduce_prod(h.get_shape()[1:].as_list())
return tf.reshape(h, [(- 1), dim]) |
def code_for_backward_function(module: 'daceml.torch.DaceModule', forward_sdfg: dace.SDFG, backward_sdfg: dace.SDFG, backward_result: BackwardResult, forwarded_arrays: Dict[(str, data.Data)]) -> str:
(inputs, outputs) = get_arglist(module)
sdfg_name = forward_sdfg.name
ret_str = return_type_str(outputs)
outputs_with_forwarded_outputs = copy.deepcopy(outputs)
outputs_with_forwarded_outputs.extend((n for n in forwarded_arrays if ((n not in inputs) and (n not in outputs))))
(fwd_ptr_init_code, fwd_sdfg_call_arguments, _) = argument_codegen(forward_sdfg, module.dace_model.clean_weights, inputs, outputs_with_forwarded_outputs)
bwd_inputs = (list(backward_result.given_grad_names.values()) + list(forwarded_arrays))
bwd_outputs = list(backward_result.required_grad_names.values())
(bwd_ptr_init_code, bwd_sdfg_call_arguments, _) = argument_codegen(backward_sdfg, module.dace_model.clean_weights, bwd_inputs, bwd_outputs, guard_contiguous=list(backward_result.given_grad_names.values()))
saved_io_for_backward = [n for n in forwarded_arrays if ((n in inputs) or (n in outputs))]
other_saved_for_backward = [n for n in forwarded_arrays if ((n not in inputs) and (n not in outputs))]
return f'''
{get_header(forward_sdfg, backward_sdfg, inputs, outputs, module.use_cuda)}
class {sdfg_name}Function : public torch::autograd::Function<{sdfg_name}Function> {{
public:
static
{ret_str}
forward(
AutogradContext *ctx,
int64_t fwd_handle_ptr, int64_t bwd_handle_ptr, {', '.join((f'const Tensor& {name}_' for name in inputs))}) {{
at::AutoNonVariableTypeMode g;
// initialize outputs
{initialize_outputs_code(module, outputs_with_forwarded_outputs)}
{fwd_ptr_init_code}
// get SDFG state handle
{forward_sdfg.name}Handle_t handle = reinterpret_cast<{forward_sdfg.name}Handle_t>(fwd_handle_ptr);
// call SDFG
__program_{forward_sdfg.name}(handle, {fwd_sdfg_call_arguments});
// save inputs/outputs for backward
{(f"ctx->save_for_backward({{{', '.join((f'{n}' for n in saved_io_for_backward))}}});" if saved_io_for_backward else '')}
// save non-inputs/outputs
{save_non_inputs_outputs(other_saved_for_backward)}
// save bwd handle
ctx->saved_data["bwd_handle"] = bwd_handle_ptr;
// return to torch
return {(f'{outputs[0]}' if (len(outputs) == 1) else f"{{{', '.join((o for o in outputs))}}}")};
}}
static tensor_list backward(AutogradContext *ctx, tensor_list grad_outputs) {{
// recover bwd_handle_ptr
int64_t bwd_handle_ptr = ctx->saved_data.find("bwd_handle")->second.toInt();
// recover saved values
{recover_saved_inputs_outputs(saved_io_for_backward, other_saved_for_backward)}
// create grad values
// NOTE, it might make sense take these from .grad()
{setup_grad_values(backward_result, backward_sdfg, outputs)}
{bwd_ptr_init_code}
// get SDFG state handle
{backward_sdfg.name}Handle_t handle = reinterpret_cast<{backward_sdfg.name}Handle_t>(bwd_handle_ptr);
// call bwd SDFG
__program_{backward_sdfg.name}(handle, {bwd_sdfg_call_arguments});
// return calculated grads in correct order
// first two grads are None (these are the grads for the handle ptrs)
return {{
Tensor(), Tensor(), {', '.join(((backward_result.required_grad_names[i] if (i in backward_result.required_grad_names) else 'Tensor()') for i in inputs))}
}};
}}
}};
{ret_str}
{sdfg_name}_autograd(int64_t handle_ptr, int64_t bwd_handle_ptr, {','.join((f'const Tensor& {name}_' for name in inputs))}) {{
return {sdfg_name}Function::apply(
handle_ptr, bwd_handle_ptr, {', '.join((f'{name}_' for name in inputs))}
);
}}
TORCH_LIBRARY_IMPL(daceml_{sdfg_name}, Autograd{('CUDA' if module.use_cuda else 'CPU')}, m) {{
m.impl("{sdfg_name}", {sdfg_name}_autograd);
}}
''' |
def compute_boundary_distance(mesh: fenics.Mesh, boundaries: Optional[fenics.MeshFunction]=None, boundary_idcs: Optional[List[Union[(int, str)]]]=None, tol: float=0.1, max_iter: int=10) -> fenics.Function:
function_space = fenics.FunctionSpace(mesh, 'CG', 1)
dx = measure.NamedMeasure('dx', mesh)
comm = mesh.mpi_comm()
ksp_options = copy.deepcopy(_utils.linalg.iterative_ksp_options)
u = fenics.TrialFunction(function_space)
v = fenics.TestFunction(function_space)
u_curr = fenics.Function(function_space)
u_prev = fenics.Function(function_space)
norm_u_prev = fenics.sqrt(fenics.dot(fenics.grad(u_prev), fenics.grad(u_prev)))
if ((boundaries is not None) and (boundary_idcs is not None)):
if (len(boundary_idcs) > 0):
bcs = _utils.create_dirichlet_bcs(function_space, fenics.Constant(0.0), boundaries, boundary_idcs)
else:
bcs = fenics.DirichletBC(function_space, fenics.Constant(0.0), fenics.CompiledSubDomain('on_boundary'))
else:
bcs = fenics.DirichletBC(function_space, fenics.Constant(0.0), fenics.CompiledSubDomain('on_boundary'))
lhs = (fenics.dot(fenics.grad(u), fenics.grad(v)) * dx)
rhs = ((fenics.Constant(1.0) * v) * dx)
_utils.assemble_and_solve_linear(lhs, rhs, bcs, fun=u_curr, ksp_options=ksp_options, comm=comm)
rhs = (fenics.dot((fenics.grad(u_prev) / norm_u_prev), fenics.grad(v)) * dx)
residual_form = (pow((fenics.sqrt(fenics.dot(fenics.grad(u_curr), fenics.grad(u_curr))) - fenics.Constant(1.0)), 2) * dx)
res_0 = np.sqrt(fenics.assemble(residual_form))
for _ in range(max_iter):
u_prev.vector().vec().aypx(0.0, u_curr.vector().vec())
u_prev.vector().apply('')
_utils.assemble_and_solve_linear(lhs, rhs, bcs, fun=u_curr, ksp_options=ksp_options, comm=comm)
res = np.sqrt(fenics.assemble(residual_form))
if (res <= (res_0 * tol)):
break
return u_curr |
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv = nn.Conv2d(6, 16, 5)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv(x)), 2)
x = x.view((- 1), 256)
return F.relu(x) |
class PoolFormerGroupNorm(nn.GroupNorm):
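# GroupNorm with a single group normalizes each sample over all channels and spatial
# positions jointly, i.e. it acts as a LayerNorm over (C, H, W).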
def __init__(self, num_channels, **kwargs):
super().__init__(1, num_channels, **kwargs) |
def _solve_expression(f, x, explicit_solutions, multiplicities, to_poly_solve, solution_dict, algorithm, domain):
from sage.structure.element import Expression
if f.is_relational():
if (f.operator() is not operator.eq):
if (algorithm == 'sympy'):
from sympy import S, solveset
from sage.interfaces.sympy import sympy_set_to_list
if (isinstance(x, Expression) and x.is_symbol()):
sympy_vars = (x._sympy_(),)
else:
sympy_vars = tuple([v._sympy_() for v in x])
ret = solveset(f._sympy_(), sympy_vars[0], S.Reals)
return sympy_set_to_list(ret, sympy_vars)
elif (algorithm == 'giac'):
return _giac_solver(f, x, solution_dict)
else:
try:
return solve_ineq(f)
except Exception:
pass
try:
return solve_ineq([f])
except Exception:
raise NotImplementedError('solving only implemented for equalities and few special inequalities, see solve_ineq')
ex = f
else:
ex = (f == 0)
if (multiplicities and to_poly_solve):
raise NotImplementedError('to_poly_solve does not return multiplicities')
def has_integer_assumption(v):
from sage.symbolic.assumptions import assumptions, GenericDeclaration
alist = assumptions()
return any(((isinstance(a, GenericDeclaration) and a.has(v) and (a._assumption in ['even', 'odd', 'integer', 'integervalued'])) for a in alist))
if (len(ex.variables()) and all((has_integer_assumption(var) for var in ex.variables()))):
return f.solve_diophantine(x, solution_dict=solution_dict)
if (algorithm == 'sympy'):
from sympy import S, solveset
from sage.interfaces.sympy import sympy_set_to_list
if (isinstance(x, Expression) and x.is_symbol()):
sympy_vars = (x._sympy_(),)
else:
sympy_vars = tuple([v._sympy_() for v in x])
if (domain == 'real'):
ret = solveset(ex._sympy_(), sympy_vars[0], S.Reals)
else:
ret = solveset(ex._sympy_(), sympy_vars[0])
ret = sympy_set_to_list(ret, sympy_vars)
if solution_dict:
ret = [{sol.left(): sol.right()} for sol in ret]
return ret
if (algorithm == 'giac'):
return _giac_solver(f, x, solution_dict)
m = ex._maxima_()
P = m.parent()
if explicit_solutions:
P.eval('solveexplicit: true')
try:
if (to_poly_solve != 'force'):
s = m.solve(x).str()
else:
s = str([])
except TypeError as mess:
if ('Error executing code in Maxima' in str(mess)):
s = str([])
else:
raise
if explicit_solutions:
P.eval('solveexplicit: false')
if (s == 'all'):
if solution_dict:
ans = [{x: f.parent().var('r1')}]
else:
ans = [(x == f.parent().var('r1'))]
if multiplicities:
return (ans, [])
else:
return ans
X = string_to_list_of_solutions(s)
if multiplicities:
if (len(X) == 0):
return (X, [])
else:
ret_multiplicities = [int(e) for e in str(P.get('multiplicities'))[1:(- 1)].split(',')]
if to_poly_solve:
if (len(X) == 0):
solutions_so_far = [ex]
ignore_exceptions = True
else:
solutions_so_far = X
ignore_exceptions = False
X = []
for eq in solutions_so_far:
if (eq.lhs().is_symbol() and (eq.lhs() == x) and (x not in eq.rhs().variables())):
X.append(eq)
continue
try:
m = eq._maxima_()
s = m.to_poly_solve(x, options='algexact:true')
T = string_to_list_of_solutions(repr(s))
X.extend([t[0] for t in T])
except TypeError as mess:
if ignore_exceptions:
continue
elif (('Error executing code in Maxima' in str(mess)) or ('unable to make sense of Maxima expression' in str(mess))):
if (not explicit_solutions):
X.append(eq)
else:
raise
from sage.symbolic.assumptions import assumptions
to_check = assumptions()
if to_check:
for (ix, soln) in reversed(list(enumerate(X))):
if soln.lhs().is_symbol():
if any((a.contradicts(soln) for a in to_check)):
del X[ix]
if multiplicities:
del ret_multiplicities[ix]
continue
if solution_dict:
if (isinstance(x, (list, tuple)) and (len(x) > 1)):
X = [{sol.left(): sol.right() for sol in b} for b in X]
else:
X = [{sol.left(): sol.right()} for sol in X]
if multiplicities:
return (X, ret_multiplicities)
else:
return X |
def find_critical_alpha(id, a0, mse_criterion, alpha_min, alpha_max, model_builder, alpha_tol=1e-06, vtol=0.001, **model_kwargs):
if (mse_criterion == 'perfect'):
def mse_criterion(v):
return (abs(v) < vtol)
elif (mse_criterion == 'random'):
model = model_builder(alpha=0.5, **model_kwargs)
model.init_second_moments()
tau_x = model.get_second_moments()[id]
def mse_criterion(v):
return (abs((v - tau_x)) > vtol)
def f(alpha):
v = find_state_evolution_mse(id, a0, alpha, model_builder, **model_kwargs)
return mse_criterion(v)
search = binary_search(f, alpha_min, alpha_max, alpha_tol)
alpha_c = search['xmid']
return alpha_c |
class ResNeXt(nn.Module):
def __init__(self, baseWidth, cardinality, layers, num_classes):
super(ResNeXt, self).__init__()
block = Bottleneck
self.cardinality = cardinality
self.baseWidth = baseWidth
self.num_classes = num_classes
self.inplanes = 64
self.output_size = 64
self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], 2)
self.layer3 = self._make_layer(block, 256, layers[2], 2)
self.layer4 = self._make_layer(block, 512, layers[3], 2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear((512 * block.expansion), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, self.baseWidth, self.cardinality, stride, downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, self.baseWidth, self.cardinality))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), (- 1))
x = self.fc(x)
return x |
def start_server(args):
th.set_num_threads(NUM_THREAD)
server_namebook = dgl.contrib.read_ip_config(filename=args.ip_config)
port = server_namebook[args.server_id][2]
if (check_port_available(port) == False):
print(('Error: port %d is not available.' % port))
exit()
my_server = KGEServer(server_id=args.server_id, server_namebook=server_namebook, num_client=args.total_client)
my_server.set_clr(args.lr)
if ((my_server.get_id() % my_server.get_group_count()) == 0):
(g2l, entity_emb, entity_emb_state, relation_emb, relation_emb_state) = get_server_data(args, my_server.get_machine_id())
my_server.set_global2local(name='entity_emb', global2local=g2l)
my_server.init_data(name='relation_emb', data_tensor=relation_emb)
my_server.init_data(name='relation_emb_state', data_tensor=relation_emb_state)
my_server.init_data(name='entity_emb', data_tensor=entity_emb)
my_server.init_data(name='entity_emb_state', data_tensor=entity_emb_state)
else:
my_server.set_global2local(name='entity_emb')
my_server.init_data(name='relation_emb')
my_server.init_data(name='relation_emb_state')
my_server.init_data(name='entity_emb')
my_server.init_data(name='entity_emb_state')
print(('KVServer %d listen for requests ...' % my_server.get_id()))
my_server.start() |
class POIarray():
def __init__(self, parameter, values: (Collection | np.ndarray)):
if (not is_valid_parameter(parameter)):
raise ValueError(f'{parameter} is not a valid parameter!')
if (not isinstance(values, Collection)):
raise TypeError('A list/array of values of the POI is required.')
self.parameter = parameter
self.name = parameter.name
self._values = np.array(values, dtype=np.float64)
self._ndim = 1
self._shape = (len(values),)
@property
def values(self):
return self._values
def __repr__(self):
return f"POIarray('{self.name}', values={self.values})"
def __getitem__(self, i):
return POI(self.parameter, self.values[i])
def __iter__(self):
for v in self.values:
(yield POI(self.parameter, v))
def __len__(self):
return len(self.values)
def __eq__(self, other):
if (not isinstance(other, POIarray)):
return NotImplemented
if (len(self) != len(other)):
return False
values_equal = (self.values == other.values)
name_equal = (self.name == other.name)
return (values_equal.all() and name_equal)
def __hash__(self):
return hash((self.name, self.values.tobytes()))
@property
def ndim(self):
return self._ndim
@property
def shape(self):
return self._shape
def append(self, values: (((int | float) | Collection) | np.ndarray)):
if (not isinstance(values, Collection)):
values = [values]
values = np.concatenate([self.values, values])
return POIarray(parameter=self.parameter, values=values) |
class NotebookFinder(object):
def __init__(self):
self.loaders = {}
def find_module(self, fullname, path=None):
nb_path = find_notebook(fullname, path)
if (not nb_path):
return
key = path
if path:
key = os.path.sep.join(path)
if (key not in self.loaders):
self.loaders[key] = NotebookLoader(path)
return self.loaders[key] |
def convert_balloon_to_coco(ann_file, out_file, image_prefix):
data_infos = mmcv.load(ann_file)
annotations = []
images = []
obj_count = 0
for (idx, v) in enumerate(mmcv.track_iter_progress(data_infos.values())):
filename = v['filename']
img_path = osp.join(image_prefix, filename)
(height, width) = mmcv.imread(img_path).shape[:2]
images.append(dict(id=idx, file_name=filename, height=height, width=width))
bboxes = []
labels = []
masks = []
for (_, obj) in v['regions'].items():
assert (not obj['region_attributes'])
obj = obj['shape_attributes']
px = obj['all_points_x']
py = obj['all_points_y']
poly = [((x + 0.5), (y + 0.5)) for (x, y) in zip(px, py)]
poly = [p for x in poly for p in x]
(x_min, y_min, x_max, y_max) = (min(px), min(py), max(px), max(py))
data_anno = dict(image_id=idx, id=obj_count, category_id=0, bbox=[x_min, y_min, (x_max - x_min), (y_max - y_min)], area=((x_max - x_min) * (y_max - y_min)), segmentation=[poly], iscrowd=0)
annotations.append(data_anno)
obj_count += 1
coco_format_json = dict(images=images, annotations=annotations, categories=[{'id': 0, 'name': 'balloon'}])
mmcv.dump(coco_format_json, out_file) |
@register_kl(Dirichlet, Dirichlet)
def _kl_dirichlet_dirichlet(p, q):
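# Closed form: KL(p || q) = lgamma(sum(a_p)) - lgamma(sum(a_q)) - sum_i [lgamma(a_p_i) - lgamma(a_q_i)]
#                           + sum_i (a_p_i - a_q_i) * (digamma(a_p_i) - digamma(sum(a_p)))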
sum_p_concentration = p.concentration.sum((- 1))
sum_q_concentration = q.concentration.sum((- 1))
t1 = (sum_p_concentration.lgamma() - sum_q_concentration.lgamma())
t2 = (p.concentration.lgamma() - q.concentration.lgamma()).sum((- 1))
t3 = (p.concentration - q.concentration)
t4 = (p.concentration.digamma() - sum_p_concentration.digamma().unsqueeze((- 1)))
return ((t1 - t2) + (t3 * t4).sum((- 1))) |
def test_from_symbol_table_3(inferred_signature):
config.configuration.test_creation.negate_type = 0.0
with mock.patch('pynguin.utils.randomness.next_float') as float_mock:
float_mock.return_value = 0.0
knowledge = UsageTraceNode('ROOT')
knowledge.children['__eq__'].arg_types[0].add(int)
assert (inferred_signature._from_attr_table(knowledge) == inferred_signature.type_system.convert_type_hint(int)) |
class TimeIt(object):
def __init__(self, prefix=''):
self.prefix = prefix
self.start_times = dict()
self.elapsed_times = defaultdict(int)
self._with_name_stack = []
self._with_args_stack = []
def __call__(self, name, reset_on_stop=False):
self._with_name_stack.append(name)
self._with_args_stack.append({'reset_on_stop': reset_on_stop})
return self
def __enter__(self):
self.start(self._with_name_stack[(- 1)], **self._with_args_stack[(- 1)])
return self
def __exit__(self, exc_type, exc_val, exc_tb):
name = self._with_name_stack.pop()
kwargs = self._with_args_stack.pop()
self.stop(name, **kwargs)
def start(self, name, **kwargs):
assert (name not in self.start_times)
self.start_times[name] = time.time()
def stop(self, name, reset_on_stop=False, **kwargs):
assert (name in self.start_times)
if reset_on_stop:
self.elapsed_times[name] = 0
self.elapsed_times[name] += (time.time() - self.start_times[name])
self.start_times.pop(name)
def elapsed(self, name):
return self.elapsed_times[name]
def reset(self):
self.start_times = dict()
self.elapsed_times = defaultdict(int)
def __str__(self):
s = ''
names_elapsed = sorted(self.elapsed_times.items(), key=(lambda x: x[1]), reverse=True)
for (name, elapsed) in names_elapsed:
if ('total' not in self.elapsed_times):
s += '{0}: {1: <10} {2:.1f}\n'.format(self.prefix, name, elapsed)
else:
assert (self.elapsed_times['total'] >= max(self.elapsed_times.values()))
pct = ((100.0 * elapsed) / self.elapsed_times['total'])
s += '{0}: {1: <10} {2:.1f} ({3:.1f}%)\n'.format(self.prefix, name, elapsed, pct)
if ('total' in self.elapsed_times):
times_summed = sum([t for (k, t) in self.elapsed_times.items() if (k != 'total')])
other_time = (self.elapsed_times['total'] - times_summed)
assert (other_time >= 0)
pct = ((100.0 * other_time) / self.elapsed_times['total'])
s += '{0}: {1: <10} {2:.1f} ({3:.1f}%)\n'.format(self.prefix, 'other', other_time, pct)
return s |
class OUNoise():
def __init__(self, action_dimension, scale=0.1, mu=0, theta=0.15, sigma=0.2):
self.action_dimension = action_dimension
self.scale = scale
self.mu = mu
self.theta = theta
self.sigma = sigma
self.state = (np.ones(self.action_dimension) * self.mu)
self.reset()
def reset(self):
self.state = (np.ones(self.action_dimension) * self.mu)
def noise(self):
x = self.state
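# Euler-discretized Ornstein-Uhlenbeck step with unit time step: dx = theta * (mu - x) + sigma * N(0, I)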
dx = ((self.theta * (self.mu - x)) + (self.sigma * np.random.randn(len(x))))
self.state = (x + dx)
return (self.state * self.scale) |
def add_sub_call_rc_final(ctx: LeanGenContext, called_func: LeanFunctionInfo, pc_offset: int, is_tail_call: bool):
if (ctx.rc_steps is not None):
ctx.concat_final(ctx.rc_steps.add_sub_call_rc_final(called_func.rc, pc_offset, is_tail_call)) |
def collect_one_rollout_mdp(env, expert, horizon=200, render=False, pause=0, threshold=(- 1)):
o = env.reset()
traj = dict(observations=[], actions=[], rewards=[], next_observations=[], terminals=[], agent_infos=[], env_infos=[])
ret = 0
for i in range(horizon):
(a, valid, _, _) = expert.get_action(o)
traj['observations'].append(o)
(o, r, done, info) = env.step(a)
traj['actions'].append(a)
traj['rewards'].append(r)
traj['next_observations'].append(o)
traj['terminals'].append(done)
traj['agent_infos'].append(info)
traj['env_infos'].append(info)
ret += r
if render:
env.render()
if pause:
time.sleep(pause)
if done:
break
if (threshold == (- 1)):
accept = True
elif (ret > threshold):
accept = True
else:
accept = False
return (accept, traj) |
def test_eq_other_type(control_flow_distance):
assert (not control_flow_distance.__eq__(MagicMock())) |
class SyncBatchNorm(SyncBatchNorm_):
def _check_input_dim(self, input):
if (TORCH_VERSION == 'parrots'):
if (input.dim() < 2):
raise ValueError(f'expected at least 2D input (got {input.dim()}D input)')
else:
super()._check_input_dim(input) |
class MLP(nn.Module):
def __init__(self, dim, hidden_dim, out_dim=None) -> None:
super().__init__()
out_dim = (out_dim or dim)
self.fc1 = nn.Conv2d(dim, hidden_dim, 1)
self.act = nn.GELU()
self.fc2 = nn.Conv2d(hidden_dim, out_dim, 1)
def forward(self, x: Tensor) -> Tensor:
return self.fc2(self.act(self.fc1(x))) |
def report_dynamic_errors(dataset, old_new_file, new_new_file, max_t, current_t):
old_new_path = ((RESULT_ROOT / dataset) / old_new_file)
new_new_path = ((RESULT_ROOT / dataset) / new_new_file)
if (max_t > current_t):
try:
o_n = pd.read_csv(old_new_path)
n_n = pd.read_csv(new_new_path)
assert (len(o_n) == len(n_n)), 'In current version, the workload test size should be same.'
o_n_s = o_n.sample(frac=(current_t / max_t))
n_n_s = n_n.sample(frac=(1 - (current_t / max_t)))
mixed_df = pd.concat([o_n_s, n_n_s], ignore_index=True, sort=False)
return evaluate_errors(mixed_df['error'])
except OSError:
print('Cannot open file.')
return (- 1) |
def _wrap_io_open(file, mode, encoding, errors):
binary = ('b' in mode)
if binary:
kwargs = {}
else:
kwargs = {'encoding': encoding, 'errors': errors}
if ((not PY2) or binary):
return io.open(file, mode, **kwargs)
f = io.open(file, '{}b'.format(mode.replace('t', '')))
return _make_text_stream(f, **kwargs) |
def pad_tensor_n(xs, max_len):
ret = np.zeros(((len(xs), max_len) + xs[0].shape[1:]), dtype=xs[0].dtype)
for (idx, x) in enumerate(xs):
ret[idx][:len(x)] = x
return ret |
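# Hedged usage sketch: zero-padding two variable-length (T, 3) arrays to a common length.
import numpy as np

xs = [np.ones((2, 3)), np.ones((4, 3))]
padded = pad_tensor_n(xs, max_len=5)  # shape (2, 5, 3); rows beyond each original length stay zero |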
class LayoutBuilder():
def _init(self, parameters):
self._parameters = parameters
def parameters(self):
return self._parameters
def __len__(self):
raise AssertionError('missing implementation')
def numbatype(self):
raise AssertionError('missing implementation')
def snapshot(self):
raise AssertionError('missing implementation')
def form(self):
raise AssertionError('missing implementation')
def clear(self):
raise AssertionError('missing implementation')
def is_valid(self, error: str):
raise AssertionError('missing implementation') |
def ref_deconvolution_2d(x, w, b, base_axis, pad, stride, dilation, group, channel_last=False, output_padding=(0, 0)):
if channel_last:
transpose_x = refs.ChannelLastToFirstTranspose(x.ndim, len(pad))
transpose_w = refs.ChannelLastToFirstTranspose(w.ndim, len(pad))
return transpose_x.inv(ref_deconvolution_2d(transpose_x(x), transpose_w(w), b, base_axis, pad, stride, dilation, group, False, output_padding))
y = []
for xx in x.reshape((((- 1),) + x.shape[base_axis:])):
y += [refs.deconvolution_2d(xx, w, b, pad, stride, dilation, group, output_padding=output_padding)[np.newaxis]]
y = np.vstack(y)
return y.reshape((x.shape[:base_axis] + y.shape[1:])) |
def evaluate_all_datasets(arch: Text, datasets: List[Text], xpaths: List[Text], splits: List[Text], config_path: Text, seed: int, raw_arch_config, workers, logger):
(machine_info, raw_arch_config) = (get_machine_info(), deepcopy(raw_arch_config))
all_infos = {'info': machine_info}
all_dataset_keys = []
for (dataset, xpath, split) in zip(datasets, xpaths, splits):
(train_data, valid_data, xshape, class_num) = get_datasets(dataset, xpath, (- 1))
if ((dataset == 'cifar10') or (dataset == 'cifar100')):
split_info = load_config('configs/nas-benchmark/cifar-split.txt', None, None)
elif dataset.startswith('ImageNet16'):
split_info = load_config('configs/nas-benchmark/{:}-split.txt'.format(dataset), None, None)
elif (dataset == 'ninapro'):
split_info = None
else:
raise ValueError('invalid dataset : {:}'.format(dataset))
config = load_config(config_path, dict(class_num=class_num, xshape=xshape), logger)
if bool(split):
assert (dataset == 'cifar10')
ValLoaders = {'ori-test': torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, shuffle=False, num_workers=workers, pin_memory=True)}
assert (len(train_data) == (len(split_info.train) + len(split_info.valid))), 'invalid length : {:} vs {:} + {:}'.format(len(train_data), len(split_info.train), len(split_info.valid))
train_data_v2 = deepcopy(train_data)
train_data_v2.transform = valid_data.transform
valid_data = train_data_v2
train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.train), num_workers=workers, pin_memory=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.valid), num_workers=workers, pin_memory=True)
ValLoaders['x-valid'] = valid_loader
else:
train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, shuffle=True, num_workers=workers, pin_memory=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, shuffle=False, num_workers=workers, pin_memory=True)
if (dataset == 'cifar10'):
ValLoaders = {'ori-test': valid_loader}
elif (dataset == 'cifar100'):
cifar100_splits = load_config('configs/nas-benchmark/cifar100-test-split.txt', None, None)
ValLoaders = {'ori-test': valid_loader, 'x-valid': torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xvalid), num_workers=workers, pin_memory=True), 'x-test': torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xtest), num_workers=workers, pin_memory=True)}
elif (dataset == 'ImageNet16-120'):
imagenet16_splits = load_config('configs/nas-benchmark/imagenet-16-120-test-split.txt', None, None)
ValLoaders = {'ori-test': valid_loader, 'x-valid': torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet16_splits.xvalid), num_workers=workers, pin_memory=True), 'x-test': torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet16_splits.xtest), num_workers=workers, pin_memory=True)}
elif (dataset == 'ninapro'):
ValLoaders = {'ori-test': valid_loader}
else:
raise ValueError('invalid dataset : {:}'.format(dataset))
dataset_key = '{:}'.format(dataset)
if bool(split):
dataset_key = (dataset_key + '-valid')
logger.log('Evaluate ||||||| {:10s} ||||||| Train-Num={:}, Valid-Num={:}, Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(dataset_key, len(train_data), len(valid_data), len(train_loader), len(valid_loader), config.batch_size))
logger.log('Evaluate ||||||| {:10s} ||||||| Config={:}'.format(dataset_key, config))
for (key, value) in ValLoaders.items():
logger.log('Evaluate ---->>>> {:10s} with {:} batches'.format(key, len(value)))
arch_config = dict2config(dict(name='infer.tiny', C=raw_arch_config['channel'], N=raw_arch_config['num_cells'], genotype=arch, num_classes=config.class_num), None)
results = bench_evaluate_for_seed(arch_config, config, train_loader, ValLoaders, seed, logger)
all_infos[dataset_key] = results
all_dataset_keys.append(dataset_key)
all_infos['all_dataset_keys'] = all_dataset_keys
return all_infos |
def load_state_dict_hf(model_name, device=None, dtype=None):
    # If a non-fp32 dtype is requested, load to CPU first to avoid a large transfer at full precision.
    mapped_device = ('cpu' if (dtype not in [torch.float32, None]) else device)
    resolved_archive_file = cached_file(model_name, WEIGHTS_NAME, _raise_exceptions_for_missing_entries=False)
    state_dict = torch.load(resolved_archive_file, map_location=mapped_device)
    # Cast before moving so the device transfer happens at the target precision.
    if (dtype is not None):
        state_dict = {k: v.to(dtype=dtype) for (k, v) in state_dict.items()}
    state_dict = {k: v.to(device=device) for (k, v) in state_dict.items()}
    return state_dict |
class MemoryViewIndexNode(BufferIndexNode):
is_memview_index = True
is_buffer_access = False
warned_untyped_idx = False
def analyse_types(self, env, getting=True):
from . import MemoryView
self.is_pythran_mode = has_np_pythran(env)
indices = self.indices
(have_slices, indices, newaxes) = MemoryView.unellipsify(indices, self.base.type.ndim)
if (not getting):
self.writable_needed = True
if (self.base.is_name or self.base.is_attribute):
self.base.entry.type.writable_needed = True
self.memslice_index = ((not newaxes) and (len(indices) == self.base.type.ndim))
axes = []
index_type = PyrexTypes.c_py_ssize_t_type
new_indices = []
if ((len(indices) - len(newaxes)) > self.base.type.ndim):
self.type = error_type
error(indices[self.base.type.ndim].pos, ('Too many indices specified for type %s' % self.base.type))
return self
axis_idx = 0
for (i, index) in enumerate(indices[:]):
index = index.analyse_types(env)
if index.is_none:
self.is_memview_slice = True
new_indices.append(index)
axes.append(('direct', 'strided'))
continue
(access, packing) = self.base.type.axes[axis_idx]
axis_idx += 1
if index.is_slice:
self.is_memview_slice = True
if index.step.is_none:
axes.append((access, packing))
else:
axes.append((access, 'strided'))
for attr in ('start', 'stop', 'step'):
value = getattr(index, attr)
if (not value.is_none):
value = value.coerce_to(index_type, env)
setattr(index, attr, value)
new_indices.append(value)
elif (index.type.is_int or index.type.is_pyobject):
if (index.type.is_pyobject and (not self.warned_untyped_idx)):
warning(index.pos, 'Index should be typed for more efficient access', level=2)
MemoryViewIndexNode.warned_untyped_idx = True
self.is_memview_index = True
index = index.coerce_to(index_type, env)
indices[i] = index
new_indices.append(index)
else:
self.type = error_type
error(index.pos, ('Invalid index for memoryview specified, type %s' % index.type))
return self
self.is_memview_index = (self.is_memview_index and (not self.is_memview_slice))
self.indices = new_indices
self.original_indices = indices
self.nogil = env.nogil
self.analyse_operation(env, getting, axes)
self.wrap_in_nonecheck_node(env)
return self
def analyse_operation(self, env, getting, axes):
self.none_error_message = 'Cannot index None memoryview slice'
self.analyse_buffer_index(env, getting)
def analyse_broadcast_operation(self, rhs):
if self.type.is_memoryviewslice:
lhs = self
if (lhs.is_memview_broadcast or rhs.is_memview_broadcast):
lhs.is_memview_broadcast = True
rhs.is_memview_broadcast = True
def analyse_as_memview_scalar_assignment(self, rhs):
lhs = self.analyse_assignment(rhs)
if lhs:
rhs.is_memview_copy_assignment = lhs.is_memview_copy_assignment
return lhs
return self |
@parametric_function_api('triline_feature', [('F', 'Grid feature', '[3, G, D]', True)])
def _query_on_triline(x, G, feature_size, min_=[(- 1), (- 1), (- 1)], max_=[1, 1, 1], use_ste=False, f_init=None, fix_parameters=False, rng=None):
f_init = (f_init if (f_init is not None) else I.NormalInitializer(0.001))
shape = [3, G, feature_size]
feature = nn.parameter.get_parameter_or_create('F', shape, f_init, True, (not fix_parameters))
h = F.cosine_query_on_triline(x, feature, min_, max_, use_ste)
return h |
def _process_deriv_spec(deriv):
if (deriv is not None):
try:
(ords, vals) = zip(*deriv)
except TypeError:
msg = 'Derivatives, `bc_type`, should be specified as a pair of iterables of pairs of (order, value).'
raise ValueError(msg)
else:
(ords, vals) = ([], [])
return np.atleast_1d(ords, vals) |
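# Hedged usage sketch: boundary derivatives are given as (order, value) pairs, as the
# error message above describes; passing None yields empty order/value arrays.
orders, values = _process_deriv_spec([(1, 0.0), (2, 1.5)])  # array([1, 2]), array([0. , 1.5])
empty_orders, empty_values = _process_deriv_spec(None)      # two empty arrays |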
class SimpleReplayBuffer(ReplayBuffer):
def __init__(self, max_replay_buffer_size, observation_dim, action_dim, env_info_sizes):
self._observation_dim = observation_dim
self._action_dim = action_dim
self._max_replay_buffer_size = max_replay_buffer_size
self._observations = np.zeros((max_replay_buffer_size, observation_dim))
self._next_obs = np.zeros((max_replay_buffer_size, observation_dim))
self._actions = np.zeros((max_replay_buffer_size, action_dim))
self._rewards = np.zeros((max_replay_buffer_size, 1))
self._terminals = np.zeros((max_replay_buffer_size, 1), dtype='uint8')
self._env_infos = {}
for (key, size) in env_info_sizes.items():
self._env_infos[key] = np.zeros((max_replay_buffer_size, size))
self._env_info_keys = env_info_sizes.keys()
self._top = 0
self._size = 0
def add_sample(self, observation, action, reward, terminal, next_observation, env_info, **kwargs):
self._observations[self._top] = observation
self._actions[self._top] = action
self._rewards[self._top] = reward
self._terminals[self._top] = terminal
self._next_obs[self._top] = next_observation
for key in self._env_info_keys:
self._env_infos[key][self._top] = env_info[key]
self._advance()
def terminate_episode(self):
pass
def _advance(self):
self._top = ((self._top + 1) % self._max_replay_buffer_size)
if (self._size < self._max_replay_buffer_size):
self._size += 1
def random_batch(self, batch_size):
indices = np.random.randint(0, self._size, batch_size)
batch = dict(observations=self._observations[indices], actions=self._actions[indices], rewards=self._rewards[indices], terminals=self._terminals[indices], next_observations=self._next_obs[indices])
for key in self._env_info_keys:
assert (key not in batch.keys())
batch[key] = self._env_infos[key][indices]
return batch
def rebuild_env_info_dict(self, idx):
return {key: self._env_infos[key][idx] for key in self._env_info_keys}
def batch_env_info_dict(self, indices):
return {key: self._env_infos[key][indices] for key in self._env_info_keys}
def num_steps_can_sample(self):
return self._size |
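# Hedged usage sketch (names are illustrative): a small ring buffer with one extra
# env-info field; once full, _advance() wraps and overwrites the oldest samples.
import numpy as np

buffer = SimpleReplayBuffer(max_replay_buffer_size=100, observation_dim=3, action_dim=2,
                            env_info_sizes={'success': 1})
obs = np.zeros(3)
buffer.add_sample(observation=obs, action=np.zeros(2), reward=1.0, terminal=False,
                  next_observation=obs, env_info={'success': 0})
batch = buffer.random_batch(1)  # dict of observations, actions, rewards, terminals, next_observations, success |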
def check_vit_in_transformers():
if (not has_VIT):
raise ImportError('transformers version >= 4.5.0 required for using modeling_vit') |
def write_output(elem: Dict[(str, Any)], output_dir: str):
filename = os.path.basename(elem['filepath'].replace('gs://', ''))
output_filepath = os.path.join(output_dir, filename)
start_secs = round(elem['audio_start_seconds'], 3)
end_secs = round(elem['audio_end_seconds'], 3)
start_end_str = make_start_end_str(start_secs=start_secs, end_secs=end_secs)
output_filepath = output_filepath.replace('.wav', f'-{start_end_str}.wav')
print(f'[DEBUG] writing to {output_filepath}')
if output_filepath.startswith('gs://'):
gcs = storage.Client()
(bucket, file_name) = output_filepath.replace('gs://', '').split('/', maxsplit=1)
gcs_bucket_obj = gcs.get_bucket(bucket)
blob = gcs_bucket_obj.blob(file_name)
buf = io.BytesIO()
wavfile.write(buf, rate=elem['audio_sample_rate'], data=elem['samples'])
blob.upload_from_string(buf.read(), content_type='audio/x-wav')
else:
if (not os.path.exists(output_dir)):
os.makedirs(output_dir)
wavfile.write(output_filepath, rate=elem['audio_sample_rate'], data=elem['samples'])
return None |
def test_defs_always_cached(socket_disabled, isolate_modules):
modules_to_clear = [name for name in sys.modules if (name.split('.')[0] == 'pyhf')]
for module_name in modules_to_clear:
del sys.modules[module_name]
pyhf = importlib.import_module('pyhf')
spec = {'channels': [{'name': 'singlechannel', 'samples': [{'name': 'signal', 'data': [10], 'modifiers': [{'name': 'mu', 'type': 'normfactor', 'data': None}]}, {'name': 'background', 'data': [20], 'modifiers': [{'name': 'uncorr_bkguncrt', 'type': 'shapesys', 'data': [30]}]}]}]}
pyhf.schema.validate(spec, 'model.json') |
def remove_file_if_exists(file_name: str) -> None:
if os.path.exists(file_name):
os.remove(file_name)
else:
print('The file does not exist') |
def maximum_calibration_error(y_hat: Prediction, y: Tensor, n_bins: int=10) -> Tensor:
if ((y_hat.soft is None) or (y_hat.hard is None)):
return torch.as_tensor(float('nan'))
batch_size = y_hat.soft.size(0)
if (batch_size == 0):
return torch.as_tensor(float('nan'))
(acc_binned, conf_binned, _) = bin_predictions(y_hat, y, n_bins)
mce = torch.abs((acc_binned - conf_binned))
mce = torch.max(mce)
return mce.cpu().detach() |
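# Hedged standalone sketch of the same metric, assuming bin_predictions groups samples
# into equal-width confidence bins: MCE is the largest |accuracy - confidence| gap over
# the non-empty bins (ECE would average the gaps instead of taking the max).
import torch

def mce_from_scores(confidences: torch.Tensor, correct: torch.Tensor, n_bins: int = 10) -> torch.Tensor:
    edges = torch.linspace(0, 1, n_bins + 1)
    gaps = []
    for lo, hi in zip(edges[:-1], edges[1:]):
        mask = (confidences > lo) & (confidences <= hi)
        if mask.any():
            gaps.append((correct[mask].float().mean() - confidences[mask].mean()).abs())
    return torch.stack(gaps).max() if gaps else torch.as_tensor(float('nan')) |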
class Function_stieltjes(GinacFunction):
def __init__(self):
GinacFunction.__init__(self, 'stieltjes', nargs=1, conversions=dict(mathematica='StieltjesGamma', sympy='stieltjes'), latex_name='\\gamma') |
def latent_map(fname, ofname, start_idx):
    mat = loadmat(fname)
    if ('latent' not in mat.keys()):
        print('Skipping file without latents:', fname)
        return
    latent_all = mat['latent']
    assert (latent_all.shape[0] == latent_all.size), ('Latent is not 1D for file: ' + fname)
    latent_all = latent_all.reshape((- 1))
    unique = np.unique(latent_all)
    # Remap each distinct latent value to a contiguous range starting at start_idx.
    for (i, el) in enumerate(unique):
        latent_all[(latent_all == el)] = (start_idx + i)
    mat['latent'] = latent_all.astype(int).reshape((- 1), 1)
    savemat(ofname, mat)
    print(('File %s successfully saved to %s with unique elements: %s mapped to %s' % (fname, ofname, unique, list(range(start_idx, (start_idx + len(unique))))))) |
def test_checkpoint_name():
checkpoint = utils.checkpoint_name('saved_models', 'kk_oscar_forward_charlm.pt', None)
assert (os.path.split(checkpoint) == ('saved_models', 'kk_oscar_forward_charlm_checkpoint.pt'))
checkpoint = utils.checkpoint_name('saved_models', 'kk_oscar_forward_charlm', None)
assert (os.path.split(checkpoint) == ('saved_models', 'kk_oscar_forward_charlm_checkpoint'))
checkpoint = utils.checkpoint_name('saved_models', 'kk_oscar_forward_charlm', 'othername.pt')
assert (os.path.split(checkpoint) == ('saved_models', 'othername.pt')) |
class PTBTokenizer():
def tokenize(self, captions_for_image):
cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, 'edu.stanford.nlp.process.PTBTokenizer', '-preserveLines', '-lowerCase']
final_tokenized_captions_for_image = {}
image_id = [k for (k, v) in captions_for_image.items() for _ in range(len(v))]
sentences = '\n'.join([c['caption'].replace('\n', ' ') for (k, v) in captions_for_image.items() for c in v])
path_to_jar_dirname = os.path.dirname(os.path.abspath(__file__))
tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=path_to_jar_dirname)
tmp_file.write(sentences.encode())
tmp_file.close()
cmd.append(os.path.basename(tmp_file.name))
p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, stdout=subprocess.PIPE)
token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
token_lines = token_lines.decode()
lines = token_lines.split('\n')
os.remove(tmp_file.name)
for (k, line) in zip(image_id, lines):
if (not (k in final_tokenized_captions_for_image)):
final_tokenized_captions_for_image[k] = []
tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') if (w not in PUNCTUATIONS)])
final_tokenized_captions_for_image[k].append(tokenized_caption)
return final_tokenized_captions_for_image |
def main():
args = parse_args()
root_path = args.root_path
out_dir = (args.out_dir if args.out_dir else root_path)
mmcv.mkdir_or_exist(out_dir)
img_dir = osp.join(root_path, 'imgs')
gt_dir = osp.join(root_path, 'annotations')
set_name = {}
for split in args.split_list:
set_name.update({split: (('instances_' + split) + '.json')})
assert osp.exists(osp.join(img_dir, split))
for (split, json_name) in set_name.items():
print(f'Converting {split} into {json_name}')
with mmcv.Timer(print_tmpl='It takes {}s to convert icdar annotation'):
files = collect_files(osp.join(img_dir, split), osp.join(gt_dir, split), split)
image_infos = collect_annotations(files, split, nproc=args.nproc)
convert_annotations(image_infos, osp.join(out_dir, json_name)) |
@hydra.main(config_path='../../conf', config_name='lang_ann.yaml')
def main(cfg: DictConfig) -> None:
data_module = hydra.utils.instantiate(cfg.datamodule)
bert = hydra.utils.instantiate(cfg.model)
data_module.setup()
if cfg.training:
dataset = data_module.train_datasets
else:
dataset = data_module.val_datasets
file_name = os.path.join(dataset.dataset_loader.abs_datasets_dir, 'lang_ann.npy')
if os.path.isfile(file_name):
collected_data = np.load(file_name, allow_pickle=True).reshape((- 1))[0]
start = len(collected_data['indx'])
logger.info('Resuming from {} existing language annotations'.format(len(collected_data['indx'])))
else:
collected_data = {'language': [], 'indx': []}
start = 0
length = len(dataset)
print(length, len(dataset.dataset_loader.episode_lookup))
steps = int(((length - start) // (length * 0.01)))
total = int((1 // 0.01))
logger.info('Progress --> {} / {}'.format((total - steps), total))
for i in range(start, length, steps):
imgs = []
seq_img = dataset[i][1][0].numpy()
(s, c, h, w) = seq_img.shape
seq_img = np.transpose(seq_img, (0, 2, 3, 1))
print('Seq length: {}'.format(s))
print('From: {} To: {}'.format(i, (i + s)))
fig = plt.figure()
for j in range(s):
imgRGB = seq_img[j].astype(int)
img = plt.imshow(imgRGB, animated=True)
imgs.append([img])
ArtistAnimation(fig, imgs, interval=50)
plt.show(block=False)
lang_ann = [input('Which instructions would you give to the robot to do: (press q to quit)\n')]
plt.close()
if (lang_ann[0] == 'q'):
break
logger.info(' Added indexes: {}'.format((dataset.dataset_loader.episode_lookup[i], (dataset.dataset_loader.episode_lookup[i] + dataset.window_size))))
collected_data['language'].append(lang_ann)
collected_data['indx'].append((dataset.dataset_loader.episode_lookup[i], (dataset.dataset_loader.episode_lookup[i] + dataset.window_size)))
file_name = 'lang_ann'
np.save(file_name, collected_data)
if cfg.postprocessing:
language = [item for sublist in collected_data['language'] for item in sublist]
language_embedding = bert(language)
collected_data['language'] = language_embedding.unsqueeze(1)
file_name = 'lang_emb_ann'
np.save(file_name, collected_data)
logger.info('Done extracting language embeddings !') |
def validate(val_loader, net, criterion, optim, scheduler, curr_epoch, writer, curr_iter, optim_at=None, scheduler_at=None):
net.eval()
val_loss = AverageMeter()
iou_acc = 0
error_acc = 0
dump_images = []
for (val_idx, data) in enumerate(val_loader):
if args.no_pos_dataset:
(inputs, gt_image, img_names) = data
elif (args.pos_rfactor > 0):
(inputs, gt_image, img_names, _, (pos_h, pos_w)) = data
else:
(inputs, gt_image, img_names, _) = data
assert ((len(inputs.size()) == 4) and (len(gt_image.size()) == 3))
assert (inputs.size()[2:] == gt_image.size()[1:])
batch_pixel_size = ((inputs.size(0) * inputs.size(2)) * inputs.size(3))
(inputs, gt_cuda) = (inputs.cuda(), gt_image.cuda())
with torch.no_grad():
if (args.pos_rfactor > 0):
if (args.use_hanet and (args.hanet_pos[0] > 0)):
(output, attention_map, pos_map) = net(inputs, pos=(pos_h, pos_w), attention_map=True)
else:
output = net(inputs, pos=(pos_h, pos_w))
else:
output = net(inputs)
del inputs
assert (output.size()[2:] == gt_image.size()[1:])
assert (output.size()[1] == args.dataset_cls.num_classes)
val_loss.update(criterion(output, gt_cuda).item(), batch_pixel_size)
del gt_cuda
predictions = output.data.max(1)[1].cpu()
if ((val_idx % 20) == 0):
if (args.local_rank == 0):
logging.info('validating: %d / %d', (val_idx + 1), len(val_loader))
if ((val_idx > 10) and args.test_mode):
break
if (val_idx < 10):
dump_images.append([gt_image, predictions, img_names])
iou_acc += fast_hist(predictions.numpy().flatten(), gt_image.numpy().flatten(), args.dataset_cls.num_classes)
del output, val_idx, data
iou_acc_tensor = torch.cuda.FloatTensor(iou_acc)
torch.distributed.all_reduce(iou_acc_tensor, op=torch.distributed.ReduceOp.SUM)
iou_acc = iou_acc_tensor.cpu().numpy()
if (args.local_rank == 0):
if (optim_at is not None):
evaluate_eval(args, net, optim, scheduler, val_loss, iou_acc, dump_images, writer, curr_epoch, args.dataset_cls, curr_iter, optim_at, scheduler_at)
else:
evaluate_eval(args, net, optim, scheduler, val_loss, iou_acc, dump_images, writer, curr_epoch, args.dataset_cls, curr_iter)
if (args.use_hanet and (args.hanet_pos[0] > 0)):
visualize_attention(writer, attention_map, curr_iter)
return val_loss.avg |
def load_cmrc2018():
dataset_dict = load_dataset('cmrc2018')
print(dataset_dict)
dataset_dict = cast(DatasetDict, dataset_dict)
dataset_dict = dataset_dict.rename_columns({'question': 'text2', 'context': 'text1'})
dataset_dict = dataset_dict.map(add_label, batched=True, remove_columns=['id', 'answers'])
print(f'processed dataset: {dataset_dict}')
return dataset_dict |
@pytest.mark.parametrize('a, feat_idxs, expected', [(B, [0], []), (B, [0, 1], [[0, 1, 0, 1, 1, 0]]), (B, [0, 1, 2, 3, 4, 5], [[1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 1, 0], [1, 0, 0, 1, 0, 1]])])
def test_expand_collection(a, feat_idxs, expected):
children = expand_collection(a, feat_idxs)
assert np.array_equal(np.array(children), np.array(expected)) |
def column_Log(SUK, iota, U, prec=106):
R = RealField(prec)
return [R(SUK.number_field().abs_val(v, iota, prec)).log() for v in U] |
def bmat(*args, **kwargs):
with warnings.catch_warnings(record=True):
warnings.filterwarnings('ignore', '.*the matrix subclass is not the recommended way.*')
return np.bmat(*args, **kwargs) |
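# Hedged usage sketch: assembling a 2x2 block matrix through the wrapper, which only
# silences the np.matrix deprecation warning and otherwise behaves like np.bmat.
import numpy as np

A = np.eye(2)
M = bmat([[A, 2 * A], [np.zeros((2, 2)), A]])  # 4x4 block matrix |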
def standardize_class_weights(class_weight, output_names):
return standardize_sample_or_class_weights(class_weight, output_names, 'class_weight') |
@contextlib.contextmanager
def use_cpu_device():
    # Make the host CPU the default JAX device for everything inside the with-block.
    with jax.default_device(jax.local_devices(backend='cpu')[0]):
        yield |
@require_torch
@is_staging_test
class TrainerIntegrationWithHubTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._token = TOKEN
HfFolder.save_token(TOKEN)
@classmethod
def tearDownClass(cls):
for model in ['test-trainer', 'test-trainer-epoch', 'test-trainer-step']:
try:
delete_repo(token=cls._token, repo_id=model)
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-trainer-org')
except HTTPError:
pass
def test_push_to_hub(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(output_dir=os.path.join(tmp_dir, 'test-trainer'), push_to_hub=True, hub_token=self._token)
url = trainer.push_to_hub()
re_search = re.search((ENDPOINT_STAGING + '/([^/]+/[^/]+)/'), url)
self.assertTrue((re_search is not None))
repo_name = re_search.groups()[0]
self.assertEqual(repo_name, f'{USER}/test-trainer')
model = RegressionPreTrainedModel.from_pretrained(repo_name)
self.assertEqual(model.a.item(), trainer.model.a.item())
self.assertEqual(model.b.item(), trainer.model.b.item())
def test_push_to_hub_in_organization(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(output_dir=tmp_dir)
trainer.save_model()
trainer = get_regression_trainer(output_dir=os.path.join(tmp_dir, 'test-trainer-org'), push_to_hub=True, hub_model_id='valid_org/test-trainer-org', hub_token=self._token)
url = trainer.push_to_hub()
re_search = re.search((ENDPOINT_STAGING + '/([^/]+/[^/]+)/'), url)
self.assertTrue((re_search is not None))
repo_name = re_search.groups()[0]
self.assertEqual(repo_name, 'valid_org/test-trainer-org')
model = RegressionPreTrainedModel.from_pretrained('valid_org/test-trainer-org')
self.assertEqual(model.a.item(), trainer.model.a.item())
self.assertEqual(model.b.item(), trainer.model.b.item())
def get_commit_history(self, repo):
commit_logs = subprocess.run('git log'.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE, check=True, encoding='utf-8', cwd=repo).stdout
commits = commit_logs.split('\n\n')[1::2]
return [commit.strip() for commit in commits]
def test_push_to_hub_with_saves_each_epoch(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(output_dir=os.path.join(tmp_dir, 'test-trainer-epoch'), push_to_hub=True, hub_token=self._token, save_strategy='epoch')
trainer.train()
while ((trainer.push_in_progress is not None) and (not trainer.push_in_progress.is_done)):
time.sleep(0.5)
with tempfile.TemporaryDirectory() as tmp_dir:
_ = Repository(tmp_dir, clone_from=f'{USER}/test-trainer-epoch', token=self._token)
commits = self.get_commit_history(tmp_dir)
self.assertIn('initial commit', commits)
self.assertIn('Training in progress, epoch 1', commits)
def test_push_to_hub_with_saves_each_n_steps(self):
num_gpus = max(1, get_gpu_count())
if (num_gpus > 2):
return
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(output_dir=os.path.join(tmp_dir, 'test-trainer-step'), push_to_hub=True, hub_token=self._token, save_strategy='steps', save_steps=5)
trainer.train()
while ((trainer.push_in_progress is not None) and (not trainer.push_in_progress.is_done)):
time.sleep(0.5)
with tempfile.TemporaryDirectory() as tmp_dir:
_ = Repository(tmp_dir, clone_from=f'{USER}/test-trainer-step', token=self._token)
commits = self.get_commit_history(tmp_dir)
self.assertIn('initial commit', commits)
self.assertIn('Training in progress, step 5', commits) |
def read_json_data(file_name):
with open(file_name) as f:
article_list = [json.loads(line) for line in f]
return article_list |
def startpoint_difference(pred, label):
x_distance = (pred[0][2] - label[0][2])
y_distance = (pred[0][3] - label[0][3])
distance = math.sqrt(((x_distance * x_distance) + (y_distance * y_distance)))
return distance |
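# Hedged usage sketch: pred and label are assumed to be nested sequences whose first row
# carries the start point's (x, y) at indices 2 and 3; math.hypot gives the same distance.
import math

pred = [[0, 0, 1.0, 2.0]]
label = [[0, 0, 4.0, 6.0]]
assert math.isclose(startpoint_difference(pred, label), math.hypot(1.0 - 4.0, 2.0 - 6.0))  # both 5.0 |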
class OpenAIGPTConfig(PretrainedConfig):
model_type = 'openai-gpt'
attribute_map = {'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'}
def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn='gelu', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.afn = afn
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
super().__init__(**kwargs) |
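# Hedged usage sketch: constructing the config with defaults and with overridden fields.
# The generic name hidden_size resolving to n_embd relies on attribute_map handling in
# the transformers PretrainedConfig base class (assumed, not shown here).
config = OpenAIGPTConfig()
print(config.n_embd, config.hidden_size)  # both 768 if the attribute_map is honoured
small = OpenAIGPTConfig(vocab_size=1000, n_layer=4)  # smaller vocabulary, 4 transformer blocks |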
def gen_while_gradient(op, g_output):
from caffe2.python.core import BlobReference
assert (op.type == 'While'), 'Expected While op'
assert (len(op.input) > 0), 'Expected at least one input in While op'
assert (len(op.output) == len(g_output)), 'Different number of gradient blobs and While op outputs'
(grad_ops, deduped_g_output) = dedupe_g_output(op, g_output)
g_output = deduped_g_output
init_grad_map = {}
op_output = [str(o) for o in op.output]
for (output_name, grad_output_name) in zip(op_output, g_output):
if grad_output_name:
init_grad_map[BlobReference(output_name)] = BlobReference(grad_output_name)
assert (len(init_grad_map) > 0), 'Empty initial gradient map for While op'
loop_net = _get_net_argument(op, 'loop_net')
assert loop_net, 'Expected loop subnet in While op'
assert ((len(loop_net.op) == 1) and (loop_net.op[0].type == 'Do')), 'Gradient While op requires single Do op as a loop body'
do_op = loop_net.op[0]
do_args = _get_do_arguments(do_op)
assert (('reuse_workspace' not in do_args) or (not do_args['reuse_workspace'])), 'Gradient While op requires Do loop body op without reuse_workspace set'
assert (len(do_op.output) > 0), 'Expected Do op with at least one output'
workspace_blob = do_op.output[(- 1)]
(loop_grad_net, loop_grad_map, loop_input_names, loop_output_names) = _gen_subnet_gradient(loop_net, init_grad_map)
assert loop_grad_net, 'Failed to get gradient net for loop body in While op'
grad_ops += _prepare_gradient_while_ops(fwd_op=op, input_names=loop_input_names, output_names=loop_output_names, loop_grad_net=loop_grad_net, workspace_blob=workspace_blob, init_grad_map=init_grad_map, loop_grad_map=loop_grad_map)
op_input = [str(i) for i in op.input]
g_input = [loop_grad_map.get(i, None) for i in op_input]
return (grad_ops, g_input) |
def getfortranname(rout):
try:
name = rout['f2pyenhancements']['fortranname']
if (name == ''):
raise KeyError
if (not name):
errmess(('Failed to use fortranname from %s\n' % rout['f2pyenhancements']))
raise KeyError
except KeyError:
name = rout['name']
return name |
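# Hedged usage sketch: rout is an f2py routine dictionary; an explicit, non-empty
# fortranname overrides the routine's own name, otherwise the name falls back.
rout = {'name': 'dgemm_wrap', 'f2pyenhancements': {'fortranname': 'dgemm'}}
assert getfortranname(rout) == 'dgemm'
assert getfortranname({'name': 'foo'}) == 'foo'  # no f2pyenhancements entry -> fall back |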
def _read_sequence_example(filename_queue, n_labels=50, n_samples=59049, n_segments=10):
reader = tf.TFRecordReader()
(_, serialized_example) = reader.read(filename_queue)
(context, sequence) = tf.parse_single_sequence_example(serialized_example, context_features={'raw_labels': tf.FixedLenFeature([], dtype=tf.string)}, sequence_features={'raw_segments': tf.FixedLenSequenceFeature([], dtype=tf.string)})
segments = tf.decode_raw(sequence['raw_segments'], tf.float32)
segments.set_shape([n_segments, n_samples])
labels = tf.decode_raw(context['raw_labels'], tf.uint8)
labels.set_shape([n_labels])
labels = tf.cast(labels, tf.float32)
return (segments, labels) |