code stringlengths 101 5.91M |
|---|
def get_model(point_cloud, is_training, num_class, bn_decay=None, gripper_feat=None, env_feat=None):
    """PointNet++-style set-abstraction encoder + feature-propagation decoder.

    Produces per-point features for `point_cloud` (batch, num_point, 3),
    fusing an auxiliary gripper/environment feature vector into the decoder
    at three resolutions (4, 64 and 2048 points).

    Returns `end_points` with:
        'l0_xyz': the input coordinates,
        'feats':  (batch, num_point, 64) per-point features.

    NOTE(review): `num_class`, `batch_size` and `num_point` are unused here --
    presumably consumed by a downstream classification head; confirm. The
    tile factor 2048 below hard-codes the expected number of input points.
    """
    batch_size = point_cloud.get_shape()[0].value  # unused (kept for parity)
    num_point = point_cloud.get_shape()[1].value  # unused; decoder assumes 2048 points (see tiling below)
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None  # no per-point features beyond xyz at the input level
    end_points['l0_xyz'] = l0_xyz
    # Encoder: five set-abstraction levels, shrinking the point set
    # 512 -> 128 -> 64 -> 16 -> 4 while widening channels 64 -> ... -> 1024.
    (l1_xyz, l1_points, l1_indices) = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.01, nsample=32, mlp=[32, 32, 64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    (l2_xyz, l2_points, l2_indices) = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.02, nsample=32, mlp=[64, 64, 128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    (l3_xyz, l3_points, l3_indices) = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.04, nsample=32, mlp=[128, 128, 256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    (l4_xyz, l4_points, l4_indices) = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=0.08, nsample=32, mlp=[256, 256, 512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')
    (l5_xyz, l5_points, l5_indices) = pointnet_sa_module(l4_xyz, l4_points, npoint=4, radius=0.2, nsample=32, mlp=[512, 512, 1024], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer5')
    # Auxiliary conditioning vector: gripper features, optionally concatenated
    # with environment features along the channel axis.
    if (env_feat is None):
        extra_feat = gripper_feat
    else:
        extra_feat = tf.concat([gripper_feat, env_feat], axis=(- 1))
    print('extra_feat', extra_feat)
    extra_feat = tf.expand_dims(extra_feat, axis=1)  # (batch, 1, C) so it can be tiled per point
    extra_feat0 = extra_feat  # raw (pre-MLP) features, fused at the coarsest level
    # Small 1x1-conv MLP that compresses the conditioning vector 512 -> 64.
    extra_feat = tflearn.layers.conv.conv_1d(extra_feat, 512, filter_size=1, strides=1, activation=tf.nn.leaky_relu)
    extra_feat = tflearn.layers.conv.conv_1d(extra_feat, 256, filter_size=1, strides=1, activation=tf.nn.leaky_relu)
    extra_feat = tflearn.layers.conv.conv_1d(extra_feat, 256, filter_size=1, strides=1, activation=tf.nn.leaky_relu)
    extra_feat2 = extra_feat  # 256-d snapshot, fused at the 64-point level
    extra_feat = tflearn.layers.conv.conv_1d(extra_feat, 128, filter_size=1, strides=1, activation=tf.nn.leaky_relu)
    extra_feat = tflearn.layers.conv.conv_1d(extra_feat, 128, filter_size=1, strides=1, activation=tf.nn.leaky_relu)
    extra_feat = tflearn.layers.conv.conv_1d(extra_feat, 128, filter_size=1, strides=1, activation=tf.nn.leaky_relu)
    extra_feat = tflearn.layers.conv.conv_1d(extra_feat, 64, filter_size=1, strides=1, activation=tf.nn.leaky_relu)
    extra_feat3 = extra_feat  # NOTE(review): extra_feat3/extra_feat4 are never used below
    extra_feat4 = extra_feat
    extra_feat5 = extra_feat  # 64-d snapshot, fused at the full-resolution level
    # Decoder: feature propagation back to full resolution, concatenating the
    # conditioning features (tiled to the level's point count) along the way.
    extra_feat0 = tf.tile(extra_feat0, [1, 4, 1])  # 4 = npoint of layer5
    l5_points = tf.concat([l5_points, extra_feat0], axis=(- 1))
    l4_points = pointnet_fp_module(l4_xyz, l5_xyz, l4_points, l5_points, [1024, 1024, 512], is_training, bn_decay, scope='fa_layer0', bn=True)
    l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [512, 512, 256], is_training, bn_decay, scope='fa_layer1', bn=True)
    extra_feat2 = tf.tile(extra_feat2, [1, 64, 1])  # 64 = npoint of layer3
    l3_points = tf.concat([l3_points, extra_feat2], axis=(- 1))
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256, 256, 128], is_training, bn_decay, scope='fa_layer2', bn=True)
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [128, 128, 128], is_training, bn_decay, scope='fa_layer3', bn=True)
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128, 128, 64], is_training, bn_decay, scope='fa_layer4', bn=True)
    extra_feat5 = tf.tile(extra_feat5, [1, 2048, 1])  # assumes 2048 input points -- TODO confirm
    l0_points = tf.concat([l0_points, extra_feat5], axis=(- 1))
    # Final per-point feature head (no batch norm).
    l0_points = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=False, is_training=is_training, scope='fc1_3', bn_decay=bn_decay)
    l0_points = tf_util.conv1d(l0_points, 64, 1, padding='VALID', bn=False, is_training=is_training, scope='fc1_4', bn_decay=bn_decay)
    net = l0_points
    end_points['feats'] = net
    return end_points
class LongPoleCartPole(ModifiableCartPoleEnv):
    """CartPole variant whose pole length is pinned to the extreme upper bound."""

    def __init__(self):
        super(LongPoleCartPole, self).__init__()
        self.length = self.EXTREME_UPPER_LENGTH
        self._followup()  # presumably recomputes derived physics constants -- confirm in base class

    def parameters(self):
        # NOTE(review): `super().parameters` is accessed without calling, which
        # suggests the base class exposes `parameters` as a property while this
        # override is a plain method -- confirm callers use both consistently.
        parameters = super(LongPoleCartPole, self).parameters
        parameters.update({'length': self.length})
        return parameters
def scp_file(file, ip, path, ssh_key=None):
    """Build (without executing) the scp command copying *file* to ip:path.

    When *ssh_key* is given, the command uses it via `scp -i`.
    """
    if ssh_key is None:
        return 'scp %s %s:%s' % (file, ip, path)
    return 'scp -i %s %s %s:%s' % (ssh_key, file, ip, path)
def save_rollouts(rollout_dir: str, s_obs_vecs: Union[(np.ndarray, List[np.ndarray])], s_ach_goal_vecs: Union[(np.ndarray, List[np.ndarray])], a_vecs: Union[(np.ndarray, List[np.ndarray])], base_actions: Optional[np.ndarray]=None) -> None:
    """Persist rollout arrays as .npy files under *rollout_dir*.

    File names come from the module-level constants (s_obs_vecs_file, ...).
    The directory is created if missing; `base_actions` is only written when
    provided.
    """
    os.makedirs(rollout_dir, exist_ok=True)
    np.save(os.path.join(rollout_dir, s_obs_vecs_file), s_obs_vecs)
    np.save(os.path.join(rollout_dir, s_ach_goal_vecs_file), s_ach_goal_vecs)
    np.save(os.path.join(rollout_dir, a_vecs_file), a_vecs)
    if base_actions is not None:
        np.save(os.path.join(rollout_dir, base_actions_file), base_actions)
class _MarkupEscapeHelper(object):
    """Helper so string interpolation into markup escapes the substituted values.

    Wraps `obj` and applies `escape` lazily on str()/repr(); indexing returns
    another wrapped helper so nested lookups stay escaped. int()/float()
    conversions pass through unescaped (numbers need no escaping).
    """

    def __init__(self, obj, escape):
        self.obj = obj
        self.escape = escape

    def __getitem__(self, item):
        # Re-wrap so `helper[key]` keeps the escaping behavior.
        return _MarkupEscapeHelper(self.obj[item], self.escape)

    def __str__(self):
        return text_type(self.escape(self.obj))
    __unicode__ = __str__  # Python 2 compatibility alias

    def __repr__(self):
        return str(self.escape(repr(self.obj)))

    def __int__(self):
        return int(self.obj)

    def __float__(self):
        return float(self.obj)
class KGEServer(KVServer):
    """KV-store server applying an Adagrad-style row-sparse update on push."""

    def _push_handler(self, name, ID, data, target):
        """Apply a sparse Adagrad step for the rows in `ID` using gradient `data`.

        `name` is expected to end with a 6-character suffix (presumably
        '-data-'); the matching Adagrad state lives under
        '<original_name>_state-data-' in `target` -- confirm naming upstream.
        """
        original_name = name[0:(- 6)]  # strip the trailing 6-char suffix
        state_sum = target[(original_name + '_state-data-')]
        grad_sum = (data * data).mean(1)  # per-row mean of squared gradients
        state_sum.index_add_(0, ID, grad_sum)  # accumulate Adagrad state in place
        # NOTE: advanced indexing returns a copy, so the in-place sqrt_/add_
        # below do NOT mutate state_sum itself.
        std = state_sum[ID]
        std_values = std.sqrt_().add_(1e-10).unsqueeze(1)  # eps avoids div-by-zero
        tmp = (((- self.clr) * data) / std_values)  # scaled negative step
        target[name].index_add_(0, ID, tmp)  # apply the update to the embedding rows

    def set_clr(self, learning_rate):
        # Learning rate used by _push_handler; must be set before any push.
        self.clr = learning_rate
class Client(object):
    """Minimal client for the Alisa task API behind the Aliyun POP gateway.

    Wraps task creation, status polling, log streaming, result fetching and
    cancellation. Every call is a signed POP request built from `self.config`
    (credentials, environment variables, plugin settings).
    """

    def __init__(self, config):
        self.config = config  # supplies credentials, env vars and plugin config

    def _base_params(self):
        """Common POP parameters shared by every request (signature inputs)."""
        ts = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
        # 32 distinct random ASCII letters; random.sample draws without replacement.
        nonce = ''.join(random.sample(string.ascii_letters, 32))
        return {'Timestamp': ts, 'AccessKeyId': self.config.pop_access_id, 'SignatureMethod': 'HMAC-SHA1', 'SignatureVersion': '1.0', 'SignatureNonce': nonce, 'Format': 'JSON', 'Product': 'dataworks', 'Version': '2017-12-12'}

    def create_sql_task(self, code):
        """Submit *code* as a SQL task; returns (task_id, status)."""
        params = self._base_params()
        params['ExecCode'] = code
        params['PluginName'] = self.config.withs['PluginName']
        params['Exec'] = self.config.withs['Exec']
        return self._create_task(params)

    def create_pyodps_task(self, code, args):
        """Submit *code* as a PyODPS task with optional extra *args*."""
        params = self._base_params()
        params['ExecCode'] = code
        params['PluginName'] = self.config.withs['PluginName4PyODPS']
        params['Exec'] = self.config.withs['Exec4PyODPS']
        if (len(args) > 0):
            params['Args'] = args
        return self._create_task(params)

    def _create_task(self, params):
        """Fill task-independent fields and issue CreateAlisaTask."""
        params['CustomerId'] = self.config.withs['CustomerId']
        # NOTE(review): time-based uniqueness -- two calls in the same
        # timestamp tick would collide; confirm acceptable.
        params['UniqueKey'] = str(time.time())
        params['ExecTarget'] = self.config.env['ALISA_TASK_EXEC_TARGET']
        nenv = dict(self.config.env)  # copy so the shared env is not mutated
        nenv['SHOW_COLUMN_TYPE'] = 'true'
        params['Envs'] = json.dumps(nenv)
        val = self._request_and_parse_response('CreateAlisaTask', params)
        return (val['alisaTaskId'], val['status'])

    def get_status(self, task_id):
        """Return the AlisaTaksStatus of *task_id*."""
        params = self._base_params()
        params['AlisaTaskId'] = task_id
        val = self._request_and_parse_response('GetAlisaTask', params)
        return AlisaTaksStatus(int(val['status']))

    def completed(self, status):
        """True when *status* (enum or raw int) is a terminal state."""
        if isinstance(status, int):
            status = AlisaTaksStatus(status)
        return (status in [AlisaTaksStatus.ALISA_TASK_COMPLETED, AlisaTaksStatus.ALISA_TASK_ERROR, AlisaTaksStatus.ALISA_TASK_KILLED, AlisaTaksStatus.ALISA_TASK_RERUN, AlisaTaksStatus.ALISA_TASK_EXPIRED])

    def read_logs(self, task_id, offset, w):
        """Stream task logs into writer *w* starting at *offset*.

        Returns -1 once the server signals end-of-log, otherwise the next
        offset to resume from (bounded by MAX_LOG_NUM fetches per call).
        """
        for _ in range(MAX_LOG_NUM):
            params = self._base_params()
            params['AlisaTaskId'] = task_id
            params['Offset'] = str(offset)
            log = self._request_and_parse_response('GetAlisaTaskLog', params)
            rlen = int(log['readLength'])
            if (rlen == 0):
                return offset  # nothing new yet; caller should retry later
            offset += rlen
            w.write(log['logMsg'])
            # NOTE(review): if the API returns isEnd as the *string* 'false',
            # bool() is still True and this terminates early -- confirm the
            # response carries a real boolean.
            if bool(log['isEnd']):
                return (- 1)
        return offset

    def count_results(self, task_id):
        """Return the total number of result rows for *task_id*."""
        params = self._base_params()
        params['AlisaTaskId'] = task_id
        res = self._request_and_parse_response('GetAlisaTaskResultCount', params)
        return int(res)

    def get_results(self, task_id, batch):
        """Fetch all result rows in pages of *batch*; returns {'columns', 'body'}."""
        if (batch <= 0):
            raise ValueError('batch should greater than 0')
        count = self.count_results(task_id)
        (columns, body) = ([], [])
        for i in range(0, count, batch):
            params = self._base_params()
            params['AlisaTaskId'] = task_id
            params['Start'] = str(i)
            params['Limit'] = str(batch)
            val = self._request_and_parse_response('GetAlisaTaskResult', params)
            (header, rows) = self._parse_alisa_value(val)
            if (len(columns) == 0):
                columns = header  # header is identical across pages; keep the first
            body.extend(rows)
        return {'columns': columns, 'body': body}

    def stop(self, task_id):
        """Request cancellation of *task_id*; returns the server's boolean reply."""
        params = self._base_params()
        params['AlisaTaskId'] = task_id
        res = self._request_and_parse_response('StopAlisaTask', params)
        return bool(res)

    def _parse_alisa_value(self, val):
        """Parse a GetAlisaTaskResult payload into (columns, rows).

        Columns are dicts {'name', 'typ'} from 'dataHeader' entries of the
        form 'name::type' (type defaults to 'string').
        NOTE(review): the json.dumps -> ast.literal_eval round-trips look like
        a normalization workaround inherited from elsewhere -- confirm they
        are still needed.
        """
        jsval = ast.literal_eval(json.dumps(val))
        columns = []
        for h in json.loads(jsval['dataHeader']):
            nt = h.split('::')
            (name, typ) = ((nt[0], nt[1]) if (len(nt) == 2) else (h, 'string'))
            columns.append({'name': str(name), 'typ': str(typ)})
        body = []
        for m in json.loads(jsval['resultMsg']):
            row = []
            for i in ast.literal_eval(json.dumps(m)):
                row.append(i)
            body.append(row)
        return (columns, body)

    def _request_and_parse_response(self, action, params):
        """Send signed POP request *action*; return 'returnValue' or raise."""
        params['Action'] = action
        params['ProjectEnv'] = self.config.env['SKYNET_SYSTEM_ENV']
        url = ((self.config.pop_scheme + '://') + self.config.pop_url)
        (code, buf) = Pop.request(url, params, self.config.pop_access_secret)
        resp = json.loads(buf)
        if (code != 200):
            raise RuntimeError(('%s got a bad result, request=%s, response=%s' % (code, params, buf)))
        if (resp['returnCode'] != '0'):
            raise Exception('returned an error request={}, response={}'.format(params, resp))
        return resp['returnValue']
def extract_quotes_and_entities(sample_text):
    """Extract people, quote sources and quote/source pairs from raw text.

    Returns (people, sources, quotes_and_sources) where `sources` is the
    deduplicated list of quote speakers and `people` is the union of the
    annotator's people and those speakers.
    """
    text = utils.preprocess_text(sample_text)
    doc = spacy_lang(text)
    quotes = extractor.extract_quotes(doc)
    annotation = annotator.run(DB_CLIENT, text, [], quotes, '')
    people = annotation['people']
    sources = annotation['sources']  # NOTE: overwritten below from the assigned quotes
    unified_nes = annotator.merge_nes(doc)
    named_entities = annotator.remove_invalid_nes(unified_nes)
    (_, _, all_quotes) = annotator.quote_assign(named_entities, quotes, doc)
    quotes_and_sources = collect_quotes(all_quotes)
    quotes_and_sources = sorted(quotes_and_sources, key=(lambda x: x['speaker']), reverse=True)
    # Deduplicate speakers; going through set() loses the sorted order above.
    sources = list(set([person['speaker'] for person in quotes_and_sources]))
    people = list(set(people).union(set(sources)))
    return (people, sources, quotes_and_sources)
class CIFAR10DataLoader(BaseDataLoader):
    """CIFAR-10 data loader with optional adversarial-training transforms.

    When the trainer config enables adversarial attacks ('do_adv'),
    normalization is skipped so attacks operate in raw [0, 1] pixel space;
    otherwise the standard CIFAR-10 mean/std normalization is applied.
    """

    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_batches=0, training=True, num_workers=4, pin_memory=True):
        config = ConfigParser.get_instance()
        cfg_trainer = config['trainer']
        if cfg_trainer['do_adv']:
            print('Doing adv. attack')  # bug fix: message read 'Doint'
            transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
            transform_val = transforms.Compose([transforms.ToTensor()])
        else:
            transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
            transform_val = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
        self.data_dir = data_dir
        # NOTE(review): the dataset path is taken from the global config rather
        # than the `data_dir` argument stored above -- confirm which is intended.
        (self.train_dataset, self.val_dataset) = get_cifar10(config['data_loader']['args']['data_dir'], cfg_trainer, train=training, transform_train=transform_train, transform_val=transform_val)
        super().__init__(self.train_dataset, batch_size, shuffle, validation_split, num_workers, pin_memory, val_dataset=self.val_dataset)
def _make_hist_name(channel, sample, modifier='', prefix='hist', suffix=''):
middle = '_'.join(filter((lambda x: x), [channel, sample, modifier]))
return f'{prefix}{middle}{suffix}' |
def add_cross_val_metrics_parser(subparsers, formatter_class):
    """Register the `cross-val-metrics` sub-command on *subparsers*.

    Returns the created subparser; its default `func` is `_cross_val_metrics`.
    """
    subparser = subparsers.add_parser('cross-val-metrics', formatter_class=formatter_class, help='Compute cross-validation metrics on a given dataset')
    subparser.add_argument('dataset_path', type=str, help='Path to the dataset file')
    subparser.add_argument('output_path', type=str, help='Destination path for the json metrics')
    subparser.add_argument('-c', '--config_path', type=str, help='Path to a NLU engine config file')
    subparser.add_argument('-n', '--nb_folds', type=int, default=5, help='Number of folds to use for the cross-validation')
    subparser.add_argument('-t', '--train_size_ratio', default=1.0, type=float, help='Fraction of the data that we want to use for training (between 0 and 1)')
    # Bug fix: the help text was copy-pasted from --train_size_ratio.
    subparser.add_argument('-s', '--exclude_slot_metrics', action='store_true', help='Exclude slot metrics and slot errors in the output')
    subparser.add_argument('-i', '--include_errors', action='store_true', help='Include parsing errors in the output')
    subparser.add_argument('-v', '--verbosity', action='count', default=0, help='Increase output verbosity')
    subparser.set_defaults(func=_cross_val_metrics)
    return subparser
def pad_and_cat(a, padding_value, padding_dim=1):
    """Right-pad each tensor in *a* along *padding_dim* to a common length,
    then concatenate the padded tensors along dim 0.

    NOTE: nn.ConstantPad1d pads the LAST dimension, so padding_dim must be
    the last dimension of the tensors for the padding to land correctly.
    """
    max_dim_size = max(x.size()[padding_dim] for x in a)
    padded_a = []
    for x in a:
        cur_size = x.size()[padding_dim]
        if cur_size < max_dim_size:
            # Bug fix: the deficit was computed on dim 1 regardless of
            # padding_dim (x.size()[1]); use padding_dim consistently.
            pad = nn.ConstantPad1d((0, max_dim_size - cur_size), padding_value)
            padded_a.append(pad(x))
        else:
            padded_a.append(x)
    return torch.cat(padded_a, dim=0)
def _compute_metrics(metric, eval_preds, tokenizer):
    """Decode predictions/labels and compute the metric plus mean generation length.

    Scores are scaled to percentages and rounded to 4 decimals; 'gen_len' is
    the mean number of non-pad tokens per prediction.
    """
    preds, labels = eval_preds
    if isinstance(preds, tuple):
        preds = preds[0]
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    # Replace masked label positions with the pad id so they decode cleanly.
    labels = np.where(labels != IGNORE_INDEX, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
    scores = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
    result = {name: round(value * 100, 4) for name, value in scores.items()}
    result['gen_len'] = np.mean([np.count_nonzero(p != tokenizer.pad_token_id) for p in preds])
    return result
def print_tensors_in_checkpoint_file(file_name, tensor_name, all_tensors):
    """Print tensors stored in a TensorFlow checkpoint.

    With all_tensors=True every tensor is dumped; with an empty tensor_name
    only the reader's debug summary is printed; otherwise the named tensor
    (plus a guessed Adam update when its m/v slots exist) is printed.
    Errors are reported with hints for common checkpoint problems.
    """
    try:
        reader = tf_compat.v1.train.NewCheckpointReader(file_name)
        if all_tensors:
            var_to_shape_map = reader.get_variable_to_shape_map()
            for key in sorted(var_to_shape_map):
                print('tensor_name: ', key)
                print(reader.get_tensor(key))
        elif not tensor_name:
            # No tensor requested: dump the checkpoint's debug summary.
            print(reader.debug_string().decode('utf-8'))
        else:
            print('tensor_name: ', tensor_name)
            v = reader.get_tensor(tensor_name)
            print_tensor(v)
            if tensor_name.endswith('/Adam') and reader.has_tensor(tensor_name + '_1'):
                # Slot '<name>/Adam' holds m; '<name>/Adam_1' holds v.
                print('Guessing Adam m/v')
                v2 = reader.get_tensor(tensor_name + '_1')
                eps = 1e-08
                print('Adam update (m / (eps + sqrt(v))) with eps=%r:' % eps)
                print_tensor(v / (eps + numpy.sqrt(v2)))
    except Exception as e:
        print(str(e))
        if 'corrupted compressed block contents' in str(e):
            print("It's likely that your checkpoint file has been compressed with SNAPPY.")
        # Bug fix: the comprehension variable was also named `e`, shadowing the
        # caught exception inside the comprehension; renamed to `ext`.
        if ('Data loss' in str(e)) and any(ext in file_name for ext in ['.index', '.meta', '.data']):
            proposed_file = '.'.join(file_name.split('.')[0:(- 1)])
            v2_file_error_template = "\nIt's likely that this is a V2 checkpoint and you need to provide the filename\n*prefix*. Try removing the '.' and extension. Try:\ninspect checkpoint --file_name = {}"
            print(v2_file_error_template.format(proposed_file))
def GetPageRank_v1(tspec, *args):
    """Dispatch GetPageRank_v1 to the implementation matching the exact
    (non-subclassed) graph type of *tspec*.

    Raises TypeError for unsupported graph types.
    """
    dispatch = {
        PUNGraph: GetPageRank_v1_PUNGraph,
        PUndirNet: GetPageRank_v1_PUndirNet,
        PDirNet: GetPageRank_v1_PDirNet,
        PNGraph: GetPageRank_v1_PNGraph,
        PNEANet: GetPageRank_v1_PNEANet,
        PNGraphMP: GetPageRank_v1_PNGraphMP,
        PNEANetMP: GetPageRank_v1_PNEANetMP,
    }
    handler = dispatch.get(type(tspec))
    if handler is None:
        raise TypeError('First argument has invalid type')
    return handler(tspec, *args)
def iterate_over_json_files_in_dir(dir_path):
    """Return the paths (as strings) of all *.json files directly in *dir_path*."""
    return [str(json_path) for json_path in Path(dir_path).glob('*.json')]
def test_computation_cache_fitness_compute_order(cache):
    """Fitness/coverage are computed once and cached while the chromosome is unchanged."""
    func = MagicMock()
    func.is_maximisation_function.return_value = False
    func.compute_fitness.return_value = 0
    func.compute_is_covered.return_value = True
    cache.add_fitness_function(func)
    # Unchanged chromosome => cached values may be reused across queries.
    cache._chromosome.has_changed.return_value = False
    assert (cache.get_is_covered(func) is True)
    assert (cache.get_fitness() == 0)
    # Each underlying computation ran exactly once despite both lookups above.
    assert (func.compute_fitness.call_count == 1)
    assert (func.compute_is_covered.call_count == 1)
# NOTE(review): the decorators were garbled in the source (bare
# `.parametrize(...)` lines, a syntax error); restored as pytest marks.
@pytest.mark.parametrize('observation_shape', [(100,)])
@pytest.mark.parametrize('batch_size', [32])
@pytest.mark.parametrize('eps', [32])
def test_standard_observation_scaler_with_trajectory_slicer(observation_shape: Sequence[int], batch_size: int, eps: float) -> None:
    """Fitting via a trajectory slicer must reproduce the raw observation mean/std."""
    shape = (batch_size, *observation_shape)
    observations = np.random.random(shape).astype('f4')
    actions = np.random.random((batch_size, 1)).astype('f4')
    rewards: Float32NDArray = np.random.random(batch_size).astype(np.float32)
    terminals: Float32NDArray = np.zeros(batch_size, dtype=np.float32)
    terminals[(- 1)] = 1.0  # single episode terminating at the last transition
    episodes = EpisodeGenerator(observations=observations, actions=actions, rewards=rewards, terminals=terminals)()
    mean = observations.mean(axis=0)
    std = observations.std(axis=0)
    scaler = StandardObservationScaler(eps=eps)
    assert (not scaler.built)
    scaler.fit_with_trajectory_slicer(episodes, BasicTrajectorySlicer())
    assert scaler.built
    assert ((scaler.mean is not None) and (scaler.std is not None))
    assert np.allclose(scaler.mean, mean)
    assert np.allclose(scaler.std, std)
def get_norm_layer(opt, norm_nc):
    """Return the parameter-free normalization layer selected by
    opt.param_free_norm ('instance', 'syncbatch' or 'batch') for *norm_nc*
    channels; raises ValueError for unknown types.
    """
    kind = opt.param_free_norm
    if kind == 'instance':
        return nn.InstanceNorm2d(norm_nc, affine=False)
    elif kind == 'syncbatch':
        return SynchronizedBatchNorm2d(norm_nc, affine=False)
    elif kind == 'batch':
        return nn.BatchNorm2d(norm_nc, affine=False)
    raise ValueError('%s is not a recognized param-free norm type in SPADE' % kind)
class LossScaler():
    """Static (no-op) loss scaler: never reports overflow, scale is fixed.

    Bug fix: scale_gradient and backward previously multiplied by the BOUND
    METHOD `self.loss_scale` (missing the call), raising TypeError at
    runtime; they now use `self.cur_scale` directly. `loss_scale()` remains
    a method so existing callers are unaffected.
    """

    def __init__(self, scale=1):
        self.cur_scale = scale  # fixed scaling factor

    def has_overflow(self, params):
        # Static scaler: overflow is never detected.
        return False

    @staticmethod
    def _has_inf_or_nan(x):
        # Bug fix: the original lacked both `self` and @staticmethod, so
        # instance calls with an argument raised TypeError.
        return False

    def update_scale(self, overflow):
        # Fixed scale: nothing to update.
        pass

    def loss_scale(self):
        """Return the current (constant) loss scale."""
        return self.cur_scale

    def scale_gradient(self, module, grad_in, grad_out):
        """Scale each incoming gradient by the loss scale (backward hook)."""
        return tuple(self.cur_scale * g for g in grad_in)

    def backward(self, loss):
        """Backpropagate through `loss * scale`."""
        scaled_loss = loss * self.cur_scale
        scaled_loss.backward()
def _analyze_and_unparse_code(func: DaceProgram) -> str:
    """Run DaCe's preprocessing AST passes on *func* and return the resulting source.

    Pass order matters: globals are substituted first so the conditional
    resolver and dead-code eliminator can see constant values.
    """
    (src_ast, _, _, _) = astutils.function_to_ast(func.f)
    # Only globals not shadowed by function arguments are substituted.
    resolved = {k: v for (k, v) in func.global_vars.items() if (k not in func.argnames)}
    src_ast = GlobalResolver(resolved).visit(src_ast)
    src_ast = ConditionalCodeResolver(resolved).visit(src_ast)
    src_ast = DeadCodeEliminator().visit(src_ast)
    return astutils.unparse(src_ast)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
    """Build a diffusion beta schedule from the squared-cosine alpha-bar curve.

    Each beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at
    *max_beta*. Returns a float64 numpy array of length num_diffusion_timesteps.
    """
    def alpha_bar(time_step):
        # Squared cosine schedule with the small 0.008 offset from Nichol & Dhariwal.
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = [
        min(1 - alpha_bar((i + 1) / num_diffusion_timesteps) / alpha_bar(i / num_diffusion_timesteps), max_beta)
        for i in range(num_diffusion_timesteps)
    ]
    return np.array(betas, dtype=np.float64)
def tplus_time(s, time):
    """Return a Symbol named '<s>_{t}' when time == 0, else '<s>_{t+<time>}'."""
    base = str(s)
    if time == 0:
        return Symbol(base + '_{t}')
    return Symbol(f'{base}_{{t+{time}}}')
def assert_structured_array_dtype(arr, event, time, num_events):
    """Assert *arr* is a structured array with fields (event: bool, time: float)
    and exactly *num_events* True event entries. Test helper; raises AssertionError.
    """
    assert (arr.dtype.names == (event, time))
    assert np.issubdtype(arr.dtype.fields[event][0], np.bool_)
    # Bug fix: np.float_ was removed in NumPy 2.0; np.floating accepts the
    # same float64 field (and any other float width).
    assert np.issubdtype(arr.dtype.fields[time][0], np.floating)
    assert (arr[event].sum() == num_events)
def read_fasta_check_dna(f):
    """Read FASTA records from *f*, validating sequences against the DNA alphabet.

    NOTE(review): if `is_under_alphabet` follows the common convention of
    returning True on success and the offending CHARACTER on failure, both
    results are truthy and this `if res:` accepts invalid sequences too --
    the intended check may be `res is not True`. Verify against its
    definition before relying on the validation.
    """
    seq_list = []
    for e in read_fasta_yield(f):
        res = is_under_alphabet(e.seq, ALPHABET)
        if res:
            seq_list.append(e)
        else:
            # The message formats `res` as the offending character.
            raise ValueError(' '.join(['Sorry, sequence', str(e.no), 'has character', str(res), '(The character must be A or C or G or T)']))
    return seq_list
class AreaUnderCurve(ConfusionMatrixMetric):
    """Balanced-accuracy style AUC from a single confusion matrix:
    ((TPR - FPR) + 1) / 2.
    """

    def __init__(self, metric: str='AUC'):
        super().__init__(metric)

    def calculate(self):
        """Return the AUC estimate, or -inf (with a warning) when either
        denominator (tn + fp or tp + fn) is zero.

        Bug fix: the original divided by (tn + fp) BEFORE any guard, so an
        empty negative class raised ZeroDivisionError; both denominators are
        now checked up front.
        """
        cm = self.confusion_matrix
        if ((cm.tn + cm.fp) == 0) or ((cm.tp + cm.fn) == 0):
            warnings.warn('Unable to compute area under the curve due to division by zero, returning -inf', NotComputableMetricWarning)
            return float('-inf')
        specificity = cm.tn / (cm.tn + cm.fp)
        false_positive_rate = 1 - specificity
        true_positive_rate = cm.tp / (cm.tp + cm.fn)
        return ((true_positive_rate - false_positive_rate) + 1) / 2
def register_Ns3LteNetDevice_methods(root_module, cls):
    """Register Python-binding metadata for ns3::LteNetDevice on *cls*.

    Generated-style PyBindGen registration: declares the constructor and the
    ns3::NetDevice virtual interface overrides. Returns None.
    """
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    # Interface index / MTU / addressing accessors.
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True)
    cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True)
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True)
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_virtual=True)
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True)
    # Link state and capability queries.
    cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True)
    cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True)
    cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True)
    cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True)
    # Receive/send callback plumbing.
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'addr')], is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True)
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::Packet >', 'p')])
    return
def main(dataset_name='qags_xsum', aspect='consistency', aligner_type='disc', disc_init=None, bleurt_init=None, relevance_y_x_init=None, bert_model_type='roberta-large', bert_num_layers=None, dialog_context='fact_history', aggr_type='mean', remove_stopwords=False, n_references=11):
    """Evaluate an alignment-based metric on a labeled dataset and report
    Pearson/Spearman/Kendall correlations against human scores.

    Builds an aligner ('disc', 'bert' or 'bleurt'), scores every example for
    the chosen aspect, writes all predictions to
    eval_results/<dataset>_<aspect>_<aligner>[...].json and prints the
    correlation scores.
    """
    # --- Aligner construction ----------------------------------------------
    if (aligner_type == 'disc'):
        aligner = DiscriminativeAligner.load_from_checkpoint(aggr_type=aggr_type, checkpoint_path=disc_init).to('cuda')
        aligner.eval()
        if (relevance_y_x_init is not None):
            # Separate aligner for the y->x direction (used by 'relevance').
            aligner_y_x = DiscriminativeAligner.load_from_checkpoint(aggr_type=aggr_type, checkpoint_path=relevance_y_x_init).to('cuda')
            aligner_y_x.eval()
    elif (aligner_type == 'bert'):
        aligner = BERTAligner(model_type=bert_model_type, num_layers=bert_num_layers, aggr_type=aggr_type, lang='en', device='cuda')
        aligner_y_x = aligner
    elif (aligner_type == 'bleurt'):
        aligner = BLEURTAligner(aggr_type=aggr_type, checkpoint=bleurt_init)
        if (relevance_y_x_init is not None):
            aligner_y_x = BLEURTAligner(aggr_type=aggr_type, checkpoint=relevance_y_x_init)
    # NOTE(review): when relevance_y_x_init is None (disc/bleurt) the
    # 'relevance' branch below would hit an unbound aligner_y_x -- confirm
    # callers always pass it for that aspect.
    examples = get_test_examples(dataset_name=dataset_name, aspect=aspect, dialog_context=dialog_context, n_references=n_references)
    all_preds = []
    (pred_scores, true_scores) = ([], [])
    # --- Scoring loop ------------------------------------------------------
    for example in tqdm(examples, desc='Testing'):
        if isinstance(example, list):
            # Paired examples: example[0] is the hypothesis side, example[1]
            # the reference side.
            if (aspect == 'relevance'):
                align_r_y = get_reference_score(aligner=aligner, input_text=example[1].input_text, context=example[1].context, aligner_type=aligner_type, remove_stopwords=remove_stopwords)
                align_y_x = aligner_y_x.get_score(input_text=example[0].input_text, context=example[0].context, remove_stopwords=remove_stopwords)
                pred_score = (align_r_y * align_y_x)
            elif (aspect == 'preservation'):
                # Harmonic-mean style combination of both directions.
                align_y_x = aligner.get_score(input_text=example[0].input_text, context=example[0].context, remove_stopwords=remove_stopwords)
                align_x_y = aligner.get_score(input_text=example[1].input_text, context=example[1].context, remove_stopwords=remove_stopwords)
                pred_score = ((align_y_x * align_x_y) / (align_y_x + align_x_y))
            # NOTE(review): other aspects leave pred_score unbound for list
            # examples (NameError on first such example) -- confirm the
            # dataset/aspect pairing rules this out.
            all_preds.append({'context_0': example[0].context, 'input_text_0': example[0].input_text, 'context_1': example[1].context, 'input_text_1': example[1].input_text, 'pred_score': pred_score})
            if (pred_score is not None):
                pred_scores.append(pred_score)
                assert (example[0].score == example[1].score)
                true_scores.append(example[0].score)
        else:
            # Single example: direct alignment of input_text against context.
            pred_score = aligner.get_score(input_text=example.input_text, context=example.context, remove_stopwords=remove_stopwords)
            all_preds.append({'context': example.context, 'input_text': example.input_text, 'pred_score': pred_score})
            if (pred_score is not None):
                pred_scores.append(pred_score)
                true_scores.append(example.score)
    # --- Correlations and output -------------------------------------------
    pearson_score = pearsonr(pred_scores, true_scores)[0]
    spearman_score = spearmanr(pred_scores, true_scores)[0]
    kendall_score = kendalltau(pred_scores, true_scores)[0]
    os.makedirs(f'eval_results/', exist_ok=True)
    output_filename = f'{dataset_name}_{aspect}_{aligner_type}'
    if (aligner_type == 'bert'):
        output_filename += f'_{bert_model_type}'
    output_path = f'eval_results/{output_filename}.json'
    json.dump(all_preds, open(output_path, 'w'), indent=4)
    print(output_filename)
    print(f'#sents: {len(pred_scores)}')
    print(f'pearson: {pearson_score:.4f}')
    print(f'spearman: {spearman_score:.4f}')
    print(f'kendall: {kendall_score:.4f}')
def get_pix2cam(focals, width, height):
    """Build inverse-intrinsics (pixel -> camera ray) matrices.

    The principal point is the image center; the y axis and the homogeneous
    row are negated (image-down to camera-up convention). With array inputs
    of length N the result is an N x 3 x 3 nested list; with scalars the
    leading axis move degenerates to a transpose of the single 3 x 3 matrix.
    """
    fx = np.array(focals)
    fy = np.array(focals)
    cx = 0.5 * np.array(width)
    cy = 0.5 * np.array(height)
    zeros = np.zeros_like(cx)
    ones = np.ones_like(cx)
    rows = [
        [ones / fx, zeros, -cx / fx],
        [zeros, -ones / fy, cy / fy],
        [zeros, zeros, -ones],
    ]
    # Stack as (3, 3, N) then move the batch axis to the front.
    k_inv = np.moveaxis(np.array(rows), -1, 0)
    return k_inv.tolist()
def load_transformer(gpt_ckpt, vqgan_ckpt, stft_vqgan_ckpt='', device=torch.device('cpu')):
    """Load a Net2NetTransformer checkpoint, rewiring its VQGAN checkpoint paths.

    NOTE(review): `device` is accepted but never used below -- the model ends
    up wherever the state loader puts it; confirm intent.
    """
    from pytorch_lightning.utilities.cloud_io import load as pl_load
    checkpoint = pl_load(gpt_ckpt)
    # Point the saved hyper-parameters at the (possibly relocated) VQGAN weights.
    checkpoint['hyper_parameters']['args'].vqvae = vqgan_ckpt
    if stft_vqgan_ckpt:
        checkpoint['hyper_parameters']['args'].stft_vqvae = stft_vqgan_ckpt
    gpt = Net2NetTransformer._load_model_state(checkpoint)
    gpt.eval()  # inference mode
    return gpt
class VertexAITextClient(VertexAIClient):
    """Client for Vertex AI text-generation models."""

    def make_request(self, request: Request) -> RequestResult:
        """Run (or fetch from cache) a text completion and wrap it as a RequestResult.

        Bug fix: the original built the predictions with a dict comprehension
        `{'text': c.text for c in candidates}`, which yields ONE dict whose
        'text' key holds only the LAST candidate; it now emits one dict per
        candidate so num_completions > 1 works.
        """
        parameters = {'temperature': request.temperature, 'max_output_tokens': request.max_tokens, 'top_k': request.top_k_per_token, 'top_p': request.top_p, 'stop_sequences': request.stop_sequences, 'candidate_count': request.num_completions}
        completions: List[Sequence] = []
        model_name: str = request.model_engine
        try:
            def do_it():
                model = TextGenerationModel.from_pretrained(model_name)
                response = model.predict(request.prompt, **parameters)
                candidates: List[TextGenerationResponse] = response.candidates
                # One prediction entry per candidate (see docstring bug fix).
                response_dict = {'predictions': [{'text': candidate.text} for candidate in candidates]}
                return response_dict
            cache_key = CachingClient.make_cache_key({'engine': request.model_engine, 'prompt': request.prompt, **parameters}, request)
            (response, cached) = self.cache.get(cache_key, wrap_request_time(do_it))
        except (requests.exceptions.RequestException, AssertionError) as e:
            error: str = f'VertexAITextClient error: {e}'
            return RequestResult(success=False, cached=False, error=error, completions=[], embedding=[])
        for prediction in response['predictions']:
            response_text = prediction['text']
            # Optionally echo the prompt in front of the completion text.
            text: str = ((request.prompt + response_text) if request.echo_prompt else response_text)
            tokenization_result: TokenizationRequestResult = self.tokenizer.tokenize(TokenizationRequest(text, tokenizer=self.tokenizer_name))
            # Renamed the loop variable (was `text`, shadowing the string above).
            tokens: List[Token] = [Token(text=str(raw_token), logprob=0, top_logprobs={}) for raw_token in tokenization_result.raw_tokens]
            completion = Sequence(text=response_text, logprob=0, tokens=tokens)
            sequence = truncate_sequence(completion, request, print_warning=True)
            completions.append(sequence)
        return RequestResult(success=True, cached=cached, request_time=response['request_time'], request_datetime=response['request_datetime'], completions=completions, embedding=[])
def glibc_version_string():
    """Return the running glibc version as a str, or None when the C library
    does not expose gnu_get_libc_version (i.e. not glibc).
    """
    libc = ctypes.CDLL(None)
    try:
        get_version = libc.gnu_get_libc_version
    except AttributeError:
        return None
    get_version.restype = ctypes.c_char_p
    version = get_version()
    if isinstance(version, str):
        return version
    return version.decode('ascii')
def voxel_downsample(points, voxel_size, normals=None):
    """Voxel-downsample a point set via Open3D.

    Returns the downsampled points, or a (points, normals) tuple when
    normals were supplied.
    """
    cloud = make_open3d_point_cloud(points, normals=normals)
    cloud = cloud.voxel_down_sample(voxel_size)
    down_points = np.asarray(cloud.points)
    if normals is None:
        return down_points
    return (down_points, np.asarray(cloud.normals))
class SU3(Group):
    """SU(3) gauge group operating on 3x3 complex128 matrices (TensorFlow)."""

    def __init__(self):
        self._nc = 3  # number of colors
        self._free_params = 8  # dimension of the su(3) algebra
        super().__init__(dim=4, shape=[3, 3], dtype=tf.complex128)

    def update_gauge(self, x: Tensor, p: Tensor) -> Tensor:
        # Gauge update: left-multiply by the exponential of the momentum, x -> exp(p) x.
        return tf.matmul(tf.linalg.expm(p), x)

    def checkSU(self, x: Tensor) -> tuple[(Tensor, Tensor)]:
        """Delegate to the module-level SU(3) unitarity/determinant check."""
        return checkSU(x)

    def checkU(self, x: Tensor) -> tuple[(Tensor, Tensor)]:
        """Delegate to the module-level U(3) unitarity check."""
        return checkU(x)

    def mul(self, a: Tensor, b: Tensor, adjoint_a: bool=False, adjoint_b: bool=False) -> Tensor:
        # Matrix product with optional conjugate transposes.
        return tf.linalg.matmul(a, b, adjoint_a=adjoint_a, adjoint_b=adjoint_b)

    def adjoint(self, x: Tensor) -> Tensor:
        return tf.linalg.adjoint(x)

    def trace(self, x: Tensor) -> Tensor:
        return tf.linalg.trace(x)

    def diff_trace(self, x: Tensor):
        # TODO: derivative of the trace is not implemented; currently identity.
        log.error('TODO')
        return x

    def diff2Trace(self, x: Tensor):
        # TODO: second derivative of the trace is not implemented; currently identity.
        log.error('TODO')
        return x

    def exp(self, x: Tensor) -> Tensor:
        return tf.linalg.expm(x)

    def projectTAH(self, x: Tensor) -> Tensor:
        # Project onto the traceless anti-hermitian (algebra) part.
        return projectTAH(x)

    def compat_proj(self, x: Tensor) -> Tensor:
        # Project an arbitrary matrix back onto SU(3).
        return projectSU(x)

    def random(self, shape: list[int]) -> Tensor:
        """Random SU(3) elements: project a complex Gaussian matrix onto the group."""
        r = tf.random.normal(shape, dtype=TF_FLOAT)
        i = tf.random.normal(shape, dtype=TF_FLOAT)
        return projectSU(tf.complex(r, i))

    def random_momentum(self, shape: list[int]) -> Tensor:
        # Drop the trailing 3x3 matrix dims; randTAH3 generates them itself.
        return randTAH3(shape[:(- 2)])

    def kinetic_energy(self, p: Tensor) -> Tensor:
        """Kinetic energy of momentum p, summed per batch element.

        NOTE(review): the constant 8 equals the number of su(3) generators --
        presumably a normalization shift per site; confirm against randTAH3's
        convention.
        """
        p2 = (norm2(p) - tf.constant(8.0, dtype=TF_FLOAT))
        return (0.5 * tf.math.reduce_sum(tf.reshape(p2, [p.shape[0], (- 1)]), axis=1))

    def vec_to_group(self, x: Tensor) -> Tensor:
        # 8-vector -> su(3) matrix, then project to the group.
        return self.compat_proj(vec_to_su3(x))

    def group_to_vec(self, x: Tensor) -> Tensor:
        # Project to the group first, then map back to the 8-vector.
        return su3_to_vec(self.compat_proj(x))
def test_get_wsgi_auth():
    """Digest auth must be rejected for WSGI apps with a clear ValueError."""
    with pytest.raises(ValueError, match='Digest auth is not supported for WSGI apps'):
        get_wsgi_auth(('test', 'test'), 'digest')
class DistributedDocker(Docker):
    """Docker compiler that emits one docker-compose project per scope (AS),
    so the emulation can be distributed across multiple hosts. IX networks
    are shared: rendered as "master" in the ix scope and "worker" elsewhere.
    """

    def __init__(self, namingScheme: str = 'as{asn}{role}-{name}-{primaryIp}'):
        """@param namingScheme container naming scheme (see Docker compiler)."""
        super().__init__(namingScheme)

    def getName(self) -> str:
        return 'DistributedDocker'

    def __compileIxNetMaster(self, net) -> str:
        """Render an IX network as the master (owning) compose network."""
        (scope, _, _) = net.getRegistryInfo()
        return DistributedDockerCompilerFileTemplates['compose_network_ix_master'].format(
            netId='{}{}'.format(self._contextToPrefix(scope, 'net'), net.getName()),
            prefix=net.getPrefix(),
            labelList=self._getNetMeta(net))

    def __compileIxNetWorker(self, net) -> str:
        """Render an IX network as an external (worker-side) compose network."""
        (scope, _, _) = net.getRegistryInfo()
        return DistributedDockerCompilerFileTemplates['compose_network_ix_worker'].format(
            netId='{}{}'.format(self._contextToPrefix(scope, 'net'), net.getName()),
            labelList=self._getNetMeta(net))

    def _doCompile(self, emulator: Emulator):
        """Compile every scope into its own sub-directory with a compose file."""
        registry = emulator.getRegistry()
        scopes = {scope for (scope, _, _) in registry.getAll().keys()}
        # Pre-render the worker view of all IX networks; appended to every
        # non-ix scope so their containers can attach to the shared IX nets.
        ix_nets = ''
        for ixnet in ScopedRegistry('ix', registry).getByType('net'):
            ix_nets += self.__compileIxNetWorker(ixnet)
        for scope in scopes:
            mkdir(scope)
            chdir(scope)
            services = ''
            networks = ''
            for ((_scope, objtype, name), obj) in registry.getAll().items():
                if _scope != scope:
                    continue
                if objtype == 'rnode':
                    self._log('compiling router node {} for as{}...'.format(name, scope))
                    services += self._compileNode(obj)
                if objtype == 'hnode':
                    self._log('compiling host node {} for as{}...'.format(name, scope))
                    services += self._compileNode(obj)
                if objtype == 'rs':
                    self._log('compiling rs node for {}...'.format(name))
                    services += self._compileNode(obj)
                if objtype == 'snode':
                    self._log('compiling service node {}...'.format(name))
                    services += self._compileNode(obj)
                if objtype == 'net':
                    self._log('creating network: {}/{}...'.format(scope, name))
                    networks += (self.__compileIxNetMaster(obj) if scope == 'ix' else self._compileNet(obj))
            if len(services) > 0 or len(networks) > 0:
                if scope != 'ix':
                    networks += ix_nets
                self._log('creating docker-compose.yml...')
                # BUG FIX: files were opened inline in print(file=open(...))
                # and never closed; write through context managers instead.
                with open('docker-compose.yml', 'w') as composeFile:
                    print(DockerCompilerFileTemplates['compose'].format(
                        services=services, networks=networks,
                        dummies=self._makeDummies()), file=composeFile)
                # Reset per-scope so image bookkeeping does not leak across scopes.
                self._used_images = set()
                with open('.env', 'w') as envFile:
                    print('COMPOSE_PROJECT_NAME=sim_{}'.format(scope), file=envFile)
            chdir('..')
            if services == '' and networks == '':
                # Nothing was generated for this scope: remove the empty dir.
                rmdir(scope)
def test_too_many_dimensions():
    """_clean_inputs must raise ValueError on arguments of the wrong rank."""
    cb = [1, 2, 3, 4]
    A = np.random.rand(4, 4)
    bad2D = [[1, 2], [3, 4]]
    bad3D = np.random.rand(4, 4, 4)
    bad_cases = [
        dict(c=bad2D, A_ub=A, b_ub=cb),      # c must be 1-D
        dict(c=cb, A_ub=bad3D, b_ub=cb),     # A_ub must be 2-D
        dict(c=cb, A_ub=A, b_ub=bad2D),      # b_ub must be 1-D
        dict(c=cb, A_eq=bad3D, b_eq=cb),     # A_eq must be 2-D
        dict(c=cb, A_eq=A, b_eq=bad2D),      # b_eq must be 1-D
    ]
    for kwargs in bad_cases:
        assert_raises(ValueError, _clean_inputs, **kwargs)
def grads(func, so_fact=1, side=1):
    """Build the staggered first-derivative vector (gradient) of `func`.

    so_fact divides the function's space order to get the FD order; `side`
    (+1/-1) picks the half-cell shift direction for the staggered stencil.
    """
    fd_order = func.space_order // so_fact
    components = []
    for dim in func.dimensions:
        if not dim.is_Space:
            continue
        deriv = getattr(func, 'd%s' % dim.name)
        components.append(deriv(x0=dim + (side * dim.spacing) / 2, fd_order=fd_order))
    staggered = tuple([None] * func.grid.dim)
    return VectorFunction(name='grad_%s' % func.name,
                          space_order=func.space_order,
                          components=components,
                          grid=func.grid,
                          staggered=staggered)
class ExponentialDecay(LearningRateSchedule):
    """Learning-rate schedule that decays exponentially with the global step."""

    def __init__(self, initial_rate, decay_rate, decay_steps, staircase=True):
        # Only stash the hyper-parameters; the TF op is built lazily in
        # _create_tensor once a global step tensor is available.
        self.initial_rate = initial_rate
        self.decay_rate = decay_rate
        self.decay_steps = decay_steps
        self.staircase = staircase

    def _create_tensor(self, global_step):
        """Materialize the tf.train.exponential_decay op for `global_step`."""
        return tf.train.exponential_decay(
            learning_rate=self.initial_rate,
            global_step=global_step,
            decay_rate=self.decay_rate,
            decay_steps=self.decay_steps,
            staircase=self.staircase,
        )
def random_bivariate_skew_Gaussian_center(kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=None, strict=False):
    """Sample a random centered skewed bivariate Gaussian blur kernel.

    Sigmas and rotation are drawn uniformly from their ranges; the skew
    matrix D is drawn entry-wise from (-3/sigma_max, 3/sigma_max). Optional
    multiplicative noise is applied before renormalization.

    Returns the kernel, or (kernel, sigma_x, sigma_y, rotation, D) if strict.
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
    assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
    assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
    if strict:
        # Enforce sigma_x >= sigma_y so the major axis is x.
        sigma_x, sigma_y = np.max([sigma_x, sigma_y]), np.min([sigma_x, sigma_y])
    rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    thres = 3 / np.max([sigma_x, sigma_y])
    # 2x2 skew matrix, entries drawn in the same order as before (row-major).
    D = [[np.random.uniform(-thres, thres) for _ in range(2)] for _ in range(2)]
    kernel = bivariate_skew_Gaussian_center(kernel_size, sigma_x, sigma_y, rotation, D)
    if noise_range is not None:
        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
        kernel = kernel * np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
    kernel = kernel / np.sum(kernel)
    if strict:
        return (kernel, sigma_x, sigma_y, rotation, D)
    return kernel
def inception_v1(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.8, prediction_fn=slim.softmax, spatial_squeeze=True, reuse=None, scope='InceptionV1'):
    """Inception V1 (GoogLeNet) classification head on top of inception_v1_base.

    Args:
        inputs: image batch tensor (presumably NHWC sized so the final feature
            map is 7x7, matching the pool below -- TODO confirm).
        num_classes: number of classes for the final 1x1-conv logits.
        is_training: toggles batch-norm/dropout training mode via arg_scope.
        dropout_keep_prob: keep probability for the pre-logits dropout.
        prediction_fn: maps logits to predictions (default slim.softmax).
        spatial_squeeze: if True, squeeze the [1, 2] spatial dims of logits.
        reuse: variable-scope reuse flag.
        scope: name of the outer variable scope.

    Returns:
        (logits, end_points); end_points also gains 'Logits' and 'Predictions'.
    """
    with tf.variable_scope(scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
        with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
            (net, end_points) = inception_v1_base(inputs, scope=scope)
            with tf.variable_scope('Logits'):
                # NOTE(review): this is an *average* pool under a scope named
                # 'MaxPool_0a_7x7'; renaming would break checkpoint loading,
                # so the mismatch is left as-is -- confirm intent.
                net = slim.avg_pool2d(net, [7, 7], stride=1, scope='MaxPool_0a_7x7')
                net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')
                logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='Conv2d_0c_1x1')
                if spatial_squeeze:
                    logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
                end_points['Logits'] = logits
                end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
    return (logits, end_points)
def eval_func_mp(distmat, q_pids, g_pids, q_camids, g_camids, max_rank=50, remove_junk=True):
    """Evaluate re-id CMC and mAP over a distance matrix using a worker pool.

    Args:
        distmat: (num_query, num_gallery) distance matrix.
        q_pids/g_pids, q_camids/g_camids: query/gallery person and camera ids.
        max_rank: CMC rank cutoff (clamped to the gallery size).
        remove_junk: forwarded to `worker` to drop same-camera/junk matches.

    Returns:
        (mean CMC curve, mAP, per-query AP list).
    """
    (num_q, num_g) = distmat.shape
    if num_g < max_rank:
        max_rank = num_g
        print('Note: number of gallery samples is quite small, got {}'.format(num_g))
    all_cmc = []
    all_AP = []
    print('Generating worker pools')
    t1 = time.time()
    # BUG FIX: the pool was never closed/joined, leaking 30 worker processes
    # per call; the context manager tears it down once results are consumed.
    with Pool(30) as pool:
        res = pool.imap(
            worker,
            [(q_pids[q_idx], q_camids[q_idx], g_pids, g_camids, distmat[q_idx], max_rank, remove_junk)
             for q_idx in range(num_q)],
            chunksize=32)
        print(time.time() - t1)
        for r in tqdm(res, total=num_q):
            all_AP.append(r[0])
            all_cmc.append(r[1])
    all_cmc = np.asarray(all_cmc).astype(np.float32)
    all_cmc = all_cmc.sum(0) / num_q
    mAP = np.mean(all_AP)
    return (all_cmc, mAP, all_AP)
def main():
    """Entry point: parse training options and launch worker process(es)."""
    args = TrainOptions().parse()
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    args.world_batch_size = args.batchSize
    ngpus_per_node = torch.cuda.device_count()
    if not args.multiprocessing_distributed:
        # Single-process path: run the worker directly on the configured GPUs.
        main_worker(args.gpu_ids, ngpus_per_node, args)
        return
    # One process per GPU; total world size scales with GPUs on this node.
    args.world_size = ngpus_per_node * args.world_size
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
def nccl_skip_if_lt_x_gpu(backend, x):
    """Decorator factory: skip the wrapped test when `backend` is 'nccl' and
    fewer than `x` CUDA devices are available.

    Non-nccl backends always run. The skip is signalled by exiting with the
    exit code registered in TEST_SKIPS for 'multi-gpu-<x>'.
    """
    from functools import wraps  # local import keeps the block self-contained

    def decorator(func):
        # BUG FIX: the original body had a bare `(func)` expression statement
        # here -- a lost @wraps decoration; restore metadata preservation.
        @wraps(func)
        def wrapper(*args, **kwargs):
            if backend != 'nccl':
                return func(*args, **kwargs)
            if torch.cuda.is_available() and torch.cuda.device_count() >= x:
                return func(*args, **kwargs)
            sys.exit(TEST_SKIPS[f'multi-gpu-{x}'].exit_code)
        return wrapper
    return decorator
class DatasetFolder(VisionDataset):
    """Generic folder dataset: one sub-directory per class, with optional
    remote sample loading through an S3-like `client`.
    """

    def __init__(self, root: str, loader: Callable[([str], Any)], extensions: Optional[Tuple[(str, ...)]]=None, transform: Optional[Callable]=None, target_transform: Optional[Callable]=None, is_valid_file: Optional[Callable[([str], bool)]]=None, client: Optional[Any]=None) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.client = client
        (classes, class_to_idx) = self.find_classes(self.root)
        samples = self.make_dataset(self.root, class_to_idx, extensions, is_valid_file)
        self.loader = loader
        self.extensions = extensions
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.targets = [s[1] for s in samples]

    # BUG FIX: this method takes no `self`, yet was not marked @staticmethod;
    # `self.make_dataset(...)` in __init__ therefore passed the instance as
    # `directory` (5 args for 4 params) and raised TypeError.
    @staticmethod
    def make_dataset(directory: str, class_to_idx: Dict[(str, int)], extensions: Optional[Tuple[(str, ...)]]=None, is_valid_file: Optional[Callable[([str], bool)]]=None) -> List[Tuple[(str, int)]]:
        """Build the (path, class index) sample list via the module helper."""
        if class_to_idx is None:
            raise ValueError('The class_to_idx parameter cannot be None.')
        return make_dataset(directory, class_to_idx, extensions=extensions, is_valid_file=is_valid_file)

    def find_classes(self, directory: str) -> Tuple[(List[str], Dict[(str, int)])]:
        """Return (class names, class -> index) via the module helper."""
        return find_classes(directory)

    def __getitem__(self, index: int) -> Tuple[(Any, Any)]:
        """Load one (sample, target) pair, applying the configured transforms."""
        (path, target) = self.samples[index]
        if self.client is not None:
            # Remote path: fetch the last 3 path components from the bucket
            # layout s3://sdcBucket/datasets/vision/imagenet_prep/... .
            with io.BytesIO(self.client.get(os.path.join('s3://sdcBucket', 'datasets/vision/imagenet_prep', '/'.join(path.split('/')[-3:])))) as f:
                sample = Image.open(f).convert('RGB')
        else:
            sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (sample, target)

    def __len__(self) -> int:
        return len(self.samples)
class Net(torch.nn.Module):
    """Small conv stack: halve the (float-cast) input, then conv-relu twice."""

    def __init__(self):
        super(Net, self).__init__()
        # 3x3 convs, stride 1, padding 1 -> spatial size preserved.
        self.conv1 = nn.Conv2d(64, 128, 3, 1, 1)
        self.relu1 = nn.ReLU()
        self.conv2 = nn.Conv2d(128, 32, 3, 1, 1)
        self.relu2 = nn.ReLU()

    def forward(self, x):
        """Scale input by 0.5 (as float) and run it through both conv blocks."""
        scaled = x.float() * 0.5
        return self.relu2(self.conv2(self.relu1(self.conv1(scaled))))
def test_powermod_list():
    """powermod_list with fractional exponents (modular roots)."""
    # x = 15^(1/6) mod 21 has six solutions.
    sixth_roots = powermod_list(15, (Integer(1) / 6), 21)
    assert sixth_roots == [3, 6, 9, 12, 15, 18]
    # No solution: the result is the empty list.
    no_roots = powermod_list(2, (Integer(5) / 2), 11)
    assert no_roots == []
def parse_conf(parser, input):
    """Overlay values from `input` onto the parser's defaults.

    Args:
        parser: argparse.ArgumentParser providing the default namespace.
        input: a dict, or any object whose __dict__ carries the overrides.

    Returns:
        argparse.Namespace with defaults, updated only for keys the parser
        actually defines (unknown keys are silently ignored).
    """
    args = parser.parse_args([])  # namespace populated with defaults only
    # isinstance instead of `type(...) == dict` so dict subclasses
    # (OrderedDict, defaultdict, ...) are treated as mappings too.
    d = input if isinstance(input, dict) else vars(input)
    args.__dict__.update({k: v for (k, v) in d.items() if k in args.__dict__})
    return args
def add_single_scale_rpn_outputs(model, blob_in, dim_in, spatial_scale):
    """Add RPN ops (Detectron/caffe2) on a single-scale feature map.

    Args:
        model: DetectionModelHelper to add ops to.
        blob_in: name of the input feature blob.
        dim_in: channel count of blob_in (also used as the head width).
        spatial_scale: feature-map scale relative to the input image; its
            inverse is the anchor stride.
    """
    anchors = generate_anchors(stride=(1.0 / spatial_scale), sizes=cfg.RPN.SIZES, aspect_ratios=cfg.RPN.ASPECT_RATIOS)
    num_anchors = anchors.shape[0]
    dim_out = dim_in
    # 3x3 "neck" conv shared by the classification and regression branches.
    model.Conv(blob_in, 'conv_rpn', dim_in, dim_out, kernel=3, pad=1, stride=1, weight_init=gauss_fill(0.01), bias_init=const_fill(0.0))
    model.Relu('conv_rpn', 'conv_rpn')
    # 1x1 heads: one objectness logit and 4 box deltas per anchor.
    model.Conv('conv_rpn', 'rpn_cls_logits', dim_in, num_anchors, kernel=1, pad=0, stride=1, weight_init=gauss_fill(0.01), bias_init=const_fill(0.0))
    model.Conv('conv_rpn', 'rpn_bbox_pred', dim_in, (4 * num_anchors), kernel=1, pad=0, stride=1, weight_init=gauss_fill(0.01), bias_init=const_fill(0.0))
    if ((not model.train) or cfg.MODEL.FASTER_RCNN):
        # Proposals are needed at inference time, and during Faster R-CNN
        # training to feed the RoI heads.
        model.net.Sigmoid('rpn_cls_logits', 'rpn_cls_probs')
        model.GenerateProposals(['rpn_cls_probs', 'rpn_bbox_pred', 'im_info'], ['rpn_rois', 'rpn_roi_probs'], anchors=anchors, spatial_scale=spatial_scale)
    if cfg.MODEL.FASTER_RCNN:
        if model.train:
            # Training: convert proposals into labeled RoIs.
            model.GenerateProposalLabels(['rpn_rois', 'roidb', 'im_info'])
        else:
            # Inference: downstream ops expect the blob under the name 'rois'.
            model.net.Alias('rpn_rois', 'rois')
def check_model_table(overwrite=False):
    """Verify the auto-generated model table in docs index.mdx is current.

    With overwrite=True the stale table is rewritten in place; otherwise a
    ValueError asks the user to run `make fix-copies`.
    """
    index_path = os.path.join(PATH_TO_DOCS, 'index.mdx')
    (current_table, start_index, end_index, lines) = _find_text_in_file(
        filename=index_path,
        start_prompt='<!--This table is updated automatically from the auto modules',
        end_prompt='<!-- End table-->')
    new_table = get_model_table_from_auto_modules()
    if current_table == new_table:
        return
    if not overwrite:
        raise ValueError('The model table in the `index.mdx` has not been updated. Run `make fix-copies` to fix this.')
    # Splice the regenerated table between the marker lines.
    with open(index_path, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
def _is_exception(obj) -> bool:
if (not inspect.isclass(obj)):
return False
return issubclass(obj, Exception) |
def main(args):
    """Load config and checkpoint, build the model, and run evaluation.

    Args:
        args: namespace with config, report_speed, score_threshold, checkpoint.
    """
    cfg = Config.fromfile(args.config)
    for d in [cfg, cfg.data.test]:
        d.update(dict(report_speed=args.report_speed))
    if args.score_threshold is not None:
        cfg.test_cfg.min_score = args.score_threshold
    print(json.dumps(cfg._cfg_dict, indent=4))
    sys.stdout.flush()
    data_loader = build_data_loader(cfg.data.test)
    test_loader = torch.utils.data.DataLoader(data_loader, batch_size=1, shuffle=False, num_workers=2)
    model = build_model(cfg.model)
    model = model.cuda()
    if args.checkpoint is not None:
        if not os.path.isfile(args.checkpoint):
            # BUG FIX: the message referenced `args.resume`, which does not
            # exist on this namespace and raised AttributeError instead of
            # the intended ValueError.
            raise ValueError("No checkpoint found at '{}'".format(args.checkpoint))
        print("Loading model and optimizer from checkpoint '{}'".format(args.checkpoint))
        sys.stdout.flush()
        checkpoint = torch.load(args.checkpoint)
        # Strip the 'module.' prefix DataParallel adds when saving.
        state = {key[7:]: value for (key, value) in checkpoint['state_dict'].items()}
        model.load_state_dict(state)
    model = fuse_module(model)
    test(test_loader, model, cfg)
# NOTE(review): these leading `.parametrize(...)` lines appear to be
# `@pytest.mark.parametrize(...)` decorators whose `@pytest.mark` prefix was
# lost during extraction -- not valid Python as written; restore before use.
.parametrize('inspecs', reduction_inspecs_params())
.parametrize('reduction', ['sum', 'mean', 'max', 'min', 'prod'])
.parametrize('axis', [None, 1])
def test_reduction_axis(inspecs, reduction, axis, nnabla_opts):
    """Benchmark an nnabla reduction function along the given axis."""
    func = getattr(F, reduction)
    fb = FunctionBenchmark(func, inspecs, [], dict(axis=axis), nnabla_opts.ext, nnabla_opts.ext_kwargs)
    fb.benchmark()
    fb.write(writer=nnabla_opts.function_benchmark_writer)
def safe_readline(f):
    """Read one line from `f`, backing up a byte at a time when the current
    position splits a multi-byte character (UnicodeDecodeError)."""
    position = f.tell()
    while True:
        try:
            return f.readline()
        except UnicodeDecodeError:
            # We landed mid-character: step back one byte and retry.
            position -= 1
            f.seek(position)
def topic_coherence(dataset, beta, feature_names, n_top_words=10):
    """Average pairwise coherence of each topic's top words.

    For every ordered pair (word, earlier top word) with nonzero co-occurrence
    probability cc, coherence = log(cc / (p1*p2)) / (-log cc) (an NPMI-style
    score). Counts are memoized across topics.

    NOTE: `feature_names` is accepted for API compatibility but unused here.

    Returns:
        (mean coherence over all counted pairs, mean per-topic coherence).
        Returns (0.0, 0.0) when no pair ever co-occurs.
    """
    word_counts = {}
    pair_counts = {}
    length = len(dataset)
    coherence_sum = 0.0
    coherence_count = 0
    per_topic_sum = 0.0
    for topic in beta:
        # Indices of the n_top_words highest-weight words for this topic.
        top_words = list(topic.argsort()[:(-n_top_words) - 1:-1])
        topic_sum = 0.0
        topic_count = 0.0
        # BUG FIX: the inner enumerate previously reused the outer loop
        # variable `i`, shadowing it; use distinct names throughout.
        for (rank, word) in enumerate(top_words):
            if word not in word_counts:
                word_counts[word] = count_word(dataset, word)
            for j in range(rank):
                word2 = top_words[j]
                pair = (word, word2)
                if pair not in pair_counts:
                    pair_counts[pair] = count_word_combination(dataset, pair)
                wc1 = word_counts[word] / float(length)
                wc2 = word_counts[word2] / float(length)
                cc = pair_counts[pair] / float(length)
                if cc > 0:
                    coherence = math.log(cc / float(wc1 * wc2)) / (-math.log(cc))
                    topic_sum += coherence
                    coherence_sum += coherence
                    coherence_count += 1
                    topic_count += 1
        # BUG FIX: a topic with no co-occurring pairs used to raise
        # ZeroDivisionError here; it now simply contributes nothing.
        if topic_count > 0:
            per_topic_sum += topic_sum / float(topic_count)
    if coherence_count == 0:
        return (0.0, 0.0)
    return (coherence_sum / float(coherence_count), per_topic_sum / float(len(beta)))
# NOTE(review): `_level_function()` looks like a truncated decorator (e.g.
# awkward-array's `@high_level_function()`); not valid Python as written.
_level_function()
def corr(x, y, weight=None, axis=None, *, keepdims=False, mask_identity=False, highlevel=True, behavior=None, attrs=None):
    """Correlation of x and y (optionally weighted) along `axis`.

    The bare `yield` is the dispatch idiom: the decorator consumes the yielded
    arguments for array-type dispatch, then resumes the generator, and the
    actual computation is delegated to _impl.
    """
    (yield (x, y, weight))
    return _impl(x, y, weight, axis, keepdims, mask_identity, highlevel, behavior, attrs)
def get_sex_threshold_plotting():
    """Build an R `list(...)` literal of per-genome sex-inference thresholds.

    Each genome's thresholds come from config key
    ['sex_inference', <genome>, 'thresholds'], falling back to the default
    XX/XY ranges below. Returns a single string for injection into R code.
    """
    thresholds = {genome: get_param(keys=['sex_inference', genome, 'thresholds'], def_value='list( "XX"=c(0.8, 1), "XY"=c(0, 0.6), "consistent with XX but not XY"=c(0.6, 1), "consistent with XY but not XX"=c(0, 0.8) )') for genome in GENOMES}
    sex_thresholds = 'list({pair})'.format(pair=', '.join([f'"{genome}"={thresholds[genome]}' for genome in GENOMES]))
    # Replace every '=' with '?' (including those inside the nested list
    # bodies) -- presumably a downstream consumer substitutes them back or
    # parses on '?'; TODO confirm why this substitution exists.
    sex_thresholds = sex_thresholds.replace('=', '?')
    return sex_thresholds
def get_fed_loss_cls_weights_v2(dataset_names: Union[(str, List[str])], freq_weight_power=1.0):
    """Per-class federated-loss weights: image_count ** freq_weight_power.

    For each dataset, prefer an external `<json_file minus .json>_cat_info.json`
    next to the annotations; otherwise fall back to the builtin metadata's
    class_image_count. Datasets without a registered json_file are skipped.

    Returns a single weight tensor when exactly one dataset produced weights,
    otherwise the list of tensors.
    """
    if isinstance(dataset_names, str):
        dataset_names = [dataset_names]
    logger = logging.getLogger(__name__)
    class_freq_weight_list = []
    for dataset_name in dataset_names:
        if (MetadataCatalog.get(dataset_name).get('json_file') is None):
            continue
        # e.g. annotations/foo.json -> annotations/foo_cat_info.json
        class_freq_path = (MetadataCatalog.get(dataset_name).json_file[:(- 5)] + '_cat_info.json')
        if os.path.exists(class_freq_path):
            logger.info("Search outside metadata 'image_count' for dataset '{}' from '{}'".format(dataset_name, class_freq_path))
            class_freq_weight_list.append(load_fed_loss_cls_weights(class_freq_path, freq_weight_power))
            continue
        else:
            logger.info("Nofind outside metadata 'image_count' for dataset '{}' from '{}'".format(dataset_name, class_freq_path))
        logger.info("Using builtin metadata 'image_count' for dataset '{}'".format(dataset_name))
        meta = MetadataCatalog.get(dataset_name)
        class_freq_meta = meta.class_image_count
        # Sort by category id so weights line up with class indices.
        class_freq = torch.tensor([c['image_count'] for c in sorted(class_freq_meta, key=(lambda x: x['id']))])
        class_freq_weight = (class_freq.float() ** freq_weight_power)
        class_freq_weight_list.append(class_freq_weight)
    return (class_freq_weight_list[0] if (len(class_freq_weight_list) == 1) else class_freq_weight_list)
class ResBlockDiscriminator(nn.Module):
    """Spectral-normalized residual block for a GAN discriminator.

    stride == 1: plain ReLU-conv-ReLU-conv residual branch with identity skip.
    stride != 1: the branch ends in AvgPool downsampling, and the skip becomes
    a spectral-normalized 1x1 conv followed by the same pooling.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResBlockDiscriminator, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1)
        nn.init.xavier_uniform_(self.conv1.weight.data, 1.0)
        nn.init.xavier_uniform_(self.conv2.weight.data, 1.0)
        branch = [nn.ReLU(), SpectralNorm(self.conv1), nn.ReLU(), SpectralNorm(self.conv2)]
        if stride != 1:
            branch.append(nn.AvgPool2d(2, stride=stride, padding=0))
        self.model = nn.Sequential(*branch)
        self.bypass = nn.Sequential()
        if stride != 1:
            # Projection shortcut to match channels, then downsample.
            self.bypass_conv = nn.Conv2d(in_channels, out_channels, 1, 1, padding=0)
            nn.init.xavier_uniform_(self.bypass_conv.weight.data, np.sqrt(2))
            self.bypass = nn.Sequential(SpectralNorm(self.bypass_conv), nn.AvgPool2d(2, stride=stride, padding=0))

    def forward(self, x):
        """Sum of the main branch and the (possibly projected) shortcut."""
        return self.model(x) + self.bypass(x)
def concatenate_tensors(tensor1, tensor2):
    """Pad `tensor1` spatially to match `tensor2`, then concat on channels.

    Both tensors are NCHW; tensor1 is centered inside tensor2's spatial size
    (the typical U-Net skip-connection merge).
    """
    dh = tensor2.size()[2] - tensor1.size()[2]
    dw = tensor2.size()[3] - tensor1.size()[3]
    # F.pad order for 2D input: (left, right, top, bottom).
    padding = [dw // 2, dw - dw // 2, dh // 2, dh - dh // 2]
    padded = F.pad(tensor1, padding)
    return torch.cat([padded, tensor2], dim=1)
def test_flatten_labels_1():
    """flatten_labels joins label columns with the ':sep:' delimiter."""
    labels = pd.DataFrame({
        'Product': ['Debt collection', 'Checking or savings account'],
        'Sub-product': ['I do not know', 'Checking account'],
    })
    expected = pd.Series([
        'Debt collection:sep:I do not know',
        'Checking or savings account:sep:Checking account',
    ])
    assert_series_equal(expected, flatten_labels(labels))
# NOTE(review): the bare field annotations plus __post_init__ strongly suggest
# this class was declared with @dataclass and the decorator was lost during
# extraction (without it no __init__ is generated). TODO confirm upstream.
class PayoffTable():
    """N-dimensional payoff table over joint policy combinations.

    One table axis per agent; each index along an axis corresponds to one of
    that agent's policies (mapping kept in self._policy_idx and extended
    lazily). A parallel boolean array (shared_simulation_flag.data) tracks
    which entries have already been simulated.
    """
    identify: AgentID
    agents: Sequence[AgentID]
    shared_simulation_flag: SimulationFlag
    table: Any = None

    def __post_init__(self):
        # Per-agent mapping: policy id -> axis index into `table`.
        self._policy_idx = {agent: {} for agent in self.agents}
        if (self.table is not None):
            assert (len(self.table.shape) == len(self.agents)), (self.table.shape, len(self.agents))
        else:
            self.table = np.zeros(self.shared_simulation_flag.shape, dtype=np.float32)

    def __getitem__(self, key: Dict[(str, Sequence[PolicyID])]) -> np.ndarray:
        """Read the payoff entries selected by {agent: [policy ids]}."""
        idx = self._get_combination_index(key)
        item = self.table[idx]
        return item

    def __setitem__(self, key: Dict[(AgentID, Sequence[PolicyID])], value: float):
        """Write `value` into the entries selected by `key`."""
        idx = self._get_combination_index(key)
        self.table[idx] = value

    def is_simulation_done(self, population_mapping: Dict[(str, Sequence[PolicyID])]) -> bool:
        """True when every selected combination has been simulated."""
        idx = self._get_combination_index(population_mapping)
        return np.alltrue(self.shared_simulation_flag.data[idx])

    def idx_to_policy_mapping(self, idx_tup: Sequence[int]) -> Dict[(AgentID, List[PolicyID])]:
        """Inverse lookup: per-agent axis indices -> {agent: [policy id]}."""
        policy_mapping = {}
        for (agent, pid_idx) in zip(self.agents, idx_tup):
            pid_idx_mapping = self._policy_idx[agent]
            for (k, v) in pid_idx_mapping.items():
                if (v == pid_idx):
                    policy_mapping[agent] = [k]
                    break
        return policy_mapping

    def set_simulation_done(self, population_mapping: Dict[(str, Sequence[PolicyID])]):
        """Mark the selected combinations as simulated."""
        idx = self._get_combination_index(population_mapping)
        self.shared_simulation_flag.data[idx] = True

    def expand_table(self, pad_info: List[Sequence[int]]):
        """Grow the table (and the simulation flags) to fit new policies."""
        if (not any(self.table.shape)):
            # Degenerate (all-zero-shape) table: grow every axis by one.
            pad_info = ([(0, 1)] * len(self.agents))
        self.table = np.pad(self.table, pad_info)
        if (self.shared_simulation_flag.data.shape != self.table.shape):
            self.shared_simulation_flag.data = np.pad(self.shared_simulation_flag.data, pad_info)

    def _get_combination_index(self, policy_combination: Dict[(AgentID, Sequence[PolicyID])]) -> Tuple:
        """Map a policy combination to an np.ix_ index, registering (and
        padding the table for) any policies seen for the first time."""
        res = []
        expand_flag = False
        pad_info = []
        for agent in self.agents:
            idx = []
            policy_seq = policy_combination[agent]
            if isinstance(policy_seq, str):
                # Allow a single policy id in place of a sequence.
                policy_seq = [policy_seq]
            new_policy_add_num = 0
            for p in policy_seq:
                if (self._policy_idx[agent].get(p) is None):
                    expand_flag = True
                    self._policy_idx[agent][p] = len(self._policy_idx[agent])
                    new_policy_add_num += 1
                idx.append(self._policy_idx[agent][p])
            pad_info.append((0, new_policy_add_num))
            res.append(idx)
        if expand_flag:
            self.expand_table(pad_info)
        return np.ix_(*res)

    def get_combination_index(self, policy_combination: Dict[(AgentID, Sequence[PolicyID])]) -> Tuple:
        """Public wrapper around _get_combination_index."""
        return self._get_combination_index(policy_combination)
class A001109(RecurrenceSequence2):
    """OEIS A001109: numbers a(n) whose square is a triangular number.

    Linear recurrence a(n) = 6*a(n-1) - a(n-2) with a(0)=0, a(1)=1.
    """

    def __init__(self):
        # NOTE(review): calls SloaneSequence.__init__ directly instead of
        # super() -- presumably to bypass RecurrenceSequence2's own setup;
        # confirm against the base-class hierarchy.
        SloaneSequence.__init__(self, offset=0)
        # (a0, a1, c1, c2) for a(n) = c1*a(n-1) + c2*a(n-2).
        self._params = (0, 1, 6, (- 1))
        self._b = []
        self._precompute(2)

    def _repr_(self):
        return 'a(n)^2 is a triangular number: a(n) = 6*a(n-1) - a(n-2) with a(0)=0, a(1)=1'
def skip_in_ci(test_function):
    """Decorator: skip the test when running under CI (env var CI == 'true')."""
    running_in_ci = os.environ.get('CI') == 'true'
    marker = pytest.mark.skipif(running_in_ci, reason="This test doesn't work on GitHub Actions.")
    return marker(test_function)
def time_multihead_attention(q, num_heads, k=None, v=None, mask=False, mode='self', bias=True, do_backprop=True, fp='fp32', use_apex=False, num_iters=100, num_warmups=5):
    """Benchmark torch.nn.MultiheadAttention forward/backward/zero_grad.

    Args:
        q, k, v: input tensors; q.size(2) is used as the embedding size and
            q.size(0) as the query length (torch MHA (seq, batch, embed)
            layout -- presumably; TODO confirm against profiling helpers).
        mode: 'self' -> attn(q,q,q), 'encdec' -> attn(q,k,k), 'arb' -> attn(q,k,v).
        mask: if truthy, generate an attention mask of (q_len, kv_len).
        fp: 'fp16' converts module/tensors to half (via apex.amp if use_apex).
        NOTE(review): `do_backprop` is accepted but never read -- the backward
        function is always timed; confirm intent.

    Returns:
        The profiling.time_funcs result for [forward, backward, clear].
    """
    if use_apex:
        from apex import amp
    embed_size = q.size(2)
    attn = torch.nn.MultiheadAttention(embed_size, num_heads, bias=bias).to(profiling.cuda_device)
    attn.train()
    q = q.to(profiling.cuda_device)
    if (k is not None):
        k = k.to(profiling.cuda_device)
        mask_shape = (q.size(0), k.size(0))
    else:
        mask_shape = (q.size(0), q.size(0))
    if (v is not None):
        v = v.to(profiling.cuda_device)
    # Upstream gradient for the timed backward pass.
    dy = profiling.generate_batch(q.size(1), q.size(0), embed_size).to(profiling.cuda_device)
    if mask:
        # Rebinds `mask` from the bool flag to the actual mask tensor.
        mask = profiling.gen_attention_mask(*mask_shape).to(profiling.cuda_device)
    else:
        mask = None
    if (fp == 'fp16'):
        if use_apex:
            attn = amp.initialize(attn)
        else:
            q = q.half()
            if (k is not None):
                k = k.half()
            if (v is not None):
                v = v.half()
            if (mask is not None):
                mask = mask.half()
            attn = attn.half()
        dy = dy.half()
    (result, backward_result) = (None, None)

    def forward():
        # Stores the attention output for the subsequent backward timing.
        nonlocal result
        if (mode == 'self'):
            result = attn.forward(q, q, q, need_weights=False, attn_mask=mask)[0]
        elif (mode == 'encdec'):
            result = attn.forward(q, k, k, need_weights=False, attn_mask=mask)[0]
        elif (mode == 'arb'):
            result = attn.forward(q, k, v, need_weights=False, attn_mask=mask)[0]

    def backward():
        nonlocal backward_result
        backward_result = result.backward(dy)

    def clear():
        attn.zero_grad()
    return profiling.time_funcs([forward, backward, clear], name=('MHA ' + mode), func_names=['forward', 'backward', 'clear'], num_iters=num_iters, warmups=num_warmups)
def test_in_order_unary():
    """Round-trip a parse tree using the IN_ORDER_UNARY transition scheme."""
    check_reproduce_tree(transition_scheme=TransitionScheme.IN_ORDER_UNARY)
def get_deepspeech(device: torch.device) -> GetterReturnType:
    """Build a DeepSpeech model plus a CTC-loss closure for benchmarking.

    Returns (forward, params): `forward(*flat_params)` loads the given weights
    back into the model and returns the CTC loss on a fixed random batch.
    """
    sample_rate = 16000
    window_size = 0.02
    window = 'hamming'
    audio_conf = dict(sample_rate=sample_rate, window_size=window_size, window=window, noise_dir=None)
    # Synthetic batch dimensions.
    N = 10
    num_classes = 10
    spectrogram_size = 161
    seq_length = 500
    target_length = 10
    labels = torch.rand(num_classes, device=device)
    inputs = torch.rand(N, 1, spectrogram_size, seq_length, device=device)
    # Input lengths drawn uniformly from [0.8, 0.9] * seq_length.
    inputs_sizes = torch.rand(N, device=device).mul((seq_length * 0.1)).add((seq_length * 0.8))
    targets = torch.rand(N, target_length, device=device)
    targets_sizes = torch.full((N,), target_length, dtype=torch.int, device=device)
    model = models.DeepSpeech(rnn_type=nn.LSTM, labels=labels, rnn_hidden_size=1024, nb_layers=5, audio_conf=audio_conf, bidirectional=True)
    model = model.to(device)
    criterion = nn.CTCLoss()
    (params, names) = extract_weights(model)

    def forward(*new_params: Tensor) -> Tensor:
        load_weights(model, names, new_params)
        (out, out_sizes) = model(inputs, inputs_sizes)
        # CTCLoss expects (seq, batch, classes).
        out = out.transpose(0, 1)
        loss = criterion(out, targets, out_sizes, targets_sizes)
        return loss
    return (forward, params)
class DPRContextEncoderState(DPRState):
    """Converts an original DPR biencoder checkpoint into an HF DPRContextEncoder."""

    def load_dpr_model(self):
        """Build the HF model and load the 'ctx_model.' half of the checkpoint."""
        model = DPRContextEncoder(DPRConfig(**BertConfig.get_config_dict('bert-base-uncased')[0]))
        print(f'Loading DPR biencoder from {self.src_file}')
        saved_state = load_states_from_checkpoint(self.src_file)
        (encoder, prefix) = (model.ctx_encoder, 'ctx_model.')
        # position_ids is a buffer the source checkpoint does not carry; seed
        # it from the freshly built model so load_state_dict is strict-safe.
        state_dict = {'bert_model.embeddings.position_ids': model.ctx_encoder.bert_model.embeddings.position_ids}
        for (key, value) in saved_state.model_dict.items():
            if key.startswith(prefix):
                # Strip 'ctx_model.'; everything except the projection head
                # lives under 'bert_model.' in the HF layout.
                key = key[len(prefix):]
                if (not key.startswith('encode_proj.')):
                    key = ('bert_model.' + key)
                state_dict[key] = value
        encoder.load_state_dict(state_dict)
        return model
# NOTE(review): the leading `.parametrize(...)` lines look like
# `@pytest.mark.parametrize(...)` decorators whose prefix was lost during
# extraction -- not valid Python as written; restore before running.
.parametrize('image_shape', [(111,), (33, 44), (22, 55, 11), (6, 5, 4, 3)])
.parametrize('order', ['C', 'F'])
def test_offsets_to_raveled_neighbors_highest_connectivity(image_shape, order):
    """Raveled neighbor offsets for a full (highest-connectivity) footprint."""
    footprint = np.ones(((3,) * len(image_shape)), dtype=bool)
    center = ((1,) * len(image_shape))
    offsets = _util._offsets_to_raveled_neighbors(image_shape, footprint, center, order)
    # One unique offset per footprint cell except the center, in +/- pairs.
    assert (len(offsets) == (footprint.sum() - 1))
    assert (0 not in offsets)
    assert (len(set(offsets)) == offsets.size)
    assert all((((- x) in offsets) for x in offsets))
    # Build an image whose value at each pixel is its L1 (Chebyshev-per-axis
    # sum) distance from the image center, then sample the center's neighbors.
    image_center = tuple(((s // 2) for s in image_shape))
    coords = [np.abs((np.arange(s, dtype=np.intp) - c)) for (s, c) in zip(image_shape, image_center)]
    grid = np.meshgrid(*coords, indexing='ij')
    image = np.sum(grid, axis=0)
    image_raveled = image.ravel(order)
    image_center_raveled = np.ravel_multi_index(image_center, image_shape, order=order)
    samples = []
    for offset in offsets:
        index = (image_center_raveled + offset)
        samples.append(image_raveled[index])
    # Offsets must be ordered by non-decreasing distance from the center.
    assert (np.min(samples) == 1)
    assert (np.max(samples) == len(image_shape))
    assert (list(sorted(samples)) == samples)
def process(source_sent, target_sent, hypo_sent, metric):
    """BPE-encode the source and hypotheses, scoring each hypothesis.

    `metric` selects the scorer: 'bleu' -> get_bleu, anything else -> get_ter.
    Returns (source_bpe, hypo_bpe list, score list).
    """
    source_bpe = ' '.join(sp.EncodeAsPieces(source_sent))
    hypo_bpe = [' '.join(sp.EncodeAsPieces(h)) for h in hypo_sent]
    scorer = get_bleu if metric == 'bleu' else get_ter
    score_str = [scorer(h, target_sent) for h in hypo_sent]
    return (source_bpe, hypo_bpe, score_str)
def setup_file_observer():
    """Attach a Sacred FileStorageObserver writing under <results_path>/sacred.

    Uses the module-level `results_path`, `logger`, and Sacred experiment `ex`.
    (Removed a dead trailing `pass` statement.)
    """
    file_obs_path = os.path.join(results_path, 'sacred')
    logger.info('FileStorageObserver path: {}'.format(file_obs_path))
    logger.info('Using the FileStorageObserver in results/sacred')
    ex.observers.append(FileStorageObserver.create(file_obs_path))
def fricas_integrator(expression, v, a=None, b=None, noPole=True):
    """Integrate `expression` with respect to `v` through the FriCAS interface.

    Args:
        expression: a symbolic Expression (coerced via SR otherwise).
        v: integration variable.
        a, b: optional bounds; indefinite integral when `a` is None.
        noPole: pass '"noPole"' to FriCAS so it skips pole checking on the
            interval.

    Raises:
        ValueError: if FriCAS reports a potential pole in the interval.

    Returns:
        The Sage result, or an unevaluated (held) integral when FriCAS fails.
    """
    if (not isinstance(expression, Expression)):
        expression = SR(expression)
    from sage.interfaces.fricas import fricas
    e_fricas = fricas(expression)
    v_fricas = fricas(v)
    if (a is None):
        result = e_fricas.integrate(v_fricas)
    else:
        seg = fricas.equation(v_fricas, fricas.segment(a, b))
        if noPole:
            result = e_fricas.integrate(seg, '"noPole"')
        else:
            result = e_fricas.integrate(seg)
    result = result.sage()
    if (result == 'failed'):
        # FriCAS could not integrate: fall back to an unevaluated integral.
        result = expression.integrate(v, a, b, hold=True)
    elif (result == 'potentialPole'):
        raise ValueError('The integrand has a potential pole in the integration interval')
    return result
class POXL2Learning(Controller):
    """Mininet controller wrapper that runs POX's l2_learning component."""

    def start(self):
        # Full path to the POX launcher script; remembered for stop().
        self.pox = ('%s/pox/pox.py' % POX_PATH)
        # Enable forwarding.l2_learning with DEBUG logging into
        # logs/<ClassName>.log (',w' = truncate on open).
        pox_opts = set_pox_opts('forwarding.l2_learning', 'DEBUG', (('logs/' + type(self).__name__) + '.log,w'))
        self.cmd(self.pox, pox_opts)

    def stop(self):
        # 'kill %<cmd>' is the shell job-spec form mininet uses to terminate
        # the background process started by cmd().
        self.cmd(('kill %' + self.pox))
def correctedProgram(input_program, init_state, final_state, exception_str, verbose=True, id_mapping={}):
    """Attempt to repair a program that raised an execution exception.

    Parses `exception_str` for the failing line and exception type, then
    inserts or removes instructions around that line so the violated
    precondition (object not open/closed, agent not close/facing, hands
    full, ...) is satisfied.

    NOTE: `init_state` is currently unused. `id_mapping` is a mutable default
    that is only read here, never written.

    Returns:
        The repaired program (header + fixed instructions), or the tuple
        (None, exception_str) when no repair is possible.
    """
    # The first 4 lines are the program header; the rest are instructions.
    instructions_program = input_program[4:]
    program_header = input_program[:4]
    try:
        (line_exception, exception, argument_exception) = parseException(exception_str, verbose)
    except ValueError:
        print(exception_str)
        if verbose:
            printProgramWithLine(instructions_program)
        return (None, exception_str)
    corrected_instructions = instructions_program
    insert_in = []
    (action, objects, ids) = augmentation_utils.parseStrBlock(instructions_program[line_exception])
    # Each branch below either inserts a compensating instruction before the
    # failing line, or removes the failing line when it is itself redundant.
    # (Removed six leftover ipdb.set_trace() debug breakpoints that would
    # halt any batch run.)
    if (exception == ProgramException.NOT_CLOSED):
        if (action.upper() != 'OPEN'):
            insert_in.append([line_exception, '[Close] <{}> ({})'.format(objects[0], ids[0])])
            corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
        else:
            corrected_instructions = augmentation_utils.removeInstructions([line_exception], instructions_program)
    if (exception == ProgramException.NOT_OPEN):
        if (action.upper() != 'CLOSE'):
            insert_in.append([line_exception, '[Open] <{}> ({})'.format(objects[0], ids[0])])
            corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
        else:
            corrected_instructions = augmentation_utils.removeInstructions([line_exception], instructions_program)
    if (exception == ProgramException.NOT_SITTING):
        if (action.upper() != 'STANDUP'):
            if (action.upper() in ['SLEEP', 'WAKEUP']):
                # Cannot safely interleave a Sit around sleep state changes.
                return (None, exception_str)
            insert_in.append([line_exception, '[Sit] <{}> ({})'.format(objects[0], ids[0])])
            corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
        else:
            corrected_instructions = augmentation_utils.removeInstructions([line_exception], instructions_program)
    if (exception == ProgramException.NOT_LYING):
        if (action.upper() != 'STANDUP'):
            if (action.upper() in ['SLEEP', 'WAKEUP']):
                return (None, exception_str)
            insert_in.append([line_exception, '[Lie] <{}> ({})'.format(objects[0], ids[0])])
            corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
        else:
            corrected_instructions = augmentation_utils.removeInstructions([line_exception], instructions_program)
    if (exception == ProgramException.NOT_CLOSE):
        # Agent is too far: walk to the offending object first.
        (object_name, id_object_env) = argument_exception[0]
        id_object = getidperobject(object_name, id_object_env, id_mapping)
        insert_in.append([line_exception, '[Walk] <{}> ({})'.format(object_name, id_object)])
        corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
    if (exception == ProgramException.NOT_FACING):
        insert_in.append([line_exception, '[TurnTo] <{}> ({})'.format(objects[0], ids[0])])
        corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
    if (exception == ProgramException.SITTING):
        if (action.upper() == 'SIT'):
            corrected_instructions = augmentation_utils.removeInstructions([line_exception], instructions_program)
        else:
            insert_in.append([line_exception, '[StandUp]'])
            corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
    if (exception == ProgramException.NOT_ON):
        if (action.upper() != 'SWITCHOFF'):
            insert_in.append([line_exception, '[SwitchOn] <{}> ({})'.format(objects[0], ids[0])])
            corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
        else:
            corrected_instructions = augmentation_utils.removeInstructions([line_exception], instructions_program)
    if (exception == ProgramException.NOT_OFF):
        if (action.upper() != 'SWITCHON'):
            insert_in.append([line_exception, '[SwitchOff] <{}> ({})'.format(objects[0], ids[0])])
            corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
        else:
            corrected_instructions = augmentation_utils.removeInstructions([line_exception], instructions_program)
    if (exception == ProgramException.NOT_PLUGGED_OUT):
        if (action.upper() != 'PLUGIN'):
            insert_in.append([line_exception, '[PlugOut] <{}> ({})'.format(objects[0], ids[0])])
            corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
        else:
            corrected_instructions = augmentation_utils.removeInstructions([line_exception], instructions_program)
    if (exception == ProgramException.STILL_ON):
        (action, objects, ids) = augmentation_utils.parseStrBlock(instructions_program[line_exception])
        insert_in.append([line_exception, '[SwitchOff] <{}> ({})'.format(objects[0], ids[0])])
        corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
    if (exception == ProgramException.UNPLUGGED):
        if (action.upper() == 'PLUGOUT'):
            corrected_instructions = augmentation_utils.removeInstructions([line_exception], instructions_program)
        else:
            insert_in.append([line_exception, '[PlugIn] <{}> ({})'.format(objects[0], ids[0])])
            corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
    if (exception == ProgramException.DOOR_CLOSED):
        door_argument = [arg for arg in argument_exception if (arg[0] == 'door')]
        id_object_env = door_argument[(- 1)][1]
        object_name = 'door'
        try:
            id_object = getidperobject(object_name, id_object_env, id_mapping)
        except Exception:
            # BUG FIX: previously fell through with `id_object` unbound and
            # raised NameError two lines later; give up on this program.
            print('Door used')
            print('Previous program')
            return (None, exception_str)
        # Walk to, find, and open the blocking door before the failing line.
        insert_in.append([line_exception, '[Walk] <{}> ({})'.format(object_name, id_object)])
        insert_in.append([line_exception, '[Find] <{}> ({})'.format(object_name, id_object)])
        insert_in.append([line_exception, '[Open] <{}> ({})'.format(object_name, id_object)])
        corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
    if (exception == ProgramException.OCCUPIED):
        node_state_dict = final_state.to_dict()['nodes']
        edge_state_dict = final_state.to_dict()['edges']
        # Objects currently ON the occupied target.
        edge_interest = [edge_graph['from_id'] for edge_graph in edge_state_dict if ((edge_graph['to_id'] == int(argument_exception[0][1])) and (edge_graph['relation_type'] in ['ON']))]
        node_interest = [node_graph for node_graph in node_state_dict if (node_graph['id'] in edge_interest)]
        # NOTE: prev_obj is computed but never used; kept for parity with the
        # original logic pending confirmation it is safe to drop.
        prev_obj = {}
        for object_script in list(final_state._script_objects):
            ob_mod = object_script[0]
            if (ob_mod not in prev_obj):
                prev_obj[ob_mod] = 1
            else:
                prev_obj[ob_mod] += 1
        for object_occupied in node_interest:
            object_name = object_occupied['class_name']
            if (object_name == 'character'):
                continue
            id_object_env = object_occupied['id']
            id_object = getidperobject(object_name, id_object_env, id_mapping)
            # Clear the occupier: find it, grab it, and release it elsewhere.
            insert_in.append([line_exception, '[Find] <{}> ({})'.format(object_name, id_object)])
            insert_in.append([line_exception, '[Grab] <{}> ({})'.format(object_name, id_object)])
            insert_in.append([line_exception, '[Release] <{}> ({})'.format(object_name, id_object)])
        corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
    if (exception == ProgramException.INSIDE_CLOSED):
        # NOTE: the graph queries below are currently dead code -- this case
        # always gives up; kept for parity with the original.
        node_state_dict = final_state.to_dict()['nodes']
        edge_state_dict = final_state.to_dict()['edges']
        id_object_env = id_mapping[(objects[0], int(ids[0]))]
        edge_interest = [edge_graph['to_id'] for edge_graph in edge_state_dict if ((edge_graph['from_id'] == id_object_env) and (edge_graph['relation_type'] in ['INSIDE']))]
        node_interest = [node_graph for node_graph in node_state_dict if (node_graph['id'] in edge_interest)]
        return (None, exception_str)
    if (exception == ProgramException.FREE_HAND):
        if (action.upper() != 'GRAB'):
            node_state_dict = final_state.to_dict()['nodes']
            edge_state_dict = final_state.to_dict()['edges']
            # Everything currently held in either hand.
            edge_graph_grabbed = [edge_graph['to_id'] for edge_graph in edge_state_dict if ('holds_lh' in edge_graph['relation_type'].lower())]
            edge_graph_grabbed += [edge_graph['to_id'] for edge_graph in edge_state_dict if ('holds_rh' in edge_graph['relation_type'].lower())]
            # Temporarily release a random held object and re-grab it after
            # the failing line.
            elem = random.randrange(len(edge_graph_grabbed))
            edge_interest = edge_graph_grabbed[elem]
            node_interest = [node_graph for node_graph in node_state_dict if (node_graph['id'] == edge_interest)][0]
            id_object_env = node_interest['id']
            object_name = node_interest['class_name']
            id_object = getidperobject(node_interest['class_name'], node_interest['id'], id_mapping)
            insert_in.append([line_exception, '[Release] <{}> ({})'.format(object_name, id_object)])
            insert_in.append([(line_exception + 1), '[Grab] <{}> ({})'.format(object_name, id_object)])
            corrected_instructions = augmentation_utils.insertInstructions(insert_in, instructions_program)
        else:
            return (None, exception_str)
    output_program = (program_header + corrected_instructions)
    return output_program
.slow
def test_train_eval(tmp_path, cfg_train, cfg_eval):
    """Train for one epoch, then evaluate the saved checkpoint and check the
    test accuracy of the two runs agrees."""
    # Both configs must write into the pytest-provided temp dir.
    assert str(tmp_path) == cfg_train.paths.output_dir == cfg_eval.paths.output_dir

    with open_dict(cfg_train):
        cfg_train.trainer.max_epochs = 1
        cfg_train.test = True
    HydraConfig().set_config(cfg_train)
    train_metric_dict, _ = train(cfg_train)

    ckpt_dir = tmp_path / 'checkpoints'
    assert 'last.ckpt' in os.listdir(ckpt_dir)

    with open_dict(cfg_eval):
        cfg_eval.ckpt_path = str(ckpt_dir / 'last.ckpt')
    HydraConfig().set_config(cfg_eval)
    test_metric_dict, _ = evaluate(cfg_eval)

    # evaluate() on the checkpoint should reproduce the training run's test metric.
    assert test_metric_dict['test/acc'] > 0.0
    train_acc = train_metric_dict['test/acc'].item()
    eval_acc = test_metric_dict['test/acc'].item()
    assert abs(train_acc - eval_acc) < 0.001
_module()
class SingleStageTextDetector(SingleStageDetector):
    """Single-stage text detector: backbone/neck feature extraction followed by
    a bbox head that predicts text boundaries directly."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        SingleStageDetector.__init__(self, backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg)

    def forward_train(self, img, img_metas, **kwargs):
        """Compute training losses for a batch of images."""
        feats = self.extract_feat(img)
        predictions = self.bbox_head(feats)
        return self.bbox_head.loss(predictions, **kwargs)

    def simple_test(self, img, img_metas, rescale=False):
        """Inference without test-time augmentation; returns text boundaries."""
        feats = self.extract_feat(img)
        outs = self.bbox_head(feats)
        # During ONNX export the raw head outputs are returned as-is.
        if torch.onnx.is_in_onnx_export():
            return outs
        if len(img_metas) == 1:
            return [self.bbox_head.get_boundary(*outs, img_metas, rescale)]
        # Batched input: decode each image's outputs independently.
        return [
            self.bbox_head.get_boundary(*outs[i].unsqueeze(0), [img_metas[i]], rescale)
            for i in range(len(img_metas))
        ]
.parametrize('ty,num', sub_table)
_utils.test(arch=[ti.cpu, ti.cuda, ti.vulkan], debug=True)
def test_sub_overflow_i(capfd, ty, num):
    """Check that integer subtraction overflow inside a Taichi kernel prints a
    runtime warning, captured from stdout.

    Parametrized over integer type `ty` and operand `num` (the decorators were
    mangled by extraction -- see the fragments above this function).
    """
    # Backends without overflow detection silently skip the test.
    if (not supports_overflow(ti.lang.impl.current_cfg().arch)):
        return
    # Drain anything already captured so the assertion sees only this kernel's output.
    capfd.readouterr()
    # NOTE(review): `foo` looks like a Taichi kernel; an `@ti.kernel` decorator
    # was likely lost in extraction -- confirm against upstream.
    def foo(num: ty) -> ty:
        a = ty(num)
        b = ty((- num))
        # a - b == 2 * num, which overflows `ty` for large enough `num`.
        return (a - b)
    foo(num)
    # Flush device-side prints before reading the captured output.
    ti.sync()
    captured = capfd.readouterr().out
    assert ('Subtraction overflow detected' in captured)
    assert ('return a - b' in captured)
_model_architecture('transformer_lm', 'transformer_lm_gpt2_big')
def transformer_lm_gpt2_big(args):
    """Architecture preset for GPT-2 "big": 1600-dim embeddings, 48 decoder
    layers, 25 attention heads, GELU activations."""
    defaults = (
        ('decoder_embed_dim', 1600),
        ('decoder_ffn_embed_dim', 6400),
        ('decoder_layers', 48),
        ('decoder_attention_heads', 25),
        ('dropout', 0.1),
        ('attention_dropout', 0.1),
        ('activation_fn', 'gelu'),
    )
    # Only fill in values the caller has not already set on `args`.
    for attr_name, attr_default in defaults:
        setattr(args, attr_name, safe_getattr(args, attr_name, attr_default))
    base_lm_architecture(args)
class HourOfDay(TimeFeature):
    """Encode the hour of day as a single feature scaled into [-0.5, 0.5]."""

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Hours run 0..23, so dividing by 23 maps them onto [0, 1].
        scaled = index.hour / 23.0
        return scaled - 0.5
def readFile(fileName):
    """Print the contents of `fileName` twice (once per read mode), or a
    notice when the file does not exist.

    The original implementation read the file in 'r' mode and again in 'r+'
    mode, printing the content each time; both reads/prints are preserved.
    Returns None in all cases.

    Fixes: `== False` comparison, the function name being shadowed by the
    file handle, and handles leaking if read() raised.
    """
    if not os.path.exists(fileName):
        print('The file is not available in the directory')
        return
    # Context managers close the handles even if read() raises.
    with open(fileName, 'r') as handle:
        print(handle.read())
    # Second pass: 'r+' additionally requires write permission on the file.
    with open(fileName, 'r+') as handle:
        print(handle.read())
    return
def FriendshipGraph(n):
    """Return the friendship graph F_n: n triangles sharing one common vertex.

    The graph has 2*n + 1 vertices; vertex ``2*n`` is the shared hub.
    Raises ValueError for n < 1.
    """
    if (n < 1):
        raise ValueError('n must be a positive integer')
    if (n == 1):
        # F_1 is just a triangle.
        from sage.graphs.generators.basic import CycleGraph
        G = CycleGraph(3)
        G.name('Friendship graph')
        return G
    N = ((2 * n) + 1)
    center = (2 * n)
    G = Graph(N, name='Friendship graph')
    # Each consecutive pair (i, i+1) of outer vertices forms a triangle with the hub.
    for i in range(0, (N - 1), 2):
        G.add_cycle([center, i, (i + 1)])
    # Layout: hub at the origin, outer vertices on a unit circle.
    G.set_pos({center: (0, 0)})
    G._circle_embedding(list(range((N - 1))), radius=1)
    return G
class Test_density(TestCase):
    """Unit tests for the Density helper (mass / radius / density solver)."""

    def test_works_water(self):
        # 1 kg sphere of radius 1 m -> M / (4/3 * pi * r^3) ~= 0.2387 kg/m^3.
        M = (1 * aq.kg)
        R = (1 * aq.m)
        answer = ((0.2387 * aq.kg) / (aq.m ** 3))
        result = Density(M, R).density.rescale((aq.kg / (aq.m ** 3)))
        self.assertAlmostEqual(answer, result, 3)

    def test_works_hd189(self):
        # HD 189733 b literature values in Jupiter masses / radii.
        M = (1.144 * aq.M_j)
        R = (1.138 * aq.R_j)
        answer = ((1.0296 * aq.g) / (aq.cm ** 3))
        result = Density(M, R).density
        self.assertAlmostEqual(answer, result, 3)

    def test_works_jupiter(self):
        # Inverse problem: recover Jupiter's mass from radius + bulk density.
        R = ((6.9911 * (10 ** 7)) * aq.m)
        d = ((1.326 * aq.g) / (aq.cm ** 3))
        result = Density(None, R, d).M.rescale(aq.kg)
        answer = ((1.898 * (10 ** 27)) * aq.kg)
        self.assertAlmostEqual(answer, result, delta=1e+24)

    # NOTE(review): the line below is the remnant of a hypothesis
    # `@given(M=floats(...), R=floats(...))` decorator mangled by extraction;
    # as written it is not valid Python -- restore the decorator upstream.
    (M=floats(0.0001, 10000), R=floats(0.0001, 10000))
    def test_can_derive_other_vars_from_one_calculated(self, M, R):
        # Round-trip: the density computed from (M, R) must reproduce R and M.
        assume(((R > 0) and (M > 0)))
        inf = float('inf')
        assume(((R < inf) and (M < inf)))
        M *= aq.kg
        R *= aq.m
        density = Density(M, R).density
        self.assertAlmostEqual(Density(M, None, density).R, R, 4)
        self.assertAlmostEqual(Density(None, R, density).M, M, 4)
def entity_linking(e_spans, verbose=False, cutoff=500, threshold=0):
    """Score each entity span against the index.

    Returns, per span, the candidate id list produced by
    `e_index.label_scores` (top `cutoff` candidates above `threshold`).
    """
    return [
        e_index.label_scores(span, top=cutoff, threshold=threshold,
                             verbose=verbose, scale=0.3, max_degree=100000)
        for span in e_spans
    ]
class SetVariable(goos.Action):
    """Action that assigns a value -- numeric or the result of evaluating a
    `goos.Function` -- to a plan variable."""

    node_type = 'goos.action.set_variable'

    def __init__(self, var: Variable, value: Function) -> None:
        super().__init__(var)
        self._var = var
        self._value = value
        acceptable = isinstance(value, (numbers.Number, Function))
        if not acceptable:
            raise TypeError('`value` must be either numeric or a `goos.Function`, got {}'.format(value))

    def run(self, plan: goos.OptimizationPlan) -> None:
        """Resolve the value (evaluating Functions against the plan) and store it."""
        value = self._value
        if isinstance(value, Function):
            value = plan.eval_node(value).array
        # Parametrized variables may be written even when frozen.
        plan.set_var_value(self._var, value, check_frozen=(not self._var._is_param))
def list_files(root: str, suffix: str, prefix: bool=False):
    """List the regular files directly under `root` whose names end with `suffix`.

    Args:
        root: directory to scan; '~' is expanded.
        suffix: required file-name ending (str.endswith also accepts a tuple).
        prefix: when truthy, return paths joined with `root` instead of bare
            file names.

    Returns:
        List of matching file names (or joined paths).
    """
    root = os.path.expanduser(root)
    files = [p for p in os.listdir(root)
             if os.path.isfile(os.path.join(root, p)) and p.endswith(suffix)]
    # Fix: `prefix is True` silently ignored truthy non-bool values (e.g. 1);
    # plain truthiness matches the documented intent.
    if prefix:
        files = [os.path.join(root, d) for d in files]
    return files
def tqdm_report_hook():
    """Build a urllib-style reporthook driving a tqdm progress bar.

    Returns a callable with signature (count, block_size, total_size), as
    expected by urllib.request.urlretrieve.
    """
    def report_hook(pbar, count, block_size, total_size):
        # Learn the total lazily: the size may only be known on later calls.
        if pbar.total is None and total_size:
            pbar.total = total_size
        downloaded = count * block_size
        # tqdm wants increments, not absolute byte counts.
        pbar.update(downloaded - pbar.n)

    return partial(report_hook, tqdm(total=None))
def get_multiclass_recall(preds, y_label, n_classes):
    """Per-class recall: for each class c in [0, n_classes), the fraction of
    samples with true label c that were predicted as c.

    Returns a dict mapping class -> [recall] (or [] when the class has no
    ground-truth samples).
    """
    per_class = {}
    for cls in range(n_classes):
        cls_preds = preds[y_label == cls]
        if cls_preds.size == 0:
            # No ground-truth samples for this class.
            per_class[cls] = []
        else:
            per_class[cls] = [np.mean(cls_preds == cls)]
    return per_class
class AutoDirect(AutoFallbackSolver):
    """Automatic direct linear solver.

    Declares an ordered list of (solver name, kwargs) candidates -- MUMPS,
    then SciPy UMFPACK, then SciPy SuperLU. The fallback logic lives in
    AutoFallbackSolver; presumably the first available candidate is used --
    confirm in the base class.
    """
    name = 'ls.auto_direct'
    # Ordered candidate solvers tried by AutoFallbackSolver.
    _ls_solvers = [('ls.mumps', {}), ('ls.scipy_umfpack', {}), ('ls.scipy_superlu', {})]
def load_state_dict(model, state_dict, prefix='', ignore_missing='relative_position_index'):
    """Non-strict recursive state-dict loading with reporting.

    Loads `state_dict` into `model` (starting at `prefix`), then prints the
    missing keys (except those matching any '|'-separated substring in
    `ignore_missing`), the unexpected keys, and any error messages.
    """
    missing_keys = []
    unexpected_keys = []
    error_msgs = []
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    def _recurse(module, mod_prefix=''):
        # Per-module metadata is keyed by the prefix without its trailing dot.
        local_meta = {} if metadata is None else metadata.get(mod_prefix[:(- 1)], {})
        module._load_from_state_dict(state_dict, mod_prefix, local_meta, True, missing_keys, unexpected_keys, error_msgs)
        for child_name, child in module._modules.items():
            if child is not None:
                _recurse(child, mod_prefix + child_name + '.')

    _recurse(model, mod_prefix=prefix)

    # Split missing keys into "warn" and "deliberately ignored" buckets.
    patterns = ignore_missing.split('|')
    warn_missing_keys = []
    ignore_missing_keys = []
    for key in missing_keys:
        if any(pattern in key for pattern in patterns):
            ignore_missing_keys.append(key)
        else:
            warn_missing_keys.append(key)
    missing_keys = warn_missing_keys

    if missing_keys:
        print('Weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, missing_keys))
    if unexpected_keys:
        print('Weights from pretrained model not used in {}: {}'.format(model.__class__.__name__, unexpected_keys))
    if ignore_missing_keys:
        print('Ignored weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, ignore_missing_keys))
    if error_msgs:
        print('\n'.join(error_msgs))
def vgg_19(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.5, spatial_squeeze=True, scope='vgg_19', fc_conv_padding='VALID', global_pool=False):
    """VGG-19 (TF-slim), with the fully-connected layers expressed as convolutions.

    Args:
        inputs: input image batch (NHWC assumed by slim.conv2d defaults -- confirm).
        num_classes: classifier width; falsy -> return fc7 features instead of logits.
        is_training: enables the dropout layers.
        dropout_keep_prob: keep probability for both dropout layers.
        spatial_squeeze: squeeze the spatial dims of the logits.
        scope: variable scope name (shared across calls via tf.AUTO_REUSE).
        fc_conv_padding: padding for the 7x7 'fc6' convolution.
        global_pool: average over spatial dims before the classifier.

    Returns:
        (net, end_points): logits (or features) plus a dict of intermediate
        activations collected from the slim ops.
    """
    with tf.variable_scope(scope, 'vgg_19', [inputs], reuse=tf.AUTO_REUSE) as sc:
        end_points_collection = (sc.original_name_scope + '_end_points')
        # Every conv/fc/pool op records its output into the collection above.
        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d], outputs_collections=end_points_collection):
            net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 4, slim.conv2d, 256, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv5')
            net = slim.max_pool2d(net, [2, 2], scope='pool5')
            # 'fc6'/'fc7' are the classic FC layers implemented as convolutions.
            net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
            net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout6')
            net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if global_pool:
                net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
                end_points['global_pool'] = net
            if num_classes:
                net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout7')
                net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='fc8')
                if spatial_squeeze:
                    net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
                end_points[(sc.name + '/fc8')] = net
            return (net, end_points)
def pytest_collection_modifyitems(config, items):
    """Strip doctest globals and, on numpy >= 2, mark all doctests skipped
    (NEP 51 changed scalar reprs, so expected doctest outputs no longer match)."""
    skip_marker = None
    if np_base_version >= parse_version('2'):
        skip_marker = pytest.mark.skip(reason='Due to NEP 51 numpy scalar repr has changed in numpy 2')
    for item in items:
        if not isinstance(item, DoctestItem):
            continue
        # Doctests must not leak module globals between files.
        item.dtest.globs = {}
        if skip_marker is not None:
            item.add_marker(skip_marker)
def get_within_circle_constraint(r: float) -> Callable[([List[float]], float)]:
    """Return the constraint g(x, y) = r^2 - x^2 - y^2.

    The returned callable is >= 0 exactly when the point lies inside (or on)
    the circle of radius `r` centered at the origin.
    """
    def _constraint(point: List[float]) -> float:
        x, y = point
        return np.square(r) - np.square(x) - np.square(y)
    return _constraint
_fusion('linear_sum')
class LinearSum(nn.Module):
    """Two-stream fusion module: project both inputs to a shared `mm_dim`,
    add them, then project to `output_dim`, with optional input/output
    activations, dropout stages, and signed-sqrt L2 normalization."""

    def __init__(self, input_dims, output_dim, mm_dim=1200, activ_input='relu', activ_output='relu', normalize=False, dropout_input=0.0, dropout_pre_lin=0.0, dropout_output=0.0):
        super().__init__()
        self.input_dims = input_dims
        self.output_dim = output_dim
        self.mm_dim = mm_dim
        self.activ_input = activ_input
        self.activ_output = activ_output
        self.normalize = normalize
        self.dropout_input = dropout_input
        self.dropout_pre_lin = dropout_pre_lin
        self.dropout_output = dropout_output
        # One projection per modality, plus the output head.
        self.linear0 = nn.Linear(input_dims[0], mm_dim)
        self.linear1 = nn.Linear(input_dims[1], mm_dim)
        self.linear_out = nn.Linear(mm_dim, output_dim)
        self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
        log_class_usage('Fusion', self.__class__)

    def forward(self, x):
        """`x` is a pair of tensors; returns the fused representation."""
        left = self.linear0(x[0])
        right = self.linear1(x[1])
        if self.activ_input:
            # Activation is looked up by name on torch.nn.functional.
            activation = getattr(F, self.activ_input)
            left = activation(left)
            right = activation(right)
        if self.dropout_input > 0:
            left = F.dropout(left, p=self.dropout_input, training=self.training)
            right = F.dropout(right, p=self.dropout_input, training=self.training)
        fused = left + right
        if self.normalize:
            # Signed square root, then L2 normalization.
            fused = torch.sqrt(F.relu(fused)) - torch.sqrt(F.relu(-fused))
            fused = F.normalize(fused, p=2)
        if self.dropout_pre_lin > 0:
            fused = F.dropout(fused, p=self.dropout_pre_lin, training=self.training)
        fused = self.linear_out(fused)
        if self.activ_output:
            fused = getattr(F, self.activ_output)(fused)
        if self.dropout_output > 0:
            fused = F.dropout(fused, p=self.dropout_output, training=self.training)
        return fused
def register_Ns3MmWaveMacCschedSapProvider_methods(root_module, cls):
    """Register constructors and the pure-virtual Csched* request methods on
    the MmWaveMacCschedSapProvider binding class."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::MmWaveMacCschedSapProvider const &', 'arg0')])
    # Every request method takes '<Name>Parameters const &' and returns void.
    for method_name in ('CschedCellConfigReq', 'CschedLcConfigReq', 'CschedLcReleaseReq', 'CschedUeConfigReq', 'CschedUeReleaseReq'):
        param_type = 'ns3::MmWaveMacCschedSapProvider::{}Parameters const &'.format(method_name)
        cls.add_method(method_name, 'void', [param(param_type, 'params')], is_pure_virtual=True, is_virtual=True)
    return
class NormalBlock(Block):
    """A plain board block: white color, small negative default value."""

    def __init__(self, x=0, y=0, h=1, w=1, value=(- 0.1)):
        super().__init__(x, y, h, w)
        self.color = '#FFFFFFFF'
        self.name = 'NormalBlock'
        self.value = value
def create_pipeline(context, mode, exclude_classes=()):
    """Assemble the ordered Cython compilation pipeline for `mode`.

    Args:
        context: compilation context (directives, scope registry).
        mode: 'pyx' (implementation file), 'py' (pure-Python file, aligned
            against matching .pxd declarations) or 'pxd' (declarations file).
        exclude_classes: transform classes to drop from the pipeline.

    Returns:
        List of transform instances in execution order. The order is
        load-bearing (declarations before expressions, control flow before
        overflow marking, etc.) -- do not reorder stages casually.
    """
    assert (mode in ('pyx', 'py', 'pxd'))
    from .Visitor import PrintTree
    from .ParseTreeTransforms import WithTransform, NormalizeTree, PostParse, PxdPostParse
    from .ParseTreeTransforms import ForwardDeclareTypes, InjectGilHandling, AnalyseDeclarationsTransform
    from .ParseTreeTransforms import AnalyseExpressionsTransform, FindInvalidUseOfFusedTypes
    from .ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform
    from .ParseTreeTransforms import TrackNumpyAttributes, InterpretCompilerDirectives, TransformBuiltinMethods
    from .ParseTreeTransforms import ExpandInplaceOperators, ParallelRangeTransform
    from .ParseTreeTransforms import CalculateQualifiedNamesTransform
    from .TypeInference import MarkParallelAssignments, MarkOverflowingArithmetic
    from .ParseTreeTransforms import AdjustDefByDirectives, AlignFunctionDefinitions
    from .ParseTreeTransforms import RemoveUnreachableCode, GilCheck
    from .FlowControl import ControlFlowAnalysis
    from .AnalysedTreeTransforms import AutoTestDictTransform
    from .AutoDocTransforms import EmbedSignature
    from .Optimize import FlattenInListTransform, SwitchTransform, IterationTransform
    from .Optimize import EarlyReplaceBuiltinCalls, OptimizeBuiltinCalls
    from .Optimize import InlineDefNodeCalls
    from .Optimize import ConstantFolding, FinalOptimizePhase
    from .Optimize import DropRefcountingTransform
    from .Optimize import ConsolidateOverflowCheck
    from .Buffer import IntroduceBufferAuxiliaryVars
    from .ModuleNode import check_c_declarations, check_c_declarations_pxd
    # pxd files get their own post-parse step and C-declaration check.
    if (mode == 'pxd'):
        _check_c_declarations = check_c_declarations_pxd
        _specific_post_parse = PxdPostParse(context)
    else:
        _check_c_declarations = check_c_declarations
        _specific_post_parse = None
    # Pure-Python mode aligns .py defs against any matching .pxd declarations.
    if (mode == 'py'):
        _align_function_definitions = AlignFunctionDefinitions(context)
    else:
        _align_function_definitions = None
    # NOTE(review): unused optional stages stay as None entries in this list;
    # presumably the pipeline runner skips None stages -- confirm.
    stages = [NormalizeTree(context), PostParse(context), _specific_post_parse, TrackNumpyAttributes(), InterpretCompilerDirectives(context, context.compiler_directives), ParallelRangeTransform(context), AdjustDefByDirectives(context), WithTransform(context), MarkClosureVisitor(context), _align_function_definitions, RemoveUnreachableCode(context), ConstantFolding(), FlattenInListTransform(), DecoratorTransform(context), ForwardDeclareTypes(context), InjectGilHandling(), AnalyseDeclarationsTransform(context), AutoTestDictTransform(context), EmbedSignature(context), EarlyReplaceBuiltinCalls(context), TransformBuiltinMethods(context), MarkParallelAssignments(context), ControlFlowAnalysis(context), RemoveUnreachableCode(context), MarkOverflowingArithmetic(context), IntroduceBufferAuxiliaryVars(context), _check_c_declarations, InlineDefNodeCalls(context), AnalyseExpressionsTransform(context), FindInvalidUseOfFusedTypes(context), ExpandInplaceOperators(context), IterationTransform(context), SwitchTransform(context), OptimizeBuiltinCalls(context), CreateClosureClasses(context), CalculateQualifiedNamesTransform(context), ConsolidateOverflowCheck(context), DropRefcountingTransform(), FinalOptimizePhase(context), GilCheck()]
    filtered_stages = []
    for s in stages:
        if (s.__class__ not in exclude_classes):
            filtered_stages.append(s)
    return filtered_stages
_task('laser')
class LaserTask(LegacyFairseqTask):
    """Multilingual LASER training task.

    Reads a json config describing (src, tgt) indexed-dataset pairs per
    split and wraps each pair in a MultitaskDatasetWrapper, with optional
    integer upsampling via per-dataset `sample` factors or automatic
    temperature-based weighting (`--weighting-alpha`).
    """

    # NOTE(review): `add_args` takes only `parser` -- the @staticmethod
    # decorator was likely lost in extraction; confirm against upstream.
    def add_args(parser):
        """Register task-specific command-line arguments."""
        parser.add_argument('configfile', metavar='PATH', help='dataset configuration file in json')
        parser.add_argument('--weighting-alpha', type=float, default=None, help='alpha for automatic weighting')
        parser.add_argument('--raw-text', action='store_true', help='load raw text dataset')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left (default: True)')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left (default: False)')
        # The positions options may already exist when tasks share a parser.
        try:
            parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence')
            parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')
        except ArgumentError:
            pass

    def __init__(self, args, config, src_dictionary, tgt_dictionary, num_tasks):
        super().__init__(args)
        self.config = config
        self.src_dictionary = src_dictionary
        self.tgt_dictionary = tgt_dictionary
        self.num_tasks = num_tasks

    # NOTE(review): `setup_task` takes `cls` but no @classmethod decorator is
    # visible -- likely lost in extraction; confirm against upstream.
    def setup_task(cls, args, **kwargs):
        """Build the task from `args.configfile` (json) and the vocabularies it names."""
        with open(args.configfile, 'r') as f:
            config = json.load(f)
        # Task ids are assumed dense, so the count is max id + 1.
        num_tasks = (max((dataset['id'] for dataset in config['train'])) + 1)
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)
        src_dictionary = Dictionary.load(config['src_vocab'])
        tgt_dictionary = Dictionary.load(config['tgt_vocab'])
        logger.info('| src Dictionary {} : {} types'.format(config['src_vocab'], len(src_dictionary)))
        logger.info('| tgt Dictionary {} : {} types'.format(config['tgt_vocab'], len(tgt_dictionary)))
        return cls(args, config, src_dictionary, tgt_dictionary, num_tasks)

    def build_model(self, args):
        """Delegate model construction to the fairseq model registry."""
        model = models.build_model(args, self)
        return model

    def dataset(self, split):
        """Return an already-loaded split or raise KeyError."""
        if (split not in self.datasets):
            raise KeyError(('Dataset not loaded: ' + split))
        return self.datasets[split]

    def load_dataset(self, split, epoch=1, **kwargs):
        """Load all (corpus, language-pair) datasets for `split`.

        Applies explicit per-dataset `sample` factors, or automatic
        temperature weighting when --weighting-alpha is set, by repeating
        datasets an integer number of times (keys suffixed with '-up').
        """
        def indexed_dataset(path, dictionary):
            # Only binarized (indexed) data is supported here.
            if self.args.raw_text:
                raise Exception('Unable to handle raw text.')
            dataset = IndexedDataset(path, fix_lua_indexing=True)
            return dataset
        pair_datasets = OrderedDict()
        # 'valid' is intentionally registered as an empty OrderedDict.
        if (split == 'valid'):
            self.datasets[split] = pair_datasets
            return
        if (split not in self.config):
            raise FileNotFoundError('Dataset not found in config file: {}'.format(split))
        size_by_corpus = defaultdict(int)
        size_sum = 0
        size_sum_with_subsampling = 0
        init_pair_datasets = {}
        for dataset_config in self.config[split]:
            # Keys look like '<corpus>-<lang_pair>', derived from the src path.
            src_path = os.path.dirname(dataset_config['src'])
            corpus_name = src_path.split('/')[(- 2)]
            language_pair_name = src_path.split('/')[(- 1)]
            pair_datasets_key = ((corpus_name + '-') + language_pair_name)
            logger.info(f'loading... {pair_datasets_key}')
            if ('src' in dataset_config):
                src_dataset = indexed_dataset(dataset_config['src'], self.src_dictionary)
            else:
                src_dataset = None
            if ('tgt' in dataset_config):
                tgt_dataset = indexed_dataset(dataset_config['tgt'], self.tgt_dictionary)
            else:
                tgt_dataset = None
            dataset = LanguagePairDataset(src_dataset, src_dataset.sizes, self.src_dictionary, tgt_dataset, tgt_dataset.sizes, self.tgt_dictionary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target)
            if (pair_datasets_key in init_pair_datasets):
                logger.warning(f'Ignoring already added {pair_datasets_key}. Consider using `sample` key in order to upsample.')
            else:
                init_pair_datasets[pair_datasets_key] = {'dataset': dataset, 'sample': dataset_config.get('sample', None), 'id': dataset_config.get('id', None), 'len': len(dataset)}
        length_sum = 0
        weighted_freqs_sum = 0
        freq_per_dataset = {}
        vmax = 0
        vmin = 1
        weighted_freq_per_dataset = {}
        # Temperature-based sampling (only for datasets without an explicit
        # `sample`): normalized freq ** alpha; the upsampling factor below is
        # vmax / weighted_freq so the most frequent dataset keeps factor 1.
        if self.args.weighting_alpha:
            for key in init_pair_datasets:
                if (init_pair_datasets[key]['sample'] is None):
                    length_sum += len(init_pair_datasets[key]['dataset'])
            for key in init_pair_datasets:
                if (init_pair_datasets[key]['sample'] is None):
                    val = (float(init_pair_datasets[key]['len']) / length_sum)
                    freq_per_dataset[key] = val
                    weighted_freqs_sum += (val ** self.args.weighting_alpha)
            for key in freq_per_dataset:
                val = ((freq_per_dataset[key] ** self.args.weighting_alpha) / weighted_freqs_sum)
                vmin = min(vmin, val)
                vmax = max(vmax, val)
                weighted_freq_per_dataset[key] = val
        for pair_datasets_key in init_pair_datasets:
            dataset_config = init_pair_datasets[pair_datasets_key]
            dataset = dataset_config['dataset']
            sample = dataset_config['sample']
            if (sample is None):
                sample = 1.0
            if (pair_datasets_key in weighted_freq_per_dataset):
                w = (vmax / weighted_freq_per_dataset[pair_datasets_key])
                sample = w
            sample = round(sample)
            initial_sample = sample
            initial_pair_datasets_key = pair_datasets_key
            # Integer upsampling: add the dataset `sample` times, suffixing
            # '-up' to keep the OrderedDict keys unique.
            while (sample >= 1.0):
                assert (pair_datasets_key not in pair_datasets), f'{pair_datasets_key} already in'
                size_sum_with_subsampling += len(dataset)
                pair_datasets[pair_datasets_key] = MultitaskDatasetWrapper(dataset, dataset_config.get('id', 0), 1.0, name=pair_datasets_key)
                size_sum += len(dataset)
                sample -= 1.0
                pair_datasets_key += '-up'
            assert (sample < 1e-06), f'sample remains > 0 {pair_datasets_key}'
            logger.info(f'added pair {initial_pair_datasets_key} length {len(dataset)} new_length = {(len(dataset) * initial_sample)}')
            # NOTE(review): `corpus_name` here is left over from the config
            # loop above (last corpus seen), so this tally may misattribute
            # sizes -- confirm intent upstream.
            size_by_corpus[corpus_name] += len(dataset)
        self.datasets[split] = pair_datasets
        logger.info(f'Datasets number = {len(self.datasets[split])} size = {size_sum} size_sum_with_subsampling = {size_sum_with_subsampling}')

    # NOTE(review): likely a @property in the original (fairseq task convention).
    def source_dictionary(self):
        return self.src_dictionary

    # NOTE(review): likely a @property in the original (fairseq task convention).
    def target_dictionary(self):
        return self.tgt_dictionary

    def get_batch_iterator(self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, data_buffer_size=0, disable_iterator_cache=False):
        """Build a MultidatasetEpochBatchIterator over an OrderedDict of
        FairseqDatasets, batching each sub-dataset independently."""
        assert isinstance(dataset, OrderedDict)
        assert len(dataset)
        assert isinstance(dataset[next(iter(dataset))], FairseqDataset)
        for (_, dt) in dataset.items():
            dt.set_epoch(epoch)
        indices = OrderedDict()
        batch_sampler = OrderedDict()
        # Seed with (seed + epoch): shuffling differs per epoch but is reproducible.
        with data_utils.numpy_seed((seed + epoch)):
            for (key, dt) in dataset.items():
                logger.info(f'  ordered_indices {key}')
                indices[key] = dt.ordered_indices()
            if (max_positions is not None):
                for (key, dt) in dataset.items():
                    logger.info(f'  filter_by_size {key}')
                    (indices[key], ignored) = dt.filter_indices_by_size(indices[key], max_positions)
            for (key, dt) in dataset.items():
                logger.info(f'  batch_by_size {key}')
                batch_sampler[key] = data_utils.batch_by_size(indices[key], dt.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple)
        epoch_iter = MultidatasetEpochBatchIterator(dataset=dataset, batch_sampler=batch_sampler, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch)
        return epoch_iter
def main():
    """Map between phone unicode codes and words using a lexicon.

    With --disamb, delegates to PhoneMapper and saves the result; otherwise
    decodes each segment's unicode string back to phones and then to words
    (first lexicon match per phone concatenation, '[UNK]' when no word has
    that phone sequence).

    Fixes: `eval` on file contents replaced with ast.literal_eval (the input
    is expected to be a Python literal dict, so arbitrary code in the file
    can no longer execute), the input handle is now closed, and the loop
    variable no longer reuses the name `unicode`.
    """
    from ast import literal_eval
    args_parser = ArgumentParser()
    args_parser.add_argument('--lexicon', required=True)
    args_parser.add_argument('--input', required=True)
    args_parser.add_argument('--disamb_map', required=True)
    args_parser.add_argument('--disamb', action='store_true')
    args_parser.add_argument('--output', required=True)
    args = args_parser.parse_args()
    lexicon_fn = args.lexicon
    disamb_map_fn = args.disamb_map
    scoring_words = args.input
    map_dict = parse_vocab(disamb_map_fn)
    with open(scoring_words, 'r') as seg_file:
        segments = literal_eval(seg_file.read())
    # Invert phone -> unicode into unicode -> phone.
    unicode_to_phone = {value: key for (key, value) in map_dict.items()}
    phone_mapper = PhoneUtils.PhoneMapper(lexicon_filename=lexicon_fn, phone_unicode_map_filename=disamb_map_fn)
    if args.disamb:
        result_disamb = phone_mapper.map_phones_to_word_on_corpus(segments)
        PhoneUtils.save_corpus_segments_to_file(args.output, result_disamb)
    else:
        result = {}
        for (seg_tag, word_seq) in segments.items():
            phones_seq = ''
            for code in word_seq:
                phones_seq += unicode_to_phone[code]
            result[seg_tag] = phones_seq
        lex = Lexicon(lexicon_fn)
        vocab_dict = {}
        words_map = {}
        # Build phone-concatenation -> [words] from each lemma's first pronunciation.
        for word in lex.lemmas:
            if len(lex.lemmas[word]['phons']) > 0:
                phonemes = lex.lemmas[word]['phons'][0]['phon']
                phone_concat = ''.join(phonemes.split())
                vocab_dict[word] = phone_concat
                words_map.setdefault(phone_concat, []).append(word)
        for (k, v) in result.items():
            list_phones = v.split()
            new_value = ''
            for phones in list_phones:
                if phones in words_map:
                    new_value += (' ' + words_map[phones][0])
                else:
                    new_value += (' ' + '[UNK]')
            result[k] = new_value
        PhoneUtils.save_corpus_segments_to_file(args.output, result)
def make_update_fn():
    """Build one PPO update step: rollout -> GAE -> minibatched epochs.

    Closes over `env`, `forward`, `optimizer`, `args`, `num_minibatches`,
    `auto_reset`, and `Transition` from the enclosing scope. The returned
    function maps runner_state = (params, opt_state, env_state, last_obs, rng)
    to (runner_state, loss_info) and is written for jax.lax scanning/jit.
    """
    def _update_step(runner_state):
        # Vectorized env step with automatic episode resets.
        step_fn = jax.vmap(auto_reset(env.step, env.init))
        def _env_step(runner_state, unused):
            (params, opt_state, env_state, last_obs, rng) = runner_state
            (rng, _rng) = jax.random.split(rng)
            (logits, value) = forward.apply(params, last_obs)
            pi = distrax.Categorical(logits=logits)
            action = pi.sample(seed=_rng)
            log_prob = pi.log_prob(action)
            (rng, _rng) = jax.random.split(rng)
            # One key per parallel environment.
            keys = jax.random.split(_rng, env_state.observation.shape[0])
            env_state = step_fn(env_state, action, keys)
            transition = Transition(env_state.terminated, action, value, jnp.squeeze(env_state.rewards), log_prob, last_obs)
            runner_state = (params, opt_state, env_state, env_state.observation, rng)
            return (runner_state, transition)
        # Collect a fixed-length rollout of num_steps transitions.
        (runner_state, traj_batch) = jax.lax.scan(_env_step, runner_state, None, args.num_steps)
        (params, opt_state, env_state, last_obs, rng) = runner_state
        (_, last_val) = forward.apply(params, last_obs)
        def _calculate_gae(traj_batch, last_val):
            # Generalized Advantage Estimation, scanned backwards in time.
            def _get_advantages(gae_and_next_value, transition):
                (gae, next_value) = gae_and_next_value
                (done, value, reward) = (transition.done, transition.value, transition.reward)
                delta = ((reward + ((args.gamma * next_value) * (1 - done))) - value)
                gae = (delta + (((args.gamma * args.gae_lambda) * (1 - done)) * gae))
                return ((gae, value), gae)
            (_, advantages) = jax.lax.scan(_get_advantages, (jnp.zeros_like(last_val), last_val), traj_batch, reverse=True, unroll=16)
            # Value targets are advantages + value predictions.
            return (advantages, (advantages + traj_batch.value))
        (advantages, targets) = _calculate_gae(traj_batch, last_val)
        def _update_epoch(update_state, unused):
            def _update_minbatch(tup, batch_info):
                (params, opt_state) = tup
                (traj_batch, advantages, targets) = batch_info
                def _loss_fn(params, traj_batch, gae, targets):
                    (logits, value) = forward.apply(params, traj_batch.obs)
                    pi = distrax.Categorical(logits=logits)
                    log_prob = pi.log_prob(traj_batch.action)
                    # Clipped value loss (PPO2 style).
                    value_pred_clipped = (traj_batch.value + (value - traj_batch.value).clip((- args.clip_eps), args.clip_eps))
                    value_losses = jnp.square((value - targets))
                    value_losses_clipped = jnp.square((value_pred_clipped - targets))
                    value_loss = (0.5 * jnp.maximum(value_losses, value_losses_clipped).mean())
                    # Clipped surrogate policy objective, with per-minibatch
                    # advantage normalization.
                    ratio = jnp.exp((log_prob - traj_batch.log_prob))
                    gae = ((gae - gae.mean()) / (gae.std() + 1e-08))
                    loss_actor1 = (ratio * gae)
                    loss_actor2 = (jnp.clip(ratio, (1.0 - args.clip_eps), (1.0 + args.clip_eps)) * gae)
                    loss_actor = (- jnp.minimum(loss_actor1, loss_actor2))
                    loss_actor = loss_actor.mean()
                    entropy = pi.entropy().mean()
                    total_loss = ((loss_actor + (args.vf_coef * value_loss)) - (args.ent_coef * entropy))
                    return (total_loss, (value_loss, loss_actor, entropy))
                grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
                (total_loss, grads) = grad_fn(params, traj_batch, advantages, targets)
                (updates, opt_state) = optimizer.update(grads, opt_state)
                params = optax.apply_updates(params, updates)
                return ((params, opt_state), total_loss)
            (params, opt_state, traj_batch, advantages, targets, rng) = update_state
            (rng, _rng) = jax.random.split(rng)
            batch_size = (args.minibatch_size * num_minibatches)
            assert (batch_size == (args.num_steps * args.num_envs)), 'batch size must be equal to number of steps * number of envs'
            # Flatten (time, env) into one batch axis, shuffle, then split
            # into minibatches for the scan below.
            permutation = jax.random.permutation(_rng, batch_size)
            batch = (traj_batch, advantages, targets)
            batch = jax.tree_util.tree_map((lambda x: x.reshape(((batch_size,) + x.shape[2:]))), batch)
            shuffled_batch = jax.tree_util.tree_map((lambda x: jnp.take(x, permutation, axis=0)), batch)
            minibatches = jax.tree_util.tree_map((lambda x: jnp.reshape(x, ([num_minibatches, (- 1)] + list(x.shape[1:])))), shuffled_batch)
            ((params, opt_state), total_loss) = jax.lax.scan(_update_minbatch, (params, opt_state), minibatches)
            update_state = (params, opt_state, traj_batch, advantages, targets, rng)
            return (update_state, total_loss)
        update_state = (params, opt_state, traj_batch, advantages, targets, rng)
        (update_state, loss_info) = jax.lax.scan(_update_epoch, update_state, None, args.update_epochs)
        (params, opt_state, _, _, _, rng) = update_state
        runner_state = (params, opt_state, env_state, last_obs, rng)
        return (runner_state, loss_info)
    return _update_step
class Params(MutableMapping):
    """Mapping wrapper over a parameter dictionary.

    Tracks a dotted `history` describing where nested parameter blocks came
    from, logs parameter accesses, and turns missing *required* keys into
    ConfigurationError instead of KeyError.
    """

    # Sentinel for "no default given", so that None remains a usable default.
    DEFAULT = object()

    def __init__(self, params: Dict[(str, Any)], history: str='', loading_from_archive: bool=False, files_to_archive: Dict[(str, str)]=None) -> None:
        self.params = _replace_none(params)
        self.history = history
        self.loading_from_archive = loading_from_archive
        # None default avoids the shared-mutable-default pitfall.
        self.files_to_archive = ({} if (files_to_archive is None) else files_to_archive)

    def add_file_to_archive(self, name: str) -> None:
        """Remember the file referenced by params[name] for later archiving."""
        if (not self.loading_from_archive):
            self.files_to_archive[f'{self.history}{name}'] = self.get(name)

    def pop(self, key: str, default: Any=DEFAULT):
        """Remove and return `key`; without a default, a missing key raises
        ConfigurationError."""
        if (default is self.DEFAULT):
            try:
                value = self.params.pop(key)
            except KeyError:
                raise ConfigurationError('key "{}" is required at location "{}"'.format(key, self.history))
        else:
            value = self.params.pop(key, default)
        if (not isinstance(value, dict)):
            logger.info((((self.history + key) + ' = ') + str(value)))
        return self._check_is_dict(key, value)

    def get(self, key: str, default: Any=DEFAULT):
        """Return `key`; without a default, a missing key raises
        ConfigurationError."""
        if (default is self.DEFAULT):
            try:
                # Bug fix: this used `self.params.get(key)`, which returns
                # None instead of raising KeyError, so the required-key error
                # below was unreachable and missing keys yielded None.
                value = self.params[key]
            except KeyError:
                raise ConfigurationError('key "{}" is required at location "{}"'.format(key, self.history))
        else:
            value = self.params.get(key, default)
        return self._check_is_dict(key, value)

    def pop_choice(self, key: str, choices: List[Any], default_to_first_choice: bool=False):
        """Pop `key` and validate that its value is one of `choices`."""
        default = (choices[0] if default_to_first_choice else self.DEFAULT)
        value = self.pop(key, default)
        if (value not in choices):
            key_str = (self.history + key)
            message = ('%s not in acceptable choices for %s: %s' % (value, key_str, str(choices)))
            raise ConfigurationError(message)
        return value

    def as_dict(self, quiet=False):
        """Return the raw dict; unless `quiet`, log every leaf value first."""
        if quiet:
            return self.params

        def log_recursively(parameters, history):
            for (key, value) in parameters.items():
                if isinstance(value, dict):
                    new_local_history = ((history + key) + '.')
                    log_recursively(value, new_local_history)
                else:
                    logger.info((((history + key) + ' = ') + str(value)))
        logger.info('Converting Params object to dict; logging of default values will not occur when dictionary parameters are used subsequently.')
        logger.info('CURRENTLY DEFINED PARAMETERS: ')
        log_recursively(self.params, self.history)
        return self.params

    def duplicate(self) -> 'Params':
        """Deep-copy the parameters into a fresh Params.

        NOTE(review): history/archive flags are intentionally not copied,
        matching the original behavior.
        """
        return Params(copy.deepcopy(self.params))

    def assert_empty(self, class_name: str):
        """Raise if any parameters were left unconsumed by `class_name`."""
        if self.params:
            raise ConfigurationError('Extra parameters passed to {}: {}'.format(class_name, self.params))

    def __getitem__(self, key):
        if (key in self.params):
            return self._check_is_dict(key, self.params[key])
        else:
            raise KeyError

    def __setitem__(self, key, value):
        self.params[key] = value

    def __delitem__(self, key):
        del self.params[key]

    def __iter__(self):
        return iter(self.params)

    def __len__(self):
        return len(self.params)

    def _check_is_dict(self, new_history, value):
        # Wrap nested dicts (and dicts inside lists) as Params so history and
        # archive bookkeeping propagate down the tree.
        if isinstance(value, dict):
            new_history = ((self.history + new_history) + '.')
            return Params(value, history=new_history, loading_from_archive=self.loading_from_archive, files_to_archive=self.files_to_archive)
        if isinstance(value, list):
            value = [self._check_is_dict((new_history + '.list'), v) for v in value]
        return value

    @staticmethod
    def from_file(params_file: str, params_overrides: str='') -> 'Params':
        """Load Params from a HOCON file, applying `params_overrides` (a HOCON
        string) on top.

        Bug fix: declared @staticmethod -- the original definition had no
        self/cls parameter, so calls on an instance would have misbound the
        first argument.
        """
        params_file = cached_path(params_file)
        file_dict = pyhocon.ConfigFactory.parse_file(params_file)
        overrides_dict = pyhocon.ConfigFactory.parse_string(params_overrides)
        param_dict = overrides_dict.with_fallback(file_dict)
        return Params(param_dict)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.