# Collected Python code samples (dataset table header removed during cleanup).
class OnnxOpOptionalAttrGetter(object):
    """Lookup table of default values for optional ONNX operator attributes.

    Maps an ONNX ``op_type`` string to a dict of ``{attribute_name: default}``
    following the ONNX operator schemas. Ops whose optional attributes have no
    static default (e.g. MaxUnpool) map to an empty dict.
    """

    def __init__(self):
        # Defaults mirror the ONNX operator schema definitions.
        self._optional_attrs = {
            'ArgMax': {'axis': 0, 'keepdims': 1, 'select_last_index': 0},
            'ArgMin': {'axis': 0, 'keepdims': 1, 'select_last_index': 0},
            'AveragePool': {'auto_pad': 'NOTSET', 'ceil_mode': 0, 'count_include_pad': 0},
            'BatchNormalization': {'epsilon': 1e-05, 'momentum': 0.9, 'training_mode': 0},
            'Celu': {'alpha': 1.0},
            'ConcatFromSequence': {'new_axis': 0},
            'Conv': {'auto_pad': 'NOTSET', 'group': 1},
            'ConvInteger': {'auto_pad': 'NOTSET', 'group': 1},
            'ConvTranspose': {'auto_pad': 'NOTSET', 'group': 1},
            'CumSum': {'exclusive': 0, 'reverse': 0},
            'DepthToSpace': {'mode': 'DCR'},
            'DequantizeLinear': {'axis': 1},
            'Elu': {'alpha': 1.0},
            'EyeLike': {'k': 0},
            'Flatten': {'axis': 1},
            'GRU': {'direction': 'forward', 'layout': 0, 'linear_before_reset': 0},
            'Gather': {'axis': 0},
            'GatherElements': {'axis': 0},
            'GatherND': {'batch_dims': 0},
            'Gemm': {'alpha': 1.0, 'beta': 1.0, 'transA': 0, 'transB': 0},
            'GlobalLpPool': {'p': 2},
            'GridSample': {'align_corners': 0, 'mode': 'bilinear', 'padding_mode': 'zeros'},
            'GroupNormalization': {'epsilon': 1e-05},
            'HardSigmoid': {'alpha': 0.2, 'beta': 0.5},
            'Hardmax': {'axis': -1},
            'InstanceNormalization': {'epsilon': 1e-05},
            'IsInf': {'detect_negative': 1, 'detect_positive': 1},
            'LRN': {'alpha': 0.0001, 'beta': 0.75, 'bias': 1.0},
            'LSTM': {'direction': 'forward', 'input_forget': 0, 'layout': 0},
            'LayerNormalization': {'axis': -1, 'epsilon': 1e-05, 'stash_type': -1},
            'LeakyRelu': {'alpha': 0.01},
            'LogSoftmax': {'axis': -1},
            'LpNormalization': {'axis': -1, 'p': 2},
            'LpPool': {'auto_pad': 'NOTSET', 'ceil_mode': 0, 'p': 2},
            'MaxPool': {'auto_pad': 'NOTSET', 'ceil_mode': 0, 'storage_order': 0},
            'MaxRoiPool': {'spatial_scale': 1.0},
            'MaxUnpool': {},
            'MeanVarianceNormalization': {'axes': [0, 2, 3]},
            'NonMaxSuppression': {'center_point_box': 0},
            'Pad': {'mode': 'constant'},
            'QLinearConv': {'auto_pad': 'NOTSET', 'group': 1},
            'QuantizeLinear': {'axis': 1},
            'RNN': {'direction': 'forward', 'layout': 0},
            'ReduceL1': {'keepdims': 1, 'noop_with_empty_axes': 0},
            'ReduceL2': {'keepdims': 1, 'noop_with_empty_axes': 0},
            'ReduceLogSum': {'keepdims': 1, 'noop_with_empty_axes': 0},
            'ReduceLogSumExp': {'keepdims': 1, 'noop_with_empty_axes': 0},
            'ReduceMax': {'keepdims': 1, 'noop_with_empty_axes': 0},
            'ReduceMean': {'keepdims': 1, 'noop_with_empty_axes': 0},
            'ReduceMin': {'keepdims': 1, 'noop_with_empty_axes': 0},
            'ReduceProd': {'keepdims': 1, 'noop_with_empty_axes': 0},
            'ReduceSum': {'keepdims': 1, 'noop_with_empty_axes': 0},
            'ReduceSumSquare': {'keepdims': 1, 'noop_with_empty_axes': 0},
            'Reshape': {'allowzero': 0},
            'Resize': {'antialias': 0, 'coordinate_transformation_mode': 'half_pixel',
                       'cubic_coeff_a': -0.75, 'exclude_outside': 0,
                       'extrapolation_value': 0.0, 'keep_aspect_ratio_policy': 'stretch',
                       'mode': 'nearest', 'nearest_mode': 'round_prefer_floor'},
            'ReverseSequence': {'batch_axis': 1, 'time_axis': 0},
            'RoiAlign': {'coordinate_transformation_mode': 'half_pixel', 'mode': 'avg',
                         'output_height': 1, 'output_width': 1, 'sampling_ratio': 0,
                         'spatial_scale': 1.0},
            'ScatterElements': {'axis': 0, 'reduction': 'none'},
            'ScatterND': {'reduction': 'none'},
            'Selu': {'alpha': 1.67326, 'gamma': 1.0507},
            'Shrink': {'bias': 0.0, 'lambd': 0.5},
            'Softmax': {'axis': -1},
            'Split': {'axis': 0},
            'SplitToSequence': {'axis': 0, 'keepdims': 1},
            'ThresholdedRelu': {'alpha': 1.0},
            # BUG FIX: key was 'Topk', which never matches the ONNX op name 'TopK',
            # so lookups for TopK silently returned {}.
            'TopK': {'axis': -1, 'largest': 1, 'sorted': 1},
            'Trilu': {'upper': 1},
            'Unique': {'sorted': 1},
        }

    def get(self, op_type: str) -> dict:
        """Return the optional-attribute defaults for ``op_type`` ({} if unknown)."""
        return self._optional_attrs.get(op_type, {})
def build_generic_retinanet_model(model, add_conv_body_func, freeze_conv_body=False):
    """Assemble a RetinaNet model: conv body + FPN heads (+ WSL variant),
    replicated across GPUs by the data-parallel builder.

    Returns the same ``model`` instance, mutated in place.
    """

    def _single_gpu_build_func(model):
        # Build the backbone; returns FPN blob names, their dim, and scales.
        blobs, dim, spatial_scales = add_conv_body_func(model)
        if not model.train:
            # Inference needs a standalone copy of the body network.
            model.conv_body_net = model.net.Clone('conv_body_net')
        retinanet_heads.add_fpn_retinanet_outputs(model, blobs, dim, spatial_scales)
        if cfg.WSL.WSL_ON:
            # Weakly-supervised variant: extra heads and their own losses.
            retinanet_heads.add_wsl_fpn_retinanet_outputs(model, blobs, dim, spatial_scales)
            if model.train:
                return retinanet_heads.add_wsl_fpn_retinanet_losses(model)
            return None
        if model.train:
            return retinanet_heads.add_fpn_retinanet_losses(model)
        return None

    optim.build_data_parallel_model(model, _single_gpu_build_func)
    return model
def _conv2d(channel, kernel, padding, stride, num_sync_bn_devices=(-1)):
    """Conv2D (no bias) + (Sync)BatchNorm + LeakyReLU(0.1) as one block.

    ``num_sync_bn_devices`` < 1 selects plain BatchNorm; otherwise synchronized
    batch norm across that many devices is used.
    """
    block = nn.HybridSequential(prefix='')
    block.add(nn.Conv2D(channel, kernel_size=kernel, strides=stride,
                        padding=padding, use_bias=False))
    if num_sync_bn_devices < 1:
        block.add(nn.BatchNorm(epsilon=1e-05, momentum=0.9))
    else:
        block.add(gluon.contrib.nn.SyncBatchNorm(
            epsilon=1e-05, momentum=0.9, num_devices=num_sync_bn_devices))
    block.add(nn.LeakyReLU(0.1))
    return block
def pr_curve_raw(tag, tp, fp, tn, fn, precision, recall, num_thresholds=127, weights=None):
    """Build a TensorBoard PR-curve ``Summary`` from precomputed raw data.

    The six input arrays are stacked row-wise into the plugin's expected
    (6, num_thresholds) tensor layout.  ``weights`` is accepted for API
    compatibility but unused here.
    """
    # The pr_curves plugin caps the number of thresholds at 127.
    num_thresholds = min(num_thresholds, 127)
    data = np.stack((tp, fp, tn, fn, precision, recall))
    plugin_payload = PrCurvePluginData(
        version=0, num_thresholds=num_thresholds).SerializeToString()
    metadata = SummaryMetadata(
        plugin_data=SummaryMetadata.PluginData(
            plugin_name='pr_curves', content=plugin_payload))
    tensor = TensorProto(
        dtype='DT_FLOAT',
        float_val=data.reshape(-1).tolist(),
        tensor_shape=TensorShapeProto(dim=[
            TensorShapeProto.Dim(size=data.shape[0]),
            TensorShapeProto.Dim(size=data.shape[1]),
        ]))
    return Summary(value=[Summary.Value(tag=tag, metadata=metadata, tensor=tensor)])
def get_model_list_for_rtl(net):
    """Recursively flatten a (possibly nested) model container into a flat list.

    Switcher nodes are resolved to their currently-selected model; any model
    exposing ``get_model_list`` (except Convolution2d) is expanded in place.
    """
    if type(net) is not list:
        net = [net]
    flat = []

    def _collect(models):
        for model in models:
            if issubclass(type(model), Switcher):
                # Expand only the branch that is currently active.
                model = model.get_current_model()
            if (type(model) != Convolution2d) and hasattr(model, 'get_model_list'):
                _collect(model.get_model_list())
            else:
                flat.append(model)

    _collect(net)
    return flat
def evaluation(test_file='../saved/CAR+P_Normal_predict_exa_4_1.txt', maxid=0, display=1):
    """Parse a prediction log and print hit-rate statistics grouped by input length.

    Each log line must contain the markers ``# batch in: [...]``,
    ``# batch out: N #`` and ``# batch pred: [...]``.  The first ``display``
    lines are echoed to stdout.  ``maxid`` is unused, kept for API compatibility.

    Returns:
        (batch_in, batch_out, batch_pred): the parsed inputs, labels and
        predictions as lists.
    """
    batch_in, batch_out, batch_pred = [], [], []
    # use a context manager so the file is closed even if a line is malformed
    with open(test_file, 'r') as fh:
        for cntline, line in enumerate(fh, start=1):
            if cntline <= display:
                print(line)
            in_ = [int(x) for x in line.split('# batch in: [')[1].split(']')[0].split(', ')]
            out_ = int(line.split('# batch out: ')[1].split(' #')[0])
            pred_split = line.split('# batch pred: [')[1].split(']')[0].split(', ')
            # an empty prediction list serializes as '[]' -> one empty token
            pred_ = [] if pred_split[0] == '' else [int(x) for x in pred_split]
            batch_in.append(in_)
            batch_out.append(out_)
            batch_pred.append(pred_)
    input_len_dict = {}
    for in_, out_, pred_ in zip(batch_in, batch_out, batch_pred):
        # strip zero padding from the tail; guard against an all-zero vector
        # (the original looped `while in_[-1] == 0` and raised IndexError there)
        while in_ and in_[-1] == 0:
            in_ = in_[:-1]
        hr = 1 if out_ in pred_ else 0  # hit if the label appears in predictions
        input_len_dict.setdefault(len(in_), []).append(hr)
    print('total case..', len(batch_in))
    len_score = [0] * 9
    over9 = []
    for k, v in input_len_dict.items():
        if k < 9:
            len_score[k] = sum(v) / len(v)
        else:
            over9 += v
    print(' for diff lens')
    print('len of over 9 seq', len(over9))
    for s in len_score:
        print(s)
    # BUG FIX: the original divided unconditionally and raised ZeroDivisionError
    # whenever no sequence was longer than 8 elements.
    if over9:
        print(sum(over9) / len(over9))
    return (batch_in, batch_out, batch_pred)
def submit_alisa_train(datasource, estimator_string, select, validation_select, model_params, model_name, pre_trained_model, **train_params):
    """Submit a model-training job to PAI via the Alisa scheduler.

    Prepares temp train/validation tables, packages the job archive, runs the
    PAI training command, persists the trained model to SQLFS, and cleans up.

    Args:
        datasource: connection string for the backing database.
        estimator_string: estimator name; a leading 'xgboost' selects the XGB
            entry point, everything else trains through TensorFlow.
        select / validation_select: SQL queries producing train/validation data.
        model_params: estimator hyper-parameters forwarded in the job params.
        model_name: name under which the trained model is saved.
        pre_trained_model: name of a model to warm-start from ('' for none).
        **train_params: extra training options merged flat into the job params.
    """
    # Snapshot every named argument into the job parameter dict, then fold the
    # kwargs in flat (so 'train_params' itself is not nested inside params).
    params = dict(locals())
    del params['train_params']
    params.update(train_params)
    if estimator_string.lower().startswith('xgboost'):
        params['entry_type'] = 'train_xgb'
    else:
        params['entry_type'] = 'train_tf'
    # Scratch dir for the job archive and params file.
    cwd = tempfile.mkdtemp(prefix='sqlflow', dir='/tmp')
    (train_table, val_table) = create_train_and_eval_tmp_table(select, validation_select, datasource)
    (params['pai_table'], params['pai_val_table']) = (train_table, val_table)
    path_to_save = get_oss_model_save_path(datasource, model_name)
    path_to_load = get_oss_model_save_path(datasource, pre_trained_model)
    params['oss_model_dir'] = path_to_save
    # Wipe any previous model at the target path unless we are warm-starting
    # from that very same path.
    if ((path_to_load == '') or (path_to_load != path_to_save)):
        clean_oss_model_path((path_to_save + '/'))
    prepare_archive(cwd, estimator_string, path_to_save, params)
    cmd = get_pai_train_cmd(datasource, estimator_string, model_name, train_table, val_table, model_params, train_params, path_to_save, ('file://%s' % JOB_ARCHIVE_FILE), ('file://%s' % PARAMS_FILE))
    upload_resource_and_submit_alisa_task(datasource, ('file://' + path.join(cwd, JOB_ARCHIVE_FILE)), ('file://' + path.join(cwd, PARAMS_FILE)), cmd)
    # Record the trained model in SQLFS, then drop the temp tables.
    save_model_to_sqlfs(datasource, path_to_save, model_name)
    drop_tables([train_table, val_table], datasource)
def print_assert_equal(test_string, actual, desired):
    """Raise AssertionError with pretty-printed values when actual != desired.

    ``test_string`` prefixes the failure message; nothing happens on success.
    """
    __tracebackhide__ = True  # hide this frame from pytest tracebacks
    import pprint
    if actual == desired:
        return
    buf = StringIO()
    buf.write(test_string)
    buf.write(' failed\nACTUAL: \n')
    pprint.pprint(actual, buf)
    buf.write('DESIRED: \n')
    pprint.pprint(desired, buf)
    raise AssertionError(buf.getvalue())
def get_updated_ranges(ranges, max_live=None):
    """Fill in missing (None) fields of liveness-range entries.

    Each entry is a ``(key, info)`` pair where ``info`` has ``defined``,
    ``used`` and ``size`` fields (namedtuple-like, supporting ``_replace``).
    Missing ``defined`` becomes -1, missing ``used`` becomes the live horizon
    (one past the largest recorded use), missing ``size`` becomes 1.
    """

    def _horizon(entries):
        # One past the largest recorded use point among entries with a use.
        return 1 + max(e[1].used for e in entries if e[1].used)

    def _normalize(entry, horizon, default_size):
        fixed = entry
        if fixed[1].defined is None:
            fixed = (fixed[0], fixed[1]._replace(defined=-1))
        if fixed[1].used is None:
            fixed = (fixed[0], fixed[1]._replace(used=horizon))
        if fixed[1].size is None:
            fixed = (fixed[0], fixed[1]._replace(size=default_size))
        return fixed

    if max_live is None:
        max_live = _horizon(ranges)
    return [_normalize(e, max_live, 1) for e in ranges]
class SeparationDataset(Dataset):
    """Speech-separation dataset reading Kaldi-style ``wav.scp`` manifests.

    For each utterance it loads one source (mixture) waveform and one or more
    target waveforms, and computes their STFT feature maps with librosa.
    Currently restricted to exactly one source and one target condition.
    """

    def __init__(self, data_dir, rate=16000, src=['mix_clean'], tgt=['s1', 's2'], n_fft=512, hop_length=320, win_length=512, window='hann', center=True):
        # NOTE(review): mutable list defaults (src/tgt) are shared across calls;
        # they are only read here, but confirm callers never mutate them.
        super(SeparationDataset, self).__init__()
        self.data_dir = data_dir
        self.rate = rate  # expected sample rate of every wav file
        self.src = src
        self.tgt = tgt
        # STFT parameters forwarded to librosa.stft.
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.center = center
        self.n_srcs = len(self.tgt)
        # Only the single-source / single-target configuration is supported.
        assert ((len(self.src) == 1) and (len(self.tgt) == 1))
        cond_list = ['s1', 's2', 'noise', 'mix_clean', 'mix_both', 'mix_single', 'noisy', 'clean']
        reco2path = {}
        # Build utterance -> {condition: wav path} from each condition's wav.scp.
        for cond in (src + tgt):
            assert (cond in cond_list)
            assert os.path.exists('{}/{}/wav.scp'.format(self.data_dir, cond))
            with open('{}/{}/wav.scp'.format(self.data_dir, cond), 'r') as fh:
                content = fh.readlines()
            for line in content:
                line = line.strip('\n')
                (uttname, path) = line.split()
                if (uttname not in reco2path):
                    reco2path[uttname] = {}
                reco2path[uttname][cond] = path
        self.reco2path = reco2path
        self.recolist = list(self.reco2path.keys())
        self.recolist.sort()  # deterministic utterance ordering

    def __len__(self):
        """Number of utterances in the dataset."""
        return len(self.recolist)

    def __getitem__(self, i):
        """Load one utterance.

        Returns:
            reco (str): name of the utterance.
            src_samp (ndarray): audio samples for the source [T, ].
            src_feat (ndarray): STFT feature map for the source, shape [T1, D].
            tgt_samp_list (list[ndarray]): audio samples for each target.
            tgt_feat_list (list[ndarray]): STFT feature map for each target.
        """
        reco = self.recolist[i]
        src_path = self.reco2path[reco][self.src[0]]
        # NOTE(review): loads at the module-level SAMPLE_RATE, then asserts it
        # matches self.rate — confirm the two constants agree.
        (src_samp, rate) = librosa.load(src_path, sr=SAMPLE_RATE)
        assert (rate == self.rate)
        # Transpose so time is the leading axis: [frames, freq_bins].
        src_feat = np.transpose(librosa.stft(src_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
        (tgt_samp_list, tgt_feat_list) = ([], [])
        for j in range(self.n_srcs):
            tgt_path = self.reco2path[reco][self.tgt[j]]
            (tgt_samp, rate) = librosa.load(tgt_path, sr=SAMPLE_RATE)
            assert (rate == self.rate)
            tgt_feat = np.transpose(librosa.stft(tgt_samp, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=self.window, center=self.center))
            tgt_samp_list.append(tgt_samp)
            tgt_feat_list.append(tgt_feat)
        return (reco, src_samp, src_feat, tgt_samp_list, tgt_feat_list)

    def collate_fn(self, batch):
        """Collate __getitem__ tuples into padded batch tensors.

        Items are sorted by decreasing waveform length; magnitudes, phases and
        raw STFTs are padded along time with pad_sequence (batch_first).
        """
        # Sort by decreasing sample length (x[1] is the source waveform).
        sorted_batch = sorted(batch, key=(lambda x: (- x[1].shape[0])))
        bs = len(sorted_batch)
        uttname_list = [sorted_batch[i][0] for i in range(bs)]
        source_attr = {}
        # x[2] is the source STFT: split into magnitude / phase / complex STFT.
        mix_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][2])) for i in range(bs)]
        mix_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][2])) for i in range(bs)]
        mix_stft_list = [torch.from_numpy(sorted_batch[i][2]) for i in range(bs)]
        mix_magnitude = pad_sequence(mix_magnitude_list, batch_first=True)
        mix_phase = pad_sequence(mix_phase_list, batch_first=True)
        mix_stft = pad_sequence(mix_stft_list, batch_first=True)
        source_attr['magnitude'] = mix_magnitude
        source_attr['phase'] = mix_phase
        source_attr['stft'] = mix_stft
        target_attr = {}
        target_attr['magnitude'] = []
        target_attr['phase'] = []
        # x[4][j] is the STFT of target j.
        for j in range(self.n_srcs):
            tgt_magnitude_list = [torch.from_numpy(np.abs(sorted_batch[i][4][j])) for i in range(bs)]
            tgt_phase_list = [torch.from_numpy(np.angle(sorted_batch[i][4][j])) for i in range(bs)]
            tgt_magnitude = pad_sequence(tgt_magnitude_list, batch_first=True)
            tgt_phase = pad_sequence(tgt_phase_list, batch_first=True)
            target_attr['magnitude'].append(tgt_magnitude)
            target_attr['phase'].append(tgt_phase)
        # Original (unpadded) waveform lengths, needed to undo the padding.
        wav_length = torch.from_numpy(np.array([len(sorted_batch[i][1]) for i in range(bs)]))
        source_wav_list = [torch.from_numpy(sorted_batch[i][1]) for i in range(bs)]
        source_wav = pad_sequence(source_wav_list, batch_first=True)
        target_wav_list = []
        for j in range(self.n_srcs):
            target_wav_list.append(pad_sequence([torch.from_numpy(sorted_batch[i][3][j]) for i in range(bs)], batch_first=True))
        # Number of STFT frames per item before padding.
        feat_length = torch.from_numpy(np.array([stft.size(0) for stft in mix_stft_list]))
        return (source_wav_list, uttname_list, source_attr, source_wav, target_attr, target_wav_list, feat_length, wav_length)
class DictMetadata(object):
    """In-memory metadata provider backed by a plain dict of name -> content.

    Implements the pkg_resources-style metadata provider interface; there is
    no directory structure, so the isdir/listdir queries are trivially empty.
    """

    def __init__(self, metadata):
        self._metadata = metadata

    def has_metadata(self, name):
        """True if an entry named ``name`` exists."""
        return name in self._metadata

    def get_metadata(self, name):
        """Return the entry's content as text, annotating decode failures."""
        try:
            return ensure_str(self._metadata[name])
        except UnicodeDecodeError as err:
            # Point at the offending metadata file before propagating.
            err.reason += ' in {} file'.format(name)
            raise

    def get_metadata_lines(self, name):
        """Yield the non-blank, non-comment lines of the entry."""
        return yield_lines(self.get_metadata(name))

    def metadata_isdir(self, name):
        """A flat dict has no subdirectories."""
        return False

    def metadata_listdir(self, name):
        """A flat dict has no directory listings."""
        return []

    def run_script(self, script_name, namespace):
        """Scripts are not supported; intentionally a no-op."""
        pass
# NOTE: garbled decorator residue here — original was likely `@require_torch`.
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    """Integration test for the pretrained hopper-expert Decision Transformer."""

    def test_autoregressive_prediction(self):
        """Run the model autoregressively for two steps and pin the actions.

        An autoregressive head is required, and this checks that the sequence
        of predicted actions at each step matches hard-coded expected values
        produced by the reference implementation.
        """
        NUM_STEPS = 2  # number of autoregressive decoding steps exercised
        TARGET_RETURN = 10  # initial return-to-go conditioning value
        model = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert')
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)  # fixed seed so the random env states are reproducible
        # Initial environment state (batch=1, seq=1, state_dim).
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)
        # One expected action vector per decoding step.
        expected_outputs = torch.tensor([[0.242793, (- 0.), 0.8742613], [0., (- 0.), (- 0.)]], device=torch_device)
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        # Empty action/reward histories (seq length 0) to start from.
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            # Append a placeholder action/reward slot for the current step.
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                (_, action_pred, _) = model(states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False)
            self.assertEqual(action_pred.shape, actions.shape)
            # Last-position action prediction must match the reference output.
            self.assertTrue(torch.allclose(action_pred[(0, (- 1))], expected_outputs[step], atol=0.0001))
            # Simulate one env step: random next state, constant reward of 1.0.
            (state, reward, _, _) = (torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32), 1.0, False, {})
            # Write the predicted action into the placeholder slot.
            actions[(- 1)] = action_pred[(0, (- 1))]
            states = torch.cat([states, state], dim=1)
            # Decrement return-to-go by the obtained reward and extend histories.
            pred_return = (returns_to_go[(0, (- 1))] - reward)
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat([timesteps, (torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1))], dim=1)
class FP16Compressor(Compressor):
    """Compress floating-point tensors to 16-bit precision for communication."""

    # BUG FIX: both methods were plain functions with no `self` parameter, so
    # calling them on an instance passed the instance as `tensor`. They are
    # stateless, so @staticmethod keeps class-level calls working and makes
    # instance-level calls correct too.
    @staticmethod
    def compress(tensor):
        """Downcast a floating-point tensor to float16.

        Non-floating tensors pass through unchanged. Returns the (possibly
        compressed) tensor and the original dtype as decompression context.
        """
        tensor_compressed = tensor
        if tensor.dtype.is_floating_point:
            tensor_compressed = tensor.type(torch.float16)
        return (tensor_compressed, tensor.dtype)

    @staticmethod
    def decompress(tensor, ctx):
        """Upcast a compressed tensor back to its original dtype.

        ``ctx`` is the dtype returned by :meth:`compress`; non-floating dtypes
        pass through unchanged.
        """
        tensor_decompressed = tensor
        dtype = ctx
        if dtype.is_floating_point:
            tensor_decompressed = tensor.type(dtype)
        return tensor_decompressed
class SpatialGradientFeatures(nn.Module):
    """Compute dot-product features from complex-valued spatial gradients.

    Input ``vectors`` has shape (..., C, 2), the last axis holding the real
    and imaginary parts. A learned linear map (optionally a complex rotation
    via separate real/imaginary weights) is applied, and the tanh of the
    complex inner product with the input is returned, shape (..., C).
    """

    def __init__(self, C_inout, with_gradient_rotations=True):
        super(SpatialGradientFeatures, self).__init__()
        self.C_inout = C_inout
        self.with_gradient_rotations = with_gradient_rotations
        if self.with_gradient_rotations:
            # Separate maps for real/imag parts act as a learned rotation.
            self.A_re = nn.Linear(self.C_inout, self.C_inout, bias=False)
            self.A_im = nn.Linear(self.C_inout, self.C_inout, bias=False)
        else:
            self.A = nn.Linear(self.C_inout, self.C_inout, bias=False)

    def forward(self, vectors):
        real_in = vectors[..., 0]
        imag_in = vectors[..., 1]
        if self.with_gradient_rotations:
            # Complex multiply (A_re + i*A_im) applied to (real_in + i*imag_in).
            real_out = self.A_re(real_in) - self.A_im(imag_in)
            imag_out = self.A_re(imag_in) + self.A_im(real_in)
        else:
            real_out = self.A(real_in)
            imag_out = self.A(imag_in)
        # Real part of the complex inner product <input, mapped input>.
        dots = (real_in * real_out) + (imag_in * imag_out)
        return torch.tanh(dots)
def _data_dimensions(features: _Features) -> Tuple[(int, int)]:
    """Return the leading two data dimensions of the first NODE or EDGE input.

    Raises:
        AssertionError: if no input is located on nodes or edges.
    """
    for inp in features.inputs:
        if inp.location in [_Location.NODE, _Location.EDGE]:
            return inp.data.shape[:2]
    # BUG FIX: a bare `assert False` is stripped under `python -O`, silently
    # returning None instead of failing; raise the same exception explicitly.
    raise AssertionError('no NODE or EDGE input found in features')
def boost_get_includes(self, *k, **kw):
    """Resolve the Boost include directory for a waf configuration context.

    Tries, in order: the positional/keyword ``includes`` argument, then each
    well-known directory in BOOST_INCLUDES; aborts configuration otherwise.
    """
    includes = (k and k[0]) or kw.get('includes', None)
    # An explicit path wins if it actually contains the boost version header.
    if includes and self.__boost_get_version_file(includes):
        return includes
    for candidate in BOOST_INCLUDES:
        if self.__boost_get_version_file(candidate):
            return candidate
    if includes:
        self.fatal(('headers not found in %s' % includes))
    else:
        self.fatal('headers not found, use --boost-includes=/path/to/boost')
def test_stl_ownership():
    """The STL container returned from C++ keeps its Placeholder alive only
    while Python holds a reference to it."""
    stats = ConstructorStats.get(m.Placeholder)
    assert stats.alive() == 0
    container = m.test_stl_ownership()
    assert len(container) == 1
    # Dropping the Python reference must destroy the held Placeholder.
    del container
    assert stats.alive() == 0
def test_ids2var():
    """ids2var should pack a flat id list into a (1, 2, 3) tensor (with EOS)."""
    token_ids = [1, 1, 1, 1, 1]
    var = ids2var(token_ids, 1, 2, 3, addEOS=True)
    assert var.data.size() == torch.Size([1, 2, 3])
    print('Test (ids2var): passed')
def keep_file(x):
    """Return True iff x's sha256 digest has not been seen before.

    Records the digest in the module-level ``hashes`` set as a side effect,
    so duplicates are rejected on subsequent calls.
    """
    digest = sha256str(x)
    if digest in hashes:
        return False
    hashes.add(digest)
    return True
class DistributedDataParallel(Module):
    """Legacy data-parallel wrapper that all-reduces gradients across
    ``torch.distributed`` processes after each backward pass.

    Parameters are broadcast from rank 0 at construction; a post-backward
    hook queues a single reduction callback that averages all gradients.
    """

    def __init__(self, module):
        super(DistributedDataParallel, self).__init__()
        # Older torch.distributed exposes the backend as `dist._backend`;
        # half-precision gradients are known to be slow on GLOO, so warn there.
        if (not hasattr(dist, '_backend')):
            self.warn_on_half = True
        else:
            self.warn_on_half = (True if (dist._backend == dist.dist_backend.GLOO) else False)
        self.module = module
        # Sync all workers to rank 0's initial weights/buffers.
        for p in self.module.state_dict().values():
            if (not torch.is_tensor(p)):
                continue
            dist.broadcast(p, 0)

        def allreduce_params():
            # Runs at most once per backward pass (guarded by needs_reduction):
            # bucket grads by tensor type, flatten, all-reduce, average, copy back.
            if self.needs_reduction:
                self.needs_reduction = False
                buckets = {}
                for param in self.module.parameters():
                    if (param.requires_grad and (param.grad is not None)):
                        tp = type(param.data)
                        if (tp not in buckets):
                            buckets[tp] = []
                        buckets[tp].append(param)
                if self.warn_on_half:
                    if (torch.cuda.HalfTensor in buckets):
                        print((('WARNING: gloo dist backend for half parameters may be extremely slow.' + ' It is recommended to use the NCCL backend in this case. This currently requires') + 'PyTorch built from top of tree master.'))
                        self.warn_on_half = False
                for tp in buckets:
                    bucket = buckets[tp]
                    grads = [param.grad.data for param in bucket]
                    # Coalesce into one tensor so a single all_reduce suffices.
                    coalesced = _flatten_dense_tensors(grads)
                    dist.all_reduce(coalesced)
                    coalesced /= dist.get_world_size()
                    for (buf, synced) in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
                        buf.copy_(synced)
        # Register a hook on every parameter; each hook only queues the shared
        # allreduce_params callback on the autograd engine.
        # NOTE(review): allreduce_hook closes over the loop variable `param`
        # (late binding) — harmless here since the hook only touches the
        # engine, which is shared, but confirm before reusing this pattern.
        for param in list(self.module.parameters()):
            def allreduce_hook(*unused):
                param._execution_engine.queue_callback(allreduce_params)
            if param.requires_grad:
                param.register_hook(allreduce_hook)

    def forward(self, *inputs, **kwargs):
        # Arm the flag so the first backward after this forward triggers a reduce.
        self.needs_reduction = True
        return self.module(*inputs, **kwargs)
    # Disabled code intentionally kept by the original author as a string literal.
    '\n    def _sync_buffers(self):\n        buffers = list(self.module._all_buffers())\n        if len(buffers) > 0:\n            # cross-node buffer sync\n            flat_buffers = _flatten_dense_tensors(buffers)\n            dist.broadcast(flat_buffers, 0)\n            for buf, synced in zip(buffers, _unflatten_dense_tensors(flat_buffers, buffers)):\n                buf.copy_(synced)\n    def train(self, mode=True):\n        # Clear NCCL communicator and CUDA event cache of the default group ID,\n        # These cache will be recreated at the later call. This is currently a\n        # work-around for a potential NCCL deadlock.\n        if dist._backend == dist.dist_backend.NCCL:\n            dist._clear_group_cache()\n        super(DistributedDataParallel, self).train(mode)\n        self.module.train(mode)\n    '
class GenericVisitor(ABC):
    """Base class for visitors dispatching on the node's class name.

    ``visit(node)`` looks up ``visit_<snake_case_node_class>`` on the visitor
    and calls it, falling back to :meth:`generic_visit` (which raises).
    """

    def __init__(self):
        pass

    def visit(self, node):
        """Dispatch ``node`` to the matching ``visit_*`` method."""
        method_name = self._visit_method_name(node)
        visitor = getattr(self, method_name, self.generic_visit)
        return visitor(node)

    def generic_visit(self, node):
        """Fallback for node types without a dedicated visit method."""
        raise Exception('{}: No {} method'.format(type(self).__name__, self._visit_method_name(node)))

    # BUG FIX: this was a plain method without `self`, so calling
    # `self._visit_method_name(node)` bound the VISITOR as `node` and derived
    # the dispatch name from the visitor's own class — subclass visit_* methods
    # could never be found. @staticmethod makes `node` the actual node.
    @staticmethod
    def _visit_method_name(node) -> str:
        return ('visit_' + camel_to_snake_case(type(node).__name__))
def _check_directly_compile_overloaded(obj):
    """Refuse to compile a function that has registered JIT overloads.

    Overloaded functions can only be compiled from a calling context where
    argument types select the overload; direct compilation is ambiguous.
    """
    qual_name = _qualified_name(obj)
    has_overloads = _jit_internal._get_fn_overloads(qual_name) or _try_get_jit_cached_overloads(obj)
    if has_overloads:
        raise RuntimeError('Function {} cannot be directly compiled because it is overloaded. It must be used in a context of a function where its inputs can determine which overload to call.'.format(qual_name))
def TestGE_KL():
    # NOTE(review): this looks like it was meant to be a unittest test CLASS
    # (`class TestGE_KL(TestCase)`). As a function, the nested test methods
    # below are defined but never collected or executed, and calling
    # TestGE_KL() simply returns None. Confirm intent before relying on it.
    def test_gekl_init(self):
        pass

    def test_gekl_step(self):
        pass
def elsa_architecture(nb_classes, nb_tokens, maxlen, feature_output=False, embed_dropout_rate=0, final_dropout_rate=0, embed_dim=300, embed_l2=1e-06, return_attention=False, load_embedding=False, pre_embedding=None, high=False, LSTM_hidden=512, LSTM_drop=0.5):
    """Build the ELSA Keras model: embedding -> 2 stacked BiLSTMs with a skip
    connection -> attention-weighted average -> optional classifier head.

    Args:
        nb_classes: number of output classes (<=2 produces a single sigmoid unit).
        nb_tokens: vocabulary size of the embedding layer.
        maxlen: fixed input sequence length.
        feature_output: if True, return the attention features instead of logits.
        embed_dropout_rate / final_dropout_rate: dropout after embedding / before head.
        embed_dim: embedding dimensionality.
        embed_l2: L2 regularization strength on the embedding weights (0 disables).
        return_attention: additionally return the attention weights.
        load_embedding / pre_embedding: supply a pretrained embedding matrix.
        high: insert a Highway layer (requires dropping the mask via NonMasking).
        LSTM_hidden / LSTM_drop: BiLSTM width and dropout.

    Returns:
        A compiled-free keras Model; outputs = [predictions(, attention_weights)].
    """
    class NonMasking(Layer):
        # Pass-through layer whose only job is to swallow the incoming Keras
        # mask (layers such as Highway cannot consume masked inputs).
        def __init__(self, **kwargs):
            self.supports_masking = True
            super(NonMasking, self).__init__(**kwargs)

        def build(self, input_shape):
            input_shape = input_shape  # no trainable weights

        def compute_mask(self, input, input_mask=None):
            return None  # drop the mask

        def call(self, x, mask=None):
            return x

        def get_output_shape_for(self, input_shape):
            return input_shape
    model_input = Input(shape=(maxlen,), dtype='int32')
    embed_reg = (L1L2(l2=embed_l2) if (embed_l2 != 0) else None)
    # Random-init embeddings unless a pretrained matrix was provided.
    if ((not load_embedding) and (pre_embedding is None)):
        embed = Embedding(input_dim=nb_tokens, output_dim=embed_dim, mask_zero=True, input_length=maxlen, embeddings_regularizer=embed_reg, name='embedding')
    else:
        embed = Embedding(input_dim=nb_tokens, output_dim=embed_dim, mask_zero=True, input_length=maxlen, weights=[pre_embedding], embeddings_regularizer=embed_reg, trainable=True, name='embedding')
    if high:
        x = NonMasking()(embed(model_input))
    else:
        x = embed(model_input)
    x = Activation('tanh')(x)
    if (embed_dropout_rate != 0):
        embed_drop = SpatialDropout1D(embed_dropout_rate, name='embed_drop')
        x = embed_drop(x)
    # Two stacked BiLSTMs; the embedding output is skip-connected into the concat.
    lstm_0_output = Bidirectional(LSTM(LSTM_hidden, return_sequences=True, dropout=LSTM_drop), name='bi_lstm_0')(x)
    lstm_1_output = Bidirectional(LSTM(LSTM_hidden, return_sequences=True, dropout=LSTM_drop), name='bi_lstm_1')(lstm_0_output)
    x = concatenate([lstm_1_output, lstm_0_output, x])
    if high:
        x = TimeDistributed(Highway(activation='tanh', name='high'))(x)
    weights = None
    x = AttentionWeightedAverage(name='attlayer', return_attention=return_attention)(x)
    if return_attention:
        (x, weights) = x
    if (not feature_output):
        # Classification head: softmax for multi-class, sigmoid for binary.
        if (final_dropout_rate != 0):
            x = Dropout(final_dropout_rate)(x)
        if (nb_classes > 2):
            outputs = [Dense(nb_classes, activation='softmax', name='softmax')(x)]
        else:
            outputs = [Dense(1, activation='sigmoid', name='softmax')(x)]
    else:
        outputs = [x]
    if return_attention:
        outputs.append(weights)
    return Model(inputs=[model_input], outputs=outputs)
class TensorFieldFreeModule(TensorFreeModule):
    """Free module of type-(k,l) tensor fields along a parallelizable
    differentiable manifold (SageManifolds).

    Elements are :class:`TensorFieldParal`; conversions from differential
    forms, multivector fields, automorphism fields and generic tensor fields
    are provided by ``_element_constructor_`` / ``_coerce_map_from_``.
    """
    # Element class instantiated by this parent.
    Element = TensorFieldParal

    def __init__(self, vector_field_module, tensor_type):
        domain = vector_field_module._domain
        dest_map = vector_field_module._dest_map
        kcon = tensor_type[0]  # contravariant rank
        lcov = tensor_type[1]  # covariant rank
        name = 'T^({},{})({}'.format(kcon, lcov, domain._name)
        latex_name = '\\mathcal{{T}}^{{({}, {})}}\\left({}'.format(kcon, lcov, domain._latex_name)
        # Mention the destination map in the names unless it is the identity.
        if (dest_map is not domain.identity_map()):
            dm_name = dest_map._name
            dm_latex_name = dest_map._latex_name
            if (dm_name is None):
                dm_name = 'unnamed map'
            if (dm_latex_name is None):
                dm_latex_name = '\\mathrm{unnamed\\; map}'
            name += (',' + dm_name)
            latex_name += (',' + dm_latex_name)
        name += ')'
        latex_name += '\\right)'
        TensorFreeModule.__init__(self, vector_field_module, tensor_type, name=name, latex_name=latex_name)
        self._domain = domain
        self._dest_map = dest_map
        self._ambient_domain = vector_field_module._ambient_domain

    def _element_constructor_(self, comp=[], frame=None, name=None, latex_name=None, sym=None, antisym=None):
        """Construct a tensor field, converting from compatible objects.

        ``comp`` may be 0 (zero element), a DiffFormParal, a
        MultivectorFieldParal, an AutomorphismFieldParal, a TensorField to
        restrict, or a list/tuple of components in ``frame``.
        NOTE(review): the mutable default ``comp=[]`` is only read, never
        mutated, so it is safe as written.
        """
        # Symbolic or numeric zero -> the module's zero element.
        try:
            if comp.is_trivial_zero():
                return self.zero()
        except AttributeError:
            if (comp == 0):
                return self.zero()
        if isinstance(comp, DiffFormParal):
            # Convert a p-form into a fully antisymmetric (0,p) tensor.
            form = comp
            p = form.degree()
            if ((self._tensor_type != (0, p)) or (self._fmodule != form.base_module())):
                raise TypeError(('cannot convert the {}'.format(form) + ' to an element of {}'.format(self)))
            if (p == 1):
                asym = None
            else:
                asym = range(p)
            resu = self.element_class(self._fmodule, (0, p), name=form._name, latex_name=form._latex_name, antisym=asym)
            for (frame, cp) in form._components.items():
                resu._components[frame] = cp.copy()
            return resu
        if isinstance(comp, MultivectorFieldParal):
            # Convert a p-vector into a fully antisymmetric (p,0) tensor.
            pvect = comp
            p = pvect.degree()
            if ((self._tensor_type != (p, 0)) or (self._fmodule != pvect.base_module())):
                raise TypeError(('cannot convert the {}'.format(pvect) + ' to an element of {}'.format(self)))
            if (p == 1):
                asym = None
            else:
                asym = range(p)
            resu = self.element_class(self._fmodule, (p, 0), name=pvect._name, latex_name=pvect._latex_name, antisym=asym)
            for (frame, cp) in pvect._components.items():
                resu._components[frame] = cp.copy()
            return resu
        if isinstance(comp, AutomorphismFieldParal):
            # A field automorphism is a type-(1,1) tensor field.
            autom = comp
            if ((self._tensor_type != (1, 1)) or (self._fmodule != autom.base_module())):
                raise TypeError(('cannot convert the {}'.format(autom) + ' to an element of {}'.format(self)))
            resu = self.element_class(self._fmodule, (1, 1), name=autom._name, latex_name=autom._latex_name)
            for (basis, comp) in autom._components.items():
                resu._components[basis] = comp.copy()
            return resu
        if isinstance(comp, TensorField):
            # A tensor field on a larger domain converts by restriction.
            if ((self._tensor_type == comp._tensor_type) and self._domain.is_subset(comp._domain) and self._ambient_domain.is_subset(comp._ambient_domain)):
                return comp.restrict(self._domain)
            else:
                raise TypeError(('cannot convert the {}'.format(comp) + ' to an element of {}'.format(self)))
        if (not isinstance(comp, (list, tuple))):
            raise TypeError(('cannot convert the {} '.format(comp) + 'to an element of {}'.format(self)))
        # Generic construction from a component list in the given frame.
        resu = self.element_class(self._fmodule, self._tensor_type, name=name, latex_name=latex_name, sym=sym, antisym=antisym)
        if comp:
            resu.set_comp(frame)[:] = comp
        return resu

    def _coerce_map_from_(self, other):
        """Declare coercions from compatible tensor/form/multivector modules."""
        from sage.manifolds.differentiable.diff_form_module import DiffFormFreeModule
        from sage.manifolds.differentiable.multivector_module import MultivectorFreeModule
        from sage.manifolds.differentiable.automorphismfield_group import AutomorphismFieldParalGroup
        if isinstance(other, (TensorFieldModule, TensorFieldFreeModule)):
            # Same tensor type on a (sub)domain coerces by restriction.
            return ((self._tensor_type == other._tensor_type) and self._domain.is_subset(other._domain) and self._ambient_domain.is_subset(other._ambient_domain))
        if isinstance(other, DiffFormFreeModule):
            # p-forms coerce into (0,p) tensors over the same base module.
            return ((self._fmodule is other.base_module()) and (self._tensor_type == (0, other.degree())))
        if isinstance(other, MultivectorFreeModule):
            # p-vectors coerce into (p,0) tensors over the same base module.
            return ((self._fmodule is other.base_module()) and (self._tensor_type == (other.degree(), 0)))
        if isinstance(other, AutomorphismFieldParalGroup):
            # Automorphisms coerce into (1,1) tensors over the same base module.
            return ((self._fmodule is other.base_module()) and (self._tensor_type == (1, 1)))
        return False

    def _repr_(self):
        """Plain-text description of this module."""
        description = 'Free module '
        if (self._name is not None):
            description += (self._name + ' ')
        description += 'of type-({},{})'.format(self._tensor_type[0], self._tensor_type[1])
        description += ' tensors fields '
        if (self._dest_map is self._domain.identity_map()):
            description += 'on the {}'.format(self._domain)
        else:
            description += ('along the {}'.format(self._domain) + ' mapped into the {}'.format(self._ambient_domain))
        return description
class SageTimeitResult():
    """Holder for timeit results.

    ``stats`` is the 5-tuple ``(loops, repeats, precision, best_time, unit)``
    used to format the summary; ``series`` optionally keeps the raw timings.
    """

    def __init__(self, stats, series=None):
        self.stats = stats
        # BUG FIX: the original tested `if (not None)`, which is always True,
        # so `self.series` stayed None whenever the argument was omitted.
        self.series = series if series is not None else []

    def __repr__(self):
        # Pluralize 'loop' only when more than one loop was run.
        if self.stats[0] > 1:
            s = ('%d loops, best of %d: %.*g %s per loop' % self.stats)
        else:
            s = ('%d loop, best of %d: %.*g %s per loop' % self.stats)
        if isinstance(s, str):
            return s
        # Python 2 leftover: encode unicode results.
        return s.encode('utf-8')
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs with synchronized batch
    norm, a ReLU between them, and an identity (or downsampled) skip path.
    """
    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, atrous=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First conv carries the stride/atrous rate; second keeps resolution.
        self.conv1 = conv3x3(inplanes, planes, stride, atrous)
        self.bn1 = SynchronizedBatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = SynchronizedBatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Skip path: identity unless a downsample module was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
def test_nrand(m=10000, u=3, v=4):
    """diffsptk.nrand's sample variance/mean should match the SPTK CLI vstat."""
    var_and_mean = torch.var_mean(diffsptk.nrand(m, mean=u, var=v), unbiased=False)
    reference = U.call(f'nrand -m {m} -u {u} -v {v} | vstat')
    # reference[0] is the mean, reference[1] the variance.
    assert U.allclose(var_and_mean[1], reference[0], rtol=0.1)
    assert U.allclose(var_and_mean[0], reference[1], rtol=0.1)
class TestRmsProp(OptimizerTestBase, LRModificationTestBase, TestCase):
    """RmsProp optimizer test suite (dense only; sparse is unsupported)."""

    def build_optimizer(self, model, **kwargs):
        # RmsProp runs on CPU in these tests.
        self._skip_gpu = False
        return build_rms_prop(model, base_learning_rate=0.1, epsilon=0.1, **kwargs)

    def check_optimizer(self, optimizer):
        # RmsProp keeps only local (per-parameter) auxiliary state.
        self.assertFalse(optimizer.get_auxiliary_parameters().shared)
        self.assertTrue(optimizer.get_auxiliary_parameters().local)
        for aux_param in optimizer.get_auxiliary_parameters().local:
            workspace.FetchBlob(aux_param)

    def testSparse(self):
        raise unittest.SkipTest('no sparse support')
def shd(true_matrix: pd.DataFrame, estimated_matrix: pd.DataFrame):
    """Structural Hamming distance between two graphs given as adjacency matrices."""
    graph_true = adjmatrix_to_graph(true_matrix)
    graph_est = adjmatrix_to_graph(estimated_matrix)
    return SHD(graph_true, graph_est).get_shd()
def cal_pxl_roc(gt_mask, scores):
    """Per-pixel ROC curve and AUC for an anomaly score map.

    Both inputs are flattened so every pixel counts as one sample.
    """
    flat_gt = gt_mask.flatten()
    flat_scores = scores.flatten()
    fpr, tpr, _ = roc_curve(flat_gt, flat_scores)
    per_pixel_rocauc = roc_auc_pxl(flat_gt, flat_scores)
    return (fpr, tpr, per_pixel_rocauc)
def remove_malformed_prompt_logs(is_malformed_function):
    """Drop every prompt-log entry that the predicate flags as malformed.

    Rebuilds ``global_variables.prompt_logs`` in place with only well-formed
    entries, preserving their original order.
    """
    global_variables.prompt_logs = [
        entry for entry in global_variables.prompt_logs
        if not is_malformed_function(entry)
    ]
class OuterNode(object):
    """Graph node carrying either a constant tensor or a named attribute value.

    A plain Python ``tensor_value`` is normalized to a numpy array (scalars
    are promoted to shape (1,)), after which the node counts as a tensor.
    Supplying ``attr_name`` likewise marks the node as a tensor node.
    """

    def __init__(self, is_tensor=False, tensor_value=None, attr_name=None):
        self.output = []
        self.is_tensor = is_tensor
        self.tensor_value = tensor_value
        self.attr_name = attr_name
        self.attr_value = None
        if not is_tensor:  # idiom fix: was `is_tensor == False`
            if tensor_value is not None:
                self.tensor_value = np.array(tensor_value)
                if self.tensor_value.shape == ():
                    # Promote 0-d scalars to a 1-element vector.
                    self.tensor_value = np.expand_dims(self.tensor_value, 0)
                self.is_tensor = True
        if attr_name:
            self.is_tensor = True

    def get_attr(self):
        """Return ``{attr_name: translated_value}`` for this node's attribute.

        NOTE(review): assumes ``attr_value`` was set to a numpy array before
        this is called — with ``attr_value`` still None this raises
        AttributeError; confirm callers always populate it first.
        """
        attr_value = self.attr_value
        if len(self.attr_value.shape) == 0:
            # Unwrap 0-d arrays to a plain float for the ONNX translator.
            attr_value = float(attr_value)
        return {self.attr_name: translate_onnx(self.attr_name, attr_value)}
def run_train_test_cycle(X, Y, L, LS, S, P, model_class, output_root_dir, data_name, target_name, training_programme=None, do_this_if_model_exists='skip', save_data_in_output_dir=True, force_device_for_training=None, force_device_for_evaluation=None):
    """Train and evaluate `model_class` once per split in S (rotating CV).

    X: data (samples x time x channels); Y: targets; L: channel labels;
    LS: per-sample subject labels; S: list of per-split sample-index lists;
    P: the permutation used to produce the splits (saved for reproducibility).
    Split i is the test fold, split i+1 the validation fold, the rest train.
    """
    # BUGFIX: the first format argument previously used L.shape[0] (channel
    # labels) where the message reports the number of target labels Y.
    assert (Y.shape[0] == X.shape[0] == LS.shape[0]), 'Number of samples differ between labels Y (n={}), data X (n={}) and subject labels LS (n={})'.format(Y.shape[0], X.shape[0], LS.shape[0])
    assert (len(L) == X.shape[2]), 'Number of provided channel names/labels in L (c={}) differs from number of channels in data X(c={})'.format(len(L), X.shape[2])
    assert (sum([len(s) for s in S]) == X.shape[0]), 'Number of samples distributed over splits in S (n={}) differs from number of samples in X ({})'.format(sum([len(s) for s in S]), X.shape[0])
    if save_data_in_output_dir:
        print('Saving training and evaluation data to {}'.format(output_root_dir))
        helpers.ensure_dir_exists(output_root_dir)
        scipy.io.savemat('{}/data.mat'.format(output_root_dir), {'X': X})
        scipy.io.savemat('{}/targets.mat'.format(output_root_dir), {'Y': Y})
        scipy.io.savemat('{}/channel_labels.mat'.format(output_root_dir), {'L': L})
        scipy.io.savemat('{}/subject_labels.mat'.format(output_root_dir), {'LS': LS})
        scipy.io.savemat('{}/splits.mat'.format(output_root_dir), {'S': S})
        scipy.io.savemat('{}/permutation.mat'.format(output_root_dir), {'P': P})
    logfile = open('{}/log.txt'.format(output_root_dir), 'a')
    for split_index in range(len(S)):
        model = model_class(output_root_dir, data_name, target_name, split_index)
        model_dir = model.path_dir()
        helpers.ensure_dir_exists(model_dir)
        if (model.exists() and (do_this_if_model_exists == 'skip')):
            print('Model already exists at {}. skipping'.format(model_dir))
            continue
        t_start = time.time()
        # Rotating fold assignment: split i tests, i+1 validates, rest train.
        j_test = split_index
        i_test = S[j_test]
        j_val = ((split_index + 1) % len(S))
        i_val = S[j_val]
        j_train = list((set(range(len(S))) - {j_test, j_val}))
        i_train = []
        for j in j_train:
            i_train.extend(S[j])
        x_train = X[(i_train, ...)]
        y_train = Y[(i_train, ...)]
        x_test = X[(i_test, ...)]
        y_test = Y[(i_test, ...)]
        x_val = X[(i_val, ...)]
        y_val = Y[(i_val, ...)]
        x_test_shape_orig = x_test.shape
        (x_train, x_val, x_test, y_train, y_val, y_test) = model.preprocess_data(x_train, x_val, x_test, y_train, y_val, y_test)
        if ((not model.exists()) or (model.exists() and (do_this_if_model_exists == 'retrain'))):
            model.build_model(x_train.shape, y_train.shape)
            if (training_programme is not None):
                # Swap in a custom training routine for this model instance.
                model.train_model = types.MethodType(training_programme.train_model, model)
            model.train_model(x_train, y_train, x_val, y_val, force_device=force_device_for_training)
            model.save_model()
        else:
            model.load_model()
        results = model.evaluate_model(x_test, y_test, force_device=force_device_for_evaluation, lower_upper=helpers.get_channel_wise_bounds(x_train))
        t_end = time.time()
        report = '\n{}\n'.format(model.path_dir().replace('/', ' '))
        report += 'test accuracy : {}\n'.format(results['acc'])
        report += 'test loss (l1): {}\n'.format(results['loss_l1'])
        report += 'train-evaluation-sequence done after {}s\n\n'.format((t_end - t_start))
        print(report)
        with open('{}/scores.txt'.format(model.path_dir()), 'w') as f:
            f.write(report)
        logfile.write(report)
        logfile.flush()
        scipy.io.savemat('{}/outputs.mat'.format(model.path_dir()), results)
def test_avgpool1d_stride1_padding_same():
    """avg-pool1d with pool_size=3, stride 1 and 'same' padding keeps the time dim."""
    time_dim = Dim(10, name='time')
    data_tensor = Tensor('data', [batch_dim, time_dim], dtype='float32')
    extern_data = TensorDict({'data': data_tensor})

    class _Net(rf.Module):
        def __call__(self, x: rf.Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim]:
            # 'same' padding with stride 1 leaves the spatial dim unchanged.
            return rf.pool1d(x, mode='avg', pool_size=3, strides=1, padding='same', in_spatial_dim=in_spatial_dim)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out, _ = model(extern_data['data'], in_spatial_dim=time_dim)
        out.mark_as_default_output(shape=[batch_dim, time_dim])

    run_model(extern_data, lambda *, epoch, step: _Net(), _forward_step)
class BertAdam(Optimizer):
    """Adam variant used for BERT: no bias correction, decoupled weight decay,
    and the step scaled by an external learning-rate schedule.

    Params:
        lr: learning rate (required, >= 0).
        warmup: portion of t_total for warmup, -1 to disable.
        t_total: total number of training steps, -1 for constant lr.
        schedule: schedule name from SCHEDULES or an `_LRSchedule` instance.
        betas: Adam (b1, b2) coefficients.
        e: epsilon added to the denominator.
        weight_decay: decoupled weight-decay factor.
        max_grad_norm: per-parameter gradient clipping threshold (0 disables).
    """

    def __init__(self, params=None, lr='required', warmup=(- 1), t_total=(- 1), schedule='warmup_linear', betas=(0.9, 0.999), e=1e-06, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
        if ((lr == 'required') or (lr < 0.0)):
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if ((not isinstance(schedule, _LRSchedule)) and (schedule not in SCHEDULES)):
            raise ValueError('Invalid schedule parameter: {}'.format(schedule))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {} - should be in [0.0, 1.0['.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {} - should be in [0.0, 1.0['.format(betas[1]))
        if (not (e >= 0.0)):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))
        if (not isinstance(schedule, _LRSchedule)):
            schedule_type = SCHEDULES[schedule]
            schedule = schedule_type(warmup=warmup, t_total=t_total)
        elif ((warmup != (- 1)) or (t_total != (- 1))):
            logger.warning('warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. Please specify custom warmup and t_total in _LRSchedule object.')
        defaults = dict(lr=lr, schedule=schedule, betas=betas, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)

    def get_lr(self):
        """Return the current scheduled lr for every already-stepped param."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if (len(state) == 0):
                    # NOTE(review): params never stepped are silently skipped;
                    # confirm callers expect a possibly-shorter list.
                    pass
                else:
                    lr_scheduled = group['lr']
                    lr_scheduled *= group['schedule'].get_lr(state['step'])
                    lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Perform one optimization step; returns closure() result if given."""
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy state init on first step for this parameter.
                    state['step'] = 0
                    state['next_m'] = torch.zeros_like(p.data)
                    state['next_v'] = torch.zeros_like(p.data)
                (next_m, next_v) = (state['next_m'], state['next_v'])
                (beta1, beta2) = group['betas']
                if (group['max_grad_norm'] > 0):
                    clip_grad_norm_(p, group['max_grad_norm'])
                # BUGFIX: use the keyword overloads; the positional
                # `add_(scalar, tensor)` / `addcmul_(scalar, t, t)` forms are
                # deprecated and removed in modern PyTorch.
                next_m.mul_(beta1).add_(grad, alpha=(1 - beta1))
                next_v.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                # No bias correction, by design (matches the BERT paper code).
                update = (next_m / (next_v.sqrt() + group['e']))
                if (group['weight_decay'] > 0.0):
                    # Decoupled L2: decay added to the update, not the grad.
                    update += (group['weight_decay'] * p.data)
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                update_with_lr = (lr_scheduled * update)
                p.data.add_((- update_with_lr))
                state['step'] += 1
        return loss
class HyperInfomax(nn.Module):
    """Hypergraph infomax model: predicts edges, encodes with an HGNN, and
    trains with infomax/infomin objectives on positive/negative batches.
    """
    def __init__(self, args, n_features, device, writer):
        super(HyperInfomax, self).__init__()
        # Separate embeddings for node encoding vs. edge prediction.
        self.feature_emb = nn.Embedding(n_features, args.dim)
        self.feature_emb_edge = nn.Embedding(n_features, args.dim)
        self.hgnn = HGNN(args.dim, args.hid_units, device)
        # HACK: l0 parameters arrive as a string and are eval'd here — only
        # safe if args.l0_para comes from trusted config, never user input.
        self.edgePred = RestCross(args.dim, args.edge_num, eval(args.l0_para))
        self.infomax = INFOMAX(args.hid_units, args.n_neg_max, device)
        self.infomin = INFOMIN(args.hid_units, args.n_neg_min, device)
        self.readout = MLPReadout(args.hid_units, 1, nn.Sigmoid())
        self.args = args
        self.device = device
    def edge_stat(self, adj, batch):
        """Histogram (per value 0..num_features) of rounded and >0 adj sums,
        scattered per batch element. Returns (round_stats, grezero_stats)."""
        round_mat = scatter(torch.round(adj), batch, dim=0)
        grezero_mat = scatter((adj > 0).float(), batch, dim=0)
        ones = torch.ones_like(round_mat)  # NOTE(review): unused — confirm leftover.
        round_stat_list = []
        grezero_stat_list = []
        for i in range((self.args.num_features + 1)):
            round_stat = torch.sum((round_mat == i).float()).unsqueeze(0)
            grezero_stat = torch.sum((grezero_mat == i).float()).unsqueeze(0)
            round_stat_list.append(round_stat)
            grezero_stat_list.append(grezero_stat)
        round_stat_list = torch.cat(round_stat_list).unsqueeze(1)
        grezero_stat_list = torch.cat(grezero_stat_list).unsqueeze(1)
        return (round_stat_list, grezero_stat_list)
    def edge_pred(self, features, batch, is_training, record=False, epoch=(- 1)):
        """Predict a (soft) hyperedge incidence from edge features.

        Returns (adj, l0_penalty, edge_batch, (round_stat, grezero_stat));
        the stats tuple is currently always (None, None).
        """
        (adj, l0_penaty, n_edge) = self.edgePred(features, batch, is_training)
        n_edge = n_edge.to(self.device)
        # Map each predicted edge back to its graph index in the batch.
        edge_batch = torch.LongTensor(range((batch.max() + 1))).to(self.device).repeat_interleave(n_edge)
        (round_stat, grezero_stat) = (None, None)
        return (adj, l0_penaty, edge_batch, (round_stat, grezero_stat))
    def pred(self, pred_logits):
        """Readout graph-level logits to squeezed scalar predictions."""
        predictions = self.readout(pred_logits)
        return torch.squeeze(predictions)
    def l0_hirs(self, info_data, train, record=False, epoch=(- 1)):
        """Edge prediction + HGNN encoding for one batch.

        Returns (graph_repr c, node_repr h, edge_batch, l0_penalty,
        edge_count, edge_stats).
        """
        (node_index, edge_index, batch) = info_data
        nb_nodes = node_index.shape[0]
        features = self.feature_emb(node_index)
        features_edge = self.feature_emb_edge(node_index)
        (adj, l0_penaty, edge_batch, edge_stats) = self.edge_pred(features_edge, batch, train, record, epoch)
        (c, h) = self.hgnn(features, adj, batch)
        # Count of strictly-positive adjacency entries.
        edge_num = torch.sum(torch.ones_like(adj[(adj > 0)]))
        return (c, h, edge_batch, l0_penaty, edge_num, edge_stats)
    def run_pred(self, data, train, record=False, epoch=(- 1)):
        """Full forward for one batch; eval returns only (pred, penalties)."""
        (c, h, edge_batch, l0_penaty, edges, edge_stats) = self.l0_hirs(data, train, record=record, epoch=epoch)
        pred = self.pred(c)
        if (not train):
            return (pred, (l0_penaty, edges))
        else:
            return (c, h, edge_batch, l0_penaty, edges, pred, edge_stats)
    def cal_similarity(self, c_p, c_n):
        """Cosine similarity between mean positive and mean negative reprs."""
        cos = nn.CosineSimilarity(dim=0, eps=1e-06).to(self.device)
        p_mean = torch.mean(c_p, dim=0)
        n_mean = torch.mean(c_n, dim=0)
        return cos(p_mean, n_mean)
    def forward(self, pos_data, neg_data, train, record=False, epoch=(- 1)):
        """Run both branches and aggregate infomax/infomin losses and stats."""
        (c_p, h_p, edge_batch_p, l0_penalty_p, edges_p, pred_p, edge_stats_p) = self.run_pred(pos_data, train, record=record, epoch=epoch)
        (c_n, h_n, edge_batch_n, l0_penalty_n, edges_n, pred_n, edge_stats_n) = self.run_pred(neg_data, train, record=record, epoch=epoch)
        hc_loss = self.infomax(c_p, h_p, edge_batch_p, c_n, h_n, edge_batch_n)
        infomax_loss = hc_loss
        infomin_loss_p = self.infomin(h_p, edge_batch_p)
        infomin_loss_n = self.infomin(h_n, edge_batch_n)
        infomin_loss = ((infomin_loss_p + infomin_loss_n) / 2)
        distance_c = self.cal_similarity(c_p, c_n)
        distance_h = self.cal_similarity(h_p, h_n)
        l0_penalty = (l0_penalty_p + l0_penalty_n)
        n_edges = (edges_p + edges_n)
        return (pred_p, pred_n, infomax_loss, infomin_loss, (distance_c, distance_h), (l0_penalty, n_edges), (edge_stats_p, edge_stats_n))
class Composed(Representation):
    """Concatenation of several sub-representations along the feature axis."""
    kind = 'composed'

    def __init__(self, *reps, context=None):
        # BUGFIX: `context={}` was a shared mutable default argument.
        super().__init__(context=(context if context is not None else {}))
        self.reps = [from_config(rep, context=self.context) for rep in reps]

    # NOTE(review): takes `cls` but is not decorated with @classmethod —
    # confirm how the Representation base class invokes it.
    def _from_config(cls, config, context=None):
        return cls(*config['reps'], context=(context if context is not None else {}))

    def _get_config(self):
        return {'reps': [rep.get_config() for rep in self.reps]}

    def compute(self, data):
        """Compute every sub-representation and concatenate on axis 1.

        All results must be of the same kind (all global or all atomic);
        mixed kinds raise ValueError.
        """
        to_concatenate = [rep(data) for rep in self.reps]
        if all(isinstance(rep, GlobalRepresentation) for rep in to_concatenate):
            computed_representation = np.concatenate([rep.array for rep in to_concatenate], axis=1)
            return GlobalRepresentation.from_array(self, data, computed_representation)
        elif all(isinstance(rep, AtomicRepresentation) for rep in to_concatenate):
            computed_representation = np.concatenate([rep.linear for rep in to_concatenate], axis=1)
            return AtomicRepresentation.from_linear(self, data, computed_representation)
        else:
            raise ValueError('Composed representation can either deal with all atomic or all global representations, not mixed cases.')
def test():
    """to_backend_array round-trips bytes, datetime64 values and records."""
    aa = ak.contents.NumpyArray(np.frombuffer(b'hellothere', 'u1'))
    b = aa.to_backend_array(allow_missing=False)
    assert (b.tolist() == [104, 101, 108, 108, 111, 116, 104, 101, 114, 101])
    assert (b.dtype == np.dtype(np.uint8))
    # BUGFIX: the array held a single epoch value but the assertion expects
    # two timestamps; 1577836800 s is 2020-01-01T00:00:00 UTC.
    c = ak.contents.NumpyArray(np.array([0, 1577836800], dtype='datetime64[s]'))
    assert ([d.isoformat() for d in c.to_backend_array(allow_missing=False).tolist()] == ['1970-01-01T00:00:00', '2020-01-01T00:00:00'])
    recordarray = ak.contents.RecordArray([ak.contents.NumpyArray(np.array([1, 2, 3, 4, 5], dtype=np.int64))], fields=['one'])
    assert (recordarray.to_backend_array(allow_missing=False).tolist() == [(1,), (2,), (3,), (4,), (5,)])
class IndexedRowTableLinearize(TableLinearize):
    """Serialize a table dict as 'header: h1 | h2 row 1 : c1 | c2 ...'."""

    def process_table(self, table_content: Dict):
        """Linearize header plus all rows; rows are 1-indexed."""
        assert (('header' in table_content) and ('rows' in table_content)), self.PROMPT_MESSAGE
        _table_str = (self.process_header(table_content['header']) + ' ')
        for (i, row_example) in enumerate(table_content['rows']):
            _table_str += (self.process_row(row_example, row_index=(i + 1)) + '\n')
        return _table_str.strip()

    def process_header(self, headers: List):
        """Render header cells joined by ' | '."""
        return ('header: ' + ' | '.join(headers))

    def process_row(self, row: List, row_index: int):
        """Render one row as 'row N : c1 | c2 | ...'.

        Generalized: every non-string cell is stringified. Previously only
        ints were converted, so any other non-string cell (e.g. a float)
        crashed str.join.
        """
        row_cell_values = [
            cell_value if isinstance(cell_value, str) else str(cell_value)
            for cell_value in row
        ]
        row_str = ' | '.join(row_cell_values)
        return ('row ' + str(row_index) + ' : ' + row_str)
def make_embeddings(opt, word_dict, for_encoder=True, embed_type=None):
    """Build an Embeddings module sized for `word_dict`.

    `embed_type == 'kb'` selects the knowledge-base embedding size; anything
    else uses the word vector size. `for_encoder` is accepted for interface
    compatibility and not otherwise read here.
    """
    if embed_type == 'kb':
        embedding_dim = opt.kb_embed_size
    else:
        embedding_dim = opt.word_vec_size
    return Embeddings(
        word_vec_size=embedding_dim,
        position_encoding=False,
        dropout=opt.dropout,
        word_padding_idx=word_dict.to_ind(markers.PAD),
        word_vocab_size=len(word_dict),
    )
@pytest.mark.parametrize('batch_size', [1])
def test_examples_cpp_mnist_runtime(tmpdir, nnabla_examples_root, batch_size):
    """Train LeNet briefly, run the C++ `mnist_runtime`, and check that its
    output probabilities match the Python-side forward pass.

    BUGFIX: the decorator line was mangled to a bare `.parametrize(...)`
    (a SyntaxError); restored as `@pytest.mark.parametrize`.
    """
    pytest.skip('Temporarily skip due to mnist training data server trouble.')
    nn.clear_parameters()
    if (not nnabla_examples_root.available):
        pytest.skip('`nnabla-examples` can not be found.')
    if (not command_exists('mnist_runtime')):
        pytest.skip('An executable `mnist_runtime` is not in path.')
    tmpdir.chdir()
    script = os.path.join(nnabla_examples_root.path, 'image-classification/mnist-collection', 'classification.py')
    check_call(['python', script, '-i', '100'])
    nnp_file = tmpdir.join('tmp.monitor', 'lenet_result.nnp').strpath
    assert os.path.isfile(nnp_file)
    pgm_file = os.path.join(os.path.dirname(__file__), '../../../examples/cpp/mnist_runtime/1.pgm')
    assert os.path.isfile(pgm_file)
    output = check_output(['mnist_runtime', nnp_file, pgm_file, 'Runtime'])
    # Second output line, after the ':', holds the space-separated probabilities.
    # (A duplicate no-op parse of the same line was removed.)
    cpp_result = np.asarray(output.decode('ascii').splitlines()[1].split(':')[1].strip().split(' '), dtype=np.float32)
    from nnabla.utils import nnp_graph
    nnp = nnp_graph.NnpLoader(nnp_file)
    graph = nnp.get_network('Validation', batch_size=batch_size)
    x = graph.inputs['x']
    y = graph.outputs['y']
    from nnabla.utils.image_utils import imread
    img = imread(pgm_file, grayscale=True)
    x.d = img
    y.forward()
    assert_allclose(y.d.flatten(), cpp_result)
# NOTE(review): this bare call looks like a mangled `@add_start_docstrings(...)`
# decorator — confirm against the upstream transformers source.
_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
class EncoderDecoderModel(PreTrainedModel):
    """Generic wrapper combining any pretrained encoder with any pretrained
    autoregressive decoder into a seq2seq model (cross-attention in decoder).
    """
    config_class = EncoderDecoderConfig
    base_model_prefix = 'encoder_decoder'
    def __init__(self, config: Optional[PretrainedConfig]=None, encoder: Optional[PreTrainedModel]=None, decoder: Optional[PreTrainedModel]=None):
        # Either a combined config or both submodels must be supplied.
        assert ((config is not None) or ((encoder is not None) and (decoder is not None))), 'Either a configuration or an Encoder and a decoder has to be provided'
        if (config is None):
            config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
        else:
            assert isinstance(config, self.config_class), 'config: {} has to be of type {}'.format(config, self.config_class)
        super().__init__(config)
        if (encoder is None):
            from .modeling_auto import AutoModel
            encoder = AutoModel.from_config(config.encoder)
        if (decoder is None):
            from .modeling_auto import AutoModelForCausalLM
            decoder = AutoModelForCausalLM.from_config(config.decoder)
        self.encoder = encoder
        self.decoder = decoder
        # The encoder must be headless; the decoder carries the LM head.
        assert (self.encoder.get_output_embeddings() is None), 'The encoder {} should not have a LM Head. Please use a model without LM Head'
        self.tie_weights()
    def tie_weights(self):
        """Optionally tie encoder weights to the decoder's base model."""
        if self.config.tie_encoder_decoder:
            decoder_base_model_prefix = self.decoder.base_model_prefix
            self._tie_encoder_decoder_weights(self.encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix)
    def get_encoder(self):
        return self.encoder
    def get_decoder(self):
        return self.decoder
    def get_input_embeddings(self):
        # Input embeddings live on the encoder side.
        return self.encoder.get_input_embeddings()
    def get_output_embeddings(self):
        # Output (LM head) embeddings live on the decoder side.
        return self.decoder.get_output_embeddings()
    # NOTE(review): takes `cls` but carries no @classmethod decorator —
    # likely stripped during extraction; confirm upstream.
    def from_encoder_decoder_pretrained(cls, encoder_pretrained_model_name_or_path: str=None, decoder_pretrained_model_name_or_path: str=None, *model_args, **kwargs) -> PreTrainedModel:
        """Build an EncoderDecoderModel from two pretrained checkpoints.

        kwargs prefixed with `encoder_`/`decoder_` are routed to the
        respective submodel; the rest go to the combined config.
        """
        kwargs_encoder = {argument[len('encoder_'):]: value for (argument, value) in kwargs.items() if argument.startswith('encoder_')}
        kwargs_decoder = {argument[len('decoder_'):]: value for (argument, value) in kwargs.items() if argument.startswith('decoder_')}
        # Remove routed arguments from the shared kwargs.
        for key in kwargs_encoder.keys():
            del kwargs[('encoder_' + key)]
        for key in kwargs_decoder.keys():
            del kwargs[('decoder_' + key)]
        encoder = kwargs_encoder.pop('model', None)
        if (encoder is None):
            assert (encoder_pretrained_model_name_or_path is not None), 'If `model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has to be defined'
            from .modeling_auto import AutoModel
            if ('config' not in kwargs_encoder):
                from .configuration_auto import AutoConfig
                encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path)
                if ((encoder_config.is_decoder is True) or (encoder_config.add_cross_attention is True)):
                    logger.info(f'Initializing {encoder_pretrained_model_name_or_path} as a encoder model from a decoder model. Cross-attention and casual mask are disabled.')
                    encoder_config.is_decoder = False
                    encoder_config.add_cross_attention = False
                kwargs_encoder['config'] = encoder_config
            encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
        decoder = kwargs_decoder.pop('model', None)
        if (decoder is None):
            assert (decoder_pretrained_model_name_or_path is not None), 'If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has to be defined'
            from .modeling_auto import AutoModelForCausalLM
            if ('config' not in kwargs_decoder):
                from .configuration_auto import AutoConfig
                decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
                if ((decoder_config.is_decoder is False) or (decoder_config.add_cross_attention is False)):
                    logger.info(f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers.")
                    decoder_config.is_decoder = True
                    decoder_config.add_cross_attention = True
                kwargs_decoder['config'] = decoder_config
            if ((kwargs_decoder['config'].is_decoder is False) or (kwargs_decoder['config'].add_cross_attention is False)):
                logger.warning(f'Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a `decoder_config` to `.from_encoder_decoder_pretrained(...)`')
            decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
        config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
        return cls(encoder=encoder, decoder=decoder, config=config)
    # NOTE(review): the next two bare calls look like mangled
    # `@add_start_docstrings_to_callable(...)` / `@replace_return_docstrings(...)`
    # decorators for `forward` — confirm upstream.
    _start_docstrings_to_callable(ENCODER_DECODER_INPUTS_DOCSTRING)
    _return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        """Run encoder (unless `encoder_outputs` given) then decoder."""
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        # Route extra kwargs by prefix: un-prefixed → encoder, decoder_* → decoder.
        kwargs_encoder = {argument: value for (argument, value) in kwargs.items() if (not argument.startswith('decoder_'))}
        kwargs_decoder = {argument[len('decoder_'):]: value for (argument, value) in kwargs.items() if argument.startswith('decoder_')}
        if (encoder_outputs is None):
            encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs_encoder)
        encoder_hidden_states = encoder_outputs[0]
        decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, inputs_embeds=decoder_inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs_decoder)
        if (not return_dict):
            return (decoder_outputs + encoder_outputs)
        return Seq2SeqLMOutput(loss=decoder_outputs.loss, logits=decoder_outputs.logits, past_key_values=None, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
    def prepare_inputs_for_generation(self, input_ids, past, attention_mask, encoder_outputs, **kwargs):
        """Assemble decoder inputs for one generation step."""
        decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids)
        decoder_attention_mask = (decoder_inputs['attention_mask'] if ('attention_mask' in decoder_inputs) else None)
        input_dict = {'attention_mask': attention_mask, 'decoder_attention_mask': decoder_attention_mask, 'decoder_input_ids': decoder_inputs['input_ids'], 'encoder_outputs': encoder_outputs}
        if ('use_cache' in decoder_inputs):
            input_dict['decoder_use_cache'] = decoder_inputs['use_cache']
        if ('past_key_values' in decoder_inputs):
            input_dict['past_key_values'] = decoder_inputs['past_key_values']
        return input_dict
    def _reorder_cache(self, past, beam_idx):
        # Beam-search cache reordering is delegated to the decoder.
        return self.decoder._reorder_cache(past, beam_idx)
class BM25L(BM25):
    """BM25L variant: shifts the normalized term frequency by `delta` to
    soften the penalty on long documents."""

    def __init__(self, corpus, tokenizer=None, k1=1.5, b=0.75, delta=0.5):
        self.k1 = k1
        self.b = b
        self.delta = delta
        super().__init__(corpus, tokenizer)

    def _calc_idf(self, nd):
        """idf(w) = log(N + 1) - log(df + 0.5); always positive."""
        for word, freq in nd.items():
            self.idf[word] = math.log(self.corpus_size + 1) - math.log(freq + 0.5)

    def get_scores(self, query):
        """Score every document in the corpus against `query`."""
        score = np.zeros(self.corpus_size)
        doc_len = np.array(self.doc_len)
        for q in query:
            q_freq = np.array([(doc.get(q) or 0) for doc in self.doc_freqs])
            # Length-normalized term frequency.
            ctd = q_freq / ((1 - self.b) + ((self.b * doc_len) / self.avgdl))
            idf = self.idf.get(q) or 0
            score += (idf * q_freq * (self.k1 + 1) * (ctd + self.delta)) / ((self.k1 + ctd) + self.delta)
        return score
class GatewayWriteObjectStore(GatewayOperator):
    """Gateway operator that uploads data to an object-store bucket."""

    def __init__(self, bucket_name: str, bucket_region: str, num_connections: int=32, key_prefix: Optional[str]=''):
        # Operator tag consumed by the gateway program serializer.
        super().__init__('write_object_store')
        # Prefix prepended to every object key (may be empty).
        self.key_prefix = key_prefix
        self.bucket_name = bucket_name
        self.bucket_region = bucket_region
        # Number of parallel connections used for uploads.
        self.num_connections = num_connections
class ReraiseStatNode(StatNode):
    """Statement node for a bare `raise` re-raising the active exception."""
    child_attrs = []
    is_terminator = True

    def analyse_expressions(self, env):
        return self

    nogil_check = Node.gil_error
    gil_message = 'Raising exception'

    def generate_execution_code(self, code):
        """Emit C code restoring saved exception state, or a runtime re-raise."""
        code.mark_pos(self.pos)
        # Renamed from `vars`, which shadowed the builtin of the same name.
        exc_vars = code.funcstate.exc_vars
        if exc_vars:
            # Inside an except block: restore the saved (type, value, tb) triple.
            code.globalstate.use_utility_code(restore_exception_utility_code)
            code.put_giveref(exc_vars[0])
            code.put_giveref(exc_vars[1])
            # The traceback slot may be NULL, hence the x-variant.
            code.put_xgiveref(exc_vars[2])
            code.putln(('__Pyx_ErrRestoreWithState(%s, %s, %s);' % tuple(exc_vars)))
            for varname in exc_vars:
                code.put(('%s = 0; ' % varname))
            code.putln()
            code.putln(code.error_goto(self.pos))
        else:
            # No saved state: fall back to the runtime re-raise helper.
            code.globalstate.use_utility_code(UtilityCode.load_cached('ReRaiseException', 'Exceptions.c'))
            code.putln(('__Pyx_ReraiseException(); %s' % code.error_goto(self.pos)))
def read_config(key=None, default=None):
    """Read the sxs config.json.

    With no `key`, return the whole config dict ({} if the file is missing).
    With `key`, return config[key], falling back to `default`.
    """
    import json
    config_path = (sxs_directory('config') / 'config.json')
    if config_path.exists():
        # BUGFIX: close the file handle; json.load(path.open('r')) leaked it.
        with config_path.open('r') as config_file:
            config = json.load(config_file)
    else:
        config = {}
    if (key is None):
        return config
    else:
        return config.get(key, default)
def gesummv_distr2(alpha: dc.float64, beta: dc.float64, A: dc.float64[(lM, lN)], B: dc.float64[(lM, lN)], x: dc.float64[lN], y: dc.float64[lMy]):
    """Distributed GESUMMV (DaCe program): y = alpha*A@x + beta*B@x.

    NOTE(review): the two MatMult calls pass different global-shape arguments
    (((Px * lM), (Py * lN)) vs (M, N)) — confirm these are intentionally
    equivalent under the process-grid decomposition.
    """
    tmp1 = distr.MatMult(A, x, ((Px * lM), (Py * lN)), c_block_sizes=(lMy, 1))
    tmp2 = distr.MatMult(B, x, (M, N), c_block_sizes=(lMy, 1))
    y[:] = ((alpha * tmp1) + (beta * tmp2))
def test_crosstab_basic_3d():
    """crosstab on three aligned sequences yields sorted unique levels and a
    3-D contingency count indexed as count[x, y, z]."""
    a, b = 'a', 'b'
    x = [0, 0, 9, 9, 0, 0, 9, 9]
    y = [a, a, a, a, b, b, b, a]
    z = [1, 2, 3, 1, 2, 3, 3, 1]
    (xvals, yvals, zvals), count = crosstab(x, y, z)
    assert_array_equal(xvals, [0, 9])
    assert_array_equal(yvals, [a, b])
    assert_array_equal(zvals, [1, 2, 3])
    expected_count = np.array([
        [[1, 1, 0], [0, 1, 1]],
        [[2, 0, 1], [0, 0, 1]],
    ])
    assert_array_equal(count, expected_count)
class Examples(SegmentationBase):
    """ADE20K example images with 151-label segmentation maps."""

    def __init__(self, size=256, random_crop=False, interpolation='bicubic'):
        super().__init__(
            data_csv='data/ade20k_examples.txt',
            data_root='data/ade20k_images',
            segmentation_root='data/ade20k_segmentations',
            size=size,
            random_crop=random_crop,
            interpolation=interpolation,
            n_labels=151,
            shift_segmentation=False,
        )
def register_Ns3ComponentCarrierEnb_methods(root_module, cls):
    # Auto-generated pybindgen registrations for ns3::ComponentCarrierEnb:
    # constructors, getters/setters for scheduler / FFR algorithm / MAC / PHY,
    # and the protected DoInitialize override.
    cls.add_constructor([param('ns3::ComponentCarrierEnb const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    cls.add_method('GetFfMacScheduler', 'ns3::Ptr< ns3::FfMacScheduler >', [])
    cls.add_method('GetFfrAlgorithm', 'ns3::Ptr< ns3::LteFfrAlgorithm >', [])
    cls.add_method('GetMac', 'ns3::Ptr< ns3::LteEnbMac >', [])
    cls.add_method('GetPhy', 'ns3::Ptr< ns3::LteEnbPhy >', [])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetFfMacScheduler', 'void', [param('ns3::Ptr< ns3::FfMacScheduler >', 's')])
    cls.add_method('SetFfrAlgorithm', 'void', [param('ns3::Ptr< ns3::LteFfrAlgorithm >', 's')])
    cls.add_method('SetMac', 'void', [param('ns3::Ptr< ns3::LteEnbMac >', 's')])
    cls.add_method('SetPhy', 'void', [param('ns3::Ptr< ns3::LteEnbPhy >', 's')])
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    return
class TrainArgParser(BaseArgParser):
    """Argument parser for training runs (optimizer and checkpoint flags)."""

    def __init__(self):
        super(TrainArgParser, self).__init__()
        add = self.parser.add_argument
        add('--num_epochs', type=int, default=20, help='Number of epochs to train.')
        add('--lr', type=float, default=0.001, help='Learning rate.')
        add('--beta1', type=float, default=0.5, help='Beta1 value for Adam optimizer.')
        add('--max_ckpts', type=int, default=3, help='Max ckpts to save.')
class TestDB(TestCase):
    """Round-trip tests for the buffered DB writer against MySQL and Hive."""
    create_statement = 'create table test_db (features text, label int)'
    hive_create_statement = 'create table test_db (features string, label int) ROW FORMAT DELIMITED FIELDS TERMINATED BY "\x01"'
    select_statement = 'select * from test_db'
    drop_statement = 'drop table if exists test_db'

    def test_mysql(self):
        # BUGFIX: a mangled `@unittest.skipUnless(...)` decorator was left as
        # a bare tuple expression; rebuilt as a runtime skip guard.
        if testing.get_driver() != 'mysql':
            self.skipTest('skip non mysql tests')
        conn = connect(testing.get_datasource())
        self._do_test(conn)
        conn.close()

    def test_hive(self):
        # BUGFIX: same mangled-decorator repair as test_mysql.
        if testing.get_driver() != 'hive':
            self.skipTest('skip non hive tests')
        uri = testing.get_datasource()
        conn = connect(uri)
        self._do_test(conn)
        self._do_test_hive_specified_db(conn)

    def _do_test_hive_specified_db(self, conn):
        """Write/read through a db-qualified Hive table name."""
        create_db = 'create database if not exists test_db'
        create_tbl = 'create table test_db.tbl (features string, label int)\n ROW FORMAT DELIMITED FIELDS TERMINATED BY "\x01"'
        drop_tbl = 'drop table if exists test_db.tbl'
        select_tbl = 'select * from test_db.tbl'
        table_schema = ['label', 'features']
        values = ([(1, '5,6,1,2')] * 10)
        self.assertTrue(conn.execute(create_db))
        self.assertTrue(conn.execute(drop_tbl))
        self.assertTrue(conn.execute(create_tbl))
        with buffered_db_writer(conn, 'test_db.tbl', table_schema, buff_size=10) as w:
            for row in values:
                w.write(row)
        (field_names, data) = execute(conn, select_tbl)
        # Rows come back in (features, label) column order.
        expect_result = ([('5,6,1,2', 1)] * 10)
        self.assertEqual(field_names, ['features', 'label'])
        self.assertEqual(expect_result, data)

    def _do_test(self, conn):
        """Create the table for the active driver, write rows, read them back."""
        table_name = 'test_db'
        table_schema = ['features', 'label']
        values = ([('5,6,1,2', 1)] * 10)
        conn.execute(self.drop_statement)
        if (conn.driver == 'hive'):
            conn.execute(self.hive_create_statement)
        else:
            conn.execute(self.create_statement)
        with buffered_db_writer(conn, table_name, table_schema, buff_size=10) as w:
            for row in values:
                w.write(row)
        (field_names, data) = execute(conn, self.select_statement)
        self.assertEqual(table_schema, field_names)
        self.assertEqual(values, data)
def test_to_jams():
    """A clip converted to JAMS must pass JAMS schema validation."""
    clip_id = 'foa_dev/split1_ir0_ov1_1'
    dataset = tau2019sse.Dataset(TEST_DATA_HOME)
    jam = dataset.clip(clip_id).to_jams()
    assert jam.validate()
def _logit(p):
p = torch.max((torch.ones(1) * 0.1), torch.min((torch.ones(1) * 0.9), p))
return (torch.log((p + 1e-10)) + torch.log(((1 - p) + 1e-10))) |
def process_variant(variant):
    """In debug mode, shrink VAE/RL settings so a run finishes quickly."""
    if not args.debug:
        return
    vae_variant = variant['vae_variant']
    vae_variant['num_epochs'] = 10
    vae_variant['vis_kwargs']['save_period'] = 2
    vae_variant['vis_kwargs']['num_samples_for_video'] = 2
    variant['rl_variant']['vae_wrapped_env_kwargs']['num_samples_for_latent_histogram'] = 100
class REFERDB(BASE):
    """Config container for referring-expression (REFER) datasets.

    Populates default augmentation, geometry, language-encoder and
    joint-embedding settings, then overlays the caller-supplied config.
    """

    def __init__(self, db_config):
        super(REFERDB, self).__init__()
        # Data augmentation defaults.
        self._configs['data_aug'] = True
        self._configs['random_flip'] = True
        self._configs['random_affine'] = True
        self._configs['random_color'] = True
        self._configs['random_lighting'] = True
        # Input/output geometry.
        self._configs['input_size'] = [256, 256]
        self._configs['output_sizes'] = [32, 32]
        self._configs['anchors'] = None
        # Language encoder settings.
        self._configs['vocab_size'] = 0
        self._configs['word_embedding_size'] = 512
        self._configs['word_vec_size'] = 512
        self._configs['hidden_size'] = 512
        self._configs['bidirectional'] = True
        self._configs['input_dropout_p'] = 0.5
        self._configs['dropout_p'] = 0.2
        self._configs['n_layers'] = 1
        self._configs['max_query_len'] = 128
        self._configs['variable_length'] = True
        # Joint vision-language embedding.
        self._configs['joint_embedding_size'] = 256
        self._configs['joint_out_dim'] = 256
        self._configs['joint_embedding_dropout'] = 0.1
        self._configs['joint_mlp_layers'] = 2
        self._configs['corpus_path'] = None
        # Caller-supplied overrides win over all defaults above.
        self.update_config(db_config)
class GlueDataset(Dataset):
args: GlueDataTrainingArguments
output_mode: str
features: List[InputFeatures]
def __init__(self, args: GlueDataTrainingArguments, tokenizer: PreTrainedTokenizerBase, limit_length: Optional[int]=None, mode: Union[(str, Split)]=Split.train, cache_dir: Optional[str]=None):
warnings.warn('This dataset will be removed from the library soon, preprocessing should be handled with the Datasets library. You can have a look at this example script for pointers: FutureWarning)
self.args = args
self.processor = glue_processors[args.task_name]()
self.output_mode = glue_output_modes[args.task_name]
if isinstance(mode, str):
try:
mode = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
cached_features_file = os.path.join((cache_dir if (cache_dir is not None) else args.data_dir), 'cached_{}_{}_{}_{}'.format(mode.value, tokenizer.__class__.__name__, str(args.max_seq_length), args.task_name))
label_list = self.processor.get_labels()
if ((args.task_name in ['mnli', 'mnli-mm']) and (tokenizer.__class__.__name__ in ('RobertaTokenizer', 'RobertaTokenizerFast', 'XLMRobertaTokenizer', 'BartTokenizer', 'BartTokenizerFast'))):
(label_list[1], label_list[2]) = (label_list[2], label_list[1])
self.label_list = label_list
lock_path = (cached_features_file + '.lock')
with FileLock(lock_path):
if (os.path.exists(cached_features_file) and (not args.overwrite_cache)):
start = time.time()
self.features = torch.load(cached_features_file)
logger.info(f'Loading features from cached file {cached_features_file} [took %.3f s]', (time.time() - start))
else:
logger.info(f'Creating features from dataset file at {args.data_dir}')
if (mode == Split.dev):
examples = self.processor.get_dev_examples(args.data_dir)
elif (mode == Split.test):
examples = self.processor.get_test_examples(args.data_dir)
else:
examples = self.processor.get_train_examples(args.data_dir)
if (limit_length is not None):
examples = examples[:limit_length]
self.features = glue_convert_examples_to_features(examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode)
start = time.time()
torch.save(self.features, cached_features_file)
logger.info('Saving features into cached file %s [took %.3f s]', cached_features_file, (time.time() - start))
def __len__(self):
    """Return the number of cached feature records in this dataset."""
    return len(self.features)
def __getitem__(self, i) -> InputFeatures:
    """Return the i-th cached InputFeatures record."""
    return self.features[i]
def get_labels(self):
    """Return the task's label list (possibly reordered for MNLI; see __init__)."""
    return self.label_list
def run_one_epoch(epoch, loader, model, criterion, optimizer, meters, phase='train', scheduler=None):
    """Run one epoch of training / validation / testing / BN calibration.

    Args:
        epoch: current epoch index (also used to reseed distributed samplers).
        loader: iterable of (input, target) batches.
        model, criterion, optimizer: the training triple.
        meters: scalar meters filled by forward_loss; may be None.
        phase: one of 'train', 'val', 'test', 'calib'.
        scheduler: LR scheduler, stepped per iteration only for the
            '*_iter' scheduler variants.

    Returns:
        top1 error from the flushed meters on the master process, else None.
    """
    t_start = time.time()
    assert (phase in ['train', 'val', 'test', 'calib']), 'phase not be in train/val/test/calib.'
    train = (phase == 'train')
    if train:
        model.train()
    else:
        model.eval()
        # Calibration: re-estimate BN statistics while otherwise in eval mode.
        if (phase == 'calib'):
            model.apply(bn_calib)
    if getattr(FLAGS, 'distributed', False):
        # Deterministic per-epoch reshuffle for the distributed sampler.
        loader.sampler.set_epoch(epoch)
    skip_print = False
    for (batch_idx, (input, target)) in enumerate(loader):
        if (phase == 'calib'):
            # Stop after a fixed number of calibration batches (-1 = run all).
            if (batch_idx == getattr(FLAGS, 'bn_calib_batch_num', (- 1))):
                break
        if (train and (batch_idx >= getattr(FLAGS, 'finetune_iters', float('inf')))):
            # finetune_iters == 0 means nothing ran, so suppress reporting.
            if (getattr(FLAGS, 'finetune_iters', float('inf')) == 0):
                skip_print = True
            break
        # NOTE(review): int_op_only presumably means CPU-only integer
        # inference, hence no .cuda() — confirm against FLAGS definition.
        if (not getattr(FLAGS, 'int_op_only', False)):
            target = target.cuda(non_blocking=True)
        if train:
            if (FLAGS.lr_scheduler == 'linear_decaying'):
                # Per-step decrement so LR decays to ~0 over num_epochs.
                linear_decaying_per_step = (((FLAGS.lr / FLAGS.num_epochs) / len(loader.dataset)) * FLAGS.batch_size)
                for param_group in optimizer.param_groups:
                    param_group['lr'] -= linear_decaying_per_step
            optimizer.zero_grad()
            loss = forward_loss(model, criterion, input, target, meters, train=True)
            loss.backward()
            if (getattr(FLAGS, 'distributed', False) and getattr(FLAGS, 'distributed_all_reduce', False)):
                # Manual gradient all-reduce when not relying on DDP.
                allreduce_grads(model)
            optimizer.step()
            # These scheduler variants step per iteration, not per epoch.
            if (FLAGS.lr_scheduler in ['exp_decaying_iter', 'cos_annealing_iter', 'multistep_iter']):
                scheduler.step()
        else:
            forward_loss(model, criterion, input, target, meters)
    val_top1 = None
    if (is_master() and (meters is not None) and (not skip_print)):
        results = flush_scalar_meters(meters)
        mprint(('{:.1f}s\t{}\t{}/{}: '.format((time.time() - t_start), phase, epoch, FLAGS.num_epochs) + ', '.join(('{}: {}'.format(k, v) for (k, v) in results.items()))))
        val_top1 = results['top1_error']
    return val_top1
def quotient(x, y, *args, **kwds):
    """Return the quotient of ``x`` by ``y``.

    Prefers an element-specific ``x.quotient(y, ...)`` method, forwarding
    any extra arguments; when no such method exists (an ``AttributeError``
    is raised), falls back to plain division and ignores the extras.
    """
    try:
        return x.quotient(y, *args, **kwds)
    except AttributeError:
        # No .quotient method available: generic division fallback.
        return x / y
# NOTE(review): the original line read `_node_type(...)` with no `@`; the
# `@optplan.register` prefix of the decorator appears to have been lost
# during extraction and is restored here — confirm against upstream.
@optplan.register_node_type(optplan.NodeMetaType.TRANSFORMATION)
class ContToDiscThresholding(optplan.TransformationBase):
    """Schema node for a continuous-to-discrete thresholding transformation."""
    # Polymorphic discriminator used when (de)serializing optplan nodes.
    type = schema_utils.polymorphic_model_type('cont_to_disc_thresholding')
    # Reference to the continuous parametrization that gets thresholded.
    continuous_parametrization = optplan.ReferenceType(optplan.Parametrization)
    # Threshold value applied to the continuous parametrization.
    threshold = types.FloatType()
class BasicUnit(nn.Module):
    """One recurrent layer (RNN/LSTM/GRU) whose output is concatenated with
    its input along the feature axis, densely-connected style, so the
    feature dimension grows by ``increase_rate``.
    """

    def __init__(self, unit, input_dim, increase_rate, droprate):
        """Build the recurrent layer.

        Args:
            unit: one of 'rnn', 'lstm', 'gru'.
            input_dim: size of the incoming feature dimension.
            increase_rate: hidden size of the recurrent layer (features added).
            droprate: dropout probability applied to the input in forward().
        """
        super(BasicUnit, self).__init__()
        recurrent_types = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}
        self.unit = unit
        self.layer = recurrent_types[unit](input_dim, increase_rate, 1)
        if self.unit == 'lstm':
            utils.init_lstm(self.layer)
        self.droprate = droprate
        self.input_dim = input_dim
        self.increase_rate = increase_rate
        self.output_dim = input_dim + increase_rate
        self.init_hidden()

    def init_hidden(self):
        """Reset the carried-over recurrent hidden state."""
        self.hidden_state = None

    def rand_ini(self):
        """No-op: nothing extra to randomly initialise for this unit."""
        return

    def forward(self, x):
        """Run the recurrent layer; return [x, layer(x)] concatenated on dim 2."""
        if self.droprate > 0:
            dropped = F.dropout(x, p=self.droprate, training=self.training)
        else:
            dropped = x
        out, next_hidden = self.layer(dropped, self.hidden_state)
        # Repackage so state carries across calls (presumably detaching it
        # from the graph — confirm against utils.repackage_hidden).
        self.hidden_state = utils.repackage_hidden(next_hidden)
        out = out.contiguous()
        return torch.cat([x, out], 2)
class SymmetricIdeal(Ideal_generic):
    """Ideal of an infinite polynomial ring, closed under the action of the
    infinite symmetric group on variable indices.

    Generators are understood up to that group action, so most operations
    work with symmetrised generating sets and symmetric reduction.
    """

    def __init__(self, ring, gens, coerce=True):
        """Create the ideal in ``ring`` with the given generators."""
        Ideal_generic.__init__(self, ring, gens, coerce=coerce)

    def __repr__(self):
        return ('Symmetric Ideal %s of %s' % (self._repr_short(), self.ring()))

    def _latex_(self):
        """LaTeX form: (gens) R[S_infinity]."""
        from sage.misc.latex import latex
        return ('\\left(%s\\right)%s[\\mathfrak{S}_{\\infty}]' % (', '.join((latex(g) for g in self.gens())), latex(self.ring())))

    def _contains_(self, p):
        """Membership test: p is in the ideal iff it symmetrically reduces to 0.

        Any failure during reduction counts as "not provably contained".
        """
        try:
            return (self.reduce(p) == 0)
        except Exception:
            return False

    def __mul__(self, other):
        """Product of two symmetric ideals.

        Because generators are only defined up to the symmetric-group
        action, ``other`` is first symmetrised, then shifted copies of its
        generators (under the cycle P) are multiplied against this ideal's
        generators; the result is interreduced.
        """
        PARENT = self.ring()
        if ((not isinstance(other, self.__class__)) or (self.ring() != other.ring())):
            if hasattr(other, 'gens'):
                other = SymmetricIdeal(PARENT, other.gens(), coerce=True)
        other = other.symmetrisation()
        # Highest variable index occurring in either generating set.
        sN = max(([X.max_index() for X in self.gens()] + [1]))
        oN = max(([X.max_index() for X in other.gens()] + [1]))
        from sage.combinat.permutation import Permutation
        # Cycle shifting all indices by one, large enough for both sides.
        P = Permutation((list(range(2, ((sN + oN) + 1))) + [1]))
        oGen = list(other.gens())
        SymL = oGen
        # Collect sN shifted copies of other's generators.
        for i in range(sN):
            oGen = [(X ** P) for X in oGen]
            SymL = (SymL + oGen)
        OUT = []
        for X in self.gens():
            OUT.extend([(X * Y) for Y in SymL])
        return SymmetricIdeal(PARENT, OUT, coerce=False).interreduction()

    def __pow__(self, n):
        """n-th power, computed by repeated symmetric multiplication."""
        OUT = SymmetricIdeal(self.ring(), [1])
        for i in range(n):
            OUT = (self * OUT)
        return OUT

    def is_maximal(self):
        """Heuristic maximality test (only meaningful over a field).

        NOTE(review): when there is a single generator, a non-zero trivial
        ideal returns True, but the method then falls through for the zero
        or multi-generator case — confirm intended semantics upstream.
        """
        if (not self.base_ring().is_field()):
            raise NotImplementedError
        if (len(self.gens()) == 1):
            if (self.is_trivial() and (not self.is_zero())):
                return True
        # Maximal iff each base variable family appears as a univariate gen.
        V = [p.variables() for p in self.gens()]
        V = [x for x in V if (len(x) == 1)]
        V = [str(x[0]).split('_')[0] for x in V]
        return (set(V) == set(self.ring().variable_names()))

    def reduce(self, I, tailreduce=False):
        """Symmetrically reduce: either a single element by self, or self's
        generators by the ideal/list ``I``.

        Returns the reduced element, or a new SymmetricIdeal of reduced
        generators respectively.
        """
        if (I in self.ring()):
            # Single ring element: reduce it modulo this ideal.
            return self.ring()(I).reduce(self)
        from sage.rings.polynomial.symmetric_reduction import SymmetricReductionStrategy
        if hasattr(I, 'gens'):
            I = I.gens()
        if (not I):
            return self
        I = list(I)
        S = SymmetricReductionStrategy(self.ring(), I, tailreduce)
        return SymmetricIdeal(self.ring(), [S.reduce(X) for X in self.gens()], coerce=False)

    def interreduction(self, tailreduce=True, sorted=False, report=None, RStrat=None):
        """Return an interreduced generating set as a new SymmetricIdeal.

        Repeatedly reduces each generator against the others until the set
        stabilises.  A unit generator short-circuits to the whole ring.
        """
        DONE = []
        TODO = []
        PARENT = self.ring()
        for P in self.gens():
            if (P._p != 0):
                if P.is_unit():
                    if (RStrat is not None):
                        RStrat.add_generator(PARENT(1))
                    return SymmetricIdeal(self.ring(), [self.ring().one()], coerce=False)
                TODO.append(P)
        if (not sorted):
            # Deduplicate and order for deterministic processing.
            TODO = list(set(TODO))
            TODO.sort()
        if (not hasattr(PARENT, '_P')):
            # Underlying finite polynomial ring not yet fixed: collect the
            # variables actually used (again short-circuiting on units).
            VarList = set()
            for P in TODO:
                if (P._p != 0):
                    if P.is_unit():
                        if (RStrat is not None):
                            RStrat.add_generator(PARENT.one())
                        return SymmetricIdeal(PARENT, [PARENT.one()], coerce=False)
                    VarList = VarList.union(P._p.parent().variable_names())
            VarList = list(VarList)
            if (not VarList):
                return SymmetricIdeal(PARENT, [0])
        if (report is not None):
            print('Symmetric interreduction')
        from sage.rings.polynomial.symmetric_reduction import SymmetricReductionStrategy
        if (RStrat is None):
            RStrat = SymmetricReductionStrategy(self.ring(), tailreduce=tailreduce)
        GroundState = RStrat.gens()
        while True:
            # Restart from the strategy's original generators each pass.
            RStrat.setgens(GroundState)
            DONE = []
            for i in range(len(TODO)):
                if (report is not None):
                    print(('[%d/%d] ' % ((i + 1), len(TODO))), end='')
                    sys.stdout.flush()
                p = RStrat.reduce(TODO[i], report=report)
                if (p._p != 0):
                    if p.is_unit():
                        return SymmetricIdeal(self.ring(), [self.ring().one()], coerce=False)
                    RStrat.add_generator(p, good_input=True)
                    DONE.append(p)
                elif (report is not None):
                    print('-> 0')
            DONE.sort()
            if (DONE == TODO):
                break
            else:
                # Same length: also stable if equal as sorted multisets.
                if (len(TODO) == len(DONE)):
                    import copy
                    bla = copy.copy(TODO)
                    bla.sort()
                    if (bla == DONE):
                        break
                TODO = DONE
        return SymmetricIdeal(self.ring(), DONE, coerce=False)

    def interreduced_basis(self):
        """Interreduced (tail-reduced) generators, as a Sequence."""
        return Sequence(self.interreduction(tailreduce=True).gens(), self.ring(), check=False)

    def symmetrisation(self, N=None, tailreduce=False, report=None, use_full_group=False):
        """Close the generating set under S_N (transpositions up to level N),
        interreducing after each round, until a fixed point is reached.

        When ``use_full_group`` is set, all of S_N is applied in one pass
        instead of iterating over transpositions.
        """
        newOUT = self.interreduction(tailreduce=tailreduce, report=report).squeezed()
        R = self.ring()
        OUT = (R * ())
        if (N is None):
            N = max(([Y.max_index() for Y in newOUT.gens()] + [1]))
        else:
            N = Integer(N)
        if (hasattr(R, '_max') and (R._max < N)):
            # Side effect: force the ring to create variables up to index N.
            R.gen()[N]
        if (report is not None):
            print(('Symmetrise %d polynomials at level %d' % (len(newOUT.gens()), N)))
        if use_full_group:
            from sage.combinat.permutation import Permutations
            NewGens = []
            Gens = self.gens()
            for P in Permutations(N):
                NewGens.extend([(p ** P) for p in Gens])
            return (NewGens * R).interreduction(tailreduce=tailreduce, report=report)
        from sage.combinat.permutation import Permutation
        from sage.rings.polynomial.symmetric_reduction import SymmetricReductionStrategy
        RStrat = SymmetricReductionStrategy(self.ring(), OUT.gens(), tailreduce=tailreduce)
        while (OUT != newOUT):
            OUT = newOUT
            PermutedGens = list(OUT.gens())
            if (not (report is None)):
                print('Apply permutations')
            # Transpositions (i j) generate S_N; apply each to every gen.
            for i in range(1, N):
                for j in range((i + 1), (N + 1)):
                    P = Permutation((i, j))
                    for X in OUT.gens():
                        p = RStrat.reduce((X ** P), report=report)
                        if (p._p != 0):
                            PermutedGens.append(p)
                            RStrat.add_generator(p, good_input=True)
            newOUT = (PermutedGens * R).interreduction(tailreduce=tailreduce, report=report)
        return OUT

    def symmetric_basis(self):
        """Symmetrised, tail-reduced, normalised generators as a Sequence."""
        return Sequence(self.symmetrisation(tailreduce=True).normalisation().gens(), self.ring(), check=False)

    def normalisation(self):
        """Divide each non-zero generator by its leading coefficient."""
        return SymmetricIdeal(self.ring(), [(X / X.lc()) for X in self.gens() if (X._p != 0)])

    def squeezed(self):
        """Apply .squeezed() to every generator (compress variable indices)."""
        return SymmetricIdeal(self.ring(), [X.squeezed() for X in self.gens()])

    # NOTE(review): the original line here was the bare name `_method`
    # (a NameError); it matches the tail of `@cached_method` with its
    # `@cached` prefix stripped, so the decorator is restored.
    @cached_method
    def groebner_basis(self, tailreduce=False, reduced=True, algorithm=None, report=None, use_full_group=False):
        """Symmetric Groebner basis, by alternating classical Groebner basis
        computations in finite polynomial rings with symmetrisation at
        increasing levels N until the leading monomials stabilise.

        Raises TypeError if the base ring is not a field.
        """
        if (algorithm is None):
            algorithm = ''
        PARENT = self.ring()
        if (not (hasattr(PARENT.base_ring(), 'is_field') and PARENT.base_ring().is_field())):
            raise TypeError(('The base ring (= %s) must be a field' % PARENT.base_ring()))
        OUT = self.symmetrisation(tailreduce=tailreduce, report=report, use_full_group=use_full_group)
        if (not (report is None)):
            print('Symmetrisation done')
        VarList = set()
        for P in OUT.gens():
            if (P._p != 0):
                if P.is_unit():
                    # Unit generator: the ideal is the whole ring.
                    return Sequence([PARENT(1)], PARENT, check=False)
                VarList = VarList.union([str(X) for X in P.variables()])
        VarList = list(VarList)
        if (not VarList):
            return Sequence([PARENT(0)], PARENT, check=False)
        from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
        # Current symmetrisation level = highest variable index in use.
        N = max(([int(X.split('_')[1]) for X in VarList] + [1]))
        while True:
            if hasattr(PARENT, '_P'):
                CommonR = PARENT._P
            else:
                # Rebuild a finite polynomial ring covering the variables
                # occurring in the current generators.
                VarList = set()
                for P in OUT.gens():
                    if (P._p != 0):
                        if P.is_unit():
                            return Sequence([PARENT(1)], PARENT, check=False)
                        VarList = VarList.union([str(X) for X in P.variables()])
                VarList = list(VarList)
                VarList.sort(key=PARENT.varname_key, reverse=True)
                CommonR = PolynomialRing(PARENT._base, VarList, order=PARENT._order)
            try:
                # Move generators into CommonR, going through strings when
                # the rings differ in their number of variables.
                DenseIdeal = ([(CommonR(P._p) if ((CommonR is P._p.parent()) or (CommonR.ngens() != P._p.parent().ngens())) else CommonR(repr(P._p))) for P in OUT.gens()] * CommonR)
            except Exception:
                if (report is not None):
                    print('working around a libsingular bug')
                DenseIdeal = ([repr(P._p) for P in OUT.gens()] * CommonR)
            if (report is not None):
                print('Classical Groebner basis')
                if (algorithm != ''):
                    print(('(using %s)' % algorithm))
            newOUT = (DenseIdeal.groebner_basis(algorithm) * PARENT)
            if (report is not None):
                print('->', len(newOUT.gens()), 'generators')
            # Symmetrise one level higher and test for stabilisation.
            N += 1
            newOUT = newOUT.symmetrisation(N=N, tailreduce=tailreduce, report=report, use_full_group=use_full_group)
            if ([X.lm() for X in OUT.gens()] == [X.lm() for X in newOUT.gens()]):
                if reduced:
                    if tailreduce:
                        return Sequence(newOUT.normalisation().gens(), PARENT, check=False)
                    return Sequence(newOUT.interreduction(tailreduce=True, report=report).normalisation().gens(), PARENT, check=False)
                return Sequence(newOUT.gens(), PARENT, check=False)
            OUT = newOUT
def main(args):
    """Export a COLMAP reconstruction to VisualSFM's NVM_V3 text format.

    Writes one line per image (focal length, quaternion, camera center,
    radial distortion), then the 3D points with colors and their 2D tracks.

    Args:
        args: namespace with input_folder, output_file, image_name_prefix.
    """
    scene_manager = SceneManager(args.input_folder)
    scene_manager.load()
    with open(args.output_file, 'w') as fid:
        # Header: format tag, blank line, then the number of images.
        fid.write('NVM_V3\n \n{:d}\n'.format(len(scene_manager.images)))
        image_fmt_str = (' {:.3f} ' + (7 * '{:.7f} '))
        # Fix: dict.iteritems() is Python-2 only; items() is the equivalent.
        for (image_id, image) in scene_manager.images.items():
            camera = scene_manager.cameras[image.camera_id]
            # NVM stores a single focal length; average fx and fy.
            f = (0.5 * (camera.fx + camera.fy))
            fid.write((args.image_name_prefix + image.name))
            fid.write(image_fmt_str.format(*(((f,) + tuple(image.q.q)) + tuple(image.C()))))
            if (camera.distortion_func is None):
                fid.write('0 0\n')
            else:
                # NVM's radial distortion sign convention is the negation of
                # COLMAP's k1 — presumably; confirm against the NVM spec.
                fid.write('{:.7f} 0\n'.format((- camera.k1)))
        # Map image ids to their 0-based output order for track references.
        image_id_to_idx = dict(((image_id, i) for (i, image_id) in enumerate(scene_manager.images)))
        fid.write('{:d}\n'.format(len(scene_manager.points3D)))
        for (i, point3D_id) in enumerate(scene_manager.point3D_ids):
            fid.write('{:.7f} {:.7f} {:.7f} '.format(*scene_manager.points3D[i]))
            fid.write('{:d} {:d} {:d} '.format(*scene_manager.point3D_colors[i]))
            # Track: (image index, keypoint index, x, y) per observation.
            keypoints = [((image_id_to_idx[image_id], kp_idx) + tuple(scene_manager.images[image_id].points2D[kp_idx])) for (image_id, kp_idx) in scene_manager.point3D_id_to_images[point3D_id]]
            fid.write('{:d}'.format(len(keypoints)))
            fid.write(((len(keypoints) * ' {:d} {:d} {:.3f} {:.3f}') + '\n').format(*itertools.chain(*keypoints)))
def CheckForCopyright(filename, lines, error):
    """Report a 'legal/copyright' error if no copyright notice is found.

    Scans line indices 1..10 (index 0 is skipped — presumably a dummy
    header slot supplied by the caller; confirm) for the word "Copyright",
    case-insensitively.

    Args:
        filename: name reported with the error.
        lines: list of file lines.
        error: callback error(filename, linenum, category, confidence, message).
    """
    # Fix: `xrange` is Python-2 only; `range` is the equivalent here.
    for line in range(1, min(len(lines), 11)):
        if re.search('Copyright', lines[line], re.I):
            break
    else:
        error(filename, 0, 'legal/copyright', 5, 'No copyright message found. You should have a line: "Copyright [year] <Copyright Owner>"')
def _wandb_log(_dict):
    """Log a metrics dict to the active wandb run, or to the local logger
    when no wandb run is active."""
    if wandb.run is None:
        # No active run: fall back to plain logging.
        log.info(repr(_dict))
    else:
        wandb.log(_dict)
class BPRMF(RecMixin, BaseRecommenderModel):
    """Bayesian Personalized Ranking Matrix Factorization recommender."""

    # NOTE(review): the original line held only `_charger` (a NameError);
    # it matches the tail of `@init_charger` with `@init` stripped, so the
    # decorator is restored — confirm against the framework.
    @init_charger
    def __init__(self, data, config, params, *args, **kwargs):
        """Set up hyper-parameters, the MF model, and the pairwise sampler."""
        self._random = np.random
        # (attribute, config key, shortcut, default, cast, transform)
        self._params_list = [('_factors', 'factors', 'f', 10, int, None), ('_learning_rate', 'lr', 'lr', 0.05, None, None), ('_bias_regularization', 'bias_regularization', 'bias_reg', 0, None, None), ('_user_regularization', 'user_regularization', 'u_reg', 0.0025, None, None), ('_positive_item_regularization', 'positive_item_regularization', 'pos_i_reg', 0.0025, None, None), ('_negative_item_regularization', 'negative_item_regularization', 'neg_i_reg', 0.00025, None, None), ('_update_negative_item_factors', 'update_negative_item_factors', 'up_neg_i_f', True, None, None), ('_update_users', 'update_users', 'up_u', True, None, None), ('_update_items', 'update_items', 'up_i', True, None, None), ('_update_bias', 'update_bias', 'up_b', True, None, None)]
        self.autoset_params()
        self._batch_size = 1
        self._ratings = self._data.train_dict
        self._model = MFModel(self._factors, self._data, self._learning_rate, self._user_regularization, self._bias_regularization, self._positive_item_regularization, self._negative_item_regularization)
        self._sampler = cs.Sampler(self._data.i_train_dict)

    def get_recommendations(self, k: int = 10):
        """Return (validation, test) top-k recommendation dicts."""
        predictions_top_k_val = {}
        predictions_top_k_test = {}
        (recs_val, recs_test) = self.process_protocol(k)
        predictions_top_k_val.update(recs_val)
        predictions_top_k_test.update(recs_test)
        return (predictions_top_k_val, predictions_top_k_test)

    def get_single_recommendation(self, mask, k, *args):
        """Per-user top-k predictions for every user in the training set."""
        return {u: self._model.get_user_predictions(u, mask, k) for u in self._ratings.keys()}

    # Fix: `name` is interpolated as `{self.name}` in restore_weights, so it
    # must be a property (the `@property` decorator was lost in extraction);
    # as a bare method the f-string would format the bound method object.
    @property
    def name(self):
        """Model name encoding epochs, batch size and hyper-parameter shortcuts."""
        return ((((('BPRMF' + '_e:') + str(self._epochs)) + '_bs:') + str(self._batch_size)) + f'_{self.get_params_shortcut()}')

    def train(self):
        """Run the training loop, evaluating after every epoch."""
        if self._restore:
            return self.restore_weights()
        print(f'Transactions: {self._data.transactions}')
        for it in range(self._epochs):
            print(f'''
Iteration: {(it + 1)}''')
            # loss/steps are tracked but not reported here — kept for
            # parity with sibling models; confirm before removing.
            loss = 0
            steps = 0
            with tqdm(total=int((self._data.transactions // self._batch_size)), disable=(not self._verbose)) as t:
                for batch in self._sampler.step(self._data.transactions, self._batch_size):
                    steps += 1
                    self._model.train_step(batch)
                    t.update()
            self.evaluate(it)

    def restore_weights(self):
        """Load saved model state, re-evaluate, and optionally dump recs.

        Returns True on success, False on any failure (logged, not raised).
        """
        try:
            with open(self._saving_filepath, 'rb') as f:
                self._model.set_model_state(pickle.load(f))
            print(f'Model correctly Restored')
            recs = self.get_recommendations(self.evaluator.get_needed_recommendations())
            result_dict = self.evaluator.eval(recs)
            self._results.append(result_dict)
            print('')
            if self._save_recs:
                store_recommendation(recs, (self._config.path_output_rec_result + f'{self.name}.tsv'))
            return True
        except Exception as ex:
            print(f'Error in model restoring operation! {ex}')
            return False
class TeacherController(object):
    """Drives a curriculum 'teacher' that samples environment parameters,
    records train/test episode outcomes, and feeds rewards back to the
    teacher so it can adapt the task distribution."""

    def __init__(self, teacher, nb_test_episodes, param_env_bounds, seed=None, teacher_params=None):
        """Build the controller and the requested task generator.

        Args:
            teacher: one of 'Oracle', 'Random', 'RIAC', 'ALP-GMM', 'Covar-GMM'.
            nb_test_episodes: size of the fixed test set cycle.
            param_env_bounds: dict name -> [min, max] or [min, max, nb_dims].
            seed: RNG seed forwarded to the task generator.
            teacher_params: extra teacher-specific options (default: {}).
                Fix: the default was a shared mutable dict ({}); it is now
                None, normalised to a fresh dict below.
        """
        if teacher_params is None:
            teacher_params = {}
        self.teacher = teacher
        self.nb_test_episodes = nb_test_episodes
        self.test_ep_counter = 0
        self.eps = 0.001
        self.param_env_bounds = copy.deepcopy(param_env_bounds)
        # Flatten the per-parameter bounds into parallel min/max vectors.
        (mins, maxs) = ([], [])
        for (name, bounds) in param_env_bounds.items():
            if (len(bounds) == 2):
                mins.append(bounds[0])
                maxs.append(bounds[1])
            elif (len(bounds) == 3):
                # [min, max, nb_dims]: replicate across nb_dims dimensions.
                mins.extend(([bounds[0]] * bounds[2]))
                maxs.extend(([bounds[1]] * bounds[2]))
            else:
                print('ill defined boundaries, use [min, max, nb_dims] format or [min, max] if nb_dims=1')
                exit(1)
        if (teacher == 'Oracle'):
            self.task_generator = OracleTeacher(mins, maxs, teacher_params['window_step_vector'], seed=seed)
        elif (teacher == 'Random'):
            self.task_generator = RandomTeacher(mins, maxs, seed=seed)
        elif (teacher == 'RIAC'):
            self.task_generator = RIAC(mins, maxs, seed=seed, params=teacher_params)
        elif (teacher == 'ALP-GMM'):
            self.task_generator = ALPGMM(mins, maxs, seed=seed, params=teacher_params)
        elif (teacher == 'Covar-GMM'):
            self.task_generator = CovarGMM(mins, maxs, seed=seed, params=teacher_params)
        else:
            print('Unknown teacher')
            raise NotImplementedError
        self.test_mode = 'fixed_set'
        if (self.test_mode == 'fixed_set'):
            # Pre-generated test tasks matching these parameter bounds.
            name = get_test_set_name(self.param_env_bounds)
            self.test_env_list = pickle.load(open((('teachDRL/teachers/test_sets/' + name) + '.pkl'), 'rb'))
            print('fixed set of {} tasks loaded: {}'.format(len(self.test_env_list), name))
        # Bookkeeping of sampled parameters and episode outcomes.
        self.env_params_train = []
        self.env_train_reward_weights = []
        self.env_train_norm_reward_weights = []
        self.env_train_len = []
        self.env_params_test = []
        self.env_test_reward_weights = []
        self.env_test_len = []

    def record_train_episode(self, reward, ep_len):
        """Record a training episode and feed its reward to the teacher."""
        self.env_train_reward_weights.append(reward)
        self.env_train_len.append(ep_len)
        if (self.teacher != 'Oracle'):
            # Normalise reward from [-150, 350] to [0, 1] for the teacher.
            reward = np.interp(reward, ((- 150), 350), (0, 1))
            self.env_train_norm_reward_weights.append(reward)
        self.task_generator.update(self.env_params_train[(- 1)], reward)

    def record_test_episode(self, reward, ep_len):
        """Record a test episode outcome (no teacher feedback)."""
        self.env_test_reward_weights.append(reward)
        self.env_test_len.append(ep_len)

    def dump(self, filename):
        """Pickle all recorded statistics (plus teacher internals) to filename."""
        with open(filename, 'wb') as handle:
            dump_dict = {'env_params_train': self.env_params_train, 'env_train_reward_weights': self.env_train_reward_weights, 'env_train_len': self.env_train_len, 'env_params_test': self.env_params_test, 'env_test_reward_weights': self.env_test_reward_weights, 'env_test_len': self.env_test_len, 'env_param_bounds': list(self.param_env_bounds.items())}
            dump_dict = self.task_generator.dump(dump_dict)
            pickle.dump(dump_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def set_env_params(self, env):
        """Sample a new task from the teacher and configure env with it."""
        params = copy.copy(self.task_generator.sample_task())
        assert (type(params[0]) == np.float32)
        self.env_params_train.append(params)
        param_dict = param_vec_to_param_dict(self.param_env_bounds, params)
        env.env.set_environment(**param_dict)
        return params

    def set_test_env_params(self, test_env):
        """Configure test_env with the next task from the fixed test set."""
        self.test_ep_counter += 1
        if (self.test_mode == 'fixed_set'):
            test_param_dict = self.test_env_list[(self.test_ep_counter - 1)]
            # Drop parameters from older test-set formats.
            legacy = ['tunnel_height', 'gap_width', 'step_height', 'step_number']
            keys = test_param_dict.keys()
            for env_param in legacy:
                if (env_param in keys):
                    del test_param_dict[env_param]
        else:
            raise NotImplementedError
        test_param_vec = param_dict_to_param_vec(self.param_env_bounds, test_param_dict)
        self.env_params_test.append(test_param_vec)
        test_env.env.set_environment(**test_param_dict)
        # Cycle back to the start of the test set.
        if (self.test_ep_counter == self.nb_test_episodes):
            self.test_ep_counter = 0
def pre_process_sent(sent, do_filter, lower_case, res_wrds):
    """Normalise a sentence into a list of segments.

    Reserved words (res_wrds) are split out into their own segments and
    protected from lower-casing; everything else may be filtered and/or
    lower-cased.
    """
    if do_filter:
        sent = re.sub('-', ' ', sent)
        # NOTE(review): an empty pattern inserts a space at every position;
        # the original literal may have been a special character lost in
        # transit — confirm against the upstream source.
        sent = re.sub('', ' ', sent)
    if len(res_wrds) > 0:
        marked = []
        for wrd in sent.split():
            if wrd in res_wrds:
                marked.append((('SPLIT_ME ' + wrd) + ' SPLIT_ME'))
            else:
                marked.append(wrd)
        segments = []
        for piece in ' '.join(marked).split('SPLIT_ME'):
            piece = piece.strip()
            if piece != '':
                segments.append(piece)
    else:
        segments = [sent]
    if lower_case:
        # Reserved words keep their original casing.
        segments = [(seg if (seg in res_wrds) else seg.lower()) for seg in segments]
    return segments
class SparseKernelTests(unittest.TestCase):
    """Unit tests for SparseKernelCenterer (sparse/Nystrom kernel centering).

    Fix: every kernel product below originally read e.g. `(X X_sparse.T)`
    — invalid syntax; the `@` matmul operators were stripped during
    extraction and are restored here.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fixed seed so every test sees the same random matrices.
        self.random_state = np.random.RandomState(0)

    def test_sample_weights(self):
        """Uniform weights must reproduce the unweighted result; non-uniform must not."""
        X = self.random_state.uniform((- 1), 1, size=(4, 5))
        X_sparse = self.random_state.uniform((- 1), 1, size=(3, 5))
        Knm = (X @ X_sparse.T)
        Kmm = (X_sparse @ X_sparse.T)
        equal_wts = np.ones(len(Knm))
        nonequal_wts = self.random_state.uniform((- 1), 1, size=(len(Knm),))
        model = SparseKernelCenterer()
        weighted_model = SparseKernelCenterer()
        Knm_unweighted = model.fit_transform(Knm, Kmm)
        Knm_equal_weighted = weighted_model.fit_transform(Knm, Kmm, sample_weight=equal_wts)
        Knm_nonequal_weighted = weighted_model.fit_transform(Knm, Kmm, sample_weight=nonequal_wts)
        self.assertTrue(np.isclose(Knm_unweighted, Knm_equal_weighted, atol=1e-12).all())
        self.assertFalse(np.isclose(Knm_unweighted, Knm_nonequal_weighted, atol=1e-12).all())

    def test_invalid_sample_weights(self):
        """Wrong-length or 2-D sample weights must raise ValueError."""
        X = self.random_state.uniform((- 1), 1, size=(4, 5))
        X_sparse = self.random_state.uniform((- 1), 1, size=(3, 5))
        Knm = (X @ X_sparse.T)
        Kmm = (X_sparse @ X_sparse.T)
        wts_len = np.ones((len(Knm) + 1))
        wts_dim = np.ones((len(Knm), 2))
        model = SparseKernelCenterer()
        with self.assertRaises(ValueError):
            model.fit_transform(Knm, Kmm, sample_weight=wts_len)
        with self.assertRaises(ValueError):
            model.fit_transform(Knm, Kmm, sample_weight=wts_dim)

    def test_Square_Kmm(self):
        """A non-square active kernel must be rejected."""
        X = self.random_state.uniform((- 1), 1, size=(4, 5))
        X_sparse = self.random_state.uniform((- 1), 1, size=(3, 5))
        Knm = (X @ X_sparse.T)
        Kmm = (X_sparse @ X.T)
        model = SparseKernelCenterer()
        with self.assertRaises(ValueError) as cm:
            model.fit(Knm, Kmm)
        self.assertEqual(str(cm.exception), 'The active kernel is not square.')

    def test_LatterDim(self):
        """Knm/Kmm with mismatched trailing dimensions must be rejected."""
        X = self.random_state.uniform((- 1), 1, size=(4, 5))
        X_sparse = self.random_state.uniform((- 1), 1, size=(3, 5))
        Knm = (X @ X.T)
        Kmm = (X_sparse @ X_sparse.T)
        model = SparseKernelCenterer()
        with self.assertRaises(ValueError) as cm:
            model.fit(Knm, Kmm)
        self.assertEqual(str(cm.exception), 'The reference kernel is not commensurate shape with the active kernel.')

    def test_new_kernel(self):
        """Transforming a kernel of a different shape must be rejected."""
        X = self.random_state.uniform((- 1), 1, size=(4, 5))
        X_sparse = self.random_state.uniform((- 1), 1, size=(3, 5))
        Knm = (X @ X_sparse.T)
        Kmm = (X_sparse @ X_sparse.T)
        Knm2 = (X @ X.T)
        model = SparseKernelCenterer()
        model = model.fit(Knm, Kmm)
        with self.assertRaises(ValueError) as cm:
            model.transform(Knm2)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(str(cm.exception), 'The reference kernel and received kernel have different shape')

    def test_NotFittedError_transform(self):
        """transform() before fit() must raise NotFittedError."""
        K = self.random_state.uniform(0, 100, size=(3, 3))
        model = SparseKernelCenterer()
        with self.assertRaises(sklearn.exceptions.NotFittedError):
            model.transform(K)

    def test_fit_transform(self):
        """fit_transform must center then trace-normalise, matching the manual computation."""
        X = self.random_state.uniform((- 1), 1, size=(4, 5))
        X_sparse = self.random_state.uniform((- 1), 1, size=(3, 5))
        Knm = (X @ X_sparse.T)
        Kmm = (X_sparse @ X_sparse.T)
        model = SparseKernelCenterer(rcond=1e-12)
        Ktr = model.fit_transform(Knm, Kmm)
        Knm_mean = Knm.mean(axis=0)
        Kc = (Knm - Knm_mean)
        Khat = ((Kc @ np.linalg.pinv(Kmm, rcond=1e-12)) @ Kc.T)
        Kc /= np.sqrt((np.trace(Khat) / Khat.shape[0]))
        self.assertTrue(np.isclose(Ktr, Kc, atol=1e-12).all())

    def test_center_only(self):
        """with_center=True, with_trace=False must only subtract the column means."""
        X = self.random_state.uniform((- 1), 1, size=(4, 5))
        X_sparse = self.random_state.uniform((- 1), 1, size=(3, 5))
        Knm = (X @ X_sparse.T)
        Kmm = (X_sparse @ X_sparse.T)
        model = SparseKernelCenterer(with_center=True, with_trace=False, rcond=1e-12)
        Ktr = model.fit_transform(Knm, Kmm)
        Knm_mean = Knm.mean(axis=0)
        Kc = (Knm - Knm_mean)
        self.assertTrue(np.isclose(Ktr, Kc, atol=1e-12).all())

    def test_trace_only(self):
        """with_center=False, with_trace=True must only trace-normalise."""
        X = self.random_state.uniform((- 1), 1, size=(4, 5))
        X_sparse = self.random_state.uniform((- 1), 1, size=(3, 5))
        Knm = (X @ X_sparse.T)
        Kmm = (X_sparse @ X_sparse.T)
        model = SparseKernelCenterer(with_center=False, with_trace=True, rcond=1e-12)
        Ktr = model.fit_transform(Knm, Kmm)
        Kc = Knm.copy()
        Khat = ((Kc @ np.linalg.pinv(Kmm, rcond=1e-12)) @ Kc.T)
        Kc /= np.sqrt((np.trace(Khat) / Khat.shape[0]))
        self.assertTrue(np.isclose(Ktr, Kc, atol=1e-12).all())

    def test_no_preprocessing(self):
        """Disabling both options must leave the kernel unchanged."""
        X = self.random_state.uniform((- 1), 1, size=(4, 5))
        X_sparse = self.random_state.uniform((- 1), 1, size=(3, 5))
        Knm = (X @ X_sparse.T)
        Kmm = (X_sparse @ X_sparse.T)
        model = SparseKernelCenterer(with_center=False, with_trace=False, rcond=1e-12)
        Ktr = model.fit_transform(Knm, Kmm)
        Kc = Knm.copy()
        self.assertTrue(np.isclose(Ktr, Kc, atol=1e-12).all())
def accum_graph_fts(encoders, dp: _DataPoint, graph_fts: _Array) -> _Array:
    """Accumulate the encoding of a graph-located, non-pointer data point
    into graph_fts and return the updated array."""
    is_graph_located = dp.location == _Location.GRAPH
    if is_graph_located and dp.type_ != _Type.POINTER:
        graph_fts += _encode_inputs(encoders, dp)
    return graph_fts
class PilBackend(ImageUtilsBackend):
    """Pillow-based image I/O backend (uint8 only; falls back to other
    backends for uint16 and unsupported inputs)."""

    # NOTE(review): this map is a class attribute mutated in __init__, so
    # all instances share the additions — kept as-is for compatibility.
    _interpolations_map = {'nearest': Image.Resampling.NEAREST, 'bilinear': Image.Resampling.BILINEAR, 'bicubic': Image.Resampling.BICUBIC}

    def __init__(self):
        ImageUtilsBackend.__init__(self)
        # Register optional filters only on Pillow versions that have them.
        if hasattr(Image.Resampling, 'HAMMING'):
            self._interpolations_map['hamming'] = Image.Resampling.HAMMING
        if hasattr(Image.Resampling, 'BOX'):
            self._interpolations_map['box'] = Image.Resampling.BOX
        if hasattr(Image.Resampling, 'LANCZOS'):
            self._interpolations_map['lanczos'] = Image.Resampling.LANCZOS

    # Fix: the three helpers below take no `self` and are called with all
    # their parameters (e.g. `self.pil_image_to_ndarray(pil_img, grayscale,
    # num_channels, return_palette_indices)`), so they must be staticmethods;
    # the stripped `@staticmethod` decorators are restored.
    @staticmethod
    def convert_pil(pil_image, grayscale, num_channels, return_palette_indices):
        """Convert a PIL image to the requested mode/channel layout.

        Returns either a PIL image or (for grayscale) an ndarray.
        Raises ValueError for 32-bit integer ('I') images.
        """
        if (pil_image.mode == 'I'):
            raise ValueError('Input img type seems int32. Currently we don`t support int32 image in pillow backend.')
        # Expand palette images unless the caller wants raw palette indices.
        if ((pil_image.mode == 'P') and (not return_palette_indices)):
            if ('transparency' in pil_image.info):
                pil_image = pil_image.convert('RGBA')
            else:
                pil_image = pil_image.convert('RGB')
        if grayscale:
            ret = np.asarray(pil_image.convert('L'))
            if (num_channels > 0):
                # Replicate the single channel to the requested count.
                ret = np.broadcast_to(ret[(..., np.newaxis)], (ret.shape + (num_channels,)))
            return ret
        elif (num_channels == 3):
            return pil_image.convert('RGB')
        elif (num_channels == 4):
            return pil_image.convert('RGBA')
        return pil_image

    @staticmethod
    def pil_image_to_ndarray(pil_image, grayscale, num_channels, return_palette_indices):
        """Convert a PIL image to a uint8 ndarray with the requested layout."""
        ret = PilBackend.convert_pil(pil_image, grayscale, num_channels, return_palette_indices)
        return np.asarray(ret).astype(np.uint8)

    @staticmethod
    def pil_resize_from_ndarray(arr, size, resample):
        """Resize a 2-D ndarray through PIL and return it as an ndarray."""
        # 'F' mode is needed so float32 data round-trips through PIL.
        mode = ('F' if (arr.dtype == np.float32) else None)
        pil_image = Image.fromarray(arr, mode=mode)
        resized_image = pil_image.resize(size, resample=resample)
        return np.asarray(resized_image)

    def accept(self, path, ext, operator):
        """Return 'OK' if this backend can handle the operation/extension."""
        if (operator in ['resize', 'save']):
            return 'OK'
        elif (ext in ['.bmp', '.dib', '.eps', '.gif', '.icns', '.ico', '.jpeg', '.jpg', '.msp', '.png', '.ppm', '.pbm', '.pgm', '.pnm', '.tif', '.tiff']):
            return 'OK'
        else:
            return 'NG'

    def imread(self, path, grayscale=False, size=None, interpolate='bilinear', channel_first=False, as_uint16=False, num_channels=(- 1), return_palette_indices=False):
        """Read an image as a uint8 ndarray; delegates uint16 or failing
        reads to the next available backend."""
        if as_uint16:
            logger.warning('pillow only supports uint8 for RGB image. If you want to load image as uint16, install pypng or cv2 and nnabla.utils.image_utils automatically change backend to use these module.')
            return self.next_available(path).imread(path, grayscale=grayscale, size=size, interpolate=interpolate, channel_first=channel_first, as_uint16=as_uint16, num_channels=num_channels)
        _imread_before(grayscale, num_channels)
        pil_img = Image.open(path, mode='r')
        try:
            img = self.pil_image_to_ndarray(pil_img, grayscale, num_channels, return_palette_indices)
        except Exception:
            # Conversion failed (e.g. unsupported mode): try the next backend.
            return self.next_available(path).imread(path, grayscale=grayscale, size=size, interpolate=interpolate, channel_first=channel_first, as_uint16=as_uint16, num_channels=num_channels)
        return _imread_after(img, size, interpolate, channel_first, self.imresize)

    def imsave(self, path, img, channel_first=False, as_uint16=False, auto_scale=True):
        """Save a uint8 image; delegates uint16 saves to the next backend."""
        img = _imsave_before(img, channel_first, auto_scale)
        if ((img.dtype == np.uint16) or as_uint16):
            logger.warning('Pillow only supports uint8 image to save. Cast img to uint8.If you want to save image as uint16, install pypng or cv2 and nnabla.utils.image_utils automatically change backend to use these module.')
            return self.next_available(path).imsave(path, img, channel_first=channel_first, as_uint16=as_uint16, auto_scale=auto_scale)
        if (auto_scale and (img.dtype != np.uint8)):
            # Presumably img is in [0, 1] here — confirm with _imsave_before.
            img = (img * 255).astype(np.uint8)
        if ((len(img.shape) == 3) and (img.shape[(- 1)] == 1)):
            img = np.squeeze(img, axis=(- 1))
        Image.fromarray(img).save(path)

    def imresize(self, img, size, interpolate='bilinear', channel_first=False):
        """Resize an image (uint8 directly; other dtypes via float32 per channel)."""
        img = _imresize_before(img, size, channel_first, interpolate, list(self._interpolations_map.keys()))
        expand_flag = False
        if ((len(img.shape) == 3) and (img.shape[(- 1)] == 1)):
            # PIL needs a 2-D array for single-channel input.
            img = img.reshape(img.shape[0], img.shape[1])
            expand_flag = True
        resample = self._interpolations_map[interpolate]
        if (img.dtype == np.uint8):
            resized = self.pil_resize_from_ndarray(img, size, resample)
        else:
            dtype = img.dtype
            img_float32 = np.asarray(img, np.float32)
            if (len(img.shape) == 3):
                # Resize channel by channel, then restack.
                resized = np.stack([self.pil_resize_from_ndarray(img_float32[(..., i)], size, resample) for i in range(img.shape[(- 1)])], axis=2)
            else:
                resized = self.pil_resize_from_ndarray(img_float32, size, resample)
            resized = np.asarray(resized, dtype)
        if expand_flag:
            resized = resized[(..., np.newaxis)]
        return _imresize_after(resized, channel_first)
def test_get_subclasses():
    """get_subclasses must return the whole subclass tree, keyed by class name."""
    class Parent():
        pass

    class Child(Parent):
        pass

    class GrandChild(Child):
        pass

    # Both the direct and the transitive subclass must appear.
    assert get_subclasses(Parent) == {'Child': Child, 'GrandChild': GrandChild}
# NOTE(review): the original first line was a bare tuple; the stripped
# `@command` decorator prefix is restored here — confirm against upstream.
@command('analyze_code', 'Analyze Code', '"code": "<full_code_string>"')
def analyze_code(code: str, agent: Agent) -> list[str]:
    """Ask the AI to analyze ``code`` and return improvement suggestions.

    Args:
        code: full source code string to analyze.
        agent: agent whose config supplies the AI-call settings.

    Returns:
        A list of suggestion strings produced by the AI function call.
    """
    function_string = 'def analyze_code(code: str) -> list[str]:'
    args = [code]
    description_string = 'Analyzes the given code and returns a list of suggestions for improvements.'
    return call_ai_function(function_string, args, description_string, config=agent.config)
def make_env(args):
    """Build a pair of environment factories from ``args.env``.

    Dispatches on the (lower-cased) environment name to the matching
    project environment class, falling back to a ``<env>_<external>_<internal>``
    goal-typed naming scheme in the final branch.

    :param args: parsed CLI namespace (env name, thresholds, step limits, ...).
    :return: ``(env_fn, eval_env_fn)`` — zero-argument callables that create
        the training and evaluation environments respectively.
    """
    # A comma-separated threshold string becomes a per-dimension array.
    if (',' in args.per_dim_threshold):
        per_dim_threshold = np.array([float(t) for t in args.per_dim_threshold.split(',')])
    else:
        per_dim_threshold = float(args.per_dim_threshold)
    # Registered gym id: just delegate to gym.make for both train and eval.
    if (gym.envs.registry.env_specs.get(args.env) is not None):
        env_fn = (lambda : gym.make(args.env))
        eval_env_fn = env_fn
    elif ('dictpushandreach' in args.env.lower()):
        env_fn = (lambda : TimeLimit(DictPushAndReach(), 50))
        eval_env_fn = (lambda : TimeLimit(DictPushAndReach(), 50))
    elif ('dictpush' in args.env.lower()):
        env_fn = (lambda : TimeLimit(DictPush(), 50))
        eval_env_fn = (lambda : TimeLimit(DictPush(), 50))
    elif ('pointmaze' in args.env.lower()):
        env_fn = (lambda : PointMaze2D())
        eval_env_fn = (lambda : PointMaze2D(test=True))
    elif ('simplemaze' in args.env.lower()):
        env_fn = (lambda : SimpleMazeEnv())
        eval_env_fn = (lambda : SimpleMazeEnv(test=True))
    elif ('moat' in args.env.lower()):
        env_fn = (lambda : make_moat_env(slow_factor=args.slow_factor))
        eval_env_fn = (lambda : make_moat_env(slow_factor=args.slow_factor))
    elif ('antmaze' in args.env.lower()):
        # Ant maze family: variant picked by a sub-string of the name.
        if ('hiro' in args.env.lower()):
            env_fn = (lambda : AntMazeEnv(variant='AntMaze-HIRO', eval=False))
            eval_env_fn = (lambda : AntMazeEnv(variant='AntMaze-HIRO', eval=True))
        elif ('gg' in args.env.lower()):
            env_fn = (lambda : GGAntMaze(eval=False))
            eval_env_fn = (lambda : GGAntMaze(eval=True))
        else:
            env_fn = (lambda : AntMazeEnv(variant='AntMaze-SR', eval=False))
            eval_env_fn = (lambda : AntMazeEnv(variant='AntMaze-SR', eval=True))
    elif ('antpush' in args.env.lower()):
        env_fn = (lambda : AntMazeEnv(variant='AntPush', eval=False))
        eval_env_fn = (lambda : AntMazeEnv(variant='AntPush', eval=True))
    elif ('antfall' in args.env.lower()):
        env_fn = (lambda : AntMazeEnv(variant='AntFall', eval=False))
        eval_env_fn = (lambda : AntMazeEnv(variant='AntFall', eval=True))
    elif (('pen_' in args.env.lower()) or ('block_' in args.env.lower()) or ('egg_' in args.env.lower())):
        # Hand manipulation: name encodes "<object>_<mode>_<dist-thr>_<rot-thr>".
        (env_type, mode, dt, rt) = args.env.split('_')
        if (mode == 'full'):
            target_pos = 'random'
            target_rot = 'xyz'
            mode_str = 'Full'
        else:
            assert ('rotate' in mode)
            # NOTE(review): this assignment is immediately overwritten below;
            # it looks dead but is kept verbatim.
            mode_str = 'Rotate'
            target_pos = 'ignore'
            (_, target_rot) = mode.split('-')
            assert (target_rot in ['z', 'parallel', 'xyz', ''])
            mode_str = 'Rotate{}'.format(RotationDict[target_rot])
            if (target_rot == ''):
                target_rot = 'xyz'
        if (env_type == 'block'):
            HandEnv = HandBlockEnv
        elif (env_type == 'pen'):
            HandEnv = HandPenEnv
        elif (env_type == 'egg'):
            HandEnv = HandEggEnv
        else:
            raise ValueError
        max_step = max(args.env_max_step, 100)
        env_fn = (lambda : HandEnv(max_step=max_step, distance_threshold=float(dt), rotation_threshold=float(rt), target_position=target_pos, target_rotation=target_rot))
        assert (env_type in HandObjectDict.keys())
        # Evaluate on the matching stock gym env unless an explicit one is given.
        gym_env_str = 'HandManipulate{}{}-v0'.format(HandObjectDict[env_type], mode_str)
        if (args.eval_env and (args.eval_env.lower() != 'none')):
            eval_env_fn = (lambda : gym.make(args.eval_env))
        else:
            eval_env_fn = (lambda : gym.make(gym_env_str))
    elif ('handreach_' in args.env.lower()):
        (env_type, dt) = args.env.split('_')
        max_step = max(args.env_max_step, 50)
        env_fn = (lambda : HandReachFullEnv(max_step=max_step, distance_threshold=float(dt)))
        eval_env_fn = (lambda : gym.make('HandReach-v0'))
    # Push transfer pairs: "<train-env>_<eval-env>".
    elif (args.env.lower() == 'pushright_pushleft'):
        env_fn = (lambda : PushRight())
        eval_env_fn = (lambda : PushLeft())
    elif (args.env.lower() == 'pushright_pushright'):
        env_fn = (lambda : PushRight())
        eval_env_fn = (lambda : PushRight())
    elif (args.env.lower() == 'pushleft_pushright'):
        env_fn = (lambda : PushLeft())
        eval_env_fn = (lambda : PushRight())
    elif (args.env.lower() == 'pushleft_pushleft'):
        env_fn = (lambda : PushLeft())
        eval_env_fn = (lambda : PushLeft())
    elif (args.env.lower() == 'sweep2'):
        env_fn = (lambda : FetchHookSweepAllEnv(place_two=True))
        eval_env_fn = (lambda : FetchHookSweepAllEnv(place_two=True))
    elif (args.env.lower() == 'sweep'):
        env_fn = (lambda : FetchHookSweepAllEnv(smaller_state=True, place_two=True, place_random=False))
        eval_env_fn = (lambda : FetchHookSweepAllEnv(smaller_state=True, place_two=True, place_random=False))
    else:
        # Fallback scheme: "<env>_<external-goal>_<internal-goal>".
        (env, external, internal) = args.env.split('_')
        if (external.lower() == 'all'):
            external = GoalType.ALL
        elif (external.lower() == 'objgrip'):
            external = GoalType.OBJ_GRIP
        elif (external.lower() == 'objspeed'):
            external = GoalType.OBJSPEED
        elif (external.lower() == 'objspeedrot'):
            external = GoalType.OBJSPEED2
        elif (external.lower() == 'obj'):
            external = GoalType.OBJ
        elif (external.lower() == 'grip'):
            external = GoalType.GRIP
        else:
            raise ValueError
        # 'all' is an external-only goal type, hence rejected here.
        if (internal.lower() == 'all'):
            raise ValueError
        elif (internal.lower() == 'objgrip'):
            internal = GoalType.OBJ_GRIP
        elif (internal.lower() == 'obj'):
            internal = GoalType.OBJ
        elif (internal.lower() == 'grip'):
            internal = GoalType.GRIP
        else:
            raise ValueError
        n_blocks = 0
        range_min = None
        range_max = None
        if (env.lower() == 'push'):
            Env = PushEnv
        elif (env.lower() == 'slide'):
            Env = SlideEnv
        elif (env.lower() == 'pickplace'):
            Env = PickPlaceEnv
            n_blocks = args.pp_in_air_percentage
            range_min = args.pp_min_air
            range_max = args.pp_max_air
        elif ('stack' in env.lower()):
            # "stackN" encodes the block count in the name.
            Env = StackEnv
            n_blocks = int(env.lower().replace('stack', ''))
        elif ('slide' in env.lower()):
            Env = SlideNEnv
            n_blocks = int(env.lower().replace('slide', ''))
        else:
            raise ValueError('Invalid environment')
        env_fn = (lambda : Env(max_step=args.env_max_step, internal_goal=internal, external_goal=external, mode=args.reward_mode, per_dim_threshold=per_dim_threshold, hard=args.hard, distance_threshold=args.train_dt, n=n_blocks, range_min=range_min, range_max=range_max))
        eval_env_fn = (lambda : Env(max_step=50, internal_goal=internal, external_goal=external, mode=args.reward_mode, compute_reward_with_internal=args.test_with_internal, hard=args.hard, n=n_blocks, range_min=range_min, range_max=range_max))
    return (env_fn, eval_env_fn)
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation block.

    Squeezes each channel to a scalar via global average pooling, runs it
    through a 1x1-conv bottleneck ending in a sigmoid, and rescales the
    input channel-wise with the resulting attention gate.
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        # Squeeze: collapse every feature map to a single value.
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Excite: channel bottleneck followed by a gate in [0, 1].
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        gate = self.attention(self.pooler(hidden_state))
        return hidden_state * gate
def multiple_outputs_activation_model():
    """Build a model with two parallel Conv2D->ReLU branches whose outputs
    are merged by an element-wise Add."""
    inputs = Input(shape=INPUT_SHAPE)
    # Two independent convolutions over the same input (created first so
    # layer creation order matches the reference model).
    conv_a = Conv2D(2, 3)(inputs)
    conv_b = Conv2D(2, 3)(inputs)
    act_a = ReLU()(conv_a)
    act_b = ReLU()(conv_b)
    merged = Add()([act_a, act_b])
    return keras.Model(inputs=inputs, outputs=merged)
def get_Babi_3(args=None):
    """Return the Dataset configuration for bAbI task 3 (three supporting
    facts), with its preprocessing path and training hyper-parameters set."""
    dataset = Dataset(name='babi_3', path='preprocess/Babi/vec_babi_qa3_three-supporting-facts_.p', args=args)
    dataset.vec.word_dim = 50   # word-embedding dimension
    dataset.bsize = 50          # mini-batch size
    dataset.n_iters = 100       # training iterations
    dataset.hidden_size = 32
    return dataset
def measure_net_latency(net, l_type='gpu8', fast=True, input_shape=(3, 224, 224), clean=False):
    """Measure the average forward-pass latency of ``net`` in milliseconds.

    ``l_type`` is either ``'cpu'`` or ``'gpu<batch>'`` (e.g. ``'gpu8'``);
    anything else raises NotImplementedError. ``fast`` trades accuracy for
    speed by shrinking the warmup/sample counts. ``clean`` suppresses the
    progress prints.

    :return: ``(avg_latency_ms, measured_latency)`` where the dict records
        per-iteration warmup times and the (total_ms, n_sample) pair.
    """
    if isinstance(net, nn.DataParallel):
        net = net.module
    rm_bn_from_net(net)
    # 'gpuN' encodes the batch size in the suffix; cpu always uses batch 1.
    if 'gpu' in l_type:
        l_type, batch_size = l_type[:3], int(l_type[3:])
    else:
        batch_size = 1
    data_shape = [batch_size] + list(input_shape)
    # Identical counts for cpu and gpu, so hoist them out of the branches.
    n_warmup, n_sample = (5, 10) if fast else (50, 50)
    if l_type == 'cpu':
        if get_net_device(net) != torch.device('cpu'):
            if not clean:
                print('move net to cpu for measuring cpu latency')
            # Deep-copy so the caller's GPU model is left untouched.
            net = copy.deepcopy(net).cpu()
    elif l_type != 'gpu':
        raise NotImplementedError
    images = torch.zeros(data_shape, device=get_net_device(net))
    measured_latency = {'warmup': [], 'sample': []}
    net.eval()
    with torch.no_grad():
        # Warmup passes are timed individually but excluded from the average.
        for i in range(n_warmup):
            tic = time.time()
            net(images)
            elapsed = (time.time() - tic) * 1000.0
            measured_latency['warmup'].append(elapsed)
            if not clean:
                print('Warmup %d: %.3f' % (i, elapsed))
        tic = time.time()
        for _ in range(n_sample):
            net(images)
        total_time = (time.time() - tic) * 1000.0
        measured_latency['sample'].append((total_time, n_sample))
    return total_time / n_sample, measured_latency
class Stackimg():
    """Keeps a rolling buffer of the most recent preprocessed frames and
    returns them stacked as a single CUDA tensor."""

    def __init__(self, opt):
        self.stack_n = opt.stack_n      # number of frames to stack
        self.img_size = opt.img_size    # square resize target (pixels)
        self.env = opt.env
        # Resize -> tensor -> normalize each RGB channel to [-1, 1].
        self.trans_stack = transforms.Compose([transforms.Resize((self.img_size, self.img_size)), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
        self.buffer = []

    def push(self, img):
        """Add a new raw frame (HWC ndarray) and return the stacked frames
        as a float tensor on the GPU."""
        img = Image.fromarray(img)
        # NOTE(review): ToTensor immediately followed by ToPILImage is a
        # round-trip conversion — presumably intentional for value scaling,
        # but it looks redundant; confirm before removing.
        img = transforms.ToTensor()(img)
        img = transforms.ToPILImage()(img)
        img = self.trans_stack(img)
        if (len(self.buffer) < self.stack_n):
            # Cold start: pre-fill the whole buffer with the current frame.
            self.buffer = ([img] * self.stack_n)
        else:
            # Drop the oldest frame to make room for the new one.
            self.buffer = self.buffer[1:]
        # NOTE(review): this append executes after *both* branches, so the
        # buffer holds stack_n + 1 frames after every push — confirm whether
        # the extra frame is intended.
        self.buffer.append(img)
        return torch.stack(self.buffer, 0).float().cuda()
class AttentionMask(eqx.Module):
    """Lazily-materialized attention mask: a causal flag plus an optional
    explicit mask, combined on demand for a (sub-)window of positions."""
    # Whether to apply a causal (lower-triangular) constraint.
    is_causal: bool = eqx.static_field()
    # Optional additional mask combined (AND) with the causal part.
    explicit_mask: Optional[NamedArray] = None

    def materialize(self, QPos: Axis, KPos: Axis, q_slice: Optional[haliax.dslice]=None, k_slice: Optional[haliax.dslice]=None) -> Optional[NamedArray]:
        """Materialize the mask for the given query/key axes, optionally
        restricted to sub-slices; returns None when no constraint applies."""
        # Default to the full axis extent when no slice is requested.
        if (q_slice is None):
            q_slice = haliax.dslice(0, QPos.size)
        if (k_slice is None):
            k_slice = haliax.dslice(0, KPos.size)
        if self.is_causal:
            causal = causal_mask(QPos.resize(q_slice.size), KPos.resize(k_slice.size), q_slice.start, k_slice.start)
        else:
            causal = None
        if (self.explicit_mask is not None):
            explicit = self.explicit_mask[(QPos, q_slice, KPos, k_slice)]
        else:
            explicit = None
        return combine_masks_and(causal, explicit)

    # NOTE(review): `causal` and `explicit` take no `self` — they behave as
    # factories and only work when called on the class itself; presumably
    # @staticmethod decorators were lost. Confirm and restore.
    def causal() -> 'AttentionMask':
        return AttentionMask(is_causal=True)

    def explicit(mask: NamedArray) -> 'AttentionMask':
        return AttentionMask(is_causal=False, explicit_mask=mask)

    def __and__(self, other) -> 'AttentionMask':
        # Intersection: causal only if both are; explicit masks AND-combined.
        is_causal = (self.is_causal and other.is_causal)
        explicit_mask = combine_masks_and(self.explicit_mask, other.explicit_mask)
        return AttentionMask(is_causal=is_causal, explicit_mask=explicit_mask)

    def __or__(self, other) -> 'AttentionMask':
        # Union: causal if either is; explicit masks OR-combined.
        is_causal = (self.is_causal or other.is_causal)
        explicit_mask = combine_masks_or(self.explicit_mask, other.explicit_mask)
        return AttentionMask(is_causal=is_causal, explicit_mask=explicit_mask)
def util_grid_cost(src: str, dest: str, src_tier: str='PREMIUM', dest_tier: str='PREMIUM'):
    """Print the cost of the cheapest path from *src* to *dest* according to
    the bundled throughput grid."""
    with path('skyplane.data', 'throughput.csv') as grid_csv:
        cost = ThroughputSolver(grid_csv).get_path_cost(src, dest, src_tier, dest_tier)
        print(cost)
def get_output_filename(source_filename, cwd, options):
    """Resolve the generated C/C++ file name for *source_filename*.

    Uses ``options.output_file`` (relative to *cwd*) when given; if that
    path is an existing directory, the suggested basename is placed inside
    it. Otherwise the source name with a ``.c``/``.cpp`` suffix is used.
    """
    suffix = '.cpp' if options.cplus else '.c'
    suggested = Utils.replace_suffix(source_filename, suffix)
    if not options.output_file:
        return suggested
    out_path = os.path.join(cwd, options.output_file)
    if os.path.isdir(out_path):
        # Target is a directory: drop the suggested basename into it.
        return os.path.join(out_path, os.path.basename(suggested))
    return out_path
def get_pixel_counts_from_label(src):
    """Count no-change / change / cloud pixels in a label raster.

    Parameters
    ----------
    src : opened raster dataset
        Must expose ``read()`` (returns the label array), ``width`` and
        ``height``. Pixel values: 0 = no change, 1 = change, 2 = cloud.

    Returns
    -------
    tuple
        ``(change_pixels_count, nochange_pixels_count, cloud_pixels_count)``.
    """
    label_values = src.read()
    print(label_values.shape)
    (unique, counts) = np.unique(label_values, return_counts=True)
    print(unique, counts)
    # Map value -> count explicitly. The old positional indexing
    # (counts[0]/counts[1]/counts[2]) miscounted or raised IndexError
    # whenever one of the classes was absent from the raster.
    count_of = dict(zip(unique.tolist(), counts.tolist()))
    nochange_pixels_count = count_of.get(0, 0)
    change_pixels_count = count_of.get(1, 0)
    cloud_pixels_count = count_of.get(2, 0)
    all_pixels = (src.width * src.height)
    all_pixels_check = ((nochange_pixels_count + change_pixels_count) + cloud_pixels_count)
    # Sanity check: the three classes must account for every pixel
    # (fails if the raster contains unexpected label values).
    assert (all_pixels == all_pixels_check)
    # NOTE: the original had four more statements after the return
    # (prints and a ratio computation) — unreachable dead code, removed.
    return (change_pixels_count, nochange_pixels_count, cloud_pixels_count)
def code_to_exprs(code: str, inputs: Set[str], outputs: Set[str]) -> Dict[(str, sp.Expr)]:
    """Symbolically execute straight-line ``code`` with sympy symbols.

    Wraps ``code`` in a generated function whose parameters are the input
    names, shadows the cmath.h math functions with their sympy equivalents,
    exec's it, and calls it with one sympy symbol per input.

    :param code: straight-line arithmetic code to execute symbolically.
    :param inputs: names that become sympy symbols / function parameters.
    :param outputs: names whose final expressions are returned.
    :return: mapping output name -> sympy expression.
    :raises AutoDiffException: if generation or execution fails.
    """
    inputs = list(inputs)
    outputs = list(outputs)
    # Template: parameters are the inputs, body is the user code (re-indented),
    # return statement yields the outputs in order.
    code_fn = '\ndef symbolic_execution({}):\n # define functions from cmath.h\n from sympy import exp, log\n def log2(x):\n return log(x, 2)\n def log10(x):\n return log(x, 10)\n from sympy import sin, cos, tan, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh\n from sympy import sin, cos, tan, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh\n from sympy import Pow as pow, sqrt\n from sympy import sign, floor, ceiling as ceil, Abs as abs, Abs as fabs\n from sympy import Max as max, Min as min\n from sympy import Max as fmax, Min as fmin\n from sympy import erf\n{}\n return {}\n '
    code_fn = code_fn.format(', '.join(inputs), '\n'.join(((' ' + line.strip()) for line in code.split('\n'))), ', '.join(outputs))
    try:
        # SECURITY: exec of generated code — ``code`` must come from a
        # trusted source (here: SDFG tasklets), never untrusted input.
        temp_globals = {'dace': dace}
        exec(code_fn, temp_globals)
        results = temp_globals['symbolic_execution'](*[sp.symbols(inp) for inp in inputs])
        # A single output comes back unwrapped; normalize to a dict.
        if (len(outputs) > 1):
            return dict(zip(outputs, results))
        else:
            return {outputs[0]: results}
    except Exception as e:
        raise AutoDiffException('Exception occured while attempting to symbolically execute code:\n{}'.format(code)) from e
class NumericAttributeBinaryTest(InstanceConditionalTest):
    """Binary split test on a numeric attribute.

    Branch 0 is taken when the attribute value is below the split point
    (or equal to it when ``equal_passes_test`` is True); branch 1 otherwise.
    """

    def __init__(self, att_idx, att_value, equal_passes_test):
        super().__init__()
        self._att_idx = att_idx        # index of the tested attribute
        self._att_value = att_value    # numeric split point
        # True -> ties (v == split point) fall into branch 0.
        self._equals_passes_test = equal_passes_test

    def branch_for_instance(self, X):
        """Return the branch (0 or 1) for instance ``X``, or -1 when the
        attribute index is out of range."""
        # FIX: was `self._att_idx > len(X)`, which let att_idx == len(X)
        # through and crashed on the X[self._att_idx] lookup below.
        if ((self._att_idx >= len(X)) or (self._att_idx < 0)):
            return (- 1)
        v = X[self._att_idx]
        if (v == self._att_value):
            return (0 if self._equals_passes_test else 1)
        return (0 if (v < self._att_value) else 1)

    @staticmethod
    def max_branches():
        """A binary test always has exactly two branches.

        Declared static: the original bare ``def max_branches():`` raised
        TypeError whenever it was called on an instance.
        """
        return 2

    def describe_condition_for_branch(self, branch):
        """Human-readable condition for ``branch`` (None for other values)."""
        if ((branch == 0) or (branch == 1)):
            compare_char = ('<' if (branch == 0) else '>')
            equals_branch = (0 if self._equals_passes_test else 1)
            # The tie-taking branch gets the '=' appended.
            compare_char += ('=' if (branch == equals_branch) else '')
            return 'Attribute {} {} {}'.format(self._att_idx, compare_char, self._att_value)

    def branch_rule(self, branch):
        """Return the Predicate equivalent of ``branch``'s condition."""
        condition = ('<' if (branch == 0) else '>')
        equals_branch = (0 if self._equals_passes_test else 1)
        condition += ('=' if (branch == equals_branch) else '')
        return Predicate(self._att_idx, condition, self._att_value)

    def get_atts_test_depends_on(self):
        """Attribute indices this test reads (always a single index)."""
        return [self._att_idx]

    def get_split_value(self):
        """Return the numeric split point."""
        return self._att_value
def test_get_transitive_successors(graph, node, second_node, third_node, fourth_node):
    """Transitive successors must include every node reachable downstream,
    not just direct neighbours."""
    # Same insertion order as before: fourth first, then the chain head.
    for vertex in (fourth_node, node, second_node, third_node):
        graph.add_node(vertex)
    chain = (node, second_node, third_node, fourth_node)
    for src, dst in zip(chain, chain[1:]):
        graph.add_edge(src, dst)
    assert graph.get_transitive_successors(second_node) == {third_node, fourth_node}
def register_Ns3SsidChecker_methods(root_module, cls):
    """Register the ns3::SsidChecker bindings: default and copy constructors."""
    cls.add_constructor([])  # SsidChecker()
    cls.add_constructor([param('ns3::SsidChecker const &', 'arg0')])  # copy ctor
def get_number_of_params_summary(model, name='', print_on=True, include_routers=True):
    """Summarize parameter counts of a multi-branch model.

    Computes the total parameter count plus the max/min/mean over the
    branches listed in ``model.paths_list``.

    :return: ``(total, max_per_branch, min_per_branch, mean_per_branch)``.
    """
    total_num = get_total_number_of_params(model)
    branch_counts = [
        get_number_of_params_path(model, nodes, include_routers=include_routers)
        for nodes, _ in model.paths_list
    ]
    highest = max(branch_counts)
    lowest = min(branch_counts)
    mean = (sum(branch_counts) * 1.0) / len(branch_counts)
    if print_on:
        print('\n' + name)
        print('Number of parameters summary:')
        print(' Total: {} '.format(total_num))
        print(' Max per branch: {} '.format(highest))
        print(' Min per branch: {} '.format(lowest))
        print(' Average per branch: {}'.format(mean))
    return (total_num, highest, lowest, mean)
class CustomConfig(PretrainedConfig):
    """Minimal custom config used to exercise custom-config registration
    and (de)serialization."""
    # Identifier under which this config type is registered.
    model_type = 'custom'

    def __init__(self, attribute=1, **kwargs):
        # Set the custom attribute before delegating the remaining kwargs
        # to the PretrainedConfig machinery.
        self.attribute = attribute
        super().__init__(**kwargs)
class BenchmarkThread(Thread):
    """Worker thread that drains work items from a parallel scheduler and
    processes each on a thread-local scheduler.

    Any exception raised while running is captured in ``self.exception``
    so the spawning code can inspect or re-raise it.
    """

    def __init__(self, par_scheduler, num):
        Thread.__init__(self, name=('BenchmarkThread %d' % num))
        self._par_scheduler = par_scheduler
        self._id = num
        self.exception = None  # set if run() terminates abnormally

    def run(self):
        try:
            local_scheduler = self._par_scheduler.get_local_scheduler()
            # Keep pulling work until the scheduler signals exhaustion
            # by returning None.
            for work in iter(self._par_scheduler.acquire_work, None):
                local_scheduler._process_remaining_runs(work)
        except BaseException as err:
            # Capture even SystemExit/KeyboardInterrupt so the parent
            # can decide what to do with the failure.
            self.exception = err
def test_before_search_start(stopping_condition):
    """Starting a search must reset the condition's counter to zero."""
    # An execution observed *before* the search starts must not be counted.
    stopping_condition.before_statement_execution(None, None, None)
    stopping_condition.before_search_start(None)
    assert (stopping_condition.current_value() == 0)
def main(_):
    """Train a semi-supervised CNN (TF1 graph mode) with ramped
    consistency regularization, evaluating every ``eval_freq`` epochs."""
    print(FLAGS.lamb_max, FLAGS.delta, FLAGS.epsilon, FLAGS.top_bn, FLAGS.sigma)
    numpy.random.seed(seed=FLAGS.seed)
    tf.set_random_seed(numpy.random.randint(1234))
    with tf.Graph().as_default() as g:
        # Input pipelines are pinned to the CPU.
        with tf.device('/cpu:0'):
            (images_1, images_2, labels) = two_transformed_input(batch_size=FLAGS.batch_size, train=True, shuffle=True)
            (ul_images_1, ul_images_2) = unlabeled_two_transformed_input(batch_size=FLAGS.ul_batch_size, shuffle=True)
            (images_eval_test, labels_eval_test) = one_transformed_input(batch_size=FLAGS.eval_batch_size, train=False, shuffle=True)
        with tf.device(FLAGS.device):
            # Placeholders so the schedule can be fed per epoch.
            lr = tf.placeholder(tf.float32, shape=[], name='learning_rate')
            mom = tf.placeholder(tf.float32, shape=[], name='momentum')
            lamb = tf.placeholder(tf.float32, shape=[], name='regular_coef')
            with tf.variable_scope('CNN') as scope:
                (loss, train_op, global_step) = build_training_graph(images_1, images_2, labels, ul_images_1, ul_images_2, lr, mom, lamb)
                # Evaluation graph shares the training weights.
                scope.reuse_variables()
                losses_eval_test = build_test_graph(images_eval_test, labels_eval_test)
            init_op = tf.global_variables_initializer()
        if (not FLAGS.log_dir):
            logdir = None
            writer_test = None
        else:
            logdir = FLAGS.log_dir
            if (os.path.isdir(logdir) == False):
                os.makedirs(logdir)
            writer_test = tf.summary.FileWriter((FLAGS.log_dir + '/test'), g)
        saver = tf.train.Saver(tf.global_variables())
        # Supervisor handles init, checkpointing (every 150s) and recovery.
        sv = tf.train.Supervisor(is_chief=True, logdir=logdir, init_op=init_op, init_feed_dict={lr: FLAGS.learning_rate, mom: FLAGS.mom1}, saver=saver, global_step=global_step, summary_op=None, summary_writer=None, save_model_secs=150, recovery_wait_secs=0)
        print('Training...')
        with sv.managed_session() as sess:
            for ep in range(FLAGS.num_epochs):
                if sv.should_stop():
                    break
                # Epoch-level schedules: ramp the consistency weight up and
                # the learning rate / Adam beta1 down.
                rampup_value = rampup(ep, FLAGS.rampup_length)
                rampdown_value = rampdown(ep, FLAGS.rampdown_length, FLAGS.num_epochs)
                rampup_lamb = (FLAGS.lamb_max * rampup_value)
                learning_rate = ((FLAGS.learning_rate * rampdown_value) * rampup_value)
                adam_beta1 = ((rampdown_value * FLAGS.mom1) + ((1.0 - rampdown_value) * FLAGS.mom2))
                feed_dict = {lr: learning_rate, mom: adam_beta1, lamb: rampup_lamb}
                sum_loss = 0
                start = time.time()
                for i in range(FLAGS.num_iter_per_epoch):
                    (_, batch_loss, _) = sess.run([train_op, loss, global_step], feed_dict=feed_dict)
                    sum_loss += batch_loss
                end = time.time()
                # Periodic evaluation (and always on the final epoch).
                if ((((ep + 1) % FLAGS.eval_freq) == 0) or ((ep + 1) == FLAGS.num_epochs)):
                    act_values_dict = {}
                    for key in losses_eval_test.keys():
                        act_values_dict[key] = 0
                    n_iter_per_epoch = int((NUM_EVAL_EXAMPLES / FLAGS.eval_batch_size))
                    # Accumulate each eval metric over the whole test set.
                    for i in range(n_iter_per_epoch):
                        values = losses_eval_test.values()
                        act_values = sess.run(values)
                        for (key, value) in zip(act_values_dict.keys(), act_values):
                            act_values_dict[key] += value
                    summary = tf.Summary()
                    current_global_step = sess.run(global_step)
                    for key in act_values_dict.keys():
                        summary.value.add(tag=key, simple_value=(act_values_dict[key] / n_iter_per_epoch))
                    if (writer_test is not None):
                        writer_test.add_summary(summary, current_global_step)
                    cur_error = (act_values_dict['No_Acc'] / n_iter_per_epoch)
                    print('Epoch:', ep, 'CE_loss_train:', (sum_loss / FLAGS.num_iter_per_epoch), 'test error', cur_error, 'elapsed_time:', (end - start))
                else:
                    print('Epoch:', ep, 'CE_loss_train:', (sum_loss / FLAGS.num_iter_per_epoch), 'elapsed_time:', (end - start))
            sv.stop()
# NOTE(review): the six lines below look like pytest parametrize decorators
# whose '@pytest.mark' prefix was lost during extraction; as written they are
# syntax errors. Restore the '@pytest.mark' prefix before running.
.parametrize('observation_shape', [(100,), ((100,), (200,))])
.parametrize('action_size', [2])
.parametrize('latent_size', [32])
.parametrize('batch_size', [32])
.parametrize('n', [100])
.parametrize('beta', [0.5])
def test_conditional_vae(observation_shape: Shape, action_size: int, latent_size: int, batch_size: int, n: int, beta: float) -> None:
    """End-to-end shape checks for ConditionalVAE: forward, encode, decode,
    sampling, error computation and parameter updates."""
    encoder_encoder = DummyEncoderWithAction(observation_shape, action_size)
    decoder_encoder = DummyEncoderWithAction(observation_shape, latent_size)
    vae_encoder = VAEEncoder(encoder=encoder_encoder, hidden_size=encoder_encoder.get_feature_size(), latent_size=latent_size)
    vae_decoder = VAEDecoder(encoder=decoder_encoder, hidden_size=decoder_encoder.get_feature_size(), action_size=action_size)
    vae = ConditionalVAE(encoder=vae_encoder, decoder=vae_decoder)
    x = create_torch_observations(observation_shape, batch_size)
    action = torch.rand(batch_size, action_size)
    # Full forward pass reconstructs an action per batch element.
    y = vae(x, action)
    assert (y.shape == (batch_size, action_size))
    # Encoder yields a latent distribution with the configured size.
    dist = forward_vae_encode(vae, x, action)
    assert (dist.mean.shape == (batch_size, latent_size))
    y = forward_vae_decode(vae, x, dist.sample())
    assert (y.shape == (batch_size, action_size))
    # Single- and n-sample decoding paths.
    y = forward_vae_sample(vae, x)
    assert (y.shape == (batch_size, action_size))
    y = forward_vae_sample_n(vae, x, n)
    assert (y.shape == (batch_size, n, action_size))
    # The VAE loss is a scalar.
    error = compute_vae_error(vae, x, action, beta)
    assert (error.ndim == 0)
    check_parameter_updates(vae, (x, action))
def get_args_and_hdf5_file(cfg):
    """Assemble the training command line for one volnet run.

    :param cfg: ``(scene_config, fourier_std, importance_args, run_name)``.
    :return: ``(argv, hdf5_path, run_name)``.
    """
    (config, fourier, importance, filename) = cfg

    # NOTE(review): '--activation' appears twice in this list; kept verbatim.
    common_parameters = [
        '--train:mode', 'world',
        '--train:samples', '256**3',
        '--train:batchsize', '64*64*128',
        '--train:sampler_importance', '0.01',
        '--val:copy_and_split',
        '--outputmode', 'density:direct',
        '--lossmode', 'density',
        '--activation', BEST_ACTIVATION,
        '-l1', '1',
        '--lr_step', '50',
        '-i', '200',
        '--activation', BEST_ACTIVATION,
        '--layers', ':'.join([str(BEST_NETWORK[0])] * (BEST_NETWORK[1] - 1)),
        '--volumetric_features_resolution', str(GRID_RESOLUTION),
        '--volumetric_features_channels', str(GRID_CHANNELS),
        '--logdir', BASE_PATH + '/log',
        '--modeldir', BASE_PATH + '/model',
        '--hdf5dir', BASE_PATH + '/hdf5',
    ]

    def fourier_args(std):
        # Half of the remaining layer width is spent on Fourier features.
        return ['--fouriercount', str((BEST_NETWORK[0] - 4) // 2), '--fourierstd', str(std)]

    launcher = [sys.executable, 'volnet/train_volnet.py']
    args = (launcher
            + [config]
            + common_parameters
            + fourier_args(fourier)
            + importance
            + ['--name', filename])
    hdf5_file = os.path.join(BASE_PATH, 'hdf5', filename + '.hdf5')
    return (args, hdf5_file, filename)
class EllipticCurve_finite_field(EllipticCurve_field, HyperellipticCurve_finite_field):
    """Elliptic curve over a finite field: point enumeration, cardinality,
    Frobenius data, group structure, isogeny/twist utilities."""
    # Point class used for points on curves over finite fields.
    _point = ell_point.EllipticCurvePoint_finite_field

    def plot(self, *args, **kwds):
        """Plot the affine rational points (prime fields only)."""
        R = self.base_ring()
        if (not R.is_prime_field()):
            raise NotImplementedError
        from sage.plot.point import points
        # Skip the point at infinity; plot (x, y) of every affine point.
        return points([P[0:2] for P in self.points() if (not P.is_zero())], *args, **kwds)

    def _points_via_group_structure(self):
        """Enumerate all rational points from the abelian-group generators
        (at most two generators for an elliptic curve)."""
        G = self.abelian_group()
        pts = [x.element() for x in G.gens()]
        ni = G.generator_orders()
        ngens = G.ngens()
        H0 = [self(0)]
        if (ngens == 0):
            return H0
        # Multiples of the first generator.
        for m in range(1, ni[0]):
            H0.append((H0[(- 1)] + pts[0]))
        if (ngens == 1):
            return H0
        # Multiples of the second generator, then all pairwise sums.
        H1 = [self(0)]
        for m in range(1, ni[1]):
            H1.append((H1[(- 1)] + pts[1]))
        return [(P + Q) for P in H0 for Q in H1]

    def points(self):
        """Return all rational points as an immutable sorted Sequence
        (cached on first call)."""
        try:
            return self.__points
        except AttributeError:
            pass
        from sage.structure.sequence import Sequence
        k = self.base_ring()
        # Group-structure enumeration wins for large prime fields;
        # otherwise the square-root scan is used.
        if (k.is_prime_field() and (k.order() > 50)):
            v = self._points_via_group_structure()
        else:
            v = self._points_fast_sqrt()
        v.sort()
        self.__points = Sequence(v, immutable=True)
        return self.__points

    rational_points = points

    def count_points(self, n=1):
        """Number of points over the base field, or a list of counts over
        the extensions of degree 1..n when ``n > 1``."""
        try:
            n = Integer(n)
        except TypeError:
            raise TypeError('n must be a positive integer')
        if (n < 1):
            raise ValueError('n must be a positive integer')
        if (n == 1):
            return self.cardinality()
        return [self.cardinality(extension_degree=i) for i in range(1, (n + 1))]

    def random_element(self):
        """Return a uniformly random rational point (including infinity)."""
        k = self.base_field()
        # 2q + 1 slots: index 0 maps to infinity, the rest to the (up to
        # two) points above a random x-coordinate.
        n = ((2 * k.order()) + 1)
        while True:
            i = ZZ.random_element(n)
            if (not i):
                return self.point(0)
            v = self.lift_x(k.random_element(), all=True)
            try:
                return v[(i % 2)]
            except IndexError:
                # No point above that x (or only one); retry for uniformity.
                pass

    random_point = random_element

    def trace_of_frobenius(self):
        """Return a = q + 1 - #E(F_q)."""
        return ((1 + self.base_field().order()) - self.cardinality())

    def cardinality(self, algorithm=None, extension_degree=1):
        """Number of rational points, optionally over an extension field.

        ``algorithm`` is one of None (auto), 'pari', 'subfield', 'bsgs',
        'exhaustive' or 'all' (cross-check pari against bsgs).
        """
        if (extension_degree > 1):
            # #E(F_{q^d}) = Norm(Frob^d - 1) in the Frobenius order.
            frob = ((self.frobenius() ** extension_degree) - 1)
            R = self.frobenius_order()
            if (R.degree() == 1):
                return (frob * frob)
            else:
                return frob.norm()
        try:
            return self._order
        except AttributeError:
            pass
        jpol = None
        if (algorithm is None):
            # If j lives in a proper subfield, counting there is cheaper.
            jpol = self.j_invariant().minimal_polynomial()
            if (jpol.degree() < self.base_field().degree()):
                algorithm = 'subfield'
            else:
                algorithm = 'pari'
        if (algorithm == 'pari'):
            N = self.cardinality_pari()
        elif (algorithm == 'subfield'):
            if (jpol is None):
                jpol = self.j_invariant().minimal_polynomial()
            N = self._cardinality_subfield(jpol)
        elif (algorithm == 'bsgs'):
            N = self.cardinality_bsgs()
        elif (algorithm == 'exhaustive'):
            N = self.cardinality_exhaustive()
        elif (algorithm == 'all'):
            N = self.cardinality_pari()
            N2 = self.cardinality_bsgs()
            if (N != N2):
                raise AssertionError(('cardinality with pari=%s but with bsgs=%s' % (N, N2)))
        else:
            raise ValueError('algorithm {!r} is not known'.format(algorithm))
        self._order = N
        return N

    # Implementations pulled in from the sibling cardinality module.
    from .cardinality import cardinality_bsgs, cardinality_exhaustive, _cardinality_subfield

    order = cardinality

    def frobenius_polynomial(self):
        """Characteristic polynomial x^2 - a*x + q of Frobenius."""
        x = polygen(ZZ)
        return (((x ** 2) - (self.trace_of_frobenius() * x)) + self.base_field().cardinality())

    def frobenius_order(self):
        """Order ZZ[phi] generated by the Frobenius endomorphism."""
        f = self.frobenius_polynomial().factor()[0][0]
        return ZZ.extension(f, names='phi')

    def frobenius(self):
        """Frobenius as an element of its order (an integer when the
        characteristic polynomial splits over ZZ)."""
        R = self.frobenius_order()
        if (R.degree() == 1):
            return self.frobenius_polynomial().roots(multiplicities=False)[0]
        else:
            return R.gen(1)

    def frobenius_endomorphism(self):
        """The q-power Frobenius as an isogeny of this curve."""
        return self.frobenius_isogeny(self.base_field().degree())

    def frobenius_discriminant(self):
        """Discriminant a^2 - 4q of the Frobenius polynomial."""
        return self.frobenius_polynomial().discriminant()

    def cardinality_pari(self):
        """Cardinality computed via PARI's ellcard."""
        return Integer(self.__pari__().ellcard())

    # NOTE(review): bare `_method` below looks like the residue of a stripped
    # decorator (presumably @cached_method, given the set_cache call in
    # abelian_group); as written it is a NameError at class-body execution.
    _method

    def gens(self):
        """Generators of the rational point group (via PARI ellgroup)."""
        (card, ords, pts) = self.__pari__().ellgroup(flag=1)
        if (not hasattr(self, '_order')):
            # ellgroup gives us the cardinality for free; cache it.
            self._order = ZZ(card)
        pts = tuple((self.point(list(P)) for P in pts))
        if (len(pts) >= 1):
            pts[0]._order = ZZ(ords[0])
        return pts

    def __iter__(self):
        (yield from self.points())

    def __getitem__(self, n):
        return self.points()[n]

    # NOTE(review): another stripped-decorator residue (see above).
    _method

    def abelian_group(self):
        """Return E(F_q) as an AdditiveAbelianGroupWrapper with generators
        adjusted so their orders are the group invariants (n1, n2)."""
        gens = self.gens()
        assert (len(gens) <= 2)
        if (len(gens) == 2):
            # Massage Q so that its order is exactly n2 = n / ord(P).
            (P, Q) = gens
            n = self.cardinality()
            n1 = P.order()
            n2 = (n // n1)
            assert (not (n1 * Q))
            k = n1.prime_to_m_part(n2)
            Q *= k
            nQ = (n2 * generic.order_from_multiple((n2 * Q), ((n1 // k) // n2)))
            S = ((n // nQ) * P)
            T = (n2 * Q)
            S.set_order((nQ // n2), check=False)
            x = S.discrete_log(T)
            Q -= (((x * n1) // nQ) * P)
            assert (not (n2 * Q))
            Q.set_order(n2, check=False)
            gens = (P, Q)
        orders = [T.order() for T in gens]
        # Refresh the gens cache with the adjusted generators.
        self.gens.set_cache(gens)
        return AdditiveAbelianGroupWrapper(self.point_homset(), gens, orders)

    def torsion_basis(self, n):
        """Basis (P, Q) of the full rational n-torsion; raises ValueError
        when E[n] is not fully rational."""
        T = self.abelian_group().torsion_subgroup(n)
        if (T.invariants() != (n, n)):
            raise ValueError(f'curve does not have full rational {n}-torsion')
        return tuple((P.element() for P in T.gens()))

    def is_isogenous(self, other, field=None, proof=True):
        """Test isogeny with ``other`` by comparing cardinalities over a
        common field (Tate's theorem)."""
        from .ell_generic import is_EllipticCurve
        if (not is_EllipticCurve(other)):
            raise ValueError('Second argument is not an Elliptic Curve.')
        if self.is_isomorphic(other):
            return True
        if (self.base_field().characteristic() != other.base_field().characteristic()):
            raise ValueError('The base fields must have the same characteristic.')
        if (field is None):
            # Compare over the larger of the two base fields when one
            # embeds into the other.
            if (self.base_field().degree() == other.base_field().degree()):
                return (self.cardinality() == other.cardinality())
            elif (self.base_field().degree() == gcd(self.base_field().degree(), other.base_field().degree())):
                return (self.cardinality(extension_degree=(other.base_field().degree() // self.base_field().degree())) == other.cardinality())
            elif (other.base_field().degree() == gcd(self.base_field().degree(), other.base_field().degree())):
                return (other.cardinality(extension_degree=(self.base_field().degree() // other.base_field().degree())) == self.cardinality())
            else:
                raise ValueError('Curves have different base fields: use the field parameter.')
        else:
            f_deg = field.degree()
            s_deg = self.base_field().degree()
            o_deg = other.base_field().degree()
            if (not lcm(s_deg, o_deg).divides(f_deg)):
                raise ValueError('Field must be an extension of the base fields of both curves')
            else:
                sc = self.cardinality(extension_degree=(f_deg // s_deg))
                oc = other.cardinality(extension_degree=(f_deg // o_deg))
                return (sc == oc)

    def is_supersingular(self, proof=True):
        """True iff the j-invariant is supersingular."""
        return is_j_supersingular(self.j_invariant(), proof=proof)

    def is_ordinary(self, proof=True):
        """True iff the curve is not supersingular."""
        return (not is_j_supersingular(self.j_invariant(), proof=proof))

    def set_order(self, value, *, check=True, num_checks=8):
        """Cache the group order; when ``check`` is set, verify it lies in
        the Hasse interval and kills ``num_checks`` random points."""
        value = Integer(value)
        if check:
            q = self.base_field().order()
            (a, b) = Hasse_bounds(q, 1)
            if (not (a <= value <= b)):
                raise ValueError(('Value %s illegal (not an integer in the Hasse range)' % value))
            # Probabilistic check: value*G must be the identity for
            # randomly chosen points G.
            for i in range(num_checks):
                G = self.random_point()
                if ((value * G) != self(0)):
                    raise ValueError(('Value %s illegal (multiple of random point not the identity)' % value))
        self._order = value

    def _fetch_cached_order(self, other):
        """Copy ``other``'s cached order onto self (same base field only)."""
        if (hasattr(self, '_order') or (not hasattr(other, '_order'))):
            return
        F = self.base_field()
        if (F != other.base_field()):
            raise ValueError('curves have distinct base fields')
        n = getattr(other, '_order', None)
        if (n is not None):
            self._order = n

    def height_above_floor(self, ell, e):
        """Height of this (ordinary) curve above the floor of its
        ell-isogeny volcano, capped at ``e``."""
        if self.is_supersingular():
            raise ValueError('{} is not ordinary'.format(self))
        if (e == 0):
            return 0
        j = self.j_invariant()
        if (j in [0, 1728]):
            return e
        F = j.parent()
        x = polygen(F)
        from sage.rings.polynomial.polynomial_ring import polygens
        from sage.libs.pari.convert_sage import gen_to_sage
        from sage.libs.pari.all import pari
        # Classical modular polynomial Phi_ell(X, Y) via PARI.
        (X, Y) = polygens(F, ['X', 'Y'], 2)
        phi = gen_to_sage(pari.polmodular(ell), {'x': X, 'y': Y})
        j1 = phi([x, j]).roots(multiplicities=False)
        nj1 = len(j1)
        on_floor = ((self.two_torsion_rank() < 2) if (ell == 2) else (nj1 <= ell))
        if on_floor:
            return 0
        if ((e == 1) or (nj1 != (ell + 1))):
            return e
        if (nj1 < 3):
            return 0
        # Walk three non-backtracking paths down the volcano until one
        # hits the floor; the path length is the height.
        j0 = [j, j, j]
        h = 1
        while True:
            for i in range(3):
                r = (phi([x, j1[i]]) // (x - j0[i])).roots(multiplicities=False)
                if (not r):
                    return h
                j0[i] = j1[i]
                j1[i] = r[0]
            h += 1

    def endomorphism_discriminant_from_class_number(self, h):
        """Discriminant of End(E) given the class number ``h`` of the
        endomorphism order (ordinary curves over finite fields only)."""
        F = self.base_field()
        if (not F.is_finite()):
            raise ValueError('Base field {} must be finite'.format(F))
        if self.is_supersingular():
            raise ValueError('Elliptic curve ({}) must be ordinary'.format(self))
        D1 = self.frobenius_discriminant()
        D0 = D1.squarefree_part()
        if ((D0 % 4) != 1):
            D0 *= 4
        # D1 = v^2 * D0 with D0 fundamental; candidate conductors divide v.
        v = ZZ((D1 // D0)).isqrt()
        h0 = D0.class_number()
        if (h % h0):
            raise ValueError('Incorrect class number {}'.format(h))
        from sage.schemes.elliptic_curves.cm import OrderClassNumber
        cs = [(v // f) for f in v.divisors() if (OrderClassNumber(D0, h0, f) == h)]
        if (not cs):
            raise ValueError('Incorrect class number {}'.format(h))
        if (len(cs) == 1):
            return (((v // cs[0]) ** 2) * D0)
        # Several candidates: disambiguate prime by prime using the curve's
        # height on each ell-volcano.
        from sage.sets.set import Set
        L = sorted(set(sum([c.prime_factors() for c in cs], [])))
        for ell in L:
            e = self.height_above_floor(ell, v.valuation(ell))
            cs = [c for c in cs if (c.valuation(ell) == e)]
            if (not cs):
                raise ValueError('Incorrect class number {}'.format(h))
            if (len(cs) == 1):
                return (((v // cs[0]) ** 2) * D0)
        raise ValueError('Incorrect class number {}'.format(h))

    def twists(self):
        """All twists of this curve up to isomorphism, with self first
        when it appears in the special j = 0 / 1728 lists."""
        K = self.base_field()
        j = self.j_invariant()
        twists = None
        if (not j):
            twists = curves_with_j_0(K)
        elif (j == 1728):
            twists = curves_with_j_1728(K)
        if twists:
            # Move (an isomorphic copy of) self to the front of the list.
            for (i, t) in enumerate(twists):
                if self.is_isomorphic(t):
                    twists[i] = twists[0]
                    twists[0] = self
                    break
            return twists
        # Generic j: exactly one quadratic twist, by a suitable D.
        if (K.characteristic() == 2):
            # Need trace(D) != 0 in characteristic 2.
            D = K.one()
            while (D.trace() == 0):
                D = K.random_element()
        else:
            # Need a non-square D in odd characteristic.
            D = K.gen()
            q2 = ((K.cardinality() - 1) // 2)
            while ((not D) or ((D ** q2) == 1)):
                D = K.random_element()
        return [self, self.quadratic_twist(D)]
def write_log_task(filename='Changelog'):
    """Write ``git log LOG_START..LOG_END`` to *filename*.

    :param filename: output path for the release changelog.
    :raises RuntimeError: if the git command exits non-zero.
    """
    st = subprocess.Popen(['git', 'log', f'{LOG_START}..{LOG_END}'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (output, error) = st.communicate()
    if (not (st.returncode == 0)):
        raise RuntimeError(('%s failed' % str(error)))
    # FIX: reuse the stdout captured above. The original called
    # communicate() a second time, whose result is empty once the first
    # call has already drained the pipes — the log was never written.
    out = output.decode()
    with open(filename, 'w') as a:
        a.writelines(out)
    print('Release logs generated successfully')
class TicTacToeNNet():
    """Keras policy/value network for 3D TicTacToe.

    Input: a (board_z, board_x, board_y) board tensor.  Two heads are
    produced: ``pi`` (softmax over the action space, categorical
    cross-entropy loss) and ``v`` (tanh scalar value, MSE loss).
    """

    def __init__(self, game, args):
        # Board geometry and move count come from the game object.
        (self.board_z, self.board_x, self.board_y) = game.getBoardSize()
        self.action_size = game.getActionSize()
        self.args = args
        self.input_boards = Input(shape=(self.board_z, self.board_x, self.board_y))
        # Append a singleton channel dimension for channels-last Conv3D.
        x_image = Reshape((self.board_z, self.board_x, self.board_y, 1))(self.input_boards)
        # BUGFIX: for a channels-last 5D tensor (batch, z, x, y, c) the
        # channel axis is the last one; the original `axis=3` normalized
        # over the spatial y dimension instead of the channels.
        h_conv1 = Activation('relu')(BatchNormalization(axis=-1)(Conv3D(args.num_channels, 3, padding='same')(x_image)))
        h_conv2 = Activation('relu')(BatchNormalization(axis=-1)(Conv3D(args.num_channels, 3, padding='same')(h_conv1)))
        h_conv3 = Activation('relu')(BatchNormalization(axis=-1)(Conv3D(args.num_channels, 3, padding='same')(h_conv2)))
        h_conv4 = Activation('relu')(BatchNormalization(axis=-1)(Conv3D(args.num_channels, 3, padding='valid')(h_conv3)))
        h_conv4_flat = Flatten()(h_conv4)
        # Fully-connected trunk; axis=1 is correct for 2D (batch, features).
        s_fc1 = Dropout(args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(1024)(h_conv4_flat))))
        s_fc2 = Dropout(args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(512)(s_fc1))))
        self.pi = Dense(self.action_size, activation='softmax', name='pi')(s_fc2)
        self.v = Dense(1, activation='tanh', name='v')(s_fc2)
        self.model = Model(inputs=self.input_boards, outputs=[self.pi, self.v])
        self.model.compile(loss=['categorical_crossentropy', 'mean_squared_error'], optimizer=Adam(args.lr))
def build_optimizers(model, cfgs):
    """Build one optimizer, or one optimizer per sub-module.

    If every value of ``cfgs`` is itself a dict, each key is taken as the
    name of a sub-module of ``model`` and a ``{name: optimizer}`` dict is
    returned; otherwise ``cfgs`` is a single optimizer config applied to
    the whole model.  A ``DataParallel``-style wrapper is unwrapped first.
    """
    if hasattr(model, 'module'):
        model = model.module
    if all(isinstance(cfg, dict) for cfg in cfgs.values()):
        # Copy each sub-config so build_optimizer cannot mutate the input.
        return {
            name: build_optimizer(getattr(model, name), cfg.copy())
            for name, cfg in cfgs.items()
        }
    return build_optimizer(model, cfgs)
class UnetSplit(nn.Module):
    """U-Net with one shared encoder and two decoders ('fake' and 'real'
    region reconstruction), optionally sharing the first ``shared_depth``
    decoder stages in a common shared decoder.

    Both forward paths return a ``(reconstructed_fake, reconstructed_real)``
    tuple of tanh-activated images; with ``long_skip`` the input is added
    back to each output (shared path only, as in the original).
    """

    def __init__(self, in_channels=3, depth=5, shared_depth=0, blocks=1, out_channels_image=3, out_channels_mask=1, start_filters=32, residual=True, batch_norm=nn.BatchNorm2d, transpose=True, concat=True, transfer_data=True, long_skip=False):
        super(UnetSplit, self).__init__()
        self.transfer_data = transfer_data
        self.shared = shared_depth
        (self.optimizer_encoder, self.optimizer_real, self.optimizer_fake) = (None, None, None)
        self.optimizer_shared = None
        if type(blocks) is not tuple:
            blocks = (blocks, blocks, blocks, blocks)
        if not transfer_data:
            # Without skip-data transfer there is nothing to concatenate.
            concat = False
        self.encoder = UnetEncoderD(in_channels=in_channels, depth=depth, blocks=blocks[0], start_filters=start_filters, residual=residual, batch_norm=batch_norm)
        self.fake_region_decoder = UnetDecoderD(in_channels=(start_filters * (2 ** ((depth - shared_depth) - 1))), out_channels=out_channels_image, depth=(depth - shared_depth), blocks=blocks[1], residual=residual, batch_norm=batch_norm, transpose=transpose, concat=concat)
        self.real_region_decoder = UnetDecoderD(in_channels=(start_filters * (2 ** ((depth - shared_depth) - 1))), out_channels=out_channels_image, depth=(depth - shared_depth), blocks=blocks[2], residual=residual, batch_norm=batch_norm, transpose=transpose, concat=concat)
        # BUGFIX: the original initialised a misspelled `share_decoder`
        # attribute only, leaving `self.shared_decoder` undefined (and an
        # AttributeError on access) whenever shared_depth == 0.
        self.shared_decoder = None
        self.share_decoder = None  # legacy alias kept for backward compatibility
        self.long_skip = long_skip
        self._forward = self.unshared_forward
        if self.shared != 0:
            self._forward = self.shared_forward
            self.shared_decoder = UnetDecoderD(in_channels=(start_filters * (2 ** (depth - 1))), out_channels=(start_filters * (2 ** ((depth - shared_depth) - 1))), depth=shared_depth, blocks=blocks[3], residual=residual, batch_norm=batch_norm, transpose=transpose, concat=concat, is_final=False)

    def set_optimizers(self):
        """Create an Adam optimizer (lr=0.001) per trainable component."""
        self.optimizer_encoder = torch.optim.Adam(self.encoder.parameters(), lr=0.001)
        self.optimizer_fake = torch.optim.Adam(self.fake_region_decoder.parameters(), lr=0.001)
        self.optimizer_real = torch.optim.Adam(self.real_region_decoder.parameters(), lr=0.001)
        if self.shared != 0:
            self.optimizer_shared = torch.optim.Adam(self.shared_decoder.parameters(), lr=0.001)

    def zero_grad_all(self):
        """Zero the gradients of every component optimizer."""
        self.optimizer_encoder.zero_grad()
        self.optimizer_fake.zero_grad()
        self.optimizer_real.zero_grad()
        if self.shared != 0:
            self.optimizer_shared.zero_grad()

    def step_all(self):
        """Step every component optimizer."""
        self.optimizer_encoder.step()
        self.optimizer_fake.step()
        self.optimizer_real.step()
        if self.shared != 0:
            self.optimizer_shared.step()

    def __call__(self, synthesized):
        # NOTE(review): overriding __call__ on an nn.Module bypasses
        # forward/backward hooks; kept as-is for behavioral compatibility.
        return self._forward(synthesized)

    def forward(self, synthesized):
        return self._forward(synthesized)

    def unshared_forward(self, synthesized):
        """Forward pass when no decoder stages are shared."""
        (image_code, before_pool) = self.encoder(synthesized)
        if not self.transfer_data:
            before_pool = None
        reconstructed_fake = torch.tanh(self.fake_region_decoder(image_code, before_pool))
        reconstructed_real = torch.tanh(self.real_region_decoder(image_code, before_pool))
        return (reconstructed_fake, reconstructed_real)

    def shared_forward(self, synthesized):
        """Forward pass routing the deepest stages through the shared decoder."""
        (image_code, before_pool) = self.encoder(synthesized)
        if self.transfer_data:
            # Deepest `shared` (+1 bottleneck) skip tensors feed the shared
            # decoder; the rest feed the two region decoders.
            shared_before_pool = before_pool[((- self.shared) - 1):]
            unshared_before_pool = before_pool[:(- self.shared)]
        else:
            before_pool = None
            shared_before_pool = None
            unshared_before_pool = None
        x = self.shared_decoder(image_code, shared_before_pool)
        reconstructed_fake = torch.tanh(self.fake_region_decoder(x, unshared_before_pool))
        reconstructed_real = torch.tanh(self.real_region_decoder(x, unshared_before_pool))
        if self.long_skip:
            reconstructed_fake = (reconstructed_fake + synthesized)
            reconstructed_real = (reconstructed_real + synthesized)
        return (reconstructed_fake, reconstructed_real)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.