code stringlengths 101 5.91M |
|---|
class Average():
    """Running-average accumulator for scalars or torch tensors.

    `sum` and `cnt` (listed in SAVE) are the serializable state used by
    state_dict / load_state_dict.
    """

    # Attribute names persisted by state_dict() / load_state_dict().
    SAVE = ['sum', 'cnt']

    def __init__(self):
        self.reset()

    def add(self, data: Union[int, float, torch.Tensor]):
        """Accumulate a single observation."""
        if torch.is_tensor(data):
            # Detach so the running sum does not keep the autograd graph alive.
            data = data.detach()
        self.sum += data
        self.cnt += 1

    def reset(self):
        """Clear the accumulated sum and count."""
        self.sum = 0
        self.cnt = 0

    def get(self, reset=True) -> Union[float, torch.Tensor]:
        """Return the mean of all added values; optionally reset afterwards.

        Raises ZeroDivisionError if called before any add() — unchanged from
        the original behavior.
        """
        res = self.sum / self.cnt
        if reset:
            self.reset()
        return res

    def state_dict(self) -> Dict[str, Any]:
        """Return the persistable state (only the keys in SAVE)."""
        return {k: self.__dict__[k] for k in self.SAVE}

    def load_state_dict(self, state: Dict[str, Any]):
        """Restore state produced by state_dict(); tolerates None/empty input."""
        self.__dict__.update(state or {})
def benchmark(problems, output_csv, obj, num_subproblems_sweep, split_methods_sweep, split_fraction_sweep):
    """Run the POP solver over every problem and hyperparameter combination.

    Appends one CSV result line per successful run to `output_csv`; per-run
    solver logs and pickled solution dicts are written under TOP_DIR.
    Failed runs are logged with a traceback and skipped.
    """
    with open(output_csv, 'a') as results:
        print_(','.join(HEADERS), file=results)
        for problem_name, topo_fname, tm_fname in problems:
            problem = Problem.from_file(topo_fname, tm_fname)
            print_(problem.name, tm_fname)
            traffic_seed = problem.traffic_matrix.seed
            total_demand = problem.total_demand
            print_('traffic seed: {}'.format(traffic_seed))
            print_('traffic matrix model: {}'.format(problem.traffic_matrix.model))
            print_('traffic matrix scale factor: {}'.format(problem.traffic_matrix.scale_factor))
            print_('total demand: {}'.format(total_demand))
            num_paths, edge_disjoint, dist_metric = PATH_FORM_HYPERPARAMS
            for num_subproblems, split_method, split_fraction in product(num_subproblems_sweep, split_methods_sweep, split_fraction_sweep):
                # NOTE(review): this overrides the swept split_fraction for
                # these traffic matrices — confirm the special case is intended.
                if 'poisson-high-intra' in tm_fname:
                    split_fraction = 0.75
                run_dir = os.path.join(TOP_DIR, problem.name, '{}-{}'.format(traffic_seed, problem.traffic_matrix.model))
                if not os.path.exists(run_dir):
                    os.makedirs(run_dir)
                try:
                    print_('\nPOP, objective {}, {} split method, {} subproblems, {} paths, edge disjoint {}, dist metric {}'.format(obj, split_method, num_subproblems, num_paths, edge_disjoint, dist_metric))
                    run_pop_dir = os.path.join(run_dir, 'pop', obj, split_method, '{}-partitions'.format(num_subproblems), '{}-paths'.format(num_paths), 'edge_disjoint-{}'.format(edge_disjoint), 'dist_metric-{}'.format(dist_metric))
                    if not os.path.exists(run_pop_dir):
                        os.makedirs(run_pop_dir)
                    with open(os.path.join(run_pop_dir, '{}-pop-objective_{}-split-method_{}-{}_partitions-{}_paths-edge_disjoint_{}-dist_metric_{}.txt'.format(problem.name, obj, split_method, num_subproblems, num_paths, edge_disjoint, dist_metric)), 'w') as log:
                        pop = POP(objective=Objective.get_obj_from_str(obj), num_subproblems=num_subproblems, split_method=split_method, split_fraction=split_fraction, num_paths=num_paths, edge_disjoint=edge_disjoint, dist_metric=dist_metric, out=log)
                        pop.solve(problem)
                        sol_dict = pop.sol_dict
                        with open(log.name.replace('.txt', '-sol-dict.pkl'), 'wb') as w:
                            pickle.dump(sol_dict, w)
                    check_feasibility(problem, [sol_dict])
                    result_line = PLACEHOLDER.format(problem_name, len(problem.G.nodes), len(problem.G.edges), traffic_seed, problem.traffic_matrix.model, problem.traffic_matrix.scale_factor, len(problem.commodity_list), total_demand, 'pop', split_method, split_fraction, num_subproblems, num_paths, edge_disjoint, dist_metric, obj, pop.obj_val, pop.runtime_est(NUM_CORES))
                    print_(result_line, file=results)
                except Exception:
                    # BUG FIX: the original bare `except:` also swallowed
                    # KeyboardInterrupt/SystemExit; catch Exception so the
                    # sweep stays abortable while failures are still logged.
                    print_('POP, objective {}, split method {}, {} subproblems, {} paths, Problem {}, traffic seed {}, traffic model {} failed'.format(obj, split_method, num_subproblems, num_paths, problem.name, traffic_seed, problem.traffic_matrix.model))
                    traceback.print_exc(file=sys.stdout)
def wrapped_conv(*args, **kwargs):
    """Convolution wrapper preferring cuDNN, falling back to Theano's conv2d.

    `args` is expected to be (input, W, input_shape, W_shape); `kwargs`
    mirrors theano.tensor.nnet.conv2d's keyword interface.
    """
    # cuDNN's dnn_conv does not accept these keywords, so strip them from a copy.
    copy = dict(kwargs)
    copy.pop('image_shape', None)
    copy.pop('filter_shape', None)
    # The caller must request filter_flip=True for conv2d/dnn_conv equivalence;
    # defaulting to False makes a missing flag fail the assert as well.
    assert copy.pop('filter_flip', False)
    (input, W, input_shape, get_W_shape) = args
    if theano.config.device == 'cpu':
        return theano.tensor.nnet.conv2d(*args, **kwargs)
    try:
        # dnn_conv requires float32 operands.
        return theano.sandbox.cuda.dnn.dnn_conv(input.astype('float32'), W.astype('float32'), **copy)
    except Exception:
        # Unused exception variable removed; any cuDNN failure (missing lib,
        # unsupported shape) falls back to the reference implementation.
        print('falling back to default conv2d')
        return theano.tensor.nnet.conv2d(*args, **kwargs)
def _format(val: Any, output_format: str = 'standard', errors: str = 'coarse') -> Any:
    """Format a single Guatemalan NIT value.

    Returns a one-element list: NaN for null/invalid values (or the raw value
    when errors == 'ignore'), otherwise the compact/standard formatted NIT.
    Raises ValueError on invalid input when errors == 'raise'.
    """
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]
    if not validate_gt_nit(val):
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {val}')
        return [val if errors == 'ignore' else np.nan]
    if output_format == 'compact':
        return [nit.compact(val)]
    if output_format == 'standard':
        return [nit.format(val)]
    # Unknown output_format: preserve the original empty-list fall-through.
    return []
class ResBlock(nn.Module):
    """Fully-connected residual block: two Linear+BatchNorm1d stages with a
    skip connection, ReLU after each stage and after the residual add."""

    def __init__(self, n_channel=1, dim=512):
        super(ResBlock, self).__init__()
        self.bn1 = nn.BatchNorm1d(n_channel)
        self.fc1 = nn.Linear(in_features=dim, out_features=dim)
        self.bn2 = nn.BatchNorm1d(n_channel)
        self.fc2 = nn.Linear(in_features=dim, out_features=dim)

    def forward(self, x):
        """Apply the two-stage transform and add the input back (pre-ReLU)."""
        shortcut = x
        h = F.relu(self.bn1(self.fc1(x)))
        h = self.bn2(self.fc2(h))
        return F.relu(h + shortcut)
def get_times(json_data):
    """Flatten the nested {fwd_bwd: {test_name: time}} mapping into a flat
    dict keyed by construct_name(fwd_bwd, test_name)."""
    return {
        construct_name(direction, test_name): timings[test_name]
        for direction, timings in json_data.items()
        for test_name in timings
    }
class HighResolutionModule(nn.Module):
    """One HRNet stage: parallel branches at different resolutions, followed
    by fuse layers that exchange information across resolutions.

    NOTE(review): relies on module-level BN_MOMENTUM and logger defined
    elsewhere in the file; logic is unchanged from the original apart from
    removing a trailing extraction artifact and adding documentation.
    """

    def __init__(self, num_branches, blocks, num_blocks, num_inchannels, num_channels, fuse_method, multi_scale_output=True):
        super(HighResolutionModule, self).__init__()
        self._check_branches(num_branches, blocks, num_blocks, num_inchannels, num_channels)
        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches
        self.multi_scale_output = multi_scale_output
        self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(True)

    def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels):
        """Validate that the per-branch config lists match num_branches."""
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)

    def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1):
        """Build one branch as a sequence of `block` units; a 1x1 conv
        downsample is added when stride or channel count changes."""
        downsample = None
        if stride != 1 or self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index], num_channels[branch_index] * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(num_channels[branch_index] * block.expansion, momentum=BN_MOMENTUM))
        layers = []
        layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index], stride, downsample))
        # Record the branch's new channel count for subsequent blocks/fusion.
        self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index]))
        return nn.Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build all parallel branches."""
        branches = []
        for i in range(num_branches):
            branches.append(self._make_one_branch(i, block, num_blocks, num_channels))
        return nn.ModuleList(branches)

    def _make_fuse_layers(self):
        """Build fuse_layers[i][j] mapping branch j's resolution to branch i's.

        j > i: 1x1 conv + upsample (lower res -> higher res);
        j == i: identity (None); j < i: chain of strided 3x3 convs.
        """
        if self.num_branches == 1:
            return None
        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False),
                        nn.BatchNorm2d(num_inchannels[i]),
                        nn.Upsample(scale_factor=2 ** (j - i), mode='nearest')))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            # Last conv in the chain matches branch i's channels
                            # and has no ReLU (applied after the residual sum).
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False),
                                nn.BatchNorm2d(num_outchannels_conv3x3)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False),
                                nn.BatchNorm2d(num_outchannels_conv3x3),
                                nn.ReLU(True)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)

    def get_num_inchannels(self):
        """Return the (possibly updated) per-branch channel counts."""
        return self.num_inchannels

    def forward(self, x):
        """x is a list of per-branch tensors; returns the fused list."""
        if self.num_branches == 1:
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
def test_analytical_none_argument():
    """solve() must reject a None problem with TypeError, not fail later."""
    with pytest.raises(TypeError):
        analytical.solve(None)
class Transformer(nn.Module):
    """DETR-style encoder-decoder transformer.

    forward() flattens a (bs, c, h, w) feature map to sequence-first form,
    runs the encoder and decoder, and returns decoder outputs plus the
    encoder memory reshaped back to the spatial layout.
    """

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False, return_intermediate_dec=False):
        super().__init__()
        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before)
        # Pre-norm variants need a final LayerNorm on the encoder output.
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, return_intermediate=return_intermediate_dec)
        self._reset_parameters()
        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        """Xavier-initialize every parameter with more than one dimension."""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, mask, query_embed, pos_embed):
        (bs, c, h, w) = src.shape
        # (bs, c, h, w) -> (h*w, bs, c): sequence-first for nn.MultiheadAttention.
        src = src.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        # Broadcast the learned queries across the batch dimension.
        query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
        if mask is not None:
            mask = mask.flatten(1)
        # Decoder target starts at zero; queries enter via query_pos.
        tgt = torch.zeros_like(query_embed)
        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed)
        return (hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w))
class CoNLLFile():
    """Reader/writer for CoNLL-U files (from a path or an in-memory string).

    BUG FIX: `file`, `sents` and `num_words` are used as attributes throughout
    this class (e.g. `io.StringIO(self.file)`, `for sent in self.sents`) but
    were declared as plain methods — the stripped `@property` decorators are
    restored here.
    """

    def __init__(self, filename=None, input_str=None, ignore_gapping=True):
        self.ignore_gapping = ignore_gapping
        if filename is not None and not os.path.exists(filename):
            raise Exception('File not found at: ' + filename)
        if filename is None:
            assert input_str is not None and len(input_str) > 0
            self._file = input_str
            self._from_str = True
        else:
            self._file = filename
            self._from_str = False

    def load_all(self):
        """Eagerly trigger the lazy `sents` and `num_words` computations."""
        _ = self.sents
        _ = self.num_words

    def load_conll(self):
        """Parse the CoNLL-U source into a list of sentences, each a list of
        token-field lists. Comment lines and (optionally) gapping rows are
        skipped."""
        sents, cache = [], []
        if self._from_str:
            infile = io.StringIO(self.file)
        else:
            infile = open(self.file)
        with infile:
            for line in infile:
                line = line.strip()
                if len(line) == 0:
                    # Blank line terminates a sentence.
                    if len(cache) > 0:
                        sents.append(cache)
                        cache = []
                else:
                    if line.startswith('#'):
                        continue
                    array = line.split('\t')
                    # IDs containing '.' are elided (gapping) tokens.
                    if self.ignore_gapping and '.' in array[0]:
                        continue
                    assert len(array) == FIELD_NUM
                    cache += [array]
            # Flush a sentence not followed by a trailing blank line.
            if len(cache) > 0:
                sents.append(cache)
        return sents

    @property
    def file(self):
        return self._file

    @property
    def sents(self):
        # Lazily parsed and cached on first access.
        if not hasattr(self, '_sents'):
            self._sents = self.load_conll()
        return self._sents

    def __len__(self):
        return len(self.sents)

    @property
    def num_words(self):
        """Number of syntactic words (multi-word-token ranges excluded)."""
        if not hasattr(self, '_num_words'):
            n = 0
            for sent in self.sents:
                for ln in sent:
                    if '-' not in ln[0]:
                        n += 1
            self._num_words = n
        return self._num_words

    def get(self, fields, as_sentences=False):
        """Return the requested fields for every word, flat or per sentence."""
        assert isinstance(fields, list), 'Must provide field names as a list.'
        assert len(fields) >= 1, 'Must have at least one field.'
        field_idxs = [FIELD_TO_IDX[f.lower()] for f in fields]
        results = []
        for sent in self.sents:
            cursent = []
            for ln in sent:
                # Skip multi-word-token range lines (IDs like "3-4").
                if '-' in ln[0]:
                    continue
                if len(field_idxs) == 1:
                    cursent += [ln[field_idxs[0]]]
                else:
                    cursent += [[ln[fid] for fid in field_idxs]]
            if as_sentences:
                results.append(cursent)
            else:
                results += cursent
        return results

    def set(self, fields, contents):
        """Overwrite the given fields, one contents item per word, in place."""
        assert isinstance(fields, list), 'Must provide field names as a list.'
        assert isinstance(contents, list), 'Must provide contents as a list (one item per line).'
        assert len(fields) >= 1, 'Must have at least one field.'
        assert self.num_words == len(contents), 'Contents must have the same number as the original file.'
        field_idxs = [FIELD_TO_IDX[f.lower()] for f in fields]
        cidx = 0
        for sent in self.sents:
            for ln in sent:
                if '-' in ln[0]:
                    continue
                if len(field_idxs) == 1:
                    ln[field_idxs[0]] = contents[cidx]
                else:
                    for fid, ct in zip(field_idxs, contents[cidx]):
                        ln[fid] = ct
                cidx += 1
        return

    def write_conll(self, filename):
        """Serialize the (possibly modified) document to `filename`."""
        conll_string = self.conll_as_string()
        with open(filename, 'w') as outfile:
            outfile.write(conll_string)
        return

    def conll_as_string(self):
        """Render the document back into CoNLL-U text."""
        return_string = ''
        for sent in self.sents:
            for ln in sent:
                return_string += '\t'.join(ln) + '\n'
            return_string += '\n'
        return return_string

    def write_conll_with_lemmas(self, lemmas, filename):
        """Write the document with the LEMMA column replaced by `lemmas`."""
        assert self.num_words == len(lemmas), 'Num of lemmas does not match the number in original data file.'
        lemma_idx = FIELD_TO_IDX['lemma']
        idx = 0
        with open(filename, 'w') as outfile:
            for sent in self.sents:
                for ln in sent:
                    if '-' not in ln[0]:
                        lm = lemmas[idx]
                        if len(lm) == 0:
                            lm = '_'  # empty lemma serializes as underscore
                        ln[lemma_idx] = lm
                        idx += 1
                    print('\t'.join(ln), file=outfile)
                print('', file=outfile)
        return

    def get_mwt_expansions(self):
        """Collect (surface form, expanded words) pairs for every MWT range."""
        word_idx = FIELD_TO_IDX['word']
        expansions = []
        src = ''
        dst = []
        for sent in self.sents:
            mwt_begin = 0
            mwt_end = -1
            for ln in sent:
                if '.' in ln[0]:
                    continue
                if '-' in ln[0]:
                    # Start of a multi-word token range.
                    mwt_begin, mwt_end = [int(x) for x in ln[0].split('-')]
                    src = ln[word_idx]
                    continue
                if mwt_begin <= int(ln[0]) < mwt_end:
                    dst += [ln[word_idx]]
                elif int(ln[0]) == mwt_end:
                    # Last word of the range: emit the accumulated expansion.
                    dst += [ln[word_idx]]
                    expansions += [[src, ' '.join(dst)]]
                    src = ''
                    dst = []
        return expansions

    def get_mwt_expansion_cands(self):
        """Return surface forms of tokens flagged MWT=Yes in the MISC column."""
        word_idx = FIELD_TO_IDX['word']
        cands = []
        for sent in self.sents:
            for ln in sent:
                if 'MWT=Yes' in ln[-1]:
                    cands += [ln[word_idx]]
        return cands

    def write_conll_with_mwt_expansions(self, expansions, output_file):
        """Write the document with MWT=Yes tokens expanded; token IDs are
        renumbered so the expanded words occupy a contiguous range."""
        idx = 0
        count = 0
        for sent in self.sents:
            for ln in sent:
                idx += 1
                if 'MWT=Yes' not in ln[-1]:
                    print('{}\t{}'.format(idx, '\t'.join(ln[1:6] + [str(idx - 1)] + ln[7:])), file=output_file)
                else:
                    expanded = [x for x in expansions[count].split(' ') if len(x) > 0]
                    count += 1
                    endidx = idx + len(expanded) - 1
                    # Range line for the surface token; columns 5 and 8 blanked.
                    print('{}-{}\t{}'.format(idx, endidx, '\t'.join(['_' if (i == 5 or i == 8) else x for i, x in enumerate(ln[1:])])), file=output_file)
                    for e_i, e_word in enumerate(expanded):
                        print('{}\t{}\t{}'.format(idx + e_i, e_word, '\t'.join(['_'] * 4 + [str(idx + e_i - 1)] + ['_'] * 3)), file=output_file)
                    idx = endidx
            print('', file=output_file)
            idx = 0
        assert count == len(expansions), '{} {} {}'.format(count, len(expansions), expansions)
        return
# NOTE(review): the decorator line began with `.parametrize(` — the leading
# `@pytest.mark` was evidently stripped during extraction and is restored here.
@pytest.mark.parametrize(
    'values, uniques, expected_counts',
    [
        (np.array([1] * 10 + [2] * 4 + [3] * 15), np.array([1, 2, 3]), [10, 4, 15]),
        (np.array([1] * 10 + [2] * 4 + [3] * 15), np.array([1, 2, 3, 5]), [10, 4, 15, 0]),
        (np.array([np.nan] * 10 + [2] * 4 + [3] * 15), np.array([2, 3, np.nan]), [4, 15, 10]),
        (np.array(['b'] * 4 + ['a'] * 16 + ['c'] * 20, dtype=object), ['a', 'b', 'c'], [16, 4, 20]),
        (np.array(['b'] * 4 + ['a'] * 16 + ['c'] * 20, dtype=object), ['c', 'b', 'a'], [20, 4, 16]),
        (np.array([np.nan] * 4 + ['a'] * 16 + ['c'] * 20, dtype=object), ['c', np.nan, 'a'], [20, 4, 16]),
        (np.array(['b'] * 4 + ['a'] * 16 + ['c'] * 20, dtype=object), ['a', 'b', 'c', 'e'], [16, 4, 20, 0]),
    ],
)
def test_get_counts(values, uniques, expected_counts):
    """_get_counts should return per-unique occurrence counts (0 if absent)."""
    counts = _get_counts(values, uniques)
    assert_array_equal(counts, expected_counts)
def apply_tf_op(inputs, session, input_gate, output_gate, batch_size, train_gate=None, print_option=True):
    """Evaluate `output_gate` over `inputs` in fixed-size batches.

    Inputs are zero-padded to a multiple of batch_size; the padding rows are
    dropped from the concatenated result so it matches the original length.
    When `train_gate` is given it is fed False to force inference mode.
    """
    inputs, ndata = zero_padding2nmul(inputs=inputs, mul=batch_size)
    nbatch = len(inputs) // batch_size
    feed_dict = dict()
    if train_gate is not None:
        feed_dict[train_gate] = False
    # The two original loops were identical except for the progress bar —
    # choose the iterator once instead of duplicating the body.
    batch_iter = tqdm_range(nbatch) if print_option else range(nbatch)
    outputs = list()
    for b in batch_iter:
        feed_dict[input_gate] = inputs[b * batch_size:(b + 1) * batch_size]
        outputs.append(session.run(output_gate, feed_dict=feed_dict))
    outputs = np.concatenate(outputs, axis=0)
    # Trim the rows that correspond to zero padding.
    return outputs[:ndata]
def load_configuration(configuration_path):
    """Parse the YAML file at `configuration_path` and return its contents.

    NOTE(review): yaml.load with FullLoader can instantiate a range of Python
    objects from tagged nodes — prefer yaml.safe_load if this path can ever
    point at untrusted input.
    """
    # The original pre-assigned `configuration = None` needlessly; return directly.
    with open(configuration_path, 'r') as configuration_file:
        return yaml.load(configuration_file, Loader=yaml.FullLoader)
# NOTE(review): the leading `@test` of `@test_utils.test(...)` and the `@`
# matrix-multiply operators were stripped during extraction; both are restored
# here. The expected value [13, 23] below confirms Matrix @ Vector semantics
# ([2*2+3*3, 4*2+5*3]).
@test_utils.test(arch=get_host_arch_list())
def test_constant_matrices():
    """Exercise taichi vector/matrix constants, elementwise ops and matmul."""
    assert ti.cos(math.pi / 3) == test_utils.approx(0.5)
    assert np.allclose((-ti.Vector([2, 3])).to_numpy(), np.array([-2, -3]))
    assert ti.cos(ti.Vector([2, 3])).to_numpy() == test_utils.approx(np.cos(np.array([2, 3])))
    assert ti.max(2, 3) == 3
    res = ti.max(4, ti.Vector([3, 4, 5]))
    assert np.allclose(res.to_numpy(), np.array([4, 4, 5]))
    res = ti.Vector([2, 3]) + ti.Vector([3, 4])
    assert np.allclose(res.to_numpy(), np.array([5, 7]))
    res = ti.atan2(ti.Vector([2, 3]), ti.Vector([3, 4]))
    assert res.to_numpy() == test_utils.approx(np.arctan2(np.array([2, 3]), np.array([3, 4])))
    res = ti.Matrix([[2, 3], [4, 5]]) @ ti.Vector([2, 3])
    assert np.allclose(res.to_numpy(), np.array([13, 23]))
    v = ti.Vector([3, 4])
    w = ti.Vector([5, -12])
    r = ti.Vector([1, 2, 3, 4])
    s = ti.Matrix([[1, 2], [3, 4]])
    assert v.normalized().to_numpy() == test_utils.approx(np.array([0.6, 0.8]))
    assert v.cross(w) == test_utils.approx(-12 * 3 - 4 * 5)
    w.y = v.x * w[0]
    r.x = r.y
    r.y = r.z
    r.z = r.w
    r.w = r.x
    assert np.allclose(w.to_numpy(), np.array([5, 15]))
    assert ti.select(ti.Vector([1, 0]), ti.Vector([2, 3]), ti.Vector([4, 5])) == ti.Vector([2, 5])
    s[0, 1] = 2
    assert s[0, 1] == 2

    # NOTE(review): a kernel decorator was presumably stripped here too — the
    # typed signature `t: ti.i32` matches taichi's @ti.kernel convention; confirm.
    @ti.kernel
    def func(t: ti.i32):
        m = ti.Matrix([[2, 3], [4, t]])
        print(m @ ti.Vector([2, 3]))
        m += ti.Matrix([[3, 4], [5, t]])
        print(m @ v)
        print(r.x, r.y, r.z, r.w)
        s = w @ m
        print(s)
        print(m)

    func(5)
class RobustScannerFusionLayer(BaseModule):
    """Fuse two same-shaped feature tensors: concatenate along `dim`, project
    with a linear layer, then gate with a GLU (which halves that dimension
    back to its original size)."""

    def __init__(self, dim_model, dim=-1, init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        self.dim_model = dim_model
        self.dim = dim
        self.linear_layer = nn.Linear(dim_model * 2, dim_model * 2)
        self.glu_layer = nn.GLU(dim=dim)

    def forward(self, x0, x1):
        """Return the gated fusion of x0 and x1 (shapes must match)."""
        assert x0.size() == x1.size()
        combined = torch.cat([x0, x1], self.dim)
        projected = self.linear_layer(combined)
        return self.glu_layer(projected)
class SingletonSpecies(CharacteristicSpecies):
    """Characteristic species of size 1 (a singleton)."""

    def __init__(self, min=None, max=None, weight=None):
        # BUG FIX(review): the original called `CharacteristicSpecies_class.__init__`,
        # a name inconsistent with the declared base class `CharacteristicSpecies`
        # (likely an extraction/rename artifact) — use super() so the actual
        # base is initialized. Confirm no module-level alias existed.
        super().__init__(1, min=min, max=max, weight=weight)
        self._name = 'Singleton species'
        self._state_info = []
class MBMPOAgent(ModelBasedAgent):
    """Model-Based MPO agent: wraps an MBMPO algorithm inside the generic
    model-based agent training loop."""

    def __init__(self, model_optimizer, policy, value_function, dynamical_model, reward_model, optimizer, mpo_value_learning_criterion, termination_model=None, initial_distribution=None, plan_horizon=1, plan_samples=8, plan_elites=1, max_memory=10000, model_learn_batch_size=64, model_learn_num_iter=30, bootstrap=True, mpo_epsilon=0.1, mpo_epsilon_mean=0.1, mpo_epsilon_var=0.0001, mpo_regularization=False, mpo_num_iter=100, mpo_gradient_steps=50, mpo_batch_size=None, mpo_num_action_samples=15, mpo_target_update_frequency=4, mpo_policy_update_frequency=1, sim_num_steps=200, sim_initial_states_num_trajectories=8, sim_initial_dist_num_trajectories=0, sim_memory_num_trajectories=0, sim_max_memory=100000, sim_num_subsample=1, sim_refresh_interval=1, thompson_sampling=False, gamma=1.0, exploration_steps=0, exploration_episodes=0, tensorboard=False, comment=''):
        self.algorithm = MBMPO(dynamical_model, reward_model, policy, value_function, criterion=mpo_value_learning_criterion, epsilon=mpo_epsilon, epsilon_mean=mpo_epsilon_mean, epsilon_var=mpo_epsilon_var, regularization=mpo_regularization, num_action_samples=mpo_num_action_samples, gamma=gamma, termination_model=termination_model)
        # Rebuild the optimizer over the algorithm's parameters, excluding the
        # dynamics model and target networks (those are updated separately).
        optimizer = type(optimizer)([p for (name, p) in self.algorithm.named_parameters() if (('model' not in name) and ('target' not in name))], **optimizer.defaults)
        super().__init__(policy=policy, dynamical_model=dynamical_model, reward_model=reward_model, model_optimizer=model_optimizer, termination_model=termination_model, value_function=self.algorithm.critic_target, plan_horizon=plan_horizon, plan_samples=plan_samples, plan_elites=plan_elites, model_learn_num_iter=model_learn_num_iter, model_learn_batch_size=model_learn_batch_size, bootstrap=bootstrap, max_memory=max_memory, policy_opt_num_iter=mpo_num_iter, policy_opt_batch_size=mpo_batch_size, policy_opt_gradient_steps=mpo_gradient_steps, policy_opt_target_update_frequency=mpo_target_update_frequency, policy_update_frequency=mpo_policy_update_frequency, optimizer=optimizer, sim_num_steps=sim_num_steps, sim_initial_states_num_trajectories=sim_initial_states_num_trajectories, sim_initial_dist_num_trajectories=sim_initial_dist_num_trajectories, sim_memory_num_trajectories=sim_memory_num_trajectories, sim_refresh_interval=sim_refresh_interval, sim_num_subsample=sim_num_subsample, sim_max_memory=sim_max_memory, initial_distribution=initial_distribution, thompson_sampling=thompson_sampling, gamma=gamma, exploration_steps=exploration_steps, exploration_episodes=exploration_episodes, tensorboard=tensorboard, comment=comment)

    # BUG FIX: `default` takes `cls` and is called as an alternate constructor
    # (`return cls(...)`) — the @classmethod decorator was evidently stripped.
    @classmethod
    def default(cls, environment, gamma=0.99, exploration_steps=0, exploration_episodes=0, tensorboard=False, test=False):
        """Build an MBMPOAgent with sensible defaults for `environment`.

        `test=True` shrinks iteration counts for fast smoke tests.
        """
        model = EnsembleModel(dim_state=environment.dim_state, dim_action=environment.dim_action, num_heads=5, layers=[200, 200], biased_head=False, non_linearity='ReLU', input_transform=None, deterministic=False)
        dynamical_model = TransformedModel(model, list())
        model_optimizer = Adam(dynamical_model.parameters(), lr=0.0005)
        reward_model = QuadraticReward(torch.eye(environment.dim_state[0]), torch.eye(environment.dim_action[0]), goal=environment.goal)
        policy = NNPolicy(dim_state=environment.dim_state, dim_action=environment.dim_action, layers=[100, 100], biased_head=True, non_linearity='ReLU', squashed_output=True, input_transform=None, action_scale=environment.action_scale, goal=environment.goal, deterministic=False, tau=0.005)
        value_function = NNValueFunction(dim_state=environment.dim_state, layers=[200, 200], biased_head=True, non_linearity='ReLU', input_transform=None, tau=0.005)
        optimizer = Adam(chain(policy.parameters(), value_function.parameters()), lr=0.005)
        return cls(model_optimizer, policy, value_function, dynamical_model, reward_model, optimizer, mpo_value_learning_criterion=loss.MSELoss, termination_model=None, initial_distribution=None, plan_horizon=1, plan_samples=8, plan_elites=1, max_memory=10000, model_learn_batch_size=64, model_learn_num_iter=(4 if test else 30), bootstrap=True, mpo_epsilon=0.1, mpo_epsilon_mean=0.1, mpo_epsilon_var=0.0001, mpo_regularization=False, mpo_num_iter=(5 if test else 200), mpo_gradient_steps=50, mpo_batch_size=None, mpo_num_action_samples=15, mpo_target_update_frequency=4, sim_num_steps=(5 if test else 200), sim_initial_states_num_trajectories=8, sim_initial_dist_num_trajectories=0, sim_memory_num_trajectories=0, sim_max_memory=100000, sim_num_subsample=1, sim_refresh_interval=1, thompson_sampling=False, gamma=gamma, exploration_steps=exploration_steps, exploration_episodes=exploration_episodes, tensorboard=tensorboard, comment=environment.name)
def get_single_col_by_input_type(input_type, column_definition):
    """Return the name of the unique column whose type equals `input_type`.

    `column_definition` is a sequence of tuples whose first element is the
    column name and third element is its input type. Raises ValueError unless
    exactly one column matches.
    """
    matches = []
    for definition in column_definition:
        if definition[2] == input_type:
            matches.append(definition[0])
    if len(matches) != 1:
        raise ValueError('Invalid number of columns for {}'.format(input_type))
    return matches[0]
class NetworkConnectionError(PipError):
    """HTTP connection error carrying the originating response/request."""

    def __init__(self, error_msg, response=None, request=None):
        """Store the message plus optional response/request; when only a
        response is supplied, recover the request from it."""
        self.error_msg = error_msg
        self.response = response
        self.request = request
        no_request_given = not self.request
        if self.response is not None and no_request_given and hasattr(response, 'request'):
            self.request = self.response.request
        super(NetworkConnectionError, self).__init__(error_msg, response, request)

    def __str__(self):
        """Render just the human-readable message."""
        return str(self.error_msg)
# NOTE(review): this bare tuple reads like the argument list of a Dash
# `@<app>.callback(...)` decorator whose opening line was lost in extraction —
# as written it is a no-op expression statement; restore the decorator call.
(Output('plots', 'children'), Input('tabs', 'value'), [State('local-explanation-state', 'data'), State('global-explanation-state', 'data'), State('data-explanation-state', 'data'), State('prediction-explanation-state', 'data'), State('whatif-explanation-state', 'data')])
def _click_tab(tab, local_exp_state, global_exp_state, data_exp_state, prediction_exp_state, whatif_exp_state):
    """Rebuild the layout for the selected tab, replaying its saved params.

    Returns the tab's layout, or None for an unknown tab value (matching the
    original implicit fall-through). The five original branches differed only
    in scope name, saved-state blob, layout builder and base state object, so
    they are collapsed into one dispatch table.
    """
    # tab -> (param scope, serialized params, layout builder, base state).
    # A scope of None means set_param takes (param, value) with no scope arg
    # (the what-if state's interface differs from the others).
    dispatch = {
        'local-explanation': ('local', local_exp_state, create_local_explanation_layout, board.state),
        'global-explanation': ('global', global_exp_state, create_global_explanation_layout, board.state),
        'data-explanation': ('data', data_exp_state, create_data_explanation_layout, board.state),
        'prediction-explanation': ('prediction', prediction_exp_state, create_prediction_explanation_layout, board.state),
        'what-if-explanation': (None, whatif_exp_state, create_what_if_layout, board.whatif_state),
    }
    if tab not in dispatch:
        return None
    scope, raw_state, build_layout, base_state = dispatch[tab]
    state = copy.deepcopy(base_state)
    params = json.loads(raw_state) if raw_state is not None else {}
    for param, value in params.items():
        if scope is None:
            state.set_param(param, value)
        else:
            state.set_param(scope, param, value)
    return build_layout(state)
def load_predicted_data(docs, pred_events_json, pred_entities_json):
    """Attach predicted event and entity mentions to `docs` (delegates to
    load_predicted_mentions; mutation happens there, nothing is returned)."""
    logger.info('Loading predicted mentions...')
    load_predicted_mentions(docs, pred_events_json, pred_entities_json)
class LALR_ContextualLexer(LALR_WithLexer):
    """LALR frontend whose lexer restricts candidate tokens to those the
    parser can actually accept in its current state."""

    def init_lexer(self):
        """Build a ContextualLexer keyed by the parse table's states."""
        table_states = self.parser._parse_table.states
        accepts_by_state = {
            state_id: list(transitions.keys())
            for state_id, transitions in table_states.items()
        }
        extra_accepts = self.postlex.always_accept if self.postlex else ()
        self.lexer = ContextualLexer(self.lexer_conf, accepts_by_state, always_accept=extra_accepts)
class EncodeTransforms(TransformsConfig):
    """Transform configuration for encoder training and inference pipelines."""

    def __init__(self):
        super(EncodeTransforms, self).__init__()

    def get_transforms(self):
        """Return the named torchvision pipelines.

        All pipelines share resize + tensor + normalize steps; the source
        pipeline additionally applies a random horizontal flip.
        NOTE(review): `resize_size` is read from module scope — confirm it is
        defined where this class is used.
        """
        def build_pipeline(extra_steps):
            steps = [transforms.ToPILImage(), transforms.Resize((resize_size, resize_size))]
            steps.extend(extra_steps)
            steps.extend([transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
            return transforms.Compose(steps)

        return {
            'transform_gt_train': build_pipeline([]),
            'transform_source': build_pipeline([transforms.RandomHorizontalFlip(0.5)]),
            'transform_inference': build_pipeline([]),
        }
class EmbeddingAttribute(object):
def __init__(self, user_attributes, item_attributes, mb, n_sampled, input_steps=0, item_output=False, item_ind2logit_ind=None, logit_ind2item_ind=None, indices_item=None, devices=['/gpu:0']):
self.user_attributes = user_attributes
self.item_attributes = item_attributes
self.batch_size = mb
self.n_sampled = n_sampled
self.input_steps = input_steps
self.item_output = item_output
self.num_item_features = (item_attributes.num_features_cat + item_attributes.num_features_mulhot)
self.reuse_item_tr = None
self.item_ind2logit_ind = item_ind2logit_ind
self.logit_ind2item_ind = logit_ind2item_ind
if (logit_ind2item_ind is not None):
self.logit_size = len(logit_ind2item_ind)
if (indices_item is not None):
self.indices_item = indices_item
else:
self.indices_item = range(self.logit_size)
self.mask = {}
self.zero_logits = {}
self.pos_indices = {}
self.l_true = {}
self.l_false = {}
self.devices = devices
self.att = {}
self._init_attributes(user_attributes, name='user', device=devices[0])
self._init_attributes(item_attributes, name='item', device=devices[0])
if self.item_output:
self._init_attributes(item_attributes, name='item_output', device=devices[(- 1)])
(self.user_embs_cat, self.user_embs_mulhot) = self._embedded(user_attributes, prefix='user', device=devices[0])
(self.item_embs_cat, self.item_embs_mulhot) = self._embedded(item_attributes, prefix='item', transpose=False, device=devices[0])
(self.i_biases_cat, self.i_biases_mulhot) = self._embedded_bias(item_attributes, 'item', device=devices[0])
if item_output:
(self.item_embs2_cat, self.item_embs2_mulhot) = self._embedded(item_attributes, prefix='item_output', transpose=False, device=devices[(- 1)])
(self.i_biases2_cat, self.i_biases2_mulhot) = self._embedded_bias(item_attributes, 'item_output', device=devices[(- 1)])
self.u_indices = {}
self.u_indices['input'] = self._placeholders('user', 'input', mb, device=devices[0])
self.i_indices = {}
print('construct postive/negative items/scores ')
self.i_indices['pos'] = self._placeholders('item', 'pos', mb, device=devices[0])
self.i_indices['neg'] = self._placeholders('item', 'neg', mb, device=devices[0])
print('construct mini-batch item candicate pool')
if (self.n_sampled is not None):
self.i_indices['sampled_pass'] = self._placeholders('item', 'sampled', self.n_sampled, device=devices[(- 1)])
print('construct input item')
for step in xrange(input_steps):
name_ = 'input{}'.format(step)
self.i_indices[name_] = self._placeholders('item', name_, mb, device=devices[0])
' full version'
with tf.device(devices[(- 1)]):
ia = item_attributes
print('construct full prediction layer')
(indices_cat, indices_mulhot, segids_mulhot, lengths_mulhot) = ([], [], [], [])
for i in xrange(ia.num_features_cat):
indices_cat.append(tf.constant(ia.full_cat_tr[i]))
for i in xrange(ia.num_features_mulhot):
indices_mulhot.append(tf.constant(ia.full_values_tr[i]))
segids_mulhot.append(tf.constant(ia.full_segids_tr[i]))
lengths_mulhot.append(tf.constant(ia.full_lengths_tr[i]))
self.i_indices['full'] = (indices_cat, indices_mulhot, segids_mulhot, lengths_mulhot)
' sampled version '
print('sampled prediction layer')
if (self.n_sampled is not None):
prefix = ('item_output' if self.item_output else 'item')
self.i_indices['sampled'] = self._var_indices(self.n_sampled, device=devices[(- 1)])
self.update_sampled = self._pass_sampled_items(prefix, device=devices[(- 1)])
return
def _var_indices(self, size, name='sampled', opt='item', device='/gpu:0'):
(cat_indices, mulhot_indices, mulhot_segids, mulhot_lengths) = ([], [], [], [])
att = self.item_attributes
with tf.device(device):
init_int32 = tf.constant(0)
for i in xrange(att.num_features_cat):
cat_indices.append(tf.get_variable(dtype=tf.int32, name='var{}_{}_cat_ind_{}'.format(opt, name, i), trainable=False, initializer=tf.zeros([size], dtype=tf.int32)))
for i in xrange(att.num_features_mulhot):
l1 = len(att.full_values_tr[i])
mulhot_indices.append(tf.get_variable(dtype=tf.int32, trainable=False, initializer=tf.zeros([l1], dtype=tf.int32), name='var{}_{}_mulhot_ind_{}'.format(opt, name, i)))
l2 = len(att.full_segids_tr[i])
assert (l1 == l2), ('length of indices/segids should be the same %d/%d' % (l1, l2))
mulhot_segids.append(tf.get_variable(dtype=tf.int32, trainable=False, initializer=tf.zeros([l2], dtype=tf.int32), name='var{}_{}_mulhot_seg_{}'.format(opt, name, i)))
mulhot_lengths.append(tf.get_variable(dtype=tf.float32, shape=[size, 1], name='var{}_{}_mulhot_len_{}'.format(opt, name, i), trainable=False))
return (cat_indices, mulhot_indices, mulhot_segids, mulhot_lengths)
def _placeholders(self, opt, name, size, device='/gpu:0'):
    """Create an int32 index placeholder of length `size` on `device`."""
    placeholder_name = '{}_{}_ind'.format(opt, name)
    with tf.device(device):
        return tf.placeholder(tf.int32, shape=[size], name=placeholder_name)
def get_prediction(self, latent, pool='full', device='/gpu:0', output_feat=1):
    """Score user latent vector(s) against an item pool and return logits.

    pool: 'full' scores every item (V = self.logit_size); 'sampled' scores
        only the current sampled pool (valid multi-hot prefixes are tracked
        in self.sampled_mulhot_l).
    output_feat: 0 = use only the first categorical feature;
        1 = mean-pool multi-hot scores; 2 = segment max; 3 = log-sum-exp
        pooling (max-shifted for numerical stability).
    Returns the transpose of the mean over per-feature inner products,
    i.e. a [batch, V] logit matrix.
    """
    with tf.device(device):
        out_layer = self.i_indices[pool]
        (indices_cat, indices_mulhot, segids_mulhot, lengths_mulhot) = out_layer
        innerps = []
        # Number of categorical / multi-hot features actually scored.
        n1 = (1 if (output_feat == 0) else self.item_attributes.num_features_cat)
        n2 = (0 if (output_feat == 0) else self.item_attributes.num_features_mulhot)
        for i in xrange(n1):
            # item_output selects the secondary ("output") embedding tables.
            item_emb_cat = (self.item_embs2_cat[i] if self.item_output else self.item_embs_cat[i])
            i_biases_cat = (self.i_biases2_cat[i] if self.item_output else self.i_biases_cat[i])
            # latent may be a per-feature list or a single shared tensor.
            u = (latent[i] if isinstance(latent, list) else latent)
            inds = indices_cat[i]
            innerp = (tf.matmul(item_emb_cat, tf.transpose(u)) + i_biases_cat)
            innerps.append(lookup(innerp, inds))
        offset = self.item_attributes.num_features_cat
        for i in xrange(n2):
            item_embs_mulhot = (self.item_embs2_mulhot[i] if self.item_output else self.item_embs_mulhot[i])
            item_biases_mulhot = (self.i_biases2_mulhot[i] if self.item_output else self.i_biases_mulhot[i])
            u = (latent[(i + offset)] if isinstance(latent, list) else latent)
            lengs = lengths_mulhot[i]
            if (pool == 'full'):
                inds = indices_mulhot[i]
                segids = segids_mulhot[i]
                V = self.logit_size
            else:
                # Sampled pool: only the first sampled_mulhot_l[i] entries of
                # the oversized buffers are valid.
                inds = tf.slice(indices_mulhot[i], [0], [self.sampled_mulhot_l[i]])
                segids = tf.slice(segids_mulhot[i], [0], [self.sampled_mulhot_l[i]])
                V = self.n_sampled
            innerp = tf.add(tf.matmul(item_embs_mulhot, tf.transpose(u)), item_biases_mulhot)
            if (output_feat == 1):
                # mean over each item's multi-hot values
                innerps.append(tf.div(tf.unsorted_segment_sum(lookup(innerp, inds), segids, V), lengs))
            elif (output_feat == 2):
                innerps.append(tf.segment_max(lookup(innerp, inds), segids))
            elif (output_feat == 3):
                # log-sum-exp pooling, shifted by the global max for stability
                score_max = tf.reduce_max(innerp)
                innerp = tf.subtract(innerp, score_max)
                innerps.append((score_max + tf.log((1 + tf.unsorted_segment_sum(tf.exp(lookup(innerp, inds)), segids, V)))))
            else:
                print('Error: Attribute combination not implemented!')
                exit(1)
        logits = tf.transpose(tf.reduce_mean(innerps, 0))
    return logits
def get_target_score(self, latent, inds, device='/gpu:0'):
    """Score each (user latent, target item) pair in the batch.

    inds selects the target items; returns a [batch_size] tensor of
    dot(latent, mean item embedding) + item bias.
    """
    # item_output selects the secondary ("output") embedding/bias tables.
    item_emb_cat = (self.item_embs2_cat if self.item_output else self.item_embs_cat)
    i_biases_cat = (self.i_biases2_cat if self.item_output else self.i_biases_cat)
    item_embs_mulhot = (self.item_embs2_mulhot if self.item_output else self.item_embs_mulhot)
    item_biases_mulhot = (self.i_biases2_mulhot if self.item_output else self.i_biases_mulhot)
    (cat_l, mulhot_l, i_bias) = self._get_embedded(item_emb_cat, item_embs_mulhot, i_biases_cat, item_biases_mulhot, inds, self.batch_size, self.item_attributes, 'item', concatenation=False, device=device)
    with tf.device(device):
        # Average the per-feature embeddings into one vector per item.
        target_item_emb = tf.reduce_mean((cat_l + mulhot_l), 0)
        return (tf.reduce_sum(tf.multiply(latent, target_item_emb), 1) + i_bias)
def get_batch_user(self, keep_prob, concat=True, no_id=False, device='/gpu:0'):
    """Embed the current user batch and apply dropout.

    Returns (embedded_user, user_b); user biases are not requested here,
    so user_b comes back from _get_embedded with no bias tables.
    """
    u_inds = self.u_indices['input']
    with tf.device(device):
        if not concat:
            (user_cat, user_mulhot, user_b) = self._get_embedded(self.user_embs_cat, self.user_embs_mulhot, b_cat=None, b_mulhot=None, inds=u_inds, mb=self.batch_size, attributes=self.user_attributes, prefix='user', concatenation=concat, no_id=no_id, device=device)
            # average the per-feature embeddings into one vector per user
            embedded_user = tf.reduce_mean((user_cat + user_mulhot), 0)
        else:
            (embedded_user, user_b) = self._get_embedded(self.user_embs_cat, self.user_embs_mulhot, b_cat=None, b_mulhot=None, inds=u_inds, mb=self.batch_size, attributes=self.user_attributes, prefix='user', concatenation=concat, no_id=no_id, device=device)
        embedded_user = tf.nn.dropout(embedded_user, keep_prob)
    return (embedded_user, user_b)
def get_batch_item(self, name, batch_size, concat=False, keep_prob=1.0, no_attribute=False, device='/gpu:0'):
    """Embed the item batch registered under `name`.

    Returns the concatenated (embedding, bias) pair when `concat`, else
    (cat_list + mulhot_list, bias).
    """
    assert (name in self.i_indices)
    assert (keep_prob == 1.0), 'otherwise not implemented'
    i_inds = self.i_indices[name]
    embedded = self._get_embedded(self.item_embs_cat, self.item_embs_mulhot, self.i_biases_cat, self.i_biases_mulhot, i_inds, batch_size, self.item_attributes, 'item', concat, no_attribute=no_attribute, device=device)
    if concat:
        return embedded
    (item_cat, item_mulhot, item_b) = embedded
    return ((item_cat + item_mulhot), item_b)
def get_sampled_item(self, n_sampled, device='/gpu:0'):
    """Embed the currently sampled item pool.

    Returns (mean over per-feature embeddings, bias).
    """
    mapping = self.i_indices['sampled']
    with tf.device(device):
        (cat_part, mulhot_part, bias) = self._get_embedded_sampled(self.item_embs_cat, self.item_embs_mulhot, self.i_biases_cat, self.i_biases_mulhot, mapping, n_sampled, self.item_attributes)
        averaged = tf.reduce_mean((cat_part + mulhot_part), 0)
        return (averaged, bias)
def _embedded(self, attributes, prefix='', transpose=False, device='/gpu:0'):
    """Create the embedding matrices for every categorical and multi-hot
    feature of `attributes`.

    Each matrix is [V, d] (classes x embedding size), or [d, V] when
    `transpose` is set. Returns (embs_cat, embs_mulhot).
    """
    embs_cat = []
    embs_mulhot = []
    with tf.device(device):
        for i in xrange(attributes.num_features_cat):
            d = attributes._embedding_size_list_cat[i]
            V = attributes._embedding_classes_list_cat[i]
            shape = ([d, V] if transpose else [V, d])
            embs_cat.append(tf.get_variable(name=(prefix + 'embed_cat_{0}'.format(i)), shape=shape, dtype=tf.float32))
        for i in xrange(attributes.num_features_mulhot):
            d = attributes._embedding_size_list_mulhot[i]
            V = attributes._embedding_classes_list_mulhot[i]
            shape = ([d, V] if transpose else [V, d])
            embs_mulhot.append(tf.get_variable(name=(prefix + 'embed_mulhot_{0}'.format(i)), shape=shape, dtype=tf.float32))
    return (embs_cat, embs_mulhot)
def _embedded_bias(self, attributes, prefix, device='/gpu:0'):
    """Create one [V, 1] bias variable per categorical and multi-hot
    feature; returns (biases_cat, biases_mulhot)."""
    with tf.device(device):
        biases_cat = [tf.get_variable((prefix + '_bias_cat_{0}'.format(i)), [attributes._embedding_classes_list_cat[i], 1], dtype=tf.float32) for i in range(attributes.num_features_cat)]
        biases_mulhot = [tf.get_variable((prefix + '_bias_mulhot_{0}'.format(i)), [attributes._embedding_classes_list_mulhot[i], 1], dtype=tf.float32) for i in range(attributes.num_features_mulhot)]
    return (biases_cat, biases_mulhot)
def _init_attributes(self, att, name='user', device='/gpu:0'):
    """Load the raw attribute arrays of `att` into graph constants and
    cache the 4-tuple under self.att[name]."""
    with tf.device(device):
        features_cat = [tf.constant(att.features_cat[i], dtype=tf.int32) for i in range(att.num_features_cat)]
        features_mulhot = []
        mulhot_starts = []
        mulhot_lengths = []
        for i in range(att.num_features_mulhot):
            features_mulhot.append(tf.constant(att.features_mulhot[i], dtype=tf.int32))
            mulhot_starts.append(tf.constant(att.mulhot_starts[i], dtype=tf.int32))
            mulhot_lengths.append(tf.constant(att.mulhot_lengths[i], dtype=tf.int32))
    self.att[name] = (features_cat, features_mulhot, mulhot_starts, mulhot_lengths)
def _pass_sampled_items(self, prefix='item', device='/gpu:0'):
    """Build ops that refresh the 'sampled' index variables from the item
    ids fed through i_indices['sampled_pass'].

    Returns a list of assign/scatter ops to run after each resample. Also
    creates self.sampled_mulhot_l: per-feature int32 variables recording
    how much of each oversized multi-hot buffer is valid.
    """
    self.sampled_mulhot_l = []
    res = []
    var_s = self.i_indices['sampled']
    att = self.item_attributes
    inds = self.i_indices['sampled_pass']
    with tf.device(device):
        for i in xrange(att.num_features_cat):
            vals = lookup(self.att[prefix][0][i], inds)
            res.append(tf.assign(var_s[0][i], vals))
        for i in xrange(att.num_features_mulhot):
            # start offsets and value counts of the sampled items
            begin_ = lookup(self.att[prefix][2][i], inds)
            size_ = lookup(self.att[prefix][3][i], inds)
            b = tf.unstack(begin_)
            s = tf.unstack(size_)
            mulhot_indices = batch_slice2(self.att[prefix][1][i], b, s, self.n_sampled)
            mulhot_segids = batch_segids2(s, self.n_sampled)
            l0 = tf.reduce_sum(size_)  # total number of valid buffer entries
            indices = tf.range(l0)
            # overwrite only the valid prefix of the oversized buffers
            res.append(tf.scatter_update(var_s[1][i], indices, mulhot_indices))
            res.append(tf.scatter_update(var_s[2][i], indices, mulhot_segids))
            res.append(tf.assign(var_s[3][i], tf.reshape(tf.to_float(size_), [self.n_sampled, 1])))
            # record the valid prefix length for later tf.slice reads
            l = tf.get_variable(name='sampled_l_mulhot_{}'.format(i), dtype=tf.int32, initializer=tf.constant(0), trainable=False)
            self.sampled_mulhot_l.append(l)
            res.append(tf.assign(l, l0))
    return res
def _get_embedded(self, embs_cat, embs_mulhot, b_cat, b_mulhot, inds, mb, attributes, prefix='', concatenation=True, no_id=False, no_attribute=False, device='/gpu:0'):
    """Look up and pool embeddings (and optional biases) for the mb
    entities selected by `inds`.

    Returns (embedded, bias) when `concatenation`, otherwise
    (cat_list, mulhot_list, bias); bias is None when no bias tables are
    given. Multi-hot features are mean-pooled via unsorted_segment_sum
    divided by each entity's value count. With `no_id` the id feature
    (feature 0) is skipped; with `no_attribute` only the first
    categorical feature is used.
    """
    (cat_list, mulhot_list) = ([], [])
    (bias_cat_list, bias_mulhot_list) = ([], [])
    with tf.device(device):
        # Degenerate case: id is the only categorical feature but ids are
        # suppressed -> return an all-zero embedding of the id's size.
        if (no_id and (attributes.num_features_cat == 1)):
            if ((b_cat is not None) or (b_mulhot is not None)):
                print('error: not implemented')
                exit()
            bias = None
            dim = attributes._embedding_size_list_cat[0]
            cat_list = [tf.zeros([mb, dim], dtype=tf.float32)]
            if concatenation:
                return (cat_list[0], bias)
            else:
                return (cat_list, [], bias)
        n1 = (1 if no_attribute else attributes.num_features_cat)
        n2 = (0 if no_attribute else attributes.num_features_mulhot)
        for i in xrange(n1):
            if (no_id and (i == 0)):  # feature 0 is the id feature
                continue
            cat_indices = lookup(self.att[prefix][0][i], inds)
            embedded = lookup(embs_cat[i], cat_indices, name='emb_lookup_item_{0}'.format(i))
            cat_list.append(embedded)
            if (b_cat is not None):
                b = lookup(b_cat[i], cat_indices, name='emb_lookup_item_b_{0}'.format(i))
                bias_cat_list.append(b)
        for i in xrange(n2):
            # self.att[prefix] = (cat_vals, mulhot_vals, starts, lengths)
            begin_ = lookup(self.att[prefix][2][i], inds)
            size_ = lookup(self.att[prefix][3][i], inds)
            b = tf.unstack(begin_)
            s = tf.unstack(size_)
            mulhot_indices = batch_slice2(self.att[prefix][1][i], b, s, mb)
            mulhot_segids = batch_segids2(s, mb)
            embedded_flat = lookup(embs_mulhot[i], mulhot_indices)
            embedded_sum = tf.unsorted_segment_sum(embedded_flat, mulhot_segids, mb)
            lengs = tf.reshape(tf.to_float(size_), [mb, 1])
            embedded = tf.div(embedded_sum, lengs)  # mean pooling
            mulhot_list.append(embedded)
            if (b_mulhot is not None):
                b_embedded_flat = lookup(b_mulhot[i], mulhot_indices)
                b_embedded_sum = tf.unsorted_segment_sum(b_embedded_flat, mulhot_segids, mb)
                b_embedded = tf.div(b_embedded_sum, lengs)
                bias_mulhot_list.append(b_embedded)
        if ((b_cat is None) and (b_mulhot is None)):
            bias = None
        else:
            # average the per-feature biases into one scalar per entity
            bias = tf.squeeze(tf.reduce_mean((bias_cat_list + bias_mulhot_list), 0))
        if concatenation:
            return (concat_versions(1, (cat_list + mulhot_list)), bias)
        else:
            return (cat_list, mulhot_list, bias)
def _get_embedded2(self, embs_cat, embs_mulhot, b_cat, b_mulhot, inds, mb, attributes, prefix='', concatenation=True, no_id=False, device='/gpu:0'):
    """Per-example variant of _get_embedded (same contract, minus the
    no_attribute option).

    Multi-hot features are pooled with an explicit Python loop over the mb
    examples (tf.slice + reduce_mean per example) instead of segment sums.
    """
    (cat_list, mulhot_list) = ([], [])
    (bias_cat_list, bias_mulhot_list) = ([], [])
    with tf.device(device):
        # Degenerate case: id-only categorical features with ids suppressed.
        if (no_id and (attributes.num_features_cat == 1)):
            if ((b_cat is not None) or (b_mulhot is not None)):
                print('error: not implemented')
                exit()
            bias = None
            dim = attributes._embedding_size_list_cat[0]
            cat_list = [tf.zeros([mb, dim], dtype=tf.float32)]
            if concatenation:
                return (cat_list[0], bias)
            else:
                return (cat_list, [], bias)
        for i in xrange(attributes.num_features_cat):
            if (no_id and (i == 0)):  # feature 0 is the id feature
                continue
            cat_indices = lookup(self.att[prefix][0][i], inds)
            embedded = lookup(embs_cat[i], cat_indices, name='emb_lookup_item_{0}'.format(i))
            cat_list.append(embedded)
            if (b_cat is not None):
                b = lookup(b_cat[i], cat_indices, name='emb_lookup_item_b_{0}'.format(i))
                bias_cat_list.append(b)
        for i in xrange(attributes.num_features_mulhot):
            begin_ = tf.unstack(lookup(self.att[prefix][2][i], inds))
            size_ = tf.unstack(lookup(self.att[prefix][3][i], inds))
            mulhot_i = []
            b_mulhot_i = []
            for j in xrange(mb):
                b = begin_[j]
                s = size_[j]
                # slice this example's values out of the flat value array
                m_inds = tf.slice(self.att[prefix][1][i], [b], [s])
                mulhot_i.append(tf.reduce_mean(lookup(embs_mulhot[i], m_inds), 0))
                if (b_mulhot is not None):
                    b_mulhot_i.append(tf.reduce_mean(lookup(b_mulhot[i], m_inds), 0))
            mulhot_list.append(tf.stack(mulhot_i))
            if (b_mulhot is not None):
                bias_mulhot_list.append(tf.stack(b_mulhot_i))
        if ((b_cat is None) and (b_mulhot is None)):
            bias = None
        else:
            bias = tf.squeeze(tf.reduce_mean((bias_cat_list + bias_mulhot_list), 0))
        if concatenation:
            return (concat_versions(1, (cat_list + mulhot_list)), bias)
        else:
            return (cat_list, mulhot_list, bias)
def _get_embedded_sampled(self, embs_cat, embs_mulhot, b_cat, b_mulhot, mappings, n_sampled, attributes, device='/gpu:0'):
    """Pool embeddings for the pre-sampled item pool.

    `mappings` are the variables created by _var_indices and refreshed by
    _pass_sampled_items; only the first self.sampled_mulhot_l[i] entries
    of each multi-hot buffer are valid, hence the tf.slice prefixes.
    Returns (cat_list, mulhot_list, bias); bias is None without bias tables.
    """
    (cat_indices, mulhot_indices, mulhot_segids, mulhot_lengths) = mappings
    (cat_list, mulhot_list) = ([], [])
    (bias_cat_list, bias_mulhot_list) = ([], [])
    with tf.device(device):
        for i in xrange(attributes.num_features_cat):
            embedded = lookup(embs_cat[i], cat_indices[i])
            cat_list.append(embedded)
            if (b_cat is not None):
                b = lookup(b_cat[i], cat_indices[i])
                bias_cat_list.append(b)
        for i in xrange(attributes.num_features_mulhot):
            # only the valid prefix of the oversized buffers
            inds = tf.slice(mulhot_indices[i], [0], [self.sampled_mulhot_l[i]])
            segids = tf.slice(mulhot_segids[i], [0], [self.sampled_mulhot_l[i]])
            embedded_flat = lookup(embs_mulhot[i], inds)
            embedded_sum = tf.unsorted_segment_sum(embedded_flat, segids, n_sampled)
            embedded = tf.div(embedded_sum, mulhot_lengths[i])  # mean pooling
            mulhot_list.append(embedded)
            if (b_mulhot is not None):
                b_embedded_flat = lookup(b_mulhot[i], inds)
                b_embedded_sum = tf.unsorted_segment_sum(b_embedded_flat, segids, n_sampled)
                b_embedded = tf.div(b_embedded_sum, mulhot_lengths[i])
                bias_mulhot_list.append(b_embedded)
        if ((b_cat is None) and (b_mulhot is None)):
            bias = None
        else:
            bias = tf.squeeze(tf.reduce_mean((bias_cat_list + bias_mulhot_list), 0))
    return (cat_list, mulhot_list, bias)
def get_user_model_size(self, no_id=False, concat=True):
    """Return the dimensionality of the user representation.

    When `concat`, features are concatenated, so the size is the sum of
    all categorical embedding sizes (skipping the id feature when
    `no_id`) plus all multi-hot embedding sizes. Otherwise features are
    averaged, so the size is that of the first categorical embedding.
    """
    ua = self.user_attributes
    if not concat:  # was `concat == True` — compare truthiness, not identity to True
        return ua._embedding_size_list_cat[0]
    cat_start = (1 if no_id else 0)
    cat_total = sum(ua._embedding_size_list_cat[cat_start:ua.num_features_cat])
    mulhot_total = sum(ua._embedding_size_list_mulhot[:ua.num_features_mulhot])
    return (cat_total + mulhot_total)
def get_item_model_size(self, concat=True):
    """Return the dimensionality of the item representation: the sum of
    all embedding sizes when features are concatenated, otherwise the
    size of the first categorical embedding."""
    ia = self.item_attributes
    if not concat:
        return ia._embedding_size_list_cat[0]
    cat_total = sum(ia._embedding_size_list_cat[0:ia.num_features_cat])
    mulhot_total = sum(ia._embedding_size_list_mulhot[0:ia.num_features_mulhot])
    return (cat_total + mulhot_total)
def compute_loss(self, logits, item_target, loss='ce', true_rank=False, loss_func='log', exp_p=1.005, device='/gpu:0'):
    """Dispatch to the selected training/evaluation loss.

    logits: [mb, V] score matrix (V = full vocab, or the sampled pool for
        'mw'); item_target: target column indices (for 'mw', target scores).
    NOTE(review): 'mce' passes the assert but has no branch below, so it
    falls into the error/exit path — confirm intended handling.
    """
    assert (loss in ['ce', 'mce', 'warp', 'warp_eval', 'rs', 'rs-sig', 'rs-sig2', 'mw', 'bbpr', 'bpr', 'bpr-hinge'])
    with tf.device(device):
        if (loss == 'ce'):
            return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=item_target)
        elif (loss in ['rs', 'rs-sig', 'rs-sig2', 'bbpr']):
            return self._compute_rs_loss(logits, item_target, loss=loss, tr=true_rank, loss_func=loss_func, exp_p=exp_p)
        elif (loss == 'warp'):
            return self._compute_warp_loss(logits, item_target)
        elif (loss == 'mw'):
            return self._compute_mw_loss(logits, item_target)
        elif (loss == 'bpr'):
            # softplus of the given scores; presumably pairwise score
            # differences from the caller — confirm
            return tf.log((1 + tf.exp(logits)))
        elif (loss == 'bpr-hinge'):
            return tf.maximum((1 + logits), 0)
        elif (loss == 'warp_eval'):
            return self._compute_warp_eval_loss(logits, item_target)
        else:
            print('Error: not implemented other loss!!')
            exit(1)
def _compute_rs_loss(self, logits, item_target, loss='rs', loss_func='log', exp_p=1.005, tr=False):
    """Rank-sensitive / BPR-style listwise losses over the full logit matrix.

    For each row, compares every item's score against the target item's
    score, masks out the user's known positive items (self.mask[loss]),
    and aggregates the per-item errors with `loss_func`. With `tr`,
    returns [errors_masked, true_rank] instead of the scalar-per-row loss.

    Raises:
        ValueError: if `loss_func` is not one of the supported names
            (previously an unknown loss_func fell through and the final
            `return l` raised UnboundLocalError).
    """
    assert (loss in ['rs', 'rs-sig', 'bbpr', 'rs-sig2'])
    if (loss not in self.mask):
        self._prepare_loss_vars(loss)
    V = self.logit_size
    mb = self.batch_size
    # Gather the target item's score for each row, shaped [mb, 1] so it
    # broadcasts against the full [mb, V] logit matrix.
    flat_matrix = tf.reshape(logits, [(- 1)])
    idx_flattened = (self.idx_flattened0 + item_target)
    target_logits = tf.gather(flat_matrix, idx_flattened)
    target_logits = tf.reshape(target_logits, [mb, 1])
    if (loss in ['rs', 'rs-sig']):
        errors = (tf.subtract(logits, target_logits) + 1)  # hinge with margin 1
        errors = tf.nn.relu(errors)
    elif (loss in ['bbpr', 'rs-sig2']):
        errors = tf.sigmoid(tf.subtract(logits, target_logits))
    # zero out the user's known positive items
    mask2 = tf.reshape(self.mask[loss], [mb, V])
    errors_masked = tf.where(mask2, errors, self.zero_logits[loss])
    if (loss in ['rs-sig']):
        # squash to (-1, 1); masked zeros stay at sigmoid(0)*2-1 = 0
        errors_masked = tf.sigmoid(errors_masked)
        errors_masked = ((errors_masked * 2) - 1)
    if (loss in ['rs', 'rs-sig', 'rs-sig2']):
        if (loss_func == 'log'):
            l = tf.log((1 + tf.reduce_sum(errors_masked, 1)))
        elif (loss_func == 'exp'):
            l = (1 - tf.pow(exp_p, (- tf.reduce_sum(errors_masked, 1))))
        elif (loss_func == 'poly'):
            l = tf.pow(tf.reduce_sum(errors_masked, 1), exp_p)
        elif (loss_func == 'poly2'):
            l = tf.pow((1 + tf.reduce_sum(errors_masked, 1)), exp_p)
        elif (loss_func == 'linear'):
            l = tf.reduce_sum(errors_masked, 1)
        elif (loss_func == 'square'):
            l = tf.square(tf.reduce_sum(errors_masked, 1))
        else:
            # BUGFIX: fail loudly instead of reaching `return l` unbound.
            raise ValueError('unknown loss_func: {!r}'.format(loss_func))
    elif (loss in ['bbpr']):
        l = tf.reduce_sum(errors_masked, 1)
    if tr:
        # rank without the margin: count of items scoring above the target
        errors_nomargin = tf.nn.relu(tf.subtract(logits, target_logits))
        errors_nomargin_masked = tf.where(mask2, errors_nomargin, self.zero_logits[loss])
        true_rank = tf.count_nonzero(errors_nomargin_masked, 1)
        return [errors_masked, true_rank]
    return l
def _compute_warp_loss(self, logits, item_target):
    """WARP-style loss: log(1 + sum of positive margins over items that
    are not the user's known positives)."""
    loss = 'warp'
    if (loss not in self.mask):
        self._prepare_loss_vars(loss)
    V = self.logit_size
    mb = self.batch_size
    # target item's score per row, [mb, 1] for broadcasting
    flat = tf.reshape(logits, [(- 1)])
    target_scores = tf.reshape(tf.gather(flat, (self.idx_flattened0 + item_target)), [mb, 1])
    margins = (tf.subtract(logits, target_scores) + 1)
    keep = tf.reshape(self.mask[loss], [mb, V])
    masked = tf.where(keep, margins, self.zero_logits[loss])
    return tf.log((1 + tf.reduce_sum(tf.nn.relu(masked), 1)))
def _compute_warp_eval_loss(self, logits, item_target):
    """Evaluation variant of the WARP loss.

    Returns [margin_rank, true_rank]: the summed positive margins and the
    count of non-positive items scoring strictly above the target.
    """
    loss = 'warp_eval'
    if (loss not in self.mask):
        self._prepare_loss_vars(loss)
    V = self.logit_size
    mb = self.batch_size
    flat = tf.reshape(logits, [(- 1)])
    target_scores = tf.reshape(tf.gather(flat, (self.idx_flattened0 + item_target)), [mb, 1])
    keep = tf.reshape(self.mask[loss], [mb, V])
    with_margin = tf.where(keep, (tf.subtract(logits, target_scores) + 1), self.zero_logits[loss])
    margin_rank = tf.reduce_sum(tf.nn.relu(with_margin), 1)
    no_margin = tf.where(keep, tf.nn.relu(tf.subtract(logits, target_scores)), self.zero_logits[loss])
    true_rank = tf.count_nonzero(no_margin, 1)
    return [margin_rank, true_rank]
def _compute_mw_loss(self, logits, item_target):
    """WARP loss over the sampled-item logits; here item_target carries the
    target scores themselves rather than column indices."""
    if ('mw' not in self.mask):
        self._prepare_loss_vars('mw')
    V = self.n_sampled
    mb = self.batch_size
    margins = (tf.subtract(logits, tf.reshape(item_target, [mb, 1])) + 1)
    keep = tf.reshape(self.mask['mw'], [mb, V])
    masked = tf.where(keep, margins, self.zero_logits['mw'])
    return tf.log((1 + tf.reduce_sum(tf.nn.relu(masked), 1)))
def _prepare_loss_vars(self, loss='warp'):
    """Lazily create the mask variable, zero matrix and mask-update
    placeholders for `loss` (V = n_sampled for 'mw', else full logit size).
    """
    V = (self.n_sampled if (loss == 'mw') else self.logit_size)
    mb = self.batch_size
    # flattened row offsets: row r of a [mb, V] matrix starts at r * V
    self.idx_flattened0 = (tf.range(0, mb) * V)
    # ([True] * V) * mb is a flat list of V*mb Trues, reshaped at use time
    self.mask[loss] = tf.Variable((([True] * V) * mb), dtype=tf.bool, trainable=False)
    self.zero_logits[loss] = tf.constant(([([0.0] * V)] * mb))
    # fed by add_input: flat indices of each user's positive items
    self.pos_indices[loss] = tf.placeholder(tf.int32, shape=[None])
    self.l_true[loss] = tf.placeholder(tf.bool, shape=[None], name='l_true')
    self.l_false[loss] = tf.placeholder(tf.bool, shape=[None], name='l_false')
def get_warp_mask(self, device='/gpu:0'):
    """Build scatter ops that clear (set_mask) and restore (reset_mask)
    mask entries for every loss that has been prepared so far.

    Returns (self.set_mask, self.reset_mask), dicts keyed by loss name.
    """
    self.set_mask = {}
    self.reset_mask = {}
    with tf.device(device):
        for loss in ['mw', 'warp', 'warp_eval', 'rs', 'rs-sig', 'rs-sig2', 'bbpr']:
            if (loss in self.mask):
                self.set_mask[loss] = tf.scatter_update(self.mask[loss], self.pos_indices[loss], self.l_false[loss])
                self.reset_mask[loss] = tf.scatter_update(self.mask[loss], self.pos_indices[loss], self.l_true[loss])
    return (self.set_mask, self.reset_mask)
def prepare_warp(self, pos_item_set, pos_item_set_eval):
    """Cache the per-user positive-item sets (train and eval) used when
    building WARP masks in add_input."""
    self.pos_item_set, self.pos_item_set_eval = pos_item_set, pos_item_set_eval
def target_mapping(self, item_target):
    """Translate nested lists of item indices into logit indices via
    self.item_ind2logit_ind; preserves the nesting structure."""
    m = self.item_ind2logit_ind
    return [[m[v] for v in items] for items in item_target]
def _add_input(self, input_feed, opt, input_, name_):
if (opt == 'user'):
att = self.user_attributes
mappings = self.u_indices[name_]
elif (opt == 'item'):
att = self.item_attributes
mappings = self.i_indices[name_]
else:
exit((- 1))
input_feed[mappings.name] = input_
def add_input(self, input_feed, user_input, item_input, neg_item_input=None, item_sampled=None, item_sampled_id2idx=None, forward_only=False, recommend=False, loss=None):
    """Populate the feed dicts required for one step.

    Returns (update_sampled, input_feed_sampled, input_feed_warp):
    update_sampled — ops refreshing the sampled-item variables (or []);
    input_feed_sampled — feed dict for those ops;
    input_feed_warp — feed dict masking each user's known positive items
        out of the loss, as flat offsets into the [mb, V] logit matrix.
    NOTE(review): neg_item_input is accepted but unused here — confirm.
    """
    if (self.user_attributes is not None):
        self._add_input(input_feed, 'user', user_input, 'input')
    if ((self.item_attributes is not None) and (recommend is False) and (self.input_steps > 0)) if False else ((self.item_attributes is not None) and (self.input_steps > 0)):
        for step in range(len(item_input)):
            self._add_input(input_feed, 'item', item_input[step], 'input{}'.format(step))
    input_feed_sampled = {}
    update_sampled = []
    # refresh the sampled-item index variables for sampled losses
    if ((self.item_attributes is not None) and (recommend is False) and (item_sampled is not None) and (loss in ['mw', 'mce'])):
        self._add_input(input_feed_sampled, 'item', item_sampled, 'sampled_pass')
        update_sampled = self.update_sampled
    input_feed_warp = {}
    if ((loss in ['warp', 'warp_eval', 'mw', 'rs', 'rs-sig', 'rs-sig2', 'bbpr']) and (recommend is False)):
        # V columns per user row in the flattened [mb, V] logit matrix
        V = (self.n_sampled if (loss == 'mw') else self.logit_size)
        (mask_indices, c) = ([], 0)
        # map raw item ids to column indices: full vocab vs sampled pool
        s_2idx = (self.item_ind2logit_ind if (loss in ['warp', 'warp_eval', 'rs', 'rs-sig', 'rs-sig2', 'bbpr']) else item_sampled_id2idx)
        item_set = (self.pos_item_set_eval if forward_only else self.pos_item_set)
        if (loss in ['warp', 'warp_eval', 'bbpr', 'rs', 'rs-sig', 'rs-sig2']):
            for u in user_input:
                offset = (c * V)
                if (u in item_set):
                    mask_indices.extend([(s_2idx[v] + offset) for v in item_set[u]])
                c += 1
        else:
            # sampled pool ('mw'): a positive item may be absent from the
            # sample, so guard the lookup
            for u in user_input:
                offset = (c * V)
                if (u in item_set):
                    mask_indices.extend([(s_2idx[v] + offset) for v in item_set[u] if (v in s_2idx)])
                c += 1
        L = len(mask_indices)
        input_feed_warp[self.pos_indices[loss].name] = mask_indices
        input_feed_warp[self.l_false[loss].name] = ([False] * L)
        input_feed_warp[self.l_true[loss].name] = ([True] * L)
    return (update_sampled, input_feed_sampled, input_feed_warp)
class Writer(Manager):
    """Write C3D motion-capture files.

    A C3D file is organized in 512-byte blocks: a one-block header,
    parameter (metadata) blocks, then frame data. NOTE(review): this code
    is Python 2 only (dict.iteritems, iterator.next, str written straight
    to the handle); the handle is presumably opened in binary mode.
    """

    def __init__(self, handle):
        # handle: seekable, writable file-like object receiving the C3D data
        super(Writer, self).__init__()
        self._handle = handle

    def _pad_block(self):
        """Pad the output with zero bytes up to the next 512-byte boundary."""
        extra = (self._handle.tell() % 512)
        if extra:
            logging.debug('padding with %d zeros', (512 - extra))
            self._handle.write(('\x00' * (512 - extra)))

    def write_metadata(self):
        """Write the header block followed by all parameter groups."""
        self.header.write(self._handle)
        self._pad_block()
        assert (self._handle.tell() == 512)
        logging.debug('produced %d bytes of header data', self._handle.tell())
        # parameter-section prologue: two reserved bytes, block count, 84
        self._handle.write(struct.pack('BBBB', 0, 0, self.parameter_blocks(), 84))
        # only integer-keyed entries are real groups; write them in id order
        id_groups = sorted(((i, g) for (i, g) in self.groups() if isinstance(i, int)))
        for (group_id, group) in id_groups:
            self._write_group(group_id, group)
        self._pad_block()
        # zero-fill up to the data block promised by the header
        while (self._handle.tell() != (512 * (self.header.data_block - 1))):
            self._handle.write(('\x00' * 512))
        logging.debug('produced %d bytes of metadata', self._handle.tell())

    def _write_group(self, group_id, group):
        """Serialize one parameter group followed by its parameters."""
        logging.info('writing C3D parameter group #%d: %s: %s', group_id, group.name, group.desc)
        # groups are tagged with a NEGATIVE id; parameters use the positive id
        self._handle.write(struct.pack('bb', len(group.name), (- group_id)))
        self._handle.write(group.name)
        # offset to the next item: 2-byte pointer + 1-byte length + desc
        self._handle.write(struct.pack('h', (3 + len(group.desc))))
        self._handle.write(struct.pack('B', len(group.desc)))
        self._handle.write(group.desc)
        logging.debug('writing group info yields offset %d', self._handle.tell())
        for (name, param) in group.params.iteritems():
            self._handle.write(struct.pack('bb', len(name), group_id))
            self._handle.write(name)
            self._handle.write(struct.pack('h', ((param.binary_size() - 2) - len(name))))
            param.write(self._handle)
            logging.debug('writing %d bytes yields offset %d', ((4 + len(name)) + param.binary_size()), self._handle.tell())
        logging.debug('group %s ends at byte offset %d', group.name, self._handle.tell())

    def write_frames(self, frames):
        """Append (points, analog) frame pairs as raw arrays.

        Sample type follows the POINT:SCALE sign convention: negative
        scale selects float samples ('f'), non-negative selects int ('i').
        """
        assert (self._handle.tell() == (512 * (self.header.data_block - 1)))
        format = 'fi'[(self.group('POINT').get_float('SCALE') >= 0)]
        for (p, a) in frames:
            point = array.array(format)
            point.extend(p.flatten())
            point.tofile(self._handle)
            analog = array.array(format)
            analog.extend(a)
            analog.tofile(self._handle)
        self._pad_block()

    def write_like_phasespace(self, frames, frame_count, point_frame_rate=480.0, analog_frame_rate=0.0, point_scale_factor=(- 1.0), point_units='mm ', gen_scale=1.0):
        """Write a complete C3D file with PhaseSpace-style metadata.

        The first frame is peeked to size the POINT/ANALOG groups, then
        metadata and all frames are written. Writes nothing if `frames`
        is empty. NOTE(review): if `frames` is a one-shot iterator the
        peeked frame is consumed and lost — callers appear to pass
        re-iterable sequences; confirm.
        """
        try:
            (points, analog) = iter(frames).next()
        except StopIteration:
            return
        ppf = len(points)  # points per frame
        point_group = self.check_group(1, 'POINT', 'POINT group')
        point_group.add_param('USED', desc='Number of 3d markers', data_size=2, bytes=struct.pack('H', ppf))
        # FRAMES is a 16-bit field, hence the 65535 cap (full count is in TRIAL)
        point_group.add_param('FRAMES', desc='frame count', data_size=2, bytes=struct.pack('H', min(65535, frame_count)))
        # placeholder; patched below once the parameter block count is known
        point_group.add_param('DATA_START', desc='data block number', data_size=2, bytes=struct.pack('H', 0))
        point_group.add_param('SCALE', desc='3d scale factor', data_size=4, bytes=struct.pack('f', point_scale_factor))
        point_group.add_param('RATE', desc='3d data capture rate', data_size=4, bytes=struct.pack('f', point_frame_rate))
        point_group.add_param('X_SCREEN', desc='X_SCREEN parameter', data_size=(- 1), dimensions=[2], bytes='+X')
        point_group.add_param('Y_SCREEN', desc='Y_SCREEN parameter', data_size=(- 1), dimensions=[2], bytes='+Z')
        point_group.add_param('UNITS', desc='3d data units', data_size=(- 1), dimensions=[len(point_units)], bytes=point_units)
        point_group.add_param('LABELS', desc='labels', data_size=(- 1), dimensions=[5, ppf], bytes=''.join((('M%03d ' % i) for i in xrange(ppf))))
        point_group.add_param('DESCRIPTIONS', desc='descriptions', data_size=(- 1), dimensions=[16, ppf], bytes=((' ' * 16) * ppf))
        apf = len(analog)  # analog channels per frame
        analog_group = self.check_group(2, 'ANALOG', 'ANALOG group')
        analog_group.add_param('USED', desc='analog channel count', data_size=2, bytes=struct.pack('H', apf))
        analog_group.add_param('RATE', desc='analog frame rate', data_size=4, bytes=struct.pack('f', analog_frame_rate))
        analog_group.add_param('GEN_SCALE', desc='analog general scale factor', data_size=4, bytes=struct.pack('f', gen_scale))
        analog_group.add_param('SCALE', desc='analog channel scale factors', data_size=4, dimensions=[0])
        analog_group.add_param('OFFSET', desc='analog channel offsets', data_size=2, dimensions=[0])
        trial_group = self.check_group(3, 'TRIAL', 'TRIAL group')
        trial_group.add_param('ACTUAL_START_FIELD', desc='actual start frame', data_size=2, dimensions=[2], bytes=struct.pack('I', 1))
        trial_group.add_param('ACTUAL_END_FIELD', desc='actual end frame', data_size=2, dimensions=[2], bytes=struct.pack('I', frame_count))
        # now that all parameters exist, patch DATA_START and the header
        blocks = self.parameter_blocks()
        point_group.params['DATA_START'].bytes = struct.pack('H', (2 + blocks))
        self.header.data_block = (2 + blocks)
        self.header.frame_rate = point_frame_rate
        self.header.last_frame = min(frame_count, 65535)
        self.header.point_count = ppf
        self.header.analog_count = apf
        self.write_metadata()
        self.write_frames(frames)

    def write_from_reader(self, frames, reader):
        """Write `frames` using the frame range and rate taken from a
        Reader's metadata."""
        self.write_like_phasespace(frames, reader.end_field(), reader.frame_rate())
def launch_ec2(params_list, exp_prefix, docker_image, code_full_path, python_command='python', pre_commands=None, script='scripts/run_experiment.py', aws_config=None, dry=False, terminate_machine=True, use_gpu=False, sync_s3_pkl=False, sync_log_on_termination=True, periodic_sync=True, periodic_sync_interval=15):
if (len(params_list) == 0):
return
default_config = dict(image_id=config.AWS_IMAGE_ID, instance_type=config.AWS_INSTANCE_TYPE, key_name=config.AWS_KEY_NAME, spot=config.AWS_SPOT, spot_price=config.AWS_SPOT_PRICE, iam_instance_profile_name=config.AWS_IAM_INSTANCE_PROFILE_NAME, security_groups=config.AWS_SECURITY_GROUPS, security_group_ids=config.AWS_SECURITY_GROUP_IDS, network_interfaces=config.AWS_NETWORK_INTERFACES)
if (aws_config is None):
aws_config = dict()
aws_config = dict(default_config, **aws_config)
from io import StringIO
sio = StringIO()
sio.write('#!/bin/bash\n')
sio.write('{\n')
sio.write('\n die() { status=$1; shift; echo "FATAL: $*"; exit $status; }\n ')
sio.write('\n EC2_INSTANCE_ID="`wget -q -O - ')
sio.write('\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}\n '.format(exp_name=params_list[0].get('exp_name'), aws_region=config.AWS_REGION_NAME))
if config.LABEL:
sio.write('\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=owner,Value={label} --region {aws_region}\n '.format(label=config.LABEL, aws_region=config.AWS_REGION_NAME))
sio.write('\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=exp_prefix,Value={exp_prefix} --region {aws_region}\n '.format(exp_prefix=exp_prefix, aws_region=config.AWS_REGION_NAME))
sio.write('\n service docker start\n ')
sio.write('\n docker --config /home/ubuntu/.docker pull {docker_image}\n '.format(docker_image=docker_image))
if config.FAST_CODE_SYNC:
sio.write('\n aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz --region {aws_region}\n '.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR, aws_region=config.AWS_REGION_NAME))
sio.write('\n mkdir -p {local_code_path}\n '.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR, aws_region=config.AWS_REGION_NAME))
sio.write('\n tar -zxvf /tmp/rllab_code.tar.gz -C {local_code_path}\n '.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR, aws_region=config.AWS_REGION_NAME))
else:
sio.write('\n aws s3 cp --recursive {code_full_path} {local_code_path} --region {aws_region}\n '.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR, aws_region=config.AWS_REGION_NAME))
s3_mujoco_key_path = (config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/')
sio.write('\n aws s3 cp --recursive {} {} --region {}\n '.format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH, config.AWS_REGION_NAME))
sio.write('\n cd {local_code_path}\n '.format(local_code_path=config.DOCKER_CODE_DIR))
for params in params_list:
log_dir = params.get('log_dir')
remote_log_dir = params.pop('remote_log_dir')
env = params.pop('env', None)
sio.write('\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}\n '.format(exp_name=params.get('exp_name'), aws_region=config.AWS_REGION_NAME))
sio.write('\n mkdir -p {log_dir}\n '.format(log_dir=log_dir))
if periodic_sync:
if sync_s3_pkl:
sio.write("\n while /bin/true; do\n aws s3 sync --exclude '*' --include '*.csv' --include '*.json' --include '*.pkl' {log_dir} {remote_log_dir} --region {aws_region}\n sleep {periodic_sync_interval}\n done & echo sync initiated".format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME, periodic_sync_interval=periodic_sync_interval))
else:
sio.write("\n while /bin/true; do\n aws s3 sync --exclude '*' --include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region}\n sleep {periodic_sync_interval}\n done & echo sync initiated".format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME, periodic_sync_interval=periodic_sync_interval))
if sync_log_on_termination:
sio.write('\n while /bin/true; do\n if [ -z $(curl -Is | head -1 | grep 404 | cut -d \\ -f 2) ]\n then\n logger "Running shutdown hook."\n aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}\n aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}\n break\n else\n # Spot instance not yet marked for termination.\n sleep 5\n fi\n done & echo log sync initiated\n '.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
sio.write('\n {command}\n '.format(command=to_docker_command(params, docker_image, python_command=python_command, script=script, use_gpu=use_gpu, env=env, pre_commands=pre_commands, local_code_dir=config.DOCKER_CODE_DIR)))
sio.write('\n aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}\n '.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
sio.write('\n aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}\n '.format(remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
if terminate_machine:
sio.write('\n EC2_INSTANCE_ID="`wget -q -O - || die "wget instance-id has failed: $?"`"\n aws ec2 terminate-instances --instance-ids $EC2_INSTANCE_ID --region {aws_region}\n '.format(aws_region=config.AWS_REGION_NAME))
sio.write('} >> /home/ubuntu/user_data.log 2>&1\n')
full_script = dedent(sio.getvalue())
import boto3
import botocore
if aws_config['spot']:
ec2 = boto3.client('ec2', region_name=config.AWS_REGION_NAME, aws_access_key_id=config.AWS_ACCESS_KEY, aws_secret_access_key=config.AWS_ACCESS_SECRET)
else:
ec2 = boto3.resource('ec2', region_name=config.AWS_REGION_NAME, aws_access_key_id=config.AWS_ACCESS_KEY, aws_secret_access_key=config.AWS_ACCESS_SECRET)
if ((len(full_script) > 10000) or (len(base64.b64encode(full_script.encode()).decode('utf-8')) > 10000)):
s3_path = upload_file_to_s3(full_script)
sio = StringIO()
sio.write('#!/bin/bash\n')
sio.write('\n aws s3 cp {s3_path} /home/ubuntu/remote_script.sh --region {aws_region} && \\\n chmod +x /home/ubuntu/remote_script.sh && \\\n bash /home/ubuntu/remote_script.sh\n '.format(s3_path=s3_path, aws_region=config.AWS_REGION_NAME))
user_data = dedent(sio.getvalue())
else:
user_data = full_script
instance_args = dict(ImageId=aws_config['image_id'], KeyName=aws_config['key_name'], UserData=user_data, InstanceType=aws_config['instance_type'], EbsOptimized=True, SecurityGroups=aws_config['security_groups'], SecurityGroupIds=aws_config['security_group_ids'], NetworkInterfaces=aws_config['network_interfaces'], IamInstanceProfile=dict(Name=aws_config['iam_instance_profile_name']))
if (aws_config.get('placement', None) is not None):
instance_args['Placement'] = aws_config['placement']
if (not aws_config['spot']):
instance_args['MinCount'] = 1
instance_args['MaxCount'] = 1
print('')
print(instance_args['UserData'])
print('')
if aws_config['spot']:
instance_args['UserData'] = base64.b64encode(instance_args['UserData'].encode()).decode('utf-8')
spot_args = dict(DryRun=dry, InstanceCount=1, LaunchSpecification=instance_args, SpotPrice=aws_config['spot_price'])
import pprint
pprint.pprint(spot_args)
if (not dry):
response = ec2.request_spot_instances(**spot_args)
print(response)
spot_request_id = response['SpotInstanceRequests'][0]['SpotInstanceRequestId']
for _ in range(10):
try:
ec2.create_tags(Resources=[spot_request_id], Tags=[{'Key': 'Name', 'Value': params_list[0]['exp_name']}])
break
except botocore.exceptions.ClientError:
continue
else:
import pprint
pprint.pprint(instance_args)
ec2.create_instances(DryRun=dry, **instance_args) |
class CmdIdBreakpoint(Breakpoint):
    """Breakpoint that fires on a specific TIU/DMA command id.

    The breakpoint text looks like ``T12`` or ``D7``: the leading letter
    selects the command type (``T`` = TIU, ``D`` = DMA) and the digits give
    the command id to stop at.
    """
    type = 'cmd-id'
    pattern = re.compile('^[TD][0-9]+')

    def __init__(self, text, cond=None, index=(- 1)) -> None:
        super().__init__(text, cond, index)
        # Leading 'T' selects TIU commands; anything else ('D...') is DMA.
        if text[0] == 'T':
            self.match_type = CMDType.tiu
        else:
            self.match_type = CMDType.dma
        self.match_index = int(text[1:])

    def should_stop(self, tdb: 'TdbCmdBackend') -> bool:
        """Return True when the current command matches both type and id."""
        cmd = tdb.get_cmd()
        return (self.match_type == cmd.cmd_type) and (cmd.reg.cmd_id == self.match_index)
@dataclass(repr=False)
class ResponseTimeExceeded(FailureContext):
    """Failure recorded when a response takes longer than the allowed deadline.

    NOTE(review): the original first line was a bare ``(repr=False)`` — the
    remnant of a mangled ``@dataclass(repr=False)`` decorator (the field-style
    class body confirms dataclass usage); restored here.

    Attributes:
        elapsed: Observed response time in seconds.
        deadline: Configured maximum response time.
        message: Human-readable description of the failure.
    """
    elapsed: float
    deadline: int
    message: str
    title: str = 'Response time limit exceeded'
    type: str = 'response_time_exceeded'

    def unique_by_key(self, check_message: (str | None)) -> tuple[(str, ...)]:
        # All occurrences deduplicate to a single entry: only the (constant)
        # title participates in the key; the message is deliberately ignored.
        return (self.title,)
def fid_inception_v3():
    """Build the InceptionV3 variant used for FID computation.

    Swaps several stock Inception blocks for the FID-specific
    implementations, then loads the reference FID weights.
    """
    print('---fid_inception_v3---')
    net = _inception_v3(num_classes=1008, aux_logits=False, pretrained=False)
    # Replace the standard blocks with their FID-specific counterparts.
    replacements = [
        ('Mixed_5b', FIDInceptionA(192, pool_features=32)),
        ('Mixed_5c', FIDInceptionA(256, pool_features=64)),
        ('Mixed_5d', FIDInceptionA(288, pool_features=64)),
        ('Mixed_6b', FIDInceptionC(768, channels_7x7=128)),
        ('Mixed_6c', FIDInceptionC(768, channels_7x7=160)),
        ('Mixed_6d', FIDInceptionC(768, channels_7x7=160)),
        ('Mixed_6e', FIDInceptionC(768, channels_7x7=192)),
        ('Mixed_7b', FIDInceptionE_1(1280)),
        ('Mixed_7c', FIDInceptionE_2(2048)),
    ]
    for attr_name, block in replacements:
        setattr(net, attr_name, block)
    print('----- load inception ckpt -----')
    state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
    net.load_state_dict(state_dict)
    print('----- done -----')
    return net
class dynamics():
    """Penalty-landscape state machine (industrial-benchmark-style dynamics).

    The state is a triple ``(domain, phi_idx, system_response)``:
    ``phi_idx`` indexes into an array of reward/penalty functions laid out
    over an angular grid, ``Domain`` records the sign of the operating
    region, and ``System_Response`` controls whether steps move with or
    against the sign of ``position``.
    """

    class Domain(Enum):
        # Sign of the current operating region (set from sign(position)).
        negative = (- 1)
        positive = (+ 1)

    class System_Response(Enum):
        # Whether the angular step currently follows (+1) or opposes (-1)
        # the sign of the position.
        advantageous = (+ 1)
        disadvantageous = (- 1)

    def __init__(self, number_steps, max_required_step, safe_zone):
        # safe_zone: half-width of the neutral band around position 0
        #   (validated to be non-negative).
        # number_steps: angular resolution; must be a positive multiple of 4
        #   (validated in compute_strongest_penalty_absIdx).
        # max_required_step: passed through to each reward function.
        self._safe_zone = self._check_safe_zone(safe_zone)
        self._strongest_penality_abs_idx = self.compute_strongest_penalty_absIdx(number_steps)
        self._penalty_functions_array = self._define_reward_functions(number_steps, max_required_step)

    def _check_safe_zone(self, safe_zone):
        """Validate and return the safe-zone half-width."""
        if (safe_zone < 0):
            raise ValueError('safe_zone must be non-negative')
        return safe_zone

    def reset(self):
        """Return the initial state tuple (domain, phi_idx, system_response)."""
        return (self.Domain.positive, 0, self.System_Response.advantageous)

    def reward(self, Phi_idx, position):
        """Evaluate the reward of the penalty function selected by Phi_idx."""
        return self.get_penalty_function(Phi_idx).reward(position)

    def state_transition(self, domain, phi_idx, system_response, position):
        """Advance the state machine one step for the observed position.

        Order matters: domain is updated first, a domain flip resets the
        system response, then the angular index moves, the response is
        re-evaluated, symmetry is applied, and finally the whole state
        resets once the index returns to 0 inside the safe zone.
        """
        old_domain = domain
        domain = self._compute_domain(old_domain, position)
        # Crossing into the other domain makes the response advantageous again.
        if (domain != old_domain):
            system_response = self.System_Response.advantageous
        phi_idx += self._compute_angular_step(domain, phi_idx, system_response, position)
        system_response = self._updated_system_response(phi_idx, system_response)
        phi_idx = self._apply_symmetry(phi_idx)
        # Back at the neutral angle inside the safe zone: full reset.
        if ((phi_idx == 0) and (abs(position) <= self._safe_zone)):
            (domain, phi_idx, system_response) = self.reset()
        return (domain, phi_idx, system_response)

    def _compute_domain(self, domain, position):
        """Keep the domain inside the safe zone; follow sign(position) outside."""
        if (abs(position) <= self._safe_zone):
            return domain
        else:
            return self.Domain(sign(position))

    def _compute_angular_step(self, domain, phi_idx, system_response, position):
        """Return the signed increment applied to phi_idx this step."""
        # Inside the safe zone the index decays back toward 0.
        if (abs(position) <= self._safe_zone):
            return (- sign(phi_idx))
        # Pinned at the strongest penalty on the opposite side: no movement.
        if (phi_idx == ((- domain.value) * self._strongest_penality_abs_idx)):
            return 0
        return (system_response.value * sign(position))

    def _updated_system_response(self, phi_idx, system_response):
        """Switch to 'disadvantageous' once the strongest penalty is reached."""
        if (abs(phi_idx) >= self._strongest_penality_abs_idx):
            return self.System_Response.disadvantageous
        else:
            return system_response

    def _apply_symmetry(self, phi_idx):
        """Fold phi_idx back into [-k, k] using the 4k-periodic symmetry.

        The penalty landscape is periodic with period 4k (k = strongest
        penalty index), so indices beyond +/-k are mirrored back.
        """
        if (abs(phi_idx) < self._strongest_penality_abs_idx):
            return phi_idx
        phi_idx = ((phi_idx + (4 * self._strongest_penality_abs_idx)) % (4 * self._strongest_penality_abs_idx))
        phi_idx = ((2 * self._strongest_penality_abs_idx) - phi_idx)
        return phi_idx

    def get_penalty_function(self, phi_idx):
        """Map a (possibly negative) phi_idx to its penalty-function entry."""
        idx = int((self._strongest_penality_abs_idx + phi_idx))
        # Wrap negative offsets around the end of the array.
        if (idx < 0):
            idx = (idx + len(self._penalty_functions_array))
        return self._penalty_functions_array[idx]

    def _define_reward_functions(self, number_steps, max_required_step):
        """Instantiate one reward function per grid angle in [-k, k]."""
        k = self._strongest_penality_abs_idx
        # Angular grid: 2*pi*i/number_steps for i in [-k, k].
        angle_gid = (((np.arange((- k), (k + 1)) * 2) * pi) / number_steps)
        reward_functions = [reward_function.reward_function(Phi, max_required_step) for Phi in angle_gid]
        self._penalty_functions_array = np.array(reward_functions)
        return self._penalty_functions_array

    def compute_strongest_penalty_absIdx(self, number_steps):
        """Return k = number_steps // 4 after validating number_steps."""
        if ((number_steps < 1) or ((number_steps % 4) != 0)):
            raise ValueError('number_steps must be positive and integer multiple of 4')
        _strongest_penality_abs_idx = (number_steps // 4)
        return _strongest_penality_abs_idx
def test_case28():
    """Subscribe via NGSI-10 and verify the broker answers with HTTP 200."""
    endpoint = (brokerIp + '/ngsi10/subscribeContext')
    payload = json.dumps(data_ngsi10.subdata49)
    r = requests.post(endpoint, data=payload, headers={'Content-Type': 'application/json'})
    # The broker replies with single-quoted pseudo-JSON; normalize the quotes
    # before parsing. A missing key here fails the test with a KeyError.
    normalized = r.content.decode('utf8').replace("'", '"')
    sub_response = json.loads(normalized)['subscribeResponse']
    sid = sub_response['subscriptionId']
    assert (r.status_code == 200)
def create_logger(log_file=None):
    """Configure and return the root logger.

    Attaches a colored console handler, and additionally a plain-text file
    handler when *log_file* is given. Any previously attached handlers are
    removed so repeated calls do not duplicate output.
    """
    format_str = '[%(asctime)s] [%(levelname).4s] %(message)s'
    logger = logging.getLogger()
    logger.handlers.clear()
    logger.setLevel(level=logging.DEBUG)
    logger.propagate = False
    console = logging.StreamHandler()
    console.setFormatter(coloredlogs.ColoredFormatter(format_str))
    logger.addHandler(console)
    if log_file is not None:
        to_file = logging.FileHandler(log_file)
        to_file.setFormatter(logging.Formatter(format_str, datefmt='%Y-%m-%d %H:%M:%S'))
        logger.addHandler(to_file)
    return logger
def load_data(dataset):
    """Load one of the supported benchmark datasets from ``dataset/<name>/``.

    Supported names: 'mnist' (gzipped pickle of train/test splits),
    'reuters10k' and 'har' (MATLAB .mat files).

    BUG FIX: the original unconditionally opened ``path`` and unpickled it
    before checking the dataset name — for any dataset other than 'mnist'
    that attempted ``open()`` on a directory and crashed. Loading is now
    dispatched per dataset.

    Returns:
        (X, Y): feature matrix and label vector as numpy arrays.

    Raises:
        ValueError: if *dataset* is not one of the supported names.
    """
    path = ('dataset/' + dataset) + '/'
    if dataset == 'mnist':
        path = path + 'mnist.pkl.gz'
        # The archive may be gzip-compressed or a plain pickle file.
        if path.endswith('.gz'):
            f = gzip.open(path, 'rb')
        else:
            f = open(path, 'rb')
        try:
            if sys.version_info < (3,):
                ((x_train, y_train), (x_test, y_test)) = cPickle.load(f)
            else:
                # Python-2 pickles of numpy arrays require bytes decoding.
                ((x_train, y_train), (x_test, y_test)) = cPickle.load(f, encoding='bytes')
        finally:
            f.close()
        # Scale pixels to [0, 1] and flatten each image to a vector.
        x_train = (x_train.astype('float32') / 255.0)
        x_test = (x_test.astype('float32') / 255.0)
        x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
        x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
        X = np.concatenate((x_train, x_test))
        Y = np.concatenate((y_train, y_test))
    elif dataset == 'reuters10k':
        data = scio.loadmat(path + 'reuters10k.mat')
        X = data['X']
        Y = data['Y'].squeeze()
    elif dataset == 'har':
        data = scio.loadmat(path + 'HAR.mat')
        X = data['X'].astype('float32')
        # Labels are 1-based in the .mat file; shift to 0-based.
        Y = (data['Y'] - 1)
        # Keep only the first 10200 samples (original truncation preserved).
        X = X[:10200]
        Y = Y[:10200]
    else:
        raise ValueError('Unknown dataset: {}'.format(dataset))
    return (X, Y)
class BaseIoHandler():
    """Abstract handler that owns an on-disk root directory.

    The constructor guarantees the root directory exists; subclasses
    implement :meth:`save` and :meth:`load`.
    """

    def __init__(self, root: str) -> None:
        # Remember the root and create it on first use if missing.
        self.root = root
        if not os.path.isdir(root):
            os.makedirs(root)

    def save(self, *args, **kwargs) -> None:
        """Persist data under :attr:`root`. Must be overridden."""
        raise NotImplementedError

    def load(self, *args, **kwargs) -> Any:
        """Read data back from :attr:`root`. Must be overridden."""
        raise NotImplementedError
class Citizen(Agent):
    """Epstein civil-violence citizen agent.

    Each step the citizen (unless jailed) surveys its neighborhood,
    estimates its arrest probability, and switches between 'Quiescent'
    and 'Active' depending on whether grievance minus perceived risk
    exceeds its activation threshold.
    """

    def __init__(self, unique_id, model, pos, hardship, regime_legitimacy, risk_aversion, threshold, vision):
        super().__init__(unique_id, model)
        self.breed = 'citizen'
        self.pos = pos
        # Static personal traits drawn by the model.
        self.hardship = hardship
        self.regime_legitimacy = regime_legitimacy
        self.risk_aversion = risk_aversion
        # Activation threshold for switching to 'Active'.
        self.threshold = threshold
        self.condition = 'Quiescent'
        # Radius (in cells) the agent can observe.
        self.vision = vision
        self.jail_sentence = 0
        # Grievance is recomputed lazily because regime_legitimacy can change.
        self.grievance = (lambda : (self.hardship * (1 - self.regime_legitimacy)))
        self.arrest_probability = None
        self.arrest_parameter = None
        self.arrests = 0
        self.prison_interaction = model.prison_interaction
        # Track grievance over time (starts with the initial value).
        self.history = [self.grievance()]
        self.days_active = 0
        self.neighborhood = None
        self.neighbors = None
        self.empty_neighbors = None

    def update_arrest_parameter(self):
        """Cache grievance discounted by perceived arrest risk and legitimacy."""
        self.arrest_parameter = (self.grievance() - (self.arrest_probability * self.regime_legitimacy))

    def step(self):
        """Serve jail time, else reassess state and possibly move."""
        if self.jail_sentence:
            self.jail_sentence -= 1
            return
        if (self.condition == 'Active'):
            self.days_active += 1
        self.update_neighbors()
        self.update_estimated_arrest_probability()
        self.update_arrest_parameter()
        net_risk = (self.risk_aversion * self.arrest_probability)
        # Activate when net grievance exceeds the threshold; deactivate otherwise.
        if ((self.condition == 'Quiescent') and ((self.grievance() - net_risk) > self.threshold)):
            self.condition = 'Active'
        elif ((self.condition == 'Active') and ((self.grievance() - net_risk) <= self.threshold)):
            self.condition = 'Quiescent'
        # Random walk into an empty neighboring cell when movement is enabled.
        if (self.model.movement and self.empty_neighbors):
            new_pos = self.random.choice(self.empty_neighbors)
            self.model.grid.move_agent(self, new_pos)

    def update_neighbors(self):
        """Refresh the visible neighborhood, its occupants, and empty cells."""
        self.neighborhood = self.model.grid.get_neighborhood(self.pos, moore=True, radius=self.vision)
        self.neighbors = self.model.grid.get_cell_list_contents(self.neighborhood)
        self.empty_neighbors = [c for c in self.neighborhood if self.model.grid.is_cell_empty(c)]

    def update_estimated_arrest_probability(self):
        """Estimate arrest probability from the visible cop/active ratio.

        Uses P = 1 - exp(-k * cops/actives); the active count starts at 1.0
        so the agent counts itself and division by zero is impossible.
        """
        cops_in_vision = len([c for c in self.neighbors if (c.breed == 'cop')])
        actives_in_vision = 1.0
        for unit in self.neighbors:
            if ((unit.breed == 'citizen') and (unit.condition == 'Active') and (unit.jail_sentence == 0)):
                actives_in_vision += 1
        self.arrest_probability = (1 - math.exp((((- 1) * self.model.arrest_prob_constant) * (cops_in_vision / actives_in_vision))))

    def update_risk_aversion(self):
        """Drift risk aversion toward that of visible jailed citizens.

        NOTE(review): the variable is named ``min_aversion`` but is computed
        with ``max(...)`` — confirm whether the intended anchor is the most
        or least risk-averse cellmate.
        """
        cellmates_risk_aversion = [c.risk_aversion for c in self.neighbors if ((c.breed == 'citizen') and (c.jail_sentence > 0))]
        if cellmates_risk_aversion:
            min_aversion = max(cellmates_risk_aversion)
            diff = (self.prison_interaction * (min_aversion - self.risk_aversion))
            self.risk_aversion += diff
class ObjectLogisticRegression(pl.LightningModule):
    """Logistic-regression VQA baseline over pooled object and word features.

    Concatenates mean-pooled image+spatial features with mean-pooled word
    embeddings and applies a single linear layer over the answer vocabulary.
    When ``chart``/``chart_val`` is set, per-example confidence tuples are
    collected for dataset-cartography analysis.
    """

    def __init__(self, hparams, train_dataset, val_dataset, ans2label=None, label2ans=None, chart=False, chart_val=False):
        super(ObjectLogisticRegression, self).__init__()
        self.save_hyperparameters(hparams)
        (self.train_dataset, self.val_dataset) = (train_dataset, val_dataset)
        (self.ans2label, self.label2ans) = (ans2label, label2ans)
        # chart / chart_val toggle cartography collection for train / val.
        (self.chart, self.chart_val) = (chart, chart_val)
        self.build_model()

    def build_model(self):
        """Create the embedding table and the single linear classifier."""
        self.w_emb = WordEmbedding(ntoken=self.train_dataset.dictionary.ntoken, dim=self.hparams.emb_dim)
        self.pad_token = self.w_emb.ntoken
        # Input: visual features (+6 spatial dims) concatenated with word embs.
        self.linear = nn.Linear(((self.train_dataset.v_dim + 6) + self.hparams.emb_dim), len(self.ans2label))

    def forward(self, image_features, spatial_features, question_features):
        """Return answer logits for a batch of (image, spatial, question) inputs."""
        w_emb = self.w_emb(question_features)
        # Mean-pool words and objects, then fuse by concatenation.
        embs = w_emb.mean(dim=1)
        image_features = torch.cat([image_features, spatial_features], dim=2)
        img_feats = image_features.mean(dim=1)
        joint = torch.cat([embs, img_feats], dim=1)
        return self.linear(joint)

    def configure_optimizers(self):
        """Plain Adam with default hyperparameters."""
        return torch.optim.Adam(self.parameters())

    def training_step(self, train_batch, batch_idx):
        """Cross-entropy step; optionally records per-example confidences."""
        (img, spatials, question, answer, idxs) = train_batch
        logits = self.forward(img, spatials, question)
        if (self.chart or self.chart_val):
            probabilities = torch.softmax(logits, dim=1)
            hot_answers = torch.nn.functional.one_hot(answer, num_classes=len(self.ans2label))
            # Probability assigned to the gold answer for each example.
            class_probabilities = torch.sum((probabilities * hot_answers), dim=1)
            (max_probabilities, _) = torch.max(probabilities, dim=1)
            # Map dataset index -> (gold-class prob, max prob) for cartography.
            bdict_conf = {}
            (lidxs, lcp) = (idxs.cpu().numpy().tolist(), class_probabilities.detach().cpu().numpy().tolist())
            lmp = max_probabilities.detach().cpu().numpy().tolist()
            for i in range(len(lidxs)):
                bdict_conf[lidxs[i]] = (lcp[i], lmp[i])
        loss = nn.functional.cross_entropy(logits, answer)
        accuracy = torch.mean((logits.argmax(dim=1) == answer).float())
        log = {'train_loss': loss, 'train_acc': accuracy}
        if (self.chart or self.chart_val):
            return {'loss': loss, 'train_loss': loss, 'train_acc': accuracy, 'cartography': bdict_conf, 'progress_bar': log, 'log': log}
        else:
            return {'loss': loss, 'train_loss': loss, 'train_acc': accuracy, 'progress_bar': log, 'log': log}

    def training_epoch_end(self, outputs):
        """Average step metrics and merge cartography dicts across the epoch."""
        avg_loss = torch.stack([x['train_loss'] for x in outputs]).mean()
        avg_acc = torch.stack([x['train_acc'] for x in outputs]).mean()
        if self.chart:
            cartography_dict = {}
            for x in outputs:
                cartography_dict.update(x['cartography'])
        pbar = {'train_epoch_loss': avg_loss, 'train_epoch_acc': avg_acc}
        if self.chart:
            log = {'train_epoch_loss': avg_loss, 'train_epoch_acc': avg_acc, 'cartography': cartography_dict}
        else:
            log = dict(pbar)
        for (k, v) in log.items():
            self.log(k, v)

    def validation_step(self, val_batch, batch_idx):
        """Validation mirror of training_step (no backward, 'val_' keys)."""
        (img, spatials, question, answer, idxs) = val_batch
        logits = self.forward(img, spatials, question)
        if self.chart_val:
            probabilities = torch.softmax(logits, dim=1)
            hot_answers = torch.nn.functional.one_hot(answer, num_classes=len(self.ans2label))
            class_probabilities = torch.sum((probabilities * hot_answers), dim=1)
            (max_probabilities, _) = torch.max(probabilities, dim=1)
            bdict_conf = {}
            (lidxs, lcp) = (idxs.cpu().numpy().tolist(), class_probabilities.detach().cpu().numpy().tolist())
            lmp = max_probabilities.cpu().numpy().tolist()
            for i in range(len(lidxs)):
                bdict_conf[lidxs[i]] = (lcp[i], lmp[i])
        loss = nn.functional.cross_entropy(logits, answer)
        accuracy = torch.mean((logits.argmax(dim=1) == answer).float())
        if self.chart_val:
            return {'val_loss': loss, 'val_acc': accuracy, 'val_cartography': bdict_conf}
        else:
            return {'val_loss': loss, 'val_acc': accuracy}

    def validation_epoch_end(self, outputs):
        """Aggregate validation metrics (and cartography, if collected)."""
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        avg_acc = torch.stack([x['val_acc'] for x in outputs]).mean()
        if self.chart_val:
            cartography_dict = {}
            for x in outputs:
                cartography_dict.update(x['val_cartography'])
        pbar = {'val_loss': avg_loss, 'val_acc': avg_acc}
        if self.chart_val:
            log = {'val_loss': avg_loss, 'val_acc': avg_acc, 'val_cartography': cartography_dict}
        else:
            log = dict(pbar)
        return {'progress_bar': pbar, 'log': log}

    def active_step(self, active_batch, batch_idx, mode='least-conf'):
        """Score a batch for active learning.

        Returns per-example max softmax probabilities ('max-prob'/'least-conf')
        or per-example entropies ('entropy'). Any other mode returns None.
        """
        (img, spatials, question, _, _) = active_batch
        logits = self.forward(img, spatials, question)
        if (mode in ['max-prob', 'least-conf']):
            (probabilities, _) = torch.max(torch.softmax(logits, dim=1), dim=1)
            probabilities = probabilities.detach().cpu().numpy()
            return list(probabilities)
        elif (mode in ['entropy']):
            probabilities = torch.softmax(logits, dim=1).detach().cpu().numpy()
            entropies = entropy(probabilities, axis=1)
            return list(entropies)

    def extract(self, extract_batch, batch_idx, mode='fused'):
        """Return pooled 'language', 'vision', or 'fused' features as numpy."""
        (img, spatials, question, _, _) = extract_batch
        if (mode == 'language'):
            w_emb = self.w_emb(question)
            enc = w_emb.mean(dim=1)
        elif (mode == 'vision'):
            enc = torch.cat([img, spatials], dim=2).mean(dim=1)
        elif (mode == 'fused'):
            # Same fusion as forward(), but without the classifier head.
            w_emb = self.w_emb(question)
            embs = w_emb.mean(dim=1)
            image_features = torch.cat([img, spatials], dim=2)
            img_feats = image_features.mean(dim=1)
            enc = torch.cat([embs, img_feats], dim=1)
        else:
            raise AssertionError(('Mode %s not defined!' % mode))
        enc = enc.detach().cpu().numpy()
        return enc
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 (optionally dilated) -> 1x1 expand.

    The final 1x1 convolution expands channels by a factor of 4, so the
    block maps ``inplanes`` channels to ``planes * 4``. A ``downsample``
    module, when supplied, adapts the identity path to the output shape.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, batch_norm_fn=nn.BatchNorm2d):
        super(Bottleneck, self).__init__()
        # NOTE: layer creation order is kept as-is for reproducible
        # parameter initialization.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = batch_norm_fn(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation)
        self.bn2 = batch_norm_fn(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = batch_norm_fn(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation

    def forward(self, x):
        """Apply the three conv stages, add the (possibly downsampled) identity."""
        identity = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += identity
        return self.relu(y)
def main(cfg: DictConfig, **unused_kwargs):
    """Evaluate a language-model ensemble on a dataset split.

    Loads the checkpoint(s) named in ``cfg.common_eval.path``, prepares them
    for inference (fp16 / CUDA as configured), iterates the chosen subset,
    and logs loss (base 2) and perplexity. Returns the eval results dict.
    """
    # Accept legacy argparse Namespaces by converting to an OmegaConf config.
    if isinstance(cfg, Namespace):
        cfg = convert_namespace_to_omegaconf(cfg)
    utils.import_user_module(cfg.common)
    logger.info(cfg)
    # Reserve context_window tokens of each sample as (unscored) context.
    if (cfg.eval_lm.context_window > 0):
        cfg.task.tokens_per_sample -= cfg.eval_lm.context_window
    task = tasks.setup_task(cfg.task)
    logger.info('loading model(s) from {}'.format(cfg.common_eval.path))
    # SECURITY NOTE(review): model_overrides is passed through eval() —
    # only safe because the value comes from the operator's own CLI/config.
    (models, model_args, task) = checkpoint_utils.load_model_ensemble_and_task([cfg.common_eval.path], arg_overrides=eval(cfg.common_eval.model_overrides), suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, task=task)
    use_fp16 = cfg.common.fp16
    use_cuda = (torch.cuda.is_available() and (not cfg.common.cpu))
    if use_cuda:
        torch.cuda.set_device(cfg.distributed_training.device_id)
    # Move each model to half precision / GPU as configured.
    for model in models:
        if use_fp16:
            model.half()
        if (use_cuda and (not cfg.distributed_training.pipeline_model_parallel)):
            model.cuda()
        model.prepare_for_inference_(cfg)
    assert (len(models) > 0)
    logger.info('num. model params: {:,}'.format(sum((p.numel() for p in models[0].parameters()))))
    task.load_dataset(cfg.dataset.gen_subset)
    dataset = task.dataset(cfg.dataset.gen_subset)
    logger.info('{} {} {:,} examples'.format(cfg.task.data, cfg.dataset.gen_subset, len(dataset)))
    # Shard the data across distributed workers when world size > num_shards.
    itr = task.eval_lm_dataloader(dataset=dataset, max_tokens=(cfg.dataset.max_tokens or 36000), batch_size=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions(*[model.max_positions() for model in models]), num_shards=max(cfg.dataset.num_shards, cfg.distributed_training.distributed_world_size), shard_id=max(cfg.dataset.shard_id, cfg.distributed_training.distributed_rank), num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, context_window=cfg.eval_lm.context_window)
    itr = progress_bar.progress_bar(itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, default_log_format=('tqdm' if (not cfg.common.no_progress_bar) else 'simple'))
    results = eval_lm(models=models, source_dictionary=task.source_dictionary, batch_iterator=itr, post_process=cfg.common_eval.post_process, output_word_probs=cfg.eval_lm.output_word_probs, output_word_stats=cfg.eval_lm.output_word_stats, target_dictionary=task.target_dictionary, softmax_batch=cfg.eval_lm.softmax_batch, remove_bos_token=getattr(cfg.task, 'add_bos_token', False))
    logger.info('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(results['loss'], results['perplexity']))
    return results
class TextLoader():
    """Character-level text batcher.

    On first use, reads ``input.txt`` from *data_dir*, builds a
    frequency-ordered character vocabulary and an encoded tensor, and caches
    both (``vocab.pkl`` / ``data.npy``). Then serves ``(x, y)`` batches where
    ``y`` is the input shifted left by one character (wrapping at the end).

    Fixes over the original: batch counts use integer floor division instead
    of ``int(a / b)`` (float precision hazard on very large corpora), and
    the empty-data check raises ``ValueError`` instead of ``assert False``
    (asserts are stripped under ``python -O``).
    """

    def __init__(self, data_dir, batch_size, seq_length, encoding='utf-8'):
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.encoding = encoding
        input_file = os.path.join(data_dir, 'input.txt')
        vocab_file = os.path.join(data_dir, 'vocab.pkl')
        tensor_file = os.path.join(data_dir, 'data.npy')
        # Preprocess from raw text unless both cached artifacts exist.
        if (not (os.path.exists(vocab_file) and os.path.exists(tensor_file))):
            print('reading text file')
            self.preprocess(input_file, vocab_file, tensor_file)
        else:
            print('loading preprocessed files')
            self.load_preprocessed(vocab_file, tensor_file)
        self.create_batches()
        self.reset_batch_pointer()

    def preprocess(self, input_file, vocab_file, tensor_file):
        """Build the vocabulary and encoded tensor from raw text; cache both."""
        with codecs.open(input_file, 'r', encoding=self.encoding) as f:
            data = f.read()
        # Most frequent characters receive the smallest ids.
        counter = collections.Counter(data)
        count_pairs = sorted(counter.items(), key=(lambda x: (- x[1])))
        (self.chars, _) = zip(*count_pairs)
        self.vocab_size = len(self.chars)
        self.vocab = dict(zip(self.chars, range(len(self.chars))))
        with open(vocab_file, 'wb') as f:
            cPickle.dump(self.chars, f)
        self.tensor = np.array(list(map(self.vocab.get, data)))
        np.save(tensor_file, self.tensor)

    def load_preprocessed(self, vocab_file, tensor_file):
        """Restore the vocabulary and encoded tensor from the cache files."""
        with open(vocab_file, 'rb') as f:
            self.chars = cPickle.load(f)
        self.vocab_size = len(self.chars)
        self.vocab = dict(zip(self.chars, range(len(self.chars))))
        self.tensor = np.load(tensor_file)
        self.num_batches = (self.tensor.size // (self.batch_size * self.seq_length))

    def create_batches(self):
        """Split the encoded tensor into input/target batch arrays."""
        self.num_batches = (self.tensor.size // (self.batch_size * self.seq_length))
        if (self.num_batches == 0):
            raise ValueError('Not enough data. Make seq_length and batch_size small.')
        # Drop the tail that does not fill a complete batch.
        self.tensor = self.tensor[:((self.num_batches * self.batch_size) * self.seq_length)]
        xdata = self.tensor
        ydata = np.copy(self.tensor)
        # Targets are inputs shifted left by one, wrapping around to the start.
        ydata[:(- 1)] = xdata[1:]
        ydata[(- 1)] = xdata[0]
        self.x_batches = np.split(xdata.reshape(self.batch_size, (- 1)), self.num_batches, 1)
        self.y_batches = np.split(ydata.reshape(self.batch_size, (- 1)), self.num_batches, 1)

    def next_batch(self):
        """Return the next (x, y) batch pair and advance the pointer."""
        (x, y) = (self.x_batches[self.pointer], self.y_batches[self.pointer])
        self.pointer += 1
        return (x, y)

    def reset_batch_pointer(self):
        """Rewind iteration to the first batch."""
        self.pointer = 0
class GuardedIterator(object):
    """Wrap a WSGI ``app_iter`` and warn about protocol violations.

    BUG FIX: the original source had a stray bare ``_iterator`` expression
    on the line above this class, which raised NameError at import time;
    it has been removed.

    Tracks whether the iterator was closed, whether the response started
    before iteration, and — on close — cross-checks status code, headers,
    and the number of body bytes actually produced.
    """

    def __init__(self, iterator, headers_set, chunks):
        self._iterator = iterator
        # Bind the per-version next() method once for speed and compatibility.
        if PY2:
            self._next = iter(iterator).next
        else:
            self._next = iter(iterator).__next__
        self.closed = False
        # Shared mutable state filled in by start_response / the server.
        self.headers_set = headers_set
        self.chunks = chunks

    def __iter__(self):
        return self

    def __next__(self):
        if self.closed:
            warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2)
        rv = self._next()
        if (not self.headers_set):
            warn('The application returned before it started the response.', WSGIWarning, stacklevel=2)
        # Each yielded chunk must be a bytestring per the WSGI spec.
        check_string('application iterator items', rv)
        self.chunks.append(len(rv))
        return rv

    def close(self):
        """Close the wrapped iterator and validate the finished response."""
        self.closed = True
        if hasattr(self._iterator, 'close'):
            self._iterator.close()
        if self.headers_set:
            (status_code, headers) = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get('content-length', type=int)
            if (status_code == 304):
                # 304 may only carry a small set of headers and no body.
                for (key, _value) in headers:
                    key = key.lower()
                    if ((key not in ('expires', 'content-location')) and is_entity_header(key)):
                        warn(('Entity header %r found in 304 response.' % key), HTTPWarning)
                if bytes_sent:
                    warn('304 responses must not have a body.', HTTPWarning)
            elif ((100 <= status_code < 200) or (status_code == 204)):
                # 1xx and 204 responses must be bodiless with zero length.
                if (content_length != 0):
                    warn(('%r responses must have an empty content length.' % status_code), HTTPWarning)
                if bytes_sent:
                    warn(('%r responses must not have a body.' % status_code), HTTPWarning)
            elif ((content_length is not None) and (content_length != bytes_sent)):
                warn('Content-Length and the number of bytes sent to the client do not match.', WSGIWarning)

    def __del__(self):
        # Garbage collection without close() is itself a WSGI violation.
        if (not self.closed):
            try:
                warn('Iterator was garbage collected before it was closed.', WSGIWarning)
            except Exception:
                pass
def test_generic_function_eq_modified(function_mock, type_system):
    """A GenericFunction built from a fresh signature must not equal the fixture."""
    other = GenericFunction(type_system.convert_type_hint(int), MagicMock(InferredSignature))
    assert (function_mock != other)
def test_flatten_output():
    """SortPooling with flatten_output=True must emit shape (batch, k*features, 1)."""
    graph = np.array([[3, 1], [1, 2], [5, 0], [0, (- 4)]], dtype=int).reshape((2, 2, 2))
    node_mask = np.array([[True, True], [True, True]])
    expected = np.array([[1, 2, 3, 1], [5, 0, 0, (- 4)]], dtype=int).reshape((2, 4, 1))
    pooled = SortPooling(k=2, flatten_output=True)(graph, mask=node_mask)
    np.testing.assert_array_equal(pooled, expected)
class SentencePair():
    """A pair of sentences with a similarity score and optional metadata.

    Sentences are optionally normalized on construction (quote/dash/space
    trimming while preserving terminal punctuation). The pair serializes to
    dict, tuple, JSON, or a TSV row.
    """

    def __init__(self, sent1: str, sent2: str, score, clean=True, label=None, method=None, source=None):
        self.sent1 = Sentence(self.__clean(sent1) if clean else sent1)
        self.sent2 = Sentence(self.__clean(sent2) if clean else sent2)
        self.score: Union[(float, DetailedScore)] = score
        self.label = label
        self.method = method
        self.source = source

    def __clean(self, sent: str):
        """Trim stray quotes/dashes/whitespace, keeping terminal . ? or !."""
        eos = sent[(- 1)]
        if eos not in ('.', '?', '!'):
            eos = ''
        else:
            sent = sent[:(- 1)]
        sent = sent.strip("' \n\r-`").replace('\t', ' ')
        return (sent + eos)

    def min_length(self):
        """Character length of the shorter sentence."""
        lengths = (len(self.sent1.text()), len(self.sent2.text()))
        return min(lengths)

    def max_length(self):
        """Character length of the longer sentence."""
        lengths = (len(self.sent1.text()), len(self.sent2.text()))
        return max(lengths)

    def similar_norms(self) -> bool:
        """True when the normalized forms match or one prefixes/suffixes the other."""
        (norm1, norm2) = (self.sent1.norm(), self.sent2.norm())
        return bool(
            (norm1 == norm2)
            or norm1.startswith(norm2) or norm2.startswith(norm1)
            or norm1.endswith(norm2) or norm2.endswith(norm1)
        )

    def word_overlap(self) -> float:
        """Jaccard overlap of the two word sets; 0.0 when either is empty."""
        (words1, words2) = (self.sent1.words(), self.sent2.words())
        if (len(words1) == 0) or (len(words2) == 0):
            return 0.0
        return (len(words1.intersection(words2)) / len(words1.union(words2)))

    def text_hash(self):
        """Order-independent base64 SHA-512 digest of the two sentence texts."""
        (t1, t2) = (self.sent1.text(), self.sent2.text())
        (str_min, str_max) = ((t1, t2) if (t1 <= t2) else (t2, t1))
        digest = hashlib.sha512((str_min + str_max).encode('utf-8')).digest()
        return base64.b64encode(digest).decode('ascii')

    def to_dict(self):
        """Serialize to a plain dict; metadata keys are included only when set."""
        result = {
            'sent1': self.sent1.text(),
            'sent2': self.sent2.text(),
            'score': (asdict(self.score) if isinstance(self.score, DetailedScore) else self.score),
            'overlap': self.word_overlap(),
            'hash': self.text_hash(),
        }
        if self.method:
            result['method'] = self.method
        if self.source:
            result['source'] = self.source
        if self.label:
            result['label'] = self.label
        return result

    def to_tuple(self):
        """Return [sent1, sent2, score, overlap] as a list."""
        return [self.sent1.text(), self.sent2.text(), self.score, self.word_overlap()]

    def to_json(self) -> str:
        """JSON-encode to_dict() without escaping non-ASCII characters."""
        return json.dumps(self.to_dict(), ensure_ascii=False)

    def to_tsv(self) -> str:
        """Tab-separated rendering of to_tuple()."""
        return '\t'.join((str(val) for val in self.to_tuple()))

    def formatted(self, format):
        """Render as 'json' or 'tsv'; unknown formats raise KeyError."""
        renderers = {'json': self.to_json, 'tsv': self.to_tsv}
        return renderers[format]()
class GPLayer(tfp.layers.DistributionLambda):
    """Sparse-variational GP as a Keras layer producing a distribution.

    Wraps a multioutput kernel and inducing variables with variational
    parameters ``q_mu``/``q_sqrt``; calling the layer yields a TFP
    distribution over function values, and during training adds the
    KL(q || p) (scaled per datapoint) as a layer loss.
    """
    # Number of training datapoints, used to scale the KL term.
    num_data: int
    # If True, the variational distribution is whitened.
    whiten: bool
    # Number of samples drawn when converting the distribution to a tensor.
    num_samples: Optional[int]
    # Whether to compute full covariance over inputs / over outputs.
    full_cov: bool
    full_output_cov: bool
    # Variational mean and (triangular) covariance factor.
    q_mu: Parameter
    q_sqrt: Parameter

    def __init__(self, kernel: MultioutputKernel, inducing_variable: MultioutputInducingVariables, num_data: int, mean_function: Optional[MeanFunction]=None, *, num_samples: Optional[int]=None, full_cov: bool=False, full_output_cov: bool=False, num_latent_gps: int=None, whiten: bool=True, name: Optional[str]=None, verbose: bool=True):
        super().__init__(make_distribution_fn=self._make_distribution_fn, convert_to_tensor_fn=self._convert_to_tensor_fn, dtype=default_float(), name=name)
        self.kernel = kernel
        self.inducing_variable = inducing_variable
        self.num_data = num_data
        # Default mean function is the identity (requires matching dims).
        if (mean_function is None):
            mean_function = Identity()
            if verbose:
                warnings.warn('Beware, no mean function was specified in the construction of the `GPLayer` so the default `gpflow.mean_functions.Identity` is being used. This mean function will only work if the input dimensionality matches the number of latent Gaussian processes in the layer.')
        self.mean_function = mean_function
        self.full_output_cov = full_output_cov
        self.full_cov = full_cov
        self.whiten = whiten
        self.verbose = verbose
        # Infer shapes from the components; fall back to the explicit
        # num_latent_gps when the components cannot be verified compatible.
        try:
            (num_inducing, self.num_latent_gps) = verify_compatibility(kernel, mean_function, inducing_variable)
        except GPLayerIncompatibilityException as e:
            if (num_latent_gps is None):
                raise e
            if verbose:
                warnings.warn(f'Could not verify the compatibility of the `kernel`, `inducing_variable` and `mean_function`. We advise using `gpflux.helpers.construct_*` to create compatible kernels and inducing variables. As `num_latent_gps={num_latent_gps}` has been specified explicitly, this will be used to create the `q_mu` and `q_sqrt` parameters.')
            (num_inducing, self.num_latent_gps) = (inducing_variable.num_inducing, num_latent_gps)
        # Variational posterior initialized at the (whitened) prior:
        # zero mean, identity covariance factor per latent GP.
        self.q_mu = Parameter(np.zeros((num_inducing, self.num_latent_gps)), dtype=default_float(), name=(f'{self.name}_q_mu' if self.name else 'q_mu'))
        self.q_sqrt = Parameter(np.stack([np.eye(num_inducing) for _ in range(self.num_latent_gps)]), transform=triangular(), dtype=default_float(), name=(f'{self.name}_q_sqrt' if self.name else 'q_sqrt'))
        self.num_samples = num_samples

    def predict(self, inputs: TensorType, *, full_cov: bool=False, full_output_cov: bool=False) -> Tuple[(tf.Tensor, tf.Tensor)]:
        """Return the conditional mean (plus mean function) and covariance."""
        mean_function = self.mean_function(inputs)
        (mean_cond, cov) = conditional(inputs, self.inducing_variable, self.kernel, self.q_mu, q_sqrt=self.q_sqrt, full_cov=full_cov, full_output_cov=full_output_cov, white=self.whiten)
        return ((mean_cond + mean_function), cov)

    def call(self, inputs: TensorType, *args: List[Any], **kwargs: Dict[(str, Any)]) -> tf.Tensor:
        """Forward pass; during training also adds the per-datapoint KL loss."""
        outputs = super().call(inputs, *args, **kwargs)
        if kwargs.get('training'):
            # Include kernel hyperprior densities in the training loss.
            log_prior = tf.add_n([p.log_prior_density() for p in self.kernel.trainable_parameters])
            loss = (self.prior_kl() - log_prior)
            loss_per_datapoint = (loss / self.num_data)
        else:
            # At prediction time the KL does not apply; keep metrics wired up
            # with a zero placeholder.
            loss_per_datapoint = tf.constant(0.0, dtype=default_float())
        self.add_loss(loss_per_datapoint)
        name = (f'{self.name}_prior_kl' if self.name else 'prior_kl')
        self.add_metric(loss_per_datapoint, name=name, aggregation='mean')
        return outputs

    def prior_kl(self) -> tf.Tensor:
        """KL divergence between the variational posterior and the prior."""
        return prior_kl(self.inducing_variable, self.kernel, self.q_mu, self.q_sqrt, whiten=self.whiten)

    def _make_distribution_fn(self, previous_layer_outputs: TensorType) -> tfp.distributions.Distribution:
        """Build the TFP distribution matching the configured covariance mode."""
        (mean, cov) = self.predict(previous_layer_outputs, full_cov=self.full_cov, full_output_cov=self.full_output_cov)
        if (self.full_cov and (not self.full_output_cov)):
            # Full covariance across inputs: transpose so events are per-output.
            return tfp.distributions.MultivariateNormalTriL(loc=tf.linalg.adjoint(mean), scale_tril=_cholesky_with_jitter(cov))
        elif (self.full_output_cov and (not self.full_cov)):
            return tfp.distributions.MultivariateNormalTriL(loc=mean, scale_tril=_cholesky_with_jitter(cov))
        elif ((not self.full_cov) and (not self.full_output_cov)):
            return tfp.distributions.MultivariateNormalDiag(loc=mean, scale_diag=tf.sqrt(cov))
        else:
            raise NotImplementedError('The combination of both `full_cov` and `full_output_cov` is not permitted.')

    def _convert_to_tensor_fn(self, distribution: tfp.distributions.Distribution) -> tf.Tensor:
        """Draw sample(s) from the distribution (num_samples leading dim if set)."""
        if (self.num_samples is not None):
            samples = distribution.sample((self.num_samples,))
        else:
            samples = distribution.sample()
        # Undo the transpose applied in _make_distribution_fn for full_cov.
        if self.full_cov:
            samples = tf.linalg.adjoint(samples)
        return samples

    def sample(self) -> Sample:
        """Return an efficient (consistent) function-space sample of the posterior."""
        return (efficient_sample(self.inducing_variable, self.kernel, self.q_mu, q_sqrt=self.q_sqrt, whiten=self.whiten) + self.mean_function)
def run_highres(model_name):
    """Train/test the higher-resolution model, then run inference on Jan 2001."""
    from configs.higher_res.higher_res_config import experiment_params, data_params, model_params
    train_test(experiment_params=experiment_params, data_params=data_params, model_params=model_params)
    exp_dir = 'results'
    # One month of test data, forecasting 5 steps ahead on the last dim.
    inference_params = dict(
        model_name=model_name,
        start_date_str='01-01-2001',
        end_date_str='02-01-2001',
        test_data_folder='data/data_dump',
        exp_dir=exp_dir,
        exp_num=get_exp_count(model_name, result_dir=exp_dir),
        forecast_horizon=5,
        selected_dim=(- 1),
    )
    inference_on_test(dataset_type='highres', device=experiment_params['device'], **inference_params)
def xpos_vocab_factory(data, shorthand):
    """Build the xpos vocab for ``shorthand``, choosing the factory description.

    For a known dataset the recorded description is cross-checked against what
    the data suggests (logging an error on mismatch); for an unknown dataset
    the choice is logged as a warning.
    """
    known_dataset = shorthand in XPOS_DESCRIPTIONS
    if not known_dataset:
        logger.warning('%s is not a known dataset. Examining the data to choose which xpos vocab to use', shorthand)
    desc = choose_simplest_factory(data, shorthand)
    if not known_dataset:
        logger.warning('Chose %s for the xpos factory for %s', desc, shorthand)
    elif XPOS_DESCRIPTIONS[shorthand] != desc:
        logger.error('XPOS tagset in %s has apparently changed! Was %s, is now %s', shorthand, XPOS_DESCRIPTIONS[shorthand], desc)
    return build_xpos_vocab(desc, data, shorthand)
def main():
    """Load prediction and ground-truth dataframes and write F1 metrics to the
    output file given on the command line."""
    args = parse_args(sys.argv[1:])
    predicted = load_dataframe(args.predictions)
    gold = load_dataframe(args.ground_truth)
    with open(args.metrics, 'w') as metrics_file:
        compute_f1(gold, predicted, metrics_file)
def main():
    """Run the Shortest-PIC demo loop: initialize fields, then each frame run
    the substeps, refresh the marker positions and draw them."""
    initialize()
    gui = ti.GUI('Shortest PIC', (800, 800))
    while not gui.get_event(ti.GUI.ESCAPE, ti.GUI.EXIT):
        for _ in range(substepping):
            substep()
        vx_pos()
        gui.circles(v_x_pos1.to_numpy(), color=255, radius=2)
        # BUG FIX: this call had an empty ``color=`` argument (syntax error).
        # 0xFF0000 is a stand-in to make the two marker sets distinguishable;
        # TODO(review): confirm the originally intended colour.
        gui.circles(v_x_pos2.to_numpy(), color=0xFF0000, radius=2)
        gui.show()
def clip_grad_norm(parameters: _tensor_or_tensors, max_norm: float, norm_type: float=2.0) -> torch.Tensor:
    """Deprecated alias of :func:`clip_grad_norm_`.

    Emits a deprecation warning, then delegates unchanged to the trailing-
    underscore (in-place convention) implementation.
    """
    warnings.warn(
        'torch.nn.utils.clip_grad_norm is now deprecated in favor of torch.nn.utils.clip_grad_norm_.',
        stacklevel=2)
    return clip_grad_norm_(parameters, max_norm, norm_type)
def copy_to_field(pixels: ti.types.ndarray(ndim=2)):
    """Copy a 2-D ndarray into the global Taichi field ``img`` element-wise.

    NOTE(review): the ``ti.types.ndarray`` annotation suggests this was a
    ``@ti.kernel``; the decorator may have been lost in transcription —
    confirm against the original file.
    """
    for I in ti.grouped(pixels):
        img[I] = pixels[I]
def create_control_panel(state) -> html.Div:
    """Build the prediction tab's control card: a multi-select plot dropdown
    and a figures-per-row dropdown, both initialised from ``state``."""
    plot_options = [{'label': s, 'value': s} for s in state.get_plots('prediction')]
    plot_selector = html.Div(
        id='select-plots-parent-prediction',
        children=[dcc.Dropdown(
            id='select-plots-prediction',
            options=plot_options,
            value=state.get_display_plots('prediction'),
            multi=True,
            style={'width': '350px'})])
    figures_per_row_selector = dcc.Dropdown(
        id='select-num-figures-prediction',
        options=[{'label': '1', 'value': '1'}, {'label': '2', 'value': '2'}],
        value=str(state.get_num_figures_per_row('prediction')),
        style={'width': '350px'})
    return html.Div(id='control-card', children=[
        html.Br(),
        html.P('Plots'),
        plot_selector,
        html.Br(),
        html.P('Number of figures per row'),
        figures_per_row_selector])
class Partition13(nn.Module):
    """Auto-generated pipeline-parallel partition #13 of a T5 model.

    Holds decoder blocks 18-20 on one device and threads six flattened
    tensors through them. The lookup table maps local module names back to
    their scopes in the original model.
    """
    # Scopes (in the original model) of the layers owned by this partition.
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]']
    # No standalone parameters/buffers assigned to this partition.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:13'):
        """Register this partition's layers (and any tensors) and move to device.

        ``layers``/``tensors`` are dicts keyed by original-model scope.
        """
        super().__init__()
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Six scalar (non-nested) inputs expected by forward().
        self.input_structure = [1, 1, 1, 1, 1, 1]
        # Local name -> original-model scope, for checkpoint key translation.
        self.lookup = {'l_0': 'decoder.block.18', 'l_1': 'decoder.block.19', 'l_2': 'decoder.block.20'}
        self.to(self.device)

    def forward(self, *args):
        """Run the three decoder blocks in sequence.

        Inputs (after unflattening): x0=encoder hidden states, x1=decoder
        attention mask, x2=encoder attention mask, x3=hidden states,
        x4=position bias, x5=encoder-decoder position bias — presumably
        matching T5Block's signature; confirm against the generating model.
        Each block's output tuple appears to be (hidden_states, ...,
        position_bias, encoder_decoder_position_bias), which is re-threaded
        into the next block.
        """
        (x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
        t_0 = self.l_0(x3, attention_mask=x1, position_bias=x4, encoder_attention_mask=x2, encoder_decoder_position_bias=x5, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
        t_1 = t_0[slice(None, 2, None)]
        t_1 = t_1[0]  # hidden states out of block 18
        t_2 = t_0[2]
        t_0 = t_0[3]
        t_0 = self.l_1(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
        t_2 = t_0[slice(None, 2, None)]
        t_2 = t_2[0]  # hidden states out of block 19
        t_1 = t_0[2]
        t_0 = t_0[3]
        t_0 = self.l_2(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
        t_1 = t_0[slice(None, 2, None)]
        t_1 = t_1[0]  # hidden states out of block 20
        t_2 = t_0[2]
        t_0 = t_0[3]
        # Pass along unchanged inputs plus this partition's three outputs.
        return list(flatten((x0, x1, x2, t_1, t_2, t_0)))

    # The methods below delegate to module-level helpers (which shadow the
    # nn.Module names) so checkpoint keys are translated via ``self.lookup``.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
# FIX: the decorator was garbled to a bare ``('/csource', methods=['POST'])``
# expression (a syntax error). Restored as a Flask-style route registration;
# NOTE(review): confirm the app/blueprint object name in the original file.
@app.route('/csource', methods=['POST'])
def getNotifiedLD_csource():
    """Accept a POST notification on /csource, echo the JSON payload to
    stdout and acknowledge."""
    data = request.get_json()
    print(data)
    return 'Done'
class TensorProductOfHighestWeightModules(QuantumGroupModule):
    """Tensor product of highest-weight modules of a quantum group, backed by
    GAP's QuaGroup package through libgap."""

    def __init__(self, *modules, **options):
        """Construct the tensor product of ``modules`` (all over the same
        quantum group, taken from the first factor)."""
        Q = modules[0]._Q
        self._modules = tuple(modules)
        self._libgap = libgap.TensorProductOfAlgebraModules([m._libgap for m in modules])
        cat = Modules(Q.base_ring()).TensorProducts().FiniteDimensional().WithBasis()
        QuantumGroupModule.__init__(self, Q, category=cat)

    def _repr_(self):
        """Plain-text representation: factors joined by ' # '."""
        return ' # '.join((repr(M) for M in self._modules))

    def _latex_(self):
        """LaTeX representation: factors joined by ``\\otimes``."""
        from sage.misc.latex import latex
        return ' \\otimes '.join((latex(M) for M in self._modules))

    # FIX: restored from garbled "_attribute" residue. A lazy attribute is
    # consistent with the call-free accesses ``self._highest_weights_and_vectors[1]``
    # below; confirm the decorator name against the original file.
    @lazy_attribute
    def _highest_weights_and_vectors(self):
        """GAP pair (highest weights, corresponding vectors), computed once."""
        return self._libgap.HighestWeightsAndVectors()

    def highest_weight_vectors(self):
        """Return the highest weight vectors as elements of ``self``."""
        return [self.element_class(self, v) for vecs in self._highest_weights_and_vectors[1] for v in vecs]

    some_elements = highest_weight_vectors

    def _an_element_(self):
        """Return a distinguished element (the first highest weight vector)."""
        return self.highest_weight_vectors()[0]

    # FIX: restored from garbled "_method" residue; caching matches the
    # expensive submodule construction. Confirm against the original file.
    @cached_method
    def highest_weight_decomposition(self):
        """Decompose ``self`` into highest weight submodules."""
        return [HighestWeightSubmodule(self, self.element_class(self, v), tuple(wt.sage())) for (wt, vecs) in zip(*self._highest_weights_and_vectors) for v in vecs]

    def tensor_factors(self):
        """Return the tuple of tensor factors."""
        return self._modules

    Element = QuaGroupRepresentationElement
# FIX: restored from garbled "_model" residue — this matches timm's
# ``@register_model`` factory convention; confirm against the original file.
@register_model
def gluon_resnet152_v1s(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNet-152-v1s (deep stem, stem_width 64) model.

    Args:
        pretrained: load pretrained weights per the default config.
        num_classes: number of classifier outputs.
        in_chans: number of input image channels.
    """
    default_cfg = default_cfgs['gluon_resnet152_v1s']
    model = ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes, in_chans=in_chans, stem_width=64, stem_type='deep', **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
# FIX: the decorator was garbled to a bare ".parametrize(...)" (syntax error);
# restored as the standard pytest parametrization.
@pytest.mark.parametrize('knne, expected', [(False, 0.), (True, 0.)])
def test_desp(knne, expected):
    """DES-P with dynamic frienemy pruning should reach the expected score
    with and without k-NN-equality (knne)."""
    (pool_classifiers, X_dsel, y_dsel, X_test, y_test) = setup_classifiers()
    desp = DESP(pool_classifiers, DFP=True, knne=knne)
    desp.fit(X_dsel, y_dsel)
    assert np.isclose(desp.score(X_test, y_test), expected)
def test_trident_resnet_backbone():
    """Smoke-test TridentResNet: invalid depth/stage configs must assert,
    and a valid 3-branch model must produce one feature map of the
    expected shape."""
    tridentresnet_config = dict(num_branch=3, test_branch_idx=1, strides=(1, 2, 2), dilations=(1, 1, 1), trident_dilations=(1, 2, 3), out_indices=(2,))
    # depth 18 is not supported by TridentResNet
    with pytest.raises(AssertionError):
        TridentResNet(18, **tridentresnet_config)
    # num_stages must be 3
    with pytest.raises(AssertionError):
        TridentResNet(50, num_stages=4, **tridentresnet_config)
    model = TridentResNet(50, num_stages=3, **tridentresnet_config)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    # single out_index -> one feature level; batch is tripled by the branches
    assert (len(feat) == 1)
    assert (feat[0].shape == torch.Size([3, 1024, 2, 2]))
def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, need_complete=True):
    """Interpret and validate the options of a buffer type declaration.

    Merges positional and keyword buffer options (dtype, ndim, mode, ...)
    with ``defaults``, raising CompileError on unknown, duplicate, missing
    or ill-typed options. Returns the resolved options dict.
    """
    if (defaults is None):
        defaults = buffer_defaults
    (posargs, dictargs) = Interpreter.interpret_compiletime_options(posargs, dictargs, type_env=env, type_args=(0, 'dtype'))
    if (len(posargs) > buffer_positional_options_count):
        raise CompileError(posargs[(- 1)][1], ERR_BUF_TOO_MANY)
    options = {}
    for (name, (value, pos)) in dictargs.items():
        if (not (name in buffer_options)):
            raise CompileError(pos, (ERR_BUF_OPTION_UNKNOWN % name))
        options[name] = value
    # Positional args are matched to option names by position; a positional
    # value duplicating a keyword one is an error.
    for (name, (value, pos)) in zip(buffer_options, posargs):
        if (not (name in buffer_options)):
            raise CompileError(pos, (ERR_BUF_OPTION_UNKNOWN % name))
        if (name in options):
            raise CompileError(pos, (ERR_BUF_DUP % name))
        options[name] = value
    # Fill in defaults; a missing option without default is fatal only when a
    # complete set of options is required.
    for name in buffer_options:
        if (not (name in options)):
            try:
                options[name] = defaults[name]
            except KeyError:
                if need_complete:
                    raise CompileError(globalpos, (ERR_BUF_MISSING % name))
    dtype = options.get('dtype')
    if (dtype and dtype.is_extension_type):
        raise CompileError(globalpos, ERR_BUF_DTYPE)
    ndim = options.get('ndim')
    if (ndim and ((not isinstance(ndim, int)) or (ndim < 0))):
        raise CompileError(globalpos, ERR_BUF_NDIM)
    mode = options.get('mode')
    if (mode and (not (mode in ('full', 'strided', 'c', 'fortran')))):
        raise CompileError(globalpos, ERR_BUF_MODE)

    def assert_bool(name):
        # Local validator: the named option must be a literal bool.
        x = options.get(name)
        if (not isinstance(x, bool)):
            raise CompileError(globalpos, (ERR_BUF_BOOL % name))
    assert_bool('negative_indices')
    assert_bool('cast')
    return options
class Polynomial_generic_cdv(Polynomial_generic_domain):
    """Polynomials over complete discrete valuation rings/fields.

    Provides Newton polygon machinery, Hensel lifting and slope
    factorization / root finding based on coefficient valuations.
    """

    def newton_slopes(self, repetition=True):
        """Return the slopes of the Newton polygon, negated so they give the
        valuations of the roots."""
        polygon = self.newton_polygon()
        return [(- s) for s in polygon.slopes(repetition=repetition)]

    def newton_polygon(self):
        """Return the Newton polygon of this polynomial.

        Raises PrecisionError when the precision of the coefficients is not
        sufficient to determine the polygon.
        """
        d = self.degree()
        from sage.geometry.newton_polygon import NewtonPolygon
        polygon = NewtonPolygon([(x, self[x].valuation()) for x in range((d + 1))])
        # A second polygon built from absolute precisions bounds what we can
        # actually certify about the valuation polygon.
        polygon_prec = NewtonPolygon([(x, self[x].precision_absolute()) for x in range((d + 1))])
        vertices = polygon.vertices(copy=False)
        vertices_prec = polygon_prec.vertices(copy=False)
        if (len(vertices_prec) > 0):
            if (vertices[0][0] > vertices_prec[0][0]):
                raise PrecisionError('first term with non-infinite valuation must have determined valuation')
            elif (vertices[(- 1)][0] < vertices_prec[(- 1)][0]):
                raise PrecisionError('last term with non-infinite valuation must have determined valuation')
            else:
                for (x, y) in vertices:
                    if (polygon_prec(x) <= y):
                        raise PrecisionError(('The coefficient of %s^%s has not enough precision' % (self.parent().variable_name(), x)))
        return polygon

    def hensel_lift(self, a):
        """Lift ``a`` to a root of this polynomial by Newton iteration.

        Requires val(f(a)) > 2*val(f'(a)); otherwise raises ValueError.
        """
        selfa = self(a)
        der = self.derivative()
        dera = der(a)
        if (selfa.valuation() <= (2 * dera.valuation())):
            raise ValueError('a is not close enough to a root of this polynomial')
        # b approximates 1/f'(a) and is refined alongside a (quadratic
        # convergence of both iterations).
        b = (~ dera)
        while True:
            na = (a - (selfa * b))
            if (na == a):
                return a
            a = na
            selfa = self(a)
            dera = der(a)
            b *= (2 - (dera * b))

    def _factor_of_degree(self, deg):
        """Return the monic-by-construction factor of this polynomial whose
        Newton polygon is the first ``deg`` slopes (lifted iteratively)."""
        coeffs = self.list()
        a = coeffs[:(deg + 1)]
        # Cap an exact leading coefficient so the iteration works at finite
        # precision.
        if (a[deg].precision_absolute() is Infinity):
            a[deg] = a[deg].add_bigoh(self.base_ring().default_prec())
        parent = self.parent()
        a = parent(a)
        # v approximates the inverse of the cofactor modulo a.
        v = parent.one()
        x = (self % a)
        while (not x.is_zero()):
            a += ((v * x) % a)
            (b, x) = self.quo_rem(a)
            b %= a
            v = ((v * (2 - (b * v))) % a)
        return a

    def factor_of_slope(self, slope=None):
        """Return the (monic) factor of this polynomial whose roots have the
        given slope on the Newton polygon (first slope when None)."""
        one = self.parent()(1)
        vertices = self.newton_polygon().vertices(copy=False)
        if (len(vertices) < 2):
            # Degenerate polygon: only the slope-infinity factor x^deg exists.
            if (slope is Infinity):
                return (self.parent().gen() ** self.degree())
            else:
                return one
        if (slope is None):
            deg_first = vertices[0][0]
            deg_last = vertices[1][0]
        else:
            # Scan the polygon edges for the requested slope.
            (deg_first, y_first) = vertices[0]
            for i in range(1, len(vertices)):
                (deg_last, y_last) = vertices[i]
                slope_cur = ((y_first - y_last) / (deg_last - deg_first))
                if (slope_cur == slope):
                    break
                elif (slope_cur < slope):
                    return one
                deg_first = deg_last
                y_first = y_last
            if (slope_cur > slope):
                return one
        if (deg_last == self.degree()):
            div = self
        else:
            div = self._factor_of_degree(deg_last)
        if (deg_first > 0):
            # Strip off the lower-degree (steeper-slope) part.
            div2 = div._factor_of_degree(deg_first)
            (div, _) = div.quo_rem(div2)
        return div.monic()

    def slope_factorization(self):
        """Factor this polynomial into one factor per Newton polygon slope,
        returned as a Factorization with the leading coefficient as unit."""
        vertices = self.newton_polygon().vertices(copy=False)
        unit = self.leading_coefficient()
        P = ((~ unit) * self)
        deg_first = vertices[0][0]
        factors = []
        if (deg_first > 0):
            # Slope-infinity part: a power of the variable.
            P >>= deg_first
            factors.append((self._parent.gen(), deg_first))
        if (len(vertices) > 2):
            for i in range(1, (len(vertices) - 1)):
                deg = vertices[i][0]
                div = P._factor_of_degree((deg - deg_first))
                factors.append((div, 1))
                (P, _) = P.quo_rem(div)
                deg_first = deg
        if (len(vertices) > 1):
            factors.append((P, 1))
        factors.reverse()
        return Factorization(factors, sort=False, unit=unit)

    def _roots(self, secure, minval, hint):
        """Return the roots of valuation >= ``minval`` with multiplicities.

        ``hint`` optionally carries candidate residues for the slope equal to
        ``minval``; ``secure`` makes undetermined multiplicities an error.
        """
        from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
        K = self.base_ring()
        Pk = PolynomialRing(K.residue_field(), names='xbar')
        x = self.parent().gen()
        if (self.degree() == 0):
            return []
        if (self.degree() == 1):
            return [(((- self[0]) / self[1]), 1)]
        # Count the zero root's multiplicity from trailing zero coefficients.
        i = 0
        while (self[i] == 0):
            i += 1
        if (secure and (i > 1)):
            raise PrecisionError('not enough precision to determine the number of roots')
        if (i == 0):
            roots = []
            P = self
        else:
            vali = self[i].valuation()
            prec = min((((self[j].precision_absolute() - vali) / (i - j)) for j in range(i)))
            if (prec is not Infinity):
                prec = prec.ceil()
            roots = [(K(0, prec), i)]
            P = (self // self[:(i + 1)])
        # Walk the Newton polygon edge by edge; each integral slope >= minval
        # can contribute roots of that valuation.
        vertices = P.newton_polygon().vertices(copy=False)
        deg = 0
        for i in range(1, len(vertices)):
            (deg_left, val_left) = vertices[(i - 1)]
            (deg_right, val_right) = vertices[i]
            slope = ((val_right - val_left) / (deg_left - deg_right))
            if ((slope not in ZZ) or (slope < minval)):
                continue
            if ((hint is not None) and (slope == minval)):
                rootsbar = hint
                if (not rootsbar):
                    continue
            if (i < (len(vertices) - 1)):
                F = P._factor_of_degree((deg_right - deg))
                P = (P // F)
            else:
                F = P
            if (deg < deg_left):
                G = F._factor_of_degree((deg_left - deg))
                F //= G
            deg = deg_right
            val = F[0].valuation()
            if ((hint is None) or (slope != minval)):
                # Reduce the slope factor to the residue field and find its
                # residual roots.
                Fbar = Pk([(F[j] >> (val - (j * slope))) for j in range((F.degree() + 1))])
                rootsbar = [r for (r, _) in Fbar.roots()]
                if (not rootsbar):
                    continue
            rbar = rootsbar.pop()
            shift = (K(rbar).lift_to_precision() << slope)
            # Recurse on the shifted polynomial to refine each residual root.
            roots += [((r + shift), m) for (r, m) in F((x + shift))._roots(secure, slope, [(r - rbar) for r in rootsbar])]
        return roots
# FIX: restored from garbled "_function_dispatch(...)" residue — this is
# numpy's __array_function__ dispatch registration.
@array_function_dispatch(_take_along_axis_dispatcher)
def take_along_axis(arr, indices, axis):
    """Take values from ``arr`` by matching 1-D index slices along ``axis``.

    With ``axis=None`` the array is flattened first (indices then index the
    flat iterator); otherwise the axis is normalized and a fancy-index tuple
    built by ``_make_along_axis_idx`` selects the elements.
    """
    if (axis is None):
        arr = arr.flat
        arr_shape = (len(arr),)  # flatiter has no .shape
        axis = 0
    else:
        axis = normalize_axis_index(axis, arr.ndim)
        arr_shape = arr.shape
    return arr[_make_along_axis_idx(arr_shape, indices, axis)]
class CorrelationTest(tf.test.TestCase):
    """Tests for the custom ``correlation`` op: forward values and gradients."""

    def _parse_obs_unused(self):
        pass
    def _test_correlation(self, in0, in1, out=None, **kwargs):
        """Run correlation(in0, in1) on GPU; check values against ``out`` when
        given, and always check analytic vs numeric gradients."""
        with self.test_session(use_gpu=True) as sess:
            in0_op = tf.constant(in0, tf.float32)
            in1_op = tf.constant(in1, tf.float32)
            result_op = ops.correlation(in0_op, in1_op, **kwargs)
            result = sess.run(result_op)
            if (out is not None):
                self.assertAllClose(out, result)
            (jacob_t, jacob_n) = gradient_checker.compute_gradient([in0_op, in1_op], [in0.shape, in1.shape], result_op, result.shape)
            self.assertAllClose(jacob_t, jacob_n, 0.001, 0.001)

    def test_correlation_trivial(self):
        """Identical inputs, 1x1 kernel, no displacement: result is the square."""
        first = [[1, 1, 2, 2], [0, 0, 2, 2], [3, 3, 4, 4], [3, 3, 2, 2]]
        second = [[1, 1, 2, 2], [0, 0, 2, 2], [3, 3, 4, 4], [3, 3, 2, 2]]
        first = np.reshape(first, [1, 1, 4, 4])
        second = np.reshape(second, [1, 1, 4, 4])
        expected = np.square(first)
        self._test_correlation(first, second, expected, kernel_size=1, stride_2=1, max_displacement=0, pad=0)

    def test_correlation_batch(self):
        """Same as the trivial case but batched (batch dim of 2)."""
        first = [[1, 1, 2, 2], [0, 0, 2, 2], [3, 3, 4, 4], [3, 3, 2, 2]]
        second = [[1, 1, 2, 2], [0, 0, 2, 2], [3, 3, 4, 4], [3, 3, 2, 2]]
        first = np.reshape(first, [1, 1, 4, 4])
        second = np.reshape(second, [1, 1, 4, 4])
        expected = np.square(first)
        self._test_correlation(np.concatenate([first, first], 0), np.concatenate([second, second], 0), np.concatenate([expected, expected], 0), kernel_size=1, stride_2=1, max_displacement=0, pad=0)

    def test_correlation_channels(self):
        # Not implemented yet.
        pass

    def test_correlation_3x3(self):
        # NOTE(review): this early return disables the test (gradient check
        # only, no value check) — presumably a deliberate skip; confirm.
        return
        first = [[1, 1, 3], [0, 0, 1], [2, 2, 0.2]]
        second = [[1, 2, 0.1], [3, 4, 2.2], [4, 5, 1.6]]
        first = np.reshape(first, [1, 1, 3, 3])
        second = np.reshape(second, [1, 1, 3, 3])
        self._test_correlation(first, second, None, kernel_size=3, stride_2=1, max_displacement=1, pad=2)
class SawyerHandInsertV2Policy(Policy):
    """Scripted expert policy for the Meta-World hand-insert-v2 task:
    approach the object from above, grasp it, carry it over the goal."""

    # FIX: restored @staticmethod (the def takes no ``self`` yet is invoked as
    # ``self._parse_obs(obs)``, which would raise TypeError without it) and
    # the ``@assert_fully_parsed`` decorator from the garbled "_fully_parsed"
    # residue; confirm the decorator name against the original file.
    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        """Split the flat observation vector into named components."""
        return {'hand_pos': obs[:3], 'obj_pos': obs[3:6], 'goal_pos': obs[9:], 'unused_info': obs[6:9]}

    def get_action(self, obs):
        """Compute the 4-D action (xyz delta + gripper effort) for ``obs``."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.0)
        action['grab_effort'] = self._grab_effort(o_d)
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        """Waypoint logic: hover above object, descend, move over goal, insert."""
        hand_pos = o_d['hand_pos']
        obj_pos = o_d['obj_pos']
        goal_pos = o_d['goal_pos']
        if (np.linalg.norm((hand_pos[:2] - obj_pos[:2])) > 0.02):
            return (obj_pos + np.array([0.0, 0.0, 0.1]))
        elif (abs((hand_pos[2] - obj_pos[2])) > 0.05):
            return (obj_pos + np.array([0.0, 0.0, 0.03]))
        elif (np.linalg.norm((hand_pos[:2] - goal_pos[:2])) > 0.04):
            return np.array([goal_pos[0], goal_pos[1], hand_pos[2]])
        else:
            return goal_pos

    @staticmethod
    def _grab_effort(o_d):
        """Close the gripper (0.65) once the hand is aligned with the object."""
        hand_pos = o_d['hand_pos']
        obj_pos = o_d['obj_pos']
        if ((np.linalg.norm((hand_pos[:2] - obj_pos[:2])) > 0.02) or (abs((hand_pos[2] - obj_pos[2])) > 0.1)):
            return 0.0
        else:
            return 0.65
def create_default_splits(n, object_class, is_train=True):
    """Build the canonical (train, test) Dataset pair for ``object_class``
    using the standard id split."""
    train_ids, test_ids = all_ids(object_class)
    train_split = Dataset(train_ids, n, object_class, name='train', is_train=is_train)
    test_split = Dataset(test_ids, n, object_class, name='test', is_train=is_train)
    return (train_split, test_split)
def resolve_data_config(args, default_cfg=None, model=None, verbose=True):
    """Resolve the input/preprocessing configuration for a model + dataset.

    Precedence for each field: explicit (non-None) value in ``args``, then
    ``default_cfg`` (or ``model.default_cfg``), then library defaults.

    Args:
        args: mapping of user overrides ('chans', 'input_size', 'img_size',
            'interpolation', 'mean', 'std', 'crop_pct'); None values ignored.
        default_cfg: optional model default config. FIX: default changed from
            a mutable ``{}`` literal to ``None`` (behaviour unchanged — the
            dict was never mutated — but mutable defaults are a footgun).
        model: optional model providing ``default_cfg`` when none is given.
        verbose: log the resolved configuration.

    Returns:
        dict with keys 'input_size', 'interpolation', 'mean', 'std',
        'crop_pct'.
    """
    new_config = {}
    default_cfg = default_cfg or {}
    if ((not default_cfg) and (model is not None) and hasattr(model, 'default_cfg')):
        default_cfg = model.default_cfg
    # Resolve input size: explicit size > img_size > model default > 224.
    in_chans = 3
    if (('chans' in args) and (args['chans'] is not None)):
        in_chans = args['chans']
    input_size = (in_chans, 224, 224)
    if (('input_size' in args) and (args['input_size'] is not None)):
        assert isinstance(args['input_size'], (tuple, list))
        assert (len(args['input_size']) == 3)
        input_size = tuple(args['input_size'])
        in_chans = input_size[0]  # input_size overrides the chans arg
    elif (('img_size' in args) and (args['img_size'] is not None)):
        assert isinstance(args['img_size'], int)
        input_size = (in_chans, args['img_size'], args['img_size'])
    elif ('input_size' in default_cfg):
        input_size = default_cfg['input_size']
    new_config['input_size'] = input_size
    # Resolve interpolation method.
    new_config['interpolation'] = 'bicubic'
    if (('interpolation' in args) and args['interpolation']):
        new_config['interpolation'] = args['interpolation']
    elif ('interpolation' in default_cfg):
        new_config['interpolation'] = default_cfg['interpolation']
    # Resolve normalization mean; a single value is broadcast to all channels.
    new_config['mean'] = IMAGENET_DEFAULT_MEAN
    if (('mean' in args) and (args['mean'] is not None)):
        mean = tuple(args['mean'])
        if (len(mean) == 1):
            mean = tuple((list(mean) * in_chans))
        else:
            assert (len(mean) == in_chans)
        new_config['mean'] = mean
    elif ('mean' in default_cfg):
        new_config['mean'] = default_cfg['mean']
    # Resolve normalization std, same broadcasting rule as mean.
    new_config['std'] = IMAGENET_DEFAULT_STD
    if (('std' in args) and (args['std'] is not None)):
        std = tuple(args['std'])
        if (len(std) == 1):
            std = tuple((list(std) * in_chans))
        else:
            assert (len(std) == in_chans)
        new_config['std'] = std
    elif ('std' in default_cfg):
        new_config['std'] = default_cfg['std']
    # Resolve the center-crop percentage.
    new_config['crop_pct'] = DEFAULT_CROP_PCT
    if (('crop_pct' in args) and (args['crop_pct'] is not None)):
        new_config['crop_pct'] = args['crop_pct']
    elif ('crop_pct' in default_cfg):
        new_config['crop_pct'] = default_cfg['crop_pct']
    if verbose:
        logging.info('Data processing configuration for current model + dataset:')
        for (n, v) in new_config.items():
            # lazy %-style args avoid formatting when the level is disabled
            logging.info('\t%s: %s', n, str(v))
    return new_config
class TypeClassProperty(Property):
    """A ``Property`` whose values are dace ``typeclass`` instances, with
    string and JSON (de)serialization."""

    def __get__(self, obj, objtype=None) -> typeclass:
        return super().__get__(obj, objtype)

    # NOTE(review): restored as a property (and the three no-self methods
    # below as staticmethods) — the decorators appear stripped in this copy;
    # confirm against the original file.
    @property
    def dtype(self):
        """The Python type of this property's values."""
        return typeclass

    @staticmethod
    def from_string(s):
        """Parse a dtype name (e.g. 'float32') into a dace typeclass, or raise
        ValueError when the name does not resolve to one."""
        dtype = pydoc.locate('dace.dtypes.{}'.format(s))
        if ((dtype is None) or (not isinstance(dtype, dace.dtypes.typeclass))):
            raise ValueError('Not a valid data type: {}'.format(s))
        return dtype

    @staticmethod
    def to_string(obj):
        """Serialize a typeclass value to its string form."""
        return obj.to_string()

    def to_json(self, obj):
        """Serialize a property value to JSON (None passes through)."""
        if (obj is None):
            return None
        return obj.dtype.to_json()

    @staticmethod
    def from_json(obj, context=None):
        """Deserialize from None, a typeclass, a dtype name or a JSON dict."""
        if (obj is None):
            return None
        elif isinstance(obj, typeclass):
            return obj
        elif isinstance(obj, str):
            return TypeClassProperty.from_string(obj)
        elif isinstance(obj, dict):
            return dace.serialize.from_json(obj)
        else:
            raise TypeError('Cannot parse type from: {}'.format(obj))
class TransitionScheme(Enum):
    """Transition schemes for a transition-based constituency parser:
    top-down vs in-order traversal, each plain or with compound/unary
    transition variants."""
    TOP_DOWN = 1
    TOP_DOWN_COMPOUND = 2
    TOP_DOWN_UNARY = 3
    IN_ORDER = 4
    IN_ORDER_COMPOUND = 5
    IN_ORDER_UNARY = 6
class FractionField_generic(ring.Field):
    """The fraction field Frac(R) of an integral domain ``R``."""

    def __init__(self, R, element_class=fraction_field_element.FractionFieldElement, category=QuotientFields()):
        """Create the fraction field of ``R`` with the given element class,
        refining the category by (in)finiteness of the field."""
        self._R = R
        self._element_class = element_class
        cat = category
        if (self in Rings().Infinite()):
            cat = cat.Infinite()
        elif (self in Rings().Finite()):
            cat = cat.Finite()
        Parent.__init__(self, base=R, names=R._names, category=cat)

    def __reduce__(self):
        """Pickle via the FractionField constructor applied to the base ring."""
        return (FractionField, (self._R,))

    def _coerce_map_from_(self, S):
        """Return a coercion map from ``S`` when one exists.

        Handles: the base ring itself, QQ (as Frac(ZZ)), localizations,
        number fields, Laurent polynomial rings, other fraction fields, and
        anything coercing into the base ring.
        """
        from sage.rings.number_field.number_field_base import NumberField
        from sage.rings.polynomial.laurent_polynomial_ring_base import LaurentPolynomialRing_generic
        from sage.rings.rational_field import QQ
        if (S is self._R):
            parent = self._R.Hom(self)
            return parent.__make_element_class__(FractionFieldEmbedding)(self._R, self, category=parent.homset_category())

        def wrapper(x):
            # Convert an element with numerator/denominator into this field.
            return self._element_class(self, x.numerator(), x.denominator())
        if ((S is QQ) and self._R.has_coerce_map_from(ZZ)):
            return CallableConvertMap(S, self, wrapper, parent_as_first_arg=False)
        from sage.rings.localization import Localization
        if isinstance(S, Localization):
            parent = S.Hom(self)
            return parent.__make_element_class__(FractionFieldEmbedding)(S, self, category=parent.homset_category())
        if isinstance(S, NumberField):
            return CallableConvertMap(S, self, self._number_field_to_frac_of_ring_of_integers, parent_as_first_arg=False)
        if (isinstance(S, LaurentPolynomialRing_generic) and self._R.fraction_field().has_coerce_map_from(S.base_ring())):

            def converter(x, y=None):
                # Map a Laurent polynomial (or pair) via its fraction pair.
                if (y is None):
                    return self._element_class(self, *x._fraction_pair())
                (xnum, xden) = x._fraction_pair()
                (ynum, yden) = y._fraction_pair()
                return self._element_class(self, (xnum * yden), (xden * ynum))
            return CallableConvertMap(S, self, converter, parent_as_first_arg=False)
        if (isinstance(S, FractionField_generic) and self._R.has_coerce_map_from(S.ring())):
            return CallableConvertMap(S, self, wrapper, parent_as_first_arg=False)
        if self._R.has_coerce_map_from(S):
            return CallableConvertMap(S, self, self._element_class, parent_as_first_arg=True)
        return None

    def _number_field_to_frac_of_ring_of_integers(self, x):
        """Map a number field element to numerator/denominator form using the
        denominator of its defining polynomial."""
        f = x.polynomial()
        d = f.denominator()
        return self._element_class(self, numerator=(d * x), denominator=d)

    def is_field(self, proof=True):
        """A fraction field is always a field."""
        return True

    def is_finite(self):
        """Frac(R) is finite iff R is (then R is already a finite field)."""
        return self._R.is_finite()

    def base_ring(self):
        """Return the base ring of the underlying ring R."""
        return self._R.base_ring()

    def characteristic(self):
        """The characteristic equals that of R."""
        return self._R.characteristic()

    def _repr_(self):
        return ('Fraction Field of %s' % self._R)

    def _latex_(self):
        return ('\\mathrm{Frac}(%s)' % latex.latex(self._R))

    def _magma_init_(self, magma):
        """Return a Magma construction string for this field."""
        s = ('FieldOfFractions(%s)' % self.ring()._magma_init_(magma))
        return magma._with_names(s, self.variable_names())

    def ring(self):
        """Return the ring R of which this is the fraction field."""
        return self._R

    # FIX: restored from garbled "_method" residue; confirm decorator name
    # against the original file.
    @cached_method
    def is_exact(self):
        """Exactness is inherited from the underlying ring."""
        return self.ring().is_exact()

    def _element_constructor_(self, x, y=None, coerce=True):
        """Construct an element from ``x`` (and optional denominator ``y``),
        accepting ring elements, fractions, strings and pari polynomials."""
        if (isinstance(x, (list, tuple)) and (len(x) == 1)):
            x = x[0]
        if (y is None):
            if (parent(x) is self):
                return x
            ring_one = self.ring().one()
            try:
                return self._element_class(self, x, ring_one, coerce=coerce)
            except (TypeError, ValueError):
                pass
            y = self._element_class(self, ring_one, ring_one, coerce=False, reduce=False)
        else:
            if (parent(x) is self):
                # x is already a fraction: clear its denominator against y.
                y = self(y)
                (x, y) = ((x.numerator() * y.denominator()), (y.numerator() * x.denominator()))
        try:
            return self._element_class(self, x, y, coerce=coerce)
        except (TypeError, ValueError):
            pass
        # Fallbacks: evaluate strings in the generator dictionary.
        if isinstance(x, str):
            from sage.misc.sage_eval import sage_eval
            try:
                x = sage_eval(x, self.gens_dict_recursive())
            except NameError:
                raise TypeError('unable to evaluate {!r} in {}'.format(x, self))
        if isinstance(y, str):
            from sage.misc.sage_eval import sage_eval
            try:
                y = sage_eval(y, self.gens_dict_recursive())
            except NameError:
                raise TypeError('unable to evaluate {!r} in {}'.format(y, self))
        x = py_scalar_to_element(x)
        y = py_scalar_to_element(y)
        from sage.libs.pari.all import pari_gen
        if (isinstance(x, pari_gen) and (x.type() == 't_POL')):
            # Rebuild a pari polynomial coefficient-by-coefficient.
            d = x.poldegree()
            if (d.type() == 't_INFINITY'):
                return self.zero()
            v = self._element_class(self, x.variable(), 1)
            x = sum(((self(x[i]) * (v ** i)) for i in range((d + 1))))

        def resolve_fractions(x, y):
            # Cross-multiply numerators/denominators, trying conversions into
            # either side's parent when the direct products fail.
            xn = x.numerator()
            xd = x.denominator()
            yn = y.numerator()
            yd = y.denominator()
            try:
                return ((xn * yd), (yn * xd))
            except (AttributeError, TypeError, ValueError):
                pass
            try:
                P = parent(yd)
                return ((P(xn) * yd), (yn * P(xd)))
            except (AttributeError, TypeError, ValueError):
                pass
            try:
                P = parent(xd)
                return ((xn * P(yd)), (P(yn) * xd))
            except (AttributeError, TypeError, ValueError):
                pass
            raise TypeError
        while True:
            (x0, y0) = (x, y)
            try:
                (x, y) = resolve_fractions(x0, y0)
            except (AttributeError, TypeError):
                raise TypeError('cannot convert {!r}/{!r} to an element of {}'.format(x0, y0, self))
            try:
                return self._element_class(self, x, y, coerce=coerce)
            except TypeError:
                # Stop once the parents no longer change, otherwise keep
                # resolving one level of nested fractions per iteration.
                if (parent(x) is parent(x0)):
                    raise

    def construction(self):
        """Return the functorial construction (FractionField functor, R)."""
        from sage.categories.pushout import FractionField
        return (FractionField(), self.ring())

    def __eq__(self, other):
        """Two fraction fields are equal iff their underlying rings are."""
        if (not isinstance(other, FractionField_generic)):
            return False
        return (self._R == other._R)

    def __ne__(self, other):
        return (not (self == other))

    def __hash__(self):
        """Return a hash consistent with ``__eq__``.

        BUG FIX: the XOR's right operand was lost in transcription (a syntax
        error). The salt keeps Frac(R) from hashing equal to R itself;
        NOTE(review): 147068341 matches the upstream Sage source — confirm.
        """
        return (hash(self._R) ^ 147068341)

    def ngens(self):
        """Number of generators, inherited from R."""
        return self._R.ngens()

    def gen(self, i=0):
        """Return the i-th generator of R as an element of this field."""
        x = self._R.gen(i)
        one = self._R.one()
        r = self._element_class(self, x, one, coerce=False, reduce=False)
        return r

    def _is_valid_homomorphism_(self, codomain, im_gens, base_map=None):
        """A homomorphism is valid when the generator images match in number
        and the base ring maps (via base_map or coercion) into the codomain."""
        if (len(im_gens) != self.ngens()):
            return False
        if ((base_map is None) and (not codomain.has_coerce_map_from(self.base_ring()))):
            return False
        return True

    def random_element(self, *args, **kwds):
        """Random fraction with a guaranteed-nonzero denominator."""
        return self._element_class(self, self._R.random_element(*args, **kwds), self._R._random_nonzero_element(*args, **kwds), coerce=False, reduce=True)

    def some_elements(self):
        """Return 0, 1 and a spread of quotients of R's sample elements."""
        ret = [self.zero(), self.one()]
        for a in self._R.some_elements():
            for b in self._R.some_elements():
                if ((a != b) and self(a) and self(b)):
                    ret.append((self(a) / self(b)))
        return ret

    def _gcd_univariate_polynomial(self, f, g):
        """Monic gcd of univariate polynomials over this field, computed on
        the numerators over the base ring."""
        if g.is_zero():
            if f.is_zero():
                return f
            else:
                return f.monic()
        Pol = f.parent()
        Num = Pol.change_ring(self.base())
        f1 = Num(f.numerator())
        g1 = Num(g.numerator())
        return Pol(f1.gcd(g1)).monic()
def weights_init(m):
    """Glorot-style uniform initialisation for Conv* and Linear modules.

    Weights are drawn from U(-b, b) with b = sqrt(6 / (fan_in + fan_out));
    biases are zeroed. Other module types are left untouched.
    """
    layer_kind = m.__class__.__name__
    if 'Conv' in layer_kind:
        shape = list(m.weight.data.size())
        fan_in = np.prod(shape[1:4])
        fan_out = np.prod(shape[2:4]) * shape[0]
    elif 'Linear' in layer_kind:
        shape = list(m.weight.data.size())
        fan_in, fan_out = shape[1], shape[0]
    else:
        return
    bound = np.sqrt(6.0 / (fan_in + fan_out))
    m.weight.data.uniform_(-bound, bound)
    m.bias.data.fill_(0)
def register_Ns3TbAllocInfo_methods(root_module, cls):
    """Register the bindings for ns3::TbAllocInfo: its copy and default
    constructors plus its public instance attributes."""
    cls.add_constructor([param('ns3::TbAllocInfo const &', 'arg0')])
    cls.add_constructor([])
    attribute_table = (
        ('m_rbMap', 'std::vector< unsigned int >'),
        ('m_rnti', 'uint16_t'),
        ('m_sfnSf', 'ns3::SfnSf'),
        ('m_tbInfo', 'ns3::TbInfoElement'),
    )
    for attr_name, attr_type in attribute_table:
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def c2cn(forward, x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None):
    """Dispatch an n-dimensional complex-to-complex FFT (forward or inverse)
    to the pocketfft backend.

    ``s``/``axes`` follow the scipy.fft conventions; ``overwrite_x`` permits
    in-place computation when the working copy allows it.
    """
    if (plan is not None):
        raise NotImplementedError('Passing a precomputed plan is not yet supported by scipy.fft functions')
    tmp = _asfarray(x)
    (shape, axes) = _init_nd_shape_and_axes(tmp, s, axes)
    # If _asfarray made a copy, we may safely overwrite the working array.
    overwrite_x = (overwrite_x or _datacopied(tmp, x))
    workers = _workers(workers)
    if (len(axes) == 0):
        # Nothing to transform over.
        return x
    (tmp, copied) = _fix_shape(tmp, shape, axes)
    overwrite_x = (overwrite_x or copied)
    norm = _normalization(norm, forward)
    # In-place output only when allowed and already complex-typed.
    out = (tmp if (overwrite_x and (tmp.dtype.kind == 'c')) else None)
    return pfft.c2c(tmp, axes, forward, norm, out, workers)
def _wrap_warn_once(regex):
def decorator(fn):
def inner(self, *args, **kwargs):
with self.assertWarnsOnceRegex(UserWarning, regex):
fn(self, *args, **kwargs)
return inner
return decorator |
class TestsMiniImagenet(unittest.TestCase):
    """Smoke tests for prototypical-network training on mini-ImageNet."""

    def test_1_shot_5_way_with_restore(self):
        """Train one epoch (ravi split, 1 query), then train again with
        ``train.restore`` enabled to exercise checkpoint restoring."""
        config = {'data.dataset': 'mini-imagenet', 'data.dataset_path': 'data/mini-imagenet', 'data.split': 'ravi', 'data.train_way': 5, 'data.batch': 10, 'data.train_support': 5, 'data.train_query': 1, 'data.test_way': 5, 'data.test_support': 5, 'data.test_query': 1, 'data.episodes': 1, 'data.cuda': cuda_on, 'data.gpu': 0, 'model.x_dim': '84,84,3', 'model.lstm_size': 32, 'model.save_dir': './miniimagenet_test', 'train.epochs': 1, 'train.optim_method': 'Adam', 'train.lr': 0.001, 'train.patience': 100, 'train.restore': 0, 'train.log_dir': 'tests/logs'}
        train(config)
        config['train.restore'] = 1
        train(config)

    def test_5_shot_5_way(self):
        """Train one epoch on the vinyals split with 5 queries per class."""
        config = {'data.dataset': 'mini-imagenet', 'data.dataset_path': 'data/mini-imagenet', 'data.split': 'vinyals', 'data.train_way': 5, 'data.batch': 10, 'data.train_support': 5, 'data.train_query': 5, 'data.test_way': 5, 'data.test_support': 5, 'data.test_query': 5, 'data.episodes': 1, 'data.cuda': cuda_on, 'data.gpu': 0, 'model.x_dim': '84,84,3', 'model.lstm_size': 32, 'model.save_dir': './miniimagenet_test', 'train.epochs': 1, 'train.optim_method': 'Adam', 'train.lr': 0.001, 'train.patience': 100, 'train.restore': 0, 'train.log_dir': 'tests/logs'}
        train(config)
class LabelSmoothCrossEntropyLoss(nn.Module):
    """Cross-entropy with label smoothing over per-point logits.

    The true class gets probability 1 - neg_factor; every class additionally
    receives neg_factor / num_classes, and the loss is the weighted mean
    negative log-likelihood against that soft target.
    """

    def __init__(self, neg_factor=0.1):
        """``neg_factor``: total probability mass spread over classes."""
        super(LabelSmoothCrossEntropyLoss, self).__init__()
        self.neg_factor = neg_factor
        self.reduction = 'mean'
        self.log_softmax = nn.LogSoftmax(dim=1)

    def forward(self, logits, targets, weight):
        """Compute the smoothed loss.

        ``logits``: (batch, num_pts, num_classes); ``targets``: integer class
        ids broadcastable to (batch, num_pts); ``weight``: per-element weights
        passed through to ``weight_reduce_loss``.
        """
        logits = logits.float()
        (batch_size, num_pts, num_classes) = (logits.size(0), logits.size(1), logits.size(2))
        # Flatten (batch, pts) so smoothing/scatter work on a 2-D tensor.
        logits = logits.reshape((- 1), num_classes)
        targets = targets.reshape((- 1), 1)
        with torch.no_grad():
            targets = targets.clone().detach()
            (label_pos, label_neg) = ((1.0 - self.neg_factor), (self.neg_factor / num_classes))
            # Soft targets: label_neg everywhere, label_pos on the true class.
            lb_one_hot = torch.empty_like(logits).fill_(label_neg)
            lb_one_hot.scatter_(1, targets, label_pos)
            lb_one_hot = lb_one_hot.detach()
        logs = self.log_softmax(logits)
        loss = (- torch.sum((logs * lb_one_hot), dim=1))
        # Average over all points of the batch.
        loss = weight_reduce_loss(loss, weight=weight, reduction=self.reduction, avg_factor=(batch_size * num_pts))
        return loss
# FIX: the decorator was garbled to a bare ".unbox(LookupType)" (syntax
# error); restored as numba's unboxing-hook registration.
@numba.extending.unbox(LookupType)
def unbox_Lookup(lookuptype, lookupobj, c):
    """Numba unboxing hook: build the native Lookup value from a Python
    Lookup object by extracting its ``arrayptrs`` array."""
    arrayptrs_obj = c.pyapi.object_getattr_string(lookupobj, 'arrayptrs')
    proxyout = c.context.make_helper(c.builder, lookuptype)
    proxyout.arrayptrs = c.pyapi.to_native_value(lookuptype.arraytype, arrayptrs_obj).value
    # Balance the reference taken by object_getattr_string.
    c.pyapi.decref(arrayptrs_obj)
    is_error = numba.core.cgutils.is_not_null(c.builder, c.pyapi.err_occurred())
    return numba.extending.NativeValue(proxyout._getvalue(), is_error)
class Config(object):
def __init__(self, config_path=None, db_path=None):
    """Load (or interactively create) the Scanner config and set up storage.

    Args:
        config_path: path to the TOML config; defaults to
            ``default_config_path()``, created via a prompt when missing.
        db_path: database directory override; otherwise taken from the
            config's ``storage.db_path``.

    Raises:
        ScannerException: when a required config key is missing.
    """
    self.config_path = (config_path or self.default_config_path())
    if (not os.path.isfile(self.config_path)):
        self._create_config_prompt()
    config = self._load_config(self.config_path)
    self.config = config
    try:
        # Make the native build directory importable.
        self.module_dir = os.path.dirname(os.path.realpath(__file__))
        build_path = os.path.join(self.module_dir, 'build')
        sys.path.append(build_path)
        if (db_path is not None):
            self.db_path = db_path
        else:
            storage = config['storage']
            self.db_path = str(storage['db_path'])
        mkdir_p(self.db_path)
        storage_config = self._make_storage_config(config)
        self.storage_config = storage_config
        self.storage = sh.StorageBackend.make_from_config(storage_config)
        # Network defaults, overridable by the optional [network] section.
        self.master_address = 'localhost'
        self.master_port = '5001'
        self.worker_port = '5002'
        if ('network' in config):
            network = config['network']
            if ('master' in network):
                self.master_address = network['master']
            if ('master_port' in network):
                self.master_port = network['master_port']
            if ('worker_port' in network):
                self.worker_port = network['worker_port']
    except KeyError as key:
        raise ScannerException('Scanner config missing key: {}'.format(key))
def _make_storage_config(self, config):
storage = config['storage']
storage_type = storage['type']
if (storage_type == 'posix'):
storage_config = sh.StorageConfig.make_posix_config()
elif (storage_type == 'gcs'):
storage_config = sh.StorageConfig.make_gcs_config(storage['bucket'])
elif (storage_type == 's3'):
storage_config = sh.StorageConfig.make_s3_config(storage['bucket'], storage['region'], storage['endpoint'])
else:
raise ScannerException('Unsupported storage type {}'.format(storage_type))
return storage_config
def _load_config(self, path):
try:
with open(path, 'r') as f:
return toml.loads(f.read())
except IOError:
raise ScannerException('Scanner config file does not exist: {}'.format(path))
def _create_config_prompt(self):
sys.stdout.write('Your Scanner configuration file ({}) does not exist. Create one? [Y/n] '.format(self.config_path))
sys.stdout.flush()
if (sys.stdin.readline().strip().lower() == 'n'):
print('Exiting script. Please create a Scanner configuration file or re-run this script and follow the dialogue.')
exit()
config = self.default_config()
path = self.default_config_path()
mkdir_p(os.path.split(path)[0])
with open(path, 'w') as f:
f.write(toml.dumps(config))
print('Wrote Scanner configuration to {}'.format(path))
def default_config_path():
return os.path.expanduser('~/.scanner/config.toml')
def default_config():
hostname = 'localhost'
db_path = os.path.expanduser('~/.scanner/db')
return {'storage': {'type': 'posix', 'db_path': db_path}, 'network': {'master': hostname, 'master_port': '5001', 'worker_port': '5002'}}
def __getstate__(self):
state = self.__dict__.copy()
state.pop('storage_config', None)
state.pop('storage', None)
return state
def __setstate__(self, newstate):
self.module_dir = os.path.dirname(os.path.realpath(__file__))
build_path = (self.module_dir + '/build')
if (not (build_path in sys.path)):
sys.path.append(build_path)
sys.stdout.flush()
sc = self._make_storage_config(newstate['config'])
newstate['storage_config'] = sc
newstate['storage'] = sh.StorageBackend.make_from_config(sc)
self.__dict__.update(newstate) |
def move_existential_quantifiers(task):
    """Pull existential quantifiers outward in every condition of *task*.

    For each condition that has an existential part, recursively merges nested
    ExistentialConditions and hoists quantifiers above conjunctions, then
    stores the simplified result back via the condition proxy.
    """
    def recurse(condition):
        # Partition the (already-recursed) children into existential vs. other parts.
        existential_parts = []
        other_parts = []
        for part in condition.parts:
            part = recurse(part)
            if isinstance(part, pddl.ExistentialCondition):
                existential_parts.append(part)
            else:
                other_parts.append(part)
        if (not existential_parts):
            # Nothing to hoist at this level; leave the condition unchanged.
            return condition
        if isinstance(condition, pddl.ExistentialCondition):
            # Collapse directly nested quantifiers: (exists X (exists Y p))
            # becomes (exists X+Y p). NOTE(review): assumes an
            # ExistentialCondition has exactly one part — confirm against pddl.
            new_parameters = (condition.parameters + existential_parts[0].parameters)
            new_parts = existential_parts[0].parts
            return pddl.ExistentialCondition(new_parameters, new_parts)
        # Only conjunctions and existentials are expected to contain
        # existential parts at this point.
        assert isinstance(condition, pddl.Conjunction)
        new_parameters = []
        # Aliasing is harmless here: other_parts is not used again after this.
        new_conjunction_parts = other_parts
        for part in existential_parts:
            # Hoist each quantifier's variables and inline its body into the
            # conjunction: (and p (exists X q)) becomes (exists X (and p q)).
            new_parameters += part.parameters
            new_conjunction_parts += part.parts
        new_conjunction = pddl.Conjunction(new_conjunction_parts)
        return pddl.ExistentialCondition(new_parameters, (new_conjunction,))
    for proxy in all_conditions(task):
        # Skip conditions without existential parts to avoid needless rebuilds.
        if proxy.condition.has_existential_part():
            proxy.set(recurse(proxy.condition).simplified())
def main():
    """Load a RETURNN config, restore model + optimizer, and print parameter
    tensors and their optimizer state (all parameters, or a single one
    selected via --key).

    NOTE: statement order is load-bearing — the working dir must change before
    the config is loaded, and the config must be fully prepared before the
    Engine is constructed.
    """
    print(f'{os.path.basename(__file__)}: {__doc__.strip()}')
    numpy.set_printoptions(precision=4, linewidth=80)
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('returnn_config')
    arg_parser.add_argument('--cwd')
    arg_parser.add_argument('--key', type=str, default='', help='Name of the tensor or object to inspect')
    arg_parser.add_argument('--all_tensors', action='store_true', help='If True, print the values of all the tensors.')
    arg_parser.add_argument('--stats_only', action='store_true')
    arg_parser.add_argument('--printoptions', nargs='*', type=parse_numpy_printoption, help="Argument for numpy.set_printoptions(), in the form 'k=v'.")
    arg_parser.add_argument('--device', default='cpu')
    args = arg_parser.parse_args()
    if args.cwd:
        print('* Change working dir:', args.cwd)
        os.chdir(args.cwd)
    log.initialize(verbosity=[5])
    config = Config()
    print('* Load config:', args.returnn_config)
    config.load_file(args.returnn_config)
    set_global_config(config)
    # Strip dataset/training-only options: we only want to load the
    # checkpoint for inspection, not start training or touch data.
    for k in ['train', 'dev', 'eval', 'eval_datasets', 'torch_amp', 'grad_scaler', 'torch_distributed', 'learning_rate_control', 'learning_rate_file']:
        config.typed_dict.pop(k, None)
    config.set('device', args.device)
    print('* Setup RETURNN engine')
    engine = Engine(config=config)
    print('* Load model and optimizer state')
    # init_train_from_config restores both the model checkpoint and the
    # optimizer state — presumably from paths in the config; verify.
    engine.init_train_from_config()
    model = engine.get_pt_model()
    assert (model is not None), 'No model loaded?'
    opt = engine.get_pt_optimizer()
    assert (opt is not None), 'No optimizer loaded?'
    print('* Loaded.')
    if args.key:
        # Inspect a single named parameter plus its optimizer state.
        obj = model.get_parameter(args.key)
        print(f'{args.key}:')
        print_object(obj, stats_only=args.stats_only)
        print('Optimizer state:')
        # Torch optimizer state is keyed by the parameter tensor itself.
        if (obj in opt.state):
            print_object(opt.state[obj], stats_only=args.stats_only)
        else:
            print('(None)')
    else:
        # Walk every parameter, printing stats/values and optimizer state.
        for (name, param) in model.named_parameters():
            _print_key_value(name, param, print_all_tensors=args.all_tensors, stats_only=args.stats_only)
            if (param in opt.state):
                print(' Optimizer state:')
                print_object(opt.state[param], prefix=' ', print_all_tensors=args.all_tensors, stats_only=args.stats_only)
            else:
                print(' Optimizer state: (None)')
def is_in_param(p):
    """Return True if parameter *p* is an input kind (scalar or array, pure-in or in/out)."""
    # Evaluate param_kind once instead of up to four times, and return the
    # membership test directly instead of an if/else over True/False.
    return param_kind(p) in (IN, INOUT, IN_ARRAY, INOUT_ARRAY)
class FNetConfig(PretrainedConfig):
    """Configuration class for an FNet model.

    Holds the model hyperparameters (vocab/hidden sizes, layer count,
    dropout, TPU Fourier-transform options, ...) and forwards the special
    token ids to the PretrainedConfig base class.
    """

    model_type = 'fnet'

    def __init__(self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs):
        # Special token ids are handled by the base class; everything else is
        # stored as a plain attribute, in signature order.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
class Attention(nn.Module):
    """Multi-head self-attention with optional learnable q/k/v scaling.

    When ``search=True`` a learnable per-head-channel parameter ``alpha``
    scales the q/k/v projections (used for architecture search).
    """

    def __init__(self, dim, heads, dropout, search=False):
        super().__init__()
        self.heads = heads
        head_dim = (dim // heads)
        # Standard 1/sqrt(d_head) attention scaling.
        self.scale = (head_dim ** (- 0.5))
        self.attn = None
        # Fused projection producing q, k and v in one matmul.
        self.qkv = nn.Linear(dim, (dim * 3))
        self.attn_drop = nn.Dropout(dropout)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(dropout)
        if search:
            # Broadcasts over (qkv, batch, heads, tokens, head_dim).
            self.alpha = nn.Parameter(torch.ones(1, 1, 1, 1, head_dim))

    def unwrapped(self):
        """Return the underlying module (identity here; hook for wrappers)."""
        return self

    def forward(self, x, mask=None):
        """Compute self-attention over ``x`` of shape (B, N, C).

        Returns a tuple ``(out, attn)`` where ``out`` is (B, N, C) and
        ``attn`` is the post-softmax attention map (B, heads, N, N).
        NOTE(review): ``mask`` is accepted but never applied — confirm intent.
        """
        (B, N, C) = x.shape
        # (B, N, 3C) -> (3, B, heads, N, head_dim)
        qkv = self.qkv(x).reshape(B, N, 3, self.heads, (- 1)).permute(2, 0, 3, 1, 4)
        if hasattr(self, 'alpha'):
            qkv = (qkv * self.alpha)
        (q, k, v) = (qkv[0], qkv[1], qkv[2])
        # FIX: the matmul operators were garbled away in the source
        # ('q k.transpose(...)' / '(attn v)'); this is scaled dot-product
        # attention: softmax(q @ k^T / sqrt(d)) @ v.
        attn = ((q @ k.transpose((- 2), (- 1))) * self.scale)
        attn = attn.softmax(dim=(- 1))
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, (- 1))
        x = self.proj(x)
        x = self.proj_drop(x)
        return (x, attn)
@require_torch  # FIX: decorator was garbled to the bare fragment '_torch'
class DonutSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for DonutSwin: config round-trips, forward shapes,
    attention/hidden-state outputs, padding behavior and initialization."""

    all_model_classes = ((DonutSwinModel,) if is_torch_available() else ())
    pipeline_model_mapping = ({'feature-extraction': DonutSwinModel} if is_torch_available() else {})
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DonutSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DonutSwinConfig, embed_dim=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op: common-property checks do not apply here.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_inputs_embeds(self):
        # DonutSwin does not take inputs_embeds; skip the inherited test.
        pass

    def test_model_common_attributes(self):
        (config, _) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), nn.Module)
            x = model.get_output_embeddings()
            self.assertTrue(((x is None) or isinstance(x, nn.Linear)))

    def test_forward_signature(self):
        (config, _) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            arg_names = [*signature.parameters.keys()]
            # The first forward argument must be pixel_values for vision models.
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        """Check attention tensors: count per stage and per-window shape,
        whether requested via inputs or via the config."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # Request attentions via the inputs dict.
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # One attention tensor per stage of the Swin backbone.
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # Now request attentions via the config instead.
            del inputs_dict['output_attentions']
            config.output_attentions = True
            window_size_squared = (config.window_size ** 2)
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(list(attentions[0].shape[(- 3):]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared])
            out_len = len(outputs)
            # Requesting hidden states too must only append outputs.
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, 'num_hidden_states_types'):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # hidden_states + reshaped_hidden_states by default.
                added_hidden_states = 2
            self.assertEqual((out_len + added_hidden_states), len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(list(self_attentions[0].shape[(- 3):]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared])

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """Run a forward pass and verify hidden-state counts and shapes
        (both token-major and reshaped channel-major forms)."""
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        # +1 for the embedding output preceding the stages.
        expected_num_layers = getattr(self.model_tester, 'expected_num_hidden_layers', (len(self.model_tester.depths) + 1))
        self.assertEqual(len(hidden_states), expected_num_layers)
        patch_size = (config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size))
        num_patches = ((image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]))
        self.assertListEqual(list(hidden_states[0].shape[(- 2):]), [num_patches, self.model_tester.embed_dim])
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        (batch_size, num_channels, height, width) = reshaped_hidden_states[0].shape
        # Flattening the spatial dims must give back the token-major layout.
        reshaped_hidden_states = reshaped_hidden_states[0].view(batch_size, num_channels, (height * width)).permute(0, 2, 1)
        self.assertListEqual(list(reshaped_hidden_states.shape[(- 2):]), [num_patches, self.model_tester.embed_dim])

    def test_hidden_states_output(self):
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size))
        for model_class in self.all_model_classes:
            # Request via inputs, then via config.
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        # Pick a patch size that does not divide the image size so padding kicks in.
        config.patch_size = 3
        image_size = (self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size))
        patch_size = (config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size))
        padded_height = ((image_size[0] + patch_size[0]) - (image_size[0] % patch_size[0]))
        padded_width = ((image_size[1] + patch_size[1]) - (image_size[1] % patch_size[1]))
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_model_from_pretrained(self):
        for model_name in DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DonutSwinModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for (name, param) in model.named_parameters():
                if (('embeddings' not in name) and param.requires_grad):
                    # FIX: the scale factor was garbled to '.0' (division by
                    # zero); upstream rounds to 9 decimal places via 1e9 to
                    # check params are exactly 0.0 or 1.0 under zero-init.
                    self.assertIn(((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized')
class TestCRFFeatures(SnipsTest):
    """Unit tests for CRF features and their factories: offsets, caching,
    registration of custom factories, and every built-in factory type."""

    def test_feature_should_work(self):
        def fn(tokens, token_index):
            value = tokens[token_index].value
            return ('%s_%s' % (value, len(value)))
        cache = [{TOKEN_NAME: token} for token in tokenize('hello beautiful world', LANGUAGE_EN)]
        feature = Feature('test_feature', fn)
        res = feature.compute(1, cache)
        self.assertEqual(res, 'beautiful_9')

    def test_feature_should_work_with_offset(self):
        def fn(tokens, token_index):
            value = tokens[token_index].value
            return ('%s_%s' % (value, len(value)))
        cache = [{TOKEN_NAME: token} for token in tokenize('hello beautiful world', LANGUAGE_EN)]
        feature = Feature('test_feature', fn, offset=1)
        res = feature.compute(1, cache)
        self.assertEqual(res, 'world_5')

    def test_feature_should_work_with_cache(self):
        # Features sharing the same base name must reuse cached values, so the
        # underlying function runs only once per distinct token index.
        def fn(tokens, token_index):
            value = tokens[token_index].value
            return ('%s_%s' % (value, len(value)))
        mocked_fn = MagicMock(side_effect=fn)
        cache = [{TOKEN_NAME: token} for token in tokenize('hello beautiful world', LANGUAGE_EN)]
        feature = Feature('test_feature', mocked_fn, offset=0)
        feature.compute(2, cache)
        feature1 = Feature('test_feature', mocked_fn, offset=1)
        feature2 = Feature('test_feature', mocked_fn, offset=2)
        res1 = feature1.compute(1, cache)
        res1_bis = feature1.compute(0, cache)
        res2 = feature2.compute(0, cache)
        self.assertEqual(res1, 'world_5')
        self.assertEqual(res1_bis, 'beautiful_9')
        self.assertEqual(res2, 'world_5')
        self.assertEqual(mocked_fn.call_count, 2)

    def test_single_feature_factory(self):
        # FIX: decorator was garbled to the bare tuple
        # "('my_factory', override=True)" — reconstructed as the factory
        # registration decorator (verify against snips-nlu's Registrable API).
        @CRFFeatureFactory.register('my_factory', override=True)
        class MySingleFeatureFactory(SingleFeatureFactory):
            def compute_feature(self, tokens, token_index):
                value = tokens[token_index].value
                return ('%s_%s' % (value, len(value)))
        config = {'factory_name': 'my_factory', 'args': {}, 'offsets': [0, 1]}
        factory = MySingleFeatureFactory(config)
        factory.fit(None, None)
        features = factory.build_features()
        cache = [{TOKEN_NAME: token} for token in tokenize('hello beautiful world', LANGUAGE_EN)]
        res_0 = features[0].compute(0, cache)
        res_1 = features[1].compute(0, cache)
        self.assertEqual(len(features), 2)
        self.assertEqual(features[0].name, 'my_factory')
        self.assertEqual(features[1].name, 'my_factory[+1]')
        self.assertEqual(res_0, 'hello_5')
        self.assertEqual(res_1, 'beautiful_9')

    def test_is_digit_factory(self):
        config = {'factory_name': 'is_digit', 'args': {}, 'offsets': [0]}
        tokens = tokenize('hello 1 world', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        factory = CRFFeatureFactory.from_config(config)
        factory.fit(None, None)
        features = factory.build_features()
        res1 = features[0].compute(0, cache)
        res2 = features[0].compute(1, cache)
        self.assertIsInstance(factory, IsDigitFactory)
        self.assertEqual(features[0].base_name, 'is_digit')
        self.assertEqual(res1, None)
        self.assertEqual(res2, '1')

    def test_is_first_factory(self):
        config = {'factory_name': 'is_first', 'args': {}, 'offsets': [0]}
        tokens = tokenize('hello beautiful world', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        factory = CRFFeatureFactory.from_config(config)
        factory.fit(None, None)
        features = factory.build_features()
        res1 = features[0].compute(0, cache)
        res2 = features[0].compute(1, cache)
        self.assertIsInstance(factory, IsFirstFactory)
        self.assertEqual(features[0].base_name, 'is_first')
        self.assertEqual(res1, '1')
        self.assertEqual(res2, None)

    def test_is_last_factory(self):
        config = {'factory_name': 'is_last', 'args': {}, 'offsets': [0]}
        tokens = tokenize('hello beautiful world', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        factory = CRFFeatureFactory.from_config(config)
        factory.fit(None, None)
        features = factory.build_features()
        res1 = features[0].compute(0, cache)
        res2 = features[0].compute(2, cache)
        self.assertIsInstance(factory, IsLastFactory)
        self.assertEqual(features[0].base_name, 'is_last')
        self.assertEqual(res1, None)
        self.assertEqual(res2, '1')

    def test_prefix_factory(self):
        config = {'factory_name': 'prefix', 'args': {'prefix_size': 2}, 'offsets': [0]}
        tokens = tokenize('hello beautiful world', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        factory = CRFFeatureFactory.from_config(config)
        factory.fit(None, None)
        features = factory.build_features()
        res = features[0].compute(1, cache)
        self.assertIsInstance(factory, PrefixFactory)
        self.assertEqual(features[0].base_name, 'prefix_2')
        self.assertEqual(res, 'be')

    def test_suffix_factory(self):
        config = {'factory_name': 'suffix', 'args': {'suffix_size': 2}, 'offsets': [0]}
        tokens = tokenize('hello beautiful world', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        factory = CRFFeatureFactory.from_config(config)
        factory.fit(None, None)
        features = factory.build_features()
        res = features[0].compute(1, cache)
        self.assertIsInstance(factory, SuffixFactory)
        self.assertEqual(features[0].base_name, 'suffix_2')
        self.assertEqual(res, 'ul')

    def test_length_factory(self):
        config = {'factory_name': 'length', 'args': {}, 'offsets': [0]}
        tokens = tokenize('hello beautiful world', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        factory = CRFFeatureFactory.from_config(config)
        factory.fit(None, None)
        features = factory.build_features()
        res = features[0].compute(2, cache)
        self.assertIsInstance(factory, LengthFactory)
        self.assertEqual(features[0].base_name, 'length')
        self.assertEqual(res, '5')

    def test_ngram_factory(self):
        config = {'factory_name': 'ngram', 'args': {'n': 2, 'use_stemming': False, 'common_words_gazetteer_name': None}, 'offsets': [0]}
        tokens = tokenize('hello beautiful world', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        factory = CRFFeatureFactory.from_config(config)
        mocked_dataset = {'language': 'en'}
        factory.fit(mocked_dataset, None)
        features = factory.build_features()
        res = features[0].compute(0, cache)
        self.assertIsInstance(factory, NgramFactory)
        self.assertEqual(features[0].base_name, 'ngram_2')
        self.assertEqual(res, 'hello beautiful')

    def test_ngram_factory_with_stemming(self):
        config = {'factory_name': 'ngram', 'args': {'n': 2, 'use_stemming': True, 'common_words_gazetteer_name': None}, 'offsets': [0]}
        tokens = tokenize('hello beautiful world', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        resources = {STEMS: {'beautiful': 'beauty'}}
        factory = CRFFeatureFactory.from_config(config, resources=resources)
        mocked_dataset = {'language': 'en'}
        factory.fit(mocked_dataset, None)
        features = factory.build_features()
        res = features[0].compute(0, cache)
        self.assertIsInstance(factory, NgramFactory)
        self.assertEqual(features[0].base_name, 'ngram_2')
        self.assertEqual(res, 'hello beauty')

    def test_ngram_factory_with_gazetteer(self):
        # Tokens absent from the gazetteer are replaced by a rare-word marker.
        config = {'factory_name': 'ngram', 'args': {'n': 2, 'use_stemming': False, 'common_words_gazetteer_name': 'my_gazetteer'}, 'offsets': [0]}
        resources = {GAZETTEERS: {'my_gazetteer': {'hello', 'beautiful', 'world'}}}
        tokens = tokenize('hello beautiful foobar world', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        factory = CRFFeatureFactory.from_config(config, resources=resources)
        mocked_dataset = {'language': 'en'}
        factory.fit(mocked_dataset, None)
        features = factory.build_features()
        res = features[0].compute(1, cache)
        self.assertIsInstance(factory, NgramFactory)
        self.assertEqual(features[0].base_name, 'ngram_2')
        self.assertEqual(res, 'beautiful rare_word')

    def test_shape_ngram_factory(self):
        config = {'factory_name': 'shape_ngram', 'args': {'n': 3}, 'offsets': [0]}
        tokens = tokenize('hello Beautiful foObar world', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        factory = CRFFeatureFactory.from_config(config)
        mocked_dataset = {'language': 'en'}
        factory.fit(mocked_dataset, None)
        features = factory.build_features()
        res = features[0].compute(1, cache)
        self.assertIsInstance(factory, ShapeNgramFactory)
        self.assertEqual(features[0].base_name, 'shape_ngram_3')
        self.assertEqual(res, 'Xxx xX xxx')

    def test_word_cluster_factory(self):
        resources = {WORD_CLUSTERS: {'my_word_clusters': {'word1': '00', 'word2': '11'}}}
        config = {'factory_name': 'word_cluster', 'args': {'cluster_name': 'my_word_clusters', 'use_stemming': False}, 'offsets': [0]}
        tokens = tokenize('hello word1 word2', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        factory = CRFFeatureFactory.from_config(config, resources=resources)
        mocked_dataset = {'language': 'en'}
        factory.fit(mocked_dataset, None)
        features = factory.build_features()
        res0 = features[0].compute(0, cache)
        res1 = features[0].compute(1, cache)
        res2 = features[0].compute(2, cache)
        self.assertIsInstance(factory, WordClusterFactory)
        self.assertEqual(features[0].base_name, 'word_cluster_my_word_clusters')
        self.assertEqual(res0, None)
        self.assertEqual(res1, '00')
        self.assertEqual(res2, '11')

    def test_entity_match_factory(self):
        dataset_stream = io.StringIO('\n---\ntype: intent\nname: my_intent\nutterances:\n- this is [entity1](my first entity)\n- this is [entity2](second_entity)')
        dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
        config = {'factory_name': 'entity_match', 'args': {'tagging_scheme_code': TaggingScheme.BILOU.value, 'use_stemming': True}, 'offsets': [0]}
        tokens = tokenize('my first entity and second_entity and third_entity', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        resources = {STEMS: dict()}
        custom_entity_parser = CustomEntityParser.build(dataset, CustomEntityParserUsage.WITH_STEMS, resources)
        factory = CRFFeatureFactory.from_config(config, custom_entity_parser=custom_entity_parser, resources=resources)
        factory.fit(dataset, 'my_intent')
        features = factory.build_features()
        features = sorted(features, key=(lambda f: f.base_name))
        res0 = features[0].compute(0, cache)
        res1 = features[0].compute(1, cache)
        res2 = features[0].compute(2, cache)
        res3 = features[0].compute(3, cache)
        res4 = features[0].compute(4, cache)
        res5 = features[1].compute(0, cache)
        res6 = features[1].compute(1, cache)
        res7 = features[1].compute(2, cache)
        res8 = features[1].compute(3, cache)
        res9 = features[1].compute(4, cache)
        self.assertIsInstance(factory, CustomEntityMatchFactory)
        self.assertEqual(len(features), 2)
        self.assertEqual(features[0].base_name, 'entity_match_entity1')
        self.assertEqual(features[1].base_name, 'entity_match_entity2')
        # BILOU tagging over 'my first entity' for entity1.
        self.assertEqual(res0, BEGINNING_PREFIX)
        self.assertEqual(res1, INSIDE_PREFIX)
        self.assertEqual(res2, LAST_PREFIX)
        self.assertEqual(res3, None)
        self.assertEqual(res4, None)
        self.assertEqual(res5, None)
        self.assertEqual(res6, None)
        self.assertEqual(res7, None)
        self.assertEqual(res8, None)
        self.assertEqual(res9, UNIT_PREFIX)

    def test_entity_match_factory_with_filter(self):
        # entity3 is not automatically extensible, so the filter must drop it
        # (and the unknown 'invalid_filter' key must be ignored gracefully).
        dataset_stream = io.StringIO('\n---\ntype: intent\nname: my_intent\nutterances:\n- this is [entity1](my first entity)\n- this is [entity2](second_entity)\n- this is [entity3](third_entity)\n\n---\ntype: entity\nname: entity3\nautomatically_extensible: false')
        dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
        config = {'factory_name': 'entity_match', 'args': {'tagging_scheme_code': TaggingScheme.BILOU.value, 'use_stemming': True, 'entity_filter': {'automatically_extensible': True, 'invalid_filter': "i'm invalid"}}, 'offsets': [0]}
        tokens = tokenize('my first entity and second_entity and third_entity', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        resources = {STEMS: dict()}
        custom_entity_parser = CustomEntityParser.build(dataset, CustomEntityParserUsage.WITH_STEMS, resources)
        factory = CRFFeatureFactory.from_config(config, custom_entity_parser=custom_entity_parser, resources=resources)
        factory.fit(dataset, 'my_intent')
        features = factory.build_features()
        features = sorted(features, key=(lambda f: f.base_name))
        res0 = features[0].compute(0, cache)
        res1 = features[0].compute(1, cache)
        res2 = features[0].compute(2, cache)
        res3 = features[0].compute(3, cache)
        res4 = features[0].compute(4, cache)
        res5 = features[1].compute(0, cache)
        res6 = features[1].compute(1, cache)
        res7 = features[1].compute(2, cache)
        res8 = features[1].compute(3, cache)
        res9 = features[1].compute(4, cache)
        self.assertIsInstance(factory, CustomEntityMatchFactory)
        self.assertEqual(len(features), 2)
        self.assertEqual(features[0].base_name, 'entity_match_entity1')
        self.assertEqual(features[1].base_name, 'entity_match_entity2')
        self.assertEqual(res0, BEGINNING_PREFIX)
        self.assertEqual(res1, INSIDE_PREFIX)
        self.assertEqual(res2, LAST_PREFIX)
        self.assertEqual(res3, None)
        self.assertEqual(res4, None)
        self.assertEqual(res5, None)
        self.assertEqual(res6, None)
        self.assertEqual(res7, None)
        self.assertEqual(res8, None)
        self.assertEqual(res9, UNIT_PREFIX)

    def test_builtin_entity_match_factory(self):
        def mock_builtin_entity_scope(dataset, _):
            if (dataset[LANGUAGE] == LANGUAGE_EN):
                return {SNIPS_NUMBER, SNIPS_DATETIME}
            return []
        config = {'factory_name': 'builtin_entity_match', 'args': {'tagging_scheme_code': TaggingScheme.BILOU.value}, 'offsets': [0]}
        tokens = tokenize('one tea tomorrow at 2pm', LANGUAGE_EN)
        cache = [{TOKEN_NAME: token} for token in tokens]
        builtin_entity_parser = BuiltinEntityParser.build(language='en')
        factory = CRFFeatureFactory.from_config(config, builtin_entity_parser=builtin_entity_parser)
        factory._get_builtin_entity_scope = mock_builtin_entity_scope
        mocked_dataset = {'language': 'en'}
        factory.fit(mocked_dataset, None)
        features = factory.build_features()
        features = sorted(features, key=(lambda f: f.base_name))
        res0 = features[0].compute(0, cache)
        res1 = features[0].compute(1, cache)
        res2 = features[0].compute(2, cache)
        res3 = features[0].compute(3, cache)
        res4 = features[0].compute(4, cache)
        res5 = features[1].compute(0, cache)
        res6 = features[1].compute(1, cache)
        res7 = features[1].compute(2, cache)
        res8 = features[1].compute(3, cache)
        res9 = features[1].compute(4, cache)
        self.assertIsInstance(factory, BuiltinEntityMatchFactory)
        self.assertEqual(len(features), 2)
        self.assertEqual(features[0].base_name, 'builtin_entity_match_snips/datetime')
        self.assertEqual(features[1].base_name, 'builtin_entity_match_snips/number')
        self.assertEqual(res0, UNIT_PREFIX)
        self.assertEqual(res1, None)
        self.assertEqual(res2, BEGINNING_PREFIX)
        self.assertEqual(res3, INSIDE_PREFIX)
        self.assertEqual(res4, LAST_PREFIX)
        self.assertEqual(res5, UNIT_PREFIX)
        self.assertEqual(res6, None)
        self.assertEqual(res7, None)
        self.assertEqual(res8, None)
        self.assertEqual(res9, None)

    def test_custom_single_feature_factory(self):
        # FIX: decorator was garbled to "('my_single_feature', override=True)".
        @CRFFeatureFactory.register('my_single_feature', override=True)
        class MySingleFeatureFactory(SingleFeatureFactory):
            def compute_feature(self, tokens, token_index):
                return ('(%s)[my_feature]' % tokens[token_index].value)
        config = {'factory_name': 'my_single_feature', 'args': {}, 'offsets': [0, (- 1)]}
        feature_factory = CRFFeatureFactory.from_config(config)
        features = feature_factory.build_features()
        feature_name = features[0].name
        feature_name_offset = features[1].name
        tokens = tokenize('hello world', 'en')
        cache = [{TOKEN_NAME: token} for token in tokens]
        feature_value = features[0].compute(1, cache)
        feature_value_offset = features[1].compute(1, cache)
        self.assertEqual('my_single_feature', feature_name)
        self.assertEqual('my_single_feature[-1]', feature_name_offset)
        self.assertEqual('(world)[my_feature]', feature_value)
        self.assertEqual('(hello)[my_feature]', feature_value_offset)

    def test_custom_multi_feature_factory(self):
        # FIX: decorator was garbled to "('my_multi_feature_factory', override=True)".
        @CRFFeatureFactory.register('my_multi_feature_factory', override=True)
        class MyMultiFeature(CRFFeatureFactory):
            def build_features(self):
                first_features = [Feature('my_first_feature', self.compute_feature_1, offset=offset) for offset in self.offsets]
                second_features = [Feature('my_second_feature', self.compute_feature_2, offset=offset) for offset in self.offsets]
                return (first_features + second_features)

            def compute_feature_1(tokens, token_index):
                return ('(%s)[my_feature_1]' % tokens[token_index].value)

            def compute_feature_2(tokens, token_index):
                return ('(%s)[my_feature_2]' % tokens[token_index].value)
        config = {'factory_name': 'my_multi_feature_factory', 'args': {}, 'offsets': [(- 1), 0]}
        feature_factory = CRFFeatureFactory.from_config(config)
        features = feature_factory.build_features()
        feature_0 = features[0]
        feature_1 = features[1]
        feature_2 = features[2]
        feature_3 = features[3]
        tokens = tokenize('foo bar baz', 'en')
        cache = [{TOKEN_NAME: token} for token in tokens]
        self.assertEqual('my_first_feature[-1]', feature_0.name)
        self.assertEqual('(foo)[my_feature_1]', feature_0.compute(1, cache))
        self.assertEqual('my_first_feature', feature_1.name)
        self.assertEqual('my_second_feature[-1]', feature_2.name)
        self.assertEqual('(bar)[my_feature_2]', feature_2.compute(2, cache))
        self.assertEqual('my_second_feature', feature_3.name)

    def test_factory_from_config(self):
        # FIX: decorator was garbled to the bare string "('my_custom_feature')".
        @CRFFeatureFactory.register('my_custom_feature')
        class MySingleFeatureFactory(SingleFeatureFactory):
            def compute_feature(self, tokens, token_index):
                return ('(%s)[my_custom_feature]' % tokens[token_index].value)
        config = {'factory_name': 'my_custom_feature', 'args': {}, 'offsets': [0]}
        factory = CRFFeatureFactory.from_config(config)
        self.assertIsInstance(factory, MySingleFeatureFactory)

    def test_should_fail_loading_unregistered_factory_from_config(self):
        config = {'factory_name': 'my_unknown_feature', 'args': {}, 'offsets': [0]}
        with self.assertRaises(NotRegisteredError):
            CRFFeatureFactory.from_config(config)
def iter01x10y(num):
    """Yield (row, col) index pairs over a num x num grid: rows ascend,
    columns descend within each row."""
    for row in range(num):
        for col in reversed(range(num)):
            yield (row, col)
def compute_pvalue(qdist, qobs):
    """Empirical one-sided p-value: fraction of the null distribution >= qobs.

    NaN and infinite entries in *qdist* are discarded before counting
    (np.isfinite replaces the hand-rolled ~(isnan | isinf) check).
    Raises ZeroDivisionError if no finite values remain, as before.
    """
    finite = qdist[np.isfinite(qdist)]
    # count_nonzero avoids materializing the filtered array just to take len().
    return np.count_nonzero(finite >= qobs) / len(finite)
def test_inverted_residual():
    """Exercise InvertedResidual construction checks and forward shapes."""
    # Invalid configurations must be rejected at construction time.
    with pytest.raises(AssertionError):
        # stride=3 rejected -- presumably only 1 or 2 are allowed; confirm
        # against the InvertedResidual implementation.
        InvertedResidual(16, 16, 32, stride=3)
    with pytest.raises(AssertionError):
        # se_cfg must be a dict, not a list.
        InvertedResidual(16, 16, 32, se_cfg=list())
    with pytest.raises(AssertionError):
        # Disabling the expand conv is invalid for this channel combination.
        InvertedResidual(16, 16, 32, with_expand_conv=False)
    # stride=1 with in_channels == out_channels: residual shortcut, same shape.
    block = InvertedResidual(16, 16, 32, stride=1)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert (getattr(block, 'se', None) is None)
    assert block.with_res_shortcut
    assert (x_out.shape == torch.Size((1, 16, 56, 56)))
    # stride=2: no shortcut, spatial dims halved.
    block = InvertedResidual(16, 16, 32, stride=2)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert (not block.with_res_shortcut)
    assert (x_out.shape == torch.Size((1, 16, 28, 28)))
    # With a squeeze-and-excitation config, an SELayer is attached.
    se_cfg = dict(channels=32)
    block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert isinstance(block.se, SELayer)
    assert (x_out.shape == torch.Size((1, 16, 56, 56)))
    # with_expand_conv=False is valid when in_channels matches mid_channels;
    # no expand_conv module is created.
    block = InvertedResidual(32, 16, 32, with_expand_conv=False)
    x = torch.randn(1, 32, 56, 56)
    x_out = block(x)
    assert (getattr(block, 'expand_conv', None) is None)
    assert (x_out.shape == torch.Size((1, 16, 56, 56)))
    # Custom norm config: every norm layer must be a GroupNorm.
    block = InvertedResidual(16, 16, 32, norm_cfg=dict(type='GN', num_groups=2))
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    for m in block.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    assert (x_out.shape == torch.Size((1, 16, 56, 56)))
    # Custom activation config still forwards with unchanged shape.
    block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid'))
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert (x_out.shape == torch.Size((1, 16, 56, 56)))
    # Gradient checkpointing enabled; output shape unaffected.
    block = InvertedResidual(16, 16, 32, with_cp=True)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert block.with_cp
    assert (x_out.shape == torch.Size((1, 16, 56, 56)))
def is_nltk_available():
    """Return ``True`` if the ``nltk`` package is importable, else ``False``.

    Uses :func:`importlib.util.find_spec` to probe for the package without
    actually importing it (no side effects, no import cost).
    """
    # The original `if ...: return True else: return False` collapses to a
    # single boolean expression.
    return importlib.util.find_spec('nltk') is not None
def parse_args():
    """Build the CLI for Composition-1k evaluation and parse ``sys.argv``."""
    modify_args()
    arg_parser = argparse.ArgumentParser(
        description='evaluate composition-1k prediction result')
    arg_parser.add_argument(
        'pred_root',
        help='Path to the predicted alpha matte folder')
    arg_parser.add_argument(
        'gt_root',
        help='Path to the ground truth alpha matte folder')
    arg_parser.add_argument(
        '--trimap-root',
        help='Path to trimap folder. If not specified, results are calculated on the full image.')
    arg_parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Whether print result for each predicted alpha matte')
    arg_parser.add_argument(
        '--nproc',
        type=int,
        default=4,
        help='number of processers')
    return arg_parser.parse_args()
def test_for_with_nested_full_merge_branch():
    """Check propagated state-execution counts for a for-loop containing a
    fully-merging if/else branch."""
    # NOTE(review): this bare expression looks like the remnant of a stripped
    # decorator (e.g. a ``@dace.program``-style annotation carrying the
    # argument type) -- confirm against the original source.
    (dace.int32)

    def for_with_nested_full_merge_branch(a):
        for i in range(20):
            if (i < 10):
                a += 2
            else:
                a += 1

    # Build the SDFG without simplification so guard states survive, then
    # run state-execution propagation.
    sdfg = for_with_nested_full_merge_branch.to_sdfg(simplify=False)
    propagate_states(sdfg)
    # Start state executes exactly once.
    state = sdfg.start_state
    state_check_executions(state, 1)
    # Loop guard: 20 iterations plus one final check = 21 executions.
    state = sdfg.out_edges(state)[0].dst
    state_check_executions(state, 21)
    # Identify the loop-continue and loop-exit edges by their conditions.
    oedges = sdfg.out_edges(state)
    end_branch_edge = None
    for_branch_edge = None
    for edge in oedges:
        if (edge.data.label == '(i < 20)'):
            for_branch_edge = edge
        elif (edge.data.label == '(not (i < 20))'):
            end_branch_edge = edge
    if ((end_branch_edge is None) or (for_branch_edge is None)):
        raise RuntimeError("Couldn't identify guard edges")
    # Loop exit executes once; loop body 20 times.
    state = end_branch_edge.dst
    state_check_executions(state, 1)
    state = for_branch_edge.dst
    state_check_executions(state, 20)
    state = sdfg.out_edges(state)[0].dst
    state_check_executions(state, 20)
    # Inside the body: identify the two conditional-branch edges.
    oedges = sdfg.out_edges(state)
    condition_met_edge = None
    condition_broken_edge = None
    for edge in oedges:
        if (edge.data.label == '(i < 10)'):
            condition_met_edge = edge
        elif (edge.data.label == '(not (i < 10))'):
            condition_broken_edge = edge
    if ((condition_met_edge is None) or (condition_broken_edge is None)):
        raise RuntimeError("Couldn't identify conditional guard edges")
    # Both branches are data-dependent: expected 20 dynamic executions each.
    state = condition_met_edge.dst
    state_check_executions(state, 20, expected_dynamic=True)
    state = sdfg.out_edges(state)[0].dst
    state_check_executions(state, 20, expected_dynamic=True)
    state = condition_broken_edge.dst
    state_check_executions(state, 20, expected_dynamic=True)
    state = sdfg.out_edges(state)[0].dst
    state_check_executions(state, 20, expected_dynamic=True)
    # After the branches merge, the count is static again: 20 executions.
    state = sdfg.out_edges(state)[0].dst
    state_check_executions(state, 20)
def conv(input_, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding='VALID', group=1):
    """Grouped 2-D convolution with bias add (NHWC layout).

    When ``group > 1`` the input and kernel are split along the channel
    axis, convolved per group, and the results concatenated back.
    """
    channels_in = input_.get_shape()[-1]
    # Channels must divide evenly into the requested number of groups.
    assert channels_in % group == 0
    assert c_o % group == 0
    strides = [1, s_h, s_w, 1]

    def convolve(x, w):
        return tf.nn.conv2d(x, w, strides, padding=padding)

    if group == 1:
        convolved = convolve(input_, kernel)
    else:
        input_groups = tf.split(axis=3, num_or_size_splits=group, value=input_)
        kernel_groups = tf.split(axis=3, num_or_size_splits=group, value=kernel)
        per_group = [convolve(x, w) for x, w in zip(input_groups, kernel_groups)]
        convolved = tf.concat(axis=3, values=per_group)
    # Add bias and restore the static spatial/channel shape with a dynamic batch.
    output_shape = [-1] + convolved.get_shape().as_list()[1:]
    return tf.reshape(tf.nn.bias_add(convolved, biases), output_shape)
class TestPC(unittest.TestCase):
    """Smoke test: run the PC causal-discovery model on a linear dataset and
    compare its output against a known ground-truth graph."""

    def test(self):
        here = os.path.dirname(os.path.abspath(__file__))
        data = np.loadtxt(os.path.join(here, '../data/data_linear.txt'), skiprows=1)
        try:
            # Optional dependency: skip silently if causallearn utilities
            # are unavailable.
            from pyrca.thirdparty.causallearn.utils.TXT2GeneralGraph import txt2generalgraph
            true_graph = txt2generalgraph(os.path.join(here, '../data/graph.txt'))
            columns = [f'X{i}' for i in range(1, 21)]
            df = pd.DataFrame(data, columns=columns)
            # Negative entries in the general-graph matrix mark edges.
            adjacency = pd.DataFrame(
                (true_graph.graph < 0).astype(int), columns=columns, index=columns)
            result = PC(PC.config_class()).train(df)
        except ImportError as e:
            print(str(e))
            return
        # Allow a small number of edge mismatches against the ground truth.
        mismatches = np.abs(result.values - adjacency.values).sum()
        self.assertLessEqual(mismatches, 21)
class Universal(PatchInferencerBase):
    """Patch inferencer that delegates to a user-supplied ``PatchInferencer``
    class loaded dynamically from an arbitrary Python source file."""

    def __init__(self, convnet_model: str, convnet_weight_path: str, input_patch_size: tuple, output_patch_size: tuple, output_patch_overlap: tuple, num_output_channels: int=1, dtype: str='float32', bump: str='wu'):
        # Only the 'wu' bump/blending mode is supported by this inferencer.
        assert (bump == 'wu')
        super().__init__(input_patch_size, output_patch_size, output_patch_overlap, num_output_channels, dtype=dtype)
        self.num_output_channels = num_output_channels
        # Load the model-definition module from the given file path; it must
        # expose a ``PatchInferencer`` class taking (weights, output mask).
        net_source = load_source(convnet_model)
        assert hasattr(net_source, 'PatchInferencer')
        self.patch_inferencer = net_source.PatchInferencer(convnet_weight_path, self.output_patch_mask)

    def compute_device(self):
        # Prefer the device reported by the backend inferencer; otherwise
        # fall back to the host processor name.
        if hasattr(self.patch_inferencer, 'compute_device'):
            return self.patch_inferencer.compute_device
        else:
            return platform.processor()

    def __call__(self, input_patch):
        # Normalize the incoming patch to 5D before delegating.
        # NOTE(review): exact axis layout of the 5D patch is defined by the
        # base class helper -- confirm (presumably batch/channel + 3 spatial).
        input_patch = self._reshape_patch_to_5d(input_patch)
        output_patch = self.patch_inferencer(input_patch)
        # The backend must return a numpy array, not a framework tensor.
        assert isinstance(output_patch, np.ndarray)
        return output_patch
def verify_out_features_out_indices(out_features: Optional[Iterable[str]], out_indices: Optional[Iterable[int]], stage_names: Optional[Iterable[str]]):
    """Validate that ``out_features``/``out_indices`` are consistent with
    ``stage_names`` for a transformers backbone.

    Raises:
        ValueError: if ``stage_names`` is missing, if either argument has the
            wrong container type, references unknown stages/indices, or if the
            two arguments disagree with each other.
    """
    if stage_names is None:
        raise ValueError('Stage_names must be set for transformers backbones')
    if out_features is not None:
        if not isinstance(out_features, (list,)):
            raise ValueError(f'out_features must be a list {type(out_features)}')
        if any(feat not in stage_names for feat in out_features):
            raise ValueError(f'out_features must be a subset of stage_names: {stage_names} got {out_features}')
    if out_indices is not None:
        if not isinstance(out_indices, (list, tuple)):
            raise ValueError(f'out_indices must be a list or tuple, got {type(out_indices)}')
        if any(idx >= len(stage_names) for idx in out_indices):
            # BUG FIX: this message was a plain string literal, so the
            # {stage_names}/{out_indices} placeholders were never interpolated.
            raise ValueError(f'out_indices must be valid indices for stage_names {stage_names}, got {out_indices}')
    if out_features is not None and out_indices is not None:
        if len(out_features) != len(out_indices):
            raise ValueError('out_features and out_indices should have the same length if both are set')
        # Each index must name the matching feature, in order.
        if out_features != [stage_names[idx] for idx in out_indices]:
            raise ValueError('out_features and out_indices should correspond to the same stages if both are set')
# NOTE(review): the following four lines look like ``@pytest.mark`` was
# stripped from parametrize decorators during extraction -- as written they
# are syntactically invalid; confirm against the original source.
.parametrize('observation_shape', [(100,), ((100,), (200,))])
.parametrize('action_size', [2])
.parametrize('batch_size', [32])
.parametrize('gamma', [0.99])
def test_discrete_mean_q_function_forwarder(observation_shape: Shape, action_size: int, batch_size: int, gamma: float) -> None:
    """Check DiscreteMeanQFunctionForwarder shapes, targets, and TD loss."""
    encoder = DummyEncoder(observation_shape)
    q_func = DiscreteMeanQFunction(encoder, encoder.get_feature_size(), action_size)
    forwarder = DiscreteMeanQFunctionForwarder(q_func, action_size)
    x = create_torch_observations(observation_shape, batch_size)
    # Expected Q-values: one value per action.
    y = forwarder.compute_expected_q(x)
    assert (y.shape == (batch_size, action_size))
    # Target for a specific action selects that action's Q-value.
    action = torch.randint(high=action_size, size=(batch_size,))
    target = forwarder.compute_target(x, action)
    assert (target.shape == (batch_size, 1))
    assert torch.allclose(y[(torch.arange(batch_size), action)], target.view((- 1)))
    # Without an action, targets cover all actions and equal the raw Q-values.
    targets = forwarder.compute_target(x)
    assert (targets.shape == (batch_size, action_size))
    assert (y == targets).all()
    # Build a reference Bellman target: r + gamma * q' * (1 - terminal).
    q_tp1 = np.random.random((batch_size, 1))
    rew_tp1 = np.random.random((batch_size, 1))
    ter_tp1 = np.random.randint(2, size=(batch_size, 1))
    target = (rew_tp1 + ((gamma * q_tp1) * (1 - ter_tp1)))
    obs_t = create_torch_observations(observation_shape, batch_size)
    act_t = np.random.randint(action_size, size=(batch_size, 1))
    # Reference loss computed in numpy for comparison against compute_error.
    q_t = filter_by_action(q_func(obs_t).q_value.detach().numpy(), act_t, action_size)
    ref_loss = ref_huber_loss(q_t.reshape(((- 1), 1)), target)
    act_t = torch.tensor(act_t, dtype=torch.int64)
    rew_tp1 = torch.tensor(rew_tp1, dtype=torch.float32)
    q_tp1 = torch.tensor(q_tp1, dtype=torch.float32)
    ter_tp1 = torch.tensor(ter_tp1, dtype=torch.float32)
    loss = forwarder.compute_error(observations=obs_t, actions=act_t, rewards=rew_tp1, target=q_tp1, terminals=ter_tp1, gamma=gamma)
    assert np.allclose(loss.detach().numpy(), ref_loss)
def register_Ns3LteSpectrumValueCatcher_methods(root_module, cls):
    """Register the C++ API of ``ns3::LteSpectrumValueCatcher`` on the
    pybindgen class wrapper ``cls`` (auto-generated binding code)."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::LteSpectrumValueCatcher const &', 'arg0')])
    # Ptr<SpectrumValue> GetValue()
    cls.add_method('GetValue', 'ns3::Ptr< ns3::SpectrumValue >', [])
    # void ReportValue(SpectrumValue const &value)
    cls.add_method('ReportValue', 'void', [param('ns3::SpectrumValue const &', 'value')])
    return
def evaluate_eval_for_inference(hist, dataset=None):
    """Compute segmentation metrics from the confusion matrix ``hist``.

    Returns a tuple ``(acc, acc_cls, mean_iu, fwavacc)``: overall pixel
    accuracy, mean per-class accuracy, mean IoU, and frequency-weighted IoU.
    """
    diagonal = np.diag(hist)
    row_sums = hist.sum(axis=1)
    col_sums = hist.sum(axis=0)
    total = hist.sum()
    acc = diagonal.sum() / total
    acc_cls = np.nanmean(diagonal / row_sums)
    # IoU per class: TP / (TP + FP + FN).
    iu = diagonal / (row_sums + col_sums - diagonal)
    print_evaluate_results(hist, iu, dataset=dataset)
    mean_iu = np.nanmean(iu)
    logging.info('mean {}'.format(mean_iu))
    # Frequency-weighted IoU over classes that actually occur.
    freq = row_sums / total
    present = freq > 0
    fwavacc = (freq[present] * iu[present]).sum()
    return (acc, acc_cls, mean_iu, fwavacc)
def is_empty(path):
    """Return True when ``path`` is missing, is a plain file, or is an empty
    directory; print a user-facing hint in the failing cases."""
    # Guard clause: anything that is not an existing directory counts as empty.
    if not (os.path.exists(path) and not os.path.isfile(path)):
        print("There is no 'test_images' folder under current directory. Please create one and place images to be tested there.")
        return True
    if not os.listdir(path):
        print("'test_images' folder is empty. Please place images to be tested in this folder.")
        return True
    return False
def get_current_scope():
    """Return this thread's scope dict, creating an empty one on first use."""
    global _threadlocal_scope
    try:
        return _threadlocal_scope.current_scope
    except AttributeError:
        # First access on this thread: lazily initialize the scope.
        _threadlocal_scope.current_scope = {}
        return _threadlocal_scope.current_scope
class AddIndexIter(torch.utils.data.dataloader._SingleProcessDataLoaderIter):
    """Single-process DataLoader iterator that yields ``(index, data)`` pairs
    instead of just the data batch.

    NOTE(review): subclasses a private torch internal
    (``_SingleProcessDataLoaderIter``); its attributes (``_next_index``,
    ``_dataset_fetcher``, ``_pin_memory``) may change between torch versions.
    """

    def _next_data(self):
        # Next batch of sample indices from the sampler-backed iterator.
        index = self._next_index()
        data = self._dataset_fetcher.fetch(index)
        if self._pin_memory:
            # Mirror the base-class behavior when pin_memory is enabled.
            data = torch.utils.data._utils.pin_memory.pin_memory(data)
        # Expose the indices alongside the fetched batch.
        return (index, data)
def validate_rate_limit(ctx: click.core.Context, param: click.core.Parameter, raw_value: (str | None)) -> (str | None):
    """Click option callback: validate a rate-limit string by parsing it.

    Returns the value unchanged when valid (or ``None`` when unset); converts
    the parser's UsageError into a ``click.UsageError`` for the CLI layer.
    """
    if raw_value is None:
        return None
    try:
        throttling.parse_units(raw_value)
    except exceptions.UsageError as exc:
        raise click.UsageError(exc.args[0]) from exc
    return raw_value
def main():
    """Stagewise training driver: trains a Sample CNN, decaying the learning
    rate per stage and resuming from the best checkpoint of the prior stage."""
    assert args.block in ['se', 'rese', 'res', 'basic'], '--block should be one of [se|rese|res|basic]'
    print('=> Training a Sample CNN using "{}" block.'.format(args.block))
    for stage in range(args.initial_stage, args.num_lr_decays):
        stage_train_dir = make_path(args.train_dir, stage)
        earlier_stage_dirs = [make_path(args.train_dir, s) for s in range(stage)]
        next_stage_train_dir = make_path(args.train_dir, stage + 1)
        # A stage whose successor directory exists has already completed.
        if os.path.isdir(next_stage_train_dir):
            continue
        lr = args.lr * (args.lr_decay ** stage)
        os.makedirs(stage_train_dir, exist_ok=True)
        ckpt_path, ckpt_epoch, ckpt_val_loss = find_best_checkpoint(stage_train_dir)
        if ckpt_path is None:
            # Fall back to the best checkpoint of the most recent prior stage.
            ckpt_path, ckpt_epoch, ckpt_val_loss = find_best_checkpoint(*earlier_stage_dirs[-1:])
        print('\n=> Start training stage {}: lr={}, train_dir={}'.format(stage, lr, stage_train_dir))
        if ckpt_path:
            print('=> Found a trained model: epoch={}, val_loss={}, path={}'.format(ckpt_epoch, ckpt_val_loss, ckpt_path))
        else:
            print('=> No trained model found.')
        train(lr, stage_train_dir, ckpt_path, ckpt_epoch + 1)
    print('\n=> Done.\n')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.