code stringlengths 101 5.91M |
|---|
class MetricContextTest(unittest.TestCase):
    """Tests for MetricContext serialization."""

    def testPickableMetricContext(self):
        """A MetricContext must survive pickling into an arbitrary file-like sink."""
        import pickle

        class _Sink(object):
            """Minimal write-only file object recording every written chunk."""

            def __init__(self):
                self.data = []

            def write(self, stuff):
                self.data.append(stuff)

        ctx = MetricContext()
        pickle.dump(ctx, _Sink())
def get_dataset(path, has_class_directories=True):
    """Build a dataset as one ImageClass per class subdirectory of `path`.

    Args:
        path: root directory; each immediate subdirectory is one class.
        has_class_directories: unused (kept for interface compatibility).

    Returns:
        list of ImageClass, sorted by class name.
    """
    path_exp = os.path.expanduser(path)
    class_names = sorted(
        entry for entry in os.listdir(path_exp)
        if os.path.isdir(os.path.join(path_exp, entry))
    )
    dataset = []
    for class_name in class_names:
        image_paths = get_image_paths(os.path.join(path_exp, class_name))
        dataset.append(ImageClass(class_name, image_paths))
    return dataset
class FunctionTypeNode(ExprNode):
    # IR expression node describing a function type: its parameter type nodes,
    # the separator tokens between them, and a return type node.
    def __init__(self, parse_info=None, raw_text=None):
        super().__init__(IRNodeType.FunctionType, parse_info=parse_info, raw_text=raw_text)
        self.empty = None        # marker for an empty parameter list (presumably; TODO confirm against parser)
        self.params = []         # parameter type nodes, in declaration order
        self.separators = []     # separator tokens between parameters
        self.ret = None          # return type node; None until populated
class Date(_fields.Date, ModelTypeValidator):
    # Date field combined with model-type validation: only datetime.date
    # instances pass the ModelTypeValidator check.
    __doc__ = _fields.Date.__doc__  # reuse the wrapped field's docstring verbatim
    valid_types = (date,)  # accepted value types for ModelTypeValidator
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Save a training checkpoint; when `is_best`, also copy it to model_best.pth.tar.

    Args:
        state: picklable object (typically a dict) handed to torch.save.
        is_best: when True the checkpoint is duplicated as the best-model file.
        filename: base file name inside the run directory.

    Relies on the module-level `args` (work_dir, arch, action) for the run path.
    """
    directory = '%s/%s/' % (args.work_dir, args.arch + '_' + args.action)
    # BUG FIX: torch.save does not create parent directories; without this the
    # first save in a fresh work_dir raised FileNotFoundError.
    os.makedirs(directory, exist_ok=True)
    filename = directory + filename
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, directory + 'model_best.pth.tar')
class Viewer(Renderer):
    # First-person viewer window: owns a World and an Agent and translates
    # pyglet-style keyboard/mouse events into world/agent actions.
    def __init__(self, *args, overlay=True, **kwargs) -> None:
        self.exclusive = False  # True while the mouse is captured by the window
        self.world = World()
        self.agent = Agent(sustain=True)
        super().__init__(*args, model=self.world, agent=self.agent, **kwargs)
        self.overlay = overlay
        # Number keys 1-9 then 0; used to select inventory slots.
        self.num_keys = [key._1, key._2, key._3, key._4, key._5, key._6, key._7, key._8, key._9, key._0]
        # Crosshair reticle: two short crossing line segments at screen center.
        (x, y) = ((self.width // 2), (self.height // 2))
        n = 10  # half-length of each reticle segment, in pixels
        self.reticle = vertex_list(4, ('v2i', ((x - n), y, (x + n), y, x, (y - n), x, (y + n))))

    def on_mouse_press(self, x, y, button, modifiers):
        # While captured, clicks edit blocks; otherwise the first click
        # captures the mouse.
        if self.exclusive:
            # Ctrl+left-click is treated as a right-click alternative.
            right_click = ((button == mouse.RIGHT) or ((button == mouse.LEFT) and (modifiers & key.MOD_CTRL)))
            left_click = (button == mouse.LEFT)
            if (right_click or left_click):
                # left removes, right places
                self.world.place_or_remove_block(self.agent, remove=left_click, place=right_click)
        else:
            self.set_exclusive_mouse(True)

    def on_mouse_motion(self, x, y, dx, dy):
        # Mouse movement rotates the camera only while the mouse is captured.
        if self.exclusive:
            m = 0.15  # mouse sensitivity scale factor (units per pixel; TODO confirm)
            self.world.move_camera(self.agent, (dx * m), (dy * m))

    def set_exclusive_mouse(self, exclusive):
        # Track capture state locally in addition to the base implementation.
        super().set_exclusive_mouse(exclusive)
        self.exclusive = exclusive

    def on_key_press(self, symbol, modifiers):
        # WASD strafing, SPACE jump, TAB toggles flying, Z descends while
        # flying, number keys pick an inventory slot, ESC releases the mouse.
        strafe = [0, 0]  # [forward/back, left/right] movement deltas
        dy = 0           # vertical motion delta
        inventory = None
        if (symbol == key.W):
            strafe[0] -= 1
        elif (symbol == key.S):
            strafe[0] += 1
        elif (symbol == key.A):
            strafe[1] -= 1
        elif (symbol == key.D):
            strafe[1] += 1
        elif (symbol == key.SPACE):
            dy = 1
        elif (symbol == key.ESCAPE):
            self.set_exclusive_mouse(False)
        elif (symbol == key.TAB):
            self.agent.flying = (not self.agent.flying)
        elif ((symbol == key.Z) and self.agent.flying):
            dy = (- 1)
        elif (symbol in self.num_keys):
            # Map the pressed key to a 1-based slot index, wrapping at the
            # inventory size.
            index = (((symbol - self.num_keys[0]) % len(self.agent.inventory)) + 1)
            inventory = index
        self.world.movement(self.agent, strafe, dy, inventory)

    def on_key_release(self, symbol, modifiers):
        # Apply the opposite strafe delta to cancel the press from on_key_press.
        strafe = [0, 0]
        if (symbol == key.W):
            strafe[0] += 1
        elif (symbol == key.S):
            strafe[0] -= 1
        elif (symbol == key.A):
            strafe[1] += 1
        elif (symbol == key.D):
            strafe[1] -= 1
        self.world.movement(self.agent, strafe, dy=0, inventory=None)

    def on_resize(self, width, height):
        # Reposition the HUD label and rebuild the reticle around the new
        # window center (same construction as in __init__).
        self.label.y = (height - 10)
        if self.reticle:
            self.reticle.delete()
        (x, y) = ((self.width // 2), (self.height // 2))
        n = 10
        self.reticle = vertex_list(4, ('v2i', ((x - n), y, (x + n), y, x, (y - n), x, (y + n))))
def create_ffd_data(cat_id, n=3, edge_length_threshold=None, n_samples=None, overwrite=False):
    """Create and persist all FFD data for one category via FfdManager."""
    manager = FfdManager(cat_id, n, edge_length_threshold, n_samples)
    manager.save_all(overwrite=overwrite)
def test_digits_cosine_stochastic_object():
    # Regression test: StochasticGreedy with a fixed seed must reproduce the
    # precomputed ranking, gains, and subset on the digits data under cosine
    # similarity.
    model = FacilityLocationSelection(100, 'cosine', optimizer=StochasticGreedy(random_state=0))
    model.fit(X_digits)
    assert_array_equal(model.ranking, digits_cosine_stochastic_ranking)
    assert_array_almost_equal(model.gains, digits_cosine_stochastic_gains, 4)
    assert_array_almost_equal(model.subset, X_digits[model.ranking])
class shiftmlp(nn.Module):
    # Shift-MLP block: fc1 -> depthwise conv -> activation -> dropout -> fc2
    # -> dropout, where the input to each fc is first shifted group-wise along
    # one spatial axis (height before fc1, width before fc2).
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0, shift_size=5):
        super().__init__()
        out_features = (out_features or in_features)
        hidden_features = (hidden_features or in_features)
        self.dim = in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.dwconv = DWConv(hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.shift_size = shift_size  # number of channel groups / distinct shifts
        self.pad = (shift_size // 2)  # max |shift|, also the spatial padding width
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for linear layers, unit LayerNorm, and
        # fan-out-scaled normal init for convolutions.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if (isinstance(m, nn.Linear) and (m.bias is not None)):
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
            if (m.bias is not None):
                m.bias.data.zero_()

    def forward(self, x, H, W):
        # x: (B, N, C) token sequence; assumes N == H * W — TODO confirm.
        (B, N, C) = x.shape
        # --- shift along the height axis (dim 2) before fc1 ---
        xn = x.transpose(1, 2).view(B, C, H, W).contiguous()
        xn = F.pad(xn, (self.pad, self.pad, self.pad, self.pad), 'constant', 0)
        # Split channels into shift_size groups; roll each group by a distinct
        # offset in [-pad, pad].
        xs = torch.chunk(xn, self.shift_size, 1)
        x_shift = [torch.roll(x_c, shift, 2) for (x_c, shift) in zip(xs, range((- self.pad), (self.pad + 1)))]
        x_cat = torch.cat(x_shift, 1)
        # Crop the padding back off to recover an H x W map.
        x_cat = torch.narrow(x_cat, 2, self.pad, H)
        x_s = torch.narrow(x_cat, 3, self.pad, W)
        x_s = x_s.reshape(B, C, (H * W)).contiguous()
        x_shift_r = x_s.transpose(1, 2)
        x = self.fc1(x_shift_r)
        x = self.dwconv(x, H, W)
        x = self.act(x)
        x = self.drop(x)
        # --- same shift scheme along the width axis (dim 3) before fc2 ---
        # NOTE(review): view() reuses the input channel count C, so this only
        # works when hidden_features == in_features — confirm with callers.
        xn = x.transpose(1, 2).view(B, C, H, W).contiguous()
        xn = F.pad(xn, (self.pad, self.pad, self.pad, self.pad), 'constant', 0)
        xs = torch.chunk(xn, self.shift_size, 1)
        x_shift = [torch.roll(x_c, shift, 3) for (x_c, shift) in zip(xs, range((- self.pad), (self.pad + 1)))]
        x_cat = torch.cat(x_shift, 1)
        x_cat = torch.narrow(x_cat, 2, self.pad, H)
        x_s = torch.narrow(x_cat, 3, self.pad, W)
        x_s = x_s.reshape(B, C, (H * W)).contiguous()
        x_shift_c = x_s.transpose(1, 2)
        x = self.fc2(x_shift_c)
        x = self.drop(x)
        return x
class DPRState():
    """Base holder for a DPR checkpoint path; subclasses convert it into a model."""

    def __init__(self, src_file: Path):
        self.src_file = src_file

    def load_dpr_model(self):
        raise NotImplementedError

    # BUG FIX: this is a factory that takes no `self`; without @staticmethod,
    # calling it on an instance would bind the instance to `comp_type`.
    @staticmethod
    def from_type(comp_type: str, *args, **kwargs) -> 'DPRState':
        """Dispatch on the component-type prefix: 'c' -> context encoder,
        'q' -> question encoder, 'r' -> reader; anything else is an error."""
        if comp_type.startswith('c'):
            return DPRContextEncoderState(*args, **kwargs)
        if comp_type.startswith('q'):
            return DPRQuestionEncoderState(*args, **kwargs)
        if comp_type.startswith('r'):
            return DPRReaderState(*args, **kwargs)
        raise ValueError("Component type must be either 'ctx_encoder', 'question_encoder' or 'reader'.")
class IndexedDatasetBuilder(object):
    """Incrementally writes an indexed flat-binary dataset (data file + index file)."""

    # Bytes per element for each supported dtype.
    # BUG FIX: `np.float` was removed in NumPy 1.24; the builtin `float` (which
    # np.float aliased) keys the same 4-byte entry as before.
    element_sizes = {np.uint8: 1, np.int8: 1, np.int16: 2, np.int32: 4, np.int64: 8, float: 4, np.double: 8}

    def __init__(self, out_file, dtype=np.int32):
        self.out_file = open(out_file, 'wb')
        self.dtype = dtype
        self.data_offsets = [0]  # element-count offset where each item starts
        self.dim_offsets = [0]   # offset into `sizes` for each item
        self.sizes = []          # flattened per-dimension sizes of all items
        self.element_size = self.element_sizes[self.dtype]

    def add_item(self, tensor):
        """Append one tensor's data and record its offsets/sizes."""
        # NOTE(review): the +1 shifts every stored value by one (presumably to
        # reserve index 0) — confirm the matching reader compensates.
        nbytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))
        # BUG FIX: offsets are element counts packed as longs in finalize();
        # true division produced floats (and shadowed the builtin `bytes`).
        self.data_offsets.append(self.data_offsets[-1] + nbytes // self.element_size)
        for s in tensor.size():
            self.sizes.append(s)
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))

    def merge_file_(self, another_file):
        """Append another on-disk dataset of the same dtype, rebasing its offsets."""
        index = IndexedDataset(another_file)
        assert index.dtype == self.dtype
        begin = self.data_offsets[-1]
        for offset in index.data_offsets[1:]:
            self.data_offsets.append(begin + offset)
        self.sizes.extend(index.sizes)
        begin = self.dim_offsets[-1]
        for dim_offset in index.dim_offsets[1:]:
            self.dim_offsets.append(begin + dim_offset)
        # Stream the raw data across in fixed-size chunks.
        with open(data_file_path(another_file), 'rb') as f:
            while True:
                data = f.read(1024)
                if data:
                    self.out_file.write(data)
                else:
                    break

    def finalize(self, index_file):
        """Close the data file and write the index: magic, version, dtype code,
        element size, counts, then the three offset/size tables."""
        self.out_file.close()
        index = open(index_file, 'wb')
        index.write(b'TNTIDX\x00\x00')
        index.write(struct.pack('<Q', 1))
        index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
        index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
        write_longs(index, self.dim_offsets)
        write_longs(index, self.data_offsets)
        write_longs(index, self.sizes)
        index.close()
class ExtraTreesPreprocessorClassification(AutotabularPreprocessingAlgorithm):
def __init__(self, n_estimators, criterion, min_samples_leaf, min_samples_split, max_features, bootstrap, max_leaf_nodes, max_depth, min_weight_fraction_leaf, min_impurity_decrease, oob_score=False, n_jobs=1, random_state=None, verbose=0, class_weight=None):
self.n_estimators = n_estimators
self.estimator_increment = 10
if (criterion not in ('gini', 'entropy')):
raise ValueError(("'criterion' is not in ('gini', 'entropy'): %s" % criterion))
self.criterion = criterion
self.min_samples_leaf = min_samples_leaf
self.min_samples_split = min_samples_split
self.max_features = max_features
self.bootstrap = bootstrap
self.max_leaf_nodes = max_leaf_nodes
self.max_depth = max_depth
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.min_impurity_decrease = min_impurity_decrease
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.class_weight = class_weight
self.preprocessor = None
def fit(self, X, Y, sample_weight=None):
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
self.n_estimators = int(self.n_estimators)
if check_none(self.max_leaf_nodes):
self.max_leaf_nodes = None
else:
self.max_leaf_nodes = int(self.max_leaf_nodes)
if check_none(self.max_depth):
self.max_depth = None
else:
self.max_depth = int(self.max_depth)
self.bootstrap = check_for_bool(self.bootstrap)
self.n_jobs = int(self.n_jobs)
self.min_impurity_decrease = float(self.min_impurity_decrease)
self.max_features = self.max_features
self.min_samples_leaf = int(self.min_samples_leaf)
self.min_samples_split = int(self.min_samples_split)
self.verbose = int(self.verbose)
max_features = int((X.shape[1] ** float(self.max_features)))
estimator = ExtraTreesClassifier(n_estimators=self.n_estimators, criterion=self.criterion, max_depth=self.max_depth, min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, bootstrap=self.bootstrap, max_features=max_features, max_leaf_nodes=self.max_leaf_nodes, min_impurity_decrease=self.min_impurity_decrease, oob_score=self.oob_score, n_jobs=self.n_jobs, verbose=self.verbose, random_state=self.random_state, class_weight=self.class_weight)
estimator.fit(X, Y, sample_weight=sample_weight)
self.preprocessor = SelectFromModel(estimator=estimator, threshold='mean', prefit=True)
return self
def transform(self, X):
if (self.preprocessor is None):
raise NotImplementedError
return self.preprocessor.transform(X)
def get_properties(dataset_properties=None):
return {'shortname': 'ETC', 'name': 'Extra Trees Classifier Preprocessing', 'handles_regression': False, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': False, 'is_deterministic': True, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (INPUT,)}
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
n_estimators = Constant('n_estimators', 100)
criterion = CategoricalHyperparameter('criterion', ['gini', 'entropy'], default_value='gini')
max_features = UniformFloatHyperparameter('max_features', 0, 1, default_value=0.5)
max_depth = UnParametrizedHyperparameter(name='max_depth', value='None')
max_leaf_nodes = UnParametrizedHyperparameter('max_leaf_nodes', 'None')
min_samples_split = UniformIntegerHyperparameter('min_samples_split', 2, 20, default_value=2)
min_samples_leaf = UniformIntegerHyperparameter('min_samples_leaf', 1, 20, default_value=1)
min_weight_fraction_leaf = UnParametrizedHyperparameter('min_weight_fraction_leaf', 0.0)
min_impurity_decrease = UnParametrizedHyperparameter('min_impurity_decrease', 0.0)
bootstrap = CategoricalHyperparameter('bootstrap', ['True', 'False'], default_value='False')
cs.add_hyperparameters([n_estimators, criterion, max_features, max_depth, max_leaf_nodes, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, min_impurity_decrease, bootstrap])
return cs |
class Masking(Layer):
    # Thin wrapper constructing the backend Masking layer; presumably masks
    # inputs equal to mask_value — TODO confirm against the Layer base class.
    def __init__(self, mask_value, bigdl_type='float'):
        # First argument (jvalue) is None so the base class builds the backend
        # layer from the remaining constructor arguments.
        super(Masking, self).__init__(None, bigdl_type, mask_value)
def train_batch_distill(teacher_model, student_model, optimizer, baseline, step, batch, tb_logger, opts):
    """Run one distillation training step.

    Evaluates teacher(s) and student on the batch, combines the REINFORCE loss
    with a KL distillation loss on the policy log-probabilities, and applies a
    single optimizer update. `opts.router` decides whose routing decisions (pi)
    are imposed on the other model; with `opts.multi_teacher` three teachers
    ('uniform', 'cluster', 'mixed') are queried.
    """
    (x, bl_val) = baseline.unwrap_batch(batch)
    x = move_to(x, opts.device)
    bl_val = (move_to(bl_val, opts.device) if (bl_val is not None) else None)
    if (opts.router == 'teacher'):
        if opts.multi_teacher:
            (teacher_embeddings, teacher_hidden, teacher_logp, teacher_pi, teacher_attn) = ({}, {}, {}, {}, {})
            for i in ['uniform', 'cluster', 'mixed']:
                with torch.no_grad():
                    (teacher_embeddings[i], teacher_hidden[i], teacher_attn[i], teacher_logp[i], _, _, teacher_pi[i]) = teacher_model[i](x, distillation=True, return_pi=True, opts=opts)
            # Pick one teacher at random to provide the student's route.
            class_type = np.random.choice(['uniform', 'cluster', 'mixed'], 1).item()
            print('Randomly choose a {} teacher to route if you use teacher as router in multi-teacher'.format(class_type))
            (student_embeddings, student_hidden, student_attn, student_logp, cost, log_likelihood, student_pi) = student_model(x, return_pi=True, log_time=False, distillation=True, route=teacher_pi[class_type], opts=opts)
            assert torch.equal(teacher_pi[class_type], student_pi), 'Teacher route and student route are not same!'
        else:
            with torch.no_grad():
                (teacher_embeddings, teacher_hidden, teacher_attn, teacher_logp, _, _, teacher_pi) = teacher_model(x, distillation=True, return_pi=True, opts=opts)
            # BUG FIX: this call returns 7 values (incl. the attention tensor),
            # exactly as in every other branch; the original unpacked only 6,
            # which would raise ValueError at runtime.
            (student_embeddings, student_hidden, student_attn, student_logp, cost, log_likelihood, student_pi) = student_model(x, return_pi=True, log_time=False, distillation=True, route=teacher_pi, opts=opts)
            assert torch.equal(teacher_pi, student_pi), 'Teacher route and student route are not same!'
    elif (opts.router == 'student'):
        (student_embeddings, student_hidden, student_attn, student_logp, cost, log_likelihood, student_pi) = student_model(x, return_pi=True, log_time=False, distillation=True, opts=opts)
        if opts.multi_teacher:
            (teacher_embeddings, teacher_hidden, teacher_logp, teacher_pi, teacher_attn) = ({}, {}, {}, {}, {})
            for i in ['uniform', 'cluster', 'mixed']:
                with torch.no_grad():
                    (teacher_embeddings[i], teacher_hidden[i], teacher_attn[i], teacher_logp[i], _, _, teacher_pi[i]) = teacher_model[i](x, return_pi=True, distillation=True, route=student_pi, opts=opts)
                assert torch.equal(teacher_pi[i], student_pi), 'Teacher route and student route are not same!'
        else:
            with torch.no_grad():
                (teacher_embeddings, teacher_hidden, teacher_attn, teacher_logp, _, _, teacher_pi) = teacher_model(x, return_pi=True, distillation=True, route=student_pi, opts=opts)
            assert torch.equal(teacher_pi, student_pi), 'Teacher route and student route are not same!'
    # Baseline value: evaluate it on the fly unless the batch carried one.
    (bl_val, bl_loss) = (baseline.eval(x, cost) if (bl_val is None) else (bl_val, 0))
    reinforce_loss = ((cost - bl_val) * log_likelihood).mean()
    task_loss = reinforce_loss + bl_loss
    if opts.multi_teacher:
        # Sum the KL terms against each of the three teachers.
        soft_loss0 = [nn.KLDivLoss()(student_logp, teacher_logp[key].exp()) for key in ['uniform', 'cluster', 'mixed']]
        soft_loss = torch.zeros(1).to(opts.device)
        for i in range(3):
            soft_loss.add_(soft_loss0[i])
    else:
        if opts.meaningful_KLD:
            # Hand-rolled KL with epsilon smoothing; twist_kldloss swaps the
            # direction of the divergence.
            soft_loss = ((teacher_logp.exp() * ((teacher_logp.exp() + 1e-05).log() - (student_logp.exp() + 1e-05).log())).sum(dim=1).mean() if (not opts.twist_kldloss) else (student_logp.exp() * ((student_logp.exp() + 1e-05).log() - (teacher_logp.exp() + 1e-05).log())).sum(dim=1).mean())
        else:
            (teacher_logp, student_logp) = ((teacher_logp.exp() + 1e-05).log(), (student_logp.exp() + 1e-05).log())
            soft_loss = (nn.KLDivLoss()(student_logp, teacher_logp.exp()) if (not opts.twist_kldloss) else nn.KLDivLoss()(teacher_logp, student_logp.exp()))
    loss = (task_loss * opts.rl_alpha) + (soft_loss * opts.distill_alpha)
    optimizer.zero_grad()
    loss.backward()
    grad_norms = clip_grad_norms(optimizer.param_groups, opts.max_grad_norm)
    optimizer.step()
    if (step % int(opts.log_step)) == 0:
        log_values(cost, grad_norms, step, log_likelihood, reinforce_loss, bl_loss, soft_loss, loss, tb_logger, opts)
def create_and_report(pre_sampled, model_id, n_samples, edge_length_threshold, overwrite=False):
    """Compute and print the average FFD EMD for one model.

    edge_length_threshold is only forwarded when working from meshes
    (pre_sampled=False); the pre-sampled path ignores it.
    """
    import template_ffd.eval.ffd_emd as emd
    kwargs = dict(pre_sampled=pre_sampled, model_id=model_id, n_samples=n_samples)
    if not pre_sampled:
        kwargs['edge_length_threshold'] = edge_length_threshold
    print(emd.get_emd_average(**kwargs))
class HParams(object):
    """Container of typed, named hyperparameters.

    Values registered via the constructor kwargs (or add_hparam) become plain
    attributes; the type recorded at registration is enforced on later
    overrides (set_hparam / parse / override_from_dict).
    """
    _HAS_DYNAMIC_ATTRIBUTES = True  # attributes are set dynamically per hparam

    def __init__(self, hparam_def=None, model_structure=None, **kwargs):
        # Maps name -> (value type, is_list).
        self._hparam_types = {}
        self._model_structure = model_structure
        if hparam_def:
            # Proto-based construction is deliberately unsupported in this build.
            raise ValueError('hparam_def has been disabled in this version')
        else:
            for (name, value) in six.iteritems(kwargs):
                self.add_hparam(name, value)

    def add_hparam(self, name, value):
        """Register a new hyperparameter; raises if `name` is already set."""
        if (getattr(self, name, None) is not None):
            raise ValueError(('Hyperparameter name is reserved: %s' % name))
        if isinstance(value, (list, tuple)):
            if (not value):
                # The element type is inferred from the first element, so an
                # empty sequence would leave the type unknown.
                raise ValueError(('Multi-valued hyperparameters cannot be empty: %s' % name))
            self._hparam_types[name] = (type(value[0]), True)
        else:
            self._hparam_types[name] = (type(value), False)
        setattr(self, name, value)

    def set_hparam(self, name, value):
        """Override an existing hyperparameter, coercing to its registered type."""
        (param_type, is_list) = self._hparam_types[name]
        if isinstance(value, list):
            if (not is_list):
                raise ValueError(('Must not pass a list for single-valued parameter: %s' % name))
            setattr(self, name, [_cast_to_type_if_compatible(name, param_type, v) for v in value])
        else:
            if is_list:
                raise ValueError(('Must pass a list for multi-valued parameter: %s.' % name))
            setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))

    def del_hparam(self, name):
        """Remove a hyperparameter and its type record, if present."""
        if hasattr(self, name):
            delattr(self, name)
            del self._hparam_types[name]

    def parse(self, values):
        """Parse a 'name=value,...' string and override matching hyperparameters."""
        type_map = dict()
        for (name, t) in self._hparam_types.items():
            (param_type, _) = t
            type_map[name] = param_type
        values_map = parse_values(values, type_map)
        return self.override_from_dict(values_map)

    def override_from_dict(self, values_dict):
        """Set every hyperparameter in `values_dict`; returns self for chaining."""
        for (name, value) in values_dict.items():
            self.set_hparam(name, value)
        return self

    def set_from_map(self, values_map):
        # Alias for override_from_dict (kept for backward compatibility,
        # presumably — TODO confirm).
        return self.override_from_dict(values_dict=values_map)

    def set_model_structure(self, model_structure):
        self._model_structure = model_structure

    def get_model_structure(self):
        return self._model_structure

    def to_json(self, indent=None, separators=None, sort_keys=False):
        """Serialize the hyperparameter values to a JSON string."""
        return json.dumps(self.values(), indent=indent, separators=separators, sort_keys=sort_keys)

    def parse_json(self, values_json):
        """Override hyperparameters from a JSON object string; returns self."""
        values_map = json.loads(values_json)
        return self.override_from_dict(values_map)

    def values(self):
        """Return a dict of hyperparameter name -> current value."""
        return {n: getattr(self, n) for n in self._hparam_types.keys()}

    def get(self, key, default=None):
        """Dict-style lookup; validates `default` against the registered type."""
        if (key in self._hparam_types):
            if (default is not None):
                (param_type, is_param_list) = self._hparam_types[key]
                type_str = (('list<%s>' % param_type) if is_param_list else str(param_type))
                fail_msg = ("Hparam '%s' of type '%s' is incompatible with default=%s" % (key, type_str, default))
                is_default_list = isinstance(default, list)
                if (is_param_list != is_default_list):
                    raise ValueError(fail_msg)
                try:
                    # Only checks castability; the converted result is discarded.
                    if is_default_list:
                        for value in default:
                            _cast_to_type_if_compatible(key, param_type, value)
                    else:
                        _cast_to_type_if_compatible(key, param_type, default)
                except ValueError as e:
                    raise ValueError(('%s. %s' % (fail_msg, e)))
            return getattr(self, key)
        return default

    def __contains__(self, key):
        return (key in self._hparam_types)

    def __str__(self):
        return str(sorted(self.values().items()))

    def __repr__(self):
        return ('%s(%s)' % (type(self).__name__, self.__str__()))
def _get_kind_name(param_type, is_list):
if issubclass(param_type, bool):
typename = 'bool'
elif issubclass(param_type, six.integer_types):
typename = 'int64'
elif issubclass(param_type, (six.string_types, six.binary_type)):
typename = 'bytes'
elif issubclass(param_type, float):
typename = 'float'
else:
raise ValueError(('Unsupported parameter type: %s' % str(param_type)))
suffix = ('list' if is_list else 'value')
return '_'.join([typename, suffix]) |
class Deterministic_Layer(object):
    """Fully-connected layer with a fixed (non-stochastic) activation function."""

    def __init__(self, input_size, output_size, activation):
        self.input_size = input_size
        self.output_size = output_size
        self.name = activation
        # Name -> callable dispatch table. An unrecognized name leaves
        # self._activation unset, exactly like the original if-chain did.
        known_activations = {
            'softplus': tf.nn.softplus,
            'relu': tf.nn.relu,
            'sigmoid': tf.sigmoid,
            'tanh': tf.tanh,
            'linear': (lambda x: x),
            'softmax': tf.nn.softmax,
        }
        if activation in known_activations:
            self._activation = known_activations[activation]
        weights = tf.Variable(init_weights(input_size, output_size))
        biases = tf.Variable(tf.zeros([output_size]))
        self.params = [weights, biases]

    def encode(self, input):
        """Apply the affine transform followed by the activation."""
        return self._activation(tf.matmul(input, self.params[0]) + self.params[1])

    def get_name(self):
        return self.name
def _viz(trajectories, name=''):
    # Render the given trajectories and save the figure to
    # OUT_TESTS_DIR/<name>_test.png.
    fig = plt.figure(figsize=(10, 7), dpi=300)
    ax = fig.add_subplot(1, 1, 1)
    ax.set_aspect('equal')
    (traj_lines, traj_points) = plot_trajectories(ax=ax, trajectories=list(trajectories))
    ax.set_xlim([(- 1), 15])
    ax.set_ylim([(- 10), 10])
    plt.gca().relim(visible_only=True)  # recompute data limits from visible artists
    file_name = os.path.join(OUT_TESTS_DIR, f'{name}_test.png')
    plt.savefig(file_name)
def build_gflags(args):
    """Download, unpack and build gflags 2.2.2 under args.build_path (idempotent)."""
    path = os.path.join(args.build_path, 'gflags')
    if os.path.exists(path):
        return  # already downloaded/unpacked; nothing to do
    # BUG FIX: the URL literal was truncated in the source (`url = '`), a
    # SyntaxError. Restored to the official gflags v2.2.2 release archive,
    # which matches the MD5 checksum below — verify before relying on it.
    url = 'https://github.com/gflags/gflags/archive/v2.2.2.zip'
    archive_path = os.path.join(args.download_path, 'gflags-2.2.2.zip')
    download_zipfile(url, archive_path, args.build_path, 'ff856ff64757f1381f7da260f79ba79b')
    shutil.move(os.path.join(args.build_path, 'gflags-2.2.2'), path)
    # Drop the shipped Bazel BUILD file so it cannot conflict with this build.
    os.remove(os.path.join(path, 'BUILD'))
    build_cmake_project(args, os.path.join(path, '__build__'))
class ConsolidateBlocks(TransformationPass):
    # Pass that replaces each run ("block") of adjacent gates, as recorded in
    # property_set['block_list'], with a single equivalent UnitaryGate.
    def run(self, dag):
        # Rebuild the circuit into a fresh DAG with the same registers.
        new_dag = DAGCircuit()
        for qreg in dag.qregs.values():
            new_dag.add_qreg(qreg)
        for creg in dag.cregs.values():
            new_dag.add_creg(creg)
        # Map each qubit wire to a global index used for stable ordering.
        # NOTE(review): register *position* plus wire index is used rather than
        # the cumulative size of preceding registers — indices can collide when
        # an earlier register holds more than one qubit; confirm intent.
        global_index_map = {}
        for wire in dag.wires:
            if (not isinstance(wire, Qubit)):
                continue
            global_qregs = list(dag.qregs.values())
            global_index_map[wire] = (global_qregs.index(wire.register) + wire.index)
        blocks = self.property_set['block_list']
        nodes_seen = set()
        for node in dag.topological_op_nodes():
            # Skip nodes already consumed into a unitary, and wire in/out markers.
            if ((node in nodes_seen) or (node.type == 'in') or (node.type == 'out')):
                continue
            if (blocks and (node in blocks[0])):
                # This node begins the next pending block: collapse the whole
                # block into one UnitaryGate acting on the block's qubits.
                block = blocks[0]
                block_qargs = set()
                for nd in block:
                    block_qargs |= set(nd.qargs)
                block_width = len(block_qargs)
                q = QuantumRegister(block_width)
                subcirc = QuantumCircuit(q)
                block_index_map = self._block_qargs_to_indices(block_qargs, global_index_map)
                for nd in block:
                    nodes_seen.add(nd)
                    subcirc.append(nd.op, [q[block_index_map[i]] for i in nd.qargs])
                unitary = UnitaryGate(Operator(subcirc))
                new_dag.apply_operation_back(unitary, sorted(block_qargs, key=(lambda x: block_index_map[x])))
                del blocks[0]
            else:
                # If the node belongs to a later block, defer it until that
                # block's turn; otherwise copy it over unchanged.
                for block in blocks[1:]:
                    if (node in block):
                        break
                else:
                    nodes_seen.add(node)
                    new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
        return new_dag

    def _block_qargs_to_indices(self, block_qargs, global_index_map):
        # Map the block's qubits to contiguous indices 0..width-1 while
        # preserving their relative global ordering.
        block_indices = [global_index_map[q] for q in block_qargs]
        ordered_block_indices = sorted(block_indices)
        block_positions = {q: ordered_block_indices.index(global_index_map[q]) for q in block_qargs}
        return block_positions
def main():
    """Start the pose_estimator ROS node and block until shutdown."""
    rospy.init_node('pose_estimator', anonymous=True)
    estimator = PoseEstimator()  # keep a reference so its subscribers stay alive
    try:
        rospy.spin()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the node; exit quietly.
        pass
class TwoTowerModel(object):
    """Two-tower retrieval model: a user tower and an item tower whose
    L2-normalized outputs are combined with a dot product."""

    def __init__(self, user_col_info, item_col_info, hidden_layers=[1024, 512, 128]):
        self.user_col_info = user_col_info
        self.item_col_info = item_col_info
        self.hidden_layers = hidden_layers

    def build_model(self):
        """Assemble and return the Keras model: dot(tower(user), tower(item))."""
        hidden_layers = self.hidden_layers

        def build_1tower(col_info):
            # One-hot encode indicator (small-cardinality categorical) columns.
            indicator_input_layers = []
            indicator_layers = []
            for idx in range(len(col_info.indicator_cols)):
                inp = Input(shape=(), name=col_info.name + col_info.indicator_cols[idx], dtype='int32')
                indicator_input_layers.append(inp)
                indicator_layers.append(tf.keras.backend.one_hot(inp, col_info.indicator_dims[idx] + 1))
            # Embed high-cardinality id columns.
            embed_input_layers = []
            embed_layers = []
            for idx in range(len(col_info.embed_in_dims)):
                inp = Input(shape=(), name=col_info.name + col_info.embed_cols[idx], dtype='int32')
                embed_input_layers.append(inp)
                embedded = Embedding(col_info.embed_in_dims[idx] + 1, output_dim=col_info.embed_out_dims[idx])(inp)
                embed_layers.append(Flatten()(embedded))
            # Numerical columns pass straight through as inputs.
            numerical_input_layers = []
            for idx in range(len(col_info.numerical_cols)):
                numerical_input_layers.append(Input(shape=col_info.numerical_dims[idx], name=col_info.name + col_info.numerical_cols[idx]))
            features = indicator_layers + embed_layers + numerical_input_layers
            merged = features[0] if len(features) == 1 else concatenate(features, axis=1)
            # MLP trunk over the merged features.
            hidden = Dense(hidden_layers[0], activation='relu')(merged)
            for layer_idx in range(1, len(hidden_layers)):
                hidden = Dense(hidden_layers[layer_idx], activation='relu')(hidden)
            # L2-normalize so the towers' dot product is a cosine similarity.
            normalize = Lambda(lambda x: tf.nn.l2_normalize(x, axis=1), name=col_info.name + '_embed_output')
            tower_output = normalize(hidden)
            tower_inputs = indicator_input_layers + embed_input_layers + numerical_input_layers
            return (tower_inputs, tower_output)

        (user_input, user_out) = build_1tower(col_info=self.user_col_info)
        (item_input, item_out) = build_1tower(col_info=self.item_col_info)
        dot = tf.keras.layers.Dot(axes=1)
        out = dot([user_out, item_out])
        model = tf.keras.Model(user_input + item_input, out)
        return model
def print_noise_layer_weights(model: tf.keras.models.Model):
    # Print the learned parameters of each recognized noise layer in `model`.
    # NOTE(review): `i` is incremented for every layer, so the printed index is
    # the layer's position counter, not a noise-layer counter — confirm intent.
    i = 1
    for l in model.layers:
        class_name = l.__class__.__name__
        if (class_name == 'LearnGaussianNoiseVarPropagationLayer'):
            (rate, mean) = l.get_weights()
            print((str(i) + '. noise layer: LearnGaussianNoiseVarPropagationLayer'))
            print('Mean:')
            print(mean)
            print('Standard deviation:')
            # `rate` is stored in log space; exponentiate to report the std.
            print(np.exp(rate))
        elif (class_name == 'LearnDropoutVarPropagationLayer'):
            rate = l.get_weights()
            print((str(i) + '. noise layer: LearnDropoutVarPropagationLayer'))
            print('Dropout rate:')
            # `rate` is stored as a logit; squash through sigmoid to report it.
            print(sigmoid_np(np.array(rate)))
        i += 1
class TestGaussianCNNBaseline(TfGraphTestCase):
    """Tests for GaussianCNNBaseline with the regressor mocked by a simple stand-in.

    BUG FIX: the parametrize decorators had lost their '@pytest.mark.' prefix
    and were bare '.parametrize(...)' expressions — a SyntaxError; restored.
    """

    @pytest.mark.parametrize('obs_dim', [[1, 1, 1], [2, 2, 2], [1, 1], [2, 2]])
    def test_fit(self, obs_dim):
        box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
        with mock.patch('garage.tf.baselines.gaussian_cnn_baseline.GaussianCNNRegressor', new=SimpleGaussianCNNRegressor):
            gcb = GaussianCNNBaseline(env_spec=box_env.spec)
            paths = [{'observations': [np.full(obs_dim, 1)], 'returns': [1]}, {'observations': [np.full(obs_dim, 2)], 'returns': [2]}]
            gcb.fit(paths)
            obs = {'observations': [np.full(obs_dim, 1), np.full(obs_dim, 2)]}
            prediction = gcb.predict(obs)
            assert np.array_equal(prediction, [1, 2])

    @pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1, 1, 1], [2, 2, 2, 2]])
    def test_invalid_obs_shape(self, obs_dim):
        # Observation ranks outside the supported range must be rejected.
        box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
        with pytest.raises(ValueError):
            GaussianCNNBaseline(env_spec=box_env.spec)

    def test_obs_is_image(self):
        # When observations are images, fit() must normalize pixel batches
        # exactly once; predict() must also receive normalized observations.
        env = GarageEnv(DummyDiscretePixelEnv(), is_image=True)
        with mock.patch('garage.tf.baselines.gaussian_cnn_baseline.GaussianCNNRegressor', new=SimpleGaussianCNNRegressor):
            with mock.patch('garage.tf.baselines.gaussian_cnn_baseline.normalize_pixel_batch', side_effect=normalize_pixel_batch) as npb:
                gcb = GaussianCNNBaseline(env_spec=env.spec)
                obs_dim = env.spec.observation_space.shape
                paths = [{'observations': [np.full(obs_dim, 1)], 'returns': [1]}, {'observations': [np.full(obs_dim, 2)], 'returns': [2]}]
                gcb.fit(paths)
                observations = np.concatenate([p['observations'] for p in paths])
                assert (npb.call_count == 1), ("Expected '%s' to have been called once. Called %s times." % ((npb._mock_name or 'mock'), npb.call_count))
                assert (npb.call_args_list[0][0][0] == observations).all()
                obs = {'observations': [np.full(obs_dim, 1), np.full(obs_dim, 2)]}
                observations = obs['observations']
                gcb.predict(obs)
                assert (npb.call_args_list[1][0][0] == observations)

    def test_obs_not_image(self):
        # Without is_image, pixel normalization must never be invoked.
        env = GarageEnv(DummyDiscretePixelEnv(), is_image=False)
        with mock.patch('garage.tf.baselines.gaussian_cnn_baseline.GaussianCNNRegressor', new=SimpleGaussianCNNRegressor):
            with mock.patch('garage.tf.baselines.gaussian_cnn_baseline.normalize_pixel_batch', side_effect=normalize_pixel_batch) as npb:
                gcb = GaussianCNNBaseline(env_spec=env.spec)
                obs_dim = env.spec.observation_space.shape
                paths = [{'observations': [np.full(obs_dim, 1)], 'returns': [1]}, {'observations': [np.full(obs_dim, 2)], 'returns': [2]}]
                gcb.fit(paths)
                obs = {'observations': [np.full(obs_dim, 1), np.full(obs_dim, 2)]}
                gcb.predict(obs)
                assert (not npb.called)

    @pytest.mark.parametrize('obs_dim', [[1, 1, 1], [2, 2, 2], [1, 1], [2, 2]])
    def test_param_values(self, obs_dim):
        # set_param_values must copy one baseline's parameters onto another.
        box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
        with mock.patch('garage.tf.baselines.gaussian_cnn_baseline.GaussianCNNRegressor', new=SimpleGaussianCNNRegressor):
            gcb = GaussianCNNBaseline(env_spec=box_env.spec)
            new_gcb = GaussianCNNBaseline(env_spec=box_env.spec, name='GaussianCNNBaseline2')
            with tf.compat.v1.variable_scope('GaussianCNNBaseline', reuse=True):
                return_var = tf.compat.v1.get_variable('SimpleGaussianCNNModel/return_var')
            return_var.load(1.0)
            old_param_values = gcb.get_param_values()
            new_param_values = new_gcb.get_param_values()
            assert (not np.array_equal(old_param_values, new_param_values))
            new_gcb.set_param_values(old_param_values)
            new_param_values = new_gcb.get_param_values()
            assert np.array_equal(old_param_values, new_param_values)

    @pytest.mark.parametrize('obs_dim', [[1, 1, 1], [2, 2, 2], [1, 1], [2, 2]])
    def test_get_params_internal(self, obs_dim):
        box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
        with mock.patch('garage.tf.baselines.gaussian_cnn_baseline.GaussianCNNRegressor', new=SimpleGaussianCNNRegressor):
            gcb = GaussianCNNBaseline(env_spec=box_env.spec, regressor_args=dict())
            params_interal = gcb.get_params_internal()
            trainable_params = tf.compat.v1.trainable_variables(scope='GaussianCNNBaseline')
            assert np.array_equal(params_interal, trainable_params)

    def test_is_pickleable(self):
        # A pickled-and-restored baseline must produce identical predictions.
        box_env = GarageEnv(DummyBoxEnv(obs_dim=(1, 1)))
        with mock.patch('garage.tf.baselines.gaussian_cnn_baseline.GaussianCNNRegressor', new=SimpleGaussianCNNRegressor):
            gcb = GaussianCNNBaseline(env_spec=box_env.spec)
            obs = {'observations': [np.full((1, 1), 1), np.full((1, 1), 1)]}
            with tf.compat.v1.variable_scope('GaussianCNNBaseline', reuse=True):
                return_var = tf.compat.v1.get_variable('SimpleGaussianCNNModel/return_var')
            return_var.load(1.0)
            prediction = gcb.predict(obs)
            h = pickle.dumps(gcb)
            with tf.compat.v1.Session(graph=tf.Graph()):
                gcb_pickled = pickle.loads(h)
                prediction2 = gcb_pickled.predict(obs)
                assert np.array_equal(prediction, prediction2)
def Fog(name=None, deterministic=False, random_state=None):
    # Convenience factory: returns a CloudLayer parameterized to look like fog
    # (high intensity, high alpha, low-frequency coarse structure).
    if (name is None):
        # Default name mirrors other factory functions: "Unnamed<caller name>".
        name = ('Unnamed%s' % (ia.caller_name(),))
    return CloudLayer(intensity_mean=(220, 255), intensity_freq_exponent=((- 2.0), (- 1.5)), intensity_coarse_scale=2, alpha_min=(0.7, 0.9), alpha_multiplier=0.3, alpha_size_px_max=(2, 8), alpha_freq_exponent=((- 4.0), (- 2.0)), sparsity=0.9, density_multiplier=(0.4, 0.9), name=name, deterministic=deterministic, random_state=random_state)
def _load_state_dict_into_model(model_to_load, state_dict):
state_dict = state_dict.copy()
error_msgs = []
def load(module: torch.nn.Module, prefix=''):
args = (state_dict, prefix, {}, True, [], [], error_msgs)
module._load_from_state_dict(*args)
for (name, child) in module._modules.items():
if (child is not None):
load(child, ((prefix + name) + '.'))
load(model_to_load)
return error_msgs |
class BlipImageProcessor(metaclass=DummyObject):
    # Placeholder emitted when the 'vision' backend is not installed; any
    # instantiation goes through requires_backends (which presumably raises an
    # informative error — behavior defined elsewhere).
    _backends = ['vision']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
class TestSummary():
    """Tests for ordering.OrderingSummary: sort(), save() and per-class tables."""

    # NOTE(review): this decorator survived only as the bare expression
    # "(scope='module')" (a SyntaxError); restored as a pytest fixture —
    # confirm `pytest` is imported at the top of the file.
    @pytest.fixture(scope='module')
    def summary(self):
        """Small OrderingSummary fixture with three features and two orderings."""
        list_feat = ['John', 'James', 'Christine']
        list_descript = ['kind', 'caring', 'righteous']
        list_contrib_ordering = [[['A', 5.0], ['B', 3.0], ['C', (- 1.0)], ['D', (- 5.0)]], [['D', 100.0], ['C', 99.0], ['B', 98.0], ['A', 97.0]], [['C', (- 4.0)], ['D', (- 5.0)], ['B', (- 7.0)], ['A', (- 132.0)]]]
        list_freq_ordering = [[['A', 300.0], ['B', 200.0], ['C', 100.0], ['D', 0.0]], [['D', 5.0], ['C', 5.0], ['B', 5.0], ['A', 5.0]], [['C', 2.0], ['D', 23.0], ['B', 34.0], ['A', 45.0]]]
        od = OrderedDict([('feat', list_feat), ('descript', list_descript), ('contrib_ordering', list_contrib_ordering), ('freq_ordering', list_freq_ordering)])
        return ordering.OrderingSummary(od)

    def test_sort(self, summary):
        """sort() should reorder rows (most negative contribution first)."""
        # NOTE(review): this early return disables the assertions below; kept
        # in place so the (previously skipped) test is not silently re-enabled.
        return
        sorted_summary = copy.deepcopy(summary)
        sorted_summary.sort()
        sorted_od = sorted_summary.od
        assert (sorted_od['feat'] == ['Christine', 'John', 'James'])
        assert (sorted_od['descript'] == ['righteous', 'kind', 'caring'])
        assert (sorted_od['contrib_ordering'] == [[['C', (- 4.0)], ['D', (- 5.0)], ['B', (- 7.0)], ['A', (- 132.0)]], [['A', 5.0], ['B', 3.0], ['C', (- 1.0)], ['D', (- 5.0)]], [['D', 100.0], ['C', 99.0], ['B', 98.0], ['A', 97.0]]])
        assert (sorted_od['freq_ordering'] == [[['C', 2.0], ['D', 23.0], ['B', 34.0], ['A', 45.0]], [['A', 300.0], ['B', 200.0], ['C', 100.0], ['D', 0.0]], [['D', 5.0], ['C', 5.0], ['B', 5.0], ['A', 5.0]]])

    def test_save(self, summary):
        """save() must write a loadable pickle and a tab-separated text file."""
        def stringify_pair(p):
            # Renders e.g. 'A (5.0)' — class name plus rounded score.
            return (((p[0] + ' (') + str(round(p[1], PRECISION))) + ')')
        out_directory = 'tests/riddle/temp'
        if (not os.path.exists(out_directory)):
            os.makedirs(out_directory)
        summary.save(out_directory)
        # Fix: pickle data must be opened in binary mode under Python 3.
        with open((out_directory + '/orderings_ordered_dict.pkl'), 'rb') as f:
            saved_od = pickle.load(f)
        assert (summary.od.items() == saved_od.items())
        with open((out_directory + '/orderings.txt'), 'r') as f:
            lines = f.read().splitlines()
        assert (lines[0] == '\t'.join(summary.od.keys()))
        list_feat = summary.od['feat']
        list_descript = summary.od['descript']
        list_contrib_ordering = [' > '.join([stringify_pair(p) for p in o]) for o in summary.od['contrib_ordering']]
        list_freq_ordering = [' > '.join([stringify_pair(p) for p in o]) for o in summary.od['freq_ordering']]
        # Fix: dict views are not indexable in Python 3; take the first column
        # via an iterator to obtain the row count.
        # NOTE(review): range(1, n) never checks the last feature row — TODO confirm intended.
        for row_idx in range(1, len(next(iter(summary.od.values())))):
            expected = '\t'.join([list_feat[(row_idx - 1)], list_descript[(row_idx - 1)], list_contrib_ordering[(row_idx - 1)], list_freq_ordering[(row_idx - 1)]])
            assert (lines[row_idx] == expected)
        shutil.rmtree(out_directory)

    def test_save_individual_tables(self, summary):
        """save_individual_tables() writes one score table per ordering key."""
        out_directory = 'tests/riddle/temp'
        if (not os.path.exists(out_directory)):
            os.makedirs(out_directory)
        summary.save(out_directory)
        idx_class_dict = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
        sorted_idx_class = sorted(idx_class_dict.items(), key=(lambda x: x[0]))
        summary.save_individual_tables(idx_class_dict, out_directory)
        ordering_keys = [key for key in summary.od.keys() if ('ordering' in key)]
        for key in ordering_keys:
            features = summary.od['feat']
            curr_table_data = summary.od[key]
            with open((((out_directory + '/') + key) + '_table.txt'), 'r') as f:
                lines = f.read().splitlines()
            assert (lines[0] == 'feat\tA\tB\tC\tD')
            # Fix: dict views are not indexable in Python 3 (see test_save).
            for row_idx in range(1, len(next(iter(summary.od.values())))):
                def search_score(c, list_pairs):
                    # Score for class `c` inside a [class, score] pair list.
                    c_index = [cl for (cl, score) in list_pairs].index(c)
                    return list_pairs[c_index][1]
                feat = features[(row_idx - 1)]
                list_pairs = curr_table_data[(row_idx - 1)]
                sorted_scores = [str(search_score(c, list_pairs)) for (idx, c) in sorted_idx_class]
                expected_line = '\t'.join(([feat] + sorted_scores))
                assert (lines[row_idx] == expected_line)
        shutil.rmtree(out_directory)
class MT5TokenizerFast(metaclass=DummyObject):
    """Import-time placeholder used when the tokenizers backend is missing."""

    _backends = ['tokenizers']

    def __init__(self, *args, **kwargs):
        # Raises an informative error if the 'tokenizers' extras are absent.
        requires_backends(self, ['tokenizers'])
class ActionPublisher_2():
    """ROS node that republishes received cmd_vel commands at a fixed rate.

    Subscribes to 'cmd_vel_pub'; on each timer tick, republishes the latest
    command on 'cmd_vel' (or a stand-still Twist if none arrived during the
    last action horizon) and triggers the next simulation cycle.
    """

    def __init__(self):
        # Eval-only node: refuse to start when training mode is configured.
        if rospy.get_param('train_mode'):
            raise Exception('This node should be used solely in eval mode!')
        rospy.init_node('action_publisher', anonymous=True)
        self._action_publish_rate = rospy.get_param('/action_frequency', default=10)
        # Timer period is the inverse of the configured publish frequency.
        rate = rospy.Duration((1 / self._action_publish_rate))
        self._pub_cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=1)
        self._pub_cycle_trigger = rospy.Publisher('next_cycle', Bool, queue_size=1)
        self._sub = rospy.Subscriber('cmd_vel_pub', Twist, self.callback_receive_cmd_vel, queue_size=1)
        self._action = Twist()
        self._signal = Bool()
        # Zero-velocity command published whenever no fresh action arrived.
        self.STAND_STILL_ACTION = Twist()
        (self.STAND_STILL_ACTION.linear.x, self.STAND_STILL_ACTION.angular.z) = (0, 0)
        self._cmd_received = False
        # Block until at least one upstream publisher is connected.
        while (self._sub.get_num_connections() < 1):
            print('ActionPublisher: No publisher to cmd_vel_pub yet.. ')
            time.sleep(1)
        rospy.Timer(rate, self.callback_publish_action)
        # Never returns: hands control to the ROS event loop.
        rospy.spin()

    def callback_publish_action(self, event):
        """Timer callback: publish the latest (or stand-still) action."""
        if self._cmd_received:
            self._pub_cmd_vel.publish(self._action)
            # Consume the flag so a stale action is not re-sent next tick.
            self._cmd_received = False
        else:
            rospy.logdebug('No action received during recent action horizon.')
            self._pub_cmd_vel.publish(self.STAND_STILL_ACTION)
        # Signal the simulation that the next cycle may proceed.
        self._pub_cycle_trigger.publish(self._signal)

    def callback_receive_cmd_vel(self, msg_cmd_vel: Twist):
        """Subscriber callback: cache the newest command for the next tick."""
        self._cmd_received = True
        self._action = msg_cmd_vel
def get_Activation(activation, inverse=False):
    """Resolve an activation-layer constructor by name.

    ``'gdn'`` maps to the project's GDN layer with ``inverse`` baked in;
    any other name is looked up directly on ``torch.nn``.
    """
    if activation != 'gdn':
        return getattr(torch.nn, activation)
    # GDN takes an extra flag that torch.nn activations do not.
    return partial(GDN, inverse=inverse)
class FixedCrop(FeatureTransformer):
    """Crop a fixed rectangular region (x1, y1, x2, y2) from an image.

    Coordinates are treated as normalized when ``normalized`` is True;
    ``is_clip`` clips the box to the image bounds.
    """

    def __init__(self, x1, y1, x2, y2, normalized=True, is_clip=True, bigdl_type='float'):
        # All arguments are forwarded to the JVM-side transformer.
        super(FixedCrop, self).__init__(bigdl_type, x1, y1, x2, y2, normalized, is_clip)
class LearnedPositionalEmbedding(nn.Embedding):
    """Learned positional embedding table.

    Positions are offset past ``padding_idx`` (when set), so the usable
    number of positions is ``num_embeddings - padding_idx - 1``.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.onnx_trace = False
        # Slots at and below padding_idx are reserved and not real positions.
        if self.padding_idx is None:
            self.max_positions = self.num_embeddings
        else:
            self.max_positions = (self.num_embeddings - self.padding_idx) - 1

    def forward(self, input: Tensor, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]=None, positions: Optional[Tensor]=None):
        """Embed positions for `input`; positions may be given or derived."""
        assert ((positions is None) or (self.padding_idx is None)), 'If positions is pre-computed then padding_idx should not be set.'
        if positions is None:
            if incremental_state is not None:
                # Incremental decoding: only the newest position is needed.
                next_pos = int(self.padding_idx + input.size(1))
                positions = torch.zeros((1, 1), device=input.device, dtype=input.dtype).fill_(next_pos)
            else:
                positions = utils.make_positions(input, self.padding_idx, onnx_trace=self.onnx_trace)
        return F.embedding(positions, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)
class CosineDecayWithSkip(object):
    """Cosine learning-rate decay that can hold the base LR for an initial
    number of steps before the cosine schedule kicks in (PaddlePaddle fluid).
    """

    def __init__(self, total_steps, skip_steps=None):
        # total_steps: length of the cosine schedule (after any skipped steps).
        # skip_steps: optional number of initial steps during which base_lr is kept.
        super(CosineDecayWithSkip, self).__init__()
        assert ((not skip_steps) or (skip_steps > 0)), 'skip steps must be greater than zero'
        assert (total_steps > 0), 'total step must be greater than zero'
        assert ((not skip_steps) or (skip_steps < total_steps)), 'skip steps must be smaller than total steps'
        self.total_steps = total_steps
        self.skip_steps = skip_steps

    def __call__(self, base_lr=None, learning_rate=None):
        """Build the LR schedule ops in the current program; returns the LR var."""
        steps = _decay_step_counter()
        total = self.total_steps
        if (self.skip_steps is not None):
            total -= self.skip_steps
        # Persistable variable that downstream optimizers read as the LR.
        lr = fluid.layers.tensor.create_global_var(shape=[1], value=base_lr, dtype='float32', persistable=True, name='learning_rate')
        def decay():
            # lr = base_lr * 0.5 * (cos(pi * step / total) + 1)
            # NOTE(review): uses the absolute step counter, not
            # (steps - skip_steps) — confirm this offset is intended.
            cos_lr = ((base_lr * 0.5) * (cos((steps * (math.pi / total))) + 1))
            fluid.layers.tensor.assign(input=cos_lr, output=lr)
        if (self.skip_steps is None):
            decay()
        else:
            # Only start decaying once the step counter passes skip_steps.
            skipped = (steps >= self.skip_steps)
            fluid.layers.cond(skipped, decay)
        return lr
class Task():
    """CartPoleSwingUp-v1 task with optional noise features and shuffling.

    Appends ``n_noise_features`` Gaussian noise dimensions to every
    observation and (optionally) permutes the feature order on each rollout,
    so solutions must be robust to feature identity.
    """

    def __init__(self, render=False, shuffle_on_reset=False, n_noise_features=0, env_seed=None, feature_seed=None, max_episode_steps=1000):
        self.env = gym.make('CartPoleSwingUp-v1')
        self.env._max_episode_steps = max_episode_steps
        self.shuffle_on_reset = shuffle_on_reset
        self.render = render
        self.n_noise_features = n_noise_features
        self.n_features = N_ORIGINAL_FEATURES + n_noise_features
        self.perm_ix = np.arange(self.n_features)
        self.noise_std = 0.1
        self.env.seed(env_seed)
        # Separate RNG so feature noise/shuffling is decoupled from env seed.
        self.rnd = np.random.RandomState(seed=feature_seed)

    def reset_for_rollout(self):
        """Reset the feature permutation; reshuffle it when configured."""
        self.perm_ix = np.arange(self.n_features)
        if self.shuffle_on_reset:
            self.rnd.shuffle(self.perm_ix)

    def modify_obs(self, obs):
        """Append Gaussian noise features, then apply the permutation."""
        extra = self.rnd.randn(self.n_noise_features) * self.noise_std
        return np.concatenate([obs, extra], axis=0)[self.perm_ix]

    def rollout(self, solution):
        """Run one episode of ``solution``; return the episode reward.

        Raises:
            IncompatibleNFeatures: when the solution expects a different
                feature count than this task produces.
        """
        expected = solution.get_n_features()
        if (expected is not None) and (expected != self.n_features):
            raise IncompatibleNFeatures
        self.reset_for_rollout()
        solution.reset()
        obs = self.env.reset()
        if self.render:
            self.env.render()
        episode_reward = 0
        done = False
        while not done:
            action = solution.get_action(self.modify_obs(obs))
            obs, reward, done, _ = self.env.step(action)
            episode_reward += reward
            if self.render:
                self.env.render()
        return episode_reward
class _PredictManager():
    """Drives batched prediction from a file or stdin, writing each
    prediction to the console and/or an output file.
    """

    def __init__(self, predictor: Predictor, input_file: str, output_file: Optional[str], batch_size: int, print_to_console: bool, has_dataset_reader: bool, beam_size: int) -> None:
        self._predictor = predictor
        self._input_file = input_file
        if (output_file is not None):
            # Opened here; closed at the end of run().
            self._output_file = open(output_file, 'w')
        else:
            self._output_file = None
        self._batch_size = batch_size
        self._print_to_console = print_to_console
        if has_dataset_reader:
            self._dataset_reader = predictor._dataset_reader
        else:
            self._dataset_reader = None
        # STOG-specific wiring: push beam size and token indexers into the
        # underlying model (exact-type check, deliberately not isinstance).
        if (type(predictor) in (STOGPredictor,)):
            self.beam_size = beam_size
            self._predictor._model.set_beam_size(self.beam_size)
            self._predictor._model.set_decoder_token_indexers(self._dataset_reader._token_indexers)

    def _predict_json(self, batch_data: List[JsonDict]) -> Iterator[str]:
        """Predict on JSON inputs, yielding one serialized line per input."""
        # Single-item batches use the non-batch API (avoids batching overhead).
        if (len(batch_data) == 1):
            results = [self._predictor.predict_json(batch_data[0])]
        else:
            results = self._predictor.predict_batch_json(batch_data)
        for output in results:
            (yield self._predictor.dump_line(output))

    def _predict_instances(self, batch_data: List[Instance]) -> Iterator[str]:
        """Predict on Instances, yielding one serialized line per input."""
        if (len(batch_data) == 1):
            results = [self._predictor.predict_instance(batch_data[0])]
        else:
            results = self._predictor.predict_batch_instance(batch_data)
        for output in results:
            (yield self._predictor.dump_line(output))

    def _maybe_print_to_console_and_file(self, prediction: str, model_input: str=None) -> None:
        """Emit one prediction to the configured sinks (console/file)."""
        if self._print_to_console:
            if (model_input is not None):
                print('input: ', model_input)
            print('prediction: ', prediction)
        if (self._output_file is not None):
            self._output_file.write(prediction)

    def _get_json_data(self) -> Iterator[JsonDict]:
        """Yield parsed JSON inputs from stdin ('-') or the input file."""
        if (self._input_file == '-'):
            for line in sys.stdin:
                if (not line.isspace()):
                    (yield self._predictor.load_line(line))
        else:
            with open(self._input_file, 'r') as file_input:
                for line in file_input:
                    if (not line.isspace()):
                        (yield self._predictor.load_line(line))

    def _get_instance_data(self) -> Iterator[Instance]:
        """Yield Instances via the dataset reader; stdin is unsupported here."""
        if (self._input_file == '-'):
            raise ConfigurationError('stdin is not an option when using a DatasetReader.')
        elif (self._dataset_reader is None):
            raise ConfigurationError('To generate instances directly, pass a DatasetReader.')
        else:
            (yield from self._dataset_reader.read(self._input_file))

    def run(self) -> None:
        """Run prediction over all inputs in batches; close the output file."""
        has_reader = (self._dataset_reader is not None)
        if has_reader:
            for batch in lazy_groups_of(self._get_instance_data(), self._batch_size):
                for (model_input_instance, result) in zip(batch, self._predict_instances(batch)):
                    self._maybe_print_to_console_and_file(result, str(model_input_instance))
        else:
            for batch_json in lazy_groups_of(self._get_json_data(), self._batch_size):
                for (model_input_json, result) in zip(batch_json, self._predict_json(batch_json)):
                    self._maybe_print_to_console_and_file(result, json.dumps(model_input_json))
        if (self._output_file is not None):
            self._output_file.close()
def visualize_boxes_and_labels_on_image_array(image, boxes, classes, scores, category_index, instance_masks=None, keypoints=None, use_normalized_coordinates=False, max_boxes_to_draw=20, min_score_thresh=0.5, agnostic_mode=False, line_thickness=4):
    """Overlay detection boxes (plus optional masks/keypoints) on an image.

    Boxes below ``min_score_thresh`` are skipped; at most
    ``max_boxes_to_draw`` boxes are drawn (all of them when it is falsy).
    The image array is mutated in place and also returned.
    """
    display_strs = collections.defaultdict(list)
    colors = collections.defaultdict(str)
    masks_by_box = {}
    keypoints_by_box = collections.defaultdict(list)
    # A falsy max means "no cap": draw every box.
    limit = max_boxes_to_draw or boxes.shape[0]
    for idx in range(min(limit, boxes.shape[0])):
        if (scores is not None) and (scores[idx] <= min_score_thresh):
            continue
        box = tuple(boxes[idx].tolist())
        if instance_masks is not None:
            masks_by_box[box] = instance_masks[idx]
        if keypoints is not None:
            keypoints_by_box[box].extend(keypoints[idx])
        if scores is None:
            # No scores: draw unlabeled black boxes.
            colors[box] = 'black'
            continue
        if agnostic_mode:
            display_strs[box].append('score: {}%'.format(int(100 * scores[idx])))
            colors[box] = 'DarkOrange'
        else:
            if classes[idx] in category_index.keys():
                class_name = category_index[classes[idx]]['name']
            else:
                class_name = 'N/A'
            display_strs[box].append('{}: {}%'.format(class_name, int(100 * scores[idx])))
            colors[box] = STANDARD_COLORS[classes[idx] % len(STANDARD_COLORS)]
    # Second pass: actually rasterize everything that survived filtering.
    for box, color in colors.items():
        ymin, xmin, ymax, xmax = box
        if instance_masks is not None:
            draw_mask_on_image_array(image, masks_by_box[box], color=color)
        draw_bounding_box_on_image_array(image, ymin, xmin, ymax, xmax, color=color, thickness=line_thickness, display_str_list=display_strs[box], use_normalized_coordinates=use_normalized_coordinates)
        if keypoints is not None:
            draw_keypoints_on_image_array(image, keypoints_by_box[box], color=color, radius=line_thickness / 2, use_normalized_coordinates=use_normalized_coordinates)
    return image
def load_yaml(file_path):
    """Read and parse a YAML file.

    Accepts either a ``str`` or a ``pathlib.Path``; returns the parsed
    document (typically a dict).
    """
    path = file_path if isinstance(file_path, Path) else Path(file_path)
    with open(str(path), 'r') as handle:
        return yaml.load(handle, Loader=yaml.FullLoader)
class ShuffleNetV2_MPNCOV(nn.Module):
    """ShuffleNetV2 backbone with a matrix-power-normalized covariance
    (MPNCOV) pooling head in place of global average pooling.
    """

    def __init__(self, stages_repeats, stages_out_channels, num_classes=1000):
        # stages_repeats: block counts for stages 2-4.
        # stages_out_channels: widths for conv1, stages 2-4 and conv5.
        super(ShuffleNetV2_MPNCOV, self).__init__()
        if (len(stages_repeats) != 3):
            raise ValueError('expected stages_repeats as list of 3 positive ints')
        if (len(stages_out_channels) != 5):
            raise ValueError('expected stages_out_channels as list of 5 positive ints')
        self._stage_out_channels = stages_out_channels
        input_channels = 3
        output_channels = self._stage_out_channels[0]
        # Stem: 3x3 stride-2 conv + BN + ReLU.
        self.conv1 = nn.Sequential(nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False), nn.BatchNorm2d(output_channels), nn.ReLU(inplace=True))
        input_channels = output_channels
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        stage_names = ['stage{}'.format(i) for i in [2, 3, 4]]
        # Each stage: one stride-2 (downsampling) block, then stride-1 blocks.
        for (name, repeats, output_channels) in zip(stage_names, stages_repeats, self._stage_out_channels[1:]):
            seq = [InvertedResidual(input_channels, output_channels, 2)]
            for i in range((repeats - 1)):
                seq.append(InvertedResidual(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels
        output_channels = self._stage_out_channels[(- 1)]
        self.conv5 = nn.Sequential(nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False), nn.BatchNorm2d(output_channels), nn.ReLU(inplace=True))
        # 1x1 conv reducing to 256 channels before covariance pooling.
        self.dimension_reduction = nn.Sequential(nn.Conv2d(self._stage_out_channels[(- 1)], 256, kernel_size=1, stride=1, padding=0, bias=False), nn.BatchNorm2d(256), nn.ReLU6(inplace=True))
        # 32896 = 256*257/2: upper-triangular part of the 256x256 covariance.
        self.fc = nn.Linear(32896, num_classes)

    def forward(self, x):
        """Run the backbone, MPNCOV pooling and the classifier head."""
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.conv5(x)
        x = self.dimension_reduction(x)
        # Covariance pooling, 5-iteration matrix square root, upper-tri vec.
        x = MPNCOV.CovpoolLayer(x)
        x = MPNCOV.SqrtmLayer(x, 5)
        x = MPNCOV.TriuvecLayer(x)
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x
def FindStartOfExpressionInLine(line, endpos, stack):
    """Scan backwards from ``endpos`` for the opener matching ``stack``.

    Returns ``(position, None)`` when the matching opener is found,
    ``(-1, None)`` when matching is impossible on this line, and
    ``(-1, stack)`` (with the updated stack) when the scan must continue
    on the previous line. ``stack`` is mutated in place.
    """
    pos = endpos
    while pos >= 0:
        ch = line[pos]
        if ch in ')]}':
            # Closing bracket: remember it and keep scanning left.
            stack.append(ch)
        elif ch == '>':
            operator_like = (pos > 0) and ((line[(pos - 1)] == '-') or Match('\\s>=\\s', line[(pos - 1):]) or Search('\\boperator\\s*$', line[0:pos]))
            if operator_like:
                # Part of '->', '>=' or 'operator>': skip the extra char.
                pos -= 1
            else:
                # Treat as a (potential) template closing bracket.
                stack.append('>')
        elif ch == '<':
            if (pos > 0) and (line[(pos - 1)] == '<'):
                # '<<' shift operator: consume both characters.
                pos -= 1
            elif stack and (stack[(- 1)] == '>'):
                stack.pop()
                if not stack:
                    return (pos, None)
        elif ch in '([{':
            # Any pending '>' entries were comparisons, not templates.
            while stack and (stack[(- 1)] == '>'):
                stack.pop()
            if not stack:
                return ((- 1), None)
            matches = (((ch == '(') and (stack[(- 1)] == ')')) or ((ch == '[') and (stack[(- 1)] == ']')) or ((ch == '{') and (stack[(- 1)] == '}')))
            if not matches:
                # Mismatched bracket kinds: give up on this expression.
                return ((- 1), None)
            stack.pop()
            if not stack:
                return (pos, None)
        elif ch == ';':
            # A statement boundary invalidates any pending '>' templates.
            while stack and (stack[(- 1)] == '>'):
                stack.pop()
            if not stack:
                return ((- 1), None)
        pos -= 1
    return ((- 1), stack)
class DummyAttribution(FeatureAttribution):
    """No-op attribution method: every step yields empty attributions."""

    method_name = 'dummy'

    def attribute_step(self, attribute_fn_main_args: Dict[(str, Any)], attribution_args: Optional[Dict[(str, Any)]]=None) -> FeatureAttributionStepOutput:
        """Return an empty step output regardless of the inputs.

        Fix: ``attribution_args`` previously defaulted to a mutable ``{}``
        shared across calls; it now defaults to ``None`` and is normalized
        inside the method (behavior is otherwise unchanged).
        """
        if attribution_args is None:
            attribution_args = {}
        return FeatureAttributionStepOutput(source_attributions=None, target_attributions=None, step_scores={})
class DMCEnv(core.Env):
    """Gym-style adapter around a dm_env / dm_control environment."""

    def __init__(self, domain_name: Optional[str]=None, task_name: Optional[str]=None, env: Optional[dm_env.Environment]=None, task_kwargs: Optional[Dict]=None, environment_kwargs=None):
        """Wrap an existing env, or load one from (domain_name, task_name).

        Fix: ``task_kwargs`` previously defaulted to a mutable ``{}`` shared
        across calls; it now defaults to ``None`` and is normalized here.
        """
        if task_kwargs is None:
            task_kwargs = {}
        assert ('random' in task_kwargs), 'Please specify a seed, for deterministic behaviour.'
        assert ((env is not None) or ((domain_name is not None) and (task_name is not None))), 'You must provide either an environment or domain and task names.'
        if (env is None):
            env = suite.load(domain_name=domain_name, task_name=task_name, task_kwargs=task_kwargs, environment_kwargs=environment_kwargs)
        self._env = env
        self.action_space = dmc_spec2gym_space(self._env.action_spec())
        self.observation_space = dmc_spec2gym_space(self._env.observation_spec())
        self.seed(seed=task_kwargs['random'])

    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped dm_env environment.
        return getattr(self._env, name)

    def step(self, action: np.ndarray) -> TimeStep:
        """Step the wrapped env; returns (obs, reward, done, info)."""
        assert self.action_space.contains(action)
        time_step = self._env.step(action)
        reward = (time_step.reward or 0)
        done = time_step.last()
        obs = time_step.observation
        info = {}
        # dm_control marks a time-limit truncation (rather than a natural
        # termination) with discount == 1.0 on the final step.
        if (done and (time_step.discount == 1.0)):
            info['TimeLimit.truncated'] = True
        return (obs, reward, done, info)

    def reset(self):
        """Reset the wrapped env; return only the initial observation."""
        time_step = self._env.reset()
        return time_step.observation

    def render(self, mode='rgb_array', height: int=84, width: int=84, camera_id: int=0):
        """Render an RGB frame from the physics engine."""
        assert (mode == 'rgb_array'), ('only support rgb_array mode, given %s' % mode)
        return self._env.physics.render(height=height, width=width, camera_id=camera_id)
def generate(num_hosts, num_services, fully_obs=False, flat_actions=True, flat_obs=True, **params):
    """Generate a random scenario and wrap it in a NASimEnv.

    ``**params`` are forwarded to the scenario generator; the remaining
    keyword flags configure the environment's observability and spaces.
    """
    scenario = generate_scenario(num_hosts, num_services, **params)
    return NASimEnv(scenario, fully_obs=fully_obs, flat_actions=flat_actions, flat_obs=flat_obs)
def init_engine(bigdl_type='float'):
    """Initialize the BigDL engine and the Spark-side Python serializer."""
    callBigDlFunc(bigdl_type, 'initEngine')
    # The JVM-side SerDe must also be initialized for py4j round-trips.
    jvm = get_spark_context()._jvm
    jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.initialize()
def cyclegan_generator_resnet(images, arg_scope_fn=cyclegan_arg_scope, num_resnet_blocks=6, num_filters=64, upsample_fn=cyclegan_upsample, kernel_size=3, num_outputs=3, tanh_linear_slope=0.0, is_training=False):
    """Build the CycleGAN ResNet generator: encoder, residual blocks, decoder.

    Input spatial dims must be multiples of 4 (two stride-2 encoder convs).
    Returns (predictions, end_points) where predictions is
    tanh(logits) + tanh_linear_slope * logits.

    Fix: the residual-block loop used Python 2's ``xrange``, a NameError
    under Python 3; replaced with ``range`` (identical semantics here).
    """
    del is_training
    end_points = {}
    input_size = images.shape.as_list()
    (height, width) = (input_size[1], input_size[2])
    if (height and ((height % 4) != 0)):
        raise ValueError('The input height must be a multiple of 4.')
    if (width and ((width % 4) != 0)):
        raise ValueError('The input width must be a multiple of 4.')
    if (not isinstance(kernel_size, (list, tuple))):
        kernel_size = [kernel_size, kernel_size]
    kernel_height = kernel_size[0]
    kernel_width = kernel_size[1]
    # 'SAME'-style reflection padding computed for VALID convolutions.
    pad_top = ((kernel_height - 1) // 2)
    pad_bottom = (kernel_height // 2)
    pad_left = ((kernel_width - 1) // 2)
    pad_right = (kernel_width // 2)
    paddings = np.array([[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]], dtype=np.int32)
    spatial_pad_3 = np.array([[0, 0], [3, 3], [3, 3], [0, 0]])
    with tf.contrib.framework.arg_scope(arg_scope_fn()):
        with tf.variable_scope('input'):
            net = tf.pad(images, spatial_pad_3, 'REFLECT')
            net = layers.conv2d(net, num_filters, kernel_size=[7, 7], padding='VALID')
            end_points['encoder_0'] = net
        with tf.variable_scope('encoder'):
            # Two stride-2 convs, each doubling the channel count.
            with tf.contrib.framework.arg_scope([layers.conv2d], kernel_size=kernel_size, stride=2, activation_fn=tf.nn.relu, padding='VALID'):
                net = tf.pad(net, paddings, 'REFLECT')
                net = layers.conv2d(net, (num_filters * 2))
                end_points['encoder_1'] = net
                net = tf.pad(net, paddings, 'REFLECT')
                net = layers.conv2d(net, (num_filters * 4))
                end_points['encoder_2'] = net
        with tf.variable_scope('residual_blocks'):
            with tf.contrib.framework.arg_scope([layers.conv2d], kernel_size=kernel_size, stride=1, activation_fn=tf.nn.relu, padding='VALID'):
                # Fix: was `xrange` (Python 2 only).
                for block_id in range(num_resnet_blocks):
                    with tf.variable_scope('block_{}'.format(block_id)):
                        res_net = tf.pad(net, paddings, 'REFLECT')
                        res_net = layers.conv2d(res_net, (num_filters * 4))
                        res_net = tf.pad(res_net, paddings, 'REFLECT')
                        res_net = layers.conv2d(res_net, (num_filters * 4), activation_fn=None)
                        net += res_net
                        end_points[('resnet_block_%d' % block_id)] = net
        with tf.variable_scope('decoder'):
            with tf.contrib.framework.arg_scope([layers.conv2d], kernel_size=kernel_size, stride=1, activation_fn=tf.nn.relu):
                with tf.variable_scope('decoder1'):
                    net = upsample_fn(net, num_outputs=(num_filters * 2), stride=[2, 2])
                end_points['decoder1'] = net
                with tf.variable_scope('decoder2'):
                    net = upsample_fn(net, num_outputs=num_filters, stride=[2, 2])
                end_points['decoder2'] = net
        with tf.variable_scope('output'):
            net = tf.pad(net, spatial_pad_3, 'REFLECT')
            logits = layers.conv2d(net, num_outputs, [7, 7], activation_fn=None, normalizer_fn=None, padding='valid')
            logits = tf.reshape(logits, _dynamic_or_static_shape(images))
            end_points['logits'] = logits
            end_points['predictions'] = (tf.tanh(logits) + (logits * tanh_linear_slope))
    return (end_points['predictions'], end_points)
class Collision(xmlr.Object):
    """URDF collision element: a geometry with an optional origin pose."""

    def __init__(self, geometry=None, origin=None):
        # geometry: shape description; origin: pose offset (both optional).
        self.geometry = geometry
        self.origin = origin
def test_stable_mixed_volume(vrblvl=0):
    """Check stable mixed volumes via DEMiCs and MixedVol on a 2-poly system.

    Returns the number of failed checks (0 on success); the checks use
    the MixedVol results, expecting mixed volume 3 and stable volume 5.
    """
    polynomials = ['x^3 + 2*x*y - x^2*y;', 'x + y - x^3;']
    set_double_system(2, polynomials, vrblvl)
    demics_mv, demics_smv = stable_mixed_volume(True, vrblvl)
    if vrblvl > 0:
        print('the mixed volume by DEMiCs :', demics_mv)
        print('the stable mixed volume by DEMiCs :', demics_smv)
    mixedvol_mv, mixedvol_smv = stable_mixed_volume(False, vrblvl)
    if vrblvl > 0:
        print('the mixed volume by MixedVol :', mixedvol_mv)
        print('the stable mixed volume by MixedVol :', mixedvol_smv)
    failures = int(mixedvol_mv != 3) + int(mixedvol_smv != 5)
    return failures
def test_check_max_iter():
    """A non-positive max_iter must be rejected by _validate_input."""
    # NOTE(review): the original had a mangled bare-expression decorator
    # '(ValueError)' (most likely a stripped '@raises(ValueError)'); the
    # intent — expecting a ValueError — is restored with pytest.raises.
    import pytest  # local import: the file's import block is outside this chunk
    (model, param_dist) = setup()
    with pytest.raises(ValueError):
        HyperbandSearchCV(model, param_dist, max_iter=(- 1))._validate_input()
class GeneralOptimizer(Optimizer):
    """Optimizer driven by pluggable statistics objects.

    Stats are split by scope (global / group / layer / channel / weight),
    accumulated each step, and made available to a customizable ``on_step``
    update rule (plain SGD by default).
    """

    def __init__(self, params, stats=None, on_step: Callable=None):
        # Named stat params become the optimizer's per-group defaults.
        defaults = {s.name: s.param for s in listify(stats) if (s.name is not None)}
        super().__init__(params, defaults)
        (self.global_stats, self.group_stats, self.layer_stats, self.channel_stats, self.weight_stats) = self._split_stats(stats)
        self.init_stats()
        if (on_step is not None):
            # Bind the custom update rule as a method on this instance.
            self.on_step = types.MethodType(on_step, self)

    def step(self, closure=None):
        """Refresh all statistics, then apply on_step to every param with a grad."""
        self.update_stats()
        for (i, pg) in enumerate(self.param_groups):
            for p in pg['params']:
                if (p.grad is not None):
                    self.on_step(p, pg, i)

    def on_step(self, p, group, group_idx):
        # Default rule: vanilla SGD update p <- p - lr * grad.
        p.data.add_((- group['lr']), p.grad.data)

    def _split_stats(self, stats):
        """Partition stats by StatScope; prepend a step counter where debiasing is used."""
        splits = [[stat for stat in listify(stats) if (stat.scope == scope)] for scope in StatScope]
        # Layer/channel/weight stats share one counter bucket.
        for (split, s) in zip([splits[0], splits[1], ((splits[2] + splits[3]) + splits[4])], StatScope):
            if np.any([getattr(s, 'debias', False) for s in split]):
                split.insert(0, CounterStat('step', scope=s))
        return splits

    def _init_stats(self, stats, data=None):
        # Buffers start at stat.init, broadcast to `data`'s shape when given.
        return {stat.buf: (stat.init if (data is None) else (torch.zeros_like(data) + stat.init)) for stat in stats if (stat.buf is not None)}

    def init_stats(self):
        """Allocate state buffers for every scope level."""
        self.state['global'] = self._init_stats(self.global_stats)
        for (i, pg) in enumerate(self.param_groups):
            self.state[f'group{i}'] = self._init_stats(self.group_stats)
            for p in pg['params']:
                self.state[p] = self._init_stats(self.layer_stats)
                # Channel stats are shaped like per-output-channel means.
                self.state[p].update(self._init_stats(self.channel_stats, p.data.view(p.data.size(0), (- 1)).mean(1)))
                self.state[p].update(self._init_stats(self.weight_stats, p.data))

    def _set_bufs(self, p, stats, pg, val=None):
        # Update each stat buffer in-place via the stat's own update rule.
        d = self.state[p]
        for stat in stats:
            if (stat.buf is not None):
                d[stat.buf] = stat.update(d[stat.buf], pg[stat.name], val=val, step=d.get('step', None))

    def update_stats(self):
        """Accumulate gradients into all stats, then refresh their buffers."""
        for stat in self.global_stats:
            stat.new_step()
        for (i, pg) in enumerate(self.param_groups):
            for stat in self.group_stats:
                stat.new_step()
            for p in pg['params']:
                if (p.grad is not None):
                    for stat in (self.global_stats + self.group_stats):
                        stat.accumulate(p.grad.data)
                    self._set_bufs(p, ((self.layer_stats + self.channel_stats) + self.weight_stats), pg, p.grad.data)
            self._set_bufs(f'group{i}', self.group_stats, pg)
        self._set_bufs('global', self.global_stats, self.param_groups[0])
class MAMLVPG(MAML):
    """Model-Agnostic Meta-Learning with VPG as the inner-loop algorithm."""

    def __init__(self, env, policy, value_function, inner_lr=_Default(0.1), outer_lr=0.001, max_path_length=100, discount=0.99, gae_lambda=1, center_adv=True, positive_adv=False, policy_ent_coeff=0.0, use_softplus_entropy=False, stop_entropy_gradient=False, entropy_method='no_entropy', meta_batch_size=20, num_grad_updates=1, meta_evaluator=None, evaluate_every_n_epochs=1):
        # Both inner-loop optimizers are Adam at the inner learning rate.
        def _adam_wrapper(target):
            return OptimizerWrapper((torch.optim.Adam, dict(lr=inner_lr)), target)

        policy_optimizer = _adam_wrapper(policy)
        vf_optimizer = _adam_wrapper(value_function)
        inner_algo = VPG(env.spec, policy, value_function,
                         policy_optimizer=policy_optimizer,
                         vf_optimizer=vf_optimizer,
                         max_path_length=max_path_length,
                         num_train_per_epoch=1,
                         discount=discount,
                         gae_lambda=gae_lambda,
                         center_adv=center_adv,
                         positive_adv=positive_adv,
                         policy_ent_coeff=policy_ent_coeff,
                         use_softplus_entropy=use_softplus_entropy,
                         stop_entropy_gradient=stop_entropy_gradient,
                         entropy_method=entropy_method)
        super().__init__(inner_algo=inner_algo,
                         env=env,
                         policy=policy,
                         meta_optimizer=torch.optim.Adam,
                         meta_batch_size=meta_batch_size,
                         inner_lr=inner_lr,
                         outer_lr=outer_lr,
                         num_grad_updates=num_grad_updates,
                         meta_evaluator=meta_evaluator,
                         evaluate_every_n_epochs=evaluate_every_n_epochs)
class Runner(object):
def __init__(self, base_dir, create_agent_fn, random_seed, agent_name, game_name, num_iterations, create_environment_fn=create_atari_environment, sticky_actions=True, checkpoint_file_prefix='ckpt', logging_file_prefix='log', log_every_n=1, training_steps=250000, evaluation_steps=125000, max_steps_per_episode=27000):
    """Set up directories, environment, TF session, agent, and checkpointing.

    base_dir hosts checkpoints/logs; create_agent_fn builds the agent
    from (session, environment, summary_writer).
    """
    assert (base_dir is not None)
    assert (game_name is not None)
    self._logging_file_prefix = logging_file_prefix
    self._log_every_n = log_every_n
    self._num_iterations = num_iterations
    self._training_steps = training_steps
    self._evaluation_steps = evaluation_steps
    self._max_steps_per_episode = max_steps_per_episode
    self._base_dir = base_dir
    self._create_directories()
    self._summary_writer = tf.summary.FileWriter(self._base_dir)
    # Sentinel: eval return before any evaluation has run.
    self.average_reward_eval = (- 100)
    self.game_name = game_name
    self.agent_name = agent_name
    self._environment = create_environment_fn(game_name, sticky_actions)
    tf.set_random_seed(random_seed)
    # Let TF grow GPU memory on demand instead of grabbing it all upfront.
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    self._sess = tf.Session('', config=tfconfig)
    self._agent = create_agent_fn(self._sess, self._environment, summary_writer=self._summary_writer)
    tf.logging.info('Running %s with the following parameters:', self.__class__.__name__)
    tf.logging.info('\t random_seed: %s', random_seed)
    tf.logging.info('\t num_iterations: %s', num_iterations)
    tf.logging.info('\t training_steps: %s', training_steps)
    tf.logging.info('\t sticky_actions: %s', sticky_actions)
    tf.logging.info('\t game_name: %s', game_name)
    self._summary_writer.add_graph(graph=tf.get_default_graph())
    self._sess.run(tf.global_variables_initializer())
    # Resume from the latest checkpoint when one exists.
    self._initialize_checkpointer_and_maybe_resume(checkpoint_file_prefix)
def _create_directories(self):
    """Set the checkpoint directory path and create the experiment logger."""
    base = self._base_dir
    self._checkpoint_dir = os.path.join(base, 'checkpoints')
    self._logger = logger.Logger(os.path.join(base, 'logs'))
def _initialize_checkpointer_and_maybe_resume(self, checkpoint_file_prefix):
    """Create the checkpointer and resume from the latest checkpoint, if any.

    On a successful resume, restores logger data and continues from the
    iteration after the checkpointed one; otherwise starts at iteration 0.
    """
    self._checkpointer = checkpointer.Checkpointer(self._checkpoint_dir, checkpoint_file_prefix)
    self._start_iteration = 0
    latest_checkpoint_version = checkpointer.get_latest_checkpoint_number(self._checkpoint_dir)
    if (latest_checkpoint_version >= 0):
        experiment_data = self._checkpointer.load_checkpoint(latest_checkpoint_version)
        # The agent decides whether the checkpoint is usable.
        if self._agent.unbundle(self._checkpoint_dir, latest_checkpoint_version, experiment_data):
            assert ('logs' in experiment_data)
            assert ('current_iteration' in experiment_data)
            self._logger.data = experiment_data['logs']
            self._start_iteration = (experiment_data['current_iteration'] + 1)
            tf.logging.info('Reloaded checkpoint and will start from iteration %d', self._start_iteration)
def restore_checkpoints(self, restore_dir, filename):
    """Restore TF variables into the session from a specific checkpoint file."""
    checkpoint_path = os.path.join(restore_dir, filename)
    tf.train.Saver().restore(self._sess, checkpoint_path)
def _initialize_episode(self):
    """Reset the environment and return the agent's first action."""
    return self._agent.begin_episode(self._environment.reset())
def _run_one_step(self, action):
    """Step the environment once; discard the info dict."""
    obs, reward, done, _ = self._environment.step(action)
    return (obs, reward, done)
def _end_episode(self, reward):
    """Notify the agent that the episode ended with `reward`."""
    self._agent.end_episode(reward)
def _end_episode_store(self, reward, total_reward, step_number, is_opt):
    """End the episode; route optimal episodes to the agent's storing variant."""
    if not is_opt:
        self._agent.end_episode(reward)
    else:
        # Optimal trajectory: let the agent store it with its full stats.
        self._agent.end_episode_(reward, total_reward, step_number)
def _run_one_episode(self):
    """Run a single episode to completion.

    Returns:
        (step_number, total_reward) for the finished episode.
    """
    step_number = 0
    total_reward = 0.0
    action = self._initialize_episode()
    is_terminal = False
    while True:
        (observation, reward, is_terminal) = self._run_one_step(action)
        total_reward += reward
        step_number += 1
        if (self._environment.game_over or (step_number == self._max_steps_per_episode)):
            # Stop when the game is truly over or the step cap is reached.
            break
        elif is_terminal:
            # Terminal but not game over (e.g. life lost): end and restart.
            self._agent.end_episode(reward)
            action = self._agent.begin_episode(observation)
        else:
            action = self._agent.step(reward, observation)
    if (self.agent_name in RPG_AGENTS):
        # RPG-style agents store "good" trajectories separately when the
        # return reaches the per-game threshold.
        is_opt = False
        if (total_reward >= episodic_return[self.game_name]):
            is_opt = True
        self._end_episode_store(reward, total_reward, step_number, is_opt)
    else:
        self._end_episode(reward)
    return (step_number, total_reward)
def _run_one_phase(self, min_steps, statistics, run_mode_str):
    """Run episodes until at least `min_steps` environment steps elapse.

    Per-episode stats are appended to `statistics` under keys prefixed
    with `run_mode_str` ('train' or 'eval').

    Returns:
        (step_count, sum_returns, num_episodes, num_good_trajs)
    """
    step_count = 0
    num_episodes = 0
    sum_returns = 0.0
    num_good_trajs = 0
    good_traj_label = 0
    while (step_count < min_steps):
        (episode_length, episode_return) = self._run_one_episode()
        # An episode is "good" when its return meets the per-game threshold.
        good_traj_label = 0
        if (episode_return >= episodic_return[self.game_name]):
            good_traj_label = 1
            num_good_trajs += 1
        statistics.append({'{}_episode_lengths'.format(run_mode_str): episode_length, '{}_episode_returns'.format(run_mode_str): episode_return, '{}_episode_goodtraj'.format(run_mode_str): good_traj_label})
        step_count += episode_length
        sum_returns += episode_return
        num_episodes += 1
        # Progress output on one carriage-returned console line.
        if (self.agent_name in ['rpg', 'repg']):
            sys.stdout.write(('epsilon: {} '.format(self._agent.epsilon_current) + 'replaysize {}\r'.format(self._agent.current_replay_size)))
        elif (self.agent_name in RPG_AGENTS):
            sys.stdout.write('Opt replay size: {} '.format(self._agent._replay_opt.memory.add_count))
            sys.stdout.write(((('Steps executed: {} '.format(step_count) + 'Episode length: {} '.format(episode_length)) + 'Return: {}'.format(episode_return)) + 'Good traj?: {}\r'.format(good_traj_label)))
        sys.stdout.flush()
    return (step_count, sum_returns, num_episodes, num_good_trajs)
def _run_train_phase(self, statistics, eval_mode=False):
    """Run one training phase and log average return / good-trajectory rate.

    Returns:
        (num_episodes, average_return) for the phase.
    """
    self._agent.eval_mode = eval_mode
    phase_start = time.time()
    (number_steps, sum_returns, num_episodes, num_good_trajs) = self._run_one_phase(self._training_steps, statistics, 'train')
    if num_episodes > 0:
        average_return = sum_returns / num_episodes
        average_good_trajs = num_good_trajs / num_episodes
    else:
        average_return = 0.0
        average_good_trajs = 0.0
    statistics.append({'train_average_return': average_return})
    statistics.append({'train_average_goodtraj': average_good_trajs})
    elapsed = time.time() - phase_start
    tf.logging.info('Average undiscounted return per training episode: %.2f', average_return)
    tf.logging.info('Average training steps per second: %.2f', number_steps / elapsed)
    return (num_episodes, average_return)
def _run_eval_phase(self, statistics):
    """Run one evaluation phase with the agent forced into eval mode.

    Returns:
        Tuple (num_episodes, average_return) over the evaluation episodes.
    """
    self._agent.eval_mode = True
    (_, returns_total, episode_count, _) = self._run_one_phase(self._evaluation_steps, statistics, 'eval')
    mean_return = (returns_total / episode_count) if episode_count else 0.0
    tf.logging.info('Average undiscounted return per evaluation episode: %.2f', mean_return)
    statistics.append({'eval_average_return': mean_return})
    return (episode_count, mean_return)
def _run_one_iteration(self, iteration):
    """Run one train phase plus one eval phase and log/summarize results.

    Once the last eval average reaches the per-game
    `episodic_return_switch` threshold, the train phase is run with
    eval_mode=True (training effectively stops but episodes keep being
    collected for statistics).

    Returns:
        Dict of statistic lists gathered during this iteration.
    """
    statistics = iteration_statistics.IterationStatistics()
    tf.logging.info('Starting iteration %d', iteration)
    train_eval_mode = False
    if (self.average_reward_eval >= episodic_return_switch[self.game_name]):
        train_eval_mode = True
        print('Stop training at iteration {}'.format(iteration))
    (num_episodes_train, average_reward_train) = self._run_train_phase(statistics, train_eval_mode)
    # For RPG agents whose optimality replay buffer is still empty, skip
    # evaluation and record -10000 sentinels instead.
    if ((self.agent_name in RPG_AGENTS) and (self._agent._replay_opt.memory.add_count == 0)):
        (num_episodes_eval, average_reward_eval) = ((- 10000), (- 10000))
    else:
        (num_episodes_eval, average_reward_eval) = self._run_eval_phase(statistics)
    # Remember the eval average: it drives the training-stop switch above.
    self.average_reward_eval = average_reward_eval
    self._save_tensorboard_summaries(iteration, num_episodes_train, average_reward_train, num_episodes_eval, average_reward_eval)
    return statistics.data_lists
def _save_tensorboard_summaries(self, iteration, num_episodes_train, average_reward_train, num_episodes_eval, average_reward_eval):
    """Write train/eval episode counts and average returns to TensorBoard."""
    tagged_values = [
        ('Train/NumEpisodes', num_episodes_train),
        ('Train/AverageReturns', average_reward_train),
        ('Eval/NumEpisodes', num_episodes_eval),
        ('Eval/AverageReturns', average_reward_eval),
    ]
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=val) for (tag, val) in tagged_values])
    self._summary_writer.add_summary(summary, iteration)
def _log_experiment(self, iteration, statistics):
    """Record this iteration's statistics; flush to disk every _log_every_n."""
    key = 'iteration_{:d}'.format(iteration)
    self._logger[key] = statistics
    if iteration % self._log_every_n == 0:
        self._logger.log_to_file(self._logging_file_prefix, iteration)
def _checkpoint_experiment(self, iteration):
    """Bundle agent state and, when the agent provides one, save a checkpoint."""
    bundle = self._agent.bundle_and_checkpoint(self._checkpoint_dir, iteration)
    if not bundle:
        # Agent declined to checkpoint (empty/None bundle): nothing to save.
        return
    bundle['current_iteration'] = iteration
    bundle['logs'] = self._logger.data
    self._checkpointer.save_checkpoint(iteration, bundle)
def run_experiment(self):
    """Run the full iterate/log/checkpoint loop from the start iteration."""
    tf.logging.info('Beginning training...')
    # Nothing to do when the schedule is already exhausted.
    if (self._num_iterations <= self._start_iteration):
        tf.logging.warning('num_iterations (%d) < start_iteration(%d)', self._num_iterations, self._start_iteration)
        return
    for it in range(self._start_iteration, self._num_iterations):
        stats = self._run_one_iteration(it)
        self._log_experiment(it, stats)
        self._checkpoint_experiment(it)
class LogicLayer(nn.Module):
    """One layer of a Neural Logic Machine.

    Keeps one predicate group per arity in [0, breadth]. For each arity i the
    layer gathers features from neighbouring arities (Expander lifts arity
    i-1, Reducer lowers arity i+1), applies a variable Permutation, then an
    MLP (`LogicInference`). Groups with no incoming features are disabled.

    NOTE(review): the default `activation=nn.Sigmoid()` is a shared module
    instance across layers (harmless for a stateless Sigmoid, but worth
    confirming upstream).
    """

    def __init__(self, breadth, input_dims, output_dims, logic_hidden_dim, exclude_self=True, residual=False, activation=nn.Sigmoid()):
        super().__init__()
        assert (breadth > 0), 'Does not support breadth <= 0.'
        if (breadth > 3):
            print('Using LogicLayer with breadth > 3 may cause speed and memory issue.')
        self.max_order = breadth
        self.residual = residual
        # Normalize per-arity dims into sequences of length breadth + 1.
        input_dims = _get_tuple_n(input_dims, (self.max_order + 1), int)
        output_dims = _get_tuple_n(output_dims, (self.max_order + 1), int)
        (self.logic, self.dim_perms, self.dim_expanders, self.dim_reducers) = [nn.ModuleList() for _ in range(4)]
        for i in range((self.max_order + 1)):
            current_dim = input_dims[i]
            if (i > 0):
                # Lift arity i-1 predicates up to arity i.
                expander = Expander((i - 1))
                self.dim_expanders.append(expander)
                current_dim += expander.get_output_dim(input_dims[(i - 1)])
            else:
                self.dim_expanders.append(None)
            if ((i + 1) < (self.max_order + 1)):
                # Lower arity i+1 predicates down to arity i.
                reducer = Reducer((i + 1), exclude_self)
                self.dim_reducers.append(reducer)
                current_dim += reducer.get_output_dim(input_dims[(i + 1)])
            else:
                self.dim_reducers.append(None)
            if (current_dim == 0):
                # No features feed this arity: disable the whole group.
                self.dim_perms.append(None)
                self.logic.append(None)
                output_dims[i] = 0
            else:
                perm = Permutation(i)
                self.dim_perms.append(perm)
                current_dim = perm.get_output_dim(current_dim)
                self.logic.append(LogicInference(current_dim, output_dims[i], logic_hidden_dim, activation))
        self.input_dims = input_dims
        self.output_dims = output_dims

    def forward(self, inputs):
        """Map a list of per-arity tensors to a list of per-arity outputs.

        `inputs[i]` is the arity-i predicate tensor (or has 0 feature dim);
        disabled groups yield None.
        """
        assert (len(inputs) == (self.max_order + 1))
        outputs = []
        for i in range((self.max_order + 1)):
            f = []
            if ((i > 0) and (self.input_dims[(i - 1)] > 0)):
                # Expander needs the object count n only when lifting 0 -> 1.
                n = (inputs[i].size(1) if (i == 1) else None)
                f.append(self.dim_expanders[i](inputs[(i - 1)], n))
            if ((i < len(inputs)) and (self.input_dims[i] > 0)):
                f.append(inputs[i])
            if (((i + 1) < len(inputs)) and (self.input_dims[(i + 1)] > 0)):
                f.append(self.dim_reducers[i](inputs[(i + 1)]))
            if (len(f) == 0):
                output = None
            else:
                f = torch.cat(f, dim=(- 1))
                f = self.dim_perms[i](f)
                output = self.logic[i](f)
            if (self.residual and (self.input_dims[i] > 0)):
                output = (inputs[i] + output)
            outputs.append(output)
        return outputs

    __hyperparams__ = ('breadth', 'input_dims', 'output_dims', 'logic_hidden_dim', 'exclude_self', 'residual')
    __hyperparam_defaults__ = {'exclude_self': True, 'residual': False}

    # BUG FIX: both helpers below take `cls` and are used as alternate
    # constructors/registrars; the @classmethod decorators (stripped during
    # extraction) are restored.
    @classmethod
    def make_nlm_parser(cls, parser, defaults, prefix=None):
        """Register this layer's hyper-parameters on an argparse-style parser."""
        for (k, v) in cls.__hyperparam_defaults__.items():
            defaults.setdefault(k, v)
        if (prefix is None):
            prefix = '--'
        else:
            prefix = (('--' + str(prefix)) + '-')
        # BUG FIX: argparse `type` must be callable; the string 'int' would
        # raise at parse time (compare `type=int` on the next argument).
        # `type='bool'` is left as-is: it relies on a project parser that
        # registers a string type — confirm against that parser.
        parser.add_argument((prefix + 'breadth'), type=int, default=defaults['breadth'], metavar='N', help='breadth of the logic layer')
        parser.add_argument((prefix + 'logic-hidden-dim'), type=int, nargs='+', default=defaults['logic_hidden_dim'], metavar='N', help='hidden dim of the logic model')
        parser.add_argument((prefix + 'exclude-self'), type='bool', default=defaults['exclude_self'], metavar='B', help='not allow multiple occurrence of same variable')
        parser.add_argument((prefix + 'residual'), type='bool', default=defaults['residual'], metavar='B', help='use residual connections')

    @classmethod
    def from_args(cls, input_dims, output_dims, args, prefix=None, **kwargs):
        """Build a LogicLayer from args registered by `make_nlm_parser`."""
        if (prefix is None):
            prefix = ''
        else:
            prefix = (str(prefix) + '_')
        setattr(args, (prefix + 'input_dims'), input_dims)
        setattr(args, (prefix + 'output_dims'), output_dims)
        init_params = {k: getattr(args, (prefix + k)) for k in cls.__hyperparams__}
        init_params.update(kwargs)
        return cls(**init_params)
def _elu_flops_compute(input: Tensor, alpha: float=1.0, inplace: bool=False):
return (torch.numel(input), 0) |
class LoopPadding(object):
    """Pad a frame-index list up to a target length by cycling through it."""

    def __init__(self, size):
        # Target length of the padded index list.
        self.size = size

    def __call__(self, frame_indices):
        """Extend `frame_indices` in place, repeating it cyclically until it
        holds `self.size` entries. Lists already long enough — and empty
        lists — come back unchanged. Returns the same list object."""
        padded = frame_indices
        period = len(padded)
        if period > 0:
            while len(padded) < self.size:
                padded.append(padded[len(padded) % period])
        return padded
def test_wrap_experiment_launcher_outside_git():
    """A launcher run from a temp dir outside git still snapshots into
    data/local/<prefix>/test_exp."""
    prefix = 'wrap_exp_test_launcher_outside_git'
    experiment_root = pathlib.Path(os.getcwd(), 'data/local', prefix)
    # Start from a clean slate so a stale snapshot can't mask a failure.
    _hard_rmtree(experiment_root)
    expected_snapshot = experiment_root / 'test_exp'
    with tempfile.TemporaryDirectory() as launcher_dir:
        script_path = pathlib.Path(launcher_dir) / 'run_exp.py'
        (snapshot_dir, _) = _run_launcher(script_path, prefix)
        assert os.path.samefile(str(expected_snapshot), str(snapshot_dir))
# NOTE(review): the bare call below looks like a truncated registration
# decorator (e.g. "@MODELS.register_module()") — confirm against upstream.
_module()
class FastRCNN(TwoStageDetector):
    """Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_."""

    def __init__(self, backbone: ConfigType, roi_head: ConfigType, train_cfg: ConfigType, test_cfg: ConfigType, neck: OptConfigType=None, data_preprocessor: OptConfigType=None, init_cfg: OptMultiConfig=None) -> None:
        # No rpn_head is passed to the two-stage base: Fast R-CNN consumes
        # externally supplied proposals.
        super().__init__(backbone=backbone, neck=neck, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, init_cfg=init_cfg, data_preprocessor=data_preprocessor)
class MChefParams():
    """Hyper-parameter bundle for the Molecule-Chef style graph model.

    NOTE(review): fields carry bare annotations but no @dataclass/attrs
    decorator is visible here — one was likely stripped during extraction;
    confirm against upstream before instantiating.
    """
    cuda_details: CudaDetails
    # Maps a tensor of graph indices to the corresponding adjacency-list graph.
    index_to_graph_lookup: typing.Callable[([torch.Tensor], graph_as_adj_list.GraphAsAdjList)]
    total_number_of_graphs: int
    stop_indx: int
    latent_dim: int
    gnn_hidden_size: int = 101
    # Edge categories handled by the GGNN — presumably bond orders; confirm.
    edge_names = ['single', 'double', 'triple']
    ggnn_time_steps = 4
    embedding_dim = 50
    property_dim: int = 1
    decd_layers: int = 2
    decd_max_steps: int = 5
def link_markdown_cells(cells, modules):
    """Rewrite each markdown cell's source through `link_docstring` in place."""
    for cell in cells:
        if cell['cell_type'] != 'markdown':
            continue
        cell['source'] = link_docstring(modules, cell['source'])
class EMAModel(metaclass=DummyObject):
    """Import-time placeholder for EMAModel when torch is unavailable.

    Every entry point simply raises through `requires_backends`, telling the
    user to install the 'torch' backend.
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

    # BUG FIX: these alternate constructors take `cls` and are classmethods
    # in the real API; without @classmethod the first positional argument
    # would be bound to `cls` when called on the class.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])
def load_datasets(raw_data, split_idx, args, n_workers=5):
    """Build DataLoaders for the given split index.

    Always builds 'test'; adds 'train' and 'valid' unless running in test
    mode. (NOTE(review): source indentation was flattened — the grouping of
    'valid' under the non-test branch is inferred; confirm upstream.)
    """
    splits = data_utils.read_splits('%s/split_%d.txt' % (args.data, split_idx))
    loaders = {}
    if not args.test_mode:
        loaders['train'] = get_loader(raw_data, splits['train'], args, shuffle=True, num_workers=n_workers)
        loaders['valid'] = get_loader(raw_data, splits['valid'], args, shuffle=False, num_workers=n_workers)
    loaders['test'] = get_loader(raw_data, splits['test'], args, shuffle=False, num_workers=n_workers)
    return loaders
class DiceMetaSampleProcessor(DiceSampleProcessor):
    """DICE sample processor that processes meta-batches.

    Overrides `process_samples` by borrowing `MetaSampleProcessor`'s
    implementation instead of the DiceSampleProcessor one.
    """
    process_samples = MetaSampleProcessor.process_samples
def main():
    """Exercise minimum_spanning_tree on three sample weighted graphs,
    printing each MST's edges followed by a blank separator line."""
    sample_graphs = (
        {(1, 2): 10, (2, 3): 15, (3, 4): 10, (1, 4): 10},
        {(1, 2): 6, (1, 3): 1, (1, 4): 5, (2, 3): 5, (2, 5): 3, (3, 4): 5, (3, 5): 6, (3, 6): 4, (4, 6): 2, (5, 6): 6},
        {(1, 2): 6, (1, 3): 1, (2, 4): 2},
    )
    for graph in sample_graphs:
        for edge in minimum_spanning_tree(graph):
            print(edge)
        print()
def eval(sess_config, input_hooks, model, data_init_op, steps, checkpoint_dir):
    """Evaluate streaming accuracy/AUC over `steps` batches from the latest checkpoint.

    NOTE(review): shadows the builtin `eval`; kept for caller compatibility.

    Returns:
        (eval_acc, eval_auc) fetched on the final step, after writing a
        merged summary event under <checkpoint_dir>/eval.
    """
    model.is_training = False
    hooks = []
    hooks.extend(input_hooks)
    # Initialize local variables (the streaming-metric accumulators) together
    # with the dataset iterator when the session is created.
    scaffold = tf.train.Scaffold(local_init_op=tf.group(tf.local_variables_initializer(), data_init_op))
    session_creator = tf.train.ChiefSessionCreator(scaffold=scaffold, checkpoint_dir=checkpoint_dir, config=sess_config)
    writer = tf.summary.FileWriter(os.path.join(checkpoint_dir, 'eval'))
    merged = tf.summary.merge_all()
    with tf.train.MonitoredSession(session_creator=session_creator, hooks=hooks) as sess:
        for _in in range(1, (steps + 1)):
            if (_in != steps):
                # Intermediate steps only advance the streaming metric ops.
                sess.run([model.acc_op, model.auc_op])
                if ((_in % 1000) == 0):
                    print('Evaluation complete:[{}/{}]'.format(_in, steps))
            else:
                # Final step also fetches the merged summaries for TensorBoard.
                (eval_acc, eval_auc, events) = sess.run([model.acc_op, model.auc_op, merged])
                writer.add_summary(events, _in)
                print('Evaluation complete:[{}/{}]'.format(_in, steps))
                print('ACC = {}\nAUC = {}'.format(eval_acc, eval_auc))
    # NOTE(review): with steps == 0 the loop never runs and these names are
    # unbound — callers presumably always pass steps >= 1.
    return (eval_acc, eval_auc)
class NoBadWordsLogitsProcessor():
    """Placeholder emitted when PyTorch is unavailable.

    Constructing it raises through `requires_pytorch`, pointing the user at
    the missing backend.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
def repeat_batch(batch: Batch, num_repeat) -> Batch:
    """Return a new Batch holding `num_repeat` deep copies of every graph in `batch`.

    Each repetition deep-copies the whole graph list, so graphs within one
    repetition keep any shared references among themselves.
    """
    originals = batch.to_data_list()
    replicated = []
    for _ in range(num_repeat):
        replicated.extend(copy.deepcopy(originals))
    return Batch.from_data_list(replicated)
def query(sql_query, local=False, timeit=False, use_cache=True, verbose=False, dbname='catalogs', user='postgres'):
    """Run a query against a local postgres mirror or the remote Gaia archive.

    Table prefixes are rewritten between the archive convention
    ('gaiaedr3.', 'gaiadr2.') and the local convention ('gaiaedr3_',
    'gaiadr2_') depending on `local`. Results are cached via `query_cache`
    keyed on the rewritten query text.

    Args:
        sql_query: ADQL/SQL text.
        local: query the local postgres database instead of the Gaia archive.
        timeit: print the elapsed query time.
        use_cache: consult/populate the on-disk query cache.
        verbose: passed through to Gaia.launch_job_async.
        dbname, user: local postgres connection parameters.

    Returns:
        An astropy Table with the query results.
    """
    # Rewrite table prefixes for the selected backend. Only the first
    # matching rule applies (elif chain), as in the original.
    if (local and ('gaiaedr3.' in sql_query)):
        sql_query = sql_query.replace('gaiaedr3.', 'gaiaedr3_')
    elif ((not local) and ('gaiaedr3_' in sql_query)):
        # BUG FIX: restore the correct archive prefix; the original replaced
        # with the misspelled 'gaia3dr3.' (compare the gaiadr2 branch below).
        sql_query = sql_query.replace('gaiaedr3_', 'gaiaedr3.')
    elif (local and ('gaiadr2.' in sql_query)):
        sql_query = sql_query.replace('gaiadr2.', 'gaiadr2_')
    elif ((not local) and ('gaiadr2_' in sql_query)):
        sql_query = sql_query.replace('gaiadr2_', 'gaiadr2.')
    if local:
        sql_query = _localize(sql_query)
    if use_cache:
        out = query_cache.load(sql_query)
        if out:
            return out
    if local:
        import psycopg2
        conn = psycopg2.connect('dbname={} user={}'.format(dbname, user))
        cur = conn.cursor()
        if timeit:
            start = time.time()
        cur.execute(sql_query)
        if timeit:
            print('Query took {:.3f} s'.format((time.time() - start)))
        out = cur.fetchall()
        names = [desc[0] for desc in cur.description]
        cur.close()
        conn.close()
        out = Table(numpy.array(out), names=names)
    else:
        if timeit:
            start = time.time()
        job = Gaia.launch_job_async(sql_query, verbose=verbose)
        if timeit:
            print('Query took {:.3f} s'.format((time.time() - start)))
        out = job.get_results()
    if use_cache:
        query_cache.save(sql_query, out)
    return out
def train(args):
    """Train the SNLI model described by `args`, checkpointing the best validator.

    Loads pickled train/valid datasets, builds the model (optionally seeded
    with GloVe vectors), then runs `args.max_epoch` epochs with validation
    ~10x per epoch, ReduceLROnPlateau scheduling on validation accuracy,
    Gumbel-temperature annealing, and best-model checkpointing.
    """
    # NOTE: pickle.load is only safe on trusted, locally produced files.
    with open(args.train_data, 'rb') as f:
        train_dataset: SNLIDataset = pickle.load(f)
    with open(args.valid_data, 'rb') as f:
        valid_dataset: SNLIDataset = pickle.load(f)
    train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=2, collate_fn=train_dataset.collate, pin_memory=True)
    valid_loader = DataLoader(dataset=valid_dataset, batch_size=args.batch_size, shuffle=False, num_workers=2, collate_fn=valid_dataset.collate, pin_memory=True)
    word_vocab = train_dataset.word_vocab
    label_vocab = train_dataset.label_vocab
    model = SNLIModel(num_classes=len(label_vocab), num_words=len(word_vocab), word_dim=args.word_dim, hidden_dim=args.hidden_dim, clf_hidden_dim=args.clf_hidden_dim, clf_num_layers=args.clf_num_layers, use_leaf_rnn=args.leaf_rnn, use_batchnorm=args.batchnorm, intra_attention=args.intra_attention, dropout_prob=args.dropout, bidirectional=args.bidirectional)
    if args.glove:
        logging.info('Loading GloVe pretrained vectors...')
        glove_weight = load_glove(path=args.glove, vocab=word_vocab, init_weight=model.word_embedding.weight.data.numpy())
        # Zero the padding row so PAD tokens contribute nothing.
        glove_weight[word_vocab.pad_id] = 0
        model.word_embedding.weight.data.set_(torch.FloatTensor(glove_weight))
    if args.fix_word_embedding:
        logging.info('Will not update word embeddings')
        model.word_embedding.weight.requires_grad = False
    model.to(args.device)
    logging.info(f'Using device {args.device}')
    if (args.optimizer == 'adam'):
        optimizer_class = optim.Adam
    elif (args.optimizer == 'adagrad'):
        optimizer_class = optim.Adagrad
    elif (args.optimizer == 'adadelta'):
        optimizer_class = optim.Adadelta
    # Only optimize parameters still requiring grad (embeddings may be frozen).
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = optimizer_class(params=params, weight_decay=args.l2reg)
    # Halve the LR when validation accuracy plateaus for 10 validations.
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='max', factor=0.5, patience=10, verbose=True)
    criterion = nn.CrossEntropyLoss()
    train_summary_writer = SummaryWriter(log_dir=os.path.join(args.save_dir, 'log', 'train'))
    valid_summary_writer = SummaryWriter(log_dir=os.path.join(args.save_dir, 'log', 'valid'))

    def run_iter(batch, is_training):
        # One forward (and, when training, backward + clipped step) pass.
        # Returns (loss, accuracy) as tensors.
        model.train(is_training)
        pre = batch['pre'].to(args.device)
        hyp = batch['hyp'].to(args.device)
        pre_length = batch['pre_length'].to(args.device)
        hyp_length = batch['hyp_length'].to(args.device)
        label = batch['label'].to(args.device)
        logits = model(pre=pre, pre_length=pre_length, hyp=hyp, hyp_length=hyp_length)
        label_pred = logits.max(1)[1]
        accuracy = torch.eq(label, label_pred).float().mean()
        loss = criterion(input=logits, target=label)
        if is_training:
            optimizer.zero_grad()
            loss.backward()
            clip_grad_norm_(parameters=params, max_norm=5)
            optimizer.step()
        return (loss, accuracy)

    def add_scalar_summary(summary_writer, name, value, step):
        # Accept either a tensor or a plain number.
        if torch.is_tensor(value):
            value = value.item()
        summary_writer.add_scalar(tag=name, scalar_value=value, global_step=step)

    num_train_batches = len(train_loader)
    validate_every = (num_train_batches // 10)  # validate ~10 times per epoch
    # NOTE(review): variable-name typos ('vaild', 'accuacy') preserved as-is.
    best_vaild_accuacy = 0
    iter_count = 0
    for epoch_num in range(args.max_epoch):
        logging.info(f'Epoch {epoch_num}: start')
        for (batch_iter, train_batch) in enumerate(train_loader):
            if ((iter_count % args.anneal_temperature_every) == 0):
                # Exponentially anneal the Gumbel temperature, floored at 0.5.
                rate = args.anneal_temperature_rate
                new_temperature = max([0.5, math.exp(((- rate) * iter_count))])
                model.encoder.gumbel_temperature = new_temperature
                logging.info(f'Iter #{iter_count}: Set Gumbel temperature to {new_temperature:.4f}')
            (train_loss, train_accuracy) = run_iter(batch=train_batch, is_training=True)
            iter_count += 1
            add_scalar_summary(summary_writer=train_summary_writer, name='loss', value=train_loss, step=iter_count)
            add_scalar_summary(summary_writer=train_summary_writer, name='accuracy', value=train_accuracy, step=iter_count)
            if (((batch_iter + 1) % validate_every) == 0):
                # Full validation pass with gradients disabled.
                torch.set_grad_enabled(False)
                valid_loss_sum = valid_accuracy_sum = 0
                num_valid_batches = len(valid_loader)
                for valid_batch in valid_loader:
                    (valid_loss, valid_accuracy) = run_iter(batch=valid_batch, is_training=False)
                    valid_loss_sum += valid_loss.item()
                    valid_accuracy_sum += valid_accuracy.item()
                torch.set_grad_enabled(True)
                valid_loss = (valid_loss_sum / num_valid_batches)
                valid_accuracy = (valid_accuracy_sum / num_valid_batches)
                scheduler.step(valid_accuracy)
                add_scalar_summary(summary_writer=valid_summary_writer, name='loss', value=valid_loss, step=iter_count)
                add_scalar_summary(summary_writer=valid_summary_writer, name='accuracy', value=valid_accuracy, step=iter_count)
                progress = (epoch_num + (batch_iter / num_train_batches))
                logging.info(f'Epoch {progress:.2f}: valid loss = {valid_loss:.4f}, valid accuracy = {valid_accuracy:.4f}')
                if (valid_accuracy > best_vaild_accuacy):
                    # New best model: snapshot weights named by progress/metrics.
                    best_vaild_accuacy = valid_accuracy
                    model_filename = f'model-{progress:.2f}-{valid_loss:.4f}-{valid_accuracy:.4f}.pkl'
                    model_path = os.path.join(args.save_dir, model_filename)
                    torch.save(model.state_dict(), model_path)
                    print(f'Saved the new best model to {model_path}')
class SiteBELData():
    """Data describing one BEL within a device site.

    NOTE(review): bare field annotations with no visible @dataclass decorator
    — one was likely stripped during extraction; confirm upstream.
    """
    name: str                   # BEL name
    bel_type: str               # BEL type string
    bel_class: str              # BEL classification — semantics not visible here
    pins: list[SiteBELPinData]  # per-pin data for this BEL
# NOTE(review): the dangling ".parametrize(...)" below looks like a truncated
# "@pytest.mark.parametrize(...)" decorator — confirm against upstream.
.parametrize('input_dim, output_dim, hidden_sizes, n_heads, nonlinearity, w_init, b_init', invalid_settings)
def test_invalid_settings(input_dim, output_dim, hidden_sizes, n_heads, nonlinearity, w_init, b_init):
    """Each invalid setting combination must raise ValueError with the
    expected per-head length-mismatch message."""
    expected_msg_template = 'should be either an integer or a collection of length n_heads'
    with pytest.raises(ValueError, match=expected_msg_template):
        MultiHeadedMLPModule(n_heads=n_heads, input_dim=input_dim, output_dims=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=None, hidden_w_init=nn.init.ones_, output_nonlinearities=nonlinearity, output_w_inits=list(map(_helper_make_inits, w_init)), output_b_inits=b_init)
def update_labels(label, state, sample_len, opt):
    """Build per-anchor classification and box-regression targets on GPU.

    Positions past the sample length (or with no label) get -1 everywhere.
    Otherwise each anchor is labelled by its IoU with the target length:
    1 + log-ratio box target above `iou_ubound`, 0 below `iou_lbound`,
    and -1 (ignore) in between.

    Returns:
        (gt_cls, gt_box) — float CUDA tensors of shape [opt.n_classes].
    """
    raw = (- 1) if state[0] >= sample_len else label[state[0]]
    gt_cls = torch.zeros([opt.n_classes], dtype=torch.float).cuda()
    gt_box = torch.zeros([opt.n_classes], dtype=torch.float).cuda()
    if raw == (- 1):
        gt_cls.fill_(- 1)
        gt_box.fill_(- 1)
        return (gt_cls, gt_box)
    length = raw + 1
    for k in range(opt.n_classes):
        anchor = opt.anchors[k] * state[1]
        inter = min(length, anchor)
        union = max(length, anchor)
        iou = float(inter) / union
        if iou >= opt.iou_ubound:
            gt_cls[k] = 1
            gt_box[k] = math.log(length / anchor)
        elif iou <= opt.iou_lbound:
            gt_cls[k] = 0
            gt_box[k] = (- 1)
        else:
            gt_cls[k] = (- 1)
            gt_box[k] = (- 1)
    return (gt_cls, gt_box)
class _WildCatPoolDecision(nn.Module):
def __init__(self, kmax=0.5, kmin=None, alpha=1.0, dropout=0.0):
super(_WildCatPoolDecision, self).__init__()
assert isinstance(kmax, (int, float))
assert (kmax > 0.0)
assert ((kmin is None) or isinstance(kmin, (int, float)))
if isinstance(kmin, (int, float)):
assert (kmin >= 0.0)
self.kmax = kmax
self.kmin = (kmax if (kmin is None) else kmin)
self.alpha = alpha
self.dropout = dropout
self.dropout_md = nn.Dropout(p=dropout, inplace=False)
def get_k(self, k, n):
if (k <= 0):
return 0
elif (k < 1):
return round((k * n))
elif ((k == 1) and isinstance(k, float)):
return int(n)
elif ((k == 1) and isinstance(k, int)):
return 1
elif (k > n):
return int(n)
else:
return int(k)
def forward(self, x):
(b, c, h, w) = x.shape
activations = x.view(b, c, (h * w))
n = (h * w)
sorted_features = torch.sort(activations, dim=(- 1), descending=True)[0]
kmax = self.get_k(self.kmax, n)
kmin = self.get_k(self.kmin, n)
assert (kmax != 0), 'kmax=0'
if (self.dropout != 0.0):
sorted_features = self.dropout_md(sorted_features)
scores = sorted_features.narrow((- 1), 0, kmax).sum((- 1)).div_(kmax)
if ((kmin > 0) and (self.alpha != 0.0)):
scores.add(sorted_features.narrow((- 1), (n - kmin), kmin).sum((- 1)).mul_((self.alpha / kmin))).div_(2.0)
return scores
def __str__(self):
return (self.__class__.__name__ + '(kmax={}, kmin={}, alpha={}, dropout={}'.format(self.kmax, self.kmin, self.alpha, self.dropout))
def __repr__(self):
return super(_WildCatPoolDecision, self).__repr__() |
class MobileBertForPreTraining():
    """Placeholder emitted when PyTorch is unavailable.

    Constructing it raises through `requires_pytorch`, pointing the user at
    the missing backend.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
class TestGARetinaHead(TestCase):
    """Smoke test: GARetinaHead can be built and run forward without error."""

    def test_ga_retina_head_init_and_forward(self):
        ga_retina_head = GARetinaHead(**ga_retina_head_config)
        s = 256  # nominal input size; feature maps are s / stride per level
        # One random feature map per anchor-generator stride level; a lazy
        # generator suffices because the head consumes it once.
        feats = (torch.rand(1, 4, (s // stride[1]), (s // stride[0])) for stride in ga_retina_head.square_anchor_generator.strides)
        # No assertions: the test passes if the forward pass does not raise.
        ga_retina_head(feats)
class DBLogger():
    """Keras-style training callback that persists epoch/batch logs to a DB.

    Epoch-end logs (plus a saved copy of model parameters) go through
    `db.save_params` / `db.valid_log`; batch-end logs go through
    `db.train_log`. Wall-clock timings are attached to both.
    """

    def __init__(self, db, model):
        """
        :param db: object with save_params/valid_log/train_log methods.
        :param model: model whose `Params` attribute is snapshotted per epoch.
        """
        self.db = db
        self.model = model

    def on_train_begin(self, logs={}):
        print('start')

    def on_train_end(self, logs={}):
        print('end')

    def on_epoch_begin(self, epoch, logs={}):
        self.epoch = epoch
        self.et = time.time()  # epoch start time
        return

    def on_epoch_end(self, epoch, logs={}):
        self.et = (time.time() - self.et)  # elapsed epoch time
        print('ending')
        print(epoch)
        logs['epoch'] = epoch
        # NOTE(review): naive UTC timestamp; datetime.utcnow is deprecated in
        # newer Pythons but behavior is kept.
        logs['time'] = datetime.utcnow()
        logs['stepTime'] = self.et
        # BUG FIX: np.asscalar was removed in NumPy 1.23; .item() on an
        # ndarray/NumPy scalar is the documented replacement.
        logs['acc'] = np.asarray(logs['acc']).item()
        print(logs)
        w = self.model.Params
        fid = self.db.save_params(w, logs)
        logs.update({'params': fid})
        self.db.valid_log(logs)

    def on_batch_begin(self, batch, logs={}):
        self.t = time.time()  # batch start time
        self.losses = []
        self.batch = batch

    def on_batch_end(self, batch, logs={}):
        self.t2 = (time.time() - self.t)  # elapsed batch time
        # BUG FIX: same np.asscalar removal as above.
        logs['acc'] = np.asarray(logs['acc']).item()
        logs['step_time'] = self.t2
        logs['time'] = datetime.utcnow()
        logs['epoch'] = self.epoch
        logs['batch'] = self.batch
        self.db.train_log(logs)
def imread(filename, flags=cv2.IMREAD_COLOR):
    """Read an image that may live inside a zip archive.

    `filename` is expected as '<archive>.zip@/<member path>'. Opened
    archives are cached in the module-level `_im_zfile` list so repeated
    reads reuse the same ZipFile handle.
    """
    global _im_zfile
    path = filename
    # BUG FIX: the separator character was lost during extraction —
    # path.index('') always returns 0, so the path was never split
    # correctly. Restore the '@' separator, and use find() so the -1 error
    # branch is reachable (str.index raises ValueError instead).
    pos_at = path.find('@')
    if (pos_at == (- 1)):
        print(("character '@' is not found from the given path '%s'" % path))
        assert 0
    path_zip = path[0:pos_at]
    path_img = path[(pos_at + 2):]  # skip the '@' and the following '/'
    if (not os.path.isfile(path_zip)):
        print(("zip file '%s' is not found" % path_zip))
        assert 0
    # Reuse an already-opened archive when possible.
    for i in range(len(_im_zfile)):
        if (_im_zfile[i]['path'] == path_zip):
            data = _im_zfile[i]['zipfile'].read(path_img)
            return cv2.imdecode(np.frombuffer(data, np.uint8), flags)
    # First access to this archive: open it and cache the handle.
    _im_zfile.append({'path': path_zip, 'zipfile': zipfile.ZipFile(path_zip, 'r')})
    data = _im_zfile[(- 1)]['zipfile'].read(path_img)
    return cv2.imdecode(np.frombuffer(data, np.uint8), flags)
class DefaultTaskMctsPolicies(DefaultMctsPolicies):
    """Default MCTS policies pre-wired with a task-specific model initializer."""

    def __init__(self, task, *args, **kwargs):
        # Force the `initialize` policy to a TaskModelInitialize built from
        # `task`; all other policies fall through to the base defaults.
        super(DefaultTaskMctsPolicies, self).__init__(*args, initialize=TaskModelInitialize(task), **kwargs)
def build_rgb_background(world: bpy.types.World, rgb: Tuple[(float, float, float, float)]=(0.9, 0.9, 0.9, 1.0), strength: float=1.0) -> None:
    """Drive the world's Background shader from a constant RGB node.

    Enables world nodes, creates a ShaderNodeRGB with the given RGBA value,
    sets the background strength, links the color in, and tidies the layout.
    """
    world.use_nodes = True
    tree = world.node_tree
    background = tree.nodes['Background']
    color_source = tree.nodes.new(type='ShaderNodeRGB')
    color_source.outputs['Color'].default_value = rgb
    background.inputs['Strength'].default_value = strength
    tree.links.new(color_source.outputs['Color'], background.inputs['Color'])
    arrange_nodes(tree)
def bin_pack(dummy_generator: DummyGenerator) -> BinPack:
    """Factory (likely a pytest fixture — decorator not visible here) for a
    BinPack environment observing 5 empty maximal spaces."""
    return BinPack(generator=dummy_generator, obs_num_ems=5)
def run_selfplay(config: MuZeroConfig, storage: SharedStorage, replay_buffer: ReplayBuffer):
    """Self-play actor loop: repeatedly pull the latest network, play one
    game with it, and store the game in the replay buffer.

    Runs forever; intended to be launched as a long-lived worker.
    """
    while True:
        network = storage.latest_network()
        game = play_game(config, network)
        replay_buffer.save_game(game)
# NOTE(review): the bare "('cnn_lstm')" below looks like a truncated
# registration decorator (e.g. "@register('cnn_lstm')") — confirm upstream.
('cnn_lstm')
def cnn_lstm(nlstm=128, layer_norm=False, **conv_kwargs):
    """Builder for a Nature-CNN + LSTM policy network.

    Returns a `network_fn(X, nenv)` that constructs the graph and returns
    (output, extras) where extras holds the state placeholder 'S', the mask
    placeholder 'M', the new recurrent state 'state', and a zero
    'initial_state' array.
    """
    def network_fn(X, nenv=1):
        nbatch = X.shape[0]
        nsteps = (nbatch // nenv)  # batch = nenv environments x nsteps steps
        h = nature_cnn(X, **conv_kwargs)
        # M: per-step float mask fed to the LSTM (presumably episode-done
        # flags — confirm against utils.lstm). S: (c, h) recurrent state.
        M = tf.placeholder(tf.float32, [nbatch])
        S = tf.placeholder(tf.float32, [nenv, (2 * nlstm)])
        xs = batch_to_seq(h, nenv, nsteps)
        ms = batch_to_seq(M, nenv, nsteps)
        if layer_norm:
            (h5, snew) = utils.lnlstm(xs, ms, S, scope='lnlstm', nh=nlstm)
        else:
            (h5, snew) = utils.lstm(xs, ms, S, scope='lstm', nh=nlstm)
        h = seq_to_batch(h5)
        initial_state = np.zeros(S.shape.as_list(), dtype=float)
        return (h, {'S': S, 'M': M, 'state': snew, 'initial_state': initial_state})
    return network_fn
def ellipsis_clip(text, length):
    """Clip `text` to at most `length` characters, appending '...' when cut.

    :param text: string to clip.
    :param length: maximum length of the result.
    :return: `text` unchanged when it fits, otherwise a clipped string that
        never exceeds `length` characters.
    """
    if len(text) <= length:
        return text
    if length <= 3:
        # BUG FIX: no room for the ellipsis — plain truncation. The original
        # sliced with a negative index here, producing a result longer than
        # `length` (e.g. length=2 gave text[:-1] + '...').
        return text[:length]
    return (text[:(length - 3)] + '...')
def astroNNDistances(dr=None):
    """Ensure the astroNN distances catalog for data release `dr` is present.

    For DR14 the file is downloaded (if missing) to the path given by
    `path.astroNNDistancesPath`; other releases fall back to `astroNN(dr=dr)`.
    """
    if (dr is None):
        dr = path._default_dr()
    if (int(dr) == 14):
        filePath = path.astroNNDistancesPath(dr=dr)
        if os.path.exists(filePath):
            # Already downloaded; nothing to do.
            return None
        # NOTE(review): the URL literal below was truncated to an
        # unterminated string during extraction — restore the original
        # download URL from upstream before using this function.
        downloadPath = '
        _download_file(downloadPath, filePath, dr, verbose=True)
        return None
    else:
        return astroNN(dr=dr)
def main():
    """Entry point: run the multi-time-series test battery with banner output."""
    blank = ''
    print(blank)
    print(blank)
    print(' Running the Testing Scripts. ')
    testMultipleTS()
    print(' Testing Scripts Done. ')
    print(blank)
    print(blank)
def compute_metrics(p: EvalPrediction):
    """HF Trainer metrics hook: argmax logits, then task metric or accuracy.

    Relies on `data_args` and `metric` from the surrounding scope/module.
    Returns a dict of metric name -> value.
    """
    # Some models return a tuple (logits, extras); keep only the logits.
    preds = (p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions)
    preds = np.argmax(preds, axis=1)
    if (data_args.task_name is not None):
        result = metric.compute(predictions=preds, references=p.label_ids)
        # Multi-metric tasks also report the mean as 'combined_score'.
        if (len(result) > 1):
            result['combined_score'] = np.mean(list(result.values())).item()
        return result
    else:
        return {'accuracy': (preds == p.label_ids).astype(np.float32).mean().item()}
class Md(object):
    """Dataset builder for a 9-class transfer task between two working conditions.

    The duplicated load/split logic of the original `data_split` is factored
    into `_load_frame` and `_train_val_datasets`; behavior (call order, fixed
    random_state, returned tuples) is unchanged.
    """
    num_classes = 9   # number of fault classes
    inputchannel = 1  # single-channel signals

    def __init__(self, data_dir, transfer_task, normlizetype='0-1'):
        """
        :param data_dir: root directory of the raw data files.
        :param transfer_task: (source_condition, target_condition) pair.
        :param normlizetype: normalization scheme passed to Normalize.
        """
        self.data_dir = data_dir
        self.source_N = transfer_task[0]
        self.target_N = transfer_task[1]
        self.normlizetype = normlizetype
        self.data_transforms = {'train': Compose([Reshape(), Normalize(self.normlizetype), Retype()]), 'val': Compose([Reshape(), Normalize(self.normlizetype), Retype()])}

    def _load_frame(self, working_condition):
        # Load (signals, labels) for one working condition into a DataFrame.
        list_data = get_files(self.data_dir, working_condition)
        return pd.DataFrame({'data': list_data[0], 'label': list_data[1]})

    def _train_val_datasets(self, data_pd):
        # Deterministic stratified 80/20 split into (train, val) datasets.
        (train_pd, val_pd) = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd['label'])
        return (dataset(list_data=train_pd, transform=self.data_transforms['train']), dataset(list_data=val_pd, transform=self.data_transforms['val']))

    def data_split(self, transfer_learning=True):
        """Build the datasets for this transfer task.

        :return: (source_train, source_val, target_train, target_val) when
            `transfer_learning`, else (source_train, source_val, target_val)
            where target_val covers the full target data.
        """
        (source_train, source_val) = self._train_val_datasets(self._load_frame(self.source_N))
        if transfer_learning:
            (target_train, target_val) = self._train_val_datasets(self._load_frame(self.target_N))
            return (source_train, source_val, target_train, target_val)
        else:
            target_val = dataset(list_data=self._load_frame(self.target_N), transform=self.data_transforms['val'])
            return (source_train, source_val, target_val)
def main():
    """Read an RTTM file and print a new RTTM of overlap + single-speaker segments.

    Pipeline: parse segments, group per recording, extract overlapping and
    single-speaker regions, sort by (recording, start), print non-empty
    segments in RTTM format.
    """
    args = get_args()
    segments = []
    with open(args.input_rttm, 'r') as f:
        for line in f.readlines():
            parts = line.strip().split()
            # RTTM fields used: [1]=recording id, [3]=start, [4]=duration, [7]=speaker.
            segments.append(Segment(parts[1], float(parts[3]), dur=float(parts[4]), spk_id=parts[7]))
    # itertools.groupby only merges *consecutive* runs, so this assumes the
    # input RTTM lists each recording's lines contiguously.
    reco2segs = defaultdict(list, {reco_id: list(g) for (reco_id, g) in groupby(segments, (lambda x: x.reco_id))})
    overlap_segs = []
    for reco_id in reco2segs.keys():
        segs = reco2segs[reco_id]
        overlap_segs.extend(find_overlapping_segments(segs, args.label))
    single_speaker_segs = []
    for reco_id in reco2segs.keys():
        segs = reco2segs[reco_id]
        single_speaker_segs.extend(find_single_speaker_segments(segs))
    final_segs = sorted((overlap_segs + single_speaker_segs), key=(lambda x: (x.reco_id, x.start_time)))
    rttm_str = 'SPEAKER {0} 1 {1:7.3f} {2:7.3f} <NA> <NA> {3} <NA> <NA>'
    for seg in final_segs:
        # Drop zero/negative-duration segments.
        if (seg.dur > 0):
            print(rttm_str.format(seg.reco_id, seg.start_time, seg.dur, seg.spk_id))
def test_split_by_num():
    """split_by_num mask sizes: expected sums are count x 10 labels, and an
    omitted n_test takes the remainder.

    (The per-label interpretation of the counts is inferred from the
    expected sums — confirm against split_by_num's implementation.)
    """
    n = 1000
    v_label = np.random.randint(0, 10, n)
    (n_train, n_val, n_test) = (50, 20, 10)
    (m_train, m_val, m_test) = split_by_num(n, v_label, n_train, n_val, n_test)
    assert (m_train.sum() == 500)
    assert (m_val.sum() == 200)
    assert (m_test.sum() == 100)
    # With n_test omitted, the test mask should absorb the remaining samples.
    (m_train, m_val, m_test) = split_by_num(n, v_label, n_train, n_val)
    assert (m_train.sum() == 500)
    assert (m_val.sum() == 200)
    assert (m_test.sum() == 300)
def convert_mpcat40_to_12cat(im):
    """Remap a 2-D mpcat40 label image to the 12-category scheme.

    Mapped labels get `mpcat40_to_12cat[u] + 1`; labels absent from the
    mapping become 0. Accepts a torch tensor or numpy array and returns the
    same kind with integer dtype; unsupported types yield None.
    """
    assert (len(im.shape) == 2)
    if isinstance(im, torch.Tensor):
        im = im.to(dtype=torch.int)
        new_im = torch.zeros(im.shape, dtype=torch.int)
        unique = torch.unique(im)
        unique = unique.data.cpu().numpy()
        for u in unique:
            u = u.item()
            if (u in mpcat40_to_12cat):
                new_im[(im == u)] = (mpcat40_to_12cat[u] + 1)
        return new_im
    elif isinstance(im, np.ndarray):
        # BUG FIX: np.int was removed in NumPy 1.24; it was merely an alias
        # for the builtin int, so use int directly.
        im = im.astype(int)
        new_im = np.zeros(im.shape, dtype=int)
        unique = np.unique(im)
        unique = unique.astype(int)
        for u in unique:
            if (u in mpcat40_to_12cat):
                new_im[(im == u)] = (mpcat40_to_12cat[u] + 1)
        return new_im
    else:
        print('format not supported: ', type(im))
        return None
class Embedding(nn.Module):
    """Word-embedding lookup, optionally initialized from saved GloVe vectors."""

    def __init__(self, vocab_size, input_encoding_size, glove=None):
        """
        :param vocab_size: number of rows in the embedding table.
        :param input_encoding_size: embedding dimensionality.
        :param glove: optional path to a .npy array of shape
            (vocab_size, input_encoding_size) used to initialize the weights.
        """
        super(Embedding, self).__init__()
        self.embedding = nn.Embedding(vocab_size, input_encoding_size)
        self.glove = glove
        self.vocab_size = vocab_size
        self.input_encoding_size = input_encoding_size
        self._init_hidden()

    def _init_hidden(self):
        if (self.glove is not None):
            # BUG FIX: load_state_dict expects tensors, but np.load returns
            # an ndarray; convert it before loading.
            self.embedding.load_state_dict({'weight': torch.from_numpy(np.load(self.glove))})

    def forward(self, input):
        """input: LongTensor of token ids -> embedded float tensor."""
        return self.embedding(input)
class RWEqualCELoss(nn.Module):
    """Sum of cross-entropy and random-walk losses with equal (1:1) weighting.

    The third input `e` is accepted for interface compatibility but unused.
    """

    def __init__(self):
        super(RWEqualCELoss, self).__init__()

    def forward(self, x, y_, e):
        ce_term = CELoss()(x, y_)
        rw_term = RWLoss()(x, y_)
        return ce_term + rw_term
class CheckParamCallback(pl.Callback):
    """Lightning callback that prints RAG parameters lacking gradients."""

    def on_after_backward(self, trainer, pl_module):
        # A None grad after backward means the parameter did not take part in
        # the loss graph (e.g. frozen/unused) — print its name for debugging.
        for (name, param) in pl_module.model.rag.named_parameters():
            if (param.grad is None):
                print(name)
def train(args, model, ad_net, train_loader, train_loader1, optimizer, optimizer_ad, epoch, start_epoch, method):
    """One epoch of DANN/ALDA adversarial domain-adaptation training.

    Args:
        args: namespace with batch_size, trade_off, threshold, loss_type,
            log_interval (overwritten below).
        model: returns (features, logits) for a batch.
        ad_net: adversarial domain discriminator.
        train_loader / train_loader1: source / target domain loaders.
        optimizer / optimizer_ad: optimizers for model and ad_net.
        epoch, start_epoch: adversarial losses activate once epoch > start_epoch.
        method: 'DANN' or 'ALDA'; anything else raises ValueError.
    """
    model.train()
    len_source = len(train_loader)
    len_target = len(train_loader1)
    # Iterate over the longer loader, restarting the shorter one when drained.
    if (len_source > len_target):
        num_iter = len_source
    else:
        num_iter = len_target
    args.log_interval = num_iter
    high = args.trade_off  # NOTE(review): read but unused in this function.
    loss_value = 0
    loss_target_value = 0
    for batch_idx in tqdm(range(num_iter), total=num_iter):
        if ((batch_idx % len_source) == 0):
            iter_source = iter(train_loader)
        if ((batch_idx % len_target) == 0):
            iter_target = iter(train_loader1)
        # BUG FIX: Python-2 style `iterator.next()` does not exist on
        # Python 3 iterators; use the builtin next().
        (data_source, label_source) = next(iter_source)
        (data_source, label_source) = (data_source.cuda(), label_source.cuda())
        (data_target, label_target) = next(iter_target)
        data_target = data_target.cuda()
        optimizer.zero_grad()
        optimizer_ad.zero_grad()
        (features_source, outputs_source) = model(data_source)
        (features_target, outputs_target) = model(data_target)
        feature = torch.cat((features_source, features_target), dim=0)
        output = torch.cat((outputs_source, outputs_target), dim=0)
        # Supervised loss on the source half of the concatenated batch.
        classifier_loss = nn.CrossEntropyLoss()(output.narrow(0, 0, data_source.size(0)), label_source)
        # BUG FIX: accumulate the classifier loss so the periodic log below
        # reports a real running value (it was initialized and printed but
        # never updated in the original).
        loss_value += (classifier_loss.item() / args.log_interval)
        softmax_output = nn.Softmax(dim=1)(output)
        if (epoch > start_epoch):
            if (method == 'DANN'):
                transfer_loss = loss_func.DANN(feature, ad_net)
            elif (method == 'ALDA'):
                ad_out = ad_net(feature)
                # Skip ragged last batches where source/target sizes differ.
                if (label_source.size(0) != (ad_out.size(0) // 2)):
                    continue
                (adv_loss, reg_loss, correct_loss) = loss_func.ALDA_loss(ad_out, label_source, softmax_output, weight_type=1, threshold=args.threshold)
                if ('nocorrect' in args.loss_type):
                    transfer_loss = adv_loss
                else:
                    transfer_loss = (adv_loss + correct_loss)
                if ('noreg' not in args.loss_type):
                    # reg_loss should only update ad_net, so freeze the model
                    # parameters around its backward pass.
                    for param in model.parameters():
                        param.requires_grad = False
                    reg_loss.backward(retain_graph=True)
                    for param in model.parameters():
                        param.requires_grad = True
            else:
                raise ValueError('Method cannot be recognized.')
            loss_target_value += (transfer_loss.item() / args.log_interval)
        else:
            # Warm-up epochs: classifier loss only.
            transfer_loss = 0
        loss = (classifier_loss + transfer_loss)
        if math.isnan(loss.item()):
            raise AssertionError
        loss.backward()
        optimizer.step()
        if (epoch > start_epoch):
            optimizer_ad.step()
        if ((batch_idx % args.log_interval) == (args.log_interval - 1)):
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, (batch_idx * args.batch_size), (num_iter * args.batch_size), ((100.0 * batch_idx) / num_iter), loss.item()))
            print('transfer_loss: {:.3f} classifier_loss: {:.3f}'.format(loss_target_value, loss_value))
            loss_value = 0
            loss_target_value = 0
def adjust_lower_plane(a, b, c, x0, y0, x_minus, x_plus, y_minus, y_plus, lr=0.01, max_iter=100, print_info=True):
    """Search for points that tighten/violate the lower plane z = a*x + b*y + c
    for the surface z = tanh(x) * sigmoid(y) inside the box
    [x_minus, x_plus] x [y_minus, y_plus].

    Four probe points are seeded near the four quadrants around (x0, y0) and
    optimized with Adam to minimize the gap
    tanh(x)*sigmoid(y) - (a*x + b*y + c); the smallest gap found at a
    "valid" location (as judged by qualification_loss_lower) is tracked per
    element for each probe.

    Args:
        a, b, c: plane coefficient tensors; detached here and treated as
            constants during the probe optimization.
        x0, y0: reference point tensors the probes are seeded around (detached).
        x_minus, x_plus, y_minus, y_plus: elementwise box bounds; all best/loss
            buffers are allocated with x_minus's shape and device.
        lr: Adam learning rate for the probe coordinates.
        max_iter: number of optimization steps.
        print_info: if True, print the final mean loss.

    Returns:
        (x1_best, y1_best, x2_best, y2_best, x3_best, y3_best, x4_best, y4_best,
         loss1_best, loss2_best, loss3_best, loss4_best): best valid location and
        gap per probe; elements never improved stay at their initial values
        (0 for coordinates, 1000 for losses).
    """
    device = x_minus.device
    # Reference point is a constant for this optimization.
    x0 = x0.detach()
    y0 = y0.detach()
    # Seed each probe at the midpoint between (x0, y0) and one box corner:
    # probe 1 -> (x_minus, y_minus), 2 -> (x_minus, y_plus),
    # probe 3 -> (x_plus, y_plus),  4 -> (x_plus, y_minus).
    x1 = ((x0 + x_minus) / 2).data.clone()
    y1 = ((y0 + y_minus) / 2).data.clone()
    x2 = ((x0 + x_minus) / 2).data.clone()
    y2 = ((y0 + y_plus) / 2).data.clone()
    x3 = ((x0 + x_plus) / 2).data.clone()
    y3 = ((y0 + y_plus) / 2).data.clone()
    x4 = ((x0 + x_plus) / 2).data.clone()
    y4 = ((y0 + y_minus) / 2).data.clone()
    # Only the probe coordinates receive gradients.
    x1.requires_grad = True
    y1.requires_grad = True
    x2.requires_grad = True
    y2.requires_grad = True
    x3.requires_grad = True
    y3.requires_grad = True
    x4.requires_grad = True
    y4.requires_grad = True
    # Plane coefficients are constants here.
    a = a.detach()
    b = b.detach()
    c = c.detach()
    optimizer = optim.Adam([x1, y1, x2, y2, x3, y3, x4, y4], lr=lr)
    # Per-element best trackers; 1000 acts as "+infinity" for the gap.
    x1_best = torch.zeros(x_minus.shape, device=device)
    y1_best = torch.zeros(x_minus.shape, device=device)
    loss1_best = (torch.ones(x_minus.shape, device=device) * 1000)
    x2_best = torch.zeros(x_minus.shape, device=device)
    y2_best = torch.zeros(x_minus.shape, device=device)
    loss2_best = (torch.ones(x_minus.shape, device=device) * 1000)
    x3_best = torch.zeros(x_minus.shape, device=device)
    y3_best = torch.zeros(x_minus.shape, device=device)
    loss3_best = (torch.ones(x_minus.shape, device=device) * 1000)
    x4_best = torch.zeros(x_minus.shape, device=device)
    y4_best = torch.zeros(x_minus.shape, device=device)
    loss4_best = (torch.ones(x_minus.shape, device=device) * 1000)
    for i in range(max_iter):
        # Gap between the true surface and the plane at each probe; a smaller
        # (more negative) value means a stronger witness against the plane.
        loss1 = ((torch.tanh(x1) * torch.sigmoid(y1)) - (((a * x1) + (b * y1)) + c))
        loss2 = ((torch.tanh(x2) * torch.sigmoid(y2)) - (((a * x2) + (b * y2)) + c))
        loss3 = ((torch.tanh(x3) * torch.sigmoid(y3)) - (((a * x3) + (b * y3)) + c))
        loss4 = ((torch.tanh(x4) * torch.sigmoid(y4)) - (((a * x4) + (b * y4)) + c))
        # qualification_loss_lower presumably penalizes probes outside the
        # feasible region and returns a boolean validity mask — TODO confirm.
        (qloss1, valid1) = qualification_loss_lower(x1, y1, x_minus, x_plus, y_minus, y_plus)
        # Elementwise: update the best record only where the probe is valid
        # AND improves on the previous best gap.
        best1 = ((loss1 < loss1_best) * valid1)
        x1_best[best1] = x1[best1]
        y1_best[best1] = y1[best1]
        loss1_best[best1] = loss1[best1]
        (qloss2, valid2) = qualification_loss_lower(x2, y2, x_minus, x_plus, y_minus, y_plus)
        best2 = ((loss2 < loss2_best) * valid2)
        x2_best[best2] = x2[best2]
        y2_best[best2] = y2[best2]
        loss2_best[best2] = loss2[best2]
        (qloss3, valid3) = qualification_loss_lower(x3, y3, x_minus, x_plus, y_minus, y_plus)
        best3 = ((loss3 < loss3_best) * valid3)
        x3_best[best3] = x3[best3]
        y3_best[best3] = y3[best3]
        loss3_best[best3] = loss3[best3]
        (qloss4, valid4) = qualification_loss_lower(x4, y4, x_minus, x_plus, y_minus, y_plus)
        best4 = ((loss4 < loss4_best) * valid4)
        x4_best[best4] = x4[best4]
        y4_best[best4] = y4[best4]
        loss4_best[best4] = loss4[best4]
        # Combined objective: gap terms are weighted by validity (valid points
        # get full weight, invalid ones a small 0.1 weight so they still move)
        # plus the qualification penalties that push probes back into bounds.
        loss = ((loss1 * (valid1.float() + 0.1)) + qloss1)
        loss = ((loss + (loss2 * (valid2.float() + 0.1))) + qloss2)
        loss = ((loss + (loss3 * (valid3.float() + 0.1))) + qloss3)
        loss = ((loss + (loss4 * (valid4.float() + 0.1))) + qloss4)
        loss = loss.mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    if print_info:
        print(('2 adjust lower plane loss: %.4f' % loss.item()))
    return (x1_best, y1_best, x2_best, y2_best, x3_best, y3_best, x4_best, y4_best, loss1_best, loss2_best, loss3_best, loss4_best)
def merge_result_batches(batches):
    """Merge per-batch result tuples into one result per field.

    Each element of `batches` is a sequence of numpy arrays with the same
    number of fields; field j of every batch is concatenated along axis 0.

    Args:
        batches: non-empty sequence of same-length sequences of np.ndarray.

    Returns:
        list of np.ndarray, one concatenated array per field.
    """
    # Concatenate each field once. The original looped pairwise
    # (np.concatenate per batch), re-copying the growing result every
    # iteration — O(n^2) in total copied data; this is a single O(n) pass.
    return [np.concatenate(list(field)) for field in zip(*batches)]
def sort(terms=[], context='', service=GOOGLE, license=None, strict=True, prefix=False, **kwargs):
    """Rank each term by web-search relevance in the given context.

    Each term is combined with `context` ("term context", or "context term"
    when prefix=True) and submitted to the chosen search engine; a term's
    weight is its share of the summed result counts across all terms.

    Args:
        terms: words to rank (read-only; the shared default list is never
            mutated).
        context: string joined with each term to form the query.
        service: search engine identifier (key into SERVICES).
        license: API license key passed to the engine constructor.
        strict: when True, wrap each query in double quotes (exact phrase).
        prefix: when True, the context precedes the term.
        **kwargs: 'language' and 'reverse' are consumed here; everything
            else is forwarded to service.search().

    Returns:
        list of (relative_total, query) tuples, most relevant first
        (pass reverse=False for ascending order).
    """
    engine = SERVICES.get(service, SearchEngine)(license, language=kwargs.pop('language', None))
    results = []
    for word in terms:
        q = ((context + ' ') + word) if prefix else ((word + ' ') + context)
        # BUGFIX: the original called q.strip() and discarded the result
        # (str is immutable), leaving a stray leading/trailing space when
        # context is empty — which changes the quoted query under strict=True.
        q = q.strip()
        if strict:
            q = '"%s"' % q
        # Wikipedia/Wikia have no ranked web search; query all articles instead.
        t = '*' if service in (WIKIPEDIA, WIKIA) else SEARCH
        results.append(engine.search(q, type=t, count=1, **kwargs))
    # Engines may report total=0/None; count those as 1 so weights stay finite.
    total = float(sum((r.total or 1) for r in results)) or 1.0
    weighted = [((r.total or 1) / total, r.query) for r in results]
    return sorted(weighted, reverse=kwargs.pop('reverse', True))
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone exposing four intermediate feature maps.

    cfg.MODEL.OUTPUT_STRIDE selects the dilation rates of the last two
    stages and cfg.MODEL.BACKBONE_SCALE widens every layer. `num_classes`
    is accepted for interface compatibility but unused — there is no
    classifier head; forward() returns the stage-2..5 feature maps.
    """

    def __init__(self, num_classes=1000, norm_layer=nn.BatchNorm2d):
        super(MobileNetV2, self).__init__()
        output_stride = cfg.MODEL.OUTPUT_STRIDE
        self.multiplier = cfg.MODEL.BACKBONE_SCALE
        # Dilations for the last two stages, keyed by requested output stride.
        dilation_table = {32: [1, 1], 16: [1, 2], 8: [2, 4]}
        if output_stride not in dilation_table:
            raise NotImplementedError
        dilations = dilation_table[output_stride]
        # Each entry: (expansion factor t, out channels c, repeats n, stride s).
        settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
                    [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        # The stem is only widened when the multiplier enlarges the network.
        stem_channels = int(32 * self.multiplier) if self.multiplier > 1.0 else 32
        self.conv1 = _ConvBNReLU(3, stem_channels, 3, 2, 1, relu6=True, norm_layer=norm_layer)
        self.planes = stem_channels
        # _make_layer reads and updates self.planes as stages are stacked.
        self.block1 = self._make_layer(InvertedResidual, self.planes, settings[0:1], norm_layer=norm_layer)
        self.block2 = self._make_layer(InvertedResidual, self.planes, settings[1:2], norm_layer=norm_layer)
        self.block3 = self._make_layer(InvertedResidual, self.planes, settings[2:3], norm_layer=norm_layer)
        self.block4 = self._make_layer(InvertedResidual, self.planes, settings[3:5], dilations[0], norm_layer=norm_layer)
        self.block5 = self._make_layer(InvertedResidual, self.planes, settings[5:], dilations[1], norm_layer=norm_layer)
        self.last_inp_channels = self.planes
        self._init_weights()

    def _init_weights(self):
        """Kaiming-init convs, unit/zero-init batchnorms, normal-init linears."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out')
                if module.bias is not None:
                    nn.init.zeros_(module.bias)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.ones_(module.weight)
                nn.init.zeros_(module.bias)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)

    def _make_layer(self, block, planes, inverted_residual_setting, dilation=1, norm_layer=nn.BatchNorm2d):
        """Stack one stage of inverted-residual blocks; updates self.planes."""
        layers = []
        for t, c, n, s in inverted_residual_setting:
            out_channels = int(c * self.multiplier)
            # A dilated stage must not also downsample.
            layers.append(block(planes, out_channels, s if dilation == 1 else 1, t, dilation, norm_layer))
            planes = out_channels
            # NOTE(review): the repeated blocks do not receive `dilation` and
            # fall back to the block's default — confirm this is intentional.
            for _ in range(n - 1):
                layers.append(block(planes, out_channels, 1, t, norm_layer=norm_layer))
                planes = out_channels
        self.planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return the (c1, c2, c3, c4) feature maps from stages 2-5."""
        stem = self.block1(self.conv1(x))
        c1 = self.block2(stem)
        c2 = self.block3(c1)
        c3 = self.block4(c2)
        c4 = self.block5(c3)
        return (c1, c2, c3, c4)
def cau_recall_mrr_n(preds, labels, cutoff=20):
    """Compute per-sample recall@cutoff hits and reciprocal-rank values.

    For each (scores, target_index) pair, the target's rank is one plus the
    number of scores strictly greater than the target's score.

    Args:
        preds: iterable of score arrays, one per sample.
        labels: iterable of target indices, aligned with preds.
        cutoff: rank threshold for a hit.

    Returns:
        (recall, mrr): recall holds booleans (rank <= cutoff); mrr holds
        1/rank for hits and 0.0 for misses.
    """
    recall, mrr = [], []
    for scores, target in zip(preds, labels):
        rank = (scores[target] < scores).sum() + 1
        hit = rank <= cutoff
        recall.append(hit)
        mrr.append(1 / rank if hit else 0.0)
    return (recall, mrr)
def printError(message):
    """Print an error banner followed by the usage help, then exit.

    Exits the process with status -1 after printing; never returns.
    """
    for line in ('ERROR: {}'.format(message), '', 'USAGE:'):
        print(line)
    printHelp()
    sys.exit(-1)
def CRNN1D(X_shape, nb_classes):
    """Build a 1D convolutional-recurrent classifier.

    Stacks three Conv1D/ReLU/MaxPool blocks over the time axis, two GRU
    layers, and a softmax output of size nb_classes.

    Args:
        X_shape: 4-tuple (batch, frequency, time, channels); the batch
            dimension is ignored.
        nb_classes: number of output classes.

    Returns:
        Uncompiled keras Sequential model.
    """
    num_conv_layers = 3
    kernel_size = 5
    activation = 'relu'
    pool_size = 2
    # Axis indices after the batch dimension: 1=frequency, 2=time, 3=channel.
    freq_axis, time_axis, chan_axis = 1, 2, 3
    model = Sequential()
    # Move the time axis first so Conv1D convolves over time.
    model.add(Permute((time_axis, freq_axis, chan_axis),
                      input_shape=(X_shape[1], X_shape[2], X_shape[3])))
    # Collapse frequency and channel into a single feature dimension.
    flat_features = model.output_shape[2] * model.output_shape[3]
    model.add(Reshape((model.output_shape[1], flat_features)))
    # First conv block uses 64 filters, the remaining ones 128.
    for filters in [64] + [128] * (num_conv_layers - 1):
        model.add(Conv1D(filters, kernel_size))
        model.add(Activation(activation))
        model.add(MaxPooling1D(pool_size=pool_size, strides=pool_size))
    model.add(GRU(64, return_sequences=True))
    model.add(GRU(64, return_sequences=False))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    return model
def kmnist():
    """Collect the download configurations for the KMNIST dataset."""
    def build_dataset():
        # Triggers the download into ROOT when invoked by the collector.
        return datasets.KMNIST(ROOT, download=True)
    return collect_download_configs(build_dataset, name='KMNIST')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.