| code |
|---|
| `tuple([ext.new_tensor_like("%s x" % p.name, p) for p in params])` |
| `Hx_plain()` |
| `TT.sum([TT.sum(g * x) for g, x in zip(constraint_grads, xs)])` |
| `zip(constraint_grads, xs)` |
| `TT.concatenate([TT.flatten(s) for s in Hx_plain_splits])` |
| `Hx_plain()` |
| `build_eval(self, inputs)` |
| `eval(x)` |
| `tuple(self.target.flat_to_params(x, trainable=True))` |
| `sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)` |
| `FiniteDifferenceHvp(Serializable)` |
| `__init__(self, base_eps=1e-8, symmetric=True, grad_clip=None, num_slices=1)` |
| `Serializable.quick_init(self, locals())` |
| `update_opt(self, f, target, inputs, reg_coeff)` |
| `target.get_params(trainable=True)` |
| `ext.flatten_tensor_variables(constraint_grads)` |
| `f_Hx_plain(*args)` |
| `len(inputs)` |
| `len(inputs)` |
| `np.concatenate([np.reshape(x, (-1,)) for x in xs])` |
| `self.target.get_param_values(trainable=True)` |
| `np.linalg.norm(param_val)` |
| `self.target.set_param_values(param_val, trainable=True)` |
| `self.target.set_param_values(param_val, trainable=True)` |
| `build_eval(self, inputs)` |
| `eval(x)` |
| `tuple(self.target.flat_to_params(x, trainable=True))` |
| `sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)` |
| `ConjugateGradientOptimizer(Serializable)` |
| `Serializable.quick_init(self, locals())` |
| `PerlmutterHvp(num_slices)` |
| `tuple (f, epsilon)` |
| `f(*inputs)` |
| `tuple(inputs)` |
| `tuple()` |
| `tuple(extra_inputs)` |
| `target.get_params(trainable=True)` |
| `theano.grad(loss, wrt=params, disconnected_inputs='warn')` |
| `ext.flatten_tensor_variables(grads)` |
| `loss(self, inputs, extra_inputs=None)` |
| `tuple(inputs)` |
| `tuple()` |
| `sliced_fun(self._opt_fun["f_loss"], self._num_slices)` |
| `constraint_val(self, inputs, extra_inputs=None)` |
| `tuple(inputs)` |
| `tuple()` |
| `sliced_fun(self._opt_fun["f_constraint"], self._num_slices)` |
| `optimize(self, inputs, extra_inputs=None, subsample_grouped_inputs=None)` |
| `tuple(inputs)` |
| `tuple()` |
| `tuple()` |
| `len(inputs_grouped[0])` |
| `int(n_samples * self._subsample_factor)` |
| `tuple([x[inds] for x in inputs_grouped])` |
| `logger.log("computing loss before")` |
| `sliced_fun(self._opt_fun["f_loss"], self._num_slices)` |
| `logger.log("performing update")` |
| `logger.log("computing descent direction")` |
| `sliced_fun(self._opt_fun["f_grad"], self._num_slices)` |
| `self._hvp_approach.build_eval(subsample_inputs + extra_inputs)` |
| `krylov.cg(Hx, flat_g, cg_iters=self._cg_iters)` |
| `descent_direction.dot(Hx(descent_direction))` |
| `np.isnan(initial_step_size)` |
| `logger.log("descent direction computed")` |
| `np.copy(self._target.get_param_values(trainable=True))` |
| `enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks))` |
| `self._target.set_param_values(cur_param, trainable=True)` |
| `if (np.isnan(loss) or np.isnan(constraint_val) or loss >= loss_before or constraint_val >= self._max_constraint_val) and not self._accept_violation:` |
| `np.isnan(constraint_val)` |
| `logger.log("Line search condition violated. Rejecting the step!")` |
| `np.isnan(loss)` |
| `logger.log("Violated because loss is NaN")` |
| `np.isnan(constraint_val)` |
| `logger.log("Violated because loss not improving")` |
| `self._target.set_param_values(prev_param, trainable=True)` |
| `logger.log("backtrack iters: %d" % n_iter)` |
| `logger.log("computing loss after")` |
| `logger.log("optimization finished")` |
| `openapi_types (dict)` |
| `attribute_map (dict)` |
| `dict(str, TemplateState)` |
| `__init__(self, flow=None, states=None, workflow=None)` |
| `flow(self)` |
| `flow(self, flow)` |
| `states(self)` |
| `dict(str, TemplateState)` |
| `states(self, states)` |
| `dict(str, TemplateState)` |
| `workflow(self)` |
| `workflow(self, workflow)` |
| `to_dict(self)` |
| `six.iteritems(self.openapi_types)` |
| `getattr(self, attr)` |
| `isinstance(value, list)` |
| `x.to_dict()` |
| `hasattr(x, "to_dict")` |
| `hasattr(value, "to_dict")` |
| `value.to_dict()` |
| `isinstance(value, dict)` |
| `to_dict()` |
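
The final rows (`six.iteritems(self.openapi_types)`, `getattr(self, attr)`, the `isinstance`/`hasattr` checks) follow the recursive serializer that OpenAPI code generators emit for Python models. Below is a minimal sketch of that pattern, assuming only the `openapi_types` attribute-to-type map; `Model` and `TemplateState` are stand-ins for the generated classes, not the client's actual code.

```python
# Minimal sketch of the recursive to_dict the rows above trace, assuming
# the openapi-generator convention of an `openapi_types` map per model.
# `Model` and `TemplateState` are illustrative stand-ins.
import six


class Model(object):
    openapi_types = {}  # attribute name -> declared type, set by subclasses

    def to_dict(self):
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # recurse into list elements that are themselves models
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                # nested model
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # recurse into dict values, e.g. dict(str, TemplateState)
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result


class TemplateState(Model):
    openapi_types = {"flow": "str"}

    def __init__(self, flow=None):
        self.flow = flow


print(TemplateState(flow="start").to_dict())  # {'flow': 'start'}
```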