code stringlengths 101 5.91M |
|---|
def _get_broker_actor(cache_dir, input_shards, processor, rows_per_chunk=DEFAULT_ROWS_PER_CHUNK):
    """Return a (possibly pre-existing) ChunkCacheBroker Ray actor for *cache_dir*.

    The actor is named after the cache directory and created with
    ``get_if_exists=True`` so concurrent callers sharing a cache directory
    all attach to the same broker instance.
    """
    actor_name = 'lev_cache_manager::' + cache_dir
    broker = ChunkCacheBroker.options(name=actor_name, get_if_exists=True)
    return broker.remote(cache_dir, input_shards, processor, rows_per_chunk)
def untargeted_detection(model, img, dataset, lr, u_radius, cap=1000, margin=20, use_margin=False):
    """Count the gradient steps needed to flip the model's prediction on `img`.

    Performs an untargeted perturbation of `img` (projected into an
    L-inf ball of radius `u_radius` around the original image and clamped
    to [0, 1]) until the predicted label changes or `cap` steps are reached.
    The returned step count is used as a detection statistic.

    Args:
        model: classifier in eval mode; called on `transform(...)` output.
        img: input image tensor; assumes values in [0, 1] and CUDA available.
        dataset: dataset identifier forwarded to `transform` for normalization.
        lr: step size for the manual gradient update.
        u_radius: L-inf radius of the allowed perturbation.
        cap: maximum number of steps before giving up.
        margin: margin used by the margin-based loss when `use_margin` is set.
        use_margin: if True, minimize a hinge on (true logit - runner-up logit)
            instead of maximizing cross-entropy.

    Returns:
        int: number of update steps taken before the label flipped (== cap
        if it never flipped).
    """
    model.eval()
    x_var = torch.autograd.Variable(img.clone().cuda(), requires_grad=True)
    # Label to flip: the model's own prediction on the (transformed) input.
    true_label = model(transform(x_var.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0].item()
    # Optimizer only used for zero_grad(); the update itself is manual below.
    optimizer_s = optim.SGD([x_var], lr=lr)
    counter = 0
    while (model(transform(x_var.clone(), dataset=dataset)).data.max(1, keepdim=True)[1][0].item() == true_label):
        optimizer_s.zero_grad()
        output = model(transform(x_var, dataset=dataset))
        if use_margin:
            # Runner-up class (second-best, or best if the top-1 is the true label).
            (_, top2_1) = output.data.cpu().topk(2)
            argmax11 = top2_1[0][0]
            if (argmax11 == true_label):
                argmax11 = top2_1[0][1]
            # Hinge loss: push the runner-up logit above the true logit by `margin`.
            loss = ((output[0][true_label] - output[0][argmax11]) + margin).clamp(min=0)
        else:
            # Maximize cross-entropy w.r.t. the current label (hence the negation).
            loss = (- F.cross_entropy(output, torch.LongTensor([true_label]).cuda()))
        loss.backward()
        # Manual gradient step, then clamp to valid pixel range...
        x_var.data = torch.clamp((x_var - (lr * x_var.grad.data)), min=0, max=1)
        # ...and project back into the L-inf ball around the original image.
        x_var.data = (torch.clamp((x_var - img), min=(- u_radius), max=u_radius) + img)
        counter += 1
        if (counter >= cap):
            break
    return counter
class _BaseMetric(ABC):
    """Abstract base class for tracking metrics.

    Subclasses declare which result fields they produce (integer, float and
    per-alpha array variants) and implement the eval/combine hooks; this base
    class provides shared summary/detailed formatting and table printing.

    Fix notes:
    - ``get_name`` restored as a @classmethod: it is called as
      ``self.get_name()`` below, and plain instances have no ``__name__``.
    - ``_combine_sum``, ``_combine_weighted_av`` and ``_row_print`` restored
      as @staticmethods: ``_row_print`` is invoked with a single list and
      relies on the ``len(argv) == 1`` unpacking.
    """

    def __init__(self):
        self.plottable = False
        # Field-name registries, populated by concrete subclasses.
        self.integer_fields = []
        self.float_fields = []
        self.array_labels = []
        self.integer_array_fields = []
        self.float_array_fields = []
        self.fields = []
        self.summary_fields = []
        self.registered = False

    # NOTE(review): the source contained a bare '_timing.time' expression
    # here (a no-op); restored as the decorator it evidently was.
    @_timing.time
    def eval_sequence(self, data):
        """Evaluate the metric on a single sequence. Must be overridden."""
        ...

    def combine_sequences(self, all_res):
        """Combine per-sequence results. Must be overridden."""
        ...

    def combine_classes_class_averaged(self, all_res, ignore_empty_classes=False):
        """Combine per-class results, averaging over classes. Must be overridden."""
        ...

    def combine_classes_det_averaged(self, all_res):
        """Combine per-class results, averaging over detections. Must be overridden."""
        ...

    def plot_single_tracker_results(self, all_res, tracker, output_folder, cls):
        """Plot results for one tracker; only meaningful when self.plottable."""
        if self.plottable:
            # A plottable metric is expected to override this method.
            raise NotImplementedError(('plot_results is not implemented for metric %s' % self.get_name()))
        else:
            pass

    @classmethod
    def get_name(cls):
        """Return the metric's name (its class name)."""
        return cls.__name__

    @staticmethod
    def _combine_sum(all_res, field):
        """Sum `field` across all sequences/classes in `all_res`."""
        return sum([all_res[k][field] for k in all_res.keys()])

    @staticmethod
    def _combine_weighted_av(all_res, field, comb_res, weight_field):
        """Average `field` weighted by `weight_field` (denominator floored at 1)."""
        return (sum([(all_res[k][field] * all_res[k][weight_field]) for k in all_res.keys()]) / np.maximum(1.0, comb_res[weight_field]))

    def print_table(self, table_res, tracker, cls):
        """Print a fixed-width results table: one row per sequence plus COMBINED."""
        print('')
        metric_name = self.get_name()
        self._row_print(([((((metric_name + ': ') + tracker) + '-') + cls)] + self.summary_fields))
        for (seq, results) in sorted(table_res.items()):
            if (seq == 'COMBINED_SEQ'):
                continue
            summary_res = self._summary_row(results)
            self._row_print(([seq] + summary_res))
        summary_res = self._summary_row(table_res['COMBINED_SEQ'])
        self._row_print((['COMBINED'] + summary_res))

    def _summary_row(self, results_):
        """Format summary values for one row (floats rendered as percentages)."""
        vals = []
        for h in self.summary_fields:
            if (h in self.float_array_fields):
                vals.append('{0:1.5g}'.format((100 * np.mean(results_[h]))))
            elif (h in self.float_fields):
                vals.append('{0:1.5g}'.format((100 * float(results_[h]))))
            elif (h in self.integer_fields):
                vals.append('{0:d}'.format(int(results_[h])))
            else:
                raise NotImplementedError('Summary function not implemented for this field type.')
        return vals

    @staticmethod
    def _row_print(*argv):
        """Print one table row: 35-char first column, 10-char cells after.

        Accepts either varargs or a single list (callers pass a list).
        """
        if (len(argv) == 1):
            argv = argv[0]
        to_print = ('%-35s' % argv[0])
        for v in argv[1:]:
            to_print += ('%-10s' % str(v))
        print(to_print)

    def summary_results(self, table_res):
        """Return {summary_field: formatted value} for the combined sequence."""
        return dict(zip(self.summary_fields, self._summary_row(table_res['COMBINED_SEQ'])))

    def detailed_results(self, table_res):
        """Return per-sequence dicts of all fields, expanding array fields per alpha plus an '___AUC' mean entry."""
        detailed_fields = (self.float_fields + self.integer_fields)
        for h in (self.float_array_fields + self.integer_array_fields):
            for alpha in [int((100 * x)) for x in self.array_labels]:
                detailed_fields.append(((h + '___') + str(alpha)))
            detailed_fields.append((h + '___AUC'))
        detailed_results = {}
        for (seq, res) in table_res.items():
            detailed_row = self._detailed_row(res)
            if (len(detailed_row) != len(detailed_fields)):
                raise TrackEvalException(('Field names and data have different sizes (%i and %i)' % (len(detailed_row), len(detailed_fields))))
            detailed_results[seq] = dict(zip(detailed_fields, detailed_row))
        return detailed_results

    def _detailed_row(self, res):
        """Flatten one result dict into a row matching detailed_results' field order."""
        detailed_row = []
        for h in (self.float_fields + self.integer_fields):
            detailed_row.append(res[h])
        for h in (self.float_array_fields + self.integer_array_fields):
            for (i, alpha) in enumerate([int((100 * x)) for x in self.array_labels]):
                detailed_row.append(res[h][i])
            detailed_row.append(np.mean(res[h]))
        return detailed_row
def run_info(tool, findings):
    """Build a run record from a tool descriptor and its findings.

    Collects the distinct finding names for the tool summary and one result
    entry per finding.
    """
    names = {f['name'] for f in findings}
    results = [result_info(tool['id'], f) for f in findings]
    return {'tool': tool_info(tool, names), 'results': results}
def process_alias_tokenization(tokens):
    """Re-join alias tokens split by the tokenizer.

    A pair like ``'XYZalias '`` followed by an alias-id token (matching
    ``alias_id_revtok_pattern``) is rewritten as the uppercase prefix
    ``'XYZ '`` plus ``'alias' + <id token>``; the prefix must be entirely
    uppercase for the merge to apply. All other tokens pass through.
    """
    merged = []
    idx = 0
    total = len(tokens)
    while idx < total:
        tok = tokens[idx]
        if (tok.endswith('alias ')
                and ((idx < (total - 1)) and re.fullmatch(alias_id_revtok_pattern, tokens[(idx + 1)]))
                and tok[:(- 6)].isupper()):
            merged.append('{} '.format(tok[:(- 6)]))
            merged.append(('alias' + tokens[(idx + 1)]))
            idx += 2
        else:
            merged.append(tok)
            idx += 1
    return merged
def main():
    """Precompute reference values for scipy's Wright Bessel function tests.

    Builds a grid over (a, b, x), filters out regions where the mpmath
    reference computation is unreliable/slow, evaluates `mp_wright_bessel`
    on the rest, and writes the dataset to wright_bessel.txt. Known-failing
    points are printed instead of being written.
    """
    t0 = time()
    print(__doc__)
    pwd = os.path.dirname(__file__)
    eps = (np.finfo(float).eps * 100)
    # Parameter grids, including values straddling interesting boundaries
    # (0.0001, 0.001, 1, 5, ...) by +/- eps.
    a_range = np.array([eps, (0.0001 * (1 - eps)), 0.0001, (0.0001 * (1 + eps)), (0.001 * (1 - eps)), 0.001, (0.001 * (1 + eps)), 0.1, 0.5, (1 * (1 - eps)), 1, (1 * (1 + eps)), 1.5, 2, 4.999, 5, 10])
    b_range = np.array([0, eps, 1e-10, 1e-05, 0.1, 1, 2, 10, 20, 100])
    # NOTE(review): this x grid looks partially garbled by extraction
    # (e.g. the bare '.0' entry and the ordering around exp_inf) — confirm
    # against the upstream generator script.
    x_range = np.array([0, eps, (1 - eps), 1, (1 + eps), 1.5, (2 - eps), 2, (2 + eps), (9 - eps), 9, (9 + eps), (10 * (1 - eps)), 10, (10 * (1 + eps)), (100 * (1 - eps)), 100, (100 * (1 + eps)), 500, exp_inf, 1000.0, 100000.0, .0, 1e+20])
    (a_range, b_range, x_range) = np.meshgrid(a_range, b_range, x_range, indexing='ij')
    a_range = a_range.flatten()
    b_range = b_range.flatten()
    x_range = x_range.flatten()
    # Exclude (a, x) regions where the reference computation is not trusted:
    # small a combined with very large x.
    bool_filter = (~ ((a_range < 0.005) & (x_range >= exp_inf)))
    bool_filter = (bool_filter & (~ ((a_range < 0.2) & (x_range > exp_inf))))
    bool_filter = (bool_filter & (~ ((a_range < 0.5) & (x_range > 1000.0))))
    bool_filter = (bool_filter & (~ ((a_range < 0.56) & (x_range > 5000.0))))
    bool_filter = (bool_filter & (~ ((a_range < 1) & (x_range > 10000.0))))
    bool_filter = (bool_filter & (~ ((a_range < 1.4) & (x_range > 100000.0))))
    bool_filter = (bool_filter & (~ ((a_range < 1.8) & (x_range > 1000000.0))))
    # NOTE(review): the '.0' thresholds below are almost certainly garbled
    # (the pattern suggests an increasing sequence like 1e7, 5e7, 1e8, ...);
    # as written, 'x_range > .0' excludes every positive x for a < 5.1.
    # Restore from the upstream script before trusting this filter.
    bool_filter = (bool_filter & (~ ((a_range < 2.2) & (x_range > .0))))
    bool_filter = (bool_filter & (~ ((a_range < 2.5) & (x_range > .0))))
    bool_filter = (bool_filter & (~ ((a_range < 2.9) & (x_range > .0))))
    bool_filter = (bool_filter & (~ ((a_range < 3.3) & (x_range > .0))))
    bool_filter = (bool_filter & (~ ((a_range < 3.7) & (x_range > .0))))
    bool_filter = (bool_filter & (~ ((a_range < 4) & (x_range > .0))))
    bool_filter = (bool_filter & (~ ((a_range < 4.4) & (x_range > .0))))
    bool_filter = (bool_filter & (~ ((a_range < 4.7) & (x_range > .0))))
    bool_filter = (bool_filter & (~ ((a_range < 5.1) & (x_range > .0))))
    bool_filter = (bool_filter & (~ ((a_range < 5.4) & (x_range > 1e+16))))
    bool_filter = (bool_filter & (~ ((a_range < 5.8) & (x_range > 1e+17))))
    bool_filter = (bool_filter & (~ ((a_range < 6.2) & (x_range > 1e+18))))
    # NOTE(review): duplicate of the previous line — harmless but redundant.
    bool_filter = (bool_filter & (~ ((a_range < 6.2) & (x_range > 1e+18))))
    bool_filter = (bool_filter & (~ ((a_range < 6.5) & (x_range > 1e+19))))
    bool_filter = (bool_filter & (~ ((a_range < 6.9) & (x_range > 1e+20))))
    # Points known to fail the test suite; these are reported, not written.
    failing = np.array([[0.1, 100, 709.], [0.5, 10, 709.], [0.5, 10, 1000], [0.5, 100, 1000], [1, 20, 100000], [1, 100, 100000], [1., 20, 100000], [1., 100, 100000], [1.5, 0, 500], [1.5, 2.e-14, 500], [1.5, 1e-10, 500], [1.5, 1e-05, 500], [1.5, 0.1, 500], [1.5, 20, 100000], [1.5, 100, 100000]]).tolist()
    does_fail = np.full_like(a_range, False, dtype=bool)
    for i in range(x_range.size):
        if ([a_range[i], b_range[i], x_range[i]] in failing):
            does_fail[i] = True
    # Apply the region filter to all parallel arrays.
    a_range = a_range[bool_filter]
    b_range = b_range[bool_filter]
    x_range = x_range[bool_filter]
    does_fail = does_fail[bool_filter]
    dataset = []
    print(f'Computing {x_range.size} single points.')
    print('Tests will fail for the following data points:')
    for i in range(x_range.size):
        a = a_range[i]
        b = b_range[i]
        x = x_range[i]
        maxterms = 1000
        # Harder region: tiny a with near-overflow x needs more series terms.
        if ((a < 1e-06) and (x >= (exp_inf / 10))):
            maxterms = 2000
        f = mp_wright_bessel(a, b, x, maxterms=maxterms)
        if does_fail[i]:
            print(f'failing data point a, b, x, value = [{a}, {b}, {x}, {f}]')
        else:
            dataset.append((a, b, x, f))
    dataset = np.array(dataset)
    filename = os.path.join(pwd, '..', 'tests', 'data', 'local', 'wright_bessel.txt')
    np.savetxt(filename, dataset)
    print(f'{((time() - t0) / 60):.1f} minutes elapsed')
def test_compare_chromosome_2_none(comparator):
    """A chromosome mock must order before None: compare(...) returns -1."""
    chromosome_mock = MagicMock(chrom.Chromosome)
    result = comparator.compare(chromosome_mock, None)
    assert result == -1
def _get_bytes(in_path: str, out_path: str):
    """Encode each stripped line of *in_path* with Bytes, one result per output line."""
    with open(in_path) as src, open(out_path, 'w') as dst:
        for line in src:
            encoded = Bytes.encode(line.strip())
            dst.write(encoded + '\n')
def GetWeightedPageRankMP(Graph, PRankH, Attr, C=0.85, Eps=0.0001, MaxIter=100):
    """Compute weighted PageRank (multi-threaded) via the _snap binding.

    Thin wrapper delegating directly to the SWIG-generated `_snap` module;
    results are written into `PRankH` by the native code.

    Args:
        Graph: SNAP graph/network object.
        PRankH: output hash table (node id -> PageRank score), filled in place.
        Attr: name of the edge attribute used as the edge weight.
        C: damping factor.
        Eps: convergence threshold.
        MaxIter: maximum number of iterations.
    """
    return _snap.GetWeightedPageRankMP(Graph, PRankH, Attr, C, Eps, MaxIter)
()  # NOTE(review): stray no-op expression — looks like a stripped decorator (e.g. '@SUITE.add()'); confirm against upstream
def run_pure_state(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
    """Return the 'run' Humanoid task using raw (pure-state) observations."""
    physics = Physics.from_xml_string(*get_model_and_assets())
    task = Humanoid(move_speed=_RUN_SPEED, pure_state=True, random=random)
    extra_kwargs = environment_kwargs if environment_kwargs else {}
    return control.Environment(
        physics,
        task,
        time_limit=time_limit,
        control_timestep=_CONTROL_TIMESTEP,
        **extra_kwargs,
    )
class alias(option_base):
    """Setuptools command: define, list, show or remove command-line aliases.

    With no arguments, lists all aliases. With one argument, shows (or, with
    --remove, deletes) that alias. With more arguments, defines the first as
    an alias for the shell-quoted remainder.
    """
    description = 'define a shortcut to invoke one or more commands'
    # Everything after the command name is treated as alias content,
    # not parsed as options.
    command_consumes_arguments = True
    user_options = ([('remove', 'r', 'remove (unset) the alias')] + option_base.user_options)
    boolean_options = (option_base.boolean_options + ['remove'])

    def initialize_options(self):
        # Standard distutils hook: set defaults before option parsing.
        option_base.initialize_options(self)
        self.args = None
        self.remove = None

    def finalize_options(self):
        # --remove only makes sense with exactly one alias name.
        option_base.finalize_options(self)
        if (self.remove and (len(self.args) != 1)):
            raise DistutilsOptionError('Must specify exactly one argument (the alias name) when using --remove')

    def run(self):
        aliases = self.distribution.get_option_dict('aliases')
        if (not self.args):
            # No args: list every defined alias.
            print('Command Aliases')
            print('')
            for alias in aliases:
                print('setup.py alias', format_alias(alias, aliases))
            return
        elif (len(self.args) == 1):
            # One arg: show the alias, or remove it when --remove was given
            # (command=None signals deletion to edit_config).
            (alias,) = self.args
            if self.remove:
                command = None
            elif (alias in aliases):
                print('setup.py alias', format_alias(alias, aliases))
                return
            else:
                print(('No alias definition found for %r' % alias))
                return
        else:
            # Multiple args: define args[0] as an alias for the rest,
            # shell-quoted so spaces survive the round trip.
            alias = self.args[0]
            command = ' '.join(map(shquote, self.args[1:]))
        edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone with output-stride control via dilation.

    Once the cumulative stride reaches `output_stride`, further stages use
    stride 1 and grow the dilation instead, preserving spatial resolution
    (the usual DeepLab-style modification of torchvision's MobileNetV2).
    """

    def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
        """
        Args:
            num_classes: size of the final classifier output.
            output_stride: target cumulative stride; stages beyond it dilate
                instead of striding.
            width_mult: channel-width multiplier.
            inverted_residual_setting: list of [t, c, n, s] stage specs
                (expansion, channels, repeats, stride); defaults to the
                standard MobileNetV2 configuration.
            round_nearest: channel counts are rounded to multiples of this.
        """
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        self.output_stride = output_stride
        current_stride = 1
        if (inverted_residual_setting is None):
            # t (expansion), c (output channels), n (repeats), s (stride)
            inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        if ((len(inverted_residual_setting) == 0) or (len(inverted_residual_setting[0]) != 4)):
            raise ValueError('inverted_residual_setting should be non-empty or a 4-element list, got {}'.format(inverted_residual_setting))
        input_channel = _make_divisible((input_channel * width_mult), round_nearest)
        self.last_channel = _make_divisible((last_channel * max(1.0, width_mult)), round_nearest)
        # Stem conv halves resolution.
        features = [ConvBNReLU(3, input_channel, stride=2)]
        current_stride *= 2
        dilation = 1
        previous_dilation = 1
        for (t, c, n, s) in inverted_residual_setting:
            output_channel = _make_divisible((c * width_mult), round_nearest)
            previous_dilation = dilation
            if (current_stride == output_stride):
                # Target stride reached: keep resolution, dilate instead.
                stride = 1
                dilation *= s
            else:
                stride = s
                current_stride *= s
            # NOTE(review): this overwrites the _make_divisible-rounded value
            # above with a plain int() — differs from torchvision's MobileNetV2
            # for width_mult != 1.0; confirm this is intentional.
            output_channel = int((c * width_mult))
            for i in range(n):
                # Only the first block of a stage strides; it also uses the
                # pre-update dilation, subsequent blocks use the new one.
                if (i == 0):
                    features.append(block(input_channel, output_channel, stride, previous_dilation, expand_ratio=t))
                else:
                    features.append(block(input_channel, output_channel, 1, dilation, expand_ratio=t))
                input_channel = output_channel
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        self.features = nn.Sequential(*features)
        self.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(self.last_channel, num_classes))
        # Standard MobileNetV2 weight initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        """Run the backbone, global-average-pool, then classify."""
        x = self.features(x)
        # Global average pool over the spatial dimensions (H, W).
        x = x.mean([2, 3])
        x = self.classifier(x)
        return x
_test(run_synthesis=False)  # NOTE(review): stray top-level call — likely extraction residue from an adjacent snippet; confirm it belongs here
def test_nd_split():
    """HBM transform over three 10-element banks must split the n-d SDFG by 10."""
    bank_assignments = [('x', 'HBM', '0:10'), ('y', 'HBM', '10:20'), ('z', 'HBM', '20:30')]
    csdfg, sdfg = _exec_hbmtransform(lambda: create_nd_sdfg('nd_split'), bank_assignments)
    validate_nd_sdfg(csdfg, 10, 10, divide_n=10)
    return sdfg
class Constellation_class(Element):
    """A constellation: a tuple of permutations whose product is the identity.

    Equivalently, a covering of the sphere with marked branch points. The
    permutations act on ``{0, ..., degree - 1}``; ``length`` is the number of
    permutations.

    Fix note: ``braid_group_orbit`` previously expanded from ``self`` instead
    of the popped vertex ``c``, so the orbit graph's edges were wrong beyond
    the first layer; it now acts on ``c`` (matching the braid-orbit BFS).
    """

    def __init__(self, parent, g, connected, mutable, check):
        """Store the permutation data; optionally validate via _check()."""
        Element.__init__(self, parent)
        self._connected = connected
        self._mutable = mutable
        self._g = g
        if check:
            self._check()

    def __hash__(self):
        """Hash of the permutation tuple; refuses mutable constellations."""
        if self._mutable:
            raise ValueError('cannot hash mutable constellation')
        return hash(tuple(self._g))

    def set_immutable(self):
        """Make this constellation immutable (enables hashing/caching)."""
        self._mutable = False

    def is_mutable(self):
        """Return whether this constellation can still be modified."""
        return self._mutable

    def switch(self, i, j0, j1):
        """Multiply g[i] on the right and g[i+1] on the left by (j0 j1).

        This preserves the identity product; only allowed when mutable.
        """
        if (not self._mutable):
            raise ValueError('this constellation is immutable. Take a mutable copy first.')
        S = SymmetricGroup(list(range(self.degree())))
        tr = S((j0, j1))
        i = int(i)
        if ((i < 0) or (i >= len(self._g))):
            raise ValueError('index out of range')
        ii = (i + 1)
        if (ii == len(self._g)):
            ii = 0  # wrap around: last permutation pairs with the first
        self._g[i] = (self._g[i] * tr)
        self._g[ii] = (tr * self._g[ii])

    def euler_characteristic(self):
        """Euler characteristic 2d - sum over i of (d - #cycles of g[i])."""
        return Integer(((self.degree() * 2) - sum((sum(((j - 1) for j in self.profile(i))) for i in range(self.length())))))

    def genus(self):
        """Genus of the underlying surface, from the Euler characteristic."""
        return (1 - (self.euler_characteristic() // 2))

    def _check(self):
        """Validate the data: identity product and (if required) connectivity."""
        d = self.degree()
        Sd = self.parent()._sym
        if (prod(self._g, Sd.one()) != Sd.one()):
            raise ValueError('the product is not identity')
        if (self._connected and (not perms_are_connected(self._g, d))):
            raise ValueError('not connected')

    def __copy__(self):
        """Return a copy with the same mutability; skips re-validation."""
        return self.parent()([gg for gg in self._g], check=False, mutable=self._mutable)
    copy = __copy__

    def mutable_copy(self):
        """Return a mutable copy of this constellation."""
        return self.parent()([gg for gg in self._g], check=False, mutable=True)

    def is_connected(self):
        """Return whether the permutation group action is transitive."""
        if self._connected:
            return True
        else:
            return perms_are_connected(self._g, self.degree())

    def connected_components(self):
        """Return the list of connected components as constellations."""
        if self._connected:
            return [self]
        G = Graph()
        G.add_vertices(list(range(self.degree())))
        for p in self._g:
            G.add_edges(enumerate(p.domain()), loops=False)
        m = G.connected_components(sort=False)
        if (len(m) == 1):
            return [self]
        # Sort for a deterministic decomposition.
        for mm in m:
            mm.sort()
        m.sort()
        g = [[] for _ in repeat(None, len(m))]
        m_inv = ([None] * self.degree())
        for (t, mt) in enumerate(m):
            # Relabel each component's points to 0..len(mt)-1.
            for (i, mti) in enumerate(mt):
                m_inv[mti] = i
            for k in range(self.length()):
                tmp = ([None] * len(mt))
                for (i, mti) in enumerate(mt):
                    tmp[i] = m_inv[self._g[k](mti)]
                g[t].append(tmp)
        return [Constellation(g=g[i], check=False) for i in range(len(m))]

    def _richcmp_(self, other, op):
        """Compare by length, then degree, then the permutations in order."""
        if (not isinstance(other, Constellation_class)):
            return (op == op_NE)
        if (op == op_EQ):
            return (self._g == other._g)
        if (op == op_NE):
            return (self._g != other._g)
        lx = self.length()
        rx = other.length()
        if (lx != rx):
            return richcmp_not_equal(lx, rx, op)
        lx = self.degree()
        rx = other.degree()
        if (lx != rx):
            return richcmp_not_equal(lx, rx, op)
        # The last permutation is determined by the others (identity product),
        # so comparing length-1 of them suffices.
        for i in range((self.length() - 1)):
            lx = self._g[i]
            rx = other._g[i]
            if (lx != rx):
                return richcmp_not_equal(lx, rx, op)
        return rich_to_bool(op, 0)

    def is_isomorphic(self, other, return_map=False):
        """Test isomorphism via canonical relabelling; optionally return the map."""
        if return_map:
            if (not ((self.degree() == other.degree()) and (self.length() == other.length()))):
                return (False, None)
            (sn, sn_map) = self.relabel(return_map=True)
            (on, on_map) = other.relabel(return_map=True)
            if (sn != on):
                return (False, None)
            return (True, (sn_map * (~ on_map)))
        return ((self.degree() == other.degree()) and (self.length() == other.length()) and (self.relabel() == other.relabel()))

    def _repr_(self):
        """Human-readable representation listing each permutation's cycles."""
        s = 'Constellation of length {} and degree {}'.format(self.length(), self.degree())
        for i in range(self.length()):
            s += '\ng{} {}'.format(i, self._g[i].cycle_string(True))
        return s

    def degree(self):
        """Number of points the permutations act on."""
        return self.parent()._degree

    def length(self):
        """Number of permutations in the constellation."""
        return Integer(len(self._g))

    def profile(self, i=None):
        """Cycle-type partition of g[i], or the tuple of all profiles if i is None."""
        if (i is None):
            return tuple((self.profile(j) for j in range(self.length())))
        else:
            parts = [len(cy) for cy in self._g[i].cycle_tuples(True)]
            return Partition(sorted(parts, reverse=True))
    passport = profile

    def g(self, i=None):
        """Return a copy of the permutation list, or a copy of g[i]."""
        from copy import copy
        if (i is None):
            return copy(self._g)
        else:
            gi = self._g[i]
            return gi.parent()(gi)

    def relabel(self, perm=None, return_map=False):
        """Relabel by `perm`, or return the canonical form (cached on immutables)."""
        if (perm is not None):
            g = [([None] * self.degree()) for _ in range(self.length())]
            for i in range(len(perm.domain())):
                for k in range(self.length()):
                    g[k][perm(i)] = perm(self._g[k](i))
            return Constellation(g=g, check=False, mutable=self.is_mutable())
        # Canonical form: use cached values when available.
        if return_map:
            try:
                return (self._normal_form, self._normal_form_map)
            except AttributeError:
                pass
        else:
            try:
                return self._normal_form
            except AttributeError:
                pass
        if (not self.is_connected()):
            raise ValueError('no canonical labels implemented for non connected constellation')
        domain = list(self.parent()._sym.domain())
        index = {e: i for (i, e) in enumerate(domain)}
        g = [[index[gg(i)] for i in domain] for gg in self._g]
        (c_win, m_win) = perms_canonical_labels(g)
        c_win = [[domain[i] for i in gg] for gg in c_win]
        m_win = self.parent()._sym([domain[i] for i in m_win])
        c_win = self.parent()(c_win, mutable=False, check=False)
        if (not self.is_mutable()):
            # Cache on both this object and the canonical representative.
            self._normal_form = c_win
            self._normal_form_map = m_win
            c_win._normal_form = c_win
            c_win._normal_form_map = m_win
        if return_map:
            return (c_win, m_win)
        else:
            return c_win

    def braid_group_action(self, i):
        """Apply the i-th braid generator: (g[i], g[i+1]) -> (g[i+1], g[i+1]^-1 g[i] g[i+1])."""
        if ((i < 0) or (i >= self.length())):
            txt = 'i should be between 0 and {}'
            raise ValueError(txt.format((self.length() - 1)))
        j = (i + 1)
        if (j == self.length()):
            j = 0  # wrap around to the first permutation
        h = self.copy()
        si = self._g[i]
        sj = self._g[j]
        h._g[i] = sj
        h._g[j] = (((~ sj) * si) * sj)
        return h

    def braid_group_orbit(self):
        """Return the braid-group orbit of this constellation as a digraph.

        Vertices are canonical forms; an edge labelled i joins c to the
        canonical form of the i-th braid move applied to c.
        """
        from sage.graphs.digraph import DiGraph
        G = DiGraph(multiedges=True, loops=True)
        waiting = [self.relabel()]
        while waiting:
            c = waiting.pop()
            G.add_vertex(c)
            for i in range(self.length()):
                # Fixed: act on the popped vertex c, not on self, so the
                # BFS actually explores the whole orbit.
                cc = c.braid_group_action(i).relabel()
                if (cc not in G):
                    waiting.append(cc)
                G.add_edge(c, cc, i)
        return G
def _fit_single_estimator(estimator, X, y, sample_weight=None, message_clsname=None, message=None):
    """Fit `estimator` on (X, y), forwarding sample weights when provided.

    If the estimator's `fit` does not accept `sample_weight`, the resulting
    TypeError is re-raised with a clearer message; other TypeErrors propagate
    unchanged.
    """
    if sample_weight is None:
        with _print_elapsed_time(message_clsname, message):
            estimator.fit(X, y)
        return estimator
    try:
        with _print_elapsed_time(message_clsname, message):
            estimator.fit(X, y, sample_weight=sample_weight)
    except TypeError as exc:
        if ("unexpected keyword argument 'sample_weight'" not in str(exc)):
            raise
        raise TypeError('Underlying estimator {} does not support sample weights.'.format(estimator.__class__.__name__)) from exc
    return estimator
class BitBottleneckLayer(nn.Module):
    """Pre-activation-style BiT bottleneck block (1x1 -> 3x3 -> 1x1).

    Uses weight-standardized convolutions with group-norm activations; an
    optional downsampling shortcut is added on the first layer of a stage.
    """

    def __init__(self, config, in_channels, out_channels=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, drop_path_rate=0.0, is_first_layer=False):
        super().__init__()
        first_dilation = (first_dilation or dilation)
        out_channels = (out_channels or in_channels)
        # Bottleneck width, rounded by make_div.
        mid_chs = make_div((out_channels * bottle_ratio))
        if is_first_layer:
            # Projection shortcut to match channels/stride at stage entry.
            self.downsample = BitDownsampleConv(config, in_channels, out_channels, stride=stride, preact=False)
        else:
            self.downsample = None
        self.conv1 = WeightStandardizedConv2d(in_channels, mid_chs, 1, eps=1e-08, padding=config.global_padding)
        self.norm1 = BitGroupNormActivation(config, num_channels=mid_chs)
        # Only the 3x3 conv carries the stride/dilation.
        self.conv2 = WeightStandardizedConv2d(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups, eps=1e-08, padding=config.global_padding)
        self.norm2 = BitGroupNormActivation(config, num_channels=mid_chs)
        self.conv3 = WeightStandardizedConv2d(mid_chs, out_channels, 1, eps=1e-08, padding=config.global_padding)
        # No activation after the last norm: applied after the residual add.
        self.norm3 = BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False)
        self.drop_path = (BitDropPath(drop_path_rate) if (drop_path_rate > 0) else nn.Identity())
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        """Apply the bottleneck and add the (possibly downsampled) shortcut."""
        shortcut = hidden_states
        if (self.downsample is not None):
            shortcut = self.downsample(hidden_states)
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.norm1(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.norm2(hidden_states)
        hidden_states = self.conv3(hidden_states)
        hidden_states = self.norm3(hidden_states)
        hidden_states = self.drop_path(hidden_states)
        # Residual connection, then the block's final activation.
        hidden_states = self.activation((hidden_states + shortcut))
        return hidden_states
def main():
    """Visualize saved detection results against the test dataset.

    Loads the config and the pickled predictions given on the command line,
    rebuilds the test dataset with the training loading pipeline (so raw
    images/annotations are available for drawing), and runs the visualizer.
    """
    args = parse_args()
    mmcv.check_file_exist(args.prediction_path)
    cfg = Config.fromfile(args.config)
    update_data_root(cfg)
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    cfg.data.test.test_mode = True
    # Batch size is irrelevant here; drop it if present.
    cfg.data.test.pop('samples_per_gpu', 0)
    # Use the train loading pipeline so GT annotations are loaded for display.
    cfg.data.test.pipeline = get_loading_pipeline(cfg.data.train.pipeline)
    dataset = build_dataset(cfg.data.test)
    outputs = mmcv.load(args.prediction_path)
    result_visualizer = ResultVisualizer(args.show, args.wait_time, args.show_score_thr)
    result_visualizer.evaluate_and_show(dataset, outputs, topk=args.topk, show_dir=args.show_dir)
class TestNorms(unittest.TestCase):
    """Tests for torchdiffeq's custom norm / seminorm options.

    Checks that user-supplied `norm` callables are actually invoked with the
    expected state structure, that adjoint norms see the flattened augmented
    state, and that norm choice influences step acceptance (nfe counts).
    """

    def test_norm(self):
        """User norm is called with tensor state (or tuple state) as passed in."""
        def f(t, x):
            return x
        t = torch.tensor([0.0, 1.0])
        # Case 1: single-tensor state -> norm receives a tensor.
        is_called = False
        def norm(state):
            nonlocal is_called
            is_called = True
            self.assertIsInstance(state, torch.Tensor)
            self.assertEqual(state.shape, ())
            return state.pow(2).mean().sqrt()
        x0 = torch.tensor(1.0)
        torchdiffeq.odeint(f, x0, t, options=dict(norm=norm))
        self.assertTrue(is_called)
        # Case 2: 1-tuple state -> norm receives a 1-tuple.
        is_called = False
        def norm(state):
            nonlocal is_called
            is_called = True
            self.assertIsInstance(state, tuple)
            self.assertEqual(len(state), 1)
            (state,) = state
            self.assertEqual(state.shape, ())
            return state.pow(2).mean().sqrt()
        x0 = (torch.tensor(1.0),)
        torchdiffeq.odeint(f, x0, t, options=dict(norm=norm))
        self.assertTrue(is_called)
        # Case 3: 2-tuple state with mixed shapes.
        is_called = False
        def norm(state):
            nonlocal is_called
            is_called = True
            self.assertIsInstance(state, tuple)
            self.assertEqual(len(state), 2)
            (state1, state2) = state
            self.assertEqual(state1.shape, ())
            self.assertEqual(state2.shape, (2, 2))
            return state1.pow(2).mean().sqrt()
        x0 = (torch.tensor(1.0), torch.tensor([[0.5, 0.5], [0.1, 0.1]]))
        torchdiffeq.odeint(f, x0, t, options=dict(norm=norm))
        self.assertTrue(is_called)

    def test_adjoint_norm(self):
        """Adjoint norm sees (t, y, adj_y, *adj_params) and 'seminorm' drops params."""
        def f(t, x):
            return x
        t = torch.tensor([0.0, 1.0])
        adjoint_params = (torch.rand(7, requires_grad=True), torch.rand((), requires_grad=True))
        def make_spy_on_adjoint_norm(adjoint_norm, actual_norm):
            # Wrap the installed adjoint norm to verify it agrees with our
            # independently computed expectation.
            is_spy_called = [False]
            def spy_on_adjoint_norm(tensor):
                nonlocal is_spy_called
                is_spy_called[0] = True
                norm_result = adjoint_norm(tensor)
                true_norm_result = actual_norm(tensor)
                self.assertIsInstance(norm_result, torch.Tensor)
                self.assertEqual(norm_result.shape, true_norm_result.shape)
                self.assertLess((norm_result - true_norm_result).abs().max(), 1e-06)
                return norm_result
            return (spy_on_adjoint_norm, is_spy_called)
        # Tensor-state cases, across default/explicit/seminorm adjoint options.
        for shape in ((), (1,), (2, 2)):
            for (use_adjoint_options, seminorm) in ((False, False), (True, False), (True, True)):
                with self.subTest(shape=shape, use_adjoint_options=use_adjoint_options, seminorm=seminorm):
                    x0 = torch.full(shape, 1.0)
                    if use_adjoint_options:
                        if seminorm:
                            kwargs = dict(adjoint_options=dict(norm='seminorm'))
                        else:
                            kwargs = dict(adjoint_options={})
                    else:
                        kwargs = {}
                    xs = torchdiffeq.odeint_adjoint(f, x0, t, adjoint_params=adjoint_params, **kwargs)
                    # Reach into the autograd graph to grab the installed norm.
                    # NOTE(review): relies on torchdiffeq internals (grad_fn attrs).
                    _adjoint_norm = xs.grad_fn.adjoint_options['norm']
                    is_called = False
                    def actual_norm(tensor_tuple):
                        nonlocal is_called
                        is_called = True
                        self.assertIsInstance(tensor_tuple, tuple)
                        (t, y, adj_y, adj_param1, adj_param2) = tensor_tuple
                        self.assertEqual(t.shape, ())
                        self.assertEqual(y.shape, shape)
                        self.assertEqual(adj_y.shape, shape)
                        self.assertEqual(adj_param1.shape, (7,))
                        self.assertEqual(adj_param2.shape, ())
                        out = max(t.abs(), y.pow(2).mean().sqrt(), adj_y.pow(2).mean().sqrt())
                        if (not seminorm):
                            # Full norm also includes the parameter gradients.
                            out = max(out, adj_param1.pow(2).mean().sqrt(), adj_param2.abs())
                        return out
                    (xs.grad_fn.adjoint_options['norm'], is_spy_called) = make_spy_on_adjoint_norm(_adjoint_norm, actual_norm)
                    xs.sum().backward()
                    self.assertTrue(is_called)
                    self.assertTrue(is_spy_called[0])
            # Tuple-state cases: state is flattened to a single vector of size 5.
            for (use_adjoint_options, seminorm) in ((False, False), (True, False), (True, True)):
                with self.subTest(shape=shape, use_adjoint_options=use_adjoint_options, seminorm=seminorm):
                    x0 = (torch.tensor(1.0), torch.tensor([[0.5, 0.5], [0.1, 0.1]]))
                    if use_adjoint_options:
                        if seminorm:
                            kwargs = dict(adjoint_options=dict(norm='seminorm'))
                        else:
                            kwargs = dict(adjoint_options={})
                    else:
                        kwargs = {}
                    xs = torchdiffeq.odeint_adjoint(f, x0, t, adjoint_params=adjoint_params, **kwargs)
                    # For tuple outputs the adjoint options live deeper in the graph.
                    adjoint_options_dict = xs[0].grad_fn.next_functions[0][0].next_functions[0][0].adjoint_options
                    _adjoint_norm = adjoint_options_dict['norm']
                    is_called = False
                    def actual_norm(tensor_tuple):
                        nonlocal is_called
                        is_called = True
                        self.assertIsInstance(tensor_tuple, tuple)
                        (t, y, adj_y, adj_param1, adj_param2) = tensor_tuple
                        self.assertEqual(t.shape, ())
                        self.assertEqual(y.shape, (5,))
                        self.assertEqual(adj_y.shape, (5,))
                        self.assertEqual(adj_param1.shape, (7,))
                        self.assertEqual(adj_param2.shape, ())
                        # Split the flattened state back into its two components.
                        ya = y[0]
                        yb = y[1:]
                        adj_ya = adj_y[0]
                        adj_yb = adj_y[1:4]
                        out = max(t.abs(), ya.abs(), yb.pow(2).mean().sqrt(), adj_ya.abs(), adj_yb.pow(2).mean().sqrt())
                        if (not seminorm):
                            out = max(out, adj_param1.pow(2).mean().sqrt(), adj_param2.abs())
                        return out
                    (spy_on_adjoint_norm, is_spy_called) = make_spy_on_adjoint_norm(_adjoint_norm, actual_norm)
                    adjoint_options_dict['norm'] = spy_on_adjoint_norm
                    xs[0].sum().backward()
                    self.assertTrue(is_called)
                    self.assertTrue(is_spy_called[0])
        # User-supplied adjoint norm, tensor state: gets the unflattened pieces.
        is_called = False
        def adjoint_norm(tensor_tuple):
            nonlocal is_called
            is_called = True
            self.assertIsInstance(tensor_tuple, tuple)
            (t, y, adj_y, adj_param1, adj_param2) = tensor_tuple
            self.assertEqual(t.shape, ())
            self.assertEqual(y.shape, ())
            self.assertEqual(adj_y.shape, ())
            self.assertEqual(adj_param1.shape, (7,))
            self.assertEqual(adj_param2.shape, ())
            return max(t.abs(), y.pow(2).mean().sqrt(), adj_y.pow(2).mean().sqrt(), adj_param1.pow(2).mean().sqrt(), adj_param2.abs())
        x0 = torch.tensor(1.0)
        xs = torchdiffeq.odeint_adjoint(f, x0, t, adjoint_params=adjoint_params, adjoint_options=dict(norm=adjoint_norm))
        xs.sum().backward()
        self.assertTrue(is_called)
        # User-supplied adjoint norm, tuple state: components passed separately.
        is_called = False
        def adjoint_norm(tensor_tuple):
            nonlocal is_called
            is_called = True
            self.assertIsInstance(tensor_tuple, tuple)
            (t, ya, yb, adj_ya, adj_yb, adj_param1, adj_param2) = tensor_tuple
            self.assertEqual(t.shape, ())
            self.assertEqual(ya.shape, ())
            self.assertEqual(yb.shape, (2, 2))
            self.assertEqual(adj_ya.shape, ())
            self.assertEqual(adj_yb.shape, (2, 2))
            self.assertEqual(adj_param1.shape, (7,))
            self.assertEqual(adj_param2.shape, ())
            return max(t.abs(), ya.abs(), yb.pow(2).mean().sqrt(), adj_ya.abs(), adj_yb.pow(2).mean().sqrt(), adj_param1.pow(2).mean().sqrt(), adj_param2.abs())
        x0 = (torch.tensor(1.0), torch.tensor([[0.5, 0.5], [0.1, 0.1]]))
        xs = torchdiffeq.odeint_adjoint(f, x0, t, adjoint_params=adjoint_params, adjoint_options=dict(norm=adjoint_norm))
        xs[0].sum().backward()
        self.assertTrue(is_called)

    def test_large_norm(self):
        """A larger norm forces more rejected steps, hence at least as many nfe."""
        def norm(tensor):
            return tensor.abs().max()
        def large_norm(tensor):
            return (10 * tensor.abs().max())
        for dtype in DTYPES:
            for device in DEVICES:
                for method in ADAPTIVE_METHODS:
                    # dopri8 in float32 is excluded (presumably tolerance issues).
                    if ((dtype == torch.float32) and (method == 'dopri8')):
                        continue
                    with self.subTest(dtype=dtype, device=device, method=method):
                        x0 = torch.tensor([1.0, 2.0], device=device, dtype=dtype)
                        t = torch.tensor([0.0, 1.0], device=device, dtype=torch.float64)
                        norm_f = _NeuralF(width=10, oscillate=True).to(device, dtype)
                        torchdiffeq.odeint(norm_f, x0, t, method=method, options=dict(norm=norm))
                        large_norm_f = _NeuralF(width=10, oscillate=True).to(device, dtype)
                        # Copy weights so both runs integrate the same dynamics.
                        with torch.no_grad():
                            for (norm_param, large_norm_param) in zip(norm_f.parameters(), large_norm_f.parameters()):
                                large_norm_param.copy_(norm_param)
                        torchdiffeq.odeint(large_norm_f, x0, t, method=method, options=dict(norm=large_norm))
                        self.assertLessEqual(norm_f.nfe, large_norm_f.nfe)

    def test_seminorm(self):
        """The 'seminorm' adjoint option should not need more function evals."""
        for dtype in DTYPES:
            for device in DEVICES:
                for method in ADAPTIVE_METHODS:
                    with self.subTest(dtype=dtype, device=device, method=method):
                        if (dtype == torch.float64):
                            tol = 1e-08
                        else:
                            tol = 1e-06
                        x0 = torch.tensor([1.0, 2.0], device=device, dtype=dtype)
                        t = torch.tensor([0.0, 1.0], device=device, dtype=torch.float64)
                        ode_f = _NeuralF(width=1024, oscillate=True).to(device, dtype)
                        # Baseline: default (full) adjoint norm.
                        out = torchdiffeq.odeint_adjoint(ode_f, x0, t, atol=tol, rtol=tol, method=method)
                        ode_f.nfe = 0
                        out.sum().backward()
                        default_nfe = ode_f.nfe
                        # Seminorm: parameter gradients excluded from step control.
                        out = torchdiffeq.odeint_adjoint(ode_f, x0, t, atol=tol, rtol=tol, method=method, adjoint_options=dict(norm='seminorm'))
                        ode_f.nfe = 0
                        out.sum().backward()
                        seminorm_nfe = ode_f.nfe
                        self.assertLessEqual(seminorm_nfe, default_nfe)
# NOTE(review): the two decorator lines were garbled in the source
# ('.parametrize(...)' with no '@pytest.mark' prefix; the factory_type
# tuple read "(' 'requests')"). Restored as pytest.mark.parametrize;
# the factory_type values could not be fully recovered — confirm against
# upstream (a 'werkzeug' entry may also belong in the tuple).
@pytest.mark.parametrize('factory_type', ('requests',))
@pytest.mark.parametrize('response_schema, payload, schema_path, instance, instance_path', (({'type': 'object'}, [], ['type'], [], []), ({'$ref': '#/components/schemas/Foo'}, [], ['type'], [], []), ({'type': 'object', 'properties': {'foo': {'type': 'object'}}}, {'foo': 42}, ['properties', 'foo', 'type'], 42, ['foo'])))
def test_validate_response_schema_path(response_factory, factory_type, empty_open_api_3_schema, response_schema, payload, schema_path, instance, instance_path):
    """Failed response validation must expose schema_path/instance_path context."""
    empty_open_api_3_schema['paths'] = {'/test': {'post': {'responses': {'200': {'description': 'OK', 'content': {'application/json': {'schema': response_schema}}}}}}}
    empty_open_api_3_schema['components'] = {'schemas': {'Foo': {'type': 'object'}}}
    schema = schemathesis.from_dict(empty_open_api_3_schema)
    response = getattr(response_factory, factory_type)(content=json.dumps(payload).encode('utf-8'))
    with pytest.raises(CheckFailed) as exc:
        schema['/test']['POST'].validate_response(response)
    # The failure context should pinpoint where in the schema and instance
    # the mismatch occurred.
    assert (exc.value.context.schema_path == schema_path)
    assert (exc.value.context.schema == {'type': 'object'})
    assert (exc.value.context.instance == instance)
    assert (exc.value.context.instance_path == instance_path)
class IntQuantizer(Function):
    """Gemmlowp-style integer quantizer with optional analytic clipping.

    Tensors are quantized to ``num_bits`` bits on a uniform grid described by
    a range (``delta``) and an offset (``min_value``).  The clipping threshold
    ``alpha`` can be chosen analytically for Laplace- or Gauss-distributed
    values via the precomputed per-bit-width ``alpha_laplace`` / ``alpha_gaus``
    ratio tables, minimizing the expected clipping + quantization MSE.
    """

    def __init__(self, size, params):
        # Target quantization bit width.
        self.num_bits = size
        # When True, add uniform noise in [-0.5, 0.5) before rounding.
        self.stochastic = False
        self.int_exp = False
        # Shift the grid so that exact zero is representable.
        self.enforce_true_zero = True
        # Clipping policy; values seen below: 'no' | 'laplace' | 'gaus' | 'exp' | 'mix' | 'test'.
        self.clipping = params['threshold']
        # Optimal alpha/sigma ratio per bit width for a Gaussian prior.
        self.alpha_gaus = {2: 1.71, 3: 2.15, 4: 2.55, 5: 2.93, 6: 3.28, 7: 3.61, 8: 3.92}
        # Optimal alpha/b ratio per bit width for a Laplace prior.
        self.alpha_laplace = {2: 2.83, 3: 3.89, 4: 5.03, 5: 6.2, 6: 7.41, 7: 8.64, 8: 9.89}
        # Constant relating the expected range of N Gaussian samples to sigma.
        self.gaussian_const = ((0.5 * 0.35) * (1 + ((math.pi * math.log(4)) ** 0.5)))

    def __call__(self, tensor, tag='', stat_id=None):
        """Quantize `tensor`.

        Non-activation tensors — or clipping='no' — take the plain min/max
        path; activations go through the analytically clipped path.
        """
        if ((self.clipping == 'no') or (tag != 'activation')):
            (min_, max_, mean_) = (None, None, None)
            if (stat_id is not None):
                # Use pre-collected per-layer statistics instead of the live tensor.
                kind = {'min': 'mean', 'max': 'mean', 'mean': 'mean', 'std': 'mean', 'range': 'mean', 'mean_abs': 'mean', 'b': 'mean'}
                if ((tag == 'activation_linear') and (tensor.shape[1] == 1000)):
                    # 1000-wide linear output (presumably ImageNet logits): use extreme stats.
                    kind['min'] = 'min'
                    kind['max'] = 'max'
                    kind['range'] = 'max'
                (min_, max_, mean_, _, _, _, _) = SM().get_tensor_stats(stat_id, kind)
            return self.gemmlowpQuantize(tensor, min_value=min_, max_value=max_, mean=mean_)
        else:
            return self.gemmlowpClippingQuantize(tensor, tag, stat_id=stat_id, clip_type=self.clipping)

    def get_alpha_laplace(self, tensor, stat_id=None):
        """alpha = alpha_laplace[bits] * b, where b is the Laplace scale (mean |x - mu|)."""
        if (stat_id is not None):
            kind = {'min': 'mean', 'max': 'mean', 'mean': 'mean', 'std': 'mean', 'range': 'mean', 'mean_abs': 'mean', 'b': 'mean'}
            (_, _, _, _, _, _, b) = SM().get_tensor_stats(stat_id, kind)
        else:
            b = torch.mean(torch.abs((tensor - tensor.mean()))).cpu().numpy()
        return (self.alpha_laplace[self.num_bits] * b)

    def get_alpha_gaus(self, tensor, tag, stat_id=None):
        """alpha = alpha_gaus[bits] * sigma, with sigma estimated from the sample range."""
        if ((tag == 'activation') and (len(tensor.shape) == 4)):
            # Per-sample element count for 4-D activations (batch axis excluded).
            N = ((tensor.shape[1] * tensor.shape[2]) * tensor.shape[3])
        else:
            N = tensor.view((- 1)).size()[0]
        if (stat_id is not None):
            kind = {'min': 'mean', 'max': 'mean', 'mean': 'mean', 'std': 'mean', 'range': 'mean', 'mean_abs': 'mean', 'b': 'mean'}
            (min_value, max_value, _, std, _, _, _) = SM().get_tensor_stats(stat_id, kind)
        else:
            min_value = tensor.min()
            max_value = tensor.max()
            # Estimate sigma from the expected range of N Gaussian samples.
            std = (((max_value - min_value) * self.gaussian_const) / ((2 * math.log(N)) ** 0.5))
        return (self.alpha_gaus[self.num_bits] * std)

    def alpha2DeltaOffset(self, alpha, max_value, min_value, mean):
        """Turn a symmetric clipping threshold into a (range, offset) pair.

        Clips to a window of width 2*alpha re-centred on the mean, but never
        wider than the observed [min, max] range.
        """
        max_range = (max_value - min_value)
        if ((alpha <= 0) or (alpha >= (max_range / 2))):
            delta = max_range
        else:
            delta = (2 * alpha)
            min_value = max(min_value, (mean - (delta / 2)))
        return (delta, min_value)

    def gemmlowpClippingQuantize(self, input, tag='', stat_id=None, clip_type='laplace'):
        """Quantize `input` with a clipping range selected by `clip_type`.

        NOTE(review): `input` and (below) `range` shadow builtins — kept as-is.
        NOTE(review): the 'mix' and 'test' branches read `b` and `std`, which are
        only bound on the stat_id path; presumably those modes are always used
        with collected stats — confirm.
        """
        if (stat_id is not None):
            kind = {'min': 'mean', 'max': 'mean', 'mean': 'mean', 'std': 'mean', 'range': 'mean', 'mean_abs': 'mean', 'b': 'mean'}
            (min_value, max_value, mean, std, range, mean_abs, b) = SM().get_tensor_stats(stat_id, kind)
        else:
            min_value = input.min()
            max_value = input.max()
            mean = input.mean()
        max_range = (max_value - min_value)
        if (clip_type == 'laplace'):
            alpha = self.get_alpha_laplace(input, stat_id)
        elif (clip_type == 'gaus'):
            alpha = self.get_alpha_gaus(input, tag, stat_id)
        elif (clip_type == 'exp'):
            # NOTE(review): get_alpha_exp is not defined in this class as shown here;
            # presumably provided elsewhere — confirm.
            alpha = self.get_alpha_exp(input, stat_id)
        elif (clip_type == 'mix'):
            # Pick whichever prior (Laplace vs Gauss) predicts the lower MSE.
            alpha_laplace = self.get_alpha_laplace(input, stat_id)
            alpha_gause = self.get_alpha_gaus(input, tag, stat_id)
            mse_est_laplace = IntQuantizer.mse_laplace(b, alpha_laplace, self.num_bits)
            mse_est_gaus = IntQuantizer.mse_gaus(std, alpha_gause, self.num_bits)
            if (mse_est_laplace < mse_est_gaus):
                alpha = alpha_laplace
            else:
                alpha = alpha_gause
        elif (clip_type == 'test'):
            # Diagnostic mode: measure actual vs estimated MSE per policy, print, and
            # return the input UNQUANTIZED.
            (mse_laplace, mse_laplace_est) = self.__clip_and_mse_mesure(input, tag, stat_id, 'laplace', max_value, min_value, mean, std, b)
            (mse_gaus, mse_gaus_est) = self.__clip_and_mse_mesure(input, tag, stat_id, 'gaus', max_value, min_value, mean, std, b)
            (mse_no_clip, _) = self.__clip_and_mse_mesure(input, tag, stat_id, 'no', max_value, min_value, mean, std, b)
            min_mse_id = np.argmin([mse_no_clip, mse_laplace, mse_gaus])
            clippings = ['no clipping', 'laplace', 'gaussian']
            print(('%s - MSE no clipping: %f, laplace: %f, laplace est: %f, gaussian: %f, gaussian est: %f, min mse: %s, bits: %d, std: %f, b: %f' % (stat_id, mse_no_clip, mse_laplace, mse_laplace_est, mse_gaus, mse_gaus_est, clippings[min_mse_id], self.num_bits, std, b)))
            return input
        else:
            # Unknown policy: fall back to the full observed range (no clipping).
            alpha = (max_range / 2)
        (delta, min_value) = self.alpha2DeltaOffset(alpha, max_value, min_value, mean)
        res = self.__gemmlowpQuantize__(input.contiguous(), delta, min_value)
        return res

    def gemmlowpQuantize(self, tensor, min_value=None, max_value=None, mean=None):
        """Plain quantization over [min_value, max_value] (defaults to the tensor's extremes)."""
        if (min_value is None):
            min_value = tensor.detach().min()
        if (max_value is None):
            max_value = tensor.detach().max()
        range = (max_value - min_value)  # NOTE(review): shadows the builtin; kept as-is
        return self.__gemmlowpQuantize__(tensor, range, min_value)

    def mse_laplace(b, alpha, num_bits):
        """Expected clipping + quantization MSE for Laplace(b) data clipped at alpha.

        NOTE(review): invoked as IntQuantizer.mse_laplace(...) — effectively a static
        method; the @staticmethod decorator appears to be missing but that call
        style works.
        """
        return (((2 * (b ** 2)) * np.exp(((- alpha) / b))) + ((alpha ** 2) / (3 * (2 ** (2 * num_bits)))))

    def mse_gaus(sigma, alpha, num_bits):
        """Expected clipping + quantization MSE for N(0, sigma^2) data clipped at alpha.

        NOTE(review): same implicit-static calling convention as mse_laplace.
        """
        clipping_err = ((((sigma ** 2) + (alpha ** 2)) * (1 - math.erf((alpha / (sigma * np.sqrt(2.0)))))) - (((np.sqrt((2.0 / np.pi)) * alpha) * sigma) * (np.e ** (((- 1) * (0.5 * (alpha ** 2))) / (sigma ** 2)))))
        quant_err = ((alpha ** 2) / (3 * (2 ** (2 * num_bits))))
        return (clipping_err + quant_err)

    def __clip_and_mse_mesure(self, tensor, tag, stat_id, clip_type, max_value, min_value, mean, std, b):
        """Quantize under one clipping policy and return (measured MSE, estimated MSE).

        NOTE(review): 'mesure' typo kept — renaming would change the (mangled) name.
        """
        if (clip_type == 'laplace'):
            alpha = self.get_alpha_laplace(tensor, stat_id)
            mse_est = IntQuantizer.mse_laplace(b, alpha, self.num_bits)
        elif (clip_type == 'gaus'):
            alpha = self.get_alpha_gaus(tensor, tag, stat_id)
            mse_est = IntQuantizer.mse_gaus(std, alpha, self.num_bits)
        else:
            # No clipping: use the full observed range; no analytic estimate (-1).
            alpha = ((max_value - min_value) / 2)
            mse_est = (- 1)
        (delta, min_value) = self.alpha2DeltaOffset(alpha, max_value, min_value, mean)
        res = self.__gemmlowpQuantize__(tensor.contiguous(), delta, min_value)
        mse = torch.mean(((tensor - res) ** 2))
        del res
        return (mse, mse_est)

    def __gemmlowpQuantize__(self, tensor, delta, offset):
        """Dispatch to the CUDA gemmlowp kernel with optional stochastic rounding."""
        if self.stochastic:
            noise = tensor.new(tensor.shape).uniform_((- 0.5), 0.5)
        else:
            noise = torch.cuda.FloatTensor(tensor.shape).fill_(0)
        # Preserve exact zero only when the quantization range actually spans zero.
        preserve_zero = (self.enforce_true_zero and ((offset + delta) > 0) and (offset < 0))
        return int_quantization.float2gemmlowp(tensor.contiguous(), delta, offset, self.num_bits, self.int_exp, preserve_zero, noise)
def initialize(NI, NJ, NK, datatype=np.float64):
    """Build the GEMM benchmark inputs: scalars (alpha, beta) and matrices C, A, B.

    All three matrices get the benchmark's deterministic pseudo-random fill
    pattern ((expr % mod) / mod) so runs are reproducible.
    """
    def fill(expr, shape, mod):
        # Evaluate expr over the index grid, wrap by mod, normalize to [0, 1).
        return np.fromfunction(lambda u, v: (expr(u, v) % mod) / mod, shape, dtype=datatype)

    C = fill(lambda i, j: i * j + 1, (NI, NJ), NI)
    A = fill(lambda i, k: i * (k + 1), (NI, NK), NK)
    B = fill(lambda k, j: k * (j + 2), (NK, NJ), NJ)
    return (datatype(1.5), datatype(1.2), C, A, B)
def retrieve_step_blobs(net, prefix='rnn'):
    """Fetch per-step scratch blobs for every RecurrentNetwork op in `net`.

    For each recurrent op, a RecurrentNetworkBlobFetcher op is run on the op's
    scratch-workspaces blob (its last output) and the fetched values are
    concatenated into one flat list.
    """
    fetched = []
    index = 1
    for op in net.Proto().op:
        if op.type != 'RecurrentNetwork':
            continue
        blob_name = prefix + '_' + str(index)
        index += 1
        # The scratch workspaces blob is by convention the op's last output.
        scratch_workspaces_blob = op.output[-1]
        workspace.RunOperatorOnce(
            core.CreateOperator(
                'RecurrentNetworkBlobFetcher',
                [scratch_workspaces_blob],
                [blob_name],
                prefix=prefix,
            )
        )
        fetched += workspace.FetchBlob(blob_name).tolist()
    return fetched
def all_generator_source():
    """Return a sorted list of all files under 'tools' whose extension is in `source_files`."""
    matches = []
    for dirpath, _, filenames in os.walk('tools'):
        matches.extend(
            os.path.join(dirpath, name)
            for name in filenames
            if os.path.splitext(name)[1] in source_files
        )
    return sorted(matches)
def list_sum(x):
    """Left-fold `+` over a non-empty sequence.

    Works for any `+`-combinable elements (numbers, lists, strings, ...).
    Iterative rewrite of the original recursion, which copied the tail slice
    on every call (O(n^2)) and raised RecursionError for ~1000+ elements.
    Raises IndexError on empty input, matching the original behavior.
    """
    total = x[0]
    for item in x[1:]:
        total = total + item
    return total
# NOTE(review): the line below looks like a mangled decorator — presumably
# `@_utils.test(require=ti.extension.sparse)`; the `@` appears lost in extraction.
_utils.test(require=ti.extension.sparse)
def test_complex_pointer():
    """Exercise a mixed pointer/dense SNode tree: write via sparse fields, copy to dense, verify.

    NOTE(review): the nested set_* functions are presumably `@ti.kernel`s upstream;
    the decorators appear lost in extraction.
    """
    # Dense destination fields mirroring each sparse field's shape.
    a = ti.field(ti.i32, shape=(4, 4))
    b = ti.field(ti.i32, shape=(16, 16))
    c = ti.field(ti.i32, shape=(16, 4))
    d = ti.field(ti.i32, shape=(4, 4, 4))
    # Sparse fields placed under a shared pointer root block.
    w = ti.field(ti.i32)
    x = ti.field(ti.i32)
    y = ti.field(ti.i32)
    z = ti.field(ti.i32)
    blk = ti.root.pointer(ti.ij, 4)
    blk.place(w)
    # Nested pointer -> dense: effective shape (16, 16).
    blk.pointer(ti.ij, 2).dense(ti.ij, 2).place(x)
    # Dense along i only: effective shape (16, 4).
    blk.dense(ti.i, 4).place(y)
    # Dense along a new axis k: effective shape (4, 4, 4).
    blk.dense(ti.k, 4).place(z)
    def set_w():
        for I in ti.grouped(ti.ndrange(4, 4)):
            w[I] = 1
    def set_x():
        for I in ti.grouped(ti.ndrange(16, 16)):
            x[I] = 2
    def set_y():
        for I in ti.grouped(ti.ndrange(16, 4)):
            y[I] = 3
    def set_z():
        for I in ti.grouped(ti.ndrange(4, 4, 4)):
            z[I] = 4
    # Copy kernels: iterate only the (now fully activated) sparse fields.
    def set_a():
        for I in ti.grouped(w):
            a[I] = w[I]
    def set_b():
        for I in ti.grouped(x):
            b[I] = x[I]
    def set_c():
        for I in ti.grouped(y):
            c[I] = y[I]
    def set_d():
        for I in ti.grouped(z):
            d[I] = z[I]
    set_w()
    set_x()
    set_y()
    set_z()
    set_a()
    set_b()
    set_c()
    set_d()
    # Every dense cell must have received the sparse field's fill value.
    for i in range(4):
        for j in range(4):
            assert (a[(i, j)] == 1)
    for i in range(16):
        for j in range(16):
            assert (b[(i, j)] == 2)
    for i in range(16):
        for j in range(4):
            assert (c[(i, j)] == 3)
    for i in range(4):
        for j in range(4):
            for k in range(4):
                assert (d[(i, j, k)] == 4)
def softmax_predictive_accuracy(logits_list, y, criterion, ret_loss=False):
    """Count correct predictions of an ensemble's averaged logits.

    The members' logits are stacked along a new trailing dim and averaged,
    then argmax'd against the labels `y`.  When `ret_loss` is True, also
    return `criterion(mean_logits, y)` as a Python float.
    """
    stacked = torch.stack(list(logits_list), dim=2)
    mean_logits = torch.mean(stacked, dim=2)
    loss = criterion(mean_logits, y).item() if ret_loss else None
    predicted = mean_logits.max(1)[1]
    n_correct = predicted.eq(y.view_as(predicted)).sum().item()
    return (n_correct, loss) if ret_loss else n_correct
def load_json(fname, subdict=None):
    """Parse the JSON file `fname`; optionally return only its `subdict` entry.

    Raises Exception (chained from the JSON decode error) when the file is
    not valid JSON; raises KeyError if `subdict` is missing.
    """
    try:
        with open(fname) as handle:
            params = json.load(handle)
    except ValueError as err:
        raise Exception(f'Unable to load file {fname}') from err
    return params if subdict is None else params[subdict]
def is_parallel(model):
    """True iff `model` is exactly a DataParallel/DistributedDataParallel wrapper.

    Deliberately an exact type check (not isinstance), matching the original:
    subclasses of the parallel wrappers are not treated as parallel.
    """
    parallel_types = (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
    return type(model) in parallel_types
def load_results(align_fine='none'):
    """Load the aggregated 'ours' and 'nerfopt' result tensors.

    align_fine selects which results directory suffix is used:
    'icp.<type>' -> '_align{align_fine}', 'none' -> '_alignNone'.
    Anything else raises ValueError.
    """
    aggregated_dir = Path(to_absolute_path('results/aggregated/'))
    figure_dir = Path(to_absolute_path('figures/'))
    if align_fine.startswith('icp'):
        suffix = f'_align{align_fine}'
    elif align_fine == 'none':
        suffix = '_alignNone'
    else:
        raise ValueError(f'align_fine={align_fine} must be one of [icp.<type>, none]')
    aggregated_dir = aggregated_dir.parent / (aggregated_dir.name + suffix)
    figure_dir = figure_dir.parent / (figure_dir.name + suffix)
    ours_results = torch.load(aggregated_dir / 'ours.pth')
    nerfopt_results = torch.load(aggregated_dir / 'nerfopt.pth')
    return (ours_results, nerfopt_results)
class BufferBinding():
    """Maps one kernel argument onto a shader buffer binding slot."""

    def __init__(self, binding: int, iarg: int, buffer_bind_ty: BufferBindingType):
        # Binding slot index in the shader interface.
        self.binding: int = binding
        # Positional index of the corresponding kernel argument.
        self.iarg: int = iarg
        # How the buffer is bound (see BufferBindingType).
        self.buffer_bind_ty: BufferBindingType = buffer_bind_ty
def get_path(u: ExtNode, v: ExtNode, ancestor: ExtNode, leaf_token, up_symbol=UP, down_symbol=DOWN):
    """Build the AST path between leaves `u` and `v` through their common `ancestor`.

    Returns (start_token, path, end_token) where `path` is a tuple of node
    labels interleaved with up/down movement symbols:
    u..UP..ancestor..DOWN..v.
    """
    start, end = leaf_token(u), leaf_token(v)
    # Climb from u to the ancestor, recording each node and an UP move.
    up_leg = []
    node = u
    while node != ancestor:
        up_leg.append(node.bn)
        up_leg.append(up_symbol)
        node = node.log_parents[0]
    # Climb from v the same way; this leg is reversed to descend from the ancestor.
    down_leg = []
    node = v
    while node != ancestor:
        down_leg.append(node.bn)
        down_leg.append(down_symbol)
        node = node.log_parents[0]
    full_path = up_leg + [ancestor.bn] + down_leg[::-1]
    return (start, tuple(full_path), end)
def vgg19():
    """Construct the 19-layer VGG variant ('M' entries mark pooling stages)."""
    cfg = [64, 64, 'M',
           128, 128, 'M',
           256, 256, 256, 256, 'M',
           512, 512, 512, 512, 'M',
           512, 512, 512, 512, 'M']
    return VGG(make_layers(cfg))
def modify_results(result_lines, duplicate_files):
    """Filter out result lines whose hole identity falls in a duplicate file.

    Each element of `result_lines` is a JSON string with a 'hole_identity'
    key; lines are kept only when is_valid_hole(...) accepts that identity.
    When `duplicate_files` is empty/falsy the input list is returned
    unchanged (same object), matching the original behavior.
    """
    if not duplicate_files:
        return result_lines
    # Idiomatic rewrite of the original index-based loop + append.
    return [
        line for line in result_lines
        if is_valid_hole(json.loads(line)['hole_identity'], duplicate_files)
    ]
def _create_entry(img, data, answer):
if (None != answer):
answer.pop('image_name')
answer.pop('qid')
entry = {'qid': data['qid'], 'image_name': data['image_name'], 'image': img, 'question': data['question'], 'answer': answer, 'answer_text': data['answer'], 'answer_type': data['answer_type'], 'question_type': data['question_type'], 'phrase_type': data['phrase_type']}
return entry |
def compute_vw_kohel_even_deg3(b2, b4, s1, s2, s3):
    """Kohel's (v, w) quantities for a degree-3 kernel (even case).

    b2, b4 are curve invariants; s1, s2, s3 are the elementary symmetric
    functions of the kernel polynomial roots.
    """
    # Second power sum of the roots: p2 = s1^2 - 2*s2.
    power_sum2 = s1 * s1 - 2 * s2
    v = 3 * power_sum2 + (b2 * s1 + 3 * b4) / 2
    # Third power sum appears inside w: s1^3 - 3*s1*s2 + 3*s3.
    w = 3 * (s1 ** 3 - 3 * s1 * s2 + 3 * s3) + (b2 * power_sum2 + b4 * s1) / 2
    return (v, w)
# NOTE(review): the line below looks like a mangled route decorator — presumably
# `@app.route('/start_session', methods=['GET', 'POST'])` upstream; as written
# (decorator target lost) it is not valid Python.
('/start_session', methods=['GET', 'POST'])
def start_session():
    """HTTP endpoint: forward the JSON request body to api.start_session as kwargs."""
    json_data = request.get_json()
    return api.start_session(**json_data)
class SawyerDrawerCloseEnv(SawyerXYZEnv):
    """MetaWorld-style Sawyer task: push a drawer shut (move the handle along -y)."""

    def __init__(self):
        # Workspace bounds for the gripper and for the drawer handle / goal site.
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.1), 0.9, 0.04)
        obj_high = (0.1, 0.9, 0.04)
        goal_low = ((- 0.1), 0.699, 0.04)
        goal_high = (0.1, 0.701, 0.04)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_angle': np.array([0.3], dtype=np.float32), 'obj_init_pos': np.array([0.0, 0.9, 0.04], dtype=np.float32), 'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32)}
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    # NOTE(review): in upstream metaworld this accessor is a @property (it is
    # read as `self.model_name` in __init__ above); the decorator appears lost
    # in extraction.
    def model_name(self):
        """Path to the drawer-scene MJCF model."""
        return full_v1_path_for('sawyer_xyz/sawyer_drawer.xml')

    # NOTE(review): mangled decorator — presumably `@_assert_task_is_set`.
    _assert_task_is_set
    def step(self, action):
        """Advance the sim one step; success when the handle is within 6 cm of the goal y."""
        ob = super().step(action)
        (reward, reachDist, pullDist) = self.compute_reward(action, ob)
        self.curr_path_length += 1
        info = {'reachDist': reachDist, 'goalDist': pullDist, 'epRew': reward, 'pickRew': None, 'success': float((pullDist <= 0.06))}
        # Episodes are never terminated by the environment itself.
        return (ob, reward, False, info)

    def _get_pos_objects(self):
        """World position of the drawer handle geom."""
        return self.data.get_geom_xpos('handle')

    def _set_obj_xyz(self, pos):
        """Write the drawer's single slide joint (qpos[9]) and push the state into mujoco."""
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9] = pos
        self.set_state(qpos, qvel)

    def reset_model(self):
        """Reset hand, (optionally) randomize the drawer position, open it, set the goal."""
        self._reset_hand()
        # Default goal: 20 cm in front of (closer than) the drawer body.
        self._target_pos = (self.obj_init_pos - np.array([0.0, 0.2, 0.0]))
        self.objHeight = self.data.get_geom_xpos('handle')[2]
        if self.random_init:
            obj_pos = self._get_state_rand_vec()
            self.obj_init_pos = obj_pos
            goal_pos = obj_pos.copy()
            goal_pos[1] -= 0.2
            self._target_pos = goal_pos
        drawer_cover_pos = self.obj_init_pos.copy()
        drawer_cover_pos[2] -= 0.02
        self.sim.model.body_pos[self.model.body_name2id('drawer')] = self.obj_init_pos
        self.sim.model.body_pos[self.model.body_name2id('drawer_cover')] = drawer_cover_pos
        self.sim.model.site_pos[self.model.site_name2id('goal')] = self._target_pos
        # Start with the drawer pulled open by 0.2 along its slide joint.
        self._set_obj_xyz((- 0.2))
        # Normalizing constants for the reward shaping below.
        self.maxDist = np.abs((self.data.get_geom_xpos('handle')[1] - self._target_pos[1]))
        self.target_reward = ((1000 * self.maxDist) + (1000 * 2))
        return self._get_obs()

    def _reset_hand(self):
        """Settle the gripper, then cache the initial fingertip midpoint."""
        super()._reset_hand(10)
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        self.init_fingerCOM = ((rightFinger + leftFinger) / 2)

    def compute_reward(self, actions, obs):
        """Shaped reward: reach the handle, then exponential bonus for closing distance.

        Returns [reward, reachDist, pullDist]; the pull bonus only activates
        once the fingertips are within 5 cm of the handle.
        """
        del actions
        # obs[3:6] is the object (handle) position in the observation vector.
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = ((rightFinger + leftFinger) / 2)
        pullGoal = self._target_pos[1]
        reachDist = np.linalg.norm((objPos - fingerCOM))
        pullDist = np.abs((objPos[1] - pullGoal))
        c1 = 1000
        c2 = 0.01
        c3 = 0.001
        if (reachDist < 0.05):
            pullRew = ((1000 * (self.maxDist - pullDist)) + (c1 * (np.exp(((- (pullDist ** 2)) / c2)) + np.exp(((- (pullDist ** 2)) / c3)))))
            pullRew = max(pullRew, 0)
        else:
            pullRew = 0
        reward = ((- reachDist) + pullRew)
        return [reward, reachDist, pullDist]
def minimum(c):
    """Install the minimum supported dependency set, then verify it and run the tests."""
    install_minimum(c)
    for command in ('python -m pip check', 'python -m pytest'):
        c.run(command)
def init_hf_bert_tenzorizer(args, **kwargs):
    """Factory for the HuggingFace BERT tensorizer.

    Raises RuntimeError when the optional `transformers` dependency is not
    installed.  `kwargs` is accepted for factory-interface compatibility but
    is not forwarded.
    """
    if (importlib.util.find_spec('transformers') is None):
        raise RuntimeError('Please install transformers lib')
    # Imported lazily so the module loads without transformers installed.
    from .hf_models import get_bert_tensorizer
    return get_bert_tensorizer(args)
class StochasticBlock(nn.Module):
    """Residual basic block with stochastic depth.

    During training the residual branch is kept with probability
    `survival_rate` (and scaled by 1/survival_rate when kept, so eval-time
    magnitudes match); at eval time it always runs.  Channel increases are
    handled by zero-padding the shortcut, which only supports exact doubling.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, survival_rate=1):
        super().__init__()
        self.survival_rate = survival_rate
        # Two 3x3 convs; the first carries the spatial stride.
        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.increasing = inplanes != planes * self.expansion
        if self.increasing:
            # Zero-padded shortcut below assumes exact channel doubling.
            assert ((1.0 * planes) * self.expansion) / inplanes == 2
        self.shortcut = nn.Sequential(nn.AvgPool2d(stride)) if stride != 1 else nn.Sequential()

    def forward(self, inputs):
        identity = self.shortcut(inputs)
        if self.increasing:
            # Match the residual branch's channel count by appending zeros.
            identity = torch.cat([identity] + [identity.mul(0)], 1)
        # At eval time the branch always runs; while training it survives with
        # probability survival_rate (rand is only drawn in training mode).
        keep_branch = (not self.training) or torch.rand(1)[0] <= self.survival_rate
        if not keep_branch:
            return F.relu(identity)
        out = F.relu(self.bn1(self.conv1(inputs)))
        out = self.bn2(self.conv2(out))
        if self.training:
            out /= self.survival_rate  # inverted scaling, as in inverted dropout
        out += identity
        return F.relu(out)
def test_0459_types():
    """Check that '__list__' parameters affect type equality while '__doc__' does not."""
    # Four variants of the same array: with/without a list parameter, with/without a doc.
    plain_plain = ak.highlevel.Array([[0.0, 1.1, 2.2, 3.3, 4.4]])
    array_plain = ak.operations.with_parameter(plain_plain, '__list__', 'zoinks')
    plain_isdoc = ak.operations.with_parameter(plain_plain, '__doc__', 'This is a zoink.')
    array_isdoc = ak.operations.with_parameter(array_plain, '__doc__', 'This is a zoink.')
    # Parameters are reported exactly as set.
    assert (ak.operations.parameters(plain_plain) == {})
    assert (ak.operations.parameters(array_plain) == {'__list__': 'zoinks'})
    assert (ak.operations.parameters(plain_isdoc) == {'__doc__': 'This is a zoink.'})
    assert (ak.operations.parameters(array_isdoc) == {'__list__': 'zoinks', '__doc__': 'This is a zoink.'})
    # Reflexive type equality.
    assert (ak.operations.type(plain_plain) == ak.operations.type(plain_plain))
    assert (ak.operations.type(array_plain) == ak.operations.type(array_plain))
    assert (ak.operations.type(plain_isdoc) == ak.operations.type(plain_isdoc))
    assert (ak.operations.type(array_isdoc) == ak.operations.type(array_isdoc))
    # '__list__' changes the type ...
    assert (ak.operations.type(plain_plain) != ak.operations.type(array_plain))
    assert (ak.operations.type(array_plain) != ak.operations.type(plain_plain))
    # ... while '__doc__' does not.
    assert (ak.operations.type(plain_plain) == ak.operations.type(plain_isdoc))
    assert (ak.operations.type(plain_isdoc) == ak.operations.type(plain_plain))
    assert (ak.operations.type(array_plain) == ak.operations.type(array_isdoc))
    assert (ak.operations.type(array_isdoc) == ak.operations.type(array_plain))
    assert (ak.operations.type(plain_isdoc) != ak.operations.type(array_isdoc))
    assert (ak.operations.type(array_isdoc) != ak.operations.type(plain_isdoc))
    # without_parameters strips every parameter from the layout.
    assert (array_plain.layout.parameters == {'__list__': 'zoinks'})
    assert (ak.operations.without_parameters(array_plain).layout.parameters == {})
    assert (plain_isdoc.layout.parameters == {'__doc__': 'This is a zoink.'})
    assert (ak.operations.without_parameters(plain_isdoc).layout.parameters == {})
    assert (array_isdoc.layout.parameters == {'__list__': 'zoinks', '__doc__': 'This is a zoink.'})
    assert (ak.operations.without_parameters(array_isdoc).layout.parameters == {})
# NOTE(review): mangled decorator — presumably `@register_optimizer('adam')` upstream.
_optimizer('adam')
class FairseqAdam(FairseqOptimizer):
    """Adam optimizer wrapper that prefers the fused CUDA kernel when available."""

    def __init__(self, args, params):
        super().__init__(args)
        fused_adam_cls = get_fused_adam_class()
        # Use the fused kernel only when not explicitly opted out and CUDA exists.
        use_fused_adam = ((not getattr(args, 'use_old_adam', False)) and (fused_adam_cls is not None) and torch.cuda.is_available())
        if getattr(args, 'tpu', False):
            # On TPU, always fall back to the plain implementation.
            self._optimizer = Adam(params, **self.optimizer_config)
        elif use_fused_adam:
            logger.info('using FusedAdam')
            self._optimizer = fused_adam_cls(params, **self.optimizer_config)
        else:
            self._optimizer = Adam(params, **self.optimizer_config)

    # NOTE(review): called without an instance by the argparse machinery —
    # presumably decorated @staticmethod upstream; the decorator appears lost.
    def add_args(parser):
        """Register Adam's command-line hyper-parameters."""
        parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer')
        parser.add_argument('--adam-eps', type=float, default=1e-08, metavar='D', help='epsilon for Adam optimizer')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
        parser.add_argument('--use-old-adam', action='store_true', default=False, help='Use fairseq.optim.adam.Adam')

    # NOTE(review): presumably a @property upstream (decorator lost); it is used
    # as `self.optimizer_config` kwargs in __init__.
    def optimizer_config(self):
        """Kwargs handed to the underlying optimizer constructor.

        NOTE(review): eval() on the CLI-provided betas string — acceptable for
        trusted command lines, but worth flagging.
        """
        return {'lr': self.args.lr[0], 'betas': eval(self.args.adam_betas), 'eps': self.args.adam_eps, 'weight_decay': self.args.weight_decay}

    def average_params(self):
        """All-reduce-average the exp_avg/exp_avg_sq moments across workers.

        Each worker divides its moments by the world size first, so the SUM
        all-reduce yields the mean.
        """
        state_dict = self.optimizer.state_dict()
        total_gpus = float(dist.get_world_size())
        for (_, value) in state_dict['state'].items():
            value['exp_avg'] /= total_gpus
            value['exp_avg_sq'] /= total_gpus
            dist.all_reduce(value['exp_avg'], op=dist.ReduceOp.SUM)
            dist.all_reduce(value['exp_avg_sq'], op=dist.ReduceOp.SUM)
def synchronize():
    """Barrier across distributed workers.

    Under horovod there is no explicit barrier, so broadcasting a token
    synchronizes all ranks (returns None); otherwise delegate to
    comm.synchronize().
    """
    global _USE_HVD
    if not _USE_HVD:
        return comm.synchronize()
    hvd.broadcast_object(0)
    return None
def load_errors(path):
    """Parse the error catalogue YAML file at `path` (using the fast C loader).

    NOTE(review): yaml.load with CLoader can construct arbitrary Python objects
    via tags — fine for trusted local files, but switch to yaml.safe_load if
    `path` could ever come from untrusted input.
    """
    with open(path, 'r') as handle:
        return yaml.load(handle, Loader=yaml.CLoader)
def dump_label(feat_dir, split, km_path, nshard, rank, lab_dir):
    """Quantize one feature shard with a fitted k-means model and write labels.

    Each input feature sequence becomes one line of space-separated cluster
    ids in '{lab_dir}/{split}_{rank}_{nshard}.km'.
    """
    apply_kmeans = ApplyKmeans(km_path)
    generator, num = get_feat_iterator(feat_dir, split, nshard, rank)
    os.makedirs(lab_dir, exist_ok=True)
    lab_path = f'{lab_dir}/{split}_{rank}_{nshard}.km'
    with open(lab_path, 'w') as out:
        for feat in tqdm.tqdm(generator(), total=num):
            cluster_ids = apply_kmeans(feat).tolist()
            out.write(' '.join(map(str, cluster_ids)) + '\n')
    logger.info('finished successfully')
class YolosConfig(PretrainedConfig):
    """Configuration for a YOLOS detection model.

    Bundles the ViT-style encoder hyper-parameters (hidden size, layers,
    heads, ...) with detection-specific settings (detection tokens, matcher
    costs, loss coefficients).  Unknown kwargs are forwarded to
    PretrainedConfig.
    """
    model_type = 'yolos'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)
        # --- Transformer encoder geometry and regularization ---
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # --- Image/patch embedding ---
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # --- Detection head ---
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher costs.
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss weighting coefficients.
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
def MathonPseudocyclicStronglyRegularGraph(t, G=None, L=None):
    """Construct Mathon's pseudocyclic strongly regular graph on p*q^2 vertices.

    Here p = 4t+1 (which must be a sum of two squares) and q = 4t-1 (a prime
    power, since GF(q) is constructed).  `G` is an auxiliary SRG(p, 2t, t-1)
    and `L` a skew-symmetric-style circulant index matrix; both are built by
    default when not supplied.
    """
    from sage.rings.finite_rings.finite_field_constructor import FiniteField as GF
    from sage.rings.integer_ring import ZZ
    from sage.matrix.constructor import matrix, block_matrix, ones_matrix, identity_matrix
    from sage.arith.misc import two_squares
    p = ((4 * t) + 1)
    # p must be expressible as a sum of two squares for the construction to exist.
    try:
        x = two_squares(p)
    except ValueError:
        raise ValueError((str(p) + ' must be a sum of two squares!'))
    if (G is None):
        from sage.graphs.strongly_regular_db import strongly_regular_graph as SRG
        G = SRG(p, (2 * t), (t - 1))
        G.relabel(range(p))
    if (L is None):
        from sage.matrix.constructor import circulant
        # Circulant with first row (0, 1, ..., 2t, -2t, ..., -1).
        L = circulant((list(range(((2 * t) + 1))) + list(range(((- 2) * t), 0))))
    q = ((4 * t) - 1)
    K = GF(q, prefix='x')
    # Enumerate {x, -x} pairs of K to fix an ordering a[] with a[-i-1] = -a[i].
    K_pairs = set((frozenset([x, (- x)]) for x in K))
    K_pairs.discard(frozenset([0]))
    a = ([None] * (q - 1))
    for (i, (x, y)) in enumerate(K_pairs):
        a[i] = x
        a[((- i) - 1)] = y
    a.append(K(0))
    # P[b] is the permutation matrix of translation by b in the ordering a[].
    P = [matrix(ZZ, q, q, (lambda i, j: (1 if (a[j] == (a[i] + b)) else 0))) for b in a]
    g = K.primitive_element()
    # F: sum of translation matrices over the nonzero squares (quadratic residues).
    F = sum((P[a.index((g ** (2 * i)))] for i in range(1, (2 * t))))
    # E encodes the non-squares relative to a[0].
    E = matrix(ZZ, q, q, (lambda i, j: (0 if (a[j] - a[0]).is_square() else 1)))
    def B(m):
        # q^2 x q^2 building block indexed by m (0, 1..2t-1, or 2t).
        I = identity_matrix(q)
        J = ones_matrix(q)
        if (m == 0):
            def f(i, j):
                if (i == j):
                    return (0 * I)
                elif (a[j] - a[i]).is_square():
                    return (I + F)
                else:
                    return (J - F)
        elif (m < (2 * t)):
            def f(i, j):
                return (F * P[a.index(((g ** (2 * m)) * (a[i] + a[j])))])
        elif (m == (2 * t)):
            def f(i, j):
                return (E * P[i])
        return block_matrix(q, q, [f(i, j) for i in range(q) for j in range(q)])
    def Acon(i, j):
        # Adjacency block for the (i, j) cell, driven by the sign of L and edges of G.
        J = ones_matrix((q ** 2))
        if (i == j):
            return B(0)
        if (L[(i, j)] > 0):
            if G.has_edge(i, j):
                return B(L[(i, j)])
            return (J - B(L[(i, j)]))
        if G.has_edge(i, j):
            return B((- L[(i, j)])).T
        return (J - B((- L[(i, j)])).T)
    A = Graph(block_matrix(p, p, [Acon(i, j) for i in range(p) for j in range(p)]))
    A.name((("Mathon's PC SRG on " + str((p * (q ** 2)))) + ' vertices'))
    A.relabel()
    return A
def make_data_loader_view(cfg, is_train=False):
    """Build a shuffled DataLoader over the multi-view dataset described by `cfg`.

    Returns (data_loader, datasets) so callers can also reach the raw dataset.
    """
    transforms = build_transforms(cfg, is_train)
    datasets = build_dataset_view(
        cfg.DATASETS.TRAIN,
        transforms,
        use_mask=cfg.DATASETS.USE_MASK,
        num_frame=cfg.DATASETS.NUM_FRAME,
    )
    data_loader = data.DataLoader(
        datasets,
        batch_size=cfg.SOLVER.IMS_PER_BATCH,
        shuffle=True,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
    )
    return (data_loader, datasets)
def enhance_contrast(image, footprint, out=None, mask=None, shift_x=False, shift_y=False, shift_z=False):
    """Rank-filter contrast enhancement, dispatching on image dimensionality.

    2-D and 3-D inputs go to the corresponding cython kernel; anything else
    raises ValueError.
    """
    np_image = np.asanyarray(image)
    if np_image.ndim == 2:
        return _apply_scalar_per_pixel(
            generic_cy._enhance_contrast, image, footprint,
            out=out, mask=mask, shift_x=shift_x, shift_y=shift_y,
        )
    if np_image.ndim == 3:
        return _apply_scalar_per_pixel_3D(
            generic_cy._enhance_contrast_3D, image, footprint,
            out=out, mask=mask, shift_x=shift_x, shift_y=shift_y, shift_z=shift_z,
        )
    raise ValueError(f'`image` must have 2 or 3 dimensions, got {np_image.ndim}.')
def get_config():
    """Default hyper-parameter configuration for the agent."""
    config = ml_collections.ConfigDict()
    hyperparams = {
        'actor_lr': 0.0003,
        'value_lr': 0.0003,
        'critic_lr': 0.0003,
        'hidden_dims': (256, 256),
        'discount': 0.99,
        'dropout_rate': 0,
        'layernorm': True,
        'tau': 0.005,  # target-network update rate
    }
    for name, value in hyperparams.items():
        setattr(config, name, value)
    return config
def _GetAutoCorr(ps):
    """Merge the Moreau-Broto, Moran, and Geary autocorrelation descriptors of `ps`."""
    descriptors = {}
    for compute in (GetAutoCorrMoreauBroto, GetAutoCorrMoran, GetAutoCorrGeary):
        descriptors.update(compute(ps))
    return descriptors
# NOTE(review): mangled pytest marker — presumably `@pytest.mark.spark`.
.spark
def test_diff_feedback_type(log, model):
    """Switching a model to implicit feedback must change its relevance scores."""
    dataset = create_dataset(log)
    # Predict once with the default (explicit) feedback setting ...
    pred_exp = model.fit_predict(dataset, k=1)
    # ... then again after enabling implicit preferences.
    model.implicit_prefs = True
    pred_imp = model.fit_predict(dataset, k=1)
    # Align both predictions by user before comparing score vectors.
    assert (not np.allclose(pred_exp.toPandas().sort_values('user_idx')['relevance'].values, pred_imp.toPandas().sort_values('user_idx')['relevance'].values))
class Attackmodel(nn.Module):
    """U-Net style encoder/decoder producing a tanh-bounded `out_channel` map from an RGB input."""

    def __init__(self, out_channel=3):
        super(Attackmodel, self).__init__()
        # Encoder: five double-conv stages with channel doubling.
        self.dconv_down1 = double_conv(3, 64)
        self.dconv_down2 = double_conv(64, 128)
        self.dconv_down3 = double_conv(128, 256)
        self.dconv_down4 = double_conv(256, 512)
        self.dconv_down5 = double_conv(512, 1024)
        # Downsampling uses average pooling (despite the attribute name).
        self.maxpool = nn.AvgPool2d(2)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # Decoder stages consume upsampled features concatenated with skips.
        self.dconv_up4 = double_conv((512 + 1024), 512)
        self.dconv_up3 = double_conv((256 + 512), 256)
        self.dconv_up2 = double_conv((128 + 256), 128)
        self.dconv_up1 = double_conv((128 + 64), 64)
        self.conv_last = nn.Sequential(nn.Conv2d(64, out_channel, 1), nn.BatchNorm2d(out_channel))

    def forward(self, x):
        """Encode with skip connections, decode, then squash through tanh."""
        skips = []
        for down in (self.dconv_down1, self.dconv_down2, self.dconv_down3, self.dconv_down4):
            x = down(x)
            skips.append(x)
            x = self.maxpool(x)
        x = self.dconv_down5(x)
        up_stages = (self.dconv_up4, self.dconv_up3, self.dconv_up2, self.dconv_up1)
        for up, skip in zip(up_stages, reversed(skips)):
            x = self.upsample(x)
            x = torch.cat([x, skip], dim=1)
            x = up(x)
        return torch.tanh(self.conv_last(x))
# NOTE(review): mangled decorator — presumably `@pytest.mark.parametrize(...)`.
.parametrize('split,num_sample', [('train', 37951), ('test', 9488), ('competition', 31626)])
def test_california_house_price(split, num_sample):
    """Each split of the california_house_price dataset has its expected row count."""
    df = create_dataset('california_house_price', split).data
    assert (len(df) == num_sample)
def is_integral(dtype: torch.dtype) -> bool:
    """True iff `dtype` is a known non-complex torch dtype that is not floating point."""
    non_complex = set(get_all_dtypes()) - set(get_all_complex_dtypes())
    return dtype in non_complex and not dtype.is_floating_point
def test_add():
    """Feed various integer/float sequences into OnlineStatistics and verify its stats."""
    def _run_test(values):
        stat = OnlineStatistics()
        for value in values:
            stat.add(value)
        _assert_correct_stats(stat, values)

    cases = (
        range(51),
        range(10, 10000, 3),
        range(-400, -300),
        list(range(4, 900, 2)) + list(range(-1000, -300, 7)),
        list(range(-100, 100, 7)) + list(range(-100, 100, 2)),
        np.linspace(0, 1, 100),
        np.logspace(-100, 3, 100),
        [0, 1],
        [-1, 1],
    )
    for values in cases:
        _run_test(values)
def enable_dropout(model):
    """Put every Dropout-family submodule into train mode (for MC-dropout sampling).

    The rest of `model` keeps its current train/eval state; the (mutated)
    model is returned for chaining.
    """
    dropout_modules = (
        m for m in model.modules()
        if type(m).__name__.startswith('Dropout')
    )
    for module in dropout_modules:
        module.train()
    return model
def deepnn(l, x, final_dim=1):
    """Two-conv-layer CNN over `l` vertically stacked 28x28 images.

    Args:
        l: number of stacked 28x28 images per example.
        x: flat input tensor, reshaped to [-1, l*28, 28, 1].
        final_dim: width of the output layer.

    Returns:
        h_fc2: logits tensor of shape [-1, final_dim].

    Fix: the original had an unreachable `with tf.name_scope('dropout')` block
    after the `return h_fc2` statement (dead code referencing h_fc1/keep_prob);
    it has been removed.
    """
    with tf.name_scope('reshape'):
        x_image = tf.reshape(x, [(- 1), (l * 28), 28, 1])
    # conv1 + pool1: 1 -> 32 channels, spatial /2.
    with tf.name_scope('conv1'):
        W_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variable([32])
        h_conv1 = tf.nn.relu((conv2d(x_image, W_conv1) + b_conv1))
    with tf.name_scope('pool1'):
        h_pool1 = max_pool_2x2(h_conv1)
    # conv2 + pool2: 32 -> 64 channels, spatial /2 again (28 -> 7 per image).
    with tf.name_scope('conv2'):
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.relu((conv2d(h_pool1, W_conv2) + b_conv2))
    with tf.name_scope('pool2'):
        h_pool2 = max_pool_2x2(h_conv2)
    # fc1: flatten all l*7*7*64 features into 64 hidden units.
    with tf.name_scope('fc1'):
        W_fc1 = weight_variable([(((l * 7) * 7) * 64), 64])
        b_fc1 = bias_variable([64])
        h_pool2_flat = tf.reshape(h_pool2, [(- 1), (((l * 7) * 7) * 64)])
        h_fc1 = tf.nn.relu((tf.matmul(h_pool2_flat, W_fc1) + b_fc1))
    # fc2: final projection to `final_dim` logits.
    with tf.name_scope('fc2'):
        W_fc2 = weight_variable([64, final_dim])
        b_fc2 = bias_variable([final_dim])
        h_fc1_flat = tf.reshape(h_fc1, [(- 1), 64])
        h_fc2 = (tf.matmul(h_fc1_flat, W_fc2) + b_fc2)
    return h_fc2
def NN_Regression(x, y, x_test, y_test):
    """Fit a small 3-hidden-layer MLP to 1-D regression data with SGD and plot the result.

    Side effects: trains for 2000 epochs, then writes Regression_NN.png/.eps
    under ../Results/ and clears the matplotlib figure.  Returns None.
    """
    print('SGD Training Begins!')
    # Flatten and wrap the training data as (N, 1) tensors/Variables.
    x = x.flatten()
    X = Var(x)
    X = torch.unsqueeze(X, 1)
    y = y.flatten()
    Y = Var(y)
    Y = torch.unsqueeze(Y, 1)
    X_test = Var(x_test)
    X_test = torch.unsqueeze(X_test, 1)
    class Net(torch.nn.Module):
        # 1 -> h0 -> h1 -> h2 -> 1 fully connected ReLU network.
        def __init__(self, n_feature, n_hidden, n_output):
            super(Net, self).__init__()
            self.l1 = torch.nn.Linear(n_feature, n_hidden[0])
            self.l2 = torch.nn.Linear(n_hidden[0], n_hidden[1])
            self.l3 = torch.nn.Linear(n_hidden[1], n_hidden[2])
            self.predict = torch.nn.Linear(n_hidden[2], n_output)
        def forward(self, x):
            x = F.relu(self.l1(x))
            x = F.relu(self.l2(x))
            x = F.relu(self.l3(x))
            x = self.predict(x)
            return x
    net = Net(n_feature=1, n_hidden=[16, 16, 16], n_output=1)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.2)
    loss_func = torch.nn.MSELoss()
    # Plain full-batch SGD loop.
    for epoch in range(2000):
        if ((epoch % 10) == 0):
            print('Epoch: ', epoch)
        prediction = net(X)
        loss = loss_func(prediction, Y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Evaluate on the test inputs and plot target vs prediction vs truth.
    prediction = net(X_test)
    plt.scatter(x, y, marker='x', c='black', label='target')
    plt.plot(x_test, prediction.detach().numpy(), c='red', label='Prediction')
    plt.plot(x_test, y_test, c='grey', label='truth')
    plt.legend()
    plt.tight_layout()
    plt.savefig('../Results/Regression_NN.png')
    plt.savefig('../Results/Regression_NN.eps', format='eps', dpi=1000)
    plt.clf()
# NOTE(review): the three dotted lines below look like mangled pytest markers —
# presumably @pytest.mark.no_cover, @pytest.mark.mujoco, @pytest.mark.timeout(100).
.no_cover
.mujoco
.timeout(100)
def test_te_ppo_metaworld_ml1_push():
    """Smoke-test the TE-PPO MetaWorld ML1 push example script for one cheap epoch."""
    assert (subprocess.run([str((EXAMPLES_ROOT_DIR / 'tf/te_ppo_metaworld_ml1_push.py')), '--n_epochs', '1', '--batch_size_per_task', '100'], check=False).returncode == 0)
# NOTE(review): mangled decorator — presumably `@require_torch` upstream.
_torch
class AutoModelTest(unittest.TestCase):
    """Smoke tests that each AutoModel* factory resolves a BERT checkpoint to the right class."""

    def test_model_from_pretrained(self):
        """AutoModel -> BertModel, with no unexpected loading warnings."""
        logging.basicConfig(level=logging.INFO)
        # Only the first archive entry is exercised to keep the test cheap.
        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = AutoModel.from_pretrained(model_name)
            (model, loading_info) = AutoModel.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)
            # Every loading_info bucket (missing/unexpected/error keys) must be empty.
            for value in loading_info.values():
                self.assertEqual(len(value), 0)

    def test_lmhead_model_from_pretrained(self):
        """AutoModelWithLMHead -> BertForMaskedLM."""
        logging.basicConfig(level=logging.INFO)
        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = AutoModelWithLMHead.from_pretrained(model_name)
            (model, loading_info) = AutoModelWithLMHead.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    def test_sequence_classification_model_from_pretrained(self):
        """AutoModelForSequenceClassification -> BertForSequenceClassification."""
        logging.basicConfig(level=logging.INFO)
        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = AutoModelForSequenceClassification.from_pretrained(model_name)
            (model, loading_info) = AutoModelForSequenceClassification.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    def test_question_answering_model_from_pretrained(self):
        """AutoModelForQuestionAnswering -> BertForQuestionAnswering."""
        logging.basicConfig(level=logging.INFO)
        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = AutoModelForQuestionAnswering.from_pretrained(model_name)
            (model, loading_info) = AutoModelForQuestionAnswering.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        """Resolving by short identifier string also lands on the BERT LM-head class."""
        logging.basicConfig(level=logging.INFO)
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, BertForMaskedLM)
def find_missing_tags(known_tags, test_tags):
    """Return the sorted tags present in test_tags but absent from known_tags.

    Either argument may be a flat iterable of tags or a list of tag lists
    (e.g. per-sentence tags); nested lists are flattened and de-duplicated
    first. A flat test_tags list is NOT de-duplicated, matching the
    original behaviour (duplicates appear multiple times in the result).

    FIX: the original indexed arg[0] before checking for nesting, so an
    empty list raised IndexError; membership was also tested against a
    list (O(n) per lookup) — known tags are now hashed into a set once.
    """
    if isinstance(known_tags, list) and known_tags and isinstance(known_tags[0], list):
        known_tags = {tag for sentence in known_tags for tag in sentence}
    if isinstance(test_tags, list) and test_tags and isinstance(test_tags[0], list):
        test_tags = sorted({tag for sentence in test_tags for tag in sentence})
    known = set(known_tags)
    return sorted(tag for tag in test_tags if tag not in known)
def _init_beta_gamma(shape, fix_parameters, param_init, no_bias, no_scale):
    """Create the (beta, gamma) affine parameters of a normalization layer.

    A disabled parameter (no_bias / no_scale) is returned as None. Custom
    initializers may be supplied via param_init under the keys 'beta' and
    'gamma'; otherwise constants 0 and 1 are used respectively.
    """
    from nnabla.parameter import get_parameter_or_create
    from nnabla.initializer import ConstantInitializer

    def _create(name, default_value):
        # fall back to a constant initializer when no override is given
        initializer = param_init.get(name, ConstantInitializer(default_value))
        return get_parameter_or_create(name, shape, initializer, True, not fix_parameters)

    beta = None if no_bias else _create('beta', 0)
    gamma = None if no_scale else _create('gamma', 1)
    return (beta, gamma)
def register_Ns3Ipv6RawSocketFactory_methods(root_module, cls):
    # Auto-generated (PyBindGen-style) binding registration for
    # ns3::Ipv6RawSocketFactory: default and copy constructors plus the
    # static GetTypeId accessor. Do not hand-edit beyond regeneration.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6RawSocketFactory const &', 'arg0')])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def load_state_dict(model, state_dict):
    """Load a state dict into model, retrying with DataParallel prefixes removed.

    Checkpoints saved from an nn.DataParallel-wrapped model carry a
    'module.' prefix on every key; when the direct load raises
    RuntimeError we strip that prefix and retry.

    FIX: the original retry sliced len('module.') characters off EVERY
    key, silently corrupting keys that never had the prefix. Only keys
    that actually start with 'module.' are stripped now.
    """
    try:
        model.load_state_dict(state_dict)
    except RuntimeError:
        prefix = 'module.'
        stripped = {
            (key[len(prefix):] if key.startswith(prefix) else key): value
            for (key, value) in state_dict.items()
        }
        model.load_state_dict(stripped)
def buzzard_tpslopes(p, N, kmax):
    """Evaluate the GP 'tpslopes' script at (p, N, kmax) and return the parsed
    result with an empty list prepended.

    NOTE(review): gp() and sage_eval come from the surrounding Sage module;
    presumably v is a per-weight list of slope data — confirm against the
    tpslopes GP source.
    """
    v = gp().eval(('tpslopes(%s, %s, %s)' % (p, N, kmax)))
    v = sage_eval(v)
    # shift indices up by one so v[k] lines up with weight k (v[0] unused)
    v.insert(0, [])
    return v
class AttentionalAggregator(GraphSAGEAggregator):
    """GraphSAGE aggregator that weighs each neighbourhood with GAT-style
    additive attention (LeakyReLU(0.2) on the logits, softmax over the
    neighbour axis).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # attended representations keep the layer's output width
        self.hidden_dim = self.output_dim
        self.attn_act = LeakyReLU(0.2)

    def _build_group_weights(self, in_shape, out_size, group_idx=0):
        """Create the trainable weights for one input group.

        Group 0 (the head node) gets a single projection matrix, or None when
        its output size is 0; each neighbour group gets a projection plus two
        attention vectors (self score and neighbour score).
        """
        if (group_idx == 0):
            if (out_size > 0):
                weights = self.add_weight(name=f'w_self', shape=(int(in_shape[(- 1)]), out_size), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True)
            else:
                weights = None
        else:
            w_g = self.add_weight(name=f'w_g{group_idx}', shape=(int(in_shape[(- 1)]), out_size), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True)
            w_attn_s = self.add_weight(name=f'w_attn_s{group_idx}', shape=(out_size, 1), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True)
            w_attn_g = self.add_weight(name=f'w_attn_g{group_idx}', shape=(out_size, 1), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True)
            weights = [w_g, w_attn_s, w_attn_g]
        return weights

    def calculate_group_sizes(self, input_shape):
        """Split output_dim evenly across the non-empty neighbour groups.

        The head group is assigned size 0, and the first non-empty neighbour
        group absorbs the division remainder.
        """
        # a group participates only if none of its non-batch dims are 0
        self.included_weight_groups = [all(((dim != 0) for dim in group_shape[1:])) for group_shape in input_shape]
        num_groups = (np.sum(self.included_weight_groups) - 1)
        if (num_groups == 0):
            # no neighbour groups: the whole output comes from the head projection
            weight_dims = ([self.output_dim] + ([0] * (len(input_shape) - 1)))
        else:
            group_output_dim = (self.output_dim // num_groups)
            remainder_dim = (self.output_dim - (num_groups * group_output_dim))
            weight_dims = [0]
            for g in self.included_weight_groups[1:]:
                if g:
                    group_dim = (group_output_dim + remainder_dim)
                    remainder_dim = 0
                else:
                    group_dim = 0
                weight_dims.append(group_dim)
        self.weight_dims = weight_dims

    def call(self, inputs, **kwargs):
        """Apply attention over each neighbour group and concatenate the results.

        inputs[0] holds head-node features; inputs[1:] hold neighbour groups.
        NOTE(review): assumes axis 2 is the neighbourhood axis of the
        neighbour tensors — confirm against GraphSAGEAggregator.
        """
        if (not self.included_weight_groups[0]):
            raise ValueError('The head node group must have non-zero dimension')
        x_self = inputs[0]
        group_sources = []
        for (ii, x_g) in enumerate(inputs[1:]):
            group_idx = (ii + 1)
            if (not self.included_weight_groups[group_idx]):
                continue
            (w_g, w_attn_s, w_attn_g) = self.w_group[group_idx]
            # project the head node and place it alongside its neighbours
            xw_self = K.expand_dims(K.dot(x_self, w_g), axis=2)
            xw_neigh = K.dot(x_g, w_g)
            xw_all = K.concatenate([xw_self, xw_neigh], axis=2)
            # GAT-style additive attention logits
            attn_self = K.dot(xw_self, w_attn_s)
            attn_neigh = K.dot(xw_all, w_attn_g)
            attn_u = self.attn_act((attn_self + attn_neigh))
            attn = K.softmax(attn_u, axis=2)
            # attention-weighted sum over self + neighbours
            h_out = K.sum((attn * xw_all), axis=2)
            group_sources.append(h_out)
        if (not group_sources):
            # no neighbour group was present: fall back to the head projection
            group_sources = [K.dot(x_self, self.w_group[0])]
        h_out = K.concatenate(group_sources, axis=2)
        if self.has_bias:
            h_out = (h_out + self.bias)
        return self.act(h_out)
def clean_dict(content):
    """Merge sentence fragments in content into full sentences.

    content maps sentence ids to {'sentence': text}. Fragments that do not
    end in '.' (or end in 'Fig.') are buffered and joined with following
    entries; a buffered run is flushed either when it is completed by a
    '.'-terminated fragment (key = concatenated ids joined by '+') or when a
    new capitalized sentence starts (buffer flushed with a '.' appended).
    Returns the new id -> {'sentence': text} dict.

    FIX: the original bare `except: pass` swallowed every exception; it is
    narrowed to the malformed-entry cases the loop can actually produce
    (missing 'sentence' key, empty string, non-string value).
    """
    new_content = {}
    buffered_sent = ''
    buffered_ids = ''
    for (sent_id, entry) in content.items():
        try:
            sent = entry['sentence'].rstrip()
            sent = sent.replace('\n', ' ').replace('\t', ' ')
            sent = re.sub(' +', ' ', sent)
            # a new capitalized sentence terminates any pending fragment run
            if buffered_sent and sent[0].isupper():
                new_content[buffered_ids[:(- 1)]] = {'sentence': (buffered_sent + '.')}
                buffered_sent = ''
                buffered_ids = ''
            # unterminated fragment (or abbreviation like 'Fig.'): keep buffering
            if (sent[(- 1)] != '.') or sent.endswith('Fig.'):
                buffered_sent += (' ' + sent)
                buffered_ids += (str(sent_id) + '+')
                continue
            buffered_sent += sent
            buffered_ids += str(sent_id)
            new_content[buffered_ids] = {'sentence': buffered_sent}
            buffered_sent = ''
            buffered_ids = ''
        except (KeyError, IndexError, AttributeError, TypeError):
            # skip malformed entries rather than aborting the whole pass
            pass
    return new_content
def all_networks():
    """Return the sorted names of the Python modules in this package's
    directory, excluding dunder files such as __init__.py."""
    import os
    package_dir = os.path.dirname(os.path.abspath(__file__))
    names = []
    for entry in os.listdir(package_dir):
        if entry.startswith('__') or not entry.endswith('.py'):
            continue
        names.append(entry[:-len('.py')])
    names.sort()
    return names
_module()
class MSELoss(nn.Module):
    """Mean-squared-error loss with a configurable reduction and weight.

    NOTE(review): the bare `_module()` call above looks like a stripped
    registry decorator (e.g. `@LOSSES.register_module()`) — confirm against
    the original file.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        # reduction: one of 'none' | 'mean' | 'sum'; loss_weight scales the result
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None):
        """Return loss_weight * mse_loss(pred, target).

        reduction_override, when given, takes precedence over the reduction
        chosen at construction time.
        """
        assert (reduction_override in (None, 'none', 'mean', 'sum'))
        reduction = (reduction_override if reduction_override else self.reduction)
        loss = (self.loss_weight * mse_loss(pred, target, weight, reduction=reduction, avg_factor=avg_factor))
        return loss
class TestJointMotionPlanner(unittest.TestCase):
    """Tests for the joint (two-agent) Overcooked motion planner.

    Agent states are ((x, y), orientation) pairs; n/s/e/w/interact and the
    planner fixtures (ml_action_manager_simple, or_ml_action_manager_simple,
    ml_planner_large, simple_mdp, large_mdp_tests) are module-level globals.
    """

    def test_same_start_and_end_pos_with_no_start_orientations(self):
        jm_planner = ml_action_manager_simple.joint_motion_planner
        start = (((1, 1), w), ((1, 2), s))
        goal = (((1, 1), n), ((2, 1), n))
        (joint_action_plan, end_jm_state, finshing_times) = jm_planner.get_low_level_action_plan(start, goal)
        # each plan step is a (agent1_action, agent2_action) pair
        optimal_plan = [(n, e), (interact, n)]
        self.assertEqual(joint_action_plan, optimal_plan)
        optimal_end_jm_state = (((1, 1), n), ((2, 1), n))
        self.assertEqual(end_jm_state, optimal_end_jm_state)
        optimal_finshing_times = (2, 3)
        self.assertEqual(finshing_times, optimal_finshing_times)

    def test_with_start_orientations_simple_mdp(self):
        jm_planner = or_ml_action_manager_simple.joint_motion_planner
        self.simple_mdp_suite(jm_planner)

    def test_without_start_orientations_simple_mdp(self):
        jm_planner = ml_action_manager_simple.joint_motion_planner
        self.simple_mdp_suite(jm_planner)

    def simple_mdp_suite(self, jm_planner):
        # scenario battery shared by both planner variants above
        self.simple_mdp_already_at_goal(jm_planner)
        self.simple_mdp_only_orientations_switch(jm_planner)
        self.simple_mdp_one_at_goal(jm_planner)
        self.simple_mdp_position_swap(jm_planner)
        self.simple_mdp_one_at_goal_other_conflicting_path(jm_planner)
        self.simple_mdp_test_final_orientation_optimization(jm_planner)

    def simple_mdp_already_at_goal(self, planner):
        # both agents already at their goals: plan length is a single step
        a1_start = a1_goal = ((1, 1), n)
        a2_start = a2_goal = ((2, 1), n)
        start = (a1_start, a2_start)
        goal = (a1_goal, a2_goal)
        self.check_joint_plan(planner, start, goal, times=(1, 1), min_t=1)
        a1_start = a1_goal = ((1, 1), w)
        a2_start = a2_goal = ((1, 2), s)
        start = (a1_start, a2_start)
        goal = (a1_goal, a2_goal)
        self.check_joint_plan(planner, start, goal, times=(1, 1), min_t=1)

    def simple_mdp_only_orientations_switch(self, planner):
        # same positions, both agents only need to turn
        a1_start = ((1, 1), s)
        a1_goal = ((1, 1), w)
        a2_start = ((1, 2), s)
        a2_goal = ((1, 2), w)
        start = (a1_start, a2_start)
        goal = (a1_goal, a2_goal)
        self.check_joint_plan(planner, start, goal, times=(2, 2), min_t=2)

    def simple_mdp_one_at_goal(self, planner):
        a1_start = ((3, 2), s)
        a1_goal = ((3, 2), s)
        a2_start = ((2, 1), w)
        a2_goal = ((1, 1), w)
        start = (a1_start, a2_start)
        goal = (a1_goal, a2_goal)
        self.check_joint_plan(planner, start, goal, times=(1, 2))

    def simple_mdp_position_swap(self, planner):
        # agents exchange positions, forcing coordination to avoid collision
        a1_start = ((1, 1), w)
        a2_start = ((3, 2), s)
        a1_goal = a2_start
        a2_goal = a1_start
        start = (a1_start, a2_start)
        goal = (a1_goal, a2_goal)
        self.check_joint_plan(planner, start, goal)

    def simple_mdp_one_at_goal_other_conflicting_path(self, planner):
        # agent 2 sits on agent 1's shortest path
        a1_start = ((1, 1), w)
        a1_goal = ((3, 1), e)
        a2_start = a2_goal = ((2, 1), n)
        start = (a1_start, a2_start)
        goal = (a1_goal, a2_goal)
        self.check_joint_plan(planner, start, goal, min_t=1)

    def simple_mdp_test_final_orientation_optimization(self, planner):
        a1_start = ((2, 1), n)
        a1_goal = ((1, 2), w)
        a2_start = a2_goal = ((3, 2), s)
        start = (a1_start, a2_start)
        goal = (a1_goal, a2_goal)
        # NOTE(review): the first start/goal pair above is immediately
        # overwritten below without being checked — looks like a leftover.
        a1_goal = ((1, 2), s)
        start = (a1_start, a2_start)
        goal = (a1_goal, a2_goal)
        self.check_joint_plan(planner, start, goal, times=(3, 1))

    def test_large_mdp_suite_shared_motion_goals(self):
        # gated on the module flag so the slow suite can be skipped
        if large_mdp_tests:
            jmp = ml_planner_large.ml_action_manager.joint_motion_planner
            self.large_mdp_test_basic_plan(jmp)
            self.large_mdp_test_shared_motion_goal(jmp)
            self.large_mdp_test_shared_motion_goal_with_conflict(jmp)
            self.large_mdp_test_shared_motion_goal_with_conflict_other(jmp)

    def large_mdp_test_basic_plan(self, planner):
        a1_start = ((5, 1), n)
        a2_start = ((8, 1), n)
        a1_goal = a2_start
        a2_goal = a1_start
        start = (a1_start, a2_start)
        goal = (a1_goal, a2_goal)
        self.check_joint_plan(planner, start, goal)

    def large_mdp_test_shared_motion_goal(self, planner):
        # both agents target the same square; only one can succeed
        a1_start = ((4, 1), n)
        a2_start = ((1, 1), n)
        a1_goal = ((5, 1), n)
        a2_goal = ((5, 1), n)
        start = (a1_start, a2_start)
        goal = (a1_goal, a2_goal)
        self.check_joint_plan(planner, start, goal, min_t=3)

    def large_mdp_test_shared_motion_goal_with_conflict(self, planner):
        assert planner.same_motion_goals
        a1_start = ((5, 2), n)
        a2_start = ((4, 1), n)
        a1_goal = ((5, 1), n)
        a2_goal = ((5, 1), n)
        start = (a1_start, a2_start)
        goal = (a1_goal, a2_goal)
        self.check_joint_plan(planner, start, goal, min_t=2)

    def large_mdp_test_shared_motion_goal_with_conflict_other(self, planner):
        assert planner.same_motion_goals
        a1_start = ((4, 2), e)
        a2_start = ((4, 1), e)
        a1_goal = ((5, 1), n)
        a2_goal = ((5, 1), n)
        start = (a1_start, a2_start)
        goal = (a1_goal, a2_goal)
        self.check_joint_plan(planner, start, goal, min_t=3)

    def check_joint_plan(self, joint_motion_planner, start, goal, times=None, min_t=None, display=False):
        """Compute a joint plan, execute it in the environment, and verify the
        resulting state, plan length, and (optionally) per-agent finish times."""
        debug = False
        (action_plan, end_pos_and_orients, plan_lengths) = joint_motion_planner.get_low_level_action_plan(start, goal)
        if debug:
            print('Start state: {}, Goal state: {}, Action plan: {}'.format(start, goal, action_plan))
        start_state = OvercookedState([P(*start[0]), P(*start[1])], {}, all_orders=simple_mdp.start_all_orders)
        env = OvercookedEnv.from_mdp(joint_motion_planner.mdp, horizon=1000)
        (resulting_state, _) = env.execute_plan(start_state, action_plan, display=display)
        # at least one agent must end exactly at its goal pos/orientation
        self.assertTrue(any([(agent_goal in resulting_state.players_pos_and_or) for agent_goal in goal]))
        self.assertEqual(resulting_state.players_pos_and_or, end_pos_and_orients)
        # the joint plan stops when the faster agent finishes
        self.assertEqual(len(action_plan), min(plan_lengths))
        if (min_t is not None):
            self.assertEqual(len(action_plan), min_t)
        if (times is not None):
            self.assertEqual(plan_lengths, times)
_BOX_PREDICTOR.register('FastRCNNPredictor')
class FastRCNNPredictor(nn.Module):
    """Fast R-CNN box predictor: global average pooling followed by linear
    classification and box-regression heads."""

    def __init__(self, config, in_channels):
        super(FastRCNNPredictor, self).__init__()
        assert (in_channels is not None)
        num_inputs = in_channels
        num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.cls_score = nn.Linear(num_inputs, num_classes)
        # class-agnostic regression predicts only fg/bg boxes (2 classes)
        num_bbox_reg_classes = (2 if config.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes)
        self.bbox_pred = nn.Linear(num_inputs, (num_bbox_reg_classes * 4))
        # small-gaussian weights, zero biases; regression uses a tighter std
        nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
        nn.init.constant_(self.cls_score.bias, 0)
        nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)
        nn.init.constant_(self.bbox_pred.bias, 0)

    def forward(self, x):
        """Return (class logits, box deltas) for a batch of RoI feature maps."""
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        cls_logit = self.cls_score(x)
        bbox_pred = self.bbox_pred(x)
        return (cls_logit, bbox_pred)
# NOTE(review): the bare `.parametrize(...)` lines below look like stripped
# `@pytest.mark.parametrize` decorators — confirm against the original file.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('val', [0.5, 1, 2])
def test_add_scalar_forward_backward(seed, val, ctx, func_name):
    """Check nnabla's F.add_scalar against the reference `x + val` for
    forward and backward passes via function_tester."""
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [(rng.randn(2, 3, 4).astype(np.float32) * 2)]
    function_tester(rng, F.add_scalar, (lambda x, y: (x + y)), inputs, func_args=[val], ctx=ctx, func_name=func_name)
def deleteImage(request):
    """Delete the uploaded file attached to the request, if any.

    Always responds 'ok', even when no file was attached (preserved for
    backward compatibility with existing callers).

    FIX: removed the unused local `filename` from the original.
    NOTE(review): assumes the uploaded object exposes .delete() (FieldFile-
    like); a plain UploadedFile does not — confirm against the caller.
    """
    uploaded = request.FILES.get('file')
    if uploaded:
        uploaded.delete()
    return HttpResponse('ok')
def evaluate_factual_consistency(args):
    """Score hypothesis lines against grounding lines for factual consistency.

    Reads args.grounding and args.hypo line by line in lockstep, skips pairs
    where both lines are empty, and returns the list of scores produced by
    FactualConsistencyScorer for the remaining pairs.

    FIX: the original called open(...).readlines() twice without ever
    closing the files; both are now managed by a `with` block (iterating
    the file objects directly also avoids materializing all lines).
    """
    scorer = FactualConsistencyScorer(align=args.align)
    scores = []
    with open(args.grounding) as grounding_file, open(args.hypo) as hypo_file:
        for (grounding, hypo) in tqdm(zip(grounding_file, hypo_file)):
            (grounding, hypo) = (grounding.strip(), hypo.strip())
            if (grounding == '') and (hypo == ''):
                continue
            scores.append(scorer.score(grounding=grounding, hypo=hypo, aspect=args.aspect, remove_stopwords=args.remove_stopwords))
    return scores
def inception_v4_ra(cnn, k, l, m, n):
    """Append the Inception-v4 reduction-A module ('incept_v4_ra') to cnn.

    Three parallel branches: a stride-2 max-pool, a stride-2 conv with n
    filters, and a 1x1(k) -> 3x3(l) -> stride-2 3x3(m) tower.
    """
    pool_branch = [('mpool', 3, 3, 2, 2, 'VALID')]
    conv_branch = [('conv', n, 3, 3, 2, 2, 'VALID')]
    tower_branch = [('conv', k, 1, 1), ('conv', l, 3, 3), ('conv', m, 3, 3, 2, 2, 'VALID')]
    cnn.inception_module('incept_v4_ra', [pool_branch, conv_branch, tower_branch])
def save_best_model(path, model, word_encoder, word_pos_encoder, time_delay_encoder, optimizer, type_, file):
    """Persist the state dicts of the model, its encoders, and the optimizer.

    Files are written under <path>/best_model/ as
    'best_model_[<component>_]<type_>_<file>.pt' (the main model has no
    component tag), in the same order as the original implementation.

    FIX: five copy-pasted path constructions collapsed into one loop over
    (tag, component) pairs; the names produced are byte-identical.
    """
    out_dir = os.path.join(path, 'best_model')
    components = {
        '': model,
        'word_encoder_': word_encoder,
        'word_pos_encoder_': word_pos_encoder,
        'time_delay_encoder_': time_delay_encoder,
        'optimizer_': optimizer,
    }
    for (tag, component) in components.items():
        target = os.path.join(out_dir, 'best_model_' + tag + type_ + '_' + file + '.pt')
        torch.save(component.state_dict(), target)
def namedtuple_fieldnames(declaration):
    """Return the named-return field names of a declaration.

    Yields [] when there is at most one return value or when no return has a
    'field_name'; raises ValueError if only some returns are named.
    """
    returns = declaration['returns']
    if len(returns) <= 1:
        return []
    if all('field_name' not in ret for ret in returns):
        return []
    # from here on every return must carry a field name
    names = []
    for ret in returns:
        if 'field_name' not in ret:
            raise ValueError('Unnamed field is not supported by codegen')
        names.append(ret['field_name'])
    return names
def compute_pose(image_dir, annotations_file, savePath):
    """Render per-image pose heatmaps from a keypoint annotation CSV.

    annotations_file: path to a ':'-separated CSV with columns 'name',
    'keypoints_y', 'keypoints_x'. Each row's keypoints are decoded and
    rasterised into a (128, 64) map saved as <savePath>/<name>.npy.
    NOTE(review): image_dir is unused here — confirm whether callers rely
    on the parameter existing.
    """
    annotations_file = pd.read_csv(annotations_file, sep=':')
    annotations_file = annotations_file.set_index('name')
    image_size = (128, 64)
    cnt = len(annotations_file)
    for i in range(cnt):
        print(('processing %d / %d ...' % (i, cnt)))
        row = annotations_file.iloc[i]
        # row.name is the pandas Series name, i.e. the 'name' index value
        name = row.name
        print(savePath, name)
        file_name = os.path.join(savePath, (name + '.npy'))
        kp_array = load_pose_cords_from_strings(row.keypoints_y, row.keypoints_x)
        pose = cords_to_map(kp_array, image_size)
        # quick sanity value: total heat mass of the rendered map
        print(np.sum(pose))
        np.save(file_name, pose)
def _has_4gram_match(ref, pred):
if ((len(ref) < 4) or (len(pred) < 4)):
return False
for i in range((len(ref) - 3)):
for j in range((len(pred) - 3)):
if (ref[i:(i + 4)] == pred[j:(j + 4)]):
return True
return False |
def test_one_word():
    """tree_reader should parse '(FOO) (BAR)' into two single-leaf trees."""
    trees = tree_reader.read_trees('(FOO) (BAR)')
    assert len(trees) == 2
    for tree, expected_label in zip(trees, ('FOO', 'BAR')):
        assert tree.is_leaf()
        assert tree.label == expected_label
def make_open3d_visualiser():
    """Create a 1280x840 Open3D visualiser window named 'test' with lighting
    disabled and very thick lines, and return it."""
    visualiser = o3d.visualization.Visualizer()
    visualiser.create_window(window_name='test', width=1280, height=840, left=0, top=0, visible=True)
    render_option = visualiser.get_render_option()
    render_option.light_on = False
    render_option.line_width = 100.0
    return visualiser
def captioning(audio_path):
    """Generate timestamped captions for an audio file.

    The audio is loaded as a batch of chunks (first tensor dimension) and
    beam search (5 beams) produces one caption per chunk; captions are
    joined as '[start:00-end:00]\\n<caption>\\n' blocks.
    NOTE(review): relies on module-level `model`, `device` and `get_audio`;
    the 10-second chunk length is inferred from the timestamp arithmetic —
    confirm against get_audio.
    """
    audio_tensor = get_audio(audio_path=audio_path)
    if (device is not None):
        audio_tensor = audio_tensor.to(device)
    # inference only: no gradients needed
    with torch.no_grad():
        output = model.generate(samples=audio_tensor, num_beams=5)
    inference = ''
    number_of_chunks = range(audio_tensor.shape[0])
    for (chunk, text) in zip(number_of_chunks, output):
        time = f'[{(chunk * 10)}:00-{((chunk + 1) * 10)}:00]'
        inference += f'''{time}
{text}
'''
    return inference
def download_mwoz_21(destination):
    """Download and unpack the MultiWOZ 2.1 dataset into <destination>/MultiWOZ_21.

    Keeps only data.json and the val/test split lists; the downloaded
    archive's repository folder and macOS metadata are removed.
    """
    archive_path = os.path.join(destination, 'MultiWOZ_21.zip')
    download_file(MULTIWOZ_21_DATASET_URL, archive_path)
    shutil.unpack_archive(archive_path, destination)
    # macOS-built zips ship a metadata folder we do not need
    shutil.rmtree(os.path.join(destination, '__MACOSX'))
    target_dir = os.path.join(destination, 'MultiWOZ_21')
    os.makedirs(target_dir, exist_ok=True)
    extracted_repo = os.path.join(destination, 'MultiWOZ_2.1')
    for file_name in ('data.json', 'valListFile.txt', 'testListFile.txt'):
        shutil.move(os.path.join(extracted_repo, file_name), os.path.join(target_dir, file_name))
    shutil.rmtree(extracted_repo)
class augmentations(object):
    """Default hyper-parameters for the data-augmentation pipeline."""

    def __init__(self):
        # jitter magnitudes and the maximum number of permutation segments
        self.jitter_scale_ratio, self.jitter_ratio = 0.001, 0.001
        self.max_seg = 5
def setup_seed(seed=1024):
    """Seed every RNG in use (random, numpy, torch CPU and all CUDA devices)
    and force deterministic cuDNN kernels for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # deterministic convolution algorithms (may be slower)
    torch.backends.cudnn.deterministic = True
class PartitionTuples_level(PartitionTuples):
    """Partition tuples of a fixed level (number of components) and any size."""

    def __init__(self, level, category=None):
        # level must lie in NN, the semiring of non-negative integers
        if (level not in NN):
            raise ValueError('level must be a non-negative integer')
        if (category is None):
            category = InfiniteEnumeratedSets()
        super().__init__(category=category)
        self._level = level

    def _repr_(self):
        return 'Partition tuples of level {}'.format(self._level)

    def __contains__(self, mu):
        # must be a valid partition tuple AND have exactly this many components
        return (PartitionTuples.__contains__(self, mu) and (len(mu) == self._level))

    def __iter__(self):
        # enumerate by increasing total size; each fixed-size class is finite
        for size in NN:
            for mu in PartitionTuples_level_size(self._level, size):
                (yield self.element_class(self, list(mu)))

    def _an_element_(self):
        # NOTE(review): self.level() is not defined in this snippet —
        # presumably inherited from PartitionTuples and returning _level;
        # confirm.
        return self.element_class(self, tuple(([l] for l in range(self.level()))))
_REGISTRY.register()
class Generator_RPA(nn.Module):
    """Super-resolution generator built from RPA blocks with a global residual
    and pixel-shuffle-style x2 upsampling stages.

    FIX: the original called super(Generator, self).__init__() but no class
    named `Generator` exists here, so constructing the model raised
    NameError. Replaced with the zero-argument super() form.
    """

    def __init__(self, num_in_ch=3, num_out_ch=3, scale=2, num_feat=64, num_block=20):
        super().__init__()
        self.scale = scale
        self.conv1 = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        # trunk of num_block residual pixel-attention blocks
        self.rpa = nn.Sequential(OrderedDict([('rpa{}'.format(i), RPA(num_feat=num_feat)) for i in range(num_block)]))
        # ceil(log2(scale)) x2-upsampling stages reach the requested scale
        num_usblock = ceil(log2(scale))
        self.us = nn.Sequential(OrderedDict([('us{}'.format(i), US(num_feat=num_feat, scale=2)) for i in range(num_usblock)]))
        self.conv2 = nn.Conv2d(num_feat, (num_feat // 2), 3, 1, 1)
        self.conv3 = nn.Conv2d((num_feat // 2), num_out_ch, 3, 1, 1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        """Map a low-resolution image batch to its upscaled reconstruction."""
        z = self.lrelu(self.conv1(x))
        # global residual over the RPA trunk
        z = z + self.rpa(z)
        z = self.us(z)
        z = self.lrelu(self.conv2(z))
        return self.conv3(z)
def log_accuracy(pred_class_logits, gt_classes, topk=(1,)):
    """Log top-k classification accuracy to the event storage.

    pred_class_logits: (N, C) class scores; gt_classes: (N,) label ids.
    Only the first topk entry is written, under the key 'cls_accuracy'.
    """
    bsz = pred_class_logits.size(0)
    maxk = max(topk)
    # indices of the maxk highest-scoring classes, transposed to (maxk, N)
    (_, pred_class) = pred_class_logits.topk(maxk, 1, True, True)
    pred_class = pred_class.t()
    correct = pred_class.eq(gt_classes.view(1, (- 1)).expand_as(pred_class))
    ret = []
    for k in topk:
        # a sample counts as correct@k if its label is anywhere in the top k
        correct_k = correct[:k].view((- 1)).float().sum(dim=0, keepdim=True)
        ret.append(correct_k.mul_((1.0 / bsz)))
    storage = get_event_storage()
    storage.put_scalar('cls_accuracy', ret[0])
class CategoricalVarField(CategoricalDataFrameField):
    """Categorical dataframe field fixed to field_type='var'.

    Thin wrapper: all other constructor arguments are forwarded unchanged to
    CategoricalDataFrameField.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, field_type='var', **kwargs)
class VAEforMNIST(nn.Module):
    """Standard MLP variational autoencoder for flattened 28x28 MNIST images.

    Encoder: 784 -> 400 -> (mu, logvar) of size latent_dim.
    Decoder: latent_dim -> 400 -> 784 with a sigmoid output.

    FIX: `reparameterize` was declared without `self`, so the call
    `self.reparameterize(mu, logvar)` in forward() raised TypeError
    (three arguments passed to a two-parameter function).
    """

    def __init__(self, latent_dim):
        super().__init__()
        self.latent_dim = latent_dim
        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, latent_dim)   # mu head
        self.fc22 = nn.Linear(400, latent_dim)   # logvar head
        self.fc3 = nn.Linear(latent_dim, 400)
        self.fc4 = nn.Linear(400, 784)

    def encode(self, x):
        """Return (mu, logvar) of the approximate posterior q(z|x)."""
        h1 = F.relu(self.fc1(x))
        return (self.fc21(h1), self.fc22(h1))

    def reparameterize(self, mu, logvar):
        """Sample z = mu + sigma * eps with eps ~ N(0, I) (reparameterization trick)."""
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def decode(self, z):
        """Map a latent sample back to pixel space in [0, 1]."""
        h3 = F.relu(self.fc3(z))
        return torch.sigmoid(self.fc4(h3))

    def forward(self, x):
        """Return (reconstruction, mu, logvar) for a batch of images."""
        (mu, logvar) = self.encode(x.view((- 1), 784))
        z = self.reparameterize(mu, logvar)
        return (self.decode(z), mu, logvar)
def instantiate_non_scriptable_remote_module_template():
    """Generate the non-TorchScript variant of the remote-module template.

    All TorchScript-specific template slots (interface class, typed
    signatures, @jit.script decorator) are blanked out so the generated
    module works with arbitrary *args/**kwargs.
    NOTE(review): 'non_sriptable' is presumably a typo for 'non_scriptable';
    left untouched because the generated module name may be referenced
    elsewhere.
    """
    generated_module_name = f'{_FILE_PREFIX}non_sriptable'
    str_dict = dict(assign_module_interface_cls='module_interface_cls = None', args='*args', kwargs='**kwargs', arg_types='*args, **kwargs', arrow_and_return_type='', arrow_and_future_return_type='', jit_script_decorator='')
    return _do_instantiate_remote_module_template(generated_module_name, str_dict)
def register_Ns3FdNetDevice_methods(root_module, cls):
    # Auto-generated (PyBindGen-style) binding registration for
    # ns3::FdNetDevice: constructor, the NetDevice virtual interface,
    # and FdNetDevice-specific accessors (encapsulation mode, file
    # descriptor, start/stop). Do not hand-edit beyond regeneration.
    cls.add_constructor([])
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True)
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True)
    cls.add_method('GetEncapsulationMode', 'ns3::FdNetDevice::EncapsulationMode', [], is_const=True)
    cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_virtual=True)
    cls.add_method('SetEncapsulationMode', 'void', [param('ns3::FdNetDevice::EncapsulationMode', 'mode')])
    cls.add_method('SetFileDescriptor', 'void', [param('int', 'fd')])
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True)
    cls.add_method('SetIsBroadcast', 'void', [param('bool', 'broadcast')], is_virtual=True)
    cls.add_method('SetIsMulticast', 'void', [param('bool', 'multicast')], is_virtual=True)
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True)
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('Start', 'void', [param('ns3::Time', 'tStart')])
    cls.add_method('Stop', 'void', [param('ns3::Time', 'tStop')])
    cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def evaluate(args):
    """Run multi-word-token (MWT) expansion evaluation.

    Loads the trainer from the saved model file, predicts expansions for
    args['eval_file'] (dictionary-only, or seq2seq with an optional
    dictionary ensemble), writes the expanded CoNLL output to
    args['output_file'], and scores it against args['gold_file'] when given.

    FIX: the original passed open(system_pred_file, 'w') directly into the
    writer and never closed the handle; it is now managed by `with`.
    """
    system_pred_file = args['output_file']
    gold_file = args['gold_file']
    model_file = (((args['save_dir'] + '/') + args['save_name']) if (args['save_name'] is not None) else '{}/{}_mwt_expander.pt'.format(args['save_dir'], args['shorthand']))
    use_cuda = args['cuda'] and (not args['cpu'])
    trainer = Trainer(model_file=model_file, use_cuda=use_cuda)
    (loaded_args, vocab) = (trainer.args, trainer.vocab)
    # paths/ids must come from the current invocation, not the checkpoint
    for k in args:
        if k.endswith('_dir') or k.endswith('_file') or (k in ['shorthand']):
            loaded_args[k] = args[k]
    print('max_dec_len:', loaded_args['max_dec_len'])
    print('Loading data with batch size {}...'.format(args['batch_size']))
    batch = DataLoader(args['eval_file'], args['batch_size'], loaded_args, vocab=vocab, evaluation=True)
    if len(batch) > 0:
        dict_preds = trainer.predict_dict(batch.conll.get_mwt_expansion_cands())
        if loaded_args['dict_only']:
            preds = dict_preds
        else:
            print('Running the seq2seq model...')
            preds = []
            for (i, b) in enumerate(batch):
                preds += trainer.predict(b)
            # optionally let dictionary lookups override seq2seq output
            if loaded_args.get('ensemble_dict', False):
                preds = trainer.ensemble(batch.conll.get_mwt_expansion_cands(), preds)
    else:
        preds = []
    with open(system_pred_file, 'w') as pred_out:
        batch.conll.write_conll_with_mwt_expansions(preds, pred_out)
    if gold_file is not None:
        (_, _, score) = scorer.score(system_pred_file, gold_file)
        print('MWT expansion score:')
        print('{} {:.2f}'.format(args['shorthand'], (score * 100)))
def main():
    """Detection-training entry point (mmdetection-style).

    Parses CLI args, loads the mmcv Config, optionally sets up distributed
    training, builds the detector/dataset/optimizer/schedulers, and runs
    train_model.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # CLI work_dir overrides the config's value
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    pathlib.Path(cfg.work_dir).mkdir(parents=True, exist_ok=True)
    cfg.gpus = args.gpus
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    logger = get_root_logger(cfg.work_dir)
    logger.info('Distributed training: {}'.format(distributed))
    if (args.seed is not None):
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    if distributed:
        model = MMDistributedDataParallel(model.cuda())
    else:
        model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
    train_dataset = get_dataset(cfg.data.train)
    optimizer = build_optimizer(model, cfg.optimizer)
    train_loader = build_dataloader(train_dataset, cfg.data.imgs_per_gpu, cfg.data.workers_per_gpu, dist=distributed)
    # fresh run: no checkpoint-resume handling here
    start_epoch = it = 0
    last_epoch = (- 1)
    (lr_scheduler, lr_warmup_scheduler) = build_scheduler(optimizer, total_iters_each_epoch=len(train_loader), total_epochs=cfg.total_epochs, last_epoch=last_epoch, optim_cfg=cfg.optimizer, lr_cfg=cfg.lr_config)
    logger.info('Start training')
    train_model(model, optimizer, train_loader, lr_scheduler=lr_scheduler, optim_cfg=cfg.optimizer, start_epoch=start_epoch, total_epochs=cfg.total_epochs, start_iter=it, rank=args.local_rank, logger=logger, ckpt_save_dir=cfg.work_dir, lr_warmup_scheduler=lr_warmup_scheduler, ckpt_save_interval=cfg.checkpoint_config.interval, max_ckpt_save_num=args.max_ckpt_save_num, log_interval=cfg.log_config.interval)
    logger.info('End training')
class FunnelModelTester():
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act='gelu_new', hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, num_labels=3, num_choices=4, scope=None, base=False):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.block_sizes = block_sizes
self.num_decoder_layers = num_decoder_layers
self.d_model = d_model
self.n_head = n_head
self.d_head = d_head
self.d_inner = d_inner
self.hidden_act = hidden_act
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = 2
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.num_attention_heads = n_head
self.hidden_size = self.d_model
self.num_hidden_layers = (sum(self.block_sizes) + (0 if base else self.num_decoder_layers))
if (not base):
self.expected_num_hidden_layers = (self.num_hidden_layers + 2)
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
fake_token_labels = ids_tensor([self.batch_size, self.seq_length], 1)
config = self.get_config()
return (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels)
def get_config(self):
return FunnelConfig(vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size)
def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
model = FunnelModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
model.config.truncate_seq = False
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
model.config.separate_cls = False
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
model = FunnelBaseModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
model.config.truncate_seq = False
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))
model.config.separate_cls = False
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
config.num_labels = self.num_labels
model = FunnelForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=fake_token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
    """Check FunnelForMaskedLM returns per-token vocabulary logits."""
    model = FunnelForMaskedLM(config=config)
    model.to(torch_device)
    model.eval()
    output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    expected_shape = (self.batch_size, self.seq_length, self.vocab_size)
    self.parent.assertEqual(output.logits.shape, expected_shape)
def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
    """Check FunnelForSequenceClassification emits one logit per class."""
    config.num_labels = self.num_labels
    model = FunnelForSequenceClassification(config=config)
    model.to(torch_device)
    model.eval()
    output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
    self.parent.assertEqual(output.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
    """Check FunnelForMultipleChoice scores every candidate choice."""
    config.num_choices = self.num_choices
    model = FunnelForMultipleChoice(config=config)
    model.to(torch_device)
    model.eval()

    def tile_choices(tensor):
        # Repeat a flat (batch, seq) input once per choice -> (batch, num_choices, seq).
        return tensor.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()

    output = model(tile_choices(input_ids), attention_mask=tile_choices(input_mask), token_type_ids=tile_choices(token_type_ids), labels=choice_labels)
    self.parent.assertEqual(output.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
    """Check FunnelForTokenClassification returns per-token class logits."""
    config.num_labels = self.num_labels
    model = FunnelForTokenClassification(config=config)
    model.to(torch_device)
    model.eval()
    output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    expected_shape = (self.batch_size, self.seq_length, self.num_labels)
    self.parent.assertEqual(output.logits.shape, expected_shape)
def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels):
    """Check FunnelForQuestionAnswering emits per-token start and end logits."""
    model = FunnelForQuestionAnswering(config=config)
    model.to(torch_device)
    model.eval()
    output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
    span_shape = (self.batch_size, self.seq_length)
    self.parent.assertEqual(output.start_logits.shape, span_shape)
    self.parent.assertEqual(output.end_logits.shape, span_shape)
def prepare_config_and_inputs_for_common(self):
    """Repackage the tester's config/inputs into the (config, inputs_dict) form the shared model tests consume."""
    # Only the first four items are used; the trailing label tensors are dropped here.
    (config, input_ids, token_type_ids, input_mask, *_labels) = self.prepare_config_and_inputs()
    inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
    return (config, inputs_dict)
class DivergenceEstimator(EntropyEstimator, ABC, metaclass=DivergenceEstimatorType):
def __init__(self, entropy=Nsb()):
super(DivergenceEstimator, self).__init__()
self.input_data_ndim = 2
estimator_name = type(entropy).__name__
if (estimator_name not in entropy_estimators):
raise NddError(('%s is not a valid entropy estimator' % estimator_name))
self.entropy_estimator = entropy
def algorithm(self):
return self.entropy_estimator.__class__.__name__
def fit(self, nk, k=None, zk=None): |
class ClipOutputFeatures(ModelOutput):
    """Container for CLIP image/text embeddings, raw and projected.

    All fields default to None; presumably only the modalities that were
    actually encoded are filled in by the caller — verify against the
    encoder code that constructs this output.
    """

    # Image-encoder embeddings before the projection head.
    image_embeds: Optional[torch.FloatTensor] = None
    # Image embeddings after projection (into the shared image-text space — TODO confirm).
    image_embeds_proj: Optional[torch.FloatTensor] = None
    # Text-encoder embeddings before the projection head.
    text_embeds: Optional[torch.FloatTensor] = None
    # Text embeddings after projection (into the shared image-text space — TODO confirm).
    text_embeds_proj: Optional[torch.FloatTensor] = None
class Trainer():
    """Drive classifier training and evaluation for a single model.

    Wires together the optimizer, LR scheduler, loss criterion and
    checkpointing from ``cfg``; metric bookkeeping is delegated to the
    supplied ``evaluator``.
    """

    def __init__(self, cfg: CfgNode, model: nn.Module, evaluator: Evaluator, device: torch.device) -> None:
        self.cfg = cfg
        self.model = model
        self.device = device
        logger.info('\tSetting up the optimizer...')
        self.optimizer = make_optimizer([self.model], cfg.SOLVER)
        self.scheduler = make_scheduler(self.optimizer, cfg.SOLVER)
        self.cls_criterion = build_loss(self.cfg)
        self.checkpointer = Checkpointer(self.model, save_dir=cfg.OUTPUT_DIR, save_to_disk=True)
        if (len(cfg.MODEL.WEIGHT_PATH) > 0):
            # Load pretrained weights but exclude the classification head's last
            # layer so it stays freshly initialized for the new task.
            checkpointables = [key for key in self.checkpointer.checkpointables if (key not in ['head.last_layer.bias', 'head.last_layer.weight'])]
            self.checkpointer.load(cfg.MODEL.WEIGHT_PATH, checkpointables)
            logger.info(f'Model weight loaded from {cfg.MODEL.WEIGHT_PATH}')
        self.evaluator = evaluator
        self.cpu_device = torch.device('cpu')

    def forward_one_batch(self, inputs, targets, is_train):
        """Run one forward (and, when training, backward+step) pass.

        Returns (loss, outputs); returns (-1, -1) when the loss is inf/nan so
        the caller can skip or abort. Note: requires ``self.cls_weights`` to
        have been set (done in ``train_classifier``).
        """
        inputs = inputs.to(self.device, non_blocking=True)
        targets = targets.to(self.device, non_blocking=True)
        if self.cfg.DBG:
            logger.info(f'shape of inputs: {inputs.shape}')
            logger.info(f'shape of targets: {targets.shape}')
        with torch.set_grad_enabled(is_train):
            outputs = self.model(inputs)
            if self.cfg.DBG:
                logger.info('shape of model output: {}, targets: {}'.format(outputs.shape, targets.shape))
            if (self.cls_criterion.is_local() and is_train):
                # The local criterion also consumes the model and raw inputs.
                self.model.eval()
                loss = self.cls_criterion(outputs, targets, self.cls_weights, self.model, inputs)
            elif self.cls_criterion.is_local():
                # Local criterion at eval time: return a dummy loss with the outputs.
                return (torch.tensor(1), outputs)
            else:
                loss = self.cls_criterion(outputs, targets, self.cls_weights)
            if (loss == float('inf')):
                logger.info('encountered infinite loss, skip gradient updating for this batch!')
                return ((- 1), (- 1))
            elif torch.isnan(loss).any():
                logger.info('encountered nan loss, skip gradient updating for this batch!')
                return ((- 1), (- 1))
        if is_train:
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        return (loss, outputs)

    def get_input(self, data):
        """Extract (inputs, labels) from a dataloader batch dict, converting numpy arrays to tensors in place if needed."""
        if (not isinstance(data['image'], torch.Tensor)):
            for (k, v) in data.items():
                data[k] = torch.from_numpy(v)
        inputs = data['image'].float()
        labels = data['label']
        return (inputs, labels)

    def train_classifier(self, train_loader, val_loader, test_loader):
        """Train for up to ``cfg.SOLVER.TOTAL_EPOCH`` epochs.

        Evaluates on val (and test, if given) after every epoch and stops
        early after ``cfg.SOLVER.PATIENCE`` epochs without a val top-1
        improvement. Returns None early if a batch produces inf/nan loss.
        """
        self.model.eval()
        total_epoch = self.cfg.SOLVER.TOTAL_EPOCH
        total_data = len(train_loader)
        best_epoch = (- 1)
        best_metric = 0
        log_interval = self.cfg.SOLVER.LOG_EVERY_N
        losses = AverageMeter('Loss', ':.4e')
        batch_time = AverageMeter('Time', ':6.3f')
        data_time = AverageMeter('Data', ':6.3f')
        # Class weights are shared with forward_one_batch via the instance.
        self.cls_weights = train_loader.dataset.get_class_weights(self.cfg.DATA.CLASS_WEIGHTS_TYPE)
        patience = 0
        for epoch in range(total_epoch):
            losses.reset()
            batch_time.reset()
            data_time.reset()
            lr = self.scheduler.get_lr()[0]
            logger.info('Training {} / {} epoch, with learning rate {}'.format((epoch + 1), total_epoch, lr))
            self.model.train()
            end = time.time()
            for (idx, input_data) in enumerate(train_loader):
                if (self.cfg.DBG and (idx == 20)):
                    # Debug mode: only run a few batches per epoch.
                    break
                (X, targets) = self.get_input(input_data)
                data_time.update((time.time() - end))
                (train_loss, _) = self.forward_one_batch(X, targets, True)
                if (train_loss == (- 1)):
                    # inf/nan loss signalled by forward_one_batch: abort training.
                    return None
                losses.update(train_loss.item(), X.shape[0])
                batch_time.update((time.time() - end))
                end = time.time()
                if (((idx + 1) % log_interval) == 0):
                    seconds_per_batch = batch_time.val
                    # ETA = remaining batches this epoch + all batches of remaining epochs.
                    eta = datetime.timedelta(seconds=int(((seconds_per_batch * ((total_data - idx) - 1)) + ((seconds_per_batch * total_data) * ((total_epoch - epoch) - 1)))))
                    logger.info((('\tTraining {}/{}. train loss: {:.4f},'.format((idx + 1), total_data, train_loss) + '\t{:.4f} s / batch. (data: {:.2e}). ETA={}, '.format(seconds_per_batch, data_time.val, str(eta))) + 'max mem: {:.1f} GB '.format(gpu_mem_usage())))
            logger.info((('Epoch {} / {}: '.format((epoch + 1), total_epoch) + 'avg data time: {:.2e}, avg batch time: {:.4f}, '.format(data_time.avg, batch_time.avg)) + 'average train loss: {:.4f}'.format(losses.avg)))
            self.scheduler.step()
            self.model.eval()
            self.save_prompt((epoch + 1))
            self.evaluator.update_iteration(epoch)
            self.eval_classifier(val_loader, 'val', (epoch == (total_epoch - 1)))
            if (test_loader is not None):
                self.eval_classifier(test_loader, 'test', (epoch == (total_epoch - 1)))
            t_name = ('val_' + val_loader.dataset.name)
            try:
                curr_acc = self.evaluator.results[f'epoch_{epoch}']['classification'][t_name]['top1']
            except KeyError:
                return
            if (curr_acc > best_metric):
                best_metric = curr_acc
                best_epoch = (epoch + 1)
                logger.info(f'Best epoch {best_epoch}: best metric: {best_metric:.3f}')
                patience = 0
            else:
                patience += 1
            if (patience >= self.cfg.SOLVER.PATIENCE):
                logger.info('No improvement. Breaking out of loop.')
                break
        if self.cfg.MODEL.SAVE_CKPT:
            Checkpointer(self.model, save_dir=self.cfg.OUTPUT_DIR, save_to_disk=True).save('last_model')

    # FIX: was a bare `_grad()` call at class-body level (NameError at class
    # creation) — clearly a mangled `@torch.no_grad()` decorator.
    @torch.no_grad()
    def save_prompt(self, epoch):
        """Dump prompt embeddings to ``prompt_ep{epoch}.pth`` when configured to save per epoch (prompt-tuning ViT variants only)."""
        if self.cfg.MODEL.PROMPT.SAVE_FOR_EACH_EPOCH:
            if ((self.cfg.MODEL.TYPE in ['vit', 'ssl-vit']) and ('prompt' in self.cfg.MODEL.TRANSFER_TYPE)):
                prompt_embds = self.model.prompt_embeddings.cpu().numpy()
                out = {'shallow_prompt': prompt_embds}
                if self.cfg.MODEL.PROMPT.DEEP:
                    deep_embds = self.model.enc.transformer.deep_prompt_embeddings.cpu().numpy()
                    out['deep_prompt'] = deep_embds
                torch.save(out, os.path.join(self.cfg.OUTPUT_DIR, f'prompt_ep{epoch}.pth'))

    # FIX: was a bare `_grad()` call at class-body level (NameError at class
    # creation) — clearly a mangled `@torch.no_grad()` decorator.
    @torch.no_grad()
    def eval_classifier(self, data_loader, prefix, save=False):
        """Evaluate on ``data_loader``, log timings/loss, feed logits to the evaluator, and optionally save logits+targets to disk."""
        batch_time = AverageMeter('Time', ':6.3f')
        data_time = AverageMeter('Data', ':6.3f')
        losses = AverageMeter('Loss', ':.4e')
        log_interval = self.cfg.SOLVER.LOG_EVERY_N
        test_name = ((prefix + '_') + data_loader.dataset.name)
        total = len(data_loader)
        total_logits = []
        total_targets = []
        for (idx, input_data) in enumerate(data_loader):
            end = time.time()
            (X, targets) = self.get_input(input_data)
            data_time.update((time.time() - end))
            if self.cfg.DBG:
                logger.info('during eval: {}'.format(X.shape))
            (loss, outputs) = self.forward_one_batch(X, targets, False)
            if (loss == (- 1)):
                return
            losses.update(loss, X.shape[0])
            batch_time.update((time.time() - end))
            if (((idx + 1) % log_interval) == 0):
                logger.info(('\tTest {}/{}. loss: {:.3f}, {:.4f} s / batch. (data: {:.2e})'.format((idx + 1), total, losses.val, batch_time.val, data_time.val) + 'max mem: {:.5f} GB '.format(gpu_mem_usage())))
            total_targets.extend(list(targets.numpy()))
            total_logits.append(outputs)
        logger.info(((f'Inference ({prefix}):' + 'avg data time: {:.2e}, avg batch time: {:.4f}, '.format(data_time.avg, batch_time.avg)) + 'average loss: {:.4f}'.format(losses.avg)))
        if (self.model.side is not None):
            logger.info('--> side tuning alpha = {:.4f}'.format(self.model.side_alpha))
        joint_logits = torch.cat(total_logits, dim=0).cpu().numpy()
        self.evaluator.classify(joint_logits, total_targets, test_name, self.cfg.DATA.MULTILABEL)
        if (save and self.cfg.MODEL.SAVE_CKPT):
            out = {'targets': total_targets, 'joint_logits': joint_logits}
            out_path = os.path.join(self.cfg.OUTPUT_DIR, f'{test_name}_logits.pth')
            torch.save(out, out_path)
            logger.info(f'Saved logits and targets for {test_name} at {out_path}')
# FIX: the two decorator lines had lost their `@pytest.mark` prefix, leaving
# invalid top-level `.parametrize(...)` syntax. Assumes `pytest` is imported
# at module top, as is standard for this test file — confirm.
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
def test_sigmoid_double_backward(seed, ctx, func_name):
    """Verify second-order gradients of F.sigmoid via the backward-function tester."""
    from nbla_test_utils import cap_ignore_region, backward_function_tester
    rng = np.random.RandomState(seed)
    # Inputs scaled by 2 to cover both saturated and linear regions of sigmoid.
    inputs = [(rng.randn(2, 3, 4).astype(np.float32) * 2)]
    backward_function_tester(rng, F.sigmoid, inputs=inputs, func_args=[], func_kwargs={}, atol_accum=0.001, dstep=0.001, ctx=ctx)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.