code stringlengths 101 5.91M |
|---|
def transform_epbcs(adict, prefix='epbc'):
    """Convert a dict of ePBC definitions into keyword-named Struct configs.

    Tuple values are expanded positionally — (region, dofs, match) for
    3-tuples, (region, times, dofs, match) otherwise; any non-tuple value
    goes through transform_to_struct_1(). Result keys embed ``prefix`` and,
    for tuple entries, the enumeration index to keep keys unique.
    """
    out = {}
    for idx, (key, conf) in enumerate(six.iteritems(adict)):
        if not isinstance(conf, tuple):
            struct = transform_to_struct_1(conf)
            out['%s_%s' % (prefix, struct.name)] = struct
            continue
        fields = (['region', 'dofs', 'match'] if len(conf) == 3
                  else ['region', 'times', 'dofs', 'match'])
        struct = tuple_to_conf(key, conf, fields)
        out['%s_%s__%d' % (prefix, struct.name, idx)] = struct
    return out
def filter_tapaco(cosine_low=0.0, cosine_high=0.8, edit_high=70, diff_ratio=1.0, min_len=5):
    """Filter TaPaCo paraphrase pairs by cosine score, edit distance and length.

    Reads the scored training TSV and keeps (text, paraphrase) pairs whose
    cosine score lies in [cosine_low, cosine_high], whose relative word-count
    difference is below diff_ratio, whose longer side exceeds min_len words,
    and whose edit distance is below edit_high. Survivors are written to a
    tab-separated file; returns (pairs, output_path).
    """
    kept = []
    with open('../data/processed_datasets/tapaco/tapaco_train_score.tsv') as src:
        for raw in src:
            text, paraphrase, cosine_score, edit_distance = raw.rstrip('\n').split('\t')
            n_text = len(text.split())
            n_para = len(paraphrase.split())
            longest = max(n_text, n_para)
            ratio = abs(n_text - n_para) / longest
            score_ok = cosine_low <= float(cosine_score) <= cosine_high
            if score_ok and ratio < diff_ratio and longest > min_len and int(edit_distance) < edit_high:
                kept.append((text, paraphrase))
    filtered = '../paraphraser/data/processed_datasets/tapaco/tapaco.txt'
    with open(filtered, 'w') as dst:
        for a, b in kept:
            dst.write(a + '\t' + b + '\n')
    return (kept, filtered)
class DummyAlgo():
    """Test double for an offline-RL algorithm interface.

    ``predict`` verifies it is called with exactly ``ref_x`` and answers
    ``ref_y``; every other interface member deliberately raises
    NotImplementedError.

    NOTE(review): gamma/observation_scaler/reward_scaler read like property
    accessors — their decorators may have been stripped from this copy;
    confirm against the original interface.
    """

    def __init__(self, action_size: int, ref_x: NDArray, ref_y: NDArray, action_scaler: Optional[ActionScaler]=None):
        self.action_size = action_size
        # Reference input/output pair echoed by predict().
        self.ref_x = ref_x
        self.ref_y = ref_y
        self.action_scaler = action_scaler

    def predict(self, x: Observation) -> NDArray:
        # The dummy only answers for the exact reference input.
        assert np.all((x == self.ref_x))
        return self.ref_y

    def predict_value(self, x: Observation, action: NDArray) -> NDArray:
        raise NotImplementedError

    def sample_action(self, x: Observation) -> NDArray:
        raise NotImplementedError

    def gamma(self) -> float:
        raise NotImplementedError

    def observation_scaler(self) -> Optional[ObservationScaler]:
        raise NotImplementedError

    def reward_scaler(self) -> Optional[RewardScaler]:
        raise NotImplementedError
def parse(exit_code, log, output):
    """Parse a tool's exit code and log lines into (findings, infos, errors, fails).

    Scans the log for FINDING headers, multi-line TRANSACTION dumps (collected
    until a blank line, then literal_eval'd into the current finding's
    'exploit' list) and a FINISHED marker.
    """
    (findings, infos) = ([], set())
    # Derive generic errors/fails from the exit code and the relevant log lines.
    cleaned_log = filter(is_relevant, log)
    (errors, fails) = sb.parse_utils.errors_fails(exit_code, cleaned_log)
    # Exit code 1 is apparently expected from this tool, hence not an error.
    errors.discard('EXIT_CODE_1')
    analysis_completed = False
    in_tx = False  # True while accumulating a multi-line transaction dump
    for line in log:
        if in_tx:
            if line:
                # NOTE(review): tx_dict and finding are only bound by earlier
                # iterations; a TRANSACTION before the first FINDING (or at the
                # very start of the log) would raise NameError — confirm the
                # tool's log format guarantees the ordering.
                tx_dict += line
            else:
                # Blank line terminates the transaction dump.
                in_tx = False
                try:
                    tx = ast.literal_eval(tx_dict)
                    if (not ('exploit' in finding)):
                        finding['exploit'] = []
                    finding['exploit'].append(tx)
                except Exception:
                    # Malformed transaction dumps are silently skipped.
                    pass
        m = TRANSACTION.match(line)
        if m:
            in_tx = True
            tx_dict = ''
            continue
        m = FINDING.match(line)
        if m:
            finding = {'name': m[1]}
            findings.append(finding)
            continue
        if FINISHED.match(line):
            analysis_completed = True
    if (log and (not analysis_completed)):
        infos.add('analysis incomplete')
    if ((not fails) and (not errors)):
        # NOTE(review): this marks runs with *no* detected errors/fails as
        # failed — presumably relying on EXIT_CODE_1 (discarded above) being
        # the tool's normal success signal; verify against the tool's exit
        # code conventions.
        fails.add('execution failed')
    return (findings, infos, errors, fails)
def test_case_5():
    """Auto-generated regression test (Pynguin style) pinning Queue's observed behavior."""
    int_0 = 1235
    queue_0 = module_0.Queue(int_0)
    # A fresh queue exposes its capacity, zeroed indices and an array buffer.
    assert (f'{type(queue_0).__module__}.{type(queue_0).__qualname__}' == 'queue_example.Queue')
    assert (queue_0.max == 1235)
    assert (queue_0.head == 0)
    assert (queue_0.tail == 0)
    assert (queue_0.size == 0)
    assert (f'{type(queue_0.data).__module__}.{type(queue_0.data).__qualname__}' == 'array.array')
    assert (len(queue_0.data) == 1235)
    queue_1 = module_0.Queue(int_0)
    assert (queue_1.head == 0)
    assert (queue_1.tail == 0)
    assert (queue_1.size == 0)
    bool_0 = queue_1.empty()
    # NOTE(review): empty() returning False on a freshly constructed queue is
    # the recorded behavior of the implementation under test — it looks
    # inverted; confirm against queue_example.Queue.empty.
    assert (bool_0 is False)
    int_1 = 4904
    int_2 = 3504
    bool_1 = queue_0.empty()
    assert (bool_1 is False)
    queue_2 = module_0.Queue(int_2)
    assert (queue_2.head == 0)
    assert (queue_2.tail == 0)
    assert (queue_2.size == 0)
    # Enqueue advances tail and size by one.
    bool_2 = queue_2.enqueue(int_1)
    assert (bool_2 is True)
    assert (queue_2.tail == 1)
    assert (queue_2.size == 1)
# Fix: the decorator line was truncated to a bare `.parametrize(...)`, which is
# a syntax error; restore the `@pytest.mark.` prefix.
@pytest.mark.parametrize('lil_container', LIL_CONTAINERS)
def test_error(lil_container):
    """SVC.fit must reject X/y of mismatched lengths, then fit cleanly on consistent data."""
    clf = svm.SVC()
    X_sp = lil_container(X)
    # Drop the last label so X and y lengths disagree.
    Y2 = Y[:(- 1)]
    with pytest.raises(ValueError):
        clf.fit(X_sp, Y2)
    # A consistent y must still train and reproduce the known prediction.
    clf.fit(X_sp, Y)
    assert_array_equal(clf.predict(T), true_result)
class WindowDataParameter(_message.Message):
    """Generated protocol-buffer message class (legacy Python 2 style).

    The metaclass wires this class to the protobuf reflection machinery; the
    DESCRIPTOR carries the message's field schema. Do not edit by hand.
    """
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _WINDOWDATAPARAMETER
def julia_plot(f=None, **kwds):
    """Plot the Julia set of ``f`` (default ``f = z^2 + c``), optionally interactively.

    Keyword options popped from ``kwds``: period, mandelbrot, point_color,
    x_center, y_center, image_width, max_iteration, pixel_count, base_color,
    level_sep, number_of_colors, interact.

    Returns a static plot (via general_julia / julia_helper / fast_julia_plot)
    or, with interact=True, an ipywidgets interactive widget.
    """
    period = kwds.pop('period', None)
    mandelbrot = kwds.pop('mandelbrot', True)
    point_color = kwds.pop('point_color', 'tomato')
    x_center = kwds.pop('x_center', 0.0)
    y_center = kwds.pop('y_center', 0.0)
    image_width = kwds.pop('image_width', 4.0)
    max_iteration = kwds.pop('max_iteration', 500)
    pixel_count = kwds.pop('pixel_count', 500)
    base_color = kwds.pop('base_color', 'steelblue')
    level_sep = kwds.pop('level_sep', 1)
    number_of_colors = kwds.pop('number_of_colors', 30)
    interacts = kwds.pop('interact', False)
    f_is_default_after_all = None
    if period:
        # Pick a random c whose critical orbit has the requested period, via
        # the roots of the dynatomic polynomial of z^2 + c.
        R = PolynomialRing(CC, 'c')
        c = R.gen()
        (x, y) = ProjectiveSpace(R, 1, 'x,y').gens()
        F = DynamicalSystem([((x ** 2) + (c * (y ** 2))), (y ** 2)])
        L = F.dynatomic_polynomial(period).subs({x: 0, y: 1}).roots(ring=CC)
        c = L[randint(0, (len(L) - 1))][0]
    base_color = Color(base_color)
    point_color = Color(point_color)
    # Slider step size for the interactive widgets.
    EPS = 1e-05
    if ((f is not None) and (period is None)):
        # Coerce f into a complex polynomial in z, rejecting rational functions.
        S = PolynomialRing(CC, names='z')
        z = S.gen()
        try:
            f_poly = S(f)
        except TypeError:
            R = f.parent()
            if (not (R.is_integral_domain() and (CC.is_subring(R) or CDF.is_subring(R)))):
                raise ValueError('given `f` must be a complex polynomial')
            raise NotImplementedError('Julia sets not implemented for rational functions')
        # If f is exactly z^2 + c for a constant c, fall through to the fast path.
        if ((f_poly - (z * z)) in CC):
            f_is_default_after_all = True
            c = (f_poly - (z * z))
        elif interacts:
            raise NotImplementedError('The interactive plot is only implemented for polynomials of the form f = z^2 + c.')
        else:
            return general_julia(f_poly, x_center, y_center, image_width, max_iteration, pixel_count, level_sep, number_of_colors, base_color)
    if (f_is_default_after_all or (f is None) or (period is not None)):
        # Default parameter when neither f nor period was supplied.
        if ((not f_is_default_after_all) and (period is None)):
            c = (- 1)
        c = CC(c)
        c_real = c.real()
        c_imag = c.imag()
        if interacts:
            from ipywidgets.widgets import FloatSlider, IntSlider, ColorPicker, interact
            widgets = dict(c_real=FloatSlider(min=(- 2.0), max=2.0, step=EPS, value=c_real, description='Real c'), c_imag=FloatSlider(min=(- 2.0), max=2.0, step=EPS, value=c_imag, description='Imag c'), x_center=FloatSlider(min=(- 1.0), max=1.0, step=EPS, value=x_center, description='Real center'), y_center=FloatSlider(min=(- 1.0), max=1.0, step=EPS, value=y_center, description='Imag center'), image_width=FloatSlider(min=EPS, max=4.0, step=EPS, value=image_width, description='Width'), max_iteration=IntSlider(min=0, max=1000, value=max_iteration, description='Iterations'), pixel_count=IntSlider(min=10, max=1000, value=pixel_count, description='Pixels'), level_sep=IntSlider(min=1, max=20, value=level_sep, description='Color sep'), color_num=IntSlider(min=1, max=100, value=number_of_colors, description='# Colors'), base_color=ColorPicker(value=base_color.html_color(), description='Base color'))
            if mandelbrot:
                # julia_helper additionally marks the point c on a Mandelbrot inset.
                widgets['point_color'] = ColorPicker(value=point_color.html_color(), description='Point color')
                return interact(**widgets).widget(julia_helper)
            else:
                return interact(**widgets).widget(fast_julia_plot)
        elif mandelbrot:
            return julia_helper(c_real, c_imag, x_center, y_center, image_width, max_iteration, pixel_count, level_sep, number_of_colors, base_color, point_color)
        else:
            return fast_julia_plot(c_real, c_imag, x_center, y_center, image_width, max_iteration, pixel_count, level_sep, number_of_colors, base_color)
class ExteriorAlgebraCoboundary(ExteriorAlgebraDifferential):
    """Coboundary operator d on an exterior algebra, defined by structure coefficients."""

    def __init__(self, E, s_coeff):
        """Precompute the coboundary image of each generator from ``s_coeff``.

        ``s_coeff`` maps index pairs (i, j) to bracket expansions; keys are
        normalized to increasing order (swapping flips the sign).
        """
        self._cos_coeff = {}
        zero = E.zero()
        B = E.basis()
        for (k, v) in dict(s_coeff).items():
            if (k[0] > k[1]):
                # Antisymmetry: reordering the pair negates the coefficient.
                k = sorted(k)
                v = (- v)
            k = B[FrozenBitset(k)]
            for (m, c) in v:
                self._cos_coeff[m] = (self._cos_coeff.get(m, zero) + (c * k))
        ExteriorAlgebraDifferential.__init__(self, E, s_coeff)

    def _repr_type(self):
        return 'Coboundary'

    def _on_basis(self, m):
        """Apply the coboundary to the basis monomial ``m`` (an iterable of generator indices)."""
        E = self.domain()
        cc = self._cos_coeff
        tot = E.zero()
        # Replace each generator i of m by its coboundary image, with the
        # alternating sign (-1)^position; the remaining generators stay in order.
        for (sgn, i) in enumerate(m):
            k = FrozenBitset((i,))
            if (k in cc):
                below = tuple([j for j in m if (j < i)])
                above = tuple([j for j in m if (j > i)])
                if (not below):
                    below = E.one()
                else:
                    below = E.monomial(FrozenBitset(below))
                if (not above):
                    above = E.one()
                else:
                    above = E.monomial(FrozenBitset(above))
                tot += (((((- 1) ** sgn) * below) * cc[k]) * above)
        return tot

    # Fix: a stray bare `_method` token sat here (the residue of a stripped
    # decorator, most likely `@cached_method`); as a bare name expression at
    # class scope it raised NameError when the class was created. Removed.
    # TODO(review): restore `@cached_method` if sage.misc.cachefunc is
    # imported at the top of this file.
    def chain_complex(self, R=None):
        """Return the cochain complex of the exterior algebra over ``R``.

        Degrees run 0..n with the coboundary as the degree +1 map; matrices
        are built column-by-column from ``_on_basis`` coefficients.
        """
        from sage.homology.chain_complex import ChainComplex
        from sage.matrix.constructor import Matrix
        E = self.domain()
        n = E.ngens()
        if (R is None):
            R = E.base_ring()
        if (n == 0):
            # Degenerate algebra: a single empty differential.
            return ChainComplex({(- 1): Matrix(R, [[]])}, degree=1)
        basis_by_deg = {deg: [] for deg in range((n + 1))}
        for b in E.basis().keys():
            basis_by_deg[len(b)].append(b)
        data = {}
        basis = basis_by_deg[0]
        for deg in range(n):
            next_basis = sorted(basis_by_deg[(deg + 1)])
            mat = []
            for b in basis:
                ret = self._on_basis(b)
                try:
                    mat.append([ret.coefficient(p) for p in next_basis])
                except AttributeError:
                    # ret degenerated to a base-ring scalar (no coefficient()).
                    mat.append(([E.base_ring()(ret)] * len(next_basis)))
            data[deg] = Matrix(mat).transpose().change_ring(R)
            basis = next_basis
        return ChainComplex(data, degree=1)
class AdamDictionary(Dictionary[Dict[(str, Set[str])]]):
    """Dictionary mapping ADAM abbreviations to their long-form synonyms."""

    def __init__(self, trove_path: str, target_concepts: Collection[str]):
        super().__init__(trove_path, 'AdamDictionary')
        # Long forms (possibly lowercased) that we keep; everything else is dropped.
        self.target_concepts = target_concepts

    def get_url(self) -> str:
        # Fix: the original line read ``return '`` — an unterminated string
        # literal (the URL was stripped from this copy), which is a syntax
        # error. Returning '' keeps the module importable.
        # TODO(review): restore the real ADAM database download URL here.
        return ''

    def load(self) -> Dict[(str, Set[str])]:
        """Parse the bundled adam_database file into {abbreviation: {long forms}}.

        Skips comment lines, entries scoring below 0.5, and long forms not in
        ``target_concepts``. Raises RuntimeError if the tar lacks the file.
        """
        synset: Dict[(str, Set[str])] = collections.defaultdict(set)
        with tarfile.open(self.full_path) as f:
            possible_adam_file: Optional[BinaryIO] = cast(BinaryIO, f.extractfile('adam_database'))
            if (possible_adam_file is None):
                raise RuntimeError('Could not find the adam_database file within the downloaded tar')
            with io.TextIOWrapper(possible_adam_file) as adam_file:
                for (i, line) in enumerate(adam_file):
                    if (line[0] == '#'):
                        # Header/comment line.
                        continue
                    (pref_abbrv, alt_abbrv, long_form, score, num) = line.strip().split('\t')
                    # Fields carry ':'-suffixed annotations; keep the bare form.
                    long_form = long_form.split(':')[0]
                    alt_abbrv = alt_abbrv.split(':')[0]
                    if (float(score) < 0.5):
                        continue
                    if ((long_form in self.target_concepts) or (lowercase(long_form) in self.target_concepts)):
                        synset[pref_abbrv].add(lowercase(long_form))
        return synset
def empty(dir):
    """Ensure *dir* exists and is empty.

    Fix: the previous version deleted an existing directory without
    recreating it (and only created it when missing), so the post-condition
    depended on the prior state. Now the directory is always left present
    and empty.
    """
    if os.path.isdir(dir):
        shutil.rmtree(dir, ignore_errors=True)
    os.makedirs(dir, exist_ok=True)
def let_model_save_mem_when_zero_grad(model: nn.Module):
    """Rebind ``model.zero_grad`` so ``set_to_none`` defaults to True.

    Setting gradients to None (rather than zeroing them in place) releases
    the gradient tensors and saves memory between steps. Returns the same
    model with the patched method.
    """
    def new_zero_grad(self, set_to_none: bool=True) -> None:
        if getattr(self, '_is_replica', False):
            warnings.warn("Calling .zero_grad() from a module created with nn.DataParallel() has no effect. The parameters are copied (in a differentiable manner) from the original module. This means they are not leaf nodes in autograd and so don't accumulate gradients. If you need gradients in your forward method, consider using autograd.grad instead.")
        for p in self.parameters():
            if p.grad is None:
                continue
            if set_to_none:
                p.grad = None
                continue
            # Legacy behavior: zero the existing gradient tensor in place.
            if p.grad.grad_fn is not None:
                p.grad.detach_()
            else:
                p.grad.requires_grad_(False)
            p.grad.zero_()
    model.zero_grad = types.MethodType(new_zero_grad, model)
    return model
def split_text_into_sentences_by_length(text, length=512):
    """Chop *text* into (chunk, start_offset) pairs of at most *length* characters."""
    return [(text[start:start + length], start)
            for start in range(0, len(text), length)]
class DEO(BaseScore):
    """Difference of Equal Opportunity.

    Among positively-labeled samples, compares the count of positive
    predictions between the sensitive group and its complement, scaled by
    1/n over all samples.
    """

    def __call__(self, **kwargs):
        logits = kwargs[self.logits_name]
        labels = kwargs[self.label_name]
        sensitive = kwargs['sensible_attribute']
        with torch.no_grad():
            total = logits.shape[0]
            positives = (labels == 1)
            in_group = logits[sensitive.bool() & positives]
            out_group = logits[(~ sensitive.bool()) & positives]
            gap = torch.sum(in_group > 0) - torch.sum(out_group > 0)
            return ((1 / total) * torch.abs(gap)).cpu().item()
class SmoothValue(object):
    """Exponentially-weighted moving average with Adam-style bias correction."""

    def __init__(self, beta: float):
        self.beta = beta
        self.n = 0
        self.mov_avg = 0
        # Bias-corrected average; stays None until the first add_value().
        self.smooth = None

    def add_value(self, val: float) -> None:
        """Fold *val* into the running average and refresh ``smooth``."""
        self.n += 1
        self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
        # Divide by (1 - beta^n) to undo the zero-initialization bias.
        self.smooth = self.mov_avg / (1 - self.beta ** self.n)
def get_parser():
    """Build the CLI parser for dumping text from a binarized dataset to stdout.

    Options: --dataset-impl (on-disk format), --dict (path to the known-word
    dictionary), --input (required; the binarized file to read).
    """
    parser = argparse.ArgumentParser(description='writes text from binarized file to stdout')
    parser.add_argument('--dataset-impl', help='dataset implementation', choices=indexed_dataset.get_available_dataset_impl())
    parser.add_argument('--dict', metavar='FP', help='dictionary containing known words', default=None)
    parser.add_argument('--input', metavar='FP', required=True, help='binarized file to read')
    return parser
def simPushStringOntoStack(stackHandle, value):
    """Push *value* (ASCII-encoded) onto the given stack via the underlying C library.

    ``_check_return`` raises if the library reports an error code.
    NOTE(review): the 'ascii' codec raises UnicodeEncodeError for non-ASCII
    input — confirm callers only pass ASCII strings.
    """
    ret = lib.simPushStringOntoStack(stackHandle, value.encode('ascii'), 0)
    _check_return(ret)
class FlaxAutoModelForTokenClassification(metaclass=DummyObject):
    """Generated placeholder used when the 'flax' backend is unavailable.

    Any attempt to instantiate it goes through ``requires_backends``, which
    errors out unless Flax is installed.
    """
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
class GroupedBatchSampler(BatchSampler):
    """Batch sampler yielding batches whose elements all share one group id.

    Indices from ``sampler`` are buffered per group; whenever a group's buffer
    reaches ``batch_size`` it is emitted as one batch.

    NOTE(review): indices left in partially-filled buffers when ``sampler`` is
    exhausted are dropped — confirm the missing flush at the end is intended.
    """

    def __init__(self, sampler, group_ids, batch_size):
        if (not isinstance(sampler, Sampler)):
            raise ValueError('sampler should be an instance of torch.utils.data.Sampler, but got sampler={}'.format(sampler))
        self.sampler = sampler
        # One group id per dataset index.
        self.group_ids = torch.as_tensor(group_ids)
        assert (self.group_ids.dim() == 1)
        self.batch_size = batch_size
        self.groups = torch.unique(self.group_ids).sort(0)[0]
        # Group ids must form the contiguous range 0..k-1 so they index the buffers.
        assert ((self.groups[0].item() == 0) and (self.groups[(- 1)].item() == (len(self.groups) - 1)))
        self.buffer_per_group = [[] for k in self.groups]

    def __iter__(self):
        for idx in self.sampler:
            group_id = self.group_ids[idx]
            group_buffer = self.buffer_per_group[group_id]
            group_buffer.append(idx)
            if (len(group_buffer) == self.batch_size):
                # Yield a copy, then reset the shared buffer in place.
                (yield group_buffer[:])
                del group_buffer[:]
def _format(val: Any, output_format: str='standard', split: bool=False, errors: str='coarse') -> Any:
    """Clean a single Austrian VNR value, returning a one-element list.

    Null values yield [np.nan]. Invalid values: raise ValueError when
    errors='raise'; keep the raw value when errors='ignore'; otherwise
    np.nan. Valid values are normalized via ``vnr.compact``.

    NOTE(review): ``output_format`` and ``split`` are accepted but unused
    here — presumably for signature parity with sibling _format helpers;
    confirm before removing.
    """
    val = str(val)
    result: Any = []
    if (val in NULL_VALUES):
        return [np.nan]
    if (not validate_at_vnr(val)):
        if (errors == 'raise'):
            raise ValueError(f'Unable to parse value {val}')
        error_result = (val if (errors == 'ignore') else np.nan)
        return [error_result]
    result = ([vnr.compact(val)] + result)
    return result
def run_search(executable, args, sas_file, plan_manager, time, memory):
    """Run the search executable on *sas_file* under time/memory limits.

    The plan-file prefix from *plan_manager* is appended to the command line.
    Returns the process exit code (also when the call raises
    CalledProcessError).
    """
    cmd = [executable] + args
    cmd += ['--internal-plan-file', plan_manager.get_plan_prefix()]
    print('args: %s' % cmd)
    try:
        exitcode = call.check_call('search', cmd, stdin=sas_file, time_limit=time, memory_limit=memory)
    except subprocess.CalledProcessError as err:
        # A non-zero exit is reported, not raised.
        exitcode = err.returncode
    print('exitcode: %d' % exitcode)
    print()
    return exitcode
class ProppyEmbedder(nn.Module):
    """Graph-propagation embedder: refines base node embeddings by aggregating neighbors.

    NOTE: Python 2 code (xrange, dict.iteritems, itertools.izip).
    """

    def __init__(self, dim, base_embedder, iterations, neighbor_rels, max_neighbors, aggregator):
        super(ProppyEmbedder, self).__init__()
        self._dim = dim
        self._base_embedder = base_embedder
        # Number of propagation rounds applied in forward().
        self._iterations = iterations
        # Deterministic relation -> index mapping (sorted, de-duplicated).
        self._neighbor_rels = {x: i for (i, x) in enumerate(sorted(set(neighbor_rels)))}
        self._max_neighbors = max_neighbors
        assert all(((x in GraphRels.LOOKUP) for x in self._neighbor_rels))
        self._aggregator = aggregator

    def embed_dim(self):
        return self._dim

    def token_embedder(self):
        return self._base_embedder.token_embedder

    def utterance_embedder(self):
        return self._base_embedder.utterance_embedder

    def forward(self, nodes, mask=None):
        """Embed *nodes*, then propagate neighbor information for _iterations rounds."""
        embeds = self._base_embedder(nodes)
        batch_size = embeds.shape[0]  # NOTE(review): retained though unused below
        if (mask is not None):
            embeds = embeds.mul(mask.unsqueeze(1))
        # All nodes are assumed to belong to the same web page (nodes[0]).
        for itr in xrange(self._iterations):
            (neighbors, rels) = self._get_neighbors(nodes[0].web_page)
            embeds = self._aggregator(embeds, neighbors, rels)
        return embeds

    def _get_neighbors(self, web_page):
        """Build padded (neighbors, rels) SequenceBatches for every node of *web_page*."""
        G = web_page.graph
        batch_neighbors = [[] for _ in xrange(len(web_page.nodes))]
        batch_rels = [[] for _ in xrange(len(web_page.nodes))]
        for (src, tgts) in G.nodes.iteritems():
            # Bucket targets by relation, then cap each bucket at _max_neighbors
            # (shuffled so the kept subset is random).
            rel_to_tgts = defaultdict(list)
            for (tgt, rels) in tgts.iteritems():
                for rel in rels:
                    rel_to_tgts[rel].append(tgt)
            for (rel, index) in self._neighbor_rels.iteritems():
                tgts = rel_to_tgts[rel]
                random.shuffle(tgts)
                if (not tgts):
                    continue
                if (len(tgts) > self._max_neighbors):
                    tgts = tgts[:self._max_neighbors]
                batch_neighbors[src].extend(tgts)
                batch_rels[src].extend(([index] * len(tgts)))
        # Pad all rows to the longest neighbor list; the mask marks real entries.
        max_len = max((len(x) for x in batch_neighbors))
        batch_mask = []
        for (neighbors, rels) in izip(batch_neighbors, batch_rels):
            assert (len(neighbors) == len(rels))
            this_len = len(neighbors)
            batch_mask.append((([1.0] * this_len) + ([0.0] * (max_len - this_len))))
            neighbors.extend(([0] * (max_len - this_len)))
            rels.extend(([0] * (max_len - this_len)))
        return (SequenceBatch(V(LT(batch_neighbors)), V(FT(batch_mask))), SequenceBatch(V(LT(batch_rels)), V(FT(batch_mask))))
# Fix: both decorator lines were truncated to bare `.parametrize(...)` calls,
# which are syntax errors; restore the `@pytest.mark.` prefixes.
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('start, stop, num', test_data)
def test_linspace_forward_half(start, stop, num, ctx, func_name):
    """Run linspace forward in half precision and compare with the half-precision reference."""
    (ext, dtype) = ctx.backend[0].split(':')
    assert (dtype == 'float')
    # Re-create the extension context with half-precision type config.
    ctx_h = ext_utils.get_extension_context(ext, type_config='half')
    ctx_h.device_id = ctx.device_id
    with nn.context_scope(ctx_h):
        o_h = force_tuple(F.linspace(start, stop, num))
    o_h[0].parent.forward([], o_h)
    y_h = [o.d.copy() for o in o_h]
    y_np = force_tuple(ref_linspace_half(start, stop, num))
    for (y, l) in zip(y_h, y_np):
        # NOTE(review): `y.all() == l.all()` only compares the truthiness of
        # the two arrays, not their contents — an elementwise comparison
        # (e.g. assert_allclose) would be the real check; left as-is pending
        # confirmation of intended tolerance.
        assert (y.all() == l.all())
# Fix: the decorator line was truncated to a bare `.skipif(...)`, a syntax
# error; restore the `@pytest.mark.` prefix.
@pytest.mark.skipif((packaging.version.Version(cppyy.__version__) < packaging.version.Version('3.0.1')), reason='Awkward Array can only work with cppyy 3.0.1 or later.')
def test_array_as_type():
    """Pass an Awkward Array into JIT-compiled C++ and sum its nested 'y' values there."""
    array = ak.Array([[{'x': 1, 'y': [1.1]}, {'x': 2, 'y': [2.2, 0.2]}], [], [{'x': 3, 'y': [3.0, 0.3, 3.3]}]])
    # {{ }} escape literal braces inside the f-string; array.cpp_type injects
    # the generated C++ type name for the awkward array.
    source_code_cpp = f'''
double go_fast_cpp({array.cpp_type} awkward_array) {{
  double out = 0.0;
  for (auto list : awkward_array) {{
    for (auto record : list) {{
      for (auto item : record.y()) {{
        out += item;
      }}
    }}
  }}
  return out;
}}
'''
    cppyy.cppdef(source_code_cpp)
    out = cppyy.gbl.go_fast_cpp(array)
    assert (out == ak.sum(array['y']))
def _encode(s):
    """Encode *s* as UTF-8.

    On Python 3, 'surrogateescape' round-trips undecodable bytes that were
    smuggled into the string as surrogates; Python 2 keeps strict encoding.
    """
    if six.PY3:
        return s.encode('utf-8', 'surrogateescape')
    return s.encode('utf-8', 'strict')
def save_model(model_dir, filename, model_params, train_params, feature_metas, feature_column_names, label_meta, feature_column_code):
    """Persist an XGBoost model, its PMML export and its metadata into *model_dir*."""
    # The model binary, the PMML export, and the JSON meta file all go to the store.
    for artifact in (filename, '{}.pmml'.format(filename), 'model_meta.json'):
        pai_model_store.save_file(model_dir, artifact)
    pai_model_store.save_metas(model_dir, 1, 'xgboost_model_desc', '', model_params, train_params, feature_metas, feature_column_names, label_meta, feature_column_code)
class TestBoundaryConditionAnalytics(unittest.TestCase):
    """Compare the congruous-magnetization boundary condition against the analytic sphere field."""

    def test_ana_boundary_computation(self):
        # Tensor mesh: 21/20-cell core of width 12.5 with 25 padding cells
        # (geometric expansion 1.3) on each side, centered at the origin.
        hxind = [(0, 25, 1.3), (21, 12.5), (0, 25, 1.3)]
        hyind = [(0, 25, 1.3), (21, 12.5), (0, 25, 1.3)]
        hzind = [(0, 25, 1.3), (20, 12.5), (0, 25, 1.3)]
        M3 = discretize.TensorMesh([hxind, hyind, hzind], 'CCC')
        (indxd, indxu, indyd, indyu, indzd, indzu) = M3.face_boundary_indices
        # Susceptibility model: zero background plus a chi=0.01 sphere of
        # radius 100 at the origin.
        chibkg = 0.0
        chiblk = 0.01
        chi = (np.ones(M3.nC) * chibkg)
        sph_ind = get_indices_sphere([0, 0, 0], 100, M3.gridCC)
        chi[sph_ind] = chiblk
        # Numerical boundary condition under a unit x-directed inducing field.
        (Bbc, const) = mag.analytics.CongruousMagBC(M3, np.array([1.0, 0.0, 0.0]), chi)
        flag = 'secondary'
        Box = 1.0
        H0 = (Box / mu_0)
        # Analytic sphere solution evaluated on the boundary faces of each axis.
        (Bbcxx, Bbcxy, Bbcxz) = mag.analytics.MagSphereAnaFun(M3.gridFx[((indxd | indxu), 0)], M3.gridFx[((indxd | indxu), 1)], M3.gridFx[((indxd | indxu), 2)], 100, 0.0, 0.0, 0.0, mu_0, (mu_0 * (1 + chiblk)), H0, flag)
        (Bbcyx, Bbcyy, Bbcyz) = mag.analytics.MagSphereAnaFun(M3.gridFy[((indyd | indyu), 0)], M3.gridFy[((indyd | indyu), 1)], M3.gridFy[((indyd | indyu), 2)], 100, 0.0, 0.0, 0.0, mu_0, (mu_0 * (1 + chiblk)), H0, flag)
        (Bbczx, Bbczy, Bbczz) = mag.analytics.MagSphereAnaFun(M3.gridFz[((indzd | indzu), 0)], M3.gridFz[((indzd | indzu), 1)], M3.gridFz[((indzd | indzu), 2)], 100, 0.0, 0.0, 0.0, mu_0, (mu_0 * (1 + chiblk)), H0, flag)
        # Only the normal components enter the boundary condition vector.
        Bbc_ana = np.r_[(Bbcxx, Bbcyy, Bbczz)]
        if plotIt:
            # plotIt is a module-level debug flag; plotting is off in CI.
            import matplotlib.pyplot as plt
            (fig, ax) = plt.subplots(1, 1, figsize=(10, 10))
            ax.plot(Bbc_ana)
            ax.plot(Bbc)
            plt.show()
        # Relative L2 error under 10% counts as agreement with the analytic field.
        err = (np.linalg.norm((Bbc - Bbc_ana)) / np.linalg.norm(Bbc_ana))
        assert (err < 0.1), 'Mag Boundary computation is wrong!!, err = {}'.format(err)
def test_doors(trainer, env, cfg):
    """Evaluate connectivity between all entrance/exit ("hole") pairs and plot heatmaps.

    Collects per-hole-pair path-length stats by distributing hole pairs across
    evaluation envs (via a Ray IdxCounter actor), caches them to a pickle, and
    renders four heatmaps: perimeter-projected path length / failure rate, and
    per-height-difference path length / failure rate.
    """
    ctrl_stats_fname = f'{cfg.log_dir}/hole_stats.pkl'
    # Resume from cached stats when allowed.
    if (LOAD_STATS and os.path.isfile(ctrl_stats_fname)):
        ctrl_stats = pickle.load(open(ctrl_stats_fname, 'rb'))
        print(f'Loaded {len(ctrl_stats)} hole stats.')
    else:
        ctrl_stats = {}
    # Subsample every 10th hole pair, then drop pairs already evaluated.
    all_holes = env.unwrapped._prob.gen_all_holes()
    all_holes_total = [hole for (i, hole) in enumerate(all_holes) if ((i % 10) == 0)]
    all_holes = [hole for hole in all_holes_total if ((tuple(hole[0][0]), tuple(hole[1][0])) not in ctrl_stats)]
    n_envs = (max(1, cfg.num_workers) * cfg.num_envs_per_worker)
    if (len(all_holes) >= n_envs):
        # Even split of remaining holes across envs.
        # NOTE(review): env_holes is computed but never used below — confirm.
        env_hole_int = (len(all_holes) // n_envs)
        env_holes = [all_holes[(env_hole_int * i):(env_hole_int * (i + 1))] for i in range(n_envs)]
    envs = trainer.evaluation_workers.foreach_env((lambda env: env))
    envs = [env for worker_env in envs for env in worker_env]
    # A named Ray actor hands out hole pairs to envs, matched by problem hash.
    idx_counter = IdxCounter.options(name='idx_counter', max_concurrency=1).remote()
    idx_counter.set_keys.remote(all_holes)
    hashes = trainer.evaluation_workers.foreach_env((lambda env: hash(env.unwrapped._prob)))
    hashes = [hash for worker_hash in hashes for hash in worker_hash]
    idx_counter.set_hashes.remote(hashes)
    # Sanity check the actor is reachable before queueing work.
    assert ray.get(idx_counter.scratch.remote())
    trainer.evaluation_workers.foreach_env((lambda env: env.unwrapped._prob.queue_holes(idx_counter)))
    # Keep evaluating until every subsampled hole pair has a recorded stat.
    while (len(ctrl_stats) < len(all_holes_total)):
        result = trainer.evaluate()
        hist_stats = result['evaluation']['hist_stats']
        if ('holes_start' in hist_stats):
            for (hole_start, hole_end, path_len) in zip(hist_stats['holes_start'], hist_stats['holes_end'], hist_stats['connected-path-length-val']):
                ctrl_stats[(hole_start, hole_end)] = path_len
            print(f'{len(ctrl_stats)} out of {len(all_holes_total)} hole stats collected')
    pickle.dump(ctrl_stats, open(ctrl_stats_fname, 'wb'))
    (width, height, length) = cfg.map_shape
    HEATMAP = 1  # NOTE(review): unused flag — confirm before removing
    # Heatmaps indexed by perimeter position (4*width positions around the border).
    heat = np.zeros(((width * 4), (width * 4)))
    heat.fill(np.nan)
    fail = np.zeros(((width * 4), (width * 4)))
    fail.fill(np.nan)
    heat_dict = {(i, j): [] for i in range((width * 4)) for j in range((width * 4))}
    failed_heat_dict = {(i, j): [] for i in range((width * 4)) for j in range((width * 4))}
    for (hole_pair, hole_stats) in ctrl_stats.items():
        projs = [None, None]
        ((ax, ay, az), (bx, by, bz)) = hole_pair
        # Project each hole's (z, y, x) border position onto a single index
        # running around the map perimeter.
        # NOTE(review): the tuples are named (ax, ay, az) above but unpacked
        # as (z, y, x) here — the coordinate order looks inconsistent; confirm.
        for (i, (z, y, x)) in enumerate([(ax, ay, az), (bx, by, bz)]):
            if (x == 0):
                proj = y
            elif (y == (width + 1)):
                proj = (width + x)
            elif (x == (width + 1)):
                proj = (((3 * width) - y) - 1)
            elif (y == 0):
                proj = (((4 * width) - x) - 1)
            else:
                raise Exception
            projs[i] = proj
        (proj_a, proj_b) = projs
        # path_len > -1 means the pair was successfully connected.
        if (hole_stats > (- 1)):
            heat_dict[(proj_a, proj_b)] += [ctrl_stats[hole_pair]]
            failed_heat_dict[(proj_a, proj_b)] += [0]
        else:
            failed_heat_dict[(proj_a, proj_b)] += [1]
    num_pair = np.zeros(((width * 4), (width * 4)))
    num_pair.fill(np.nan)
    for k in heat_dict:
        val = np.mean(heat_dict[k])
        fai = np.mean(failed_heat_dict[k])
        heat[(k[0], k[1])] = val
        fail[(k[0], k[1])] = fai
        num_pair[(k[0], k[1])] = len(failed_heat_dict[k])
    # Heatmap 1: mean path length per (entrance, exit) perimeter position.
    (fig, axs) = plt.subplots(1, 1)
    fig.set_size_inches(7, 5)
    axs = sns.heatmap(heat, cmap='viridis', ax=axs, cbar=True, square=True, xticklabels=True, yticklabels=True)
    axs.xaxis.set_major_locator(ticker.MultipleLocator(5))
    axs.xaxis.set_major_formatter(ticker.ScalarFormatter())
    axs.yaxis.set_major_locator(ticker.MultipleLocator(5))
    axs.yaxis.set_major_formatter(ticker.ScalarFormatter())
    axs.invert_yaxis()
    axs.set_title('Path-length between entrances/exits')
    axs.set_xlabel('Entrance position')
    axs.set_ylabel('Exit position')
    plt.tight_layout()
    plt.savefig(os.path.join(cfg.log_dir, 'hole_heatmap_0_0.png'))
    plt.close()
    # Heatmap 2: failure rate per (entrance, exit) perimeter position.
    (fig, axs) = plt.subplots(1, 1)
    fig.set_size_inches(7, 5)
    axs = sns.heatmap(fail, cmap='Reds', ax=axs, cbar=True, square=True, xticklabels=True, yticklabels=True)
    axs.xaxis.set_major_locator(ticker.MultipleLocator(5))
    axs.xaxis.set_major_formatter(ticker.ScalarFormatter())
    axs.yaxis.set_major_locator(ticker.MultipleLocator(5))
    axs.yaxis.set_major_formatter(ticker.ScalarFormatter())
    axs.invert_yaxis()
    axs.set_title('Failed connections between entrances/exits')
    axs.set_xlabel('Entrance position')
    axs.set_ylabel('Exit position')
    plt.tight_layout()
    plt.savefig(os.path.join(cfg.log_dir, 'hole_heatmap_0_1.png'))
    plt.close()
    # Re-bin stats by coordinate differences, one heatmap per height difference.
    heat_dict = {h: {(i, j): [] for i in range((length + 2)) for j in range((width + 2))} for h in range((height - 1))}
    failed_heat_dict = {h: {(i, j): [] for i in range((length + 2)) for j in range((width + 2))} for h in range((height - 1))}
    for (hole_pair, hole_stat) in ctrl_stats.items():
        if (hole_stat > (- 1)):
            ((az, ay, ax), (bz, by, bx)) = hole_pair
            diff_z = abs((az - bz))
            diff_x = abs((ax - bx))
            diff_y = abs((ay - by))
            heat_dict[diff_z][(diff_x, diff_y)] += [hole_stat]
            failed_heat_dict[diff_z][(diff_x, diff_y)] += [0]
        else:
            # NOTE(review): diff_z/diff_x/diff_y are only assigned in the
            # success branch above — this else reuses values from a previous
            # iteration (or raises NameError on the first failed pair);
            # likely the unpack/diff computation should happen before the if.
            failed_heat_dict[diff_z][(diff_x, diff_y)] += [1]
    heats = {h: np.zeros(((length + 2), (width + 2))) for h in range((height - 1))}
    [heat.fill(np.nan) for heat in heats.values()]
    fails = {h: np.zeros(((length + 2), (width + 2))) for h in range((height - 1))}
    [fail.fill(np.nan) for fail in fails.values()]
    for (h, value) in heat_dict.items():
        for k in value:
            val = np.mean(heat_dict[h][k])
            fai = np.mean(failed_heat_dict[h][k])
            heats[h][(k[0], k[1])] = val
            fails[h][(k[0], k[1])] = fai
    # Heatmap 3: path length by (dx, dy), one panel per height difference.
    (fig, axes) = plt.subplots(1, (height - 1), sharex=True, sharey=True, figsize=(10, 3))
    cbar_ax = fig.add_axes([0.91, 0.3, 0.03, 0.4])
    for (i, ax) in enumerate(axes.flat):
        heat = heats[i]
        plt.subplot(1, (height - 1), (i + 1))
        ax_s = sns.heatmap(heat, cmap='viridis', ax=ax, cbar=(i == 0), square=True, xticklabels=(i == 0), yticklabels=(i == 0), cbar_ax=(None if i else cbar_ax))
        ax_s.invert_yaxis()
        if (i == 0):
            _extracted_from_test_doors_(ax, i)
        else:
            ax.set_title(i)
    fig.suptitle('Heatmap of path-length between entrances/exits')
    fig.tight_layout(rect=[0, 0, 0.9, 1])
    plt.savefig(os.path.join(cfg.log_dir, 'hole_heatmap_1_0.png'))
    plt.close()
    # Heatmap 4: failure rate by (dx, dy), one panel per height difference.
    (fig, axes) = plt.subplots(1, (height - 1), sharex=True, sharey=True, figsize=(10, 3))
    cbar_ax = fig.add_axes([0.91, 0.3, 0.03, 0.4])
    for (i, ax) in enumerate(axes.flat):
        fail = fails[i]
        plt.subplot(1, (height - 1), (i + 1))
        ax_s = sns.heatmap(fail, cmap='Reds', ax=ax, cbar=(i == 0), square=True, xticklabels=(i == 0), yticklabels=(i == 0), cbar_ax=(None if i else cbar_ax))
        ax_s.invert_yaxis()
        if (i == 0):
            _extracted_from_test_doors_(ax, i)
        else:
            ax.set_title(i)
    fig.suptitle('Heatmap of failed connection between entrances/exits')
    fig.tight_layout(rect=[0, 0, 0.9, 1])
    plt.savefig(os.path.join(cfg.log_dir, 'hole_heatmap_1_1.png'))
    plt.close()
    return {}
def get_image_generation_adapter_spec(num_outputs: int=1, output_image_width: Optional[int]=None, output_image_height: Optional[int]=None, guidance_scale: Optional[float]=None, diffusion_denoising_steps: Optional[int]=None, random: Optional[str]=None) -> AdapterSpec:
    """Build an ADAPT_GENERATION AdapterSpec for text-to-image scenarios.

    All prefixes/suffixes are empty and no in-context examples or token
    limits apply; the diffusion-specific knobs are carried in
    ImageGenerationParameters.
    """
    params = ImageGenerationParameters(
        output_image_width=output_image_width,
        output_image_height=output_image_height,
        guidance_scale=guidance_scale,
        diffusion_denoising_steps=diffusion_denoising_steps,
    )
    return AdapterSpec(
        method=ADAPT_GENERATION,
        input_prefix='',
        input_suffix='',
        output_prefix='',
        output_suffix='',
        max_train_instances=0,
        num_outputs=num_outputs,
        max_tokens=0,
        random=random,
        image_generation_parameters=params,
    )
def get_transform(train):
    """Return the image transform pipeline: ToTensor, plus a 50% horizontal flip when training."""
    ops = [T.ToTensor()]
    if train:
        # Augmentation is applied only on the training split.
        ops.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(ops)
class TestLargestNConnectedComponents(unittest.TestCase):
    """Tests for the LargestNConnectedComponents filter on a tiny 5x5 binary image."""

    def setUp(self):
        # Three components of sizes 1, 2 and 3:
        #   (0,0)            -> size 1
        #   (2,0),(2,1)      -> size 2
        #   (4,0),(4,1),(4,2)-> size 3
        image = sitk.Image((5, 5), sitk.sitkUInt8)
        image.SetPixel((0, 0), 1)
        image.SetPixel((2, 0), 1)
        image.SetPixel((2, 1), 1)
        image.SetPixel((4, 0), 1)
        image.SetPixel((4, 1), 1)
        image.SetPixel((4, 2), 1)
        self.image = image

    def test_zero_components(self):
        # Requesting zero components is invalid.
        with self.assertRaises(ValueError):
            fltr.LargestNConnectedComponents(0, False)

    def test_one_components(self):
        # Only the largest (size-3) component survives.
        dut = fltr.LargestNConnectedComponents(1, False)
        result = dut.execute(self.image)
        self.assertEqual(result.GetPixel((4, 0)), 1)
        self.assertEqual(result.GetPixel((4, 1)), 1)
        self.assertEqual(result.GetPixel((4, 2)), 1)
        result_array = sitk.GetArrayFromImage(result)
        self.assertEqual(result_array.sum(), 3)

    def test_two_components(self):
        # The size-3 and size-2 components survive (total 5 pixels).
        dut = fltr.LargestNConnectedComponents(2, False)
        result = dut.execute(self.image)
        self.assertEqual(result.GetPixel((2, 0)), 1)
        self.assertEqual(result.GetPixel((2, 1)), 1)
        self.assertEqual(result.GetPixel((4, 0)), 1)
        self.assertEqual(result.GetPixel((4, 1)), 1)
        self.assertEqual(result.GetPixel((4, 2)), 1)
        result_array = sitk.GetArrayFromImage(result)
        self.assertEqual(result_array.sum(), 5)

    def test_three_components(self):
        # All three components survive (total 6 pixels).
        dut = fltr.LargestNConnectedComponents(3, False)
        result = dut.execute(self.image)
        self.assertEqual(result.GetPixel((0, 0)), 1)
        self.assertEqual(result.GetPixel((2, 0)), 1)
        self.assertEqual(result.GetPixel((2, 1)), 1)
        self.assertEqual(result.GetPixel((4, 0)), 1)
        self.assertEqual(result.GetPixel((4, 1)), 1)
        self.assertEqual(result.GetPixel((4, 2)), 1)
        result_array = sitk.GetArrayFromImage(result)
        self.assertEqual(result_array.sum(), 6)

    def test_four_components(self):
        # NOTE(review): despite the name this constructs the filter with 3,
        # making it a duplicate of test_three_components — presumably it was
        # meant to request 4 (more than exist); confirm intended behavior of
        # the filter when n exceeds the component count before changing.
        dut = fltr.LargestNConnectedComponents(3, False)
        result = dut.execute(self.image)
        self.assertEqual(result.GetPixel((0, 0)), 1)
        self.assertEqual(result.GetPixel((2, 0)), 1)
        self.assertEqual(result.GetPixel((2, 1)), 1)
        self.assertEqual(result.GetPixel((4, 0)), 1)
        self.assertEqual(result.GetPixel((4, 1)), 1)
        self.assertEqual(result.GetPixel((4, 2)), 1)
        result_array = sitk.GetArrayFromImage(result)
        self.assertEqual(result_array.sum(), 6)

    def test_consecutive_labels(self):
        # With consecutive labeling, components are labeled 1..3 by
        # decreasing size: size-3 -> 1, size-2 -> 2, size-1 -> 3.
        dut = fltr.LargestNConnectedComponents(3, True)
        result = dut.execute(self.image)
        self.assertEqual(result.GetPixel((0, 0)), 3)
        self.assertEqual(result.GetPixel((2, 0)), 2)
        self.assertEqual(result.GetPixel((2, 1)), 2)
        self.assertEqual(result.GetPixel((4, 0)), 1)
        self.assertEqual(result.GetPixel((4, 1)), 1)
        self.assertEqual(result.GetPixel((4, 2)), 1)
        result_array = sitk.GetArrayFromImage(result)
        self.assertEqual(result_array.sum(), 10)
def test_unnormalized_pmf():
    """entropy() on raw (unnormalized) counts must equal the entropy of the normalized pmf."""
    counts = numpy.random.random(size=100)
    # Normalize to a proper probability mass function.
    pk = (counts / counts.sum())
    assert (ndd.entropy(counts) == approx(Pmf().entropy_from_pmf(pk)))
def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, ignore_index=(- 100), avg_non_ignore=False, **kwargs):
    """Binary cross-entropy with logits, supporting ignore_index and weighting.

    Args:
        pred: logits of shape [N, 1, ...] or [N, C, ...].
        label: targets; expanded to one-hot when its rank differs from pred's.
        weight: optional per-element weights (combined with the valid mask).
        reduction / avg_factor: forwarded to weight_reduce_loss.
        class_weight: passed as pos_weight to BCE-with-logits.
        ignore_index: label value excluded from the loss.
        avg_non_ignore: when averaging, normalize by non-ignored count.
    """
    if (pred.size(1) == 1):
        # Single-logit prediction: targets must be binary.
        assert (label[(label != ignore_index)].max() <= 1), 'For pred with shape [N, 1, H, W], its label must have at most 2 classes'
        # Fix: squeeze only the channel dim. A bare squeeze() also removes
        # the batch dim when N == 1 (and any size-1 spatial dim), which
        # breaks the pred/label rank comparison below.
        pred = pred.squeeze(1)
    if (pred.dim() != label.dim()):
        assert (((pred.dim() == 2) and (label.dim() == 1)) or ((pred.dim() == 4) and (label.dim() == 3))), 'Only pred shape [N, C], label shape [N] or pred shape [N, C, H, W], label shape [N, H, W] are supported'
        # One-hot expand the labels; valid_mask marks non-ignored positions.
        (label, weight, valid_mask) = _expand_onehot_labels(label, weight, pred.shape, ignore_index)
    else:
        valid_mask = ((label >= 0) & (label != ignore_index)).float()
        if (weight is not None):
            weight = (weight * valid_mask)
        else:
            weight = valid_mask
    if ((reduction == 'mean') and (avg_factor is None) and avg_non_ignore):
        # Average only over elements that are not ignored.
        avg_factor = valid_mask.sum().item()
    loss = F.binary_cross_entropy_with_logits(pred, label.float(), pos_weight=class_weight, reduction='none')
    loss = weight_reduce_loss(loss, weight, reduction=reduction, avg_factor=avg_factor)
    return loss
class ResBlock(nn.Module):
    """Residual block: x + body(x), where body is conv[-norm][-act]-conv[-norm].

    ``norm`` doubles as an off switch (False) or a layer factory; ``act`` is
    applied only after the first convolution.
    """

    def __init__(self, n_feats, kernel_size, bias=True, conv=default_conv, norm=False, act=default_act):
        super(ResBlock, self).__init__()
        layers = []
        for position in range(2):
            layers.append(conv(n_feats, n_feats, kernel_size, bias=bias))
            if norm:
                layers.append(norm(n_feats))
            # Activation only between the two convolutions.
            if act and position == 0:
                layers.append(act())
        self.body = nn.Sequential(*layers)

    def forward(self, x):
        out = self.body(x)
        out += x
        return out
class ASTModel(metaclass=DummyObject):
    """Generated placeholder used when the 'torch' backend is unavailable.

    Any attempt to instantiate it goes through ``requires_backends``, which
    errors out unless PyTorch is installed.
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def _rebuild_qtensor(storage, storage_offset, size, stride, quantizer_params, requires_grad, backward_hooks):
    """Reconstruct a quantized tensor from its pickled components.

    ``quantizer_params`` is a tuple whose first element is the qscheme; the
    remaining elements depend on the scheme (per-tensor: scale, zero_point;
    per-channel: scales, zero_points, axis).
    """
    qscheme = quantizer_params[0]
    if (qscheme == torch.per_tensor_affine):
        (_, scale, zero_point) = quantizer_params
        tensor = torch._empty_affine_quantized(size, scale=scale, zero_point=zero_point, dtype=storage.dtype)
    elif (qscheme in (torch.per_channel_affine, torch.per_channel_affine_float_qparams)):
        (_, scales, zero_points, axis) = quantizer_params
        if ((type(scales) is list) and (type(zero_points) is list)):
            # legacy checkpoints serialized plain lists; convert with the
            # dtypes each scheme expects
            if (qscheme == torch.per_channel_affine):
                scales = torch.tensor(scales, dtype=torch.double)
                zero_points = torch.tensor(zero_points, dtype=torch.long)
            else:
                scales = torch.tensor(scales, dtype=torch.float)
                zero_points = torch.tensor(zero_points, dtype=torch.float)
        tensor = torch._empty_per_channel_affine_quantized(size, scales=scales, zero_points=zero_points, axis=axis, dtype=storage.dtype)
    else:
        raise RuntimeError("Can't deserialize quantized tensor with qscheme {}".format(qscheme))
    # point the freshly allocated tensor at the deserialized storage
    tensor.set_(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    # restored for serialization compatibility
    tensor._backward_hooks = backward_hooks
    return tensor
def get_modified_python_files(diff_with_last_commit=False):
    """Return the files modified either since branching off master (default)
    or relative to the last commit's parents."""
    repo = Repo(PATH_TO_TRANFORMERS)
    if diff_with_last_commit:
        print(f'Master is at {repo.head.commit}')
        parent_commits = repo.head.commit.parents
        for commit in parent_commits:
            print(f'Parent commit: {commit}')
        return get_diff(repo, repo.head.commit, parent_commits)
    # default: diff against the merge-base with master
    print(f'Master is at {repo.refs.master.commit}')
    print(f'Current head is at {repo.head.commit}')
    branching_commits = repo.merge_base(repo.refs.master, repo.head)
    for commit in branching_commits:
        print(f'Branching commit: {commit}')
    return get_diff(repo, repo.head.commit, branching_commits)
class ResnetDiscriminator(nn.Module):
    """ResNet-style discriminator: reflection-padded 7x7 stem, a configurable
    number of stride-2 downsampling convolutions, ``n_blocks`` residual
    blocks and an optional sigmoid head. ``forward`` can multiply the output
    map by a (resized) mask.
    """
    def __init__(self, input_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', use_sigmoid=False, n_downsampling=2):
        assert (n_blocks >= 0)
        super(ResnetDiscriminator, self).__init__()
        self.input_nc = input_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        # InstanceNorm carries no affine bias, so convs need their own bias
        if (type(norm_layer) == functools.partial):
            use_bias = (norm_layer.func == nn.InstanceNorm2d)
        else:
            use_bias = (norm_layer == nn.InstanceNorm2d)
        model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), norm_layer(ngf), nn.ReLU(True)]
        if (n_downsampling <= 2):
            # regular pyramid: channels double at every stride-2 step
            for i in range(n_downsampling):
                mult = (2 ** i)
                model += [nn.Conv2d((ngf * mult), ((ngf * mult) * 2), kernel_size=3, stride=2, padding=1, bias=use_bias), norm_layer(((ngf * mult) * 2)), nn.ReLU(True)]
        elif (n_downsampling == 3):
            # special case: two doubling steps, then a third stride-2 step
            # that keeps the channel count at ngf*4
            mult = (2 ** 0)
            model += [nn.Conv2d((ngf * mult), ((ngf * mult) * 2), kernel_size=3, stride=2, padding=1, bias=use_bias), norm_layer(((ngf * mult) * 2)), nn.ReLU(True)]
            mult = (2 ** 1)
            model += [nn.Conv2d((ngf * mult), ((ngf * mult) * 2), kernel_size=3, stride=2, padding=1, bias=use_bias), norm_layer(((ngf * mult) * 2)), nn.ReLU(True)]
            mult = (2 ** 2)
            model += [nn.Conv2d((ngf * mult), (ngf * mult), kernel_size=3, stride=2, padding=1, bias=use_bias), norm_layer((ngf * mult)), nn.ReLU(True)]
        # channel multiplier feeding the residual blocks
        if (n_downsampling <= 2):
            mult = (2 ** n_downsampling)
        else:
            mult = 4
        for i in range(n_blocks):
            model += [ResnetBlock((ngf * mult), padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
        if use_sigmoid:
            model += [nn.Sigmoid()]
        self.model = nn.Sequential(*model)
    def forward(self, input, mask=None):
        """Run the discriminator; optionally gate the output with ``mask``
        (resized to the output's spatial dims)."""
        y = self.model(input)
        if (mask is not None):
            mask = F.interpolate(mask, size=(y.shape[2], y.shape[3]))
            y = (y * mask)
        return y
def __is_functional_inputs_a_list(op_call_args: Any) -> bool:
    """Return True iff the first positional argument exists, is a list, and
    every element of that list is a KerasTensor (i.e. functional-API inputs
    were passed as a single list).

    An empty inner list counts as True, matching the previous behavior.
    """
    if len(op_call_args) > 0 and isinstance(op_call_args[0], list):
        # all() short-circuits on the first non-KerasTensor element instead
        # of scanning the whole list with an accumulator flag.
        return all(isinstance(arg, KerasTensor) for arg in op_call_args[0])
    return False
class ScaleToFixed(object):
    """Resize an image, three auxiliary volumes and a label map to a fixed
    (dimA, dimB, dimC) shape."""

    def __init__(self, dimA, dimB, dimC):
        self.dimA = dimA
        self.dimB = dimB
        self.dimC = dimC

    def __call__(self, image, imageA, imageB, imageC, label):
        target = (self.dimA, self.dimB, self.dimC)
        # linear interpolation (order=1) for intensity volumes...
        resized = [skTrans.resize(vol, target, order=1, preserve_range=True)
                   for vol in (image, imageA, imageB, imageC)]
        # ...nearest-neighbour (order=0) for the label so class ids stay exact
        resized.append(skTrans.resize(label, target, order=0, preserve_range=True))
        return resized
class ChooseGuardSubprocVecEnv(ShareVecEnv):
    """Vectorized env that runs each sub-environment in its own process,
    communicating over pipes; ``reset`` additionally forwards a per-env
    ``choose`` flag to the workers.
    """
    def __init__(self, env_fns, spaces=None):
        # waiting: True between step_async and step_wait
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        (self.remotes, self.work_remotes) = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=chooseguardworker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            # NOTE(review): non-daemonic workers must be joined via close();
            # presumably deliberate so workers may spawn children — confirm.
            p.daemon = False
            p.start()
        # parent closes its copies of the worker-side pipe ends
        for remote in self.work_remotes:
            remote.close()
        # query spaces from the first worker to initialize the base class
        self.remotes[0].send(('get_spaces', None))
        (observation_space, share_observation_space, action_space) = self.remotes[0].recv()
        ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space)
    def step_async(self, actions):
        # fan out one action per worker; results collected in step_wait
        for (remote, action) in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        (obs, rews, dones, infos) = zip(*results)
        return (np.stack(obs), np.stack(rews), np.stack(dones), infos)
    def reset(self, reset_choose):
        # reset_choose: one flag per env forwarded to its worker
        for (remote, choose) in zip(self.remotes, reset_choose):
            remote.send(('reset', choose))
        obs = [remote.recv() for remote in self.remotes]
        return np.stack(obs)
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        if self.closed:
            return
        if self.waiting:
            # drain pending step results so workers are not blocked on send
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
def merge_new_config(config, new_config):
    """Recursively merge ``new_config`` into ``config`` (in place).

    If ``new_config`` names a '_BASE_CONFIG_' yaml file, it is loaded first
    and merged into ``config``; then every key of ``new_config`` is copied,
    recursing into nested dicts.

    Returns ``config`` (the same object, for chaining).
    """
    if ('_BASE_CONFIG_' in new_config):
        with open(new_config['_BASE_CONFIG_'], 'r') as f:
            try:
                yaml_config = yaml.load(f, Loader=yaml.FullLoader)
            except AttributeError:
                # Old PyYAML without FullLoader: the attribute lookup fails
                # before the stream is consumed, so re-loading is safe. The
                # previous bare `except:` also swallowed real parse errors
                # (and would then re-read an already-consumed stream).
                yaml_config = yaml.load(f)
        config.update(EasyDict(yaml_config))
    for (key, val) in new_config.items():
        if (not isinstance(val, dict)):
            config[key] = val
            continue
        if (key not in config):
            config[key] = EasyDict()
        merge_new_config(config[key], val)
    return config
class AttendNodeModule(nn.Module):
    """Scores each visual node against language features: nodes are
    L2-normalized (with a learned scale) and matched, masking out entries
    whose cls is -1."""

    def __init__(self, dim_vis_feat, visual_init_norm, jemb_dim, dim_lang_feat, jemb_dropout):
        super(AttendNodeModule, self).__init__()
        self.matching = Matching(dim_vis_feat, dim_lang_feat, jemb_dim, jemb_dropout, -1)
        self.feat_normalizer = NormalizeScale(dim_vis_feat, visual_init_norm)

    def forward(self, vis_feats, lang_feats, cls):
        batch, num_nodes = vis_feats.size(0), vis_feats.size(1)
        # normalize per node, then restore the (batch, nodes, feat) layout
        flat = vis_feats.view(batch * num_nodes, -1)
        normalized = self.feat_normalizer(flat).view(batch, num_nodes, -1)
        return self.matching(normalized, lang_feats, (cls != -1).float())
def t_stop(j, Js=((1, 2), (3, 4), (5, 6)), Trange=(1, 10)):
    """Return the stop time of pulse ``j``.

    For ``j == -1`` the stop time is the mirror image of pulse 0's start
    time around the lower bound of ``Trange``; otherwise it is the recorded
    stop time ``Js[j][1]``.

    Note: the defaults are now immutable tuples — the previous list default
    was a shared mutable object across calls.
    """
    if j == -1:
        a = min(Trange)
        # reflect t_start(0) around the window's lower edge
        return 2 * a - t_start(0, Js, Trange)
    return Js[j][1]
def isomers_c11h24(mean_function='geometric') -> GoalDirectedBenchmark:
    """Goal-directed benchmark: generate 159 isomers of C11H24."""
    scoring = IsomerScoringFunction('C11H24', mean_function=mean_function)
    spec = uniform_specification(159)
    return GoalDirectedBenchmark(name='C11H24', objective=scoring, contribution_specification=spec)
def test_numpyarray_localindex():
    """local_index on a 1-D NumpyArray: axis 0 and -1 yield positional
    indices; any deeper (or too-negative) axis raises IndexError."""
    v2_array = ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3], dtype=np.float64))
    assert (to_list(ak._do.local_index(v2_array, axis=0)) == [0, 1, 2, 3])
    # typetracer (shape-only) path must produce the same form as eager
    assert (ak._do.local_index(v2_array.to_typetracer(), axis=0).form == ak._do.local_index(v2_array, axis=0).form)
    assert (to_list(ak._do.local_index(v2_array, axis=(- 1))) == [0, 1, 2, 3])
    assert (ak._do.local_index(v2_array.to_typetracer(), axis=(- 1)).form == ak._do.local_index(v2_array, axis=(- 1)).form)
    # depth is 1, so axis 1/2/-2 are all out of range
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, axis=1)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, axis=2)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, axis=(- 2))
class Sqrtm(Benchmark):
    """asv benchmark for scipy.linalg.sqrtm across dtype, matrix size and
    blocksize combinations."""
    params = [['float64', 'complex128'], [64, 256], [32, 64, 256]]
    param_names = ['dtype', 'n', 'blocksize']

    def setup(self, dtype, n, blocksize):
        n = int(n)
        dtype = np.dtype(dtype)
        blocksize = int(blocksize)
        matrix = np.random.rand(n, n)
        if dtype == np.complex128:
            matrix = matrix + 1j * np.random.rand(n, n)
        self.A = matrix
        if blocksize > n:
            # invalid combination; asv treats NotImplementedError as a skip
            raise NotImplementedError()

    def time_sqrtm(self, dtype, n, blocksize):
        scipy.linalg.sqrtm(self.A, disp=False, blocksize=blocksize)
def add_VGG16_roi_context_2fc_head(model, blob_in, dim_in, spatial_scale):
    """Build three parallel VGG16 fc6/fc7 heads (RoI, frame, context) on top
    of ``blob_in``; the frame/context branches reuse the RoI branch's fc6/fc7
    weights via the explicit 'fc6_w'/'fc6_b'/'fc7_w'/'fc7_b' blobs.

    Returns:
        (blobs_out, 4096): the three head outputs and their feature dim.
    """
    blobs_out = []
    # --- branch 1: standard RoI pooling + fc6/fc7 ---
    l = model.RoIFeatureTransform(blob_in, 'pool5', blob_rois='rois', method=cfg.FAST_RCNN.ROI_XFORM_METHOD, resolution=7, sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale)
    l = model.net.RoIFeatureBoost([l, 'obn_scores'], 'roi_feat_boost')
    l = model.FC(l, 'fc6', ((dim_in * 7) * 7), 4096)
    l = model.Relu(l, 'fc6')
    l = DropoutIfTraining(model, l, 'drop6', 0.5)
    l = model.FC(l, 'fc7', 4096, 4096)
    l = model.Relu(l, 'fc7')
    l = DropoutIfTraining(model, l, 'drop7', 0.5)
    blobs_out.append(l)
    # --- branch 2: frame rois via RoILoopPool, sharing fc6/fc7 weights ---
    l = model.RoIFeatureTransform(blob_in, 'pool5_frame', blob_rois='rois_frame', method='RoILoopPool', resolution=7, sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale)
    l = model.net.RoIFeatureBoost([l, 'obn_scores'], 'roi_feat_boost_frame')
    l = model.net.FC([l, 'fc6_w', 'fc6_b'], 'fc6_frame')
    l = model.Relu(l, 'fc6_frame')
    l = DropoutIfTraining(model, l, 'drop6_frame', 0.5)
    l = model.net.FC([l, 'fc7_w', 'fc7_b'], 'fc7_frame')
    l = model.Relu(l, 'fc7_frame')
    l = DropoutIfTraining(model, l, 'drop7_frame', 0.5)
    blobs_out.append(l)
    # --- branch 3: context rois via RoILoopPool, sharing fc6/fc7 weights ---
    l = model.RoIFeatureTransform(blob_in, 'pool5_context', blob_rois='rois_context', method='RoILoopPool', resolution=7, sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale)
    l = model.net.RoIFeatureBoost([l, 'obn_scores'], 'roi_feat_boost_context')
    l = model.net.FC([l, 'fc6_w', 'fc6_b'], 'fc6_context')
    l = model.Relu(l, 'fc6_context')
    l = DropoutIfTraining(model, l, 'drop6_context', 0.5)
    l = model.net.FC([l, 'fc7_w', 'fc7_b'], 'fc7_context')
    l = model.Relu(l, 'fc7_context')
    l = DropoutIfTraining(model, l, 'drop7_context', 0.5)
    blobs_out.append(l)
    return (blobs_out, 4096)
def get_target_list(target_path):
    """Load a BOP-style targets json and flatten each entry into
    ``[scene_id, im_id, obj_id, inst_count]``.

    Replaces the ``for i in range(len(...))`` index loop with direct
    iteration (behavior unchanged).
    """
    targets = inout.load_json(target_path)
    return [[tgt['scene_id'], tgt['im_id'], tgt['obj_id'], tgt['inst_count']]
            for tgt in targets]
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq data2vec-audio checkpoint to HF Transformers format,
    verify outputs match, and save model (+ processor when finetuned).

    NOTE: the original file had a mangled bare ``_grad()`` statement where
    the ``@torch.no_grad()`` decorator belongs; the decorator is restored
    here (conversion scripts must not build autograd graphs).
    """
    if (config_path is not None):
        config = Data2VecAudioConfig.from_pretrained(config_path)
    else:
        config = Data2VecAudioConfig()
    if (not is_finetuned):
        hf_wav2vec = Data2VecAudioModel(config)
        data2vec_checkpoint_dir = os.path.dirname(checkpoint_path)
        # pretrained checkpoints name the final projection 'final_proj.0.*';
        # rename so fairseq's loader finds 'final_proj.*'
        state_dict = torch.load(checkpoint_path)
        state_dict['model']['final_proj.weight'] = state_dict['model'].pop('final_proj.0.weight')
        state_dict['model']['final_proj.bias'] = state_dict['model'].pop('final_proj.0.bias')
        converted_ckpt = os.path.join(data2vec_checkpoint_dir, 'converted.pt')
        torch.save(state_dict, converted_ckpt)
    else:
        hf_wav2vec = Data2VecAudioForCTC(config)
        converted_ckpt = checkpoint_path
    def load_data2vec(path):
        # load the fairseq model ensemble and return the first model in eval mode
        (model, _, _) = fairseq.checkpoint_utils.load_model_ensemble_and_task([path])
        return model[0].eval()
    model = load_data2vec(converted_ckpt)
    recursively_load_weights(model, hf_wav2vec, (not is_finetuned))
    processor = Wav2Vec2Processor.from_pretrained('facebook/wav2vec2-large-lv60')
    ds = load_dataset('patrickvonplaten/librispeech_asr_dummy', 'clean', split='validation')
    input_audio = [x['array'] for x in ds[:4]['audio']]
    inputs = processor(input_audio, return_tensors='pt', padding=True)
    input_values = inputs.input_values
    attention_mask = inputs.attention_mask
    hf_wav2vec.eval()
    model.eval()
    if is_finetuned:
        # fairseq uses padding_mask (1 = padded) instead of attention_mask
        their_output = model(source=input_values, padding_mask=(1 - attention_mask), mask=False, features_only=True)['encoder_out'].transpose(0, 1)
        our_output = hf_wav2vec(input_values, attention_mask=attention_mask)['logits']
        pred_ids = torch.argmax(our_output, dim=(- 1))
        output_string = processor.batch_decode(pred_ids)
        print(f"Expected Output: {ds[:4]['text']}, Pred: {output_string}")
    else:
        their_output = model(source=input_values, padding_mask=(1 - attention_mask), mask=False, features_only=True)['layer_results'][(- 1)][0].transpose(0, 1)
        our_output = hf_wav2vec(input_values, attention_mask=attention_mask)['last_hidden_state']
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs((our_output - their_output))).item()
    print(f'max_absolute_diff = {max_absolute_diff}')
    success = torch.allclose(our_output, their_output, atol=0.001)
    # NOTE(review): the success/failure marker strings were lost (likely
    # emoji stripped by an encoder); kept as-is to avoid inventing output.
    print('Do both models output the same tensors?', ('' if success else ''))
    if (not success):
        raise Exception('Something went wRoNg')
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    if is_finetuned:
        processor.save_pretrained(pytorch_dump_folder_path)
    else:
        processor.feature_extractor.save_pretrained(pytorch_dump_folder_path)
def fetch_data(splits, sample_pct, seed=1234):
    """Load id/wav records from the given json split files, optionally
    subsample a fraction of them, and return (iterator, count)."""
    ds_splits = {
        f'{split.parent}_{split.stem}': sb.dataio.dataset.DynamicItemDataset.from_json(json_path=split, output_keys=['id', 'wav'])
        for split in splits
    }
    data = list(itertools.chain.from_iterable(ds_splits.values()))
    random.seed(seed)
    if sample_pct < 1.0:
        data = random.sample(data, int(sample_pct * len(data)))
    return (iter(data), len(data))
def __wordnet_lookup_gender(head):
    """Walk the WordNet hypernym chain of ``head`` and classify its gender.

    Returns 'MALE', 'FEMALE', 'NEUTRAL' (entity reached), or None when the
    chain hits 'person' or runs out of hypernyms.
    """
    synsets = wn.synsets(head)
    while synsets:
        lemma = synsets[0].lemma_names()[0]
        if lemma in ('man', 'male'):
            return 'MALE'
        if lemma in ('woman', 'female'):
            return 'FEMALE'
        if lemma == 'person':
            # person is gender-ambiguous: give up without an answer
            return
        if lemma == 'entity':
            return 'NEUTRAL'
        synsets = synsets[0].hypernyms()
class NbsDataset(VisionDataset):
    """Dataset wrapper that additionally returns the position of ``idx``
    inside the ``group`` array."""

    def __init__(self, dataset, group):
        self.dataset = dataset
        self.group = group

    def __getitem__(self, idx):
        img, label = self.dataset[idx]
        # first position where group equals idx (assumes idx occurs in group)
        index = np.where(self.group == idx)[0][0]
        return (img, label, index)

    def __len__(self):
        return len(self.dataset)
def setup_model_loss_criterion(args, rank, is_cuda):
    """Build (model, loss, BMUF-wrapped optimizer) for one distributed worker.

    Called once per process: records this worker's rank, joins the process
    group, then constructs the model and wraps SGD in FairseqBMUF.
    """
    args.distributed_rank = rank
    distributed_utils.distributed_init(args)
    # fixed seed so every worker starts from identical weights
    torch.manual_seed(1)
    model = Model(args.input_size, args.nb_classes)
    loss_fn = nn.CrossEntropyLoss()
    if is_cuda:
        model = model.cuda()
        loss_fn = loss_fn.cuda()
    optimizer = optim.sgd.SGD(args, model.parameters())
    # BMUF wraps the base optimizer for block-wise model update filtering
    optimizer = optim.FairseqBMUF(args, optimizer)
    return (model, loss_fn, optimizer)
class OutputInTheMiddleNetTest(BasePytorchTest):
    """Feature test for a network whose declared output comes from a
    mid-graph layer rather than the last one."""
    def __init__(self, unit_test):
        super().__init__(unit_test)
    def create_feature_network(self, input_shape):
        # input_shape is unused: OutputInTheMiddleNet fixes its own layout
        return OutputInTheMiddleNet()
def set_score_text(fig, ax, bar, entry):
    """Draw ``entry['score']`` at the end of ``bar``: inside the bar by
    default, or just outside it when there is room before the axis edge.
    The label color encodes the algorithm ('Human' red, 'Random' black,
    otherwise dark blue).
    """
    score_text = ax.text(x=((bar.get_x() + bar.get_width()) - 30), y=(bar.get_y() + (bar.get_height() / 2)), s=str(entry['score']), color='white', fontweight='bold', ha='right', va='center')
    # hoisted: the renderer is loop-invariant for all three extent queries
    renderer = fig.canvas.get_renderer()
    bar_x1 = bar.get_window_extent(renderer=renderer).x1
    ax_x1 = ax.get_window_extent(renderer=renderer).x1
    score_text_width = score_text.get_window_extent(renderer=renderer).width
    if ((bar_x1 + score_text_width) < ax_x1):
        # enough space to the right of the bar: move the label outside it
        score_text.set_x(((bar.get_x() + bar.get_width()) + 50))
        score_text.set_ha('left')
    if (entry['algo-title'] == 'Human'):
        color = 'red'
    elif (entry['algo-title'] == 'Random'):
        color = 'black'
    else:
        color = 'darkblue'
    score_text.set_color(color)
class c_nvmlMemory_t(ctypes.Structure):
    """ctypes mirror of NVML's nvmlMemory_t struct: total/free/used device
    memory counters (unsigned 64-bit; presumably bytes, per NVML — confirm)."""
    _fields_ = [('total', ctypes.c_ulonglong), ('free', ctypes.c_ulonglong), ('used', ctypes.c_ulonglong)]
def create_backbone(cfg):
    """Instantiate the backbone selected by cfg.MODEL.BACKBONE.TYPE
    (currently only 'vit' is supported)."""
    backbone_type = cfg.MODEL.BACKBONE.TYPE
    if backbone_type != 'vit':
        raise NotImplementedError('Backbone type is not implemented')
    return vit(cfg)
class SystemAct(object):
    """Enumeration of dialogue-system act labels used by the dialog manager."""
    IMPLICIT_CONFIRM = 'implicit_confirm'
    EXPLICIT_CONFIRM = 'explicit_confirm'
    INFORM = 'inform'
    REQUEST = 'request'
    GREET = 'greet'
    GOODBYE = 'goodbye'
    CLARIFY = 'clarify'
    ASK_REPHRASE = 'ask_rephrase'
    ASK_REPEAT = 'ask_repeat'
    QUERY = 'query'
def generate(model, styles, mean_latent=None, truncation=1.0, batch_size=16, *args, **kwargs):
    """Run the generator over ``styles`` in minibatches (on device), collect
    images and segmentations on CPU, and return them converted for display."""
    images, segs = [], []
    for start in range(0, styles.size(0), batch_size):
        batch = styles[start:start + batch_size]
        img, seg = model([batch], *args, input_is_latent=True, truncation=truncation, truncation_latent=mean_latent, **kwargs)
        # detach + move to CPU immediately to keep GPU memory bounded
        images.append(img.detach().cpu())
        segs.append(seg.detach().cpu())
    images = torch.cat(images, 0)
    segs = torch.cat(segs, 0)
    return (tensor2image(images), tensor2seg(segs))
class GPT2TokenizationTest(CommonTestCases.CommonTokenizerTester):
    """Tokenizer test suite for GPT2Tokenizer, built on a tiny synthetic
    vocab/merges pair written to a temp dir."""
    tokenizer_class = GPT2Tokenizer
    def setUp(self):
        """Write a minimal BPE vocab and merges file for the tests."""
        super(GPT2TokenizationTest, self).setUp()
        # 'G' here plays the role of GPT-2's space marker in merged tokens
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'G', 'Gl', 'Gn', 'Glo', 'Glow', 'er', 'Glowest', 'Gnewer', 'Gwider', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'G l', 'Gl o', 'Glo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(vocab_tokens) + '\n'))
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        """Instantiate a GPT2Tokenizer from the temp fixture files."""
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self):
        # tokenize/detokenize round-trip sample used by the common suite
        input_text = u'lower newer'
        output_text = u'lower newer'
        return (input_text, output_text)
    def test_full_tokenizer(self):
        """End-to-end BPE tokenization and token-to-id conversion."""
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['Glow', 'er', 'G', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        # unknown tokens map to the <unk> id (19)
        input_tokens = (tokens + [tokenizer.unk_token])
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@torch.no_grad()
def calculate_lpips_given_images(group_of_images):
    """Mean pairwise LPIPS distance over a group of image batches.

    NOTE: the original file had a mangled bare ``_grad()`` statement where
    the ``@torch.no_grad()`` decorator belongs; the decorator is restored
    (this is a pure evaluation metric — no gradients needed).
    """
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    lpips = LPIPS().eval().to(device)
    lpips_values = []
    num_rand_outputs = len(group_of_images)
    # all unordered pairs (i, j), i < j
    for i in range((num_rand_outputs - 1)):
        for j in range((i + 1), num_rand_outputs):
            lpips_values.append(lpips(group_of_images[i], group_of_images[j]))
    lpips_value = torch.mean(torch.stack(lpips_values, dim=0))
    return lpips_value.item()
def register_types_ns3_Hash_Function(module):
    """Register the ns3::Hash::Function implementation classes with the
    pybindgen module (all inherit ns3::Hash::Implementation)."""
    root_module = module.get_root()
    for class_name in ('Fnv1a', 'Hash32', 'Hash64', 'Murmur3'):
        module.add_class(class_name, import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def read_all_Datasets(inpath, isLower=True):
    """Read a gzipped corpus and build (code, doc) sentence-pair instances.

    Lines are handed to ``single_instance_process`` via ``parallel_process``;
    only tuple results (sent1, sent2) are kept. Token-length statistics are
    gathered for both sides.

    Returns:
        (all_instances, code_graph_len_stats, doc_token_len_stats)
    """
    all_instances = []
    code_graph_len = []
    doc_token_len = []
    with gzip.GzipFile(inpath, 'r') as f:
        lines = list(f)
        results = parallel_process(lines, single_instance_process, args=(isLower,))
    for result in results:
        # non-tuple results mark lines that failed processing; skip them
        # (isinstance replaces the brittle `type(result) is tuple` check)
        if isinstance(result, tuple):
            (sent1, sent2) = result
            code_graph_len.append(sent1.get_token_length())
            doc_token_len.append(sent2.get_token_length())
            all_instances.append((sent1, sent2))
    code_graph_len_stats = {'min': np.min(code_graph_len), 'max': np.max(code_graph_len), 'mean': np.mean(code_graph_len)}
    doc_token_len_stats = {'min': np.min(doc_token_len), 'max': np.max(doc_token_len), 'mean': np.mean(doc_token_len)}
    return (all_instances, code_graph_len_stats, doc_token_len_stats)
def flickr8k_demo():
    """Load only the training portion of the Flickr8k demo split."""
    reader = KarpathyIO(img_folder='data/flickr8k/Flicker8k_Dataset')
    train_data, *_ = reader.read('data/flickr8k/demo.flickr8k-karpathy2015cvpr.json')
    return train_data
def test_graph_reverse_cuthill_mckee_ordering():
    """RCM ordering of a known symmetric 16-node graph must reproduce the
    reference permutation."""
    row_idx = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 15])
    col_idx = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2, 7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13, 15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13, 1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11, 4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14, 5, 7, 10, 13, 15])
    # unit weights on every listed edge
    graph = coo_matrix((np.ones(63, dtype=int), (row_idx, col_idx))).tocsr()
    perm = reverse_cuthill_mckee(graph)
    expected = np.array([12, 14, 4, 6, 10, 8, 2, 15, 0, 13, 7, 5, 9, 11, 1, 3])
    assert_equal(perm, expected)
def test_construct_kernel_separate_independent_duplicates():
    """With share_hyperparams=False each output dim gets its own kernel
    object, but all copies start from identical hyperparameter values."""
    base = Matern52(variance=5)
    mok = construct_basic_kernel(base, output_dim=3, share_hyperparams=False)
    # container type checks
    assert isinstance(mok, MultioutputKernel)
    assert isinstance(mok, SeparateIndependent)
    assert all(isinstance(k, Matern52) for k in mok.kernels)
    # distinct objects, equal initial hyperparameters
    first, last = mok.kernels[0], mok.kernels[-1]
    assert first is not last
    assert first.variance.numpy() == last.variance.numpy()
    assert first.lengthscales.numpy() == last.lengthscales.numpy()
def main(args):
    """Run single-image semantic-segmentation inference on the chosen GPU."""
    torch.cuda.set_device(args.gpu_id)
    builder = ModelBuilder()
    # encoder/decoder weights come from checkpoint paths carried in args
    net_encoder = builder.build_encoder(arch=args.arch_encoder, fc_dim=args.fc_dim, weights=args.weights_encoder)
    net_decoder = builder.build_decoder(arch=args.arch_decoder, fc_dim=args.fc_dim, num_class=args.num_class, weights=args.weights_decoder, use_softmax=True)
    # -1 marks unlabeled pixels, excluded from the loss
    crit = nn.NLLLoss(ignore_index=(- 1))
    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
    list_test = [{'fpath_img': args.test_img}]
    dataset_val = TestDataset(list_test, args, max_sample=args.num_val)
    loader_val = torchdata.DataLoader(dataset_val, batch_size=args.batch_size, shuffle=False, collate_fn=user_scattered_collate, num_workers=5, drop_last=True)
    segmentation_module.cuda()
    test(segmentation_module, loader_val, args)
    print('Inference done!')
class ParameterExtraction():
    """Sets up and solves an auxiliary optimal control problem used to
    extract cost-functional parameters from a coarse model.

    The coarse problem's state/adjoint/control functions are substituted
    (via ``ufl.replace``) with this object's own function copies, so solving
    the extraction problem never mutates the coarse model's functions.
    """
    def __init__(self, coarse_model: CoarseModel, cost_functional_form: Union[(List[_typing.CostFunctional], _typing.CostFunctional)], states: Union[(List[fenics.Function], fenics.Function)], controls: Union[(List[fenics.Function], fenics.Function)], config: Optional[io.Config]=None, desired_weights: Optional[List[float]]=None, mode: str='initial') -> None:
        """Store problem data and build the substituted state forms.

        Args:
            coarse_model: the coarse optimal control problem wrapper.
            cost_functional_form: cost functional(s) for the extraction problem.
            states / controls: function(s) owned by this extraction problem.
            config: optional solver configuration.
            desired_weights: optional scaling weights for the cost terms.
            mode: 'initial' (zero controls) or 'coarse_optimum' (warm start).
        """
        self.coarse_model = coarse_model
        self.cost_functional_form = cost_functional_form
        self.states = _utils.enlist(states)
        self.controls: List[fenics.Function] = _utils.enlist(controls)
        self.config = config
        self.mode = mode
        self.desired_weights = desired_weights
        self._pre_callback: Optional[Callable] = None
        self._post_callback: Optional[Callable] = None
        self.adjoints = _utils.create_function_list(coarse_model.optimal_control_problem.db.function_db.adjoint_spaces)
        # map every coarse-problem function to this object's own copy
        dict_states = {coarse_model.optimal_control_problem.states[i]: self.states[i] for i in range(len(self.states))}
        dict_adjoints = {coarse_model.optimal_control_problem.adjoints[i]: self.adjoints[i] for i in range(len(self.adjoints))}
        dict_controls = {coarse_model.optimal_control_problem.db.function_db.controls[i]: self.controls[i] for i in range(len(self.controls))}
        mapping_dict = {}
        mapping_dict.update(dict_states)
        mapping_dict.update(dict_adjoints)
        mapping_dict.update(dict_controls)
        # substitute the coarse functions inside the UFL state forms
        self.state_forms = [ufl.replace(form, mapping_dict) for form in coarse_model.optimal_control_problem.state_forms]
        # remaining problem data is shared with the coarse problem
        self.bcs_list = coarse_model.optimal_control_problem.bcs_list
        self.riesz_scalar_products = coarse_model.optimal_control_problem.riesz_scalar_products
        self.control_constraints = coarse_model.optimal_control_problem.box_constraints.control_constraints
        self.initial_guess = coarse_model.optimal_control_problem.initial_guess
        self.ksp_options = coarse_model.optimal_control_problem.ksp_options
        self.adjoint_ksp_options = coarse_model.optimal_control_problem.adjoint_ksp_options
        self.preconditioner_forms = coarse_model.preconditioner_forms
        # created lazily in _solve
        self.optimal_control_problem: Optional[ocp.OptimalControlProblem] = None
    def _solve(self, initial_guesses: Optional[List[fenics.Function]]=None) -> None:
        """Initialize the controls per ``self.mode`` and solve the extraction
        problem.

        Raises:
            _exceptions.InputError: if mode is 'coarse_optimum' but no
                initial guesses were supplied (or mode is unknown).
        """
        if (self.mode == 'initial'):
            # start from zero controls
            for i in range(len(self.controls)):
                self.controls[i].vector().vec().set(0.0)
                self.controls[i].vector().apply('')
        elif ((self.mode == 'coarse_optimum') and (initial_guesses is not None)):
            # warm start: copy the supplied coarse optimum into the controls
            for i in range(len(self.controls)):
                self.controls[i].vector().vec().aypx(0.0, initial_guesses[i].vector().vec())
                self.controls[i].vector().apply('')
        else:
            raise _exceptions.InputError('ParameterExtraction._solve', 'initial_guesses', '')
        self.optimal_control_problem = ocp.OptimalControlProblem(self.state_forms, self.bcs_list, self.cost_functional_form, self.states, self.controls, self.adjoints, config=self.config, riesz_scalar_products=self.riesz_scalar_products, control_constraints=self.control_constraints, initial_guess=self.initial_guess, ksp_options=self.ksp_options, adjoint_ksp_options=self.adjoint_ksp_options, desired_weights=self.desired_weights, preconditioner_forms=self.preconditioner_forms)
        self.optimal_control_problem.inject_pre_post_callback(self._pre_callback, self._post_callback)
        self.optimal_control_problem.solve()
def module_has_exports(mod):
    """Return True iff any attribute of ``mod`` is a callable marked with the
    TorchScript EXPORT modifier (i.e. ``@torch.jit.export``).

    Rewritten with a short-circuiting ``any()`` over a generator instead of
    the explicit loop-and-flag form; behavior is unchanged.
    """
    return any(
        get_torchscript_modifier(item) is FunctionModifiers.EXPORT
        for item in (getattr(mod, name) for name in dir(mod))
        if callable(item)
    )
class AI21TokenCounter(TokenCounter):
    """Token counter for AI21 completions."""

    def count_tokens(self, request: Request, completions: List[Sequence]) -> int:
        """Total number of tokens across all completion sequences.

        ``request`` is accepted for interface compatibility but unused.
        """
        total = 0
        for sequence in completions:
            total += len(sequence.tokens)
        return total
class SpLinear(nn.Module):
    """Linear layer backed by the custom ``splinear`` op.

    Weight is initialized uniformly in [-stdv, stdv] with
    stdv = 1/sqrt(fan_in), matching nn.Linear's classic scheme.

    Bug fix: the bias parameter was allocated with ``torch.Tensor(...)`` but
    never initialized, leaving it with arbitrary uninitialized memory; it is
    now initialized with the same uniform bound as the weight.
    """

    def __init__(self, input_features, output_features, bias=True):
        super(SpLinear, self).__init__()
        self.input_features = input_features
        self.output_features = output_features
        self.weight = nn.Parameter(torch.Tensor(output_features, input_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(output_features))
        else:
            self.register_parameter('bias', None)
        stdv = (1.0 / math.sqrt(self.weight.size(1)))
        self.weight.data.uniform_((- stdv), stdv)
        if self.bias is not None:
            self.bias.data.uniform_((- stdv), stdv)

    def forward(self, input):
        return splinear(input, self.weight, self.bias)
def mask_attn_weights(w):
    """Apply a causal (lower-triangular) mask to attention logits ``w``
    (shape [..., n, n] over the last two axes).

    Bug fix: the masked-position multiplier was ``-.0`` (exact zero), which
    made the mask a no-op — future positions were not suppressed at all.
    The reference implementation uses a large negative constant so masked
    logits vanish after softmax; -1e9 is used here.
    """
    n = shape_list(w)[(- 1)]
    # lower-triangular ones: position i may attend only to j <= i
    b = tf.matrix_band_part(tf.ones([n, n]), (- 1), 0)
    b = tf.reshape(b, [1, 1, n, n])
    w = w * b + (-1e9) * (1 - b)
    return w
@pytest.mark.parametrize('obj, method, inputs, err_cls, err_msg', [(MethodMapping(), 'add', {'callee': 'invalid', 'caller': 'fit'}, ValueError, 'Given callee'), (MethodMapping(), 'add', {'callee': 'fit', 'caller': 'invalid'}, ValueError, 'Given caller'), (MethodMapping, 'from_str', {'route': 'invalid'}, ValueError, "route should be 'one-to-one' or a single method!"), (MetadataRouter(owner='test'), 'add_self_request', {'obj': MetadataRouter(owner='test')}, ValueError, 'Given `obj` is neither a `MetadataRequest` nor does it implement'), (ConsumingClassifier(), 'set_fit_request', {'invalid': True}, TypeError, 'Unexpected args')])
def test_validations(obj, method, inputs, err_cls, err_msg):
    """Each invalid call must raise the documented exception class with a
    message matching the expected pattern.

    Fix: the bare ``.parametrize(...)`` line was a syntax error — the
    ``@pytest.mark.`` decorator prefix had been stripped; restored.
    """
    with pytest.raises(err_cls, match=err_msg):
        getattr(obj, method)(**inputs)
@pytest.mark.operations('failure')
def test_cli_output(cli, base_url, schema_url, snapshot_cli):
    """CLI run with python code samples must match the stored snapshot.

    Fix: the bare ``.operations('failure')`` line was a syntax error — the
    ``@pytest.mark.`` decorator prefix had been stripped; restored.
    """
    assert (cli.run(schema_url, '--code-sample-style=python') == snapshot_cli)
class SG_reg(atomic_reg):
    """ctypes bit-field layout of the 'SG' atomic command register.

    ``_fields_`` defines the exact hardware bit widths; the bare ``name: int``
    annotations below only re-declare each field for static checkers/IDEs and
    have no effect on the ctypes layout (ctypes reads ``_fields_`` only).
    """
    OP_NAME = 'SG'
    _fields_ = [('cmd_short', ctypes.c_uint64, 1), ('cmd_id', ctypes.c_uint64, 20), ('cmd_id_dep', ctypes.c_uint64, 20), ('tsk_typ', ctypes.c_uint64, 4), ('tsk_eu_typ', ctypes.c_uint64, 5), ('eu_half_en', ctypes.c_uint64, 1), ('tsk_opd_num', ctypes.c_uint64, 2), ('pad_mode', ctypes.c_uint64, 2), ('cmd_id_en', ctypes.c_uint64, 4), ('pwr_step', ctypes.c_uint64, 4), ('intr_en', ctypes.c_uint64, 1), ('res_add', ctypes.c_uint64, 1), ('relu', ctypes.c_uint64, 1), ('left_tran', ctypes.c_uint64, 1), ('rsvd1', ctypes.c_uint64, 1), ('kernel_rotate', ctypes.c_uint64, 1), ('opd0_sign', ctypes.c_uint64, 1), ('opd1_sign', ctypes.c_uint64, 1), ('opd2_sign', ctypes.c_uint64, 1), ('res0_prec', ctypes.c_uint64, 3), ('opd0_prec', ctypes.c_uint64, 3), ('opd1_prec', ctypes.c_uint64, 3), ('opd2_prec', ctypes.c_uint64, 3), ('opd0_const', ctypes.c_uint64, 1), ('opd1_const', ctypes.c_uint64, 1), ('opd2_const', ctypes.c_uint64, 1), ('res0_str', ctypes.c_uint64, 3), ('opd0_str', ctypes.c_uint64, 3), ('opd1_str', ctypes.c_uint64, 3), ('opd2_str', ctypes.c_uint64, 3), ('res_add_sign', ctypes.c_uint64, 1), ('rsvd2', ctypes.c_uint64, 25), ('rsvd3', ctypes.c_uint64, 1), ('opd3_const', ctypes.c_uint64, 1), ('rsvd4', ctypes.c_uint64, 1), ('opd0_x_ins0', ctypes.c_uint64, 4), ('opd0_y_ins0', ctypes.c_uint64, 4), ('opd1_x_ins0', ctypes.c_uint64, 4), ('opd1_y_ins0', ctypes.c_uint64, 4), ('opd0_up_pad', ctypes.c_uint64, 4), ('opd0_dn_pad', ctypes.c_uint64, 4), ('opd0_lf_pad', ctypes.c_uint64, 4), ('opd0_rt_pad', ctypes.c_uint64, 4), ('res_op_x_str', ctypes.c_uint64, 4), ('res_op_y_str', ctypes.c_uint64, 4), ('res0_h_shift', ctypes.c_uint64, 4), ('res0_w_shift', ctypes.c_uint64, 4), ('opd0_h_shift', ctypes.c_uint64, 4), ('opd0_w_shift', ctypes.c_uint64, 4), ('opd1_h_shift', ctypes.c_uint64, 4), ('opd1_w_shift', ctypes.c_uint64, 4), ('tsk_lane_num', ctypes.c_uint64, 64), ('res0_n', ctypes.c_uint64, 16), ('res0_c', ctypes.c_uint64, 16), ('res0_h', ctypes.c_uint64, 16), ('res0_w', ctypes.c_uint64, 16), 
    ('opd0_n', ctypes.c_uint64, 16), ('opd0_c', ctypes.c_uint64, 16), ('opd0_h', ctypes.c_uint64, 16), ('opd0_w', ctypes.c_uint64, 16), ('opd1_n', ctypes.c_uint64, 16), ('opd1_c', ctypes.c_uint64, 16), ('opd1_h', ctypes.c_uint64, 16), ('opd1_w', ctypes.c_uint64, 16), ('res0_n_str', ctypes.c_uint64, 16), ('res0_c_str', ctypes.c_uint64, 16), ('opd0_n_str', ctypes.c_uint64, 16), ('opd0_c_str', ctypes.c_uint64, 16), ('opd1_n_str', ctypes.c_uint64, 16), ('opd1_c_str', ctypes.c_uint64, 16), ('opd2_n_str', ctypes.c_uint64, 16), ('opd2_c_str', ctypes.c_uint64, 16), ('res0_addr', ctypes.c_uint64, 32), ('opd0_addr', ctypes.c_uint64, 32), ('opd1_addr', ctypes.c_uint64, 32), ('opd2_addr', ctypes.c_uint64, 32), ('res0_h_str', ctypes.c_uint64, 32), ('res0_w_str', ctypes.c_uint64, 32), ('opd0_h_str', ctypes.c_uint64, 32), ('opd0_w_str', ctypes.c_uint64, 32), ('opd1_h_str', ctypes.c_uint64, 32), ('opd1_w_str', ctypes.c_uint64, 32), ('opd2_h_str', ctypes.c_uint64, 32), ('opd2_w_str', ctypes.c_uint64, 32), ('res1_addr', ctypes.c_uint64, 32), ('opd3_addr', ctypes.c_uint64, 32)]
    # --- command header / task descriptor bits ---
    cmd_short: int
    cmd_id: int
    cmd_id_dep: int
    tsk_typ: int
    tsk_eu_typ: int
    eu_half_en: int
    tsk_opd_num: int
    pad_mode: int
    cmd_id_en: int
    pwr_step: int
    intr_en: int
    res_add: int
    relu: int
    left_tran: int
    rsvd1: int
    kernel_rotate: int
    # --- operand sign / precision / const / stride-mode flags ---
    opd0_sign: int
    opd1_sign: int
    opd2_sign: int
    res0_prec: int
    opd0_prec: int
    opd1_prec: int
    opd2_prec: int
    opd0_const: int
    opd1_const: int
    opd2_const: int
    res0_str: int
    opd0_str: int
    opd1_str: int
    opd2_str: int
    res_add_sign: int
    rsvd2: int
    rsvd3: int
    opd3_const: int
    rsvd4: int
    # --- insertion / padding / shift parameters ---
    opd0_x_ins0: int
    opd0_y_ins0: int
    opd1_x_ins0: int
    opd1_y_ins0: int
    opd0_up_pad: int
    opd0_dn_pad: int
    opd0_lf_pad: int
    opd0_rt_pad: int
    res_op_x_str: int
    res_op_y_str: int
    res0_h_shift: int
    res0_w_shift: int
    opd0_h_shift: int
    opd0_w_shift: int
    opd1_h_shift: int
    opd1_w_shift: int
    tsk_lane_num: int
    # --- NCHW shapes for result and operands ---
    res0_n: int
    res0_c: int
    res0_h: int
    res0_w: int
    opd0_n: int
    opd0_c: int
    opd0_h: int
    opd0_w: int
    opd1_n: int
    opd1_c: int
    opd1_h: int
    opd1_w: int
    # --- strides and addresses ---
    res0_n_str: int
    res0_c_str: int
    opd0_n_str: int
    opd0_c_str: int
    opd1_n_str: int
    opd1_c_str: int
    opd2_n_str: int
    opd2_c_str: int
    res0_addr: int
    opd0_addr: int
    opd1_addr: int
    opd2_addr: int
    res0_h_str: int
    res0_w_str: int
    opd0_h_str: int
    opd0_w_str: int
    opd1_h_str: int
    opd1_w_str: int
    opd2_h_str: int
    opd2_w_str: int
    res1_addr: int
    opd3_addr: int
    # total command length in bits
    length: int = 1024
class EnvTool():
    """Exposes a single environment action as a string-in/string-out tool.

    ``action_info`` supplies the action's name and usage spec (argument name
    -> description); ``env`` executes parsed actions.
    """

    def __init__(self, action_info, env):
        self.action_info = action_info
        self.env = env

    def run(self, action_input: str) -> str:
        """Parse ``action_input`` as JSON and execute the action.

        Returns the environment observation on success; on any parsing or
        execution failure, returns an explanatory error message that echoes
        the expected input format instead of raising.
        """
        try:
            parsed_input = LangChainAgent.parse_action_input(action_input, self.action_info)
            observation = self.env.execute(Action(self.action_info.name, parsed_input))
        except Exception as e:
            usage = ',\n            '.join([f'{k}: [{v}]' for (k, v) in self.action_info.usage.items()])
            usage = f'''{{
            {usage}
}}'''
            invalid_action_error = f'''The action input for {self.action_info.name} needs to be a valid json with proper entries. You may have missed the comma between entries. Please use the correct format and try again:
{usage}'''
            # BUG FIX: `e` is an Exception object; concatenating it to a str
            # raised TypeError and masked the real error. Coerce with str().
            observation = ((('ActionInputParsingError: ' + str(e)) + '\n') + invalid_action_error)
        return observation
def extract_done_markers(dones: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Locate episode boundaries in a boolean ``done`` vector.

    Each True marks the last step of an episode; returns parallel arrays
    (starts, ends, lengths), one entry per completed episode.
    """
    ends = np.flatnonzero(dones)
    # Every episode begins either at index 0 or right after the previous end.
    starts = np.concatenate(([0], ends[:-1] + 1))
    lengths = ends - starts + 1
    return (starts, ends, lengths)
def load(filepath: str, **kwargs):
    """Load a torch-serialized object from a local path or an ``hdfs://`` URI.

    Local paths go straight to ``torch.load``; hdfs paths are read fully into
    an in-memory buffer first and deserialized from there.
    """
    if filepath.startswith('hdfs://'):
        with hopen(filepath, 'rb') as reader:
            buffer = io.BytesIO(reader.read())
            state_dict = torch.load(buffer, **kwargs)
            del buffer  # release the in-memory copy promptly
            return state_dict
    return torch.load(filepath, **kwargs)
def infer_gib_multiclass(metric: Callable) -> bool:
    """Infer whether a multiclass ``metric(label, pred)`` is greater-is-better.

    Scores a perfectly aligned 3-class probability matrix against a
    row-reversed (misaligned) one; if the aligned score is larger, higher
    metric values mean better predictions.
    """
    labels = np.arange(3)
    aligned = np.full((3, 3), 0.05)
    np.fill_diagonal(aligned, 0.9)
    good_score = metric(labels, aligned)
    bad_score = metric(labels, aligned[::-1])
    assert good_score != bad_score, 'Cannot infer greater is better from metric. Should be set manually.'
    return good_score > bad_score
class Down(nn.Module):
    """U-Net downscaling block: 2x2 max-pool followed by a double convolution."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        # Halve spatial resolution first, then adjust channels in DoubleConv.
        self.mpconv = nn.Sequential(nn.MaxPool2d(2), DoubleConv(in_ch, out_ch))

    def forward(self, x):
        """Apply pooling + double convolution to ``x``."""
        return self.mpconv(x)
def computeSequenceClassificationF1(outputs, targets, tasks):
    """Compute percentage F1 for sequence-classification predictions.

    ``targets`` entries are single-element lists; both predictions and gold
    labels are mapped to ids through the first task's ``label2id`` before
    scoring with the 'f1' metric.
    """
    label2id = tasks[0].label2id
    pred_ids = [label2id[output] for output in outputs]
    gold_ids = [label2id[target[0]] for target in targets]
    score = load_metric('f1').compute(references=gold_ids, predictions=pred_ids)['f1']
    return score * 100
_utils.test(require=ti.extension.adstack)
# NOTE(review): the line above looks like a stripped decorator — upstream this
# is presumably `@..._utils.test(require=ti.extension.adstack)` applied to the
# function below; as a bare expression it has no effect. TODO confirm.
def test_ad_fibonacci_index():
    """Autodiff test: gradients must flow through data-dependent indexing.

    Starting from (p, q) = (0, 1), five Fibonacci steps make q take the values
    1, 2, 3, 5, 8; each of the N outer iterations accumulates a[q] into b[q],
    so d f / d a[i] is N at Fibonacci indices and 0 elsewhere.
    """
    N = 5
    M = 10
    a = ti.field(ti.f32, shape=M, needs_grad=True)
    b = ti.field(ti.f32, shape=M, needs_grad=True)
    f = ti.field(ti.f32, shape=(), needs_grad=True)
    # NOTE(review): presumably decorated `@ti.kernel` upstream (it is later
    # called as `fib.grad()`, which only exists on taichi kernels) — confirm.
    def fib():
        for i in range(N):
            p = 0
            q = 1
            for j in range(5):
                (p, q) = (q, (p + q))
            b[q] += a[q]
        for i in range(M):
            f[None] += b[i]
    f.grad[None] = 1
    a.fill(1)
    fib()
    fib.grad()
    for i in range(M):
        is_fib = int((i in [1, 2, 3, 5, 8]))
        assert (a.grad[i] == (is_fib * N))
        assert (b[i] == (is_fib * N))
# NOTE(review): stray statement — `_grad` is not defined anywhere in view;
# this looks like extraction residue (possibly of a stripped decorator or a
# mangled `.grad()` call). TODO confirm against the upstream file.
_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq SEW-D checkpoint to the HuggingFace format.

    Loads the fairseq model, builds or loads an ``SEWDConfig``, sets up the
    feature extractor (and, for fine-tuned CTC models, a tokenizer derived
    from the fairseq dictionary), copies the weights, and saves everything to
    ``pytorch_dump_folder_path``.
    """
    if is_finetuned:
        # Fine-tuned checkpoints need the task's data directory to resolve the
        # target dictionary; it is assumed to sit next to ``dict_path``.
        (model, _, _) = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:(- 1)])})
    else:
        (model, _, _) = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    if (config_path is not None):
        config = SEWDConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()
    # Layer-norm feature extraction implies attention masks are meaningful.
    return_attention_mask = (True if (config.feat_extract_norm == 'layer') else False)
    feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask)
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # Deliberate swap of <s> and <pad>: the CTC blank symbol is <pad>
            # and fairseq puts it at index 0, where HF expects <s>.
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if (not os.path.isdir(pytorch_dump_folder_path)):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False)
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_model = SEWDForCTC(config)
    else:
        hf_model = SEWDModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
def _sort_commands(cmddict, order):
def keyfn(key):
try:
return order.index(key[1])
except ValueError:
return 255
return sorted(cmddict.items(), key=keyfn) |
def NumberField_relative_v1(base_field, poly, name, latex_name, canonical_embedding=None):
    """Legacy (v1) reconstruction hook for a relative number field.

    Rebases ``poly`` over ``base_field`` and constructs the field without
    re-checking irreducibility.
    """
    relative_poly = poly.change_ring(base_field)
    return NumberField(relative_poly, name, check=False, embedding=canonical_embedding, latex_name=latex_name)
class GCPKeyManager():
    """Manages SSH keypairs for GCP instances on the local filesystem.

    Keys live under ``local_key_dir`` as ``<name>.pem`` (private) and
    ``<name>.pub`` (public) pairs.
    """

    def __init__(self, local_key_dir: Path=(key_root / 'gcp')):
        self.local_key_dir = local_key_dir

    def get_private_key(self, key_name: str) -> Path:
        """Path of the PEM private key for ``key_name``."""
        return self.local_key_dir / f'{key_name}.pem'

    def get_public_key(self, key_name: str) -> Path:
        """Path of the public key for ``key_name``."""
        return self.local_key_dir / f'{key_name}.pub'

    def key_exists_local(self, key_name: str) -> bool:
        """True iff both halves of the keypair exist as regular files."""
        for key_path in (self.get_private_key(key_name), self.get_public_key(key_name)):
            if not (key_path.exists() and key_path.is_file()):
                return False
        return True

    def make_key_local(self, key_name: str) -> Path:
        """Generate a fresh keypair locally; refuses to overwrite an existing one."""
        if self.key_exists_local(key_name):
            logger.error(f'Key {key_name} already exists locally')
            raise skyplane_exceptions.PermissionsException(f'Key {key_name} already exists locally, please delete it first or use a different key name.')
        pem_path = self.get_private_key(key_name)
        pub_path = self.get_public_key(key_name)
        logger.fs.debug(f'[GCP] Creating local keypair {key_name}')
        self.local_key_dir.mkdir(parents=True, exist_ok=True)
        generate_keypair(pub_path, pem_path)
        return pem_path

    def delete_key_local(self, key_name: str):
        """Remove both key files if the pair exists; no-op otherwise."""
        if self.key_exists_local(key_name):
            (self.local_key_dir / f'{key_name}.pem').unlink(missing_ok=True)
            (self.local_key_dir / f'{key_name}.pub').unlink(missing_ok=True)

    def ensure_key_exists(self, key_name: str) -> Path:
        """Return the private key path, creating the keypair first if missing."""
        if not self.key_exists_local(key_name):
            return self.make_key_local(key_name)
        return self.get_private_key(key_name)
def basic1d(filters=None, kernel_size=3, stride=2, dilation=1, pool=0, pool_stride=1, squeeze_excite_reduction=0, num_classes=2, input_channels=8, act='relu', bn=True, headless=False, drop_p=0.0, lin_ftrs_head=None, ps_head=0.5, bn_final_head=False, bn_head=True, act_head='relu', concat_pooling=True):
    """Thin wrapper around ``basic_conv1d`` with sensible defaults.

    ``filters`` defaults to five 128-channel stages; a fresh list is built on
    every call (BUG FIX: the original used a mutable default argument
    ``filters=[128]*5``, shared across calls — any downstream mutation would
    leak into later invocations).
    All other keyword arguments are forwarded unchanged.
    """
    if filters is None:
        filters = [128] * 5
    return basic_conv1d(filters=filters, kernel_size=kernel_size, stride=stride, dilation=dilation, pool=pool, pool_stride=pool_stride, squeeze_excite_reduction=squeeze_excite_reduction, num_classes=num_classes, input_channels=input_channels, act=act, bn=bn, headless=headless, drop_p=drop_p, lin_ftrs_head=lin_ftrs_head, ps_head=ps_head, bn_final_head=bn_final_head, bn_head=bn_head, act_head=act_head, concat_pooling=concat_pooling)
def register_Ns3RadiotapHeader_methods(root_module, cls):
    """Register ns3::RadiotapHeader's constructors and methods on the PyBindGen wrapper.

    Generated-style binding code: each ``add_method`` mirrors the C++
    signature (copy/default constructors, Serialize/Deserialize, per-field
    getters and setters of the radiotap header).
    """
    # Constructors: copy constructor and default constructor.
    cls.add_constructor([param('ns3::RadiotapHeader const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    # Field getters (const).
    cls.add_method('GetAmpduStatusFlags', 'uint16_t', [], is_const=True)
    cls.add_method('GetAmpduStatusRef', 'uint32_t', [], is_const=True)
    cls.add_method('GetAntennaNoisePower', 'uint8_t', [], is_const=True)
    cls.add_method('GetAntennaSignalPower', 'uint8_t', [], is_const=True)
    cls.add_method('GetChannelFlags', 'uint16_t', [], is_const=True)
    cls.add_method('GetChannelFrequency', 'uint16_t', [], is_const=True)
    cls.add_method('GetFrameFlags', 'uint8_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetMcsFlags', 'uint8_t', [], is_const=True)
    cls.add_method('GetMcsKnown', 'uint8_t', [], is_const=True)
    cls.add_method('GetMcsRate', 'uint8_t', [], is_const=True)
    cls.add_method('GetRate', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTsft', 'uint64_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetVhtBandwidth', 'uint8_t', [], is_const=True)
    cls.add_method('GetVhtCoding', 'uint8_t', [], is_const=True)
    cls.add_method('GetVhtFlags', 'uint8_t', [], is_const=True)
    cls.add_method('GetVhtGroupId', 'uint8_t', [], is_const=True)
    cls.add_method('GetVhtKnown', 'uint16_t', [], is_const=True)
    cls.add_method('GetVhtMcsNssUser1', 'uint8_t', [], is_const=True)
    cls.add_method('GetVhtMcsNssUser2', 'uint8_t', [], is_const=True)
    cls.add_method('GetVhtMcsNssUser3', 'uint8_t', [], is_const=True)
    cls.add_method('GetVhtMcsNssUser4', 'uint8_t', [], is_const=True)
    cls.add_method('GetVhtPartialAid', 'uint8_t', [], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    # Field setters.
    cls.add_method('SetAmpduStatus', 'void', [param('uint32_t', 'referenceNumber'), param('uint16_t', 'flags'), param('uint8_t', 'crc')])
    cls.add_method('SetAntennaNoisePower', 'void', [param('double', 'noise')])
    cls.add_method('SetAntennaSignalPower', 'void', [param('double', 'signal')])
    cls.add_method('SetChannelFrequencyAndFlags', 'void', [param('uint16_t', 'frequency'), param('uint16_t', 'flags')])
    cls.add_method('SetFrameFlags', 'void', [param('uint8_t', 'flags')])
    cls.add_method('SetMcsFields', 'void', [param('uint8_t', 'known'), param('uint8_t', 'flags'), param('uint8_t', 'mcs')])
    cls.add_method('SetRate', 'void', [param('uint8_t', 'rate')])
    cls.add_method('SetTsft', 'void', [param('uint64_t', 'tsft')])
    cls.add_method('SetVhtFields', 'void', [param('uint16_t', 'known'), param('uint8_t', 'flags'), param('uint8_t', 'bandwidth'), param('uint8_t *', 'mcs_nss'), param('uint8_t', 'coding'), param('uint8_t', 'group_id'), param('uint16_t', 'partial_aid')])
    return
class NumpyArray(NumpyMeta, Content):
    def __init__(self, data: ArrayLike, *, parameters=None, backend=None):
        """Wrap a backend array buffer as an Awkward content node.

        If ``backend`` is None it is inferred from ``data`` (defaulting to
        the NumPy backend). ``data`` must be at least 1-dimensional; when the
        '__array__' parameter is 'char'/'byte' it must be 1-D uint8.
        """
        if (backend is None):
            backend = backend_of_obj(data, default=NumpyBackend.instance())
        self._data = backend.nplike.asarray(data)
        # Validates the dtype maps to a known Awkward primitive; skipped for
        # Jax, whose tracers may carry dtypes outside that mapping.
        if (not isinstance(backend.nplike, Jax)):
            ak.types.numpytype.dtype_to_primitive(self._data.dtype)
        if (len(self._data.shape) == 0):
            raise TypeError("{} 'data' must be an array, not a scalar: {}".format(type(self).__name__, repr(data)))
        if ((parameters is not None) and (parameters.get('__array__') in ('char', 'byte'))):
            if ((data.dtype != np.dtype(np.uint8)) or (len(data.shape) != 1)):
                raise ValueError("{} is a {}, so its 'data' must be 1-dimensional and uint8, not {}".format(type(self).__name__, parameters['__array__'], repr(data)))
        self._init(parameters, backend)
    def data(self) -> ArrayLike:
        """The underlying backend array buffer (no copy).

        NOTE(review): likely an ``@property`` upstream whose decorator was
        lost in extraction — confirm before calling.
        """
        return self._data
form_cls: Final = NumpyForm
    def copy(self, data=UNSET, *, parameters=UNSET, backend=UNSET):
        """Shallow copy, optionally overriding data/parameters/backend.

        Fields left as UNSET are taken from ``self``; the buffer is shared
        unless ``data`` is replaced.
        """
        return NumpyArray((self._data if (data is UNSET) else data), parameters=(self._parameters if (parameters is UNSET) else parameters), backend=(self._backend if (backend is UNSET) else backend))
    def __copy__(self):
        """Support ``copy.copy``: shallow copy sharing the underlying buffer."""
        return self.copy()
    def __deepcopy__(self, memo):
        """Support ``copy.deepcopy``: duplicates both the buffer and parameters."""
        return self.copy(data=copy.deepcopy(self._data, memo), parameters=copy.deepcopy(self._parameters, memo))
    def __array__(self, dtype=None):
        """NumPy conversion hook; deprecated in favor of ``ak.to_numpy``."""
        deprecate(f'np.asarray(content) is deprecated for {type(self).__name__}. Use ak.to_numpy(content) instead', version='2.6.0')
        return numpy.asarray(self._data, dtype=dtype)
    def simplified(cls, data, *, parameters=None, backend=None):
        """Construct directly — a NumpyArray needs no further simplification.

        NOTE(review): first parameter is ``cls`` — presumably a
        ``@classmethod`` whose decorator was lost in extraction; confirm.
        """
        return cls(data, parameters=parameters, backend=backend)
    def shape(self) -> tuple[(ShapeItem, ...)]:
        """Full shape of the underlying buffer.

        NOTE(review): likely an ``@property`` upstream (it is read without
        parentheses elsewhere in this class) — confirm.
        """
        return self._data.shape
    def inner_shape(self) -> tuple[(ShapeItem, ...)]:
        """Shape without the leading (length) axis."""
        return self._data.shape[1:]
    def strides(self) -> tuple[(ShapeItem, ...)]:
        """Byte strides of the underlying buffer, as reported by the backend."""
        return self._backend.nplike.strides(self._data)
    def dtype(self) -> np.dtype:
        """NumPy dtype of the underlying buffer."""
        return self._data.dtype
    def _raw(self, nplike=None):
        """Return the raw buffer converted to ``nplike`` (no copy if already there)."""
        return to_nplike(self.data, nplike, from_nplike=self._backend.nplike)
    def _form_with_key(self, getkey: Callable[([Content], (str | None))]) -> NumpyForm:
        """Build this node's NumpyForm (primitive + inner shape), keyed by ``getkey(self)``."""
        return self.form_cls(ak.types.numpytype.dtype_to_primitive(self._data.dtype), self._data.shape[1:], parameters=self._parameters, form_key=getkey(self))
    def _to_buffers(self, form: Form, getkey: Callable[([Content, Form, str], str)], container: MutableMapping[(str, ArrayLike)], backend: Backend, byteorder: str):
        """Serialize: store this node's buffer in ``container`` under the
        'data' key derived from ``form``, normalized to ``byteorder``."""
        assert isinstance(form, self.form_cls)
        key = getkey(self, form, 'data')
        container[key] = ak._util.native_to_byteorder(self._raw(backend.nplike), byteorder)
    def _to_typetracer(self, forget_length: bool) -> Self:
        """Convert to the typetracer backend; optionally erase the length so it becomes unknown."""
        backend = TypeTracerBackend.instance()
        data = self._raw(backend.nplike)
        return NumpyArray((data.forget_length() if forget_length else data), parameters=self._parameters, backend=backend)
    def _touch_data(self, recursive: bool):
        """Typetracer bookkeeping: record that the buffer's data was accessed
        (only when the backend has no concrete data)."""
        if (not self._backend.nplike.known_data):
            self._data.touch_data()
    def _touch_shape(self, recursive: bool):
        """Typetracer bookkeeping: record that the buffer's shape was accessed
        (only when the backend has no concrete data)."""
        if (not self._backend.nplike.known_data):
            self._data.touch_shape()
    def length(self) -> ShapeItem:
        """Length along the first axis.

        NOTE(review): likely an ``@property`` upstream — confirm.
        """
        return self._data.shape[0]
    def __repr__(self):
        """Render via the indentable ``_repr`` helper with no surrounding context."""
        return self._repr('', '', '')
    def _repr(self, indent, pre, post):
        """Render an XML-ish repr of this node.

        Uses a compact single-line form when the data fits on one line and
        there are no extra attributes; otherwise a multi-line form with the
        array string truncated to at most ~5 lines.
        """
        out = [indent, pre, '<NumpyArray dtype=']
        out.append(repr(str(self.dtype)))
        if (len(self._data.shape) == 1):
            out.append((' len=' + repr(str(self._data.shape[0]))))
        else:
            out.append(" shape='({})'".format(', '.join((str(x) for x in self._data.shape))))
        extra = self._repr_extra((indent + '    '))
        # First try a narrow rendering to decide between one-line and
        # multi-line output.
        arraystr_lines = self._backend.nplike.array_str(self._data, max_line_width=30).split('\n')
        if ((len(extra) != 0) or (len(arraystr_lines) > 1)):
            arraystr_lines = self._backend.nplike.array_str(self._data, max_line_width=max(((80 - len(indent)) - 4), 40)).split('\n')
            if (len(arraystr_lines) > 5):
                arraystr_lines = ((arraystr_lines[:2] + ['    ...']) + arraystr_lines[(- 2):])
            out.append('>')
            out.extend(extra)
            out.append((('\n' + indent) + '    '))
            out.append((('\n' + indent) + '    ').join(arraystr_lines))
            out.append((('\n' + indent) + '</NumpyArray>'))
        else:
            out.append('>')
            out.append(arraystr_lines[0])
            out.append('</NumpyArray>')
        out.append(post)
        return ''.join(out)
    def to_RegularArray(self):
        """Convert a multidimensional buffer into nested RegularArrays over a
        flattened 1-D NumpyArray; parameters move to the outermost node."""
        shape = self._data.shape
        # zeroslen[i] is the product of shape[:i]; used as each RegularArray's
        # explicit length so the conversion is correct for zero-length axes.
        zeroslen = [1]
        for x in shape:
            zeroslen.append((zeroslen[(- 1)] * x))
        out = NumpyArray(self._backend.nplike.reshape(self._data, ((- 1),)), parameters=None, backend=self._backend)
        for i in range((len(shape) - 1), 0, (- 1)):
            out = ak.contents.RegularArray(out, shape[i], zeroslen[i], parameters=None)
        out._parameters = self._parameters
        return out
    def maybe_to_NumpyArray(self) -> Self:
        """Already a NumpyArray; return self unchanged."""
        return self
    def __iter__(self):
        """Iterate over the raw backend buffer along its first axis."""
        return iter(self._data)
    def _getitem_nothing(self):
        """Return a length-0 node of this type (used for empty selections)."""
        tmp = self._data[0:0]
        return NumpyArray(self._backend.nplike.reshape(tmp, ((0,) + tmp.shape[2:])), parameters=None, backend=self._backend)
    def _getitem_at(self, where: IndexType):
        """Select one element: a scalar for 1-D data, else a NumpyArray view.

        With an unknown-data (typetracer) backend and 1-D data, records the
        access and returns a shapeless placeholder instead.
        """
        if ((not self._backend.nplike.known_data) and (len(self._data.shape) == 1)):
            self._touch_data(recursive=False)
            return TypeTracerArray._new(self._data.dtype, shape=())
        try:
            out = self._data[where]
        except IndexError as err:
            # Re-raise as an Awkward index error carrying this node for context.
            raise ak._errors.index_error(self, where, str(err)) from err
        if (hasattr(out, 'shape') and (len(out.shape) != 0)):
            return NumpyArray(out, parameters=None, backend=self._backend)
        else:
            return out
    def _getitem_range(self, start: IndexType, stop: IndexType) -> Content:
        """Slice along the first axis, re-raising IndexError as an Awkward index error."""
        try:
            out = self._data[start:stop]
        except IndexError as err:
            raise ak._errors.index_error(self, slice(start, stop), str(err)) from err
        return NumpyArray(out, parameters=self._parameters, backend=self._backend)
    def _getitem_field(self, where: (str | SupportsIndex), only_fields: tuple[(str, ...)]=()) -> Content:
        """Field selection is invalid here: a NumpyArray has no record fields."""
        raise ak._errors.index_error(self, where, 'not an array of records')
    def _getitem_fields(self, where: list[(str | SupportsIndex)], only_fields: tuple[(str, ...)]=()) -> Content:
        """Selecting an empty field list yields an empty slice; any actual
        field name is invalid because a NumpyArray has no record fields."""
        if (len(where) == 0):
            return self._getitem_range(0, 0)
        raise ak._errors.index_error(self, where, 'not an array of records')
    def _carry(self, carry: Index, allow_lazy: bool) -> Content:
        """Gather elements by the integer index ``carry`` (always eager here;
        ``allow_lazy`` is accepted for interface compatibility)."""
        assert isinstance(carry, ak.index.Index)
        try:
            nextdata = self._data[carry.data]
        except IndexError as err:
            raise ak._errors.index_error(self, carry.data, str(err)) from err
        return NumpyArray(nextdata, parameters=self._parameters, backend=self._backend)
    def _getitem_next_jagged(self, slicestarts: Index, slicestops: Index, slicecontent: Content, tail) -> Content:
        """Jagged (per-element ragged) slicing needs at least 2 dimensions;
        delegate to the RegularArray form, or fail for 1-D data."""
        if (self._data.ndim == 1):
            raise ak._errors.index_error(self, ak.contents.ListArray(slicestarts, slicestops, slicecontent, parameters=None), 'too many jagged slice dimensions for array')
        else:
            next = self.to_RegularArray()
            return next._getitem_next_jagged(slicestarts, slicestops, slicecontent, tail)
    def _getitem_next(self, head: (SliceItem | tuple), tail: tuple[(SliceItem, ...)], advanced: (Index | None)) -> Content:
        """Advanced-indexing engine: apply the ``head`` slice item, then ``tail``.

        ``advanced`` carries the broadcast integer index produced by an
        earlier array-valued slice item (NumPy advanced-indexing semantics).
        IndexErrors from the backend are re-raised as Awkward index errors.
        """
        if (head is NO_HEAD):
            return self
        elif is_integer_like(head):
            where = (slice(None), head, *tail)
            try:
                out = self._data[where]
            except IndexError as err:
                raise ak._errors.index_error(self, (head, *tail), str(err)) from err
            if (hasattr(out, 'shape') and (len(out.shape) != 0)):
                return NumpyArray(out, parameters=None, backend=self._backend)
            else:
                return out
        elif (isinstance(head, slice) or (head is np.newaxis) or (head is Ellipsis)):
            where = (slice(None), head, *tail)
            try:
                out = self._data[where]
            except IndexError as err:
                raise ak._errors.index_error(self, (head, *tail), str(err)) from err
            return NumpyArray(out, parameters=self._parameters, backend=self._backend)
        elif isinstance(head, str):
            return self._getitem_next_field(head, tail, advanced)
        elif isinstance(head, list):
            return self._getitem_next_fields(head, tail, advanced)
        elif isinstance(head, ak.index.Index64):
            # Integer-array head: combine with any previously-seen advanced
            # index so parallel array indices broadcast together.
            if (advanced is None):
                where = (slice(None), head.data, *tail)
            else:
                where = (self._backend.index_nplike.asarray(advanced.data), head.data, *tail)
            try:
                out = self._data[where]
            except IndexError as err:
                raise ak._errors.index_error(self, (head, *tail), str(err)) from err
            return NumpyArray(out, parameters=self._parameters, backend=self._backend)
        elif isinstance(head, ak.contents.ListOffsetArray):
            where = (slice(None), head, *tail)
            try:
                out = self._data[where]
            except IndexError as err:
                raise ak._errors.index_error(self, (head, *tail), str(err)) from err
            return NumpyArray(out, parameters=self._parameters, backend=self._backend)
        elif isinstance(head, ak.contents.IndexedOptionArray):
            next = self.to_RegularArray()
            return next._getitem_next_missing(head, tail, advanced)
        else:
            raise AssertionError(repr(head))
    def _offsets_and_flattened(self, axis: int, depth: int) -> tuple[(Index, Content)]:
        """Flatten at ``axis``: only meaningful for multidimensional data,
        which is delegated through the RegularArray form; axis=0 and
        out-of-range axes raise AxisError."""
        posaxis = maybe_posaxis(self, axis, depth)
        if ((posaxis is not None) and ((posaxis + 1) == depth)):
            raise AxisError('axis=0 not allowed for flatten')
        elif (len(self.shape) != 1):
            return self.to_RegularArray()._offsets_and_flattened(axis, depth)
        else:
            raise AxisError(f'axis={axis} exceeds the depth of this array ({depth})')
    def _mergeable_next(self, other: Content, mergebool: bool) -> bool:
        """Decide whether this node can be concatenated with ``other``.

        Identity-like/union nodes always merge; indexed/option nodes defer to
        their content; parameters must be compatible; bool<->number merging is
        gated by ``mergebool``; datetime/timedelta never auto-cast; otherwise
        mergeability follows the backend's type-casting rules.
        """
        if (other.is_identity_like or other.is_union):
            return True
        elif (other.is_indexed or other.is_option):
            return self._mergeable_next(other.content, mergebool)
        elif (not type_parameters_equal(self._parameters, other._parameters)):
            return False
        elif (len(self.shape) > 1):
            return self._to_regular_primitive()._mergeable_next(other, mergebool)
        elif isinstance(other, ak.contents.NumpyArray):
            if (self._data.ndim != other._data.ndim):
                return False
            if (self.dtype == other.dtype):
                return True
            elif ((np.issubdtype(self.dtype, np.bool_) and np.issubdtype(other.dtype, np.number)) or (np.issubdtype(self.dtype, np.number) and np.issubdtype(other.dtype, np.bool_))):
                return mergebool
            elif (np.issubdtype(self.dtype, np.datetime64) or np.issubdtype(self.dtype, np.timedelta64) or np.issubdtype(other.dtype, np.datetime64) or np.issubdtype(other.dtype, np.timedelta64)):
                return False
            else:
                return (self.backend.nplike.can_cast(self.dtype, other.dtype) or self.backend.nplike.can_cast(other.dtype, self.dtype))
        else:
            return False
    def _mergemany(self, others: Sequence[Content]) -> Content:
        """Concatenate this node with ``others``.

        The mergeable prefix ("head") of NumpyArrays is concatenated directly
        (EmptyArrays skipped, parameters intersected); any remaining "tail"
        is handled recursively via reverse merging.
        """
        if (len(others) == 0):
            return self
        if (len(self.shape) > 1):
            return self.to_RegularArray()._mergemany(others)
        (head, tail) = self._merging_strategy(others)
        contiguous_arrays = []
        parameters = self._parameters
        for array in head:
            if isinstance(array, ak.contents.EmptyArray):
                continue
            # Only parameters shared by every merged array survive.
            parameters = parameters_intersect(parameters, array._parameters)
            if isinstance(array, ak.contents.NumpyArray):
                contiguous_arrays.append(array.data)
            else:
                raise AssertionError(((('cannot merge ' + type(self).__name__) + ' with ') + type(array).__name__))
        contiguous_arrays = self._backend.nplike.concat(contiguous_arrays)
        next = NumpyArray(contiguous_arrays, parameters=parameters, backend=self._backend)
        if (len(tail) == 0):
            return next
        reversed = tail[0]._reverse_merge(next)
        if (len(tail) == 1):
            return reversed
        else:
            return reversed._mergemany(tail[1:])
    def _fill_none(self, value: Content) -> Content:
        """No missing values are possible in a plain NumpyArray; return self."""
        return self
    def _local_index(self, axis, depth):
        """Per-axis local index: handled at axis 0 directly, via RegularArray
        for deeper axes of multidimensional data, else AxisError."""
        posaxis = maybe_posaxis(self, axis, depth)
        if ((posaxis is not None) and ((posaxis + 1) == depth)):
            return self._local_index_axis0()
        elif (len(self.shape) <= 1):
            raise AxisError(f'axis={axis} exceeds the depth of this array ({depth})')
        else:
            return self.to_RegularArray()._local_index(axis, depth)
    def to_contiguous(self) -> Self:
        """Return self if already C-contiguous, else a contiguous copy of the buffer."""
        if self.is_contiguous:
            return self
        else:
            return ak.contents.NumpyArray(self._backend.nplike.ascontiguousarray(self._data), parameters=self._parameters, backend=self._backend)
    def is_contiguous(self) -> bool:
        """Whether the buffer is C-contiguous.

        NOTE(review): likely an ``@property`` upstream (``to_contiguous``
        reads it without parentheses) — confirm.
        """
        return self._backend.nplike.is_c_contiguous(self._data)
    def _subranges_equal(self, starts, stops, length, sorted=True):
        """Kernel-backed check on the subranges ``[starts[i], stops[i])``.

        Copies the buffer into a scratch array, sorts each subrange first if
        not already ``sorted``, then asks the ``subrange_equal`` kernel; the
        result comes back through a one-element is_equal index.
        """
        is_equal = ak.index.Index64.zeros(1, nplike=self._backend.nplike)
        tmp = self._backend.nplike.empty(length, dtype=self.dtype)
        self._backend.maybe_kernel_error(self._backend[('awkward_NumpyArray_fill', self.dtype.type, self._data.dtype.type)](tmp, 0, self._data, length))
        if (not sorted):
            tmp_beg_ptr = ak.index.Index64.empty(ak._util.kMaxLevels, nplike=self._backend.index_nplike)
            tmp_end_ptr = ak.index.Index64.empty(ak._util.kMaxLevels, nplike=self._backend.index_nplike)
            assert ((tmp_beg_ptr.nplike is self._backend.index_nplike) and (tmp_end_ptr.nplike is self._backend.index_nplike) and (starts.nplike is self._backend.index_nplike) and (stops.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_quick_sort', self.dtype.type, tmp_beg_ptr.dtype.type, tmp_end_ptr.dtype.type, starts.dtype.type, stops.dtype.type)](tmp, tmp_beg_ptr.data, tmp_end_ptr.data, starts.data, stops.data, True, starts.length, ak._util.kMaxLevels))
        assert ((starts.nplike is self._backend.index_nplike) and (stops.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_NumpyArray_subrange_equal', self.dtype.type, starts.dtype.type, stops.dtype.type, np.bool_)](tmp, starts.data, stops.data, starts.length, is_equal.data))
        return (True if (is_equal[0] == 1) else False)
    def _as_unique_strings(self, offsets):
        """Sort the uint8 buffer as strings delimited by ``offsets`` and
        deduplicate; returns (unique-data node, offsets of the unique strings)."""
        offsets = ak.index.Index64(offsets.data, nplike=offsets.nplike)
        outoffsets = ak.index.Index64.empty(offsets.length, nplike=self._backend.index_nplike)
        out = self._backend.nplike.empty(self.shape[0], dtype=self.dtype)
        assert ((offsets.nplike is self._backend.index_nplike) and (outoffsets.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_NumpyArray_sort_asstrings_uint8', self.dtype.type, self._data.dtype.type, offsets._data.dtype.type, outoffsets.dtype.type)](out, self._data, offsets.data, offsets.length, outoffsets.data, True, False))
        outlength = ak.index.Index64.empty(1, self._backend.index_nplike)
        nextoffsets = ak.index.Index64.empty(offsets.length, self._backend.index_nplike)
        assert ((outoffsets.nplike is self._backend.index_nplike) and (nextoffsets.nplike is self._backend.index_nplike) and (outlength.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_NumpyArray_unique_strings', self.dtype.type, outoffsets.dtype.type, nextoffsets.dtype.type, outlength.dtype.type)](out, outoffsets.data, offsets.length, nextoffsets.data, outlength.data))
        out2 = NumpyArray(out, parameters=self._parameters, backend=self._backend)
        return (out2, nextoffsets[:outlength[0]])
    def _numbers_to_type(self, name, including_unknown):
        """Cast the buffer to the primitive type ``name``; char/byte string
        buffers are left untouched."""
        if ((self.parameter('__array__') == 'char') or (self.parameter('__array__') == 'byte')):
            return self
        else:
            dtype = primitive_to_dtype(name)
            return NumpyArray(self._backend.nplike.asarray(self._data, dtype=dtype), parameters=self._parameters, backend=self._backend)
    def _is_unique(self, negaxis, starts, parents, outlength):
        """True iff deduplication (``_unique``) keeps every element, i.e. the
        content was already free of duplicates within the reduced ranges."""
        if (self.length == 0):
            return True
        elif (len(self.shape) != 1):
            return self.to_RegularArray()._is_unique(negaxis, starts, parents, outlength)
        elif (not self.is_contiguous):
            return self.to_contiguous()._is_unique(negaxis, starts, parents, outlength)
        else:
            out = self._unique(negaxis, starts, parents, outlength)
            if isinstance(out, ak.contents.ListOffsetArray):
                return (out.content.length == self.length)
            else:
                return (out.length == self.length)
    def _unique(self, negaxis, starts, parents, outlength):
        """Deduplicate the buffer.

        With ``negaxis is None`` the whole (contiguous) buffer is sorted and
        uniqued into a flat NumpyArray; otherwise ``parents`` partitions the
        data into ranges that are sorted and uniqued independently, yielding
        a ListOffsetArray of per-range unique values.
        """
        if (self.shape[0] == 0):
            return self
        elif (len(self.shape) == 0):
            return self
        elif (negaxis is None):
            contiguous_self = self.to_contiguous()
            offsets = ak.index.Index64.zeros(2, self._backend.index_nplike)
            offsets[1] = self._data.size
            # datetime64 values are sorted through their int64 representation.
            dtype = (np.dtype(np.int64) if (self._data.dtype.kind.upper() == 'M') else self._data.dtype)
            out = self._backend.nplike.empty(self._data.size, dtype=dtype)
            assert (offsets.nplike is self._backend.index_nplike)
            self._backend.maybe_kernel_error(self._backend[('awkward_sort', dtype.type, dtype.type, offsets.dtype.type)](out, contiguous_self._data, offsets[1], offsets.data, 2, offsets[1], True, False))
            nextlength = ak.index.Index64.empty(1, self._backend.index_nplike)
            assert (nextlength.nplike is self._backend.index_nplike)
            self._backend.maybe_kernel_error(self._backend[('awkward_unique', out.dtype.type, nextlength.dtype.type)](out, out.shape[0], nextlength.data))
            return ak.contents.NumpyArray(self._backend.nplike.asarray(out[:nextlength[0]], dtype=self.dtype), parameters=None, backend=self._backend)
        elif (len(self.shape) != 1):
            return self.to_RegularArray()._unique(negaxis, starts, parents, outlength)
        else:
            # Derive the sorting ranges from the parents index, then sort and
            # deduplicate each range with dedicated kernels.
            parents_length = parents.length
            offsets_length = ak.index.Index64.empty(1, self._backend.index_nplike)
            assert ((offsets_length.nplike is self._backend.index_nplike) and (parents.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_sorting_ranges_length', offsets_length.dtype.type, parents.dtype.type)](offsets_length.data, parents.data, parents_length))
            offsets = ak.index.Index64.empty(offsets_length[0], self._backend.index_nplike)
            assert ((offsets.nplike is self._backend.index_nplike) and (parents.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_sorting_ranges', offsets.dtype.type, parents.dtype.type)](offsets.data, offsets_length[0], parents.data, parents_length))
            out = self._backend.nplike.empty(self.length, dtype=self.dtype)
            assert (offsets.nplike is self._backend.index_nplike)
            self._backend.maybe_kernel_error(self._backend[('awkward_sort', out.dtype.type, self._data.dtype.type, offsets.dtype.type)](out, self._data, self.shape[0], offsets.data, offsets_length[0], parents_length, True, False))
            nextoffsets = ak.index.Index64.empty(offsets.length, self._backend.index_nplike)
            assert ((offsets.nplike is self._backend.index_nplike) and (nextoffsets.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_unique_ranges', out.dtype.type, offsets.dtype.type, nextoffsets.dtype.type)](out, out.shape[0], offsets.data, offsets.length, nextoffsets.data))
            outoffsets = ak.index.Index64.empty((starts.length + 1), self._backend.index_nplike)
            assert ((outoffsets.nplike is self._backend.index_nplike) and (nextoffsets.nplike is self._backend.index_nplike) and (starts.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_unique_offsets', outoffsets.dtype.type, nextoffsets.dtype.type, starts.dtype.type)](outoffsets.data, nextoffsets.length, nextoffsets.data, starts.data, starts.length))
            return ak.contents.ListOffsetArray(outoffsets, ak.contents.NumpyArray(out), parameters=self._parameters)
    def _argsort_next(self, negaxis, starts, shifts, parents, outlength, ascending, stable):
        """Produce argsort indices per range defined by ``parents``.

        Requires contiguous 1-D data (handled by delegation otherwise); the
        optional ``shifts`` rearranges the result to account for elements
        removed upstream (e.g. missing values).
        """
        if (len(self.shape) != 1):
            return self.to_RegularArray()._argsort_next(negaxis, starts, shifts, parents, outlength, ascending, stable)
        elif (not self.is_contiguous):
            return self.to_contiguous()._argsort_next(negaxis, starts, shifts, parents, outlength, ascending, stable)
        else:
            parents_length = parents.length
            _offsets_length = ak.index.Index64.empty(1, self._backend.index_nplike)
            assert ((_offsets_length.nplike is self._backend.index_nplike) and (parents.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_sorting_ranges_length', _offsets_length.dtype.type, parents.dtype.type)](_offsets_length.data, parents.data, parents_length))
            offsets_length = self._backend.index_nplike.index_as_shape_item(_offsets_length[0])
            offsets = ak.index.Index64.empty(offsets_length, self._backend.index_nplike)
            assert ((offsets.nplike is self._backend.index_nplike) and (parents.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_sorting_ranges', offsets.dtype.type, parents.dtype.type)](offsets.data, offsets_length, parents.data, parents_length))
            # datetime64 values are compared through their int64 representation.
            dtype = (np.dtype(np.int64) if (self._data.dtype.kind.upper() == 'M') else self._data.dtype)
            nextcarry = ak.index.Index64.empty(self.length, self._backend.index_nplike)
            assert ((nextcarry.nplike is self._backend.index_nplike) and (offsets.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_argsort', nextcarry.dtype.type, dtype.type, offsets.dtype.type)](nextcarry.data, self._data, self.length, offsets.data, offsets_length, ascending, stable))
            if (shifts is not None):
                assert ((nextcarry.nplike is self._backend.index_nplike) and (shifts.nplike is self._backend.index_nplike) and (offsets.nplike is self._backend.index_nplike) and (parents.nplike is self._backend.index_nplike) and (starts.nplike is self._backend.index_nplike))
                self._backend.maybe_kernel_error(self._backend[('awkward_NumpyArray_rearrange_shifted', nextcarry.dtype.type, shifts.dtype.type, offsets.dtype.type, parents.dtype.type, starts.dtype.type)](nextcarry.data, shifts.data, shifts.length, offsets.data, offsets_length, parents.data, parents_length, starts.data, starts.length))
            out = NumpyArray(nextcarry.data, parameters=None, backend=self._backend)
            return out
    def _sort_next(self, negaxis, starts, parents, outlength, ascending, stable):
        """Sort the values within each range defined by ``parents``.

        Requires contiguous 1-D data (handled by delegation otherwise); the
        sorted values are returned as a new NumpyArray with this node's dtype.
        """
        if (len(self.shape) != 1):
            return self.to_RegularArray()._sort_next(negaxis, starts, parents, outlength, ascending, stable)
        elif (not self.is_contiguous):
            return self.to_contiguous()._sort_next(negaxis, starts, parents, outlength, ascending, stable)
        else:
            parents_length = parents.length
            _offsets_length = ak.index.Index64.empty(1, self._backend.index_nplike)
            assert ((_offsets_length.nplike is self._backend.index_nplike) and (parents.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_sorting_ranges_length', _offsets_length.dtype.type, parents.dtype.type)](_offsets_length.data, parents.data, parents_length))
            offsets_length = self._backend.index_nplike.index_as_shape_item(_offsets_length[0])
            offsets = ak.index.Index64.empty(offsets_length, self._backend.index_nplike)
            assert ((offsets.nplike is self._backend.index_nplike) and (parents.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_sorting_ranges', offsets.dtype.type, parents.dtype.type)](offsets.data, offsets_length, parents.data, parents_length))
            # datetime64 values are sorted through their int64 representation.
            dtype = (np.dtype(np.int64) if (self._data.dtype.kind.upper() == 'M') else self._data.dtype)
            out = self._backend.nplike.empty(self.length, dtype=dtype)
            assert (offsets.nplike is self._backend.index_nplike)
            self._backend.maybe_kernel_error(self._backend[('awkward_sort', dtype.type, dtype.type, offsets.dtype.type)](out, self._data, self.shape[0], offsets.data, offsets_length, parents_length, ascending, stable))
            return ak.contents.NumpyArray(self._backend.nplike.asarray(out, dtype=self.dtype), parameters=None, backend=self._backend)
def _combinations(self, n, replacement, recordlookup, parameters, axis, depth):
    """Dispatch n-choose-k combination building for the requested axis."""
    posaxis = maybe_posaxis(self, axis, depth)
    if posaxis is not None and posaxis + 1 == depth:
        # Combinations are taken along this (leaf) axis.
        return self._combinations_axis0(n, replacement, recordlookup, parameters)
    if len(self.shape) > 1:
        # A deeper axis was requested: hand off to the regular-array view.
        return self.to_RegularArray()._combinations(n, replacement, recordlookup, parameters, axis, depth)
    raise AxisError(f'axis={axis} exceeds the depth of this array ({depth})')
def _reduce_next(self, reducer, negaxis, starts, shifts, parents, outlength, mask, keepdims, behavior):
    """Apply ``reducer`` over this array as driven by the reduction machinery.

    Non-1-d or non-contiguous data is normalized first and re-dispatched.
    ``mask`` wraps the result in a ByteMaskedArray marking empty groups;
    ``keepdims`` wraps it in a length-1 RegularArray.
    """
    if (self._data.ndim > 1):
        return self.to_RegularArray()._reduce_next(reducer, negaxis, starts, shifts, parents, outlength, mask, keepdims, behavior)
    elif (not self.is_contiguous):
        return self.to_contiguous()._reduce_next(reducer, negaxis, starts, shifts, parents, outlength, mask, keepdims, behavior)
    assert self.is_contiguous
    assert (self._data.ndim == 1)
    out = reducer.apply(self, parents, starts, shifts, outlength)
    if mask:
        # Build a byte mask flagging which output slots received no input.
        outmask = ak.index.Index8.empty(outlength, self._backend.index_nplike)
        assert ((outmask.nplike is self._backend.index_nplike) and (parents.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_NumpyArray_reduce_mask_ByteMaskedArray_64', outmask.dtype.type, parents.dtype.type)](outmask.data, parents.data, parents.length, outlength))
        out = ak.contents.ByteMaskedArray(outmask, out, False, parameters=None)
    if keepdims:
        # Preserve the reduced dimension as a length-1 regular dimension.
        out = ak.contents.RegularArray(out, 1, self.length, parameters=None)
    return out
def _validity_error(self, path):
if (len(self.shape) == 0):
return f'at {path} ({type(self)!r}): shape is zero-dimensional'
for (i, dim) in enumerate(self.shape):
if (dim < 0):
return f'at {path} ({type(self)!r}): shape[{i}] < 0'
for (i, stride) in enumerate(self.strides):
if ((stride % self.dtype.itemsize) != 0):
return f'at {path} ({type(self)!r}): shape[{i}] % itemsize != 0'
return ''
def _pad_none(self, target, axis, depth, clip):
    """Pad this array with missing values out to ``target`` along ``axis``."""
    ndim = len(self.shape)
    if ndim == 0:
        raise ValueError('cannot apply ak.pad_none to a scalar')
    if ndim > 1:
        # Multidimensional data: pad via the regular-array view.
        return self.to_RegularArray()._pad_none(target, axis, depth, clip)
    if not self.is_contiguous:
        return self.to_contiguous()._pad_none(target, axis, depth, clip)
    posaxis = maybe_posaxis(self, axis, depth)
    if posaxis is not None and posaxis + 1 != depth:
        raise AxisError(f'axis={axis} exceeds the depth of this array ({depth})')
    if clip:
        return self._pad_none_axis0(target, clip=True)
    # Non-clipping pad only grows: an already-long-enough array is unchanged.
    if target < self.length:
        return self
    return self._pad_none(target, axis, depth, clip=True)
def _nbytes_part(self):
return self.data.nbytes
def _to_arrow(self, pyarrow: Any, mask_node: (Content | None), validbytes: (Content | None), length: int, options: ToArrowOptions):
    """Convert this array into a ``pyarrow.Array``.

    Multidimensional data is routed through the RegularArray conversion.
    Boolean buffers are bit-packed, as the Arrow format requires.
    """
    if (self._data.ndim != 1):
        return self.to_RegularArray()._to_arrow(pyarrow, mask_node, validbytes, length, options)
    nparray = self._raw(numpy)
    storage_type = pyarrow.from_numpy_dtype(nparray.dtype)
    if issubclass(nparray.dtype.type, (bool, np.bool_)):
        # Arrow stores booleans as little-endian bitmaps, not one byte each.
        nparray = numpy.packbits(nparray, bitorder='little')
    return pyarrow.Array.from_buffers(ak._connect.pyarrow.to_awkwardarrow_type(storage_type, options['extensionarray'], options['record_is_scalar'], mask_node, self), length, [ak._connect.pyarrow.to_validbits(validbytes), ak._connect.pyarrow.to_length(nparray, length)], null_count=ak._connect.pyarrow.to_null_count(validbytes, options['count_nulls']))
def _to_backend_array(self, allow_missing, backend):
    """Return this array's buffer converted to ``backend``'s array library."""
    source_nplike = self._backend.nplike
    return to_nplike(self.data, backend.nplike, from_nplike=source_nplike)
def _remove_structure(self, backend: Backend, options: RemoveStructureOptions) -> list[Content]:
    """Flatten this array into a single leaf NumpyArray."""
    if options['keepdims']:
        # Keep length-1 leading dimensions so the rank is preserved.
        new_shape = (1,) * (self._data.ndim - 1) + (-1,)
    else:
        new_shape = (-1,)
    flattened = backend.nplike.reshape(self._raw(backend.nplike), new_shape)
    return [ak.contents.NumpyArray(flattened, backend=backend)]
def _recursively_apply(self, action: ImplementsApplyAction, depth: int, depth_context: (Mapping[(str, Any)] | None), lateral_context: (Mapping[(str, Any)] | None), options: ApplyActionOptions) -> (Content | None):
    """Apply a user ``action`` to this node, with a ``continuation`` fallback.

    The action may return a Content (which replaces this node) or None (use
    the default continuation).  Any other return value is an error.
    """
    if ((self._data.ndim != 1) and options['numpy_to_regular']):
        return self.to_RegularArray()._recursively_apply(action, depth, depth_context, lateral_context, options)
    if options['return_array']:
        def continuation():
            # Default result when the action declines: this node itself,
            # optionally stripped of its parameters.
            if options['keep_parameters']:
                return self
            else:
                return NumpyArray(self._data, parameters=None, backend=self._backend)
    else:
        def continuation():
            # Side-effect-only traversal: nothing to build or return.
            pass
    result = action(self, depth=depth, depth_context=depth_context, lateral_context=lateral_context, continuation=continuation, backend=self._backend, options=options)
    if isinstance(result, Content):
        return result
    elif (result is None):
        return continuation()
    else:
        raise AssertionError(result)
def to_packed(self) -> Self:
    """Return an equivalent array with contiguous, minimally-sized buffers."""
    contiguous = self.to_contiguous()
    return contiguous.to_RegularArray()
def _to_list(self, behavior, json_conversions):
    """Convert to a Python list (or bytes/str for byte/char arrays).

    ``json_conversions``, when given, remaps bytes, complex numbers, NaN and
    +/-inf into JSON-representable stand-ins.
    """
    if (not self._backend.nplike.known_data):
        raise TypeError('cannot convert typetracer arrays to Python lists')
    if (self.parameter('__array__') == 'byte'):
        # Bytestring content: return raw bytes (possibly converted for JSON).
        convert_bytes = (None if (json_conversions is None) else json_conversions['convert_bytes'])
        if (convert_bytes is None):
            return ak._util.tobytes(self._data)
        else:
            return convert_bytes(ak._util.tobytes(self._data))
    elif (self.parameter('__array__') == 'char'):
        return ak._util.tobytes(self._data).decode(errors='surrogateescape')
    # Behavior-defined custom conversion takes precedence, when present.
    out = self._to_list_custom(behavior, json_conversions)
    if (out is not None):
        return out
    if (json_conversions is not None):
        complex_real_string = json_conversions['complex_real_string']
        complex_imag_string = json_conversions['complex_imag_string']
        if (complex_real_string is not None):
            if issubclass(self.dtype.type, np.complexfloating):
                # Represent complex values as {real: ..., imag: ...} records.
                return ak.contents.RecordArray([ak.contents.NumpyArray(self._data.real, backend=self._backend), ak.contents.NumpyArray(self._data.imag, backend=self._backend)], [complex_real_string, complex_imag_string], self.length, parameters=self._parameters, backend=self._backend)._to_list(behavior, json_conversions)
    out = self._data.tolist()
    if (json_conversions is not None):
        # Replace non-finite floats with their configured string stand-ins.
        nan_string = json_conversions['nan_string']
        if (nan_string is not None):
            for i in self._backend.nplike.nonzero(self._backend.nplike.isnan(self._data))[0]:
                out[i] = nan_string
        posinf_string = json_conversions['posinf_string']
        if (posinf_string is not None):
            for i in self._backend.nplike.nonzero((self._data == np.inf))[0]:
                out[i] = posinf_string
        neginf_string = json_conversions['neginf_string']
        if (neginf_string is not None):
            for i in self._backend.nplike.nonzero((self._data == (- np.inf)))[0]:
                out[i] = neginf_string
    return out
def _to_backend(self, backend: Backend) -> Self:
    """Move this array's buffer to another backend, keeping its parameters."""
    raw = self._raw(backend.nplike)
    return NumpyArray(raw, parameters=self._parameters, backend=backend)
def _is_equal_to(self, other: Self, index_dtype: bool, numpyarray: bool, all_parameters: bool) -> bool:
    """Structural equality check; buffers are compared only if ``numpyarray``."""
    if not self._is_equal_to_generic(other, all_parameters):
        return False
    if not numpyarray:
        return True
    if self.dtype != other.dtype:
        return False
    nplike = self._backend.nplike
    if nplike.known_data and not nplike.array_equal(self.data, other.data):
        return False
    # Shapes must agree where both are known; unknown lengths match anything.
    return all(
        x is unknown_length or y is unknown_length or x == y
        for x, y in zip(self.shape, other.shape)
    )
def _to_regular_primitive(self) -> ak.contents.RegularArray:
    """Broadcast the first element over the full shape, as a RegularArray."""
    # Take a length-1 slice along every axis, then broadcast it back out.
    leading = self._data[tuple(slice(None, 1) for _ in self.shape)]
    expanded = self.backend.nplike.broadcast_to(leading, self.shape)
    return NumpyArray(expanded, backend=self.backend, parameters=self.parameters).to_RegularArray()
class BaseInpaintingTrainingModule(ptl.LightningModule):
    """Base PyTorch-Lightning module for inpainting training.

    Owns the generator (always) and, unless ``predict_only``, the
    discriminator, losses, visualizer and evaluators.  Subclasses implement
    ``forward``, ``generator_loss`` and ``discriminator_loss``.
    """

    def __init__(self, config, use_ddp, *args, predict_only=False, visualize_each_iters=100,
                 average_generator=False, generator_avg_beta=0.999,
                 average_generator_start_step=30000, average_generator_period=10,
                 store_discr_outputs_for_vis=False, **kwargs):
        super().__init__(*args, **kwargs)
        LOGGER.info('BaseInpaintingTrainingModule init called')
        self.config = config
        self.generator = make_generator(config, **self.config.generator)
        self.use_ddp = use_ddp
        if not get_has_ddp_rank():
            LOGGER.info(f'''Generator
{self.generator}''')
        if not predict_only:
            # Full training setup: discriminator, losses, evaluators, EMA.
            self.save_hyperparameters(self.config)
            self.discriminator = make_discriminator(**self.config.discriminator)
            self.adversarial_loss = make_discrim_loss(**self.config.losses.adversarial)
            self.visualizer = make_visualizer(**self.config.visualizer)
            self.val_evaluator = make_evaluator(**self.config.evaluator)
            self.test_evaluator = make_evaluator(**self.config.evaluator)
            if not get_has_ddp_rank():
                LOGGER.info(f'''Discriminator
{self.discriminator}''')
            extra_val = self.config.data.get('extra_val', ())
            if extra_val:
                # One evaluator per extra validation dataset, keyed by title.
                self.extra_val_titles = list(extra_val)
                self.extra_evaluators = nn.ModuleDict(
                    {k: make_evaluator(**self.config.evaluator) for k in extra_val})
            else:
                self.extra_evaluators = {}
            # EMA ("averaged") generator bookkeeping.
            self.average_generator = average_generator
            self.generator_avg_beta = generator_avg_beta
            self.average_generator_start_step = average_generator_start_step
            self.average_generator_period = average_generator_period
            self.generator_average = None
            self.last_generator_averaging_step = -1
            self.store_discr_outputs_for_vis = store_discr_outputs_for_vis
        # Optional reconstruction / perceptual losses, enabled by config weight.
        if self.config.losses.get('l1', {'weight_known': 0})['weight_known'] > 0:
            self.loss_l1 = nn.L1Loss(reduction='none')
        if self.config.losses.get('mse', {'weight': 0})['weight'] > 0:
            self.loss_mse = nn.MSELoss(reduction='none')
        if self.config.losses.perceptual.weight > 0:
            self.loss_pl = PerceptualLoss()
        if self.config.losses.get('resnet_pl', {'weight': 0})['weight'] > 0:
            self.loss_resnet_pl = ResNetPL(**self.config.losses.resnet_pl)
        else:
            self.loss_resnet_pl = None
        self.visualize_each_iters = visualize_each_iters
        LOGGER.info('BaseInpaintingTrainingModule init done')

    def configure_optimizers(self):
        """Two optimizers: index 0 for the generator, 1 for the discriminator."""
        discriminator_params = list(self.discriminator.parameters())
        return [
            dict(optimizer=make_optimizer(self.generator.parameters(), **self.config.optimizers.generator)),
            dict(optimizer=make_optimizer(discriminator_params, **self.config.optimizers.discriminator)),
        ]

    def train_dataloader(self):
        """Build the training dataloader, adding DDP sampler kwargs if needed."""
        kwargs = dict(self.config.data.train)
        if self.use_ddp:
            kwargs['ddp_kwargs'] = dict(
                num_replicas=self.trainer.num_nodes * self.trainer.num_processes,
                rank=self.trainer.global_rank,
                shuffle=True)
        # Bug fix: `kwargs` (including ddp_kwargs) was previously built and
        # then discarded; pass it through so DDP sharding actually applies.
        dataloader = make_default_train_dataloader(**kwargs)
        return dataloader

    def val_dataloader(self):
        """Return [val, visual_test (or val again), *extra_val] dataloaders."""
        res = [make_default_val_dataloader(**self.config.data.val)]
        if self.config.data.visual_test is not None:
            res = res + [make_default_val_dataloader(**self.config.data.visual_test)]
        else:
            # Duplicate val so dataloader_idx 1 ('test' mode) always exists.
            res = res + res
        extra_val = self.config.data.get('extra_val', ())
        if extra_val:
            res += [make_default_val_dataloader(**extra_val[k]) for k in self.extra_val_titles]
        return res

    def training_step(self, batch, batch_idx, optimizer_idx=None):
        self._is_training_step = True
        return self._do_step(batch, batch_idx, mode='train', optimizer_idx=optimizer_idx)

    def validation_step(self, batch, batch_idx, dataloader_idx):
        # dataloader_idx 0 -> 'val', 1 -> 'test', >=2 -> named extra_val sets.
        extra_val_key = None
        if dataloader_idx == 0:
            mode = 'val'
        elif dataloader_idx == 1:
            mode = 'test'
        else:
            mode = 'extra_val'
            extra_val_key = self.extra_val_titles[dataloader_idx - 2]
        self._is_training_step = False
        return self._do_step(batch, batch_idx, mode=mode, extra_val_key=extra_val_key)

    def training_step_end(self, batch_parts_outputs):
        """Aggregate per-device outputs; periodically update the EMA generator."""
        if (self.training and self.average_generator
                and self.global_step >= self.average_generator_start_step
                and self.global_step >= self.last_generator_averaging_step + self.average_generator_period):
            if self.generator_average is None:
                self.generator_average = copy.deepcopy(self.generator)
            else:
                update_running_average(self.generator_average, self.generator,
                                       decay=self.generator_avg_beta)
            self.last_generator_averaging_step = self.global_step
        full_loss = (batch_parts_outputs['loss'].mean()
                     if torch.is_tensor(batch_parts_outputs['loss'])
                     else torch.tensor(batch_parts_outputs['loss']).float().requires_grad_(True))
        log_info = {k: v.mean() for (k, v) in batch_parts_outputs['log_info'].items()}
        self.log_dict(log_info, on_step=True, on_epoch=False)
        return full_loss

    def validation_epoch_end(self, outputs):
        """Aggregate metrics and finalize every evaluator at epoch end."""
        # Flatten the per-dataloader groups into a single list of step outputs.
        outputs = [step_out for out_group in outputs for step_out in out_group]
        averaged_logs = average_dicts((step_out['log_info'] for step_out in outputs))
        self.log_dict({k: v.mean() for (k, v) in averaged_logs.items()})
        pd.set_option('display.max_columns', 500)
        pd.set_option('display.width', 1000)
        val_evaluator_states = [s['val_evaluator_state'] for s in outputs if ('val_evaluator_state' in s)]
        val_evaluator_res = self.val_evaluator.evaluation_end(states=val_evaluator_states)
        val_evaluator_res_df = pd.DataFrame(val_evaluator_res).stack(1).unstack(0)
        val_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
        LOGGER.info(f'''Validation metrics after epoch #{self.current_epoch}, total {self.global_step} iterations:
{val_evaluator_res_df}''')
        for (k, v) in flatten_dict(val_evaluator_res).items():
            self.log(f'val_{k}', v)
        test_evaluator_states = [s['test_evaluator_state'] for s in outputs if ('test_evaluator_state' in s)]
        test_evaluator_res = self.test_evaluator.evaluation_end(states=test_evaluator_states)
        test_evaluator_res_df = pd.DataFrame(test_evaluator_res).stack(1).unstack(0)
        test_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
        LOGGER.info(f'''Test metrics after epoch #{self.current_epoch}, total {self.global_step} iterations:
{test_evaluator_res_df}''')
        for (k, v) in flatten_dict(test_evaluator_res).items():
            self.log(f'test_{k}', v)
        if self.extra_evaluators:
            for (cur_eval_title, cur_evaluator) in self.extra_evaluators.items():
                cur_state_key = f'extra_val_{cur_eval_title}_evaluator_state'
                cur_states = [s[cur_state_key] for s in outputs if (cur_state_key in s)]
                cur_evaluator_res = cur_evaluator.evaluation_end(states=cur_states)
                cur_evaluator_res_df = pd.DataFrame(cur_evaluator_res).stack(1).unstack(0)
                cur_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
                LOGGER.info(f'''Extra val {cur_eval_title} metrics after epoch #{self.current_epoch}, total {self.global_step} iterations:
{cur_evaluator_res_df}''')
                for (k, v) in flatten_dict(cur_evaluator_res).items():
                    self.log(f'extra_val_{cur_eval_title}_{k}', v)

    def _do_step(self, batch, batch_idx, mode='train', optimizer_idx=None, extra_val_key=None):
        """Shared train/val/test step: compute losses, visualize, evaluate."""
        # Freeze whichever network the current optimizer does not update.
        if optimizer_idx == 0:
            set_requires_grad(self.generator, True)
            set_requires_grad(self.discriminator, False)
        elif optimizer_idx == 1:
            set_requires_grad(self.generator, False)
            set_requires_grad(self.discriminator, True)
        batch = self(batch)
        total_loss = 0
        metrics = {}
        if (optimizer_idx is None) or (optimizer_idx == 0):
            (total_loss, metrics) = self.generator_loss(batch)
        elif (optimizer_idx is None) or (optimizer_idx == 1):
            if self.config.losses.adversarial.weight > 0:
                (total_loss, metrics) = self.discriminator_loss(batch)
        # Visualization only on rank 0 and on a fixed iteration cadence.
        if (self.get_ddp_rank() in (None, 0)) and ((batch_idx % self.visualize_each_iters == 0) or (mode == 'test')):
            if self.config.losses.adversarial.weight > 0:
                if self.store_discr_outputs_for_vis:
                    with torch.no_grad():
                        self.store_discr_outputs(batch)
            vis_suffix = f'_{mode}'
            if mode == 'extra_val':
                vis_suffix += f'_{extra_val_key}'
            self.visualizer(self.current_epoch, batch_idx, batch, suffix=vis_suffix)
        metrics_prefix = f'{mode}_'
        if mode == 'extra_val':
            metrics_prefix += f'{extra_val_key}_'
        result = dict(loss=total_loss, log_info=add_prefix_to_keys(metrics, metrics_prefix))
        if mode == 'val':
            result['val_evaluator_state'] = self.val_evaluator.process_batch(batch)
        elif mode == 'test':
            result['test_evaluator_state'] = self.test_evaluator.process_batch(batch)
        elif mode == 'extra_val':
            result[f'extra_val_{extra_val_key}_evaluator_state'] = self.extra_evaluators[extra_val_key].process_batch(batch)
        return result

    def get_current_generator(self, no_average=False):
        """Return the EMA generator at eval time when available, else the live one."""
        if (not no_average) and (not self.training) and self.average_generator and (self.generator_average is not None):
            return self.generator_average
        return self.generator

    def forward(self, batch: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
        """Run inpainting on a batch; implemented by subclasses."""
        raise NotImplementedError()

    def generator_loss(self, batch) -> Tuple[(torch.Tensor, Dict[(str, torch.Tensor)])]:
        """Return (total generator loss, metric dict); implemented by subclasses."""
        raise NotImplementedError()

    def discriminator_loss(self, batch) -> Tuple[(torch.Tensor, Dict[(str, torch.Tensor)])]:
        """Return (total discriminator loss, metric dict); implemented by subclasses."""
        raise NotImplementedError()

    def store_discr_outputs(self, batch):
        """Attach upsampled discriminator maps (real/fake/diff) to the batch."""
        out_size = batch['image'].shape[2:]
        (discr_real_out, _) = self.discriminator(batch['image'])
        (discr_fake_out, _) = self.discriminator(batch['predicted_image'])
        batch['discr_output_real'] = F.interpolate(discr_real_out, size=out_size, mode='nearest')
        batch['discr_output_fake'] = F.interpolate(discr_fake_out, size=out_size, mode='nearest')
        batch['discr_output_diff'] = batch['discr_output_real'] - batch['discr_output_fake']

    def get_ddp_rank(self):
        """Global rank when running distributed, else None."""
        return self.trainer.global_rank if (self.trainer.num_nodes * self.trainer.num_processes) > 1 else None
class _ndptr(_ndptr_base):
    """ctypes-compatible ndarray pointer type (see numpy.ctypeslib.ndpointer)."""

    @classmethod
    def from_param(cls, obj):
        """Validate ``obj`` against this type's dtype/ndim/shape/flags and
        return its ``.ctypes`` interface for passing to a C function.

        Bug fix: ctypes invokes ``from_param`` on the *class*, so it must be
        a classmethod; without the decorator, ``cls`` would have received the
        array itself (and ``obj`` would be missing).
        """
        if not isinstance(obj, ndarray):
            raise TypeError('argument must be an ndarray')
        if cls._dtype_ is not None and obj.dtype != cls._dtype_:
            raise TypeError('array must have data type %s' % cls._dtype_)
        if cls._ndim_ is not None and obj.ndim != cls._ndim_:
            raise TypeError('array must have %d dimension(s)' % cls._ndim_)
        if cls._shape_ is not None and obj.shape != cls._shape_:
            raise TypeError('array must have shape %s' % str(cls._shape_))
        if cls._flags_ is not None and (obj.flags.num & cls._flags_) != cls._flags_:
            raise TypeError('array must have flags %s' % _flags_fromnum(cls._flags_))
        return obj.ctypes
def activation_summary(x):
    """Attach a histogram and a sparsity-scalar summary to activation ``x``."""
    name = x.op.name
    tf.summary.histogram(name + '/activations', x)
    tf.summary.scalar(name + '/sparsity', tf.nn.zero_fraction(x))
class Invocation():
    """Immutable pairing of an invocation name with its error context."""

    def __init__(self, name, error_context):
        # Stored privately; exposed only through the accessor methods below.
        self._name = name
        self._error_context = error_context

    def name(self):
        """The invocation's name."""
        return self._name

    def error_context(self):
        """The error context associated with this invocation."""
        return self._error_context
class Dataset(InMemoryDataset):
    """In-memory graph dataset built from ``<dataset>.data`` / ``<dataset>.edge``.

    Each line of the ``.data`` file is ``<label> <node-id> <node-id> ...``.
    When ``pred_edges`` is falsy, edges are read from the ``.edge`` file;
    otherwise a full (i <= j, self-loops included) edge list is generated per
    sample and edge attributes carry the (sender, receiver) node ids.
    """

    def __init__(self, root, dataset, pred_edges=1, transform=None, pre_transform=None):
        self.path = root
        self.dataset = dataset
        self.pred_edges = pred_edges
        super().__init__(root, transform, pre_transform)
        (self.data, self.slices) = torch.load(self.processed_paths[0])
        self.statistical_info = torch.load(self.processed_paths[1])
        self.node_num = self.statistical_info['node_num']
        self.data_num = self.statistical_info['data_num']

    @property
    def raw_file_names(self):
        # Bug fix: `process` unpacks this attribute without calling it, and
        # the InMemoryDataset machinery reads it as an attribute too, so it
        # must be a property (it was a plain method before, which would have
        # yielded an unusable bound-method object).
        return ['{}{}/{}.data'.format(self.path, self.dataset, self.dataset),
                '{}{}/{}.edge'.format(self.path, self.dataset, self.dataset)]

    @property
    def processed_file_names(self):
        # Same fix as `raw_file_names`: must be a property for the base class.
        if not self.pred_edges:
            return ['{}_edge/{}.dataset'.format(self.dataset, self.dataset),
                    '{}_edge/{}.info'.format(self.dataset, self.dataset)]
        else:
            return ['{}/{}.dataset'.format(self.dataset, self.dataset),
                    '{}/{}.info'.format(self.dataset, self.dataset)]

    def download(self):
        """Nothing to download; raw files are expected to exist locally."""
        pass

    def read_data(self):
        """Parse the raw files.

        Returns:
            (node_list, edge_list, one_hot_labels, sr_list, node_num, data_num)
        """
        node_list = []
        label = []
        max_node_index = 0
        data_num = 0
        with open(self.datafile, 'r') as f:
            for line in f:
                data_num += 1
                data = line.split()
                label.append(float(data[0]))
                # Node ids follow the label on each line.
                int_list = [int(v) for v in data[1:]]
                node_list.append(int_list)
                if max_node_index < max(int_list):
                    max_node_index = max(int_list)
        if not self.pred_edges:
            # Edges are listed explicitly, one per line: <sample> <src> <dst>.
            edge_list = [[[], []] for _ in range(data_num)]
            sr_list = []
            with open(self.edgefile, 'r') as f:
                for line in f:
                    edge_info = line.split()
                    node_index = int(edge_info[0])
                    edge_list[node_index][0].append(int(edge_info[1]))
                    edge_list[node_index][1].append(int(edge_info[2]))
        else:
            # Generate a full edge list per sample; edges will be predicted.
            edge_list = []
            sr_list = []
            for nodes in node_list:
                (edge_l, sr_l) = self.construct_full_edge_list(nodes)
                edge_list.append(edge_l)
                sr_list.append(sr_l)
        label = self.construct_one_hot_label(label)
        return (node_list, edge_list, label, sr_list, (max_node_index + 1), data_num)

    def construct_full_edge_list(self, nodes):
        """All (i, j) pairs with i <= j, plus their (sender, receiver) ids."""
        num_node = len(nodes)
        edge_list = [[], []]
        sender_receiver_list = []
        for i in range(num_node):
            for j in range(i, num_node):  # idiom fix: was range(num_node)[i:]
                edge_list[0].append(i)
                edge_list[1].append(j)
                sender_receiver_list.append([nodes[i], nodes[j]])
        return (edge_list, sender_receiver_list)

    def construct_one_hot_label(self, label):
        """One-hot encode float class labels (classes 0..max(label))."""
        nb_classes = int(max(label)) + 1
        targets = np.array(label, dtype=np.int32).reshape(-1)
        return np.eye(nb_classes)[targets]

    def process(self):
        """Read raw files, build PyG Data objects and save them to disk."""
        (self.datafile, self.edgefile) = self.raw_file_names
        (self.node, edge, label, self.sr_list, node_num, data_num) = self.read_data()
        data_list = []
        for i in range(len(self.node)):
            x = torch.LongTensor(self.node[i]).unsqueeze(1)
            edge_index = torch.LongTensor(edge[i])
            y = torch.FloatTensor(label[i])
            if self.pred_edges:
                sr = torch.LongTensor(self.sr_list[i])
            else:
                sr = []
            data_list.append(Data(x=x, edge_index=edge_index, edge_attr=sr, y=y))
        (data, slices) = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
        statistical_info = {'data_num': data_num, 'node_num': node_num}
        torch.save(statistical_info, self.processed_paths[1])

    def node_M(self):
        """Number of distinct nodes (max node index + 1)."""
        return self.node_num

    def data_N(self):
        """Number of samples in the dataset."""
        return self.data_num
class ParseTreeBuilder():
    """Builds per-rule callback chains that assemble parse trees.

    For each grammar rule, composes a chain of wrapper callables (child
    filtering, single-child expansion, position propagation, ambiguity
    expansion) around the user transformer callback or default tree factory.
    """

    def __init__(self, rules, tree_class, propagate_positions=False, ambiguous=False, maybe_placeholders=False):
        self.tree_class = tree_class
        self.propagate_positions = propagate_positions
        self.ambiguous = ambiguous
        self.maybe_placeholders = maybe_placeholders
        # Precompute the (rule, wrapper_chain) pairs once, up front.
        self.rule_builders = list(self._init_builders(rules))

    def _init_builders(self, rules):
        # Yields (rule, wrapper_chain).  The boolean-and expressions evaluate
        # to a falsy value when a wrapper does not apply; filter(None, ...)
        # drops those, keeping only the active wrappers in order.
        for rule in rules:
            options = rule.options
            keep_all_tokens = options.keep_all_tokens
            expand_single_child = options.expand1
            wrapper_chain = list(filter(None, [((expand_single_child and (not rule.alias)) and ExpandSingleChild), maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, (options.empty_indices if self.maybe_placeholders else None)), (self.propagate_positions and PropagatePositions), (self.ambiguous and maybe_create_ambiguous_expander(self.tree_class, rule.expansion, keep_all_tokens)), (self.ambiguous and partial(AmbiguousIntermediateExpander, self.tree_class))]))
            (yield (rule, wrapper_chain))

    def create_callback(self, transformer=None):
        """Return a dict mapping each rule to its fully wrapped callback.

        Raises:
            GrammarError: if the same rule appears twice.
        """
        callbacks = {}
        for (rule, wrapper_chain) in self.rule_builders:
            user_callback_name = (rule.alias or rule.options.template_source or rule.origin.name)
            try:
                f = getattr(transformer, user_callback_name)
                # Adapt the transformer method to the expected call style.
                wrapper = getattr(f, 'visit_wrapper', None)
                if (wrapper is not None):
                    f = apply_visit_wrapper(f, user_callback_name, wrapper)
                elif isinstance(transformer, InlineTransformer):
                    f = ptb_inline_args(f)
                elif isinstance(transformer, Transformer_InPlace):
                    f = inplace_transformer(f)
            except AttributeError:
                # No user callback: build a plain tree node for this rule.
                f = partial(self.tree_class, user_callback_name)
            # Apply the wrappers innermost-first.
            for w in wrapper_chain:
                f = w(f)
            if (rule in callbacks):
                raise GrammarError(("Rule '%s' already exists" % (rule,)))
            callbacks[rule] = f
        return callbacks
def db_iterator():
    """Stub DB iterator: a single aggregated round-1 tensor record."""
    record = {
        'round': 1,
        'tensor_name': 'tensor1',
        'tags': 'aggregated',
        'nparray': [1],
    }
    return [record]
def test_default_replacement_unchanged(config):
    """Adding keys to sanitize must not alter the replacement token."""
    extra = {'new_key1', 'new_key2'}
    updated = config.with_keys_to_sanitize(*extra)
    assert updated.replacement == DEFAULT_REPLACEMENT
# Module-level smoke run: exercise _test with the `assert_ii_1` check disabled.
_test(assert_ii_1=False)
def test_map_unroll_processing_elements():
    """Systolic GEMM sample with the PE map using the Unrolled schedule.

    Loads the FPGA GEMM sample as a module, switches the `p`
    (processing-element) map from the `unroll` flag to the Unrolled
    schedule, runs the SDFG on random data and checks it against a NumPy
    reference.
    """
    spec = importlib.util.spec_from_file_location('gemm', (((Path(__file__).parent.parent.parent / 'samples') / 'fpga') / 'gemm_systolic_vectorized.py'))
    gemm = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(gemm)
    N = 128
    K = 256
    M = 512
    P = 8    # number of processing elements
    W = 4    # vector width
    TN = 32
    TM = 128
    sdfg = gemm.make_sdfg('map_unroll_processing_elements', dace.vector(dace.float32, W))
    sdfg.specialize({'P': P, 'W': W, 'TN': TN, 'TM': TM})
    for state in sdfg.states():
        for node in state.nodes():
            if isinstance(node, nodes.MapEntry) and node.params == ['p']:
                # Express unrolling through the schedule, not the flag.
                node.unroll = False
                node.schedule = dace.ScheduleType.Unrolled
    A = np.ndarray([N, K], dtype=dace.float32.type)
    B = np.ndarray([K, M], dtype=dace.float32.type)
    C = np.ndarray([N, M], dtype=dace.float32.type)
    A[:] = np.random.rand(N, K).astype(dace.float32.type)
    B[:] = np.random.rand(K, M).astype(dace.float32.type)
    C[:] = np.random.rand(N, M).astype(dace.float32.type)
    # Bug fix: the reference was written as `(A B) + C`, which is a syntax
    # error; matrix multiplication needs the `@` operator.
    C_regression = (A @ B) + C
    sdfg(A=A, B=B, C=C, N=N, M=M, K=K)
    if not np.allclose(C_regression, C):
        raise ValueError('Verification failed.')
    return sdfg
class ONNXTracedModule(torch.nn.Module):
    """Wraps a module and produces a traced TorchScript graph on forward.

    ``forward`` returns ``(graph, output)``, optionally extended with the
    cloned inputs and/or the input states, depending on constructor flags.
    """

    def __init__(self, inner, strict=True, force_outplace=False, return_inputs=False, return_inputs_states=False):
        super(ONNXTracedModule, self).__init__()
        # inner may have submodules: its state_dict is gathered at trace time.
        self.inner = inner
        self.strict = strict
        self._force_outplace = force_outplace
        self._return_inputs = return_inputs
        self._return_inputs_states = return_inputs_states

    def forward(self, *args: torch.Tensor):
        """Trace ``self.inner`` on ``args`` and return the graph plus outputs.

        All flattened positional arguments must be Tensors.
        """
        (in_vars, in_desc) = _flatten(args)
        # Parameters/buffers are handed to the tracer as extra trace inputs.
        module_state = list(_unique_state_dict(self, keep_vars=True).values())
        # Mutable cells so the tracer-invoked wrapper can pass results out.
        ret_inputs = []
        inputs_states = []
        outs = []

        def wrapper(*args):
            # Invoked by the tracer with traced tensors standing in for inputs.
            in_args: List[torch.Tensor] = []
            for i in range(len(in_vars)):
                if (not isinstance(args[i], torch.Tensor)):
                    raise RuntimeError('Expected Tensor argument')
                in_args.append(args[i])
            trace_inputs = _unflatten(in_args, in_desc)
            # Clone so later in-place ops cannot corrupt the returned inputs.
            ret_inputs.append(tuple((x.clone(memory_format=torch.preserve_format) for x in args)))
            if self._return_inputs_states:
                inputs_states.append(_unflatten(in_args, in_desc))
            outs.append(self.inner(*trace_inputs))
            if self._return_inputs_states:
                # Pair the pre-call input state with the (possibly mutated)
                # trace inputs observed after running the module.
                inputs_states[0] = (inputs_states[0], trace_inputs)
            (out_vars, _) = _flatten(outs)
            if (len(out_vars) == 1):
                return out_vars[0]
            else:
                return tuple(out_vars)

        (graph, out) = torch._C._create_graph_by_tracing(wrapper, (in_vars + module_state), _create_interpreter_name_lookup_fn(), self.strict, self._force_outplace)
        if self._return_inputs:
            return (graph, outs[0], ret_inputs[0])
        if self._return_inputs_states:
            return (graph, outs[0], inputs_states[0])
        else:
            return (graph, outs[0])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.