code
stringlengths
101
5.91M
@pytest.mark.parametrize('voiced_region', ['pulse', 'sinusoidal', 'sawtooth'])
def test_waveform(voiced_region, P=80, verbose=False):
    """Smoke-test ExcitationGeneration for each voiced-region waveform type.

    Extracts F0 from the SPTK sample audio and runs it through the excitation
    generator; with verbose=True the result is written out as a wav for manual
    listening.
    """
    generator = diffsptk.ExcitationGeneration(
        P, voiced_region=voiced_region, unvoiced_region='zeros')
    pitch = torch.from_numpy(
        U.call(f'x2x +sd tools/SPTK/asset/data.short | pitch -s 16 -p {P} -o 0 -a 2'))
    excitation = generator(pitch)
    if verbose:
        sf.write(f'excite_{voiced_region}.wav', excitation, 16000)
@pytest.mark.parametrize('module_creator', [
    ModuleCreator(TSTNetNormal(), [(4, 3, 32, 32), (4, 3, 32, 32)]),
    ModuleCreator(ResUnit(16), [(4, 3, 32, 32)]),
    ModuleCreator(NestedTestNet(), [(4, 3, 32, 32), (4, 3, 32, 32)])])
def test_with_statement_graph_def_test_name(module_creator):
    """Capture a module into a named graph_def ('test_net') via the `with`
    statement and verify the captured graph reproduces the module's outputs."""
    module = module_creator.module
    proto_inputs = [nn.ProtoVariable(shape)
                    for shape in module_creator.input_shape]
    with nn.graph_def.graph(name='test_net') as g:
        outputs = module(*proto_inputs)
    real_inputs = module_creator.get_variable_inputs()
    # Replay the captured graph on real variables and compare against a
    # direct forward pass of the module.
    outputs = g.test_net(*real_inputs)
    ref_outputs = module(*real_inputs)
    forward_variable_and_check_equal(outputs, ref_outputs)
class DocTestReporter(SageObject):
    """Collects and logs the results of doctest runs for the Sage doctester.

    Accumulates a postscript (summary lines plus cpu/wall time totals),
    per-file stats, and a bitmask `error_status` where each bit encodes one
    failure category (1=doctest failure, 4=timeout, 8/16=bad exit/killed,
    32=tab, 64=framework error, 128=interrupted, 256=line number).
    """

    def __init__(self, controller):
        # The DocTestController driving the run; supplies options, sources and log().
        self.controller = controller
        self.postscript = {'lines': [], 'cputime': 0, 'walltime': 0}
        self.sources_completed = 0
        self.stats = {}
        self.error_status = 0

    def were_doctests_with_optional_tag_run(self, tag):
        """Return True if doctests with optional tag ``tag`` were executed."""
        # options.optional is either True (run everything) or a collection of tags.
        if ((self.controller.options.optional is True) or (tag in self.controller.options.optional)):
            return True
        # Tags for software detected at runtime also count as run.
        if (tag in available_software.seen()):
            return True
        return False

    def report_head(self, source):
        """Build the ``sage -t ...`` command line used to rerun ``source``."""
        cmd = 'sage -t'
        if self.controller.options.long:
            cmd += ' --long'
        warnlong = self.controller.options.warn_long
        if (warnlong >= 0):
            cmd += ' --warn-long'
            # 1.0 is the implicit default, so only print a non-default threshold.
            if (warnlong != 1.0):
                cmd += (' %.1f' % warnlong)
        seed = self.controller.options.random_seed
        cmd += ' --random-seed={}'.format(seed)
        environment = self.controller.options.environment
        if (environment != 'sage.repl.ipython_kernel.all_jupyter'):
            cmd += f' --environment={environment}'
        cmd += (' ' + source.printpath)
        return cmd

    def report(self, source, timeout, return_code, results, output, pid=None):
        """Record and log the outcome of running the doctests in ``source``.

        ``timeout``/``return_code`` describe how the worker process ended,
        ``results`` is ``(ntests, result_dict)`` from the worker, ``output``
        is its captured stdout.  Never raises: any internal error is logged.
        """
        log = self.controller.log
        process_name = ('process (pid={0})'.format(pid) if pid else 'process')
        try:
            postscript = self.postscript
            stats = self.stats
            basename = source.basename
            # Baseline stats let known-failing files not flip the error status.
            if self.controller.baseline_stats:
                the_baseline_stats = self.controller.baseline_stats.get(basename, {})
            else:
                the_baseline_stats = {}
            cmd = self.report_head(source)
            try:
                (ntests, result_dict) = results
            except (TypeError, ValueError):
                # Worker returned something unusable; treat as a framework error.
                ntests = 0
                result_dict = DictAsObject({'err': 'badresult'})
            if timeout:
                fail_msg = 'Timed out'
                if (ntests > 0):
                    fail_msg += ' after testing finished'
                if (return_code > 0):
                    fail_msg += ' (with error after interrupt)'
                elif (return_code < 0):
                    sig = (- return_code)
                    if (sig == SIGQUIT):
                        # SIGQUIT is the expected way we stop a timed-out worker.
                        pass
                    elif (sig == SIGKILL):
                        fail_msg += ' (and interrupt failed)'
                    else:
                        fail_msg += (' (with %s after interrupt)' % signal_name(sig))
                if the_baseline_stats.get('failed', False):
                    fail_msg += ' [failed in baseline]'
                log((' %s\n%s\nTests run before %s timed out:' % (fail_msg, ('*' * 70), process_name)))
                log(output)
                log(('*' * 70))
                postscript['lines'].append((cmd + (' # %s' % fail_msg)))
                stats[basename] = {'failed': True, 'walltime': 1000000.0, 'ntests': ntests}
                # Only escalate the exit status if the file was not already failing in baseline.
                if (not the_baseline_stats.get('failed', False)):
                    self.error_status |= 4
            elif return_code:
                if (return_code > 0):
                    fail_msg = ('Bad exit: %s' % return_code)
                else:
                    fail_msg = ('Killed due to %s' % signal_name((- return_code)))
                if (ntests > 0):
                    fail_msg += ' after testing finished'
                if the_baseline_stats.get('failed', False):
                    fail_msg += ' [failed in baseline]'
                log((' %s\n%s\nTests run before %s failed:' % (fail_msg, ('*' * 70), process_name)))
                log(output)
                log(('*' * 70))
                postscript['lines'].append((cmd + (' # %s' % fail_msg)))
                stats[basename] = {'failed': True, 'walltime': 1000000.0, 'ntests': ntests}
                if (not the_baseline_stats.get('failed', False)):
                    self.error_status |= (8 if (return_code > 0) else 16)
            else:
                # Normal exit: average the per-run timings if present, otherwise
                # use a sentinel huge time so the file sorts as problematic.
                if (hasattr(result_dict, 'walltime') and hasattr(result_dict.walltime, '__len__') and (len(result_dict.walltime) > 0)):
                    wall = (sum(result_dict.walltime) / len(result_dict.walltime))
                else:
                    wall = 1000000.0
                if (hasattr(result_dict, 'cputime') and hasattr(result_dict.cputime, '__len__') and (len(result_dict.cputime) > 0)):
                    cpu = (sum(result_dict.cputime) / len(result_dict.cputime))
                else:
                    cpu = 1000000.0
                if (result_dict.err == 'badresult'):
                    log((' Error in doctesting framework (bad result returned)\n%s\nTests run before error:' % ('*' * 70)))
                    log(output)
                    log(('*' * 70))
                    postscript['lines'].append((cmd + ' # Testing error: bad result'))
                    self.error_status |= 64
                elif (result_dict.err == 'noresult'):
                    log((' Error in doctesting framework (no result returned)\n%s\nTests run before error:' % ('*' * 70)))
                    log(output)
                    log(('*' * 70))
                    postscript['lines'].append((cmd + ' # Testing error: no result'))
                    self.error_status |= 64
                elif (result_dict.err == 'tab'):
                    # Abbreviate long lists of offending line numbers.
                    if (len(result_dict.tab_linenos) > 5):
                        result_dict.tab_linenos[3:(- 1)] = '...'
                    tabs = (' ' + ','.join(result_dict.tab_linenos))
                    # Pluralize: "line" -> "lines".
                    if (len(result_dict.tab_linenos) > 1):
                        tabs = ('s' + tabs)
                    log((' Error: TAB character found at line%s' % tabs))
                    postscript['lines'].append((cmd + ' # Tab character found'))
                    self.error_status |= 32
                elif (result_dict.err == 'line_number'):
                    log(' Error: Source line number found')
                    postscript['lines'].append((cmd + ' # Source line number found'))
                    self.error_status |= 256
                elif (result_dict.err is not None):
                    # err is True or an exception class/instance from the framework.
                    if (result_dict.err is True):
                        fail_msg = 'Error in doctesting framework'
                    else:
                        if hasattr(result_dict.err, '__name__'):
                            err = result_dict.err.__name__
                        else:
                            err = repr(result_dict.err)
                        fail_msg = ('%s in doctesting framework' % err)
                    log((' %s\n%s' % (fail_msg, ('*' * 70))))
                    if output:
                        log(('Tests run before doctest exception:\n' + output))
                        log(('*' * 70))
                    postscript['lines'].append((cmd + (' # %s' % fail_msg)))
                    if hasattr(result_dict, 'tb'):
                        log(result_dict.tb)
                    if hasattr(result_dict, 'walltime'):
                        stats[basename] = {'failed': True, 'walltime': wall, 'ntests': ntests}
                    else:
                        stats[basename] = {'failed': True, 'walltime': 1000000.0, 'ntests': ntests}
                    self.error_status |= 64
                # Tests actually ran (tab errors still report their failures).
                if ((result_dict.err is None) or (result_dict.err == 'tab')):
                    f = result_dict.failures
                    if f:
                        fail_msg = ('%s failed' % count_noun(f, 'doctest'))
                        if the_baseline_stats.get('failed', False):
                            fail_msg += ' [failed in baseline]'
                        postscript['lines'].append((cmd + (' # %s' % fail_msg)))
                        if (not the_baseline_stats.get('failed', False)):
                            self.error_status |= 1
                    if (f or (result_dict.err == 'tab')):
                        stats[basename] = {'failed': True, 'walltime': wall, 'ntests': ntests}
                    else:
                        stats[basename] = {'walltime': wall, 'ntests': ntests}
                    postscript['cputime'] += cpu
                    postscript['walltime'] += wall
                    try:
                        optionals = result_dict.optionals
                    except AttributeError:
                        optionals = {}
                    # Report tests skipped because of their optional tags.
                    for tag in sorted(optionals):
                        nskipped = optionals[tag]
                        if (tag == 'long time'):
                            if (not self.controller.options.long):
                                if self.controller.options.show_skipped:
                                    log((' %s not run' % count_noun(nskipped, 'long test')))
                        elif (tag == 'not tested'):
                            if self.controller.options.show_skipped:
                                log((' %s not run' % count_noun(nskipped, 'not tested test')))
                        elif (tag == 'not implemented'):
                            if self.controller.options.show_skipped:
                                log((' %s for not implemented functionality not run' % count_noun(nskipped, 'test')))
                        elif (not self.were_doctests_with_optional_tag_run(tag)):
                            if (tag == 'bug'):
                                if self.controller.options.show_skipped:
                                    log((' %s not run due to known bugs' % count_noun(nskipped, 'test')))
                            elif (tag == ''):
                                if self.controller.options.show_skipped:
                                    log((' %s not run' % count_noun(nskipped, 'unlabeled test')))
                            elif self.controller.options.show_skipped:
                                log((' %s not run' % count_noun(nskipped, (tag + ' test'))))
                    # Tests skipped because the per-file time budget ran out.
                    nskipped = result_dict.walltime_skips
                    if self.controller.options.show_skipped:
                        log((' %s not run because we ran out of time' % count_noun(nskipped, 'test')))
                    if (nskipped != 0):
                        ntests_run = result_dict.tests
                        total = ('%d%% of tests run' % round(((100 * ntests_run) / float((ntests_run + nskipped)))))
                    else:
                        total = count_noun(ntests, 'test')
                    if (not (self.controller.options.only_errors and (not f))):
                        log((' [%s, %s%.2f s]' % (total, (('%s, ' % count_noun(f, 'failure')) if f else ''), wall)))
            self.sources_completed += 1
        except Exception:
            # The reporter must never take down the controller.
            import traceback
            log(traceback.format_exc(), end='')

    def finalize(self):
        """Print the final summary; flags bit 128 if files were left untested."""
        log = self.controller.log
        postscript = self.postscript
        if (self.sources_completed < (len(self.controller.sources) * self.controller.options.global_iterations)):
            postscript['lines'].append(('Doctests interrupted: %s/%s files tested' % (self.sources_completed, len(self.controller.sources))))
            self.error_status |= 128
        elif (not postscript['lines']):
            postscript['lines'].append('All tests passed!')
        log(('-' * 70))
        log('\n'.join(postscript['lines']))
        log(('-' * 70))
        log(('Total time for all tests: %.1f seconds' % self.controller.timer.walltime))
        log((' cpu time: %.1f seconds' % postscript['cputime']))
        log((' cumulative wall time: %.1f seconds' % postscript['walltime']))
        stdout.flush()
def make_just_x(ds):
    """Build a TensorDataset from every non-label, non-None attribute of the
    features in ``ds``, one tensor per attribute name."""
    columns = defaultdict(list)
    for feature in ds:
        for name, value in vars(feature).items():
            # Labels are handled elsewhere; None values cannot become tensors.
            if name == 'label' or value is None:
                continue
            columns[name].append(value)
    print(columns.keys())
    return TensorDataset(*(torch.tensor(col) for col in columns.values()))
def tounroll(A: dace.float64[20], B: dace.float64[20]):
    """DaCe program computing B[j] += A[j] * i for each i in 0..4.

    The outer ``range`` loop is sequential (a candidate for loop unrolling);
    the inner ``dace.map`` is a parallel map over all 20 elements.
    """
    for i in range(5):
        for j in dace.map[0:20]:
            with dace.tasklet:
                # The names a / b_in / b_out are tasklet connector names and
                # part of the dataflow interface -- do not rename.
                (a << A[j])
                (b_in << B[j])
                (b_out >> B[j])
                b_out = (b_in + (a * i))
def yaml_load(filename):
    """Parse *filename* as YAML with BaseLoader (all scalars stay strings)."""
    with open(filename, 'r') as handle:
        return yaml.load(handle, Loader=yaml.BaseLoader)
def psi(N):
    """Dedekind psi function of the integral ideal N:
    the product of (Np + 1) * Np^(e-1) over the prime factorization of N."""
    if not N.is_integral():
        raise ValueError('psi only defined for integral ideals')
    from sage.misc.misc_c import prod
    norms = [(p.absolute_norm(), e) for p, e in N.factor()]
    return prod([(np + 1) * (np ** (e - 1)) for np, e in norms])
def test_inv_residual():
    """InvertedResidual: invalid strides are rejected; residual connection,
    conv geometry and output shapes behave per configuration."""
    with pytest.raises(AssertionError):
        InvertedResidual(32, 32, 3, 4)  # stride 3 is not allowed

    # stride 1, expand_ratio 4: residual connection with a leading 1x1 conv
    block = InvertedResidual(32, 32, 1, 4)
    assert block.use_res_connect
    assert block.conv[0].kernel_size == (1, 1)
    assert block.conv[0].padding == 0
    assert block.conv[1].kernel_size == (3, 3)
    assert block.conv[1].padding == 1
    assert block.conv[0].with_norm
    assert block.conv[1].with_norm
    feat = torch.rand(1, 32, 64, 64)
    assert block(feat).shape == (1, 32, 64, 64)

    # stride 2: no residual connection, spatial dimensions halve
    block = InvertedResidual(32, 32, 2, 4)
    assert not block.use_res_connect
    assert block.conv[0].kernel_size == (1, 1)
    feat = torch.rand(1, 32, 64, 64)
    assert block(feat).shape == (1, 32, 32, 32)

    # expand_ratio 1: the 1x1 expansion is skipped, first conv is the 3x3
    block = InvertedResidual(32, 32, 1, 1)
    assert block.conv[0].kernel_size == (3, 3)
    feat = torch.rand(1, 32, 64, 64)
    assert block(feat).shape == (1, 32, 64, 64)
@_kl(Pareto, Normal)  # NOTE(review): decorator name looks truncated in source; presumably torch's register_kl -- confirm
def _kl_pareto_normal(p, q):
    """Elementwise KL(Pareto(p) || Normal(q)); infinite where alpha <= 2,
    since the Pareto variance is undefined there."""
    var_normal = 2 * q.scale.pow(2)
    common_term = p.scale / (p.alpha - 1)
    t1 = ((math.sqrt(2 * math.pi) * q.scale * p.alpha) / p.scale).log()
    t2 = p.alpha.reciprocal()
    t3 = p.alpha * common_term.pow(2) / (p.alpha - 2)
    t4 = (p.alpha * common_term - q.loc).pow(2)
    result = t1 - t2 + (t3 + t4) / var_normal - 1
    result[p.alpha <= 2] = inf
    return result
def _collect_best_span_string(best_span: torch.Tensor, cspan: torch.IntTensor, context_tokens: List[Token], context_string: str, cls_ind: Optional[Union[(torch.LongTensor, int)]]=0) -> str:
    """Map a predicted (start, end) token span back to a substring of the context.

    best_span holds two token indices into the full model input; cspan is the
    context's token span within that input, used to rebase the indices.  A
    prediction at ``cls_ind`` is treated as "no answer" and yields ''.
    """
    best_span = best_span.detach().cpu().numpy()
    if (best_span[0] == cls_ind):
        # Model pointed at the CLS position: no answer.
        best_span_string = ''
    else:
        # Rebase from input-wide indices to context-token indices.
        best_span -= int(cspan[0])
        assert np.all((best_span >= 0))
        (predicted_start, predicted_end) = tuple(best_span)
        # Tokens without a character offset (idx is None, e.g. wordpiece
        # continuations) cannot anchor the span; walk left to a real offset.
        while ((predicted_start >= 0) and (context_tokens[predicted_start].idx is None)):
            predicted_start -= 1
        if (predicted_start < 0):
            logger.warning(f"Could not map the token '{context_tokens[best_span[0]].text}' at index '{best_span[0]}' to an offset in the original text.")
            character_start = 0
        else:
            character_start = context_tokens[predicted_start].idx
        # Likewise walk right for the end token.
        while ((predicted_end < len(context_tokens)) and (context_tokens[predicted_end].idx is None)):
            predicted_end += 1
        if (predicted_end >= len(context_tokens)):
            logger.warning(f"Could not map the token '{context_tokens[best_span[1]].text}' at index '{best_span[1]}' to an offset in the original text.")
            character_end = len(context_string)
        else:
            end_token = context_tokens[predicted_end]
            # End of the span = token offset + its visible (de-wordpieced) length.
            character_end = (end_token.idx + len(sanitize_wordpiece(end_token.text)))
        best_span_string = context_string[character_start:character_end]
    return best_span_string
# NOTE(review): the decorator below appears truncated in the source (shown as
# a bare call `_interact(...)`); restored minimally with `@` -- confirm the
# original decorator name (likely sage's library_interact).
@_interact(title=(lambda : text_control('<h2>Simpson integration</h2>')), f=(lambda : input_box(default='x*sin(x)+x+1', label='$f(x)=$')), n=(lambda : slider(2, 100, 2, 6, label='# divisions')), interval_input=(lambda : selector(['from slider', 'from keyboard'], label='Integration interval', buttons=True)), interval_s=(lambda : range_slider((- 10), 10, default=(0, 10), label='slider: ')), interval_g=(lambda : input_grid(1, 2, default=[[0, 10]], label='keyboard: ')), output_form=(lambda : selector(['traditional', 'table', 'none'], label='Computations form', buttons=True)))
def simpson_integration(title, f, n, interval_input, interval_s, interval_g, output_form):
    """Interactive demo of composite Simpson's rule.

    Plots f together with the interpolating parabolas over n subintervals,
    then shows the Simpson approximation next to a numeric reference value,
    optionally as a step-by-step formula or a table.
    """
    x = SR.var('x')
    f = symbolic_expression(f).function(x)
    # The interval comes either from the range slider or the input grid.
    if (interval_input == 'from slider'):
        interval = interval_s
    else:
        interval = interval_g[0]

    def parabola(a, b, c):
        """Return the quadratic through the three points a, b, c."""
        from sage.symbolic.relation import solve
        (A, B, C) = SR.var('A, B, C')
        K = solve([((((A * (a[0] ** 2)) + (B * a[0])) + C) == a[1]), ((((A * (b[0] ** 2)) + (B * b[0])) + C) == b[1]), ((((A * (c[0] ** 2)) + (B * c[0])) + C) == c[1])], [A, B, C], solution_dict=True)[0]
        f = (((K[A] * (x ** 2)) + (K[B] * x)) + K[C])
        return f

    # Sample f at the n+1 equally spaced nodes.
    xs = []
    ys = []
    dx = (float((interval[1] - interval[0])) / n)
    for i in range((n + 1)):
        xs.append((interval[0] + (i * dx)))
        ys.append(f(x=xs[(- 1)]))
    # One parabola per pair of subintervals, plus guide lines down to the axis.
    parabolas = Graphics()
    lines = Graphics()
    for i in range(0, (n - 1), 2):
        p = parabola((xs[i], ys[i]), (xs[(i + 1)], ys[(i + 1)]), (xs[(i + 2)], ys[(i + 2)]))
        parabolas += plot(p(x=x), (x, xs[i], xs[(i + 2)]), color='red')
        lines += line([(xs[i], ys[i]), (xs[i], 0), (xs[(i + 2)], 0)], color='red')
        lines += line([(xs[(i + 1)], ys[(i + 1)]), (xs[(i + 1)], 0)], linestyle='-.', color='red')
    lines += line([(xs[(- 1)], ys[(- 1)]), (xs[(- 1)], 0)], color='red')
    html(('Function $f(x)=%s$' % latex(f(x))))
    show(((plot(f(x), x, interval[0], interval[1]) + parabolas) + lines), xmin=interval[0], xmax=interval[1])
    # Reference value from numerical integration vs the Simpson sum
    # dx/3 * (y0 + 4*y_odd + 2*y_even + yn).
    numeric_value = integral_numerical(f, interval[0], interval[1])[0]
    approx = ((dx / 3) * (((ys[0] + sum([(4 * ys[i]) for i in range(1, n, 2)])) + sum([(2 * ys[i]) for i in range(2, n, 2)])) + ys[n]))
    html(('Integral value to seven decimal places is: $\\displaystyle\\int_{%.2f}^{%.2f} {f(x) \\, \\mathrm{d}x} = %.6f$' % (interval[0], interval[1], N(numeric_value, digits=7))))
    if (output_form == 'traditional'):
        # Render the Simpson sum three ways: symbolically, with node values
        # substituted, and fully evaluated.  The weight pattern 4,2,4,...
        # comes from ((i % 2) * -2) + 4.
        sum_formula_html = ('\\frac{d}{3} \\cdot \\left[ f(x_0) + %s + f(x_{%s})\\right]' % (' + '.join((('%s \\cdot f(x_{%s})' % ((((i % 2) * (- 2)) + 4), (i + 1))) for i in range(0, (n - 1)))), n))
        sum_placement_html = ('\\frac{%.2f}{3} \\cdot \\left[ f(%.2f) + %s + f(%.2f)\\right]' % (dx, N(xs[0], digits=5), ' + '.join((('%s \\cdot f(%.2f)' % ((((i % 2) * (- 2)) + 4), N(xk, digits=5))) for (i, xk) in enumerate(xs[1:(- 1)]))), N(xs[n], digits=5)))
        sum_values_html = ('\\frac{%.2f}{3} \\cdot \\left[ %s %s %s\\right]' % (dx, ('%.2f + ' % N(ys[0], digits=5)), ' + '.join((('%s \\cdot %.2f' % ((((i % 2) * (- 2)) + 4), N(yk, digits=5))) for (i, yk) in enumerate(ys[1:(- 1)]))), (' + %.2f' % N(ys[n], digits=5))))
        html(('\n <div class="math">\n \\begin{align*}\n \\int_{%.2f}^{%.2f} {f(x) \\, \\mathrm{d}x}\n & \\approx %s \\\\\n & = %s \\\\\n & = %s \\\\\n & = %.6f\n \\end{align*}\n </div>\n ' % (interval[0], interval[1], sum_formula_html, sum_placement_html, sum_values_html, N(approx, digits=7))))
    elif (output_form == 'table'):
        # Tabulate node index, position, value, Simpson weight and product.
        s = [['$i$', '$x_i$', '$f(x_i)$', '$m$', '$m\\cdot f(x_i)$']]
        for i in range((n + 1)):
            if ((i == 0) or (i == n)):
                j = 1
            else:
                j = ((((i + 1) % 2) * (- 2)) + 4)
            s.append([i, xs[i], ys[i], j, N((j * ys[i]))])
        s.append(['', '', '', '$\\sum$', ('$%s$' % latex(((3 / dx) * approx)))])
        pretty_print(table(s, header_row=True))
        html(('$\\int_{%.2f}^{%.2f} {f(x) \\, \\mathrm{d}x}\\approx\\frac {%.2f}{3}\\cdot %s=%s$' % (interval[0], interval[1], dx, latex(((3 / dx) * approx)), latex(approx))))
def are_almost_same(name_a: str, name_b: str, max_dist: int=1) -> bool:
    """True when both names are non-empty and within Levenshtein distance
    ``max_dist`` of each other."""
    if not (name_a and name_b):
        return False
    return lev.distance(name_a, name_b) <= max_dist
def path_to_display(path):
    """Best-effort conversion of a filesystem path to displayable text.

    None passes through; text stays as-is; bytes are decoded strictly with
    the filesystem encoding, falling back to an escaped representation.
    """
    if path is None:
        return None
    if isinstance(path, text_type):
        return path
    try:
        return path.decode(sys.getfilesystemencoding(), 'strict')
    except UnicodeDecodeError:
        # Undecodable bytes: show an escaped repr instead of crashing.
        return str_to_display('b{!r}'.format(path)) if PY2 else ascii(path)
def main():
    """Export CNN stories short enough for GPT-2 (<= 1022 tokens) into
    train/dev/test pickles under data/cnn."""
    dataset = CNNDataset()
    output_dir = 'data/cnn'
    # exist_ok: the original os.makedirs crashed with FileExistsError on rerun.
    os.makedirs(output_dir, exist_ok=True)
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2-large')
    split_size = {'train': 10000, 'dev': 5000, 'test': 1000}

    def _collect(start, stop, desc, limit):
        # Gather up to `limit` usable stories from dataset[start:stop].
        texts = []
        for i in trange(start, stop, desc=desc):
            (_, story_lines, _) = dataset[i]
            text = '\n\n'.join(story_lines)
            if len(tokenizer.tokenize(text)) > 1022:
                continue  # too long for the 1024-token GPT-2 context
            texts.append({'condition': '', 'text': text})
            if len(texts) >= limit:
                break
        return texts

    # The original duplicated this logic verbatim for the three splits.
    splits = [('train', 0, 20000, 'Getting Train Text'),
              ('dev', 20000, 30000, 'Getting Dev Text'),
              ('test', 30000, 40000, 'Getting Test Text')]
    for name, start, stop, desc in splits:
        texts = _collect(start, stop, desc, split_size[name])
        print(f'#{name}:', len(texts))
        # `with` closes the file; the original leaked the handle from open().
        with open(os.path.join(output_dir, f'{name}.pickle'), 'wb') as fh:
            pickle.dump(texts, fh)
def encode_datas(table, queries, column2vec, op2vec):
    """Encode each query in *queries* against *table* using the given
    column and operator vocabularies; returns one encoding per query."""
    encoded = []
    for query in queries:
        encoded.append(encode_data(table, query, column2vec, op2vec))
    return encoded
# NOTE(review): decorator prefix appears truncated in the source
# (shown as `.parametrize`); restored as pytest.mark.parametrize -- confirm.
@pytest.mark.parametrize('param', PARAMS, ids=IDS)
def test_cython_api(param):
    """Check each Cython special-function signature against its Python version.

    ``specializations`` lists the typecode tuples a fused Cython function was
    compiled for; arguments whose typecode varies across specializations are
    "fused" and must be used to index the right specialization.
    """
    (pyfunc, cyfunc, specializations, knownfailure) = param
    if knownfailure:
        pytest.xfail(reason=knownfailure)
    # values[j] = set of typecodes observed for argument j.
    values = [set() for code in specializations[0]]
    for typecodes in specializations:
        for (j, v) in enumerate(typecodes):
            values[j].add(v)
    # Mark each *distinct* varying argument set as fused exactly once.
    seen = set()
    is_fused_code = ([False] * len(values))
    for (j, v) in enumerate(values):
        vv = tuple(sorted(v))
        if (vv in seen):
            continue
        is_fused_code[j] = (len(v) > 1)
        seen.add(vv)
    for typecodes in specializations:
        # Build the fused-type key used to select the Cython specialization.
        signature = [CYTHON_SIGNATURE_MAP[code] for (j, code) in enumerate(typecodes) if is_fused_code[j]]
        if signature:
            cy_spec_func = cyfunc[tuple(signature)]
        else:
            # Not fused: the function itself is the only specialization.
            signature = None
            cy_spec_func = cyfunc
        pts = _generate_test_points(typecodes)
        for pt in pts:
            pyval = pyfunc(*pt)
            cyval = cy_spec_func(*pt)
            assert_allclose(cyval, pyval, err_msg='{} {} {}'.format(pt, typecodes, signature))
class Node(with_metaclass(NodeType, object)):
    """Base class for all AST nodes.

    ``fields`` names the positional child slots, ``attributes`` the extra
    metadata slots (line number, environment).  Abstract nodes cannot be
    instantiated.
    """
    fields = ()
    attributes = ('lineno', 'environment')
    abstract = True

    def __init__(self, *fields, **attributes):
        if self.abstract:
            raise TypeError('abstract nodes are not instantiable')
        if fields:
            # Positional args must match the declared fields exactly.
            if (len(fields) != len(self.fields)):
                if (not self.fields):
                    raise TypeError(('%r takes 0 arguments' % self.__class__.__name__))
                raise TypeError(('%r takes 0 or %d argument%s' % (self.__class__.__name__, len(self.fields), (((len(self.fields) != 1) and 's') or ''))))
            for (name, arg) in izip(self.fields, fields):
                setattr(self, name, arg)
        # Unset attributes default to None; unknown keywords are an error.
        for attr in self.attributes:
            setattr(self, attr, attributes.pop(attr, None))
        if attributes:
            raise TypeError(('unknown attribute %r' % next(iter(attributes))))

    def iter_fields(self, exclude=None, only=None):
        """Yield (name, value) for the node's fields, optionally filtered.

        Fields that were never assigned are silently skipped.
        """
        for name in self.fields:
            if ((exclude is only is None) or ((exclude is not None) and (name not in exclude)) or ((only is not None) and (name in only))):
                try:
                    (yield (name, getattr(self, name)))
                except AttributeError:
                    pass

    def iter_child_nodes(self, exclude=None, only=None):
        """Yield all direct child nodes, flattening list-valued fields."""
        for (_, item) in self.iter_fields(exclude, only):
            if isinstance(item, list):
                for n in item:
                    if isinstance(n, Node):
                        (yield n)
            elif isinstance(item, Node):
                (yield item)

    def find(self, node_type):
        """Return the first descendant of ``node_type`` or None."""
        for result in self.find_all(node_type):
            return result

    def find_all(self, node_type):
        """Yield every descendant of ``node_type`` (pre-order)."""
        for child in self.iter_child_nodes():
            if isinstance(child, node_type):
                (yield child)
            for result in child.find_all(node_type):
                (yield result)

    def set_ctx(self, ctx):
        """Set the context ('load'/'store'/...) on this subtree; returns self."""
        todo = deque([self])
        while todo:
            node = todo.popleft()
            if ('ctx' in node.fields):
                node.ctx = ctx
            todo.extend(node.iter_child_nodes())
        return self

    def set_lineno(self, lineno, override=False):
        """Fill in missing line numbers in this subtree; returns self."""
        todo = deque([self])
        while todo:
            node = todo.popleft()
            if ('lineno' in node.attributes):
                if ((node.lineno is None) or override):
                    node.lineno = lineno
            todo.extend(node.iter_child_nodes())
        return self

    def set_environment(self, environment):
        """Attach ``environment`` to every node in this subtree; returns self."""
        todo = deque([self])
        while todo:
            node = todo.popleft()
            node.environment = environment
            todo.extend(node.iter_child_nodes())
        return self

    def __eq__(self, other):
        # Equality compares type and field values; attributes are ignored.
        return ((type(self) is type(other)) and (tuple(self.iter_fields()) == tuple(other.iter_fields())))

    def __ne__(self, other):
        return (not self.__eq__(other))

    # Nodes are hashable by identity despite defining __eq__.
    __hash__ = object.__hash__

    def __repr__(self):
        return ('%s(%s)' % (self.__class__.__name__, ', '.join((('%s=%r' % (arg, getattr(self, arg, None))) for arg in self.fields))))

    def dump(self):
        """Return a constructor-style string representation of the subtree."""
        def _dump(node):
            if (not isinstance(node, Node)):
                buf.append(repr(node))
                return
            buf.append(('nodes.%s(' % node.__class__.__name__))
            if (not node.fields):
                buf.append(')')
                return
            for (idx, field) in enumerate(node.fields):
                if idx:
                    buf.append(', ')
                value = getattr(node, field)
                if isinstance(value, list):
                    buf.append('[')
                    for (idx, item) in enumerate(value):
                        if idx:
                            buf.append(', ')
                        _dump(item)
                    buf.append(']')
                else:
                    _dump(value)
            buf.append(')')
        buf = []
        _dump(self)
        return ''.join(buf)
def same_width(epsilon=0.1):
    """Return a predicate testing whether two bboxes have widths that differ
    by less than ``epsilon``."""
    def _predicate(bbox1, bbox2):
        return abs(_width(bbox1) - _width(bbox2)) < epsilon
    return _predicate
class Simulator():
    """Adapter exposing a torch-based simulator through a numpy interface.

    Positional args are stacked, cast to float32 and transposed before the
    call; the result comes back as a numpy array of shape (batch_size, -1).
    """

    def __init__(self, simulator):
        # The wrapped callable; presumably accepts a float32 torch tensor -- confirm.
        self.simulator = simulator

    def __call__(self, *args, batch_size=1, random_state=None):
        stacked = np.stack(args).astype(np.float32).T
        result = self.simulator(torch.from_numpy(stacked))
        return result.numpy().reshape(batch_size, (- 1))
def IBA_calc(TPR, TNR, alpha=1):
    """Index of balanced accuracy: (1 + alpha*(TPR - TNR)) * TPR * TNR.

    Returns the string 'None' when the inputs are not numeric (TypeError),
    matching the library's convention for undefined statistics.
    """
    try:
        weight = 1 + alpha * (TPR - TNR)
        return weight * TPR * TNR
    except TypeError:
        return 'None'
class Calculator():
    """Simple four-function calculator that records every computation."""

    # Total number of calculations performed (class-level, shared by all instances).
    amount_calculation = 0
    # History of CalculatorResult records.  NOTE(review): class-level mutable
    # list, shared across all Calculator instances -- confirm this is intended.
    results = []

    class Decorators():
        def calc_decorator(func):
            """Wrap an arithmetic method so each call is logged to
            Calculator.results and counted in amount_calculation."""
            def inner(self, a, b):
                result = func(self, a, b)
                cr = CalculatorResult(func.__name__, result)
                Calculator.results.append(cr)
                Calculator.amount_calculation += 1
                return result
            return inner

    # NOTE(review): the source shows a bare `_decorator` before each method;
    # the reference looks mangled (presumably the calc_decorator above).
    # Restored minimally with `@` -- confirm against the original file.
    @_decorator
    def add(self, a: float, b: float) -> float:
        return (a + b)

    @_decorator
    def sub(self, a: float, b: float) -> float:
        return (a - b)

    @_decorator
    def mult(self, a: float, b: float) -> float:
        return (a * b)

    @_decorator
    def div(self, a: float, b: float) -> float:
        return (a / b)

    def output_results(self) -> str:
        """Return all recorded results, one per line, without a trailing newline."""
        return ''.join([(str(x) + '\n') for x in self.results]).rstrip()
# NOTE(review): decorator prefixes appear truncated in the source (shown as
# bare `.parametrize`); restored as pytest.mark.parametrize -- confirm.
@pytest.mark.parametrize('num_of_slices', [7])
@pytest.mark.parametrize('size', [55, 31])
@pytest.mark.parametrize('batch_size', [1, 3])
@pytest.mark.parametrize('shuffle', [False])
@pytest.mark.parametrize('drop_last', [True, False])
def test_sliced_data_iterator_equivalence(test_data_csv_png_10, num_of_slices, size, batch_size, shuffle, drop_last):
    """A data iterator sliced into N pieces must yield the same data as N
    independent iterators each built over the corresponding data block."""
    def lcm(a, b):
        return ((abs((a * b)) / math.gcd(a, b)) if (a and b) else 0)
    # Enough epochs for every slice/batch combination to cycle completely.
    max_epoch = (lcm(batch_size, size) / size)

    def test_load_func(position):
        # Each sample is just its own position, so equality checks are easy.
        return np.full(1, position, dtype=np.int32)

    def simple_load_func(data_set, position):
        return data_set[position]

    # NOTE(review): the nesting of the yields below was reconstructed from a
    # single collapsed line -- confirm against the original file.
    def get_data(iter_list, iter_num):
        # Yield iter_num batches from each iterator, then the running batch
        # count after each iterator and once more at the end.
        total = 0
        for it in iter_list:
            for _ in range(iter_num):
                (yield it.next())
                total += 1
            (yield total)
        (yield total)

    iter_num = int((((max_epoch * size) / (num_of_slices * batch_size)) + 0.5))
    # The iterators under test: one base iterator sliced num_of_slices ways.
    sliced_di_list = []
    di = data_iterator_simple(test_load_func, size, batch_size, shuffle=shuffle)
    for slice_pos in range(num_of_slices):
        sliced_di = di.slice(rng=None, num_of_slices=num_of_slices, slice_pos=slice_pos, drop_last=drop_last)
        sliced_di_list.append(sliced_di)
    # The reference: independent iterators over manually computed blocks.
    ref_di_list = []
    all_data = [np.full(1, position, dtype=np.int32) for position in range(size)]
    slice_block_size = (size // num_of_slices)
    if (not drop_last):
        # Without drop_last, uneven sizes round the block size up.
        slice_block_size += (1 if ((size % num_of_slices) != 0) else 0)
    for slice_pos in range(num_of_slices):
        start_index = (slice_pos * slice_block_size)
        end_index = (start_index + slice_block_size)
        if (end_index > size):
            # Last block is shifted back so every block keeps the same size.
            end_index = size
            start_index = (end_index - slice_block_size)
        sliced_data = all_data[start_index:end_index]
        di = data_iterator_simple(partial(simple_load_func, sliced_data), slice_block_size, batch_size, shuffle=shuffle)
        ref_di_list.append(di)
    # Compare as sets of sample values (order within a slice may differ).
    set_a = set()
    set_b = set()
    for (ref, t) in zip(get_data(ref_di_list, iter_num), get_data(sliced_di_list, iter_num)):
        if isinstance(ref, tuple):
            (ref, t) = (ref[0], t[0])
        if isinstance(ref, np.ndarray):
            set_a = set_a.union(set(ref))
            set_b = set_b.union(set(t))
        else:
            # The interleaved counters must agree exactly.
            assert (ref == t)
    assert (set_a == set_b)
    di_all = (ref_di_list + sliced_di_list)
    for di in di_all:
        di.close()
def OA_17_560():
    """Return an orthogonal array OA(17, 560) via Wilson's construction.

    Builds an OA over GF(2^5), relabels three columns through the subgroup of
    elements of polynomial degree < 4, and feeds the result to
    wilson_construction.  NOTE(review): the combinatorial correctness of the
    parameter choices is taken from the source as-is -- confirm against the
    design-theory references for this database entry.
    """
    from sage.rings.finite_rings.finite_field_constructor import FiniteField as GF
    alpha = 5
    beta = 4
    p = 2
    k = 17
    m = 16
    n = (p ** alpha)
    G = GF((p, alpha), prefix='x')
    G_set = sorted(G)
    # Bijection between field elements and integer symbols of the OA.
    G_to_int = {v: i for (i, v) in enumerate(G_set)}
    # Rows i + x*j for a selection of multipliers x, plus the two trivial columns.
    OA = [[G_to_int[(i + (x * j))] for i in G_set for j in G_set] for x in G_set[(k + 1):0:(- 1)]]
    OA.append([j for i in range(n) for j in range(n)])
    OA.append([i for i in range(n) for j in range(n)])
    # Elements of the additive subgroup (polynomial degree < beta) keep a
    # label; everything else maps to None in the last three columns.
    elements_of_subgroup = set([x for x in G_set if (x.polynomial().degree() < beta)])
    relabel = {G_to_int[v]: i for (i, v) in enumerate(elements_of_subgroup)}
    for x in range((p ** alpha)):
        if (x not in relabel):
            relabel[x] = None
    for C in OA[(- 3):]:
        for (i, x) in enumerate(C):
            C[i] = relabel[x]
    # Transpose from column-major to the row-per-block layout Wilson expects.
    OA = list(zip(*OA))
    return wilson_construction(OA, k, n, m, ([(p ** beta)] * 3), check=False)
def simGetSimulationTimeStep():
    """Query the remote simulation API for the current time step.

    Raises via _check_return if the library reports an error code.
    """
    dt = lib.simGetSimulationTimeStep()
    _check_return(dt)
    return dt
def test_sample_sym():
    """Samples from a unit diagonal Gaussian centered at (1, 1) should have
    empirical mean ~1 and variance ~1 over 10k draws."""
    policy_dist = DiagonalGaussian(dim=2)
    params = dict(
        mean=np.array([1.0, 1.0], dtype=np.float32),
        log_std=np.array([0.0, 0.0], dtype=np.float32))
    draws = [policy_dist.sample_sym(params).numpy() for _ in range(10000)]
    assert np.isclose(np.mean(draws), 1, atol=0.1)
    assert np.isclose(np.var(draws), 1, atol=0.1)
def get_dataloader(dataset, tokenizer, args, split='train'):
    """Build a DataLoader (and update batch sizes on ``args``) for a split.

    'train' uses a random (or distributed) sampler and per-GPU train batch
    size; everything else is sequential with the eval batch size.  Returns
    (dataloader, args).
    """
    def collate(examples):
        # pad_sequence pads with 0 by default, which is exactly what the
        # original did when the tokenizer had no pad token -- this collapses
        # the original's duplicated branch structure into one path.
        pad_value = 0 if tokenizer._pad_token is None else tokenizer.pad_token_id
        if args.model_type == 'gpt2_double':
            # Examples are (token_tensor, label) pairs for the double head.
            text = [ex[0] for ex in examples]
            padded_labels = torch.stack([ex[1] for ex in examples], 0)
            return (pad_sequence(text, batch_first=True, padding_value=pad_value), padded_labels)
        return pad_sequence(examples, batch_first=True, padding_value=pad_value)

    if split == 'train':
        args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
        batch_size = args.train_batch_size
        sampler = (RandomSampler(dataset) if args.local_rank == (- 1)
                   else DistributedSampler(dataset))
    else:
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        batch_size = args.eval_batch_size
        sampler = SequentialSampler(dataset)
    dataloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size, collate_fn=collate)
    return (dataloader, args)
class RosenbrockBenchmark(Benchmark):
    """Benchmark minimising the illuminated Rosenbrock function over
    ``nb_features`` dimensions, each bounded to [-2.048, 2.048]."""

    def __init__(self, nb_features: int=2):
        self.nb_features = nb_features
        ind_domain = ((- 2.048), 2.048)
        objective = algorithms.partial(illumination_rosenbrock, nb_features=nb_features)
        super().__init__(
            fn=objective,
            ind_domain=ind_domain,
            fitness_domain=((0.0, math.inf),),
            features_domain=((ind_domain,) * nb_features),
            default_task='minimisation')
class Sampling(Estimator):
    """Cardinality estimator that evaluates queries on a uniform row sample
    and scales the match count up to the full table."""

    def __init__(self, table, ratio, seed):
        super(Sampling, self).__init__(table=table, version=table.version, ratio=ratio, seed=seed)
        # Materialize the sample once; every query is answered against it.
        self.sample = table.data.sample(frac=ratio, random_state=seed)
        self.sample_num = len(self.sample)

    def query(self, query):
        """Return (estimated cardinality, duration in ms) for ``query``."""
        columns, operators, values = query_2_triple(query, with_none=False, split_range=False)
        start_stmp = time.time()
        mask = np.ones(self.sample_num, dtype=bool)
        for col, op, val in zip(columns, operators, values):
            mask &= OPS[op](self.sample[col], val)
        # Scale the sample's matching fraction back up to the table size.
        card = np.round((self.table.row_num / self.sample_num) * mask.sum())
        dur_ms = (time.time() - start_stmp) * 1000.0
        return (card, dur_ms)
def create_GAT_model(graph):
    """Build a small 2-layer GAT over ``graph`` with all-ones layer
    initializers (deterministic weights for saliency-map tests).

    Returns (gat, keras_model, generator, train_gen).
    """
    generator = FullBatchNodeGenerator(graph, sparse=False, method=None)
    train_gen = generator.flow([0, 1], np.array([[1, 0], [0, 1]]))
    gat = GAT(
        layer_sizes=[2, 2],
        generator=generator,
        bias=False,
        in_dropout=0,
        attn_dropout=0,
        activations=['elu', 'softmax'],
        normalize=None,
        saliency_map_support=True)
    # Force deterministic weights so saliency outputs are reproducible.
    for gat_layer in gat._layers:
        gat_layer._initializer = 'ones'
    x_inp, x_out = gat.in_out_tensors()
    keras_model = Model(inputs=x_inp, outputs=x_out)
    return (gat, keras_model, generator, train_gen)
class GdsMaterialStackLayer(schema_utils.Model):
    """Schema model for one layer of a GDS-driven material stack."""

    # Material applied where this layer's GDS polygons are drawn.
    # NOTE(review): foreground/background semantics inferred from the field
    # names -- confirm against the stack renderer.
    foreground = types.ModelType(Material)
    # Material applied elsewhere in the layer.
    background = types.ModelType(Material)
    # Vertical extent of the layer as a 2-vector.
    extents = optplan.vec2d()
    # GDS layer specification as a list of ints (presumably [layer, datatype]
    # -- confirm against the GDS reader).
    gds_layer = types.ListType(types.IntType())
def validate_headers(ctx: click.core.Context, param: click.core.Parameter, raw_value: tuple[(str, ...)]) -> dict[(str, str)]:
    """Convert repeated ``Name: value`` CLI header options into a dict.

    Raises click.BadParameter for unparsable headers, empty names,
    non-latin-1 names/values, or invalid control characters.
    """
    headers: dict[str, str] = {}
    for raw_header in raw_value:
        # reraise_format_error turns the split failure into a CLI error.
        with reraise_format_error(raw_header):
            key, value = raw_header.split(':', maxsplit=1)
        value = value.lstrip()
        key = key.strip()
        # Validation order is preserved so error messages stay the same.
        if not key:
            raise click.BadParameter('Header name should not be empty.')
        if not is_latin_1_encodable(key):
            raise click.BadParameter('Header name should be latin-1 encodable.')
        if not is_latin_1_encodable(value):
            raise click.BadParameter('Header value should be latin-1 encodable.')
        if has_invalid_characters(key, value):
            raise click.BadParameter('Invalid return character or leading space in header.')
        headers[key] = value
    return headers
class CorundumVerilatorNIC(NICSim):
    """Corundum NIC simulated with its Verilator RTL model."""

    def __init__(self) -> None:
        super().__init__()
        # Clock frequency passed to the verilator binary on the command line.
        self.clock_freq = 250

    def resreq_mem(self) -> int:
        """Memory reservation requested for this simulator (512; unit
        presumably MB -- confirm against sibling simulators)."""
        return 512

    def run_cmd(self, env: ExpEnv) -> str:
        """Command line launching the verilator model at our clock frequency."""
        return self.basic_run_cmd(env, '/corundum/corundum_verilator', str(self.clock_freq))
class IsolationForestConfig(DetectorConfig):
    """Configuration for an isolation-forest anomaly detector.

    Inputs are differenced and shingled (window 2, stride 1) by default.
    """
    _default_transform = TransformSequence([DifferenceTransform(), Shingle(size=2, stride=1)])

    def __init__(self, max_n_samples: int=None, n_estimators: int=100, n_jobs=(- 1), **kwargs):
        # 1.0 means "use all samples" (sklearn's fraction convention).
        self.max_n_samples = max_n_samples if max_n_samples is not None else 1.0
        self.n_estimators = n_estimators
        self.n_jobs = n_jobs
        # Isolation-forest anomaly scores are normalized to [0, 1].
        kwargs['max_score'] = 1.0
        super().__init__(**kwargs)
def exchook(exc_type, exc_obj, exc_tb):
    """sys.excepthook replacement: exit with status 1 on Ctrl-C, otherwise
    delegate to better_exchook's pretty traceback printer."""
    if exc_type is KeyboardInterrupt:
        print('SprintExternInterface[pid %i]: KeyboardInterrupt' % (os.getpid(),))
        sys.exit(1)
    better_exchook.better_exchook(exc_type, exc_obj, exc_tb)
class Transition(nn.Module):
    """Placeholder transition block recording its channel counts.

    forward is a stub (returns None) -- presumably to be filled in later.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

    def forward(self, x):
        # BUG FIX: the original signature was `forward(x)` without `self`,
        # so calling the module raised TypeError; still a no-op stub.
        pass
def make_read_x():
    """Build the SDFG for the x-vector gather stage of the FPGA SpMV kernel.

    Reads a column index from ``col_pipe``, gathers ``x_mem[col]`` and
    forwards the value to ``compute_pipe``.  Connector names in the tasklet
    and the dst/src_conn arguments are part of the dataflow interface.
    """
    sdfg = SDFG('spmv_read_x')
    # Shared iteration scaffolding; body is the state the tasklet lives in.
    (pre_state, body, post_state) = make_iteration_space(sdfg)
    x_mem = body.add_array('x_mem', (cols,), dtype, storage=StorageType.FPGA_Global)
    col_pipe = body.add_stream('col_pipe', itype, storage=StorageType.FPGA_Local)
    compute_pipe = body.add_stream('compute_pipe', dtype, storage=StorageType.FPGA_Local)
    tasklet = body.add_tasklet('read_x', {'x_in', 'col_in'}, {'x_out'}, 'x_out = x_in[col_in]')
    # The whole x vector is visible to the tasklet; the streams move one
    # element per firing.
    body.add_memlet_path(x_mem, tasklet, dst_conn='x_in', memlet=Memlet.simple(x_mem, '0:cols'))
    body.add_memlet_path(col_pipe, tasklet, dst_conn='col_in', memlet=Memlet.simple(col_pipe, '0'))
    body.add_memlet_path(tasklet, compute_pipe, src_conn='x_out', memlet=Memlet.simple(compute_pipe, '0'))
    return sdfg
def test_sentences():
    """Spans within a document must be contiguous; linked mentions carry float scores."""
    docs = list(Reader(utf8_open(CONLL_MULTISENT)))
    for d in docs:
        # NOTE(review): `last` is not reset between sentences, so offsets are
        # document-continuous across sentence boundaries — presumably intended
        last = None
        for s in d.sentences:
            for span in s:
                if last:
                    # every span starts exactly where the previous one ended
                    assert (span.start == last)
                else:
                    assert (span.start == 0)
                last = span.end
                if (isinstance(span, Mention) and (span.link is not None)):
                    assert isinstance(span.score, float)
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Try each candidate executable in *commands* until one launches.

    Returns ``(stdout, returncode)`` on launch; ``(None, None)`` when no
    candidate could be started; ``(None, returncode)`` on non-zero exit.
    """
    assert isinstance(commands, list)
    popen_kwargs = {}
    if sys.platform == 'win32':
        # suppress the console window that would otherwise flash up on Windows
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        popen_kwargs['startupinfo'] = startupinfo
    process = None
    for command in commands:
        dispcmd = str([command] + args)
        try:
            process = subprocess.Popen([command] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr else None),
                                       **popen_kwargs)
        except OSError as e:
            if e.errno == errno.ENOENT:
                # executable not found — try the next candidate
                continue
            if verbose:
                print('unable to run %s' % dispcmd)
                print(e)
            return (None, None)
        break
    else:
        if verbose:
            print('unable to find command, tried %s' % (commands,))
        return (None, None)
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print('unable to run %s (error)' % dispcmd)
            print('stdout was %s' % stdout)
        return (None, process.returncode)
    return (stdout, process.returncode)
def register_Ns3FlameFlameHeader_methods(root_module, cls):
    """PyBindGen registration for ns3::flame::FlameHeader.

    Generated-style binding code: registers the comparison operator,
    constructors, accessors, and the Header (de)serialization virtuals.
    """
    cls.add_binary_comparison_operator('==')
    cls.add_constructor([param('ns3::flame::FlameHeader const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AddCost', 'void', [param('uint8_t', 'cost')])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetCost', 'uint8_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetOrigDst', 'ns3::Mac48Address', [], is_const=True)
    cls.add_method('GetOrigSrc', 'ns3::Mac48Address', [], is_const=True)
    cls.add_method('GetProtocol', 'uint16_t', [], is_const=True)
    cls.add_method('GetSeqno', 'uint16_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetOrigDst', 'void', [param('ns3::Mac48Address', 'dst')])
    cls.add_method('SetOrigSrc', 'void', [param('ns3::Mac48Address', 'OrigSrc')])
    cls.add_method('SetProtocol', 'void', [param('uint16_t', 'protocol')])
    cls.add_method('SetSeqno', 'void', [param('uint16_t', 'seqno')])
    return
class TrainOptions(BaseOptions):
    """Training-time options layered on top of BaseOptions.

    Covers visdom/HTML visualization, checkpoint saving, and optimizer /
    learning-rate scheduling parameters.
    """

    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        # visdom and HTML visualization parameters
        parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
        parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
        parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
        # bug fix: the default URL literal was garbled/lost in the source; restore it
        parser.add_argument('--display_server', type=str, default='http://localhost', help='visdom server of the web display')
        parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
        parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
        parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
        parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        # network saving and loading parameters
        parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
        parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
        parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
        parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        # training parameters
        parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
        parser.add_argument('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
        parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
        parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
        parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
        parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
        self.isTrain = True
        return parser
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=(2 ** 0.5)):
    """Bias-add + leaky ReLU + scale, via the custom CUDA kernel when enabled."""
    global use_custom_kernel
    if use_custom_kernel:
        return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
    # reference implementation: broadcast bias over the channel dimension (dim 1)
    view_shape = [1, -1] + [1] * (input.dim() - 2)
    return F.leaky_relu(input + bias.view(*view_shape), negative_slope) * scale
def _is_list_of_str(obj): return (isinstance(obj, list) and all((isinstance(item, six.string_types) for item in obj)))
def test_thrombocytopenia(tmp_path: pathlib.Path):
    """Exercise the thrombocytopenia lab-value labeler against its outcome codes."""
    # NOTE(review): the bare 'SNOMED/' entries look like truncated codes (and the
    # set collapses the duplicate) — confirm against the labeler's real code list
    outcome_codes = {'child_1_1', 'child_2', 'SNOMED/', 'child_1', 'SNOMED/'}
    _create_specific_labvalue_labeler(ThrombocytopeniaCodeLabeler, outcome_codes)
def train():
    """Run one training epoch for the transformer-LM.

    Relies on module-level state: model, optimizer(s), scheduler(s), args,
    iterators and the running counters declared global below.
    """
    global train_step, train_loss, best_val_loss, eval_start_time, log_start_time
    model.train()
    # per-chunk memory states when gradient accumulation over batch chunks is used
    if (args.batch_chunk > 1):
        mems = [tuple() for _ in range(args.batch_chunk)]
    else:
        mems = tuple()
    train_iter = (tr_iter.get_varlen_iter() if args.varlen else tr_iter)
    for (batch, (data, target, seq_len)) in enumerate(train_iter):
        model.zero_grad()
        if (args.batch_chunk > 1):
            # split the batch along dim 1 and accumulate scaled gradients per chunk
            data_chunks = torch.chunk(data, args.batch_chunk, 1)
            target_chunks = torch.chunk(target, args.batch_chunk, 1)
            for i in range(args.batch_chunk):
                data_i = data_chunks[i].contiguous()
                target_i = target_chunks[i].contiguous()
                ret = para_model(data_i, target_i, *mems[i])
                (loss, mems[i]) = (ret[0], ret[1:])
                loss = (loss.float().mean().type_as(loss) / args.batch_chunk)
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                train_loss += loss.float().item()
        else:
            ret = para_model(data, target, *mems)
            (loss, mems) = (ret[0], ret[1:])
            loss = loss.float().mean().type_as(loss)
            if args.fp16:
                optimizer.backward(loss)
            else:
                loss.backward()
            train_loss += loss.float().item()
        if args.fp16:
            optimizer.clip_master_grads(args.clip)
        else:
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
        if (args.sample_softmax > 0):
            optimizer_sparse.step()
        train_step += 1
        # learning-rate schedule: linear warmup, then scheduler-specific decay
        if (args.scheduler in ['cosine', 'constant', 'dev_perf']):
            if (train_step < args.warmup_step):
                curr_lr = ((args.lr * train_step) / args.warmup_step)
                optimizer.param_groups[0]['lr'] = curr_lr
                if (args.sample_softmax > 0):
                    # sparse (sampled-softmax) optimizer warms up at twice the LR
                    optimizer_sparse.param_groups[0]['lr'] = (curr_lr * 2)
            elif (args.scheduler == 'cosine'):
                scheduler.step(train_step)
                if (args.sample_softmax > 0):
                    scheduler_sparse.step(train_step)
        elif (args.scheduler == 'inv_sqrt'):
            scheduler.step(train_step)
        if ((train_step % args.log_interval) == 0):
            cur_loss = (train_loss / args.log_interval)
            elapsed = (time.time() - log_start_time)
            log_str = '| epoch {:3d} step {:>8d} | {:>6d} batches | lr {:.3g} | ms/batch {:5.2f} | loss {:5.2f}'.format(epoch, train_step, (batch + 1), optimizer.param_groups[0]['lr'], ((elapsed * 1000) / args.log_interval), cur_loss)
            if (args.dataset in ['enwik8', 'text8']):
                # character-level datasets report bits-per-character
                log_str += ' | bpc {:9.5f}'.format((cur_loss / math.log(2)))
            else:
                log_str += ' | ppl {:9.3f}'.format(math.exp(cur_loss))
            logging(log_str)
            train_loss = 0
            log_start_time = time.time()
        if ((train_step % args.eval_interval) == 0):
            val_loss = evaluate(va_iter)
            logging(('-' * 100))
            log_str = '| Eval {:3d} at step {:>8d} | time: {:5.2f}s | valid loss {:5.2f}'.format((train_step // args.eval_interval), train_step, (time.time() - eval_start_time), val_loss)
            if (args.dataset in ['enwik8', 'text8']):
                log_str += ' | bpc {:9.5f}'.format((val_loss / math.log(2)))
            else:
                log_str += ' | valid ppl {:9.3f}'.format(math.exp(val_loss))
            logging(log_str)
            logging(('-' * 100))
            # checkpoint whenever validation improves (unless debugging)
            if ((not best_val_loss) or (val_loss < best_val_loss)):
                if (not args.debug):
                    with open(os.path.join(args.work_dir, 'model.pt'), 'wb') as f:
                        torch.save(model, f)
                    with open(os.path.join(args.work_dir, 'optimizer.pt'), 'wb') as f:
                        torch.save(optimizer.state_dict(), f)
                best_val_loss = val_loss
            if (args.scheduler == 'dev_perf'):
                # plateau-style scheduler driven by validation loss
                scheduler.step(val_loss)
                if (args.sample_softmax > 0):
                    scheduler_sparse.step(val_loss)
            eval_start_time = time.time()
        if (train_step == args.max_step):
            break
def main():
    """CLI entry point for the Cirrus-format wiki dump extractor."""
    parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]), formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__)
    parser.add_argument('input', help='Cirrus Json wiki dump file')
    groupO = parser.add_argument_group('Output')
    groupO.add_argument('-o', '--output', default='text', help="directory for extracted files (or '-' for dumping to stdin)")
    groupO.add_argument('-b', '--bytes', default='1M', help='maximum bytes per output file (default %(default)s)', metavar='n[KMG]')
    groupO.add_argument('-c', '--compress', action='store_true', help='compress output files using bzip')
    groupP = parser.add_argument_group('Processing')
    groupP.add_argument('-ns', '--namespaces', default='', metavar='ns1,ns2', help='accepted namespaces')
    groupS = parser.add_argument_group('Special')
    groupS.add_argument('-q', '--quiet', action='store_true', help='suppress reporting progress info')
    groupS.add_argument('-v', '--version', action='version', version=('%(prog)s ' + version), help='print program version')
    args = parser.parse_args()
    try:
        # parse sizes like '1M' / '512k'; bug fix: a plain number ('500000')
        # used to have its last digit silently dropped by an unconditional [:-1]
        suffix = args.bytes[-1].lower()
        if suffix in 'kmg':
            power = ('kmg'.index(suffix) + 1)
            file_size = (int(args.bytes[:-1]) * (1024 ** power))
        else:
            file_size = int(args.bytes)
        if (file_size < minFileSize):
            raise ValueError()
    except ValueError:
        logging.error('Insufficient or invalid size: %s', args.bytes)
        return
    FORMAT = '%(levelname)s: %(message)s'
    logging.basicConfig(format=FORMAT)
    logger = logging.getLogger()
    if (not args.quiet):
        logger.setLevel(logging.INFO)
    input_file = args.input
    output_path = args.output
    if ((output_path != '-') and (not os.path.isdir(output_path))):
        try:
            os.makedirs(output_path)
        except OSError:  # bug fix: was a bare `except:` hiding even SystemExit
            logging.error('Could not create: %s', output_path)
            return
    process_dump(input_file, output_path, file_size, args.compress)
# NOTE(review): the decorator line below was garbled in extraction —
# presumably `@njit(**njit_dict_no_parallel)`; confirm against the original file
(**njit_dict_no_parallel)
def pair_creation_packet(packet):
    """Handle pair creation for a gamma packet, or photoabsorb it.

    With probability ``1 - probability_gamma`` the packet is absorbed;
    otherwise it is re-emitted in a random direction at the electron
    rest-mass frequency (comoving frame).
    """
    probability_gamma = ((2 * ELECTRON_MASS_ENERGY_KEV) / (H_CGS_KEV * packet.nu_cmf))
    if (np.random.random() > probability_gamma):
        packet.status = GXPacketStatus.PHOTOABSORPTION
        return packet
    new_direction = get_random_unit_vector()
    # aberrate the sampled direction; the negative time presumably inverts the
    # comoving->rest-frame transform — confirm against angle_aberration_gamma
    final_direction = angle_aberration_gamma(new_direction, packet.location, ((- 1) * packet.time_current))
    packet.direction = final_direction
    doppler_factor = doppler_factor_3d(packet.direction, packet.location, packet.time_current)
    # annihilation photon: comoving frequency pinned to the electron rest-mass energy
    packet.nu_cmf = (ELECTRON_MASS_ENERGY_KEV / H_CGS_KEV)
    packet.nu_rf = (packet.nu_cmf / doppler_factor)
    packet.energy_rf = (packet.energy_cmf / doppler_factor)
    return packet
def get_n_params(model):
    """Return the total number of scalar elements across all parameters of *model*.

    Replaces the original hand-rolled nested loops (which also shadowed the
    common ``nn`` name) with the idiomatic ``Tensor.numel`` sum.
    """
    return sum(p.numel() for p in model.parameters())
class CleanAuthors():
    """Filter, validate and de-duplicate a collection of author names."""

    def __init__(self, authors):
        self.authors = authors

    def get_valid_names(self, blocklist):
        """Return names that contain no blocklisted token and are longer than one char."""
        valid = set()
        for author in self.authors:
            if not self.contains_blocklist(author, blocklist):
                if len(author) > 1:
                    valid.add(author)
        return list(valid)

    def contains_blocklist(self, parent, blocklist):
        """True if any blocklist token occurs as a substring of *parent*."""
        return any((token in parent) for token in blocklist)

    def de_duplicate(self, authors):
        """Drop names that contain another listed name as a case-insensitive substring."""
        repeated = set()
        for author1 in authors:
            for author2 in authors:
                if (author1.lower() in author2.lower()) and (author1 != author2):
                    repeated.add(author2)
        return list(set(authors) - repeated)

    def clean(self, blocklist):
        """Full best-effort pipeline; returns [] on any unexpected error."""
        try:
            authors = self.get_valid_names(blocklist)
            if len(authors) > 1:
                authors = self.de_duplicate(authors)
        except Exception:
            # bug fix: was a bare `except:` that also swallowed
            # SystemExit/KeyboardInterrupt; keep the best-effort fallback
            authors = []
        return list(filter(None, authors))
def eg_rule_action1(memories_info, args):
    """Rule action: build an EntanglementGenerationA protocol for the first memory.

    Returns the ``[protocol, dst_nodes, req_funcs, req_args]`` quadruple
    expected by the rule engine (shape inferred from usage — confirm).
    """
    def eg_req_func(protocols, args):
        # select the first EntanglementGenerationA among the candidate protocols
        for protocol in protocols:
            if isinstance(protocol, EntanglementGenerationA):
                return protocol
    memories = [info.memory for info in memories_info]
    memory = memories[0]
    # owner is None here; middle node 'm1' and remote node 'r2' are hard-coded
    protocol = EntanglementGenerationA(None, ('EGA.' + memory.name), 'm1', 'r2', memory)
    protocol.primary = True
    return [protocol, ['r2'], [eg_req_func], [None]]
class FblasDiag(aenum.AutoNumberEnum):
    """Auto-numbered enum mirroring the CBLAS diagonal-type flags."""
    # `((),)` / `()` pass no constructor args; AutoNumberEnum assigns sequential values
    FblasUnit = ((),)
    FblasNoUnit = ((),)
    FblasDiagUndef = ()
def get_lr_scheduler_class(args):
    """Resolve the LR-scheduler class named by ``args.lr_scheduler['type']``.

    Project-registered schedulers take precedence; anything else is looked
    up in ``torch.optim.lr_scheduler``.
    """
    config = getattr(args, 'lr_scheduler')
    registry = pipe.optimizers.lr_scheduler.ADDITIONAL_AVAILABLE_LR_SCHEDULERS
    name = config['type']
    if name in registry:
        return registry[name]
    return getattr(torch.optim.lr_scheduler, name)
class MHSA_stage_adapt_M(nn.Module):
    """Stack of SerialBlock_adapt_M attention blocks sharing one conv positional
    encoding (cpe) and one conv relative-position encoding (crpe)."""

    def __init__(self, seq_length, dim, num_layers, num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, num_domains=4, norm_layer=nn.LayerNorm, adapt_method=None, crpe_window=None):
        super(MHSA_stage_adapt_M, self).__init__()
        # bug fix: the window spec was a mutable default argument ({3:2,5:3,7:3});
        # default it lazily instead (same effective default)
        if crpe_window is None:
            crpe_window = {3: 2, 5: 3, 7: 3}
        self.cpe = ConvPosEnc(dim, k=3)
        self.crpe = ConvRelPosEnc(Ch=(dim // num_heads), h=num_heads, window=crpe_window)
        self.mhca_blks = nn.ModuleList([SerialBlock_adapt_M(seq_length, dim, num_heads, mlp_ratio, qkv_bias, qk_scale, drop_rate, attn_drop_rate, drop_path_rate, nn.GELU, norm_layer, self.cpe, self.crpe, adapt_method, num_domains) for _ in range(num_layers)])

    def forward(self, input, H, W, domain_label=None, d=None):
        """Apply each block in order; forward domain_label only when provided."""
        for blk in self.mhca_blks:
            if domain_label is None:  # fix: was `== None`
                input = blk(input, size=(H, W), d=d)
            else:
                input = blk(input, (H, W), domain_label, d=d)
        return input
# NOTE(review): the decorator line below was garbled in extraction — presumably
# `@test_utils.test(debug=True, ...)`; confirm against the original file
_utils.test(debug=True, advanced_optimization=False, exclude=[ti.vulkan, ti.metal, ti.opengl, ti.gles])
def test_ipow_negative_exp_i32():
    """Integer power with a negative exponent, i32 specialization."""
    _ipow_negative_exp(ti.i32)
def torch_where(condition, x, y):
    """Meta-device stand-in for ``torch.where``.

    Only shape/dtype propagation matters here, so the three operands are
    moved to the meta device and combined with additions, which reproduces
    where's broadcasting and type promotion without touching real data.
    """
    cond_meta = condition.to(device='meta')
    x_meta = x.to(device='meta')
    y_meta = y.to(device='meta')
    return cond_meta + x_meta + y_meta
def plot_partitioning(axs, field, cell_tasks, gfd, output_dir, size):
    """Plot the DOF partitioning of a distributed FE field and save PNGs.

    Produces: global_dofs.png (global DOF labels), petsc_dofs.png (per-task
    subdomains + PETSc DOFs), and one petsc_overlaps_NN.png per task.
    """
    mesh = field.domain.mesh
    ax = pc.plot_wireframe(axs[0], mesh.cmesh)
    coors = field.get_coor()
    econn = field.econn
    ax = pd.plot_global_dofs(ax, coors, econn)
    ax.set_title('global DOFs')
    ax.figure.savefig(os.path.join(output_dir, 'global_dofs.png'), bbox_inches='tight')
    ax = pc.plot_wireframe(axs[1], mesh.cmesh)
    fig = ax.figure
    coors = field.get_coor()
    econn = field.econn
    id_map = gfd.id_map
    # RGBA per node: blue channel encodes the owning task index; alpha 0.6 vs 0.9
    # distinguishes the two DOF groups of each task's dof_map (owned vs shared — confirm)
    colors = nm.zeros((field.n_nod, 4))
    for (ir, dof_map) in ordered_iteritems(gfd.dof_maps):
        aux = id_map[dof_map[0]]
        colors[aux] = [0, 0, (float((ir + 1)) / size), 0.6]
        for aux in dof_map[1]:
            colors[id_map[aux]] = [0, 0, (float((ir + 1)) / size), 0.9]
    from sfepy.discrete.fem.utils import prepare_translate
    aux = prepare_translate(id_map[econn], econn)
    ax = label_dofs(ax, coors[aux], id_map, colors)
    mark_subdomains(ax, mesh.cmesh, cell_tasks, size, 0, 0.7)
    ax.set_title('subdomains of tasks and PETSc DOFs')
    fig.savefig(os.path.join(output_dir, 'petsc_dofs.png'), bbox_inches='tight')
    ax.set_title('')
    axis = ax.axis()
    # one transparent overlay per task: overlap cells marked 10, own cells 1
    for (ir, ocells) in enumerate(gfd.overlap_cells):
        aux = nm.zeros_like(cell_tasks)
        aux[ocells] = 10
        aux[gfd.cell_parts[ir]] = 1
        ax = fig.add_axes(ax.get_position(), frameon=False, label='aux')
        mark_subdomains(ax, mesh.cmesh, aux, 11, 1, 0.3, True)
        ax.axis(axis)
        ax.set_title(('overlap cells on task %d' % ir))
        fig.savefig(os.path.join(output_dir, ('petsc_overlaps_%02d.png' % ir)), bbox_inches='tight')
        fig.delaxes(ax)
def GetUpdatedAPdrawDataset(opt, img_path, img_background):
    """Point *opt* at a new input image/background and rebuild the dataset."""
    opt.im_p = img_path
    opt.img_background = img_background
    loader = CreateDataLoader(opt)
    return loader.load_data()
def computeROUGE(outputs, targets, rouge_types):
    """Score *outputs* against the first reference of each target using HF ROUGE."""
    references = [target[0] for target in targets]
    metric = load_metric('rouge')
    return metric.compute(references=references, predictions=outputs, rouge_types=rouge_types)
def weights_init(m):
    """Init hook for ``module.apply``: N(0, 0.01) conv weights/biases,
    N(1, 0.01) BatchNorm scales with zero shifts."""
    classname = m.__class__.__name__
    if (classname.find('Conv') != (- 1)):
        m.weight.data.normal_(0.0, 0.01)
        # bug fix: conv layers built with bias=False have m.bias is None and crashed here
        if m.bias is not None:
            m.bias.data.normal_(0.0, 0.01)
    elif (classname.find('BatchNorm') != (- 1)):
        m.weight.data.normal_(1.0, 0.01)
        m.bias.data.fill_(0)
class ExponentialLR(_LRScheduler):
    """Decays every parameter group's learning rate by *gamma* each epoch."""

    def __init__(self, optimizer, gamma, last_epoch=(- 1), verbose=False):
        self.gamma = gamma
        super(ExponentialLR, self).__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        """One multiplicative decay step relative to the current LRs."""
        if not self._get_lr_called_within_step:
            warnings.warn('To get the last learning rate computed by the scheduler, please use `get_last_lr()`.', UserWarning)
        if self.last_epoch == 0:
            return self.base_lrs
        decayed = []
        for group in self.optimizer.param_groups:
            decayed.append(group['lr'] * self.gamma)
        return decayed

    def _get_closed_form_lr(self):
        """LRs computed directly from the epoch count (no accumulation)."""
        factor = self.gamma ** self.last_epoch
        return [base_lr * factor for base_lr in self.base_lrs]
# NOTE(review): the decorator lines below were garbled in extraction — presumably
# `@pytest.mark.parametrize(...)`; confirm against the original file
.parametrize('n', [0, 1, 2])
.parametrize('x', [0, 1, np.nan])
def test_hermite_nan(n, x):
    """eval_hermite/eval_hermitenorm must return NaN exactly when any input is NaN."""
    assert (np.isnan(_ufuncs.eval_hermite(n, x)) == np.any(np.isnan([n, x])))
    assert (np.isnan(_ufuncs.eval_hermitenorm(n, x)) == np.any(np.isnan([n, x])))
def export():
    """Export a pretrained ResNet-18 to 'resnet.onnx' with a fixed 1x3x224x224 input."""
    sample_input = torch.randn(1, 3, 224, 224)
    net = torchvision.models.resnet18(pretrained=True)
    torch.onnx.export(net, sample_input, 'resnet.onnx')
def parse_cfg(cfg, args):
    """Finalize *cfg* for the selected task: GPU env, heads, directories, geometry."""
    if len(cfg.task) == 0:
        raise ValueError('task must be specified')
    os.environ['CUDA_VISIBLE_DEVICES'] = ', '.join(str(gpu) for gpu in cfg.gpus)
    if cfg.task in _heads_factory:
        cfg.heads = _heads_factory[cfg.task]
    # det_dir must use the *original* model_dir, so compute it before reassigning below
    cfg.det_dir = os.path.join(cfg.model_dir, cfg.task, args.det)
    cfg.head_conv = 64 if 'res' in cfg.network else 256
    cfg.model_dir = os.path.join(cfg.model_dir, cfg.task, cfg.model)
    cfg.record_dir = os.path.join(cfg.record_dir, cfg.task, cfg.model)
    cfg.result_dir = os.path.join(cfg.result_dir, cfg.task, cfg.model)
    cfg.align_corners = cfg.model in ['duck']
    cfg.image_height = 256
    cfg.image_width = 256
class RecurrentDropoutLSTMCell(RNNCellBase):
    """LSTM cell with variational (recurrent) dropout.

    One dropout mask per sequence is sampled via ``set_dropout_masks`` and
    reused at every time step (separate masks for the input and hidden
    streams of each of the four gates i, f, c, o).
    """

    def __init__(self, input_size, hidden_size, dropout=0.0):
        super(RecurrentDropoutLSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.dropout = dropout
        # per-gate input->hidden (W_*) and hidden->hidden (U_*) weights
        self.W_i = Parameter(torch.Tensor(hidden_size, input_size))
        self.U_i = Parameter(torch.Tensor(hidden_size, hidden_size))
        self.W_f = Parameter(torch.Tensor(hidden_size, input_size))
        self.U_f = Parameter(torch.Tensor(hidden_size, hidden_size))
        self.W_c = Parameter(torch.Tensor(hidden_size, input_size))
        self.U_c = Parameter(torch.Tensor(hidden_size, hidden_size))
        self.W_o = Parameter(torch.Tensor(hidden_size, input_size))
        self.U_o = Parameter(torch.Tensor(hidden_size, hidden_size))
        # gate-concatenated biases, laid out [i | f | c | o]
        self.bias_ih = Parameter(torch.Tensor((4 * hidden_size)))
        self.bias_hh = Parameter(torch.Tensor((4 * hidden_size)))
        self._input_dropout_mask = self._h_dropout_mask = None
        self.reset_parameters()

    def reset_parameters(self):
        # orthogonal init for all weight matrices; forget-gate bias starts at 1
        init.orthogonal_(self.W_i)
        init.orthogonal_(self.U_i)
        init.orthogonal_(self.W_f)
        init.orthogonal_(self.U_f)
        init.orthogonal_(self.W_c)
        init.orthogonal_(self.U_c)
        init.orthogonal_(self.W_o)
        init.orthogonal_(self.U_o)
        self.bias_ih.data.fill_(0.0)
        self.bias_ih.data[self.hidden_size:(2 * self.hidden_size)].fill_(1.0)
        self.bias_hh.data.fill_(0.0)

    def set_dropout_masks(self, batch_size):
        """Sample fresh Bernoulli keep-masks (4 per stream, one per gate)."""
        if self.dropout:
            if self.training:
                new_tensor = self.W_i.data.new
                self._input_dropout_mask = Variable(torch.bernoulli(new_tensor(4, batch_size, self.input_size).fill_((1 - self.dropout))), requires_grad=False)
                self._h_dropout_mask = Variable(torch.bernoulli(new_tensor(4, batch_size, self.hidden_size).fill_((1 - self.dropout))), requires_grad=False)
            else:
                # eval mode: scale by the keep probability instead of sampling
                self._input_dropout_mask = self._h_dropout_mask = ([(1.0 - self.dropout)] * 4)
        else:
            self._input_dropout_mask = self._h_dropout_mask = ([1.0] * 4)

    def forward(self, input, hidden_state):
        def get_mask_slice(mask, idx):
            # list masks are scalar keep-probs (eval); tensor masks are sliced
            # per gate and trimmed to the current batch size
            if isinstance(mask, list):
                return mask[idx]
            else:
                return mask[idx][:input.size(0)]
        (h_tm1, c_tm1) = hidden_state
        # input-stream gate pre-activations (masked per gate)
        xi_t = F.linear((input * get_mask_slice(self._input_dropout_mask, 0)), self.W_i)
        xf_t = F.linear((input * get_mask_slice(self._input_dropout_mask, 1)), self.W_f)
        xc_t = F.linear((input * get_mask_slice(self._input_dropout_mask, 2)), self.W_c)
        xo_t = F.linear((input * get_mask_slice(self._input_dropout_mask, 3)), self.W_o)
        # hidden-stream gate pre-activations (masked per gate)
        hi_t = F.linear((h_tm1 * get_mask_slice(self._h_dropout_mask, 0)), self.U_i)
        hf_t = F.linear((h_tm1 * get_mask_slice(self._h_dropout_mask, 1)), self.U_f)
        hc_t = F.linear((h_tm1 * get_mask_slice(self._h_dropout_mask, 2)), self.U_c)
        ho_t = F.linear((h_tm1 * get_mask_slice(self._h_dropout_mask, 3)), self.U_o)
        if input.is_cuda:
            # fused CUDA kernel path: gates concatenated in [i, f, c, o] order
            igates = torch.cat([xi_t, xf_t, xc_t, xo_t], dim=(- 1))
            hgates = torch.cat([hi_t, hf_t, hc_t, ho_t], dim=(- 1))
            state = fusedBackend.LSTMFused.apply
            return state(igates, hgates, c_tm1, self.bias_ih, self.bias_hh)
        else:
            # standard LSTM equations; bias slices follow the [i | f | c | o] layout
            i_t = torch.sigmoid((((xi_t + self.bias_ih[:self.hidden_size]) + hi_t) + self.bias_hh[:self.hidden_size]))
            f_t = torch.sigmoid((((xf_t + self.bias_ih[self.hidden_size:(2 * self.hidden_size)]) + hf_t) + self.bias_hh[self.hidden_size:(2 * self.hidden_size)]))
            c_t = ((f_t * c_tm1) + (i_t * torch.tanh((((xc_t + self.bias_ih[(2 * self.hidden_size):(3 * self.hidden_size)]) + hc_t) + self.bias_hh[(2 * self.hidden_size):(3 * self.hidden_size)]))))
            o_t = torch.sigmoid((((xo_t + self.bias_ih[(3 * self.hidden_size):(4 * self.hidden_size)]) + ho_t) + self.bias_hh[(3 * self.hidden_size):(4 * self.hidden_size)]))
            h_t = (o_t * torch.tanh(c_t))
            return (h_t, c_t)
def simulate_test_prior(n=1000, fixm=False, fixz=False, fixalign=False):
    """Simulate *n* prior-drawn test images.

    The fix* flags freeze host mass / redshift / alignment instead of
    drawing them. Returns a dict with keys 'theta', 'x', 'z'.
    """
    logger.info('Generating prior test data with %s images', n)
    f_sub, beta = draw_params_from_prior(n)
    theta, x, _, _, _, z = augmented_data(
        f_sub=f_sub, beta=beta, n_images=n, mine_gold=False,
        draw_host_mass=not fixm, draw_host_redshift=not fixz,
        draw_alignment=not fixalign)
    return {'theta': theta, 'x': x, 'z': z}
class StepLrUpdaterHook(LrUpdaterHook):
    """Step LR schedule: multiply the base LR by *gamma* at each milestone.

    *step* is either an int (decay every ``step`` units of progress) or a
    sorted list of milestones (decay once per milestone passed).
    """

    def __init__(self, step, gamma=0.1, **kwargs):
        assert isinstance(step, (list, int))
        if isinstance(step, list):
            for s in step:
                assert (isinstance(s, int) and (s > 0))
        elif isinstance(step, int):
            assert (step > 0)
        else:
            raise TypeError('"step" must be a list or integer')
        self.step = step
        self.gamma = gamma
        super(StepLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # bug fix: progress was read from an undefined `trainer`; use `runner`
        progress = (runner.epoch if self.by_epoch else runner.iter)
        if isinstance(self.step, int):
            return (base_lr * (self.gamma ** (progress // self.step)))
        # list form: exponent = number of milestones already passed
        exp = len(self.step)
        for (i, s) in enumerate(self.step):
            if (progress < s):
                exp = i
                break
        return (base_lr * (self.gamma ** exp))
def test_listarray():
    """Round-trip a ListArray through buffers and pickle; contents must survive."""
    expected = [[1, 2, 3], [], [4, 5]]
    offsets_layout = ak_Array(expected).layout
    rebuilt = ak.contents.ListArray(offsets_layout.starts, offsets_layout.stops, offsets_layout.content)
    assert ak_from_buffers(*ak_to_buffers(rebuilt)).to_list() == expected
    unpickled = pickle.loads(pickle.dumps(ak_Array(rebuilt), -1))
    assert unpickled.to_list() == expected
# NOTE(review): the decorator lines below were garbled in extraction — presumably
# `@pytest.mark.parametrize(...)`; confirm against the original file
.parametrize('categorical_as_dictionary', [False, True])
.parametrize('extensionarray', [False, True])
def test_dictionary_encoding(tmp_path, categorical_as_dictionary, extensionarray):
    """Round-trip a categorical IndexedArray through Arrow (and Parquet)."""
    akarray = ak.contents.IndexedArray(ak.index.Index64(np.array([3, 2, 2, 2, 0, 1, 3], dtype=np.uint64)), ak.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3]), parameters={'which': 'inner'}), parameters={'__array__': 'categorical', 'which': 'outer'})
    paarray = akarray.to_arrow(categorical_as_dictionary=categorical_as_dictionary, extensionarray=extensionarray)
    arrow_round_trip(akarray, paarray, extensionarray, categorical_as_dictionary=categorical_as_dictionary)
    # the extension + dictionary combination is skipped for parquet
    # (presumably unsupported upstream — confirm)
    if (not (extensionarray and categorical_as_dictionary)):
        parquet_round_trip(akarray, paarray, extensionarray, tmp_path, categorical_as_dictionary=categorical_as_dictionary)
def buffered_random(stream, buffer_items=100, leak_percent=0.9):
    """Approximately shuffle *stream* with a fixed-size leaky buffer.

    Items accumulate in a buffer of *buffer_items*. Each time it fills, the
    buffer is shuffled and everything past the first ``leak_percent``
    fraction is yielded; the retained prefix "leaks" forward to mix with
    later items. The remainder is shuffled and flushed at the end.
    """
    buf = [None] * buffer_items
    keep = int(buffer_items * leak_percent)
    filled = 0
    for element in stream:
        buf[filled] = element
        filled += 1
        if filled == buffer_items:
            random.shuffle(buf)
            yield from buf[keep:]
            filled = keep
    if filled > 0:
        tail = buf[:filled]
        random.shuffle(tail)
        yield from tail
def random_rotate(image, label):
    """Rotate *image* and *label* by the same random angle in [-20, 20) degrees.

    Nearest-neighbour interpolation (order=0) keeps label values discrete;
    reshape=False preserves the array shape.
    """
    angle = np.random.randint(-20, 20)
    rotated_image = ndimage.rotate(image, angle, order=0, reshape=False)
    rotated_label = ndimage.rotate(label, angle, order=0, reshape=False)
    return (rotated_image, rotated_label)
# NOTE(review): the decorator line below was garbled in extraction — presumably
# `@register_optimizer('sgd')`; confirm against the original file
_optimizer('sgd')
class SGD(FairseqOptimizer):
    """Plain torch.optim.SGD wrapped for the fairseq optimizer registry."""

    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = torch.optim.SGD(params, **self.optimizer_config)

    # NOTE(review): no `self` parameter — presumably decorated @staticmethod
    # upstream; confirm against the original file
    def add_args(parser):
        parser.add_argument('--momentum', default=0.0, type=float, metavar='M', help='momentum factor')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')

    # NOTE(review): __init__ reads this as `self.optimizer_config` (no call),
    # so upstream this is presumably a @property; confirm
    def optimizer_config(self):
        return {'lr': self.args.lr[0], 'momentum': self.args.momentum, 'weight_decay': self.args.weight_decay}

    def supports_flat_params(self):
        return True
class Bsite_extractor():
    """Extract binding sites by mean-shift clustering of high-scoring surface points."""

    def __init__(self, lig_thres, bw=15):
        # T: ligandability score threshold; bw: MeanShift bandwidth
        # (presumably Angstroms — confirm units)
        self.T = lig_thres
        self.ms = MeanShift(bandwidth=bw, bin_seeding=True, cluster_all=False, n_jobs=4)

    def _cluster_points(self, prot, lig_scores):
        # relax the threshold in 0.1 steps until >= 10 points qualify (floor ~0.3)
        T_new = self.T
        while ((sum((lig_scores >= T_new)) < 10) and (T_new > 0.3001)):
            T_new -= 0.1
        # NOTE(review): the loop above tests `>=` but the filters below use a
        # strict `>` — confirm this off-by-inclusion is intended
        filtered_points = prot.surf_points[(lig_scores > T_new)]
        filtered_scores = lig_scores[(lig_scores > T_new)]
        if (len(filtered_points) < 5):
            return ()
        clustering = self.ms.fit(filtered_points)
        labels = clustering.labels_
        (unique_l, freq) = np.unique(labels, return_counts=True)
        # keep clusters with at least 5 members; with cluster_all=False,
        # MeanShift labels orphan points -1, which is dropped below
        if (len(unique_l[(freq >= 5)]) != 0):
            unique_l = unique_l[(freq >= 5)]
        else:
            return ()
        if (unique_l[0] == (- 1)):
            unique_l = unique_l[1:]
        clusters = [(filtered_points[(labels == l)], filtered_scores[(labels == l)]) for l in unique_l]
        return clusters

    def extract_bsites(self, prot, lig_scores):
        """Cluster ligandable points and register each cluster as a binding site on *prot*."""
        clusters = self._cluster_points(prot, lig_scores)
        if (len(clusters) == 0):
            print('No binding site found')
            return
        for cluster in clusters:
            prot.add_bsite(cluster)
        prot.sort_bsites()
        prot.write_bsites()
class Configuration(object):
    """Typed configuration object driven by PROJECT_CONFIG['options'].

    Positional args bind to option names in declaration order; kwargs bind
    by name; remaining options fall back to their declared defaults. A
    default of ``None`` marks a required option.
    """

    def __init__(self, *args, **kwargs):
        opts = PROJECT_CONFIG['options']
        # positional args map onto option names in declaration order
        for (opt, val) in zip(list(opts.keys())[:len(args)], args):
            setattr(self, opt, opts[opt]['type'](val))
        for (opt, val) in kwargs.items():
            if (opt not in opts):
                raise KeyError((('"' + opt) + '" is not a valid option.'))
            if (opt in self.__dict__):
                raise KeyError((('Option "' + opt) + '" set both as arg and kwarg'))
            setattr(self, opt, opts[opt]['type'](val))
        # bug fix: the original set `opt` (the stale loop variable) instead of
        # `arg` when applying defaults, and mutated the set while iterating it
        unset = (opts.keys() - self.__dict__.keys())
        still_missing = set()
        for arg in unset:
            default = opts[arg]['default']
            if default is not None:
                setattr(self, arg, default)
            else:
                still_missing.add(arg)
        if (len(still_missing) > 0):
            raise TypeError('Missing arguments: {}'.format(', '.join(still_missing)))

    @staticmethod
    def csv_header():
        # bug fix: the original had no `self` and no @staticmethod, so calling
        # it on an instance raised a TypeError
        return ','.join(PROJECT_CONFIG['options'].keys())

    def to_string(self):
        """Filesystem-safe encoding: option values joined by '_', with ':'/'_' mapped to '-'."""
        return '_'.join([str(getattr(self, opt)).replace(':', '-').replace('_', '-') for opt in PROJECT_CONFIG['options']])

    def to_csv(self):
        return ','.join(map(str, [getattr(self, opt) for opt in PROJECT_CONFIG['options']]))

    def build_folder(self):
        return ('build_' + self.to_string())

    def benchmark_folder(self):
        return ('benchmark_' + self.to_string())

    def __str__(self):
        return self.to_string()

    def __repr__(self):
        return self.to_string()

    def cmake_command(self, sourceDir, extra=None):
        """Assemble the cmake invocation carrying every option as a -D define."""
        if extra is None:  # fix: was a mutable default argument
            extra = []
        return ((['cmake', sourceDir] + ['-D{}={}'.format(val['cmake'], getattr(self, key)) for (key, val) in PROJECT_CONFIG['options'].items()]) + extra)


def get_conf(s):
    """Parse a string produced by Configuration.to_string back into a Configuration."""
    pattern = ''
    for opt in PROJECT_CONFIG['options'].values():
        if (opt['type'] == str):
            pattern += '([^_]+)_'
        elif (opt['type'] == int):
            pattern += '([0-9]+)_'
        else:
            raise TypeError('Unsupported type "{}".'.format(str(opt['type'])))
    # drop the trailing '_' from the assembled pattern
    m = re.search(pattern[:(- 1)], s)
    if (not m):
        raise ValueError(('Not a valid configuration string: ' + s))
    return Configuration(*m.groups())
def main():
    """Entry point: parse CLI args, set up (distributed) training, train, then test."""
    parser = argparse.ArgumentParser(description='PyTorch Object Detection Training')
    parser.add_argument('--config-file', default='', metavar='FILE', help='path to config file', type=str)
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--skip-test', dest='skip_test', help='Do not test the final model', action='store_true')
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # WORLD_SIZE is set by the distributed launcher; >1 means multi-process run
    num_gpus = (int(os.environ['WORLD_SIZE']) if ('WORLD_SIZE' in os.environ) else 1)
    args.distributed = (num_gpus > 1)
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        synchronize()
    # layer CLI overrides on top of the config file, then lock the config
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)
    logger = setup_logger('atss_core', output_dir, get_rank())
    logger.info('Using {} GPUs'.format(num_gpus))
    logger.info(args)
    logger.info('Collecting env info (might take some time)')
    logger.info(('\n' + collect_env_info()))
    logger.info('Loaded configuration file {}'.format(args.config_file))
    with open(args.config_file, 'r') as cf:
        config_str = ('\n' + cf.read())
        logger.info(config_str)
    logger.info('Running with config:\n{}'.format(cfg))
    model = train(cfg, args.local_rank, args.distributed)
    if (not args.skip_test):
        run_test(cfg, model, args.distributed)
class Omniglot(data.Dataset):
    """Omniglot handwritten-character dataset.

    Images live under ``<root>/omniglot-py/<target_folder>/<alphabet>/<character>``.
    Each sample is a grayscale PIL image plus the integer index of its
    character class.

    Args:
        root: root directory; data is placed under ``<root>/omniglot-py``.
        background: if True use the "background" (train) split, otherwise
            the "evaluation" split.
        transform: optional callable applied to the image.
        target_transform: optional callable applied to the class index.
        download: if True, download and extract the split's zip when missing.
    """
    folder = 'omniglot-py'
    # NOTE(review): this URL literal was corrupted in the original source;
    # restored to the canonical Omniglot repository location — confirm
    # against upstream before relying on downloads.
    download_url_prefix = 'https://github.com/brendenlake/omniglot/raw/master/python'
    # MD5 checksums of the two split archives, keyed by archive basename.
    zips_md5 = {'images_background': '68d2efa1b9178cc56df9314c21c6e718',
                'images_evaluation': '6b91aef0f799c5bb55b94e3f2daec811'}

    def __init__(self, root, background=True, transform=None, target_transform=None, download=False):
        self.root = join(os.path.expanduser(root), self.folder)
        self.background = background
        self.transform = transform
        self.target_transform = target_transform
        if download:
            self.download()
        if (not self._check_integrity()):
            raise RuntimeError(('Dataset not found or corrupted.'
                                + ' You can use download=True to download it'))
        self.target_folder = join(self.root, self._get_target_folder())
        self._alphabets = list_dir(self.target_folder)
        # Characters are "<alphabet>/<character>" relative paths.
        self._characters = sum([[join(a, c) for c in list_dir(join(self.target_folder, a))]
                                for a in self._alphabets], [])
        # Pair every image file with its character-class index, then flatten.
        self._character_images = [[(image, idx)
                                   for image in list_files(join(self.target_folder, character), '.png')]
                                  for (idx, character) in enumerate(self._characters)]
        self._flat_character_images = sum(self._character_images, [])

    def __len__(self):
        return len(self._flat_character_images)

    def __getitem__(self, index):
        """Return (image, character_class) for the flat sample at *index*."""
        (image_name, character_class) = self._flat_character_images[index]
        image_path = join(self.target_folder, self._characters[character_class], image_name)
        image = Image.open(image_path, mode='r').convert('L')
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            character_class = self.target_transform(character_class)
        return (image, character_class)

    def _check_integrity(self):
        """True iff the split's zip is present with the expected MD5."""
        zip_filename = self._get_target_folder()
        if (not check_integrity(join(self.root, (zip_filename + '.zip')),
                                self.zips_md5[zip_filename])):
            return False
        return True

    def download(self):
        """Download and extract the split's archive (no-op when already valid)."""
        import zipfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        filename = self._get_target_folder()
        zip_filename = (filename + '.zip')
        url = ((self.download_url_prefix + '/') + zip_filename)
        download_url(url, self.root, zip_filename, self.zips_md5[filename])
        print(('Extracting downloaded file: ' + join(self.root, zip_filename)))
        with zipfile.ZipFile(join(self.root, zip_filename), 'r') as zip_file:
            zip_file.extractall(self.root)

    def _get_target_folder(self):
        """Archive/folder basename selected by the `background` flag."""
        return ('images_background' if self.background else 'images_evaluation')
class MaterialOptimizer(object):
    """Fit micro-scale material parameters so the homogenized macro response
    matches experimental data, via bound-constrained optimization (fmin_tnc).

    Parameters are optimized in a normalized [0, 1] space and mapped to the
    physical bounds [x_L, x_U] on every evaluation.
    """

    @staticmethod
    def create_app(filename, is_homog=False, **kwargs):
        """Build a sfepy application (homogenization or PDE solver) from a
        problem description file.

        BUG FIX: this was defined without `self` yet called as
        ``self.create_app(...)``, which bound the instance to `filename` and
        raised TypeError; the lost @staticmethod decorator is restored.
        """
        from sfepy.base.conf import ProblemConf, get_standard_keywords
        from sfepy.homogenization.homogen_app import HomogenizationApp
        from sfepy.applications import PDESolverApp
        (required, other) = get_standard_keywords()
        if is_homog:
            # Homogenization problems declare no macroscopic equations.
            required.remove('equations')
        conf = ProblemConf.from_file(filename, required, other, define_args=kwargs)
        options = Struct(output_filename_trunk=None, save_ebc=False,
                         save_ebc_nodes=False, save_regions=False,
                         save_regions_as_groups=False, solve_not=False)
        if is_homog:
            app = HomogenizationApp(conf, options, 'material_opt_micro:')
        else:
            app = PDESolverApp(conf, options, 'material_opt_macro:')
        # Shared dict used to pass data between optimizer and problem hooks.
        app.conf.opt_data = {}
        opts = conf.options
        if hasattr(opts, 'parametric_hook'):
            parametric_hook = conf.get_function(opts.parametric_hook)
            app.parametrize(parametric_hook)
        return app

    def x_norm2real(self, x):
        """Map normalized [0, 1] parameters to physical values in [x_L, x_U]."""
        return ((x * (self.x_U - self.x_L)) + self.x_L)

    def x_real2norm(self, x):
        """Map physical parameter values to the normalized [0, 1] space."""
        return ((x - self.x_L) / (self.x_U - self.x_L))

    def __init__(self, macro_fn, micro_fn, x0, x_L, x_U, exp_data):
        self.macro_app = self.create_app(macro_fn, is_homog=False, is_opt=True)
        self.micro_app = self.create_app(micro_fn, is_homog=True, is_opt=True)
        self.x_L = nm.array(x_L)
        self.x_U = nm.array(x_U)
        self.x0 = self.x_real2norm(nm.array(x0))
        self.x = []          # history of evaluated (physical) parameters
        self.eval_f = []     # history of objective values
        self.exp_data = exp_data

    @staticmethod
    def rotate_mat(D, angle):
        """Rotate a 6x6 elasticity matrix `D` by `angle` (radians) via the
        Voigt-notation transformation T @ D @ T.T.

        BUG FIX: called as ``self.rotate_mat(D, angle)`` but defined without
        `self`; the lost @staticmethod decorator is restored.
        """
        s = nm.sin(angle)
        c = nm.cos(angle)
        s2 = (s ** 2)
        c2 = (c ** 2)
        sc = (s * c)
        T = nm.array([[c2, 0, s2, 0, (2 * sc), 0],
                      [0, 1, 0, 0, 0, 0],
                      [s2, 0, c2, 0, ((- 2) * sc), 0],
                      [0, 0, 0, c, 0, (- s)],
                      [(- sc), 0, sc, 0, (c2 - s2), 0],
                      [0, 0, 0, s, 0, c]])
        return nm.dot(nm.dot(T, D), T.T)

    def matopt_eval(self, x):
        """Objective: homogenize with params `x`, compare macro response
        against experimental data over all measured angles."""
        mic_od = self.micro_app.conf.opt_data
        mac_od = self.macro_app.conf.opt_data
        mic_od['coefs'] = {}
        mic_od['mat_params'] = x
        self.micro_app()
        D = mic_od['D_homog']
        val = 0.0
        aux = []
        for (phi, exp_k) in self.exp_data:
            print(('phi = %d' % phi))
            mac_od['D_homog'] = self.rotate_mat(D, nm.deg2rad(phi))
            self.macro_app()
            comp_k = mac_od['k']
            # Relative squared error of the computed vs measured response.
            val += ((1.0 - (comp_k / exp_k)) ** 2)
            aux.append((comp_k, exp_k))
        val = nm.sqrt(val)
        self.x.append(x)
        self.eval_f.append(val)
        return val

    def iter_step(self, x, first_step=False):
        """Optimizer callback: log objective and current parameters."""
        if first_step:
            self.log = Log([['O'], ['E_f', 'E_m'], ['v_f', 'v_m']],
                           ylabels=['Obj. fun.', "Young's modulus", "Poisson's ratio"],
                           xlabels=['iter', 'iter', 'iter'], aggregate=0)
            self.istep = 0
            self.log(0.5, x[0], x[2], x[1], x[3], x=[0, 0, 0, 0])
        else:
            self.istep += 1
            self.log(self.eval_f[(- 1)], x[0], x[2], x[1], x[3],
                     x=((self.istep,) * 4))

    def material_optimize(self):
        """Run bound-constrained TNC optimization; return physical optimum."""
        x0 = self.x0
        bnds = zip(self.x_real2norm(self.x_L), self.x_real2norm(self.x_U))
        # Evaluate/log in physical space while the optimizer works in [0, 1].
        feval = (lambda x: self.matopt_eval(self.x_norm2real(x)))
        istep = (lambda x: self.iter_step(self.x_norm2real(x)))
        self.iter_step(self.x_norm2real(x0), first_step=True)
        print('>>> material optimization START <<<')
        xopt = fmin_tnc(feval, x0, approx_grad=True, bounds=list(bnds),
                        xtol=0.001, callback=istep)
        print('>>> material optimization FINISHED <<<')
        self.log(finished=True)
        return self.x_norm2real(xopt[0])
def make_multiagent_env(env_id, num_agents, dist_threshold, arena_size, identity_size):
    """Instantiate a particle-world multi-agent environment for scenario *env_id*."""
    scenario = scenarios.load(env_id + '.py').Scenario(
        num_agents=num_agents,
        dist_threshold=dist_threshold,
        arena_size=arena_size,
        identity_size=identity_size,
    )
    # Scenarios without an `info` method simply get no info callback.
    info_callback = getattr(scenario, 'info', None)
    return MultiAgentEnv(
        world=scenario.make_world(),
        reset_callback=scenario.reset_world,
        reward_callback=scenario.reward,
        observation_callback=scenario.observation,
        info_callback=info_callback,
        discrete_action=True,
        done_callback=scenario.done,
        cam_range=arena_size,
    )
def get_workspace_path(agent: Agent, file_name: str) -> str:
    """Resolve *file_name* inside the agent's workspace and return it as a string path."""
    resolved = agent.workspace.get_path(file_name)
    return str(resolved)
def read_contamination():
    """Load the packaged contamination YAML and deserialize it into a Contamination object."""
    hlog(f'Reading contamination information from {CONTAMINATION_YAML_FILENAME}...')
    resource = resources.files(CONTAMINATION_YAML_PACKAGE).joinpath(CONTAMINATION_YAML_FILENAME)
    with resource.open('r') as stream:
        parsed = yaml.safe_load(stream)
    return dacite.from_dict(Contamination, parsed)
def make_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, verbose_logging):
    """Aggregate per-feature start/end logits into final text answers.

    For each example: enumerate candidate (start, end) spans across its
    sliding-window features, filter invalid spans, rank by summed logits,
    de-tokenize the top candidates back onto the original text, and softmax
    the scores into probabilities.

    Returns:
        (all_predictions, all_nbest_json): best answer text per qas_id, and
        the full n-best list (text/probability/logits dicts) per qas_id.
    """
    # Group sliding-window features by their source example.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    # Index model outputs by feature unique_id for O(1) lookup.
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    _PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'start_logit', 'end_logit'])
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # Discard invalid spans: out of range, not mapped back to
                    # the original document, not the max-context window for the
                    # start token, reversed, or longer than max_answer_length.
                    if (start_index >= len(feature.tokens)):
                        continue
                    if (end_index >= len(feature.tokens)):
                        continue
                    if (start_index not in feature.token_to_orig_map):
                        continue
                    if (end_index not in feature.token_to_orig_map):
                        continue
                    if (not feature.token_is_max_context.get(start_index, False)):
                        continue
                    if (end_index < start_index):
                        continue
                    length = ((end_index - start_index) + 1)
                    if (length > max_answer_length):
                        continue
                    prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index]))
        # Rank candidate spans by total (start + end) logit score.
        prelim_predictions = sorted(prelim_predictions, key=(lambda x: (x.start_logit + x.end_logit)), reverse=True)
        _NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'start_logit', 'end_logit'])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if (len(nbest) >= n_best_size):
                break
            feature = features[pred.feature_index]
            if (pred.start_index > 0):
                tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
                tok_text = ' '.join(tok_tokens)
                # De-tokenize WordPiece: drop the '##' continuation markers,
                # then normalize whitespace.
                tok_text = tok_text.replace(' ##', '')
                tok_text = tok_text.replace('##', '')
                tok_text = tok_text.strip()
                tok_text = ' '.join(tok_text.split())
                orig_text = ' '.join(orig_tokens)
                # Align the tokenized answer back onto the original text.
                final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
                if (final_text in seen_predictions):
                    continue
                seen_predictions[final_text] = True
            else:
                # start_index 0 points at the [CLS] token: the "no answer" case.
                final_text = ''
                seen_predictions[final_text] = True
            nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
        # Guarantee at least one entry so downstream indexing is safe.
        if (not nbest):
            nbest.append(_NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0))
        assert (len(nbest) >= 1)
        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append((entry.start_logit + entry.end_logit))
            if (not best_non_null_entry):
                if entry.text:
                    best_non_null_entry = entry
        # Softmax over total logits gives per-candidate probabilities.
        probs = _compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output['text'] = entry.text
            output['probability'] = probs[i]
            output['start_logit'] = entry.start_logit
            output['end_logit'] = entry.end_logit
            nbest_json.append(output)
        assert (len(nbest_json) >= 1)
        all_predictions[example.qas_id] = nbest_json[0]['text']
        all_nbest_json[example.qas_id] = nbest_json
    return (all_predictions, all_nbest_json)
class Net(nn.Module):
    """CNN classifier with DenseNet-style skip concatenations.

    Expects (N, 3, 144, 144) input: four 2x2 max-pools reduce 144 -> 9 and the
    final concatenation carries 1184 channels, matching fc1's 1184*9*9 input.
    Outputs (N, 8) logits.
    """

    def __init__(self):
        super(Net, self).__init__()
        conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1}
        self.conv1 = nn.Conv2d(3, 16, **conv_kwargs)
        self.conv2 = nn.Conv2d(16, 32, **conv_kwargs)
        self.conv3 = nn.Conv2d(32, 64, **conv_kwargs)
        self.conv4 = nn.Conv2d(64, 128, **conv_kwargs)
        # Inputs 128+32 etc. account for the concatenated skip connections.
        self.conv5 = nn.Conv2d((128 + 32), 256, **conv_kwargs)
        self.conv6 = nn.Conv2d(256, 512, **conv_kwargs)
        self.conv7 = nn.Conv2d(((512 + 128) + 32), 256, **conv_kwargs)
        self.conv8 = nn.Conv2d(256, 512, **conv_kwargs)
        self.fc1 = nn.Linear(((1184 * 9) * 9), 128)
        self.fc2 = nn.Linear(128, 8)

    def forward(self, x):
        # BUG FIX: the original called torch.manual_seed(0) here, which froze
        # the dropout mask to the same pattern every forward pass (defeating
        # dropout) and clobbered the global RNG for the rest of the program.
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        maxpool = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv3(maxpool))
        x = F.relu(self.conv4(x))
        concat = torch.cat([maxpool, x], dim=1)
        maxpool = F.max_pool2d(concat, 2, 2)
        x = F.relu(self.conv5(maxpool))
        x = F.relu(self.conv6(x))
        concat = torch.cat([maxpool, x], dim=1)
        maxpool = F.max_pool2d(concat, 2, 2)
        x = F.relu(self.conv7(maxpool))
        x = F.relu(self.conv8(x))
        concat = torch.cat([maxpool, x], dim=1)
        maxpool = F.max_pool2d(concat, 2, 2)
        x = maxpool.flatten(start_dim=1)
        # BUG FIX: F.dropout defaults to training=True, so the original applied
        # dropout even in eval mode; respect the module's training flag.
        x = F.dropout(self.fc1(x), p=0.5, training=self.training)
        x = self.fc2(x)
        return x
class VectorDiscriminator(nn.Module):
    """MLP discriminator over feature vectors.

    Widens from input_nc to ndf * 2**(n_layers-1) with LeakyReLU activations,
    narrows back down symmetrically, and ends in a bias-free scalar head
    (optionally squashed by a sigmoid).
    """

    def __init__(self, input_nc=64, n_layers=2, use_sigmoid=True, gpu_ids=[]):
        super(VectorDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        ndf = 2 * input_nc
        layers = [nn.Linear(input_nc, ndf, bias=True), nn.LeakyReLU(0.1, inplace=False)]
        # Expanding half: widths 1, 2, 4, ... (in units of ndf).
        width = 1
        for depth in range(1, n_layers):
            prev_width, width = width, 2 ** depth
            layers += [nn.Linear(ndf * prev_width, ndf * width, bias=True),
                       nn.LeakyReLU(0.1, inplace=False)]
        # Contracting half: halve the width back down to 1.
        width = 2 ** (n_layers - 1)
        for _ in range(1, n_layers):
            prev_width, width = width, width // 2
            layers += [nn.Linear(ndf * prev_width, ndf * width, bias=True),
                       nn.LeakyReLU(0.1, inplace=False)]
        layers.append(nn.Linear(ndf * width, 1, bias=False))
        if use_sigmoid:
            layers.append(nn.Sigmoid())
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Apply the MLP to *input* and return the scalar score per row."""
        return self.model(input)
def handler(event):
    """Sleep for the number of seconds given by event['sleep'] and echo it back."""
    duration = event.get('sleep')
    sleep(duration)
    return {'result': duration}
def setup_ec2():
    """Provision per-region AWS prerequisites for rllab experiments.

    For each region: ensure a 'rllab-sg' security group exists with SSH open,
    and (re)create an SSH key pair whose private key is saved under
    private/key_pairs.  Populates ALL_REGION_AWS_SECURITY_GROUP_IDS and
    ALL_REGION_AWS_KEY_NAMES as side effects.
    """
    for region in ['us-west-1', 'us-west-2', 'us-east-1']:
        print(('Setting up region %s' % region))
        ec2 = boto3.resource('ec2', region_name=region, aws_access_key_id=ACCESS_KEY, aws_secret_access_key=ACCESS_SECRET)
        ec2_client = boto3.client('ec2', region_name=region, aws_access_key_id=ACCESS_KEY, aws_secret_access_key=ACCESS_SECRET)
        existing_vpcs = list(ec2.vpcs.all())
        assert (len(existing_vpcs) >= 1)
        vpc = existing_vpcs[0]
        print(('Creating security group in VPC %s' % str(vpc.id)))
        try:
            security_group = vpc.create_security_group(GroupName='rllab-sg', Description='Security group for rllab')
        except botocore.exceptions.ClientError as e:
            # Reuse the group created on a previous run.
            if (e.response['Error']['Code'] == 'InvalidGroup.Duplicate'):
                sgs = list(vpc.security_groups.filter(GroupNames=['rllab-sg']))
                security_group = sgs[0]
            else:
                raise e
        ALL_REGION_AWS_SECURITY_GROUP_IDS[region] = [security_group.id]
        ec2_client.create_tags(Resources=[security_group.id], Tags=[{'Key': 'Name', 'Value': 'rllab-sg'}])
        try:
            # NOTE(review): opens SSH to 0.0.0.0/0 — world-reachable by design here.
            security_group.authorize_ingress(FromPort=22, ToPort=22, IpProtocol='tcp', CidrIp='0.0.0.0/0')
        except botocore.exceptions.ClientError as e:
            # Rule already present from a previous run: nothing to do.
            if (e.response['Error']['Code'] == 'InvalidPermission.Duplicate'):
                pass
            else:
                raise e
        print(('Security group created with id %s' % str(security_group.id)))
        key_name = ('rllab-%s' % region)
        try:
            print(('Trying to create key pair with name %s' % key_name))
            key_pair = ec2_client.create_key_pair(KeyName=key_name)
        except botocore.exceptions.ClientError as e:
            if (e.response['Error']['Code'] == 'InvalidKeyPair.Duplicate'):
                # AWS never returns existing private key material, so the only
                # way to obtain a usable .pem is to delete and recreate.
                if (not query_yes_no(('Key pair with name %s exists. Proceed to delete and recreate?' % key_name), 'no')):
                    sys.exit()
                print(('Deleting existing key pair with name %s' % key_name))
                ec2_client.delete_key_pair(KeyName=key_name)
                print(('Recreating key pair with name %s' % key_name))
                key_pair = ec2_client.create_key_pair(KeyName=key_name)
            else:
                raise e
        key_pair_folder_path = os.path.join(config.PROJECT_PATH, 'private', 'key_pairs')
        file_name = os.path.join(key_pair_folder_path, ('%s.pem' % key_name))
        print('Saving keypair file')
        console.mkdir_p(key_pair_folder_path)
        # 384 == 0o600: the private key must be readable by the owner only.
        with os.fdopen(os.open(file_name, (os.O_WRONLY | os.O_CREAT), 384), 'w') as handle:
            handle.write((key_pair['KeyMaterial'] + '\n'))
        os.system(('ssh-add %s' % file_name))
        ALL_REGION_AWS_KEY_NAMES[region] = key_name
# BUG FIX: the decorator lost its '@' in the original (a bare `_utils.test()`
# statement before the def is a syntax error in context); restored as a
# decorator — confirm the intended decorator name against the test suite.
@_utils.test()
def test_fill_vector_field_recompile():
    """fill() on a vector field should compile its kernel once and reuse it."""
    a = ti.Vector.field(2, ti.i32, shape=3)
    for _ in range(2):
        a.fill(ti.Vector([0, 0]))
    assert (impl.get_runtime().get_num_compiled_functions() == 1)
def gen_dependent_configs(xenial_parent_config):
    """Derive test-only child configs (multigpu / nogpu / slow) from a parent build."""
    # (extra parm tags, gpu resource class) per variant.
    variant_specs = [
        (['multigpu'], 'large'),
        (['nogpu', 'NO_AVX2'], None),
        (['nogpu', 'NO_AVX'], None),
        (['slow'], 'medium'),
    ]
    return [
        Conf(
            xenial_parent_config.distro,
            ['py3'] + variant,
            pyver=xenial_parent_config.pyver,
            cuda_version=xenial_parent_config.cuda_version,
            restrict_phases=['test'],
            gpu_resource=gpu,
            parent_build=xenial_parent_config,
            is_important=False,
        )
        for variant, gpu in variant_specs
    ]
def run(testdir, cli, unique_hook, schema, openapi3_base_url, hypothesis_max_examples, *args):
    """Invoke the schemathesis CLI on *schema* with unique-data contrib options enabled."""
    schema_file = testdir.make_openapi_schema_file(schema)
    max_examples = hypothesis_max_examples or 30
    base_args = (
        'run',
        str(schema_file),
        f'--base-url={openapi3_base_url}',
        '-cunique_test_cases',
        f'--hypothesis-max-examples={max_examples}',
        '--contrib-unique-data',
        '--data-generation-method=all',
        '--hypothesis-suppress-health-check=filter_too_much',
        '--hypothesis-phases=generate',
    )
    return cli.main(*base_args, *args, hooks=unique_hook.purebasename)
def parallel_hash(data, format):
    """Hash every entry of *data* in parallel and group uids by identical hash.

    `format` selects the per-entry hash function ('solid' | 'profile' |
    'loop' | 'model').  Entries whose hash is empty are skipped.  Returns
    {hash: [[uid], ...]}.
    """
    dispatch = {'solid': hash_solid, 'profile': hash_profile,
                'loop': hash_loop, 'model': hash_model}
    duplicate_groups = {}
    num_cpus = multiprocessing.cpu_count()
    # BUG FIX: the original never closed/joined the Pool, leaking worker
    # processes; the context manager terminates them on exit.
    with multiprocessing.Pool(num_cpus) as pool:
        for (h, uid) in tqdm(pool.imap(dispatch[format], data), total=len(data)):
            if (len(h) > 0):
                duplicate_groups.setdefault(h, []).append([uid])
    return duplicate_groups
def _precision_micro_3d(y_true: np.ndarray, y_pred: np.ndarray): sum_intersection = 0 sum_prediction_and_ancestors = 0 for (row_ground_truth, row_prediction) in zip(y_true, y_pred): ground_truth_set = set() predicted_set = set() for (ground_truth, prediction) in zip(row_ground_truth, row_prediction): ground_truth_set.update(ground_truth) predicted_set.update(prediction) ground_truth_set.discard('') predicted_set.discard('') sum_intersection = (sum_intersection + len(ground_truth_set.intersection(predicted_set))) sum_prediction_and_ancestors = (sum_prediction_and_ancestors + len(predicted_set)) return (sum_intersection / sum_prediction_and_ancestors)
def two_conv_model():
    """Build a tiny functional Keras model: Conv2D -> BatchNorm -> ReLU -> Conv2D."""
    model_input = Input(shape=INPUT_SHAPE)
    hidden = Conv2D(2, 3)(model_input)
    hidden = BatchNormalization()(hidden)
    hidden = ReLU()(hidden)
    model_output = Conv2D(2, 3)(hidden)
    return keras.Model(inputs=model_input, outputs=model_output)
def build_dataloader(cfg, dataset):
    """Create a DataLoader with the sampler matching cfg.distributed and the split.

    Distributed training uses group/plain distributed samplers; single-process
    training uses a GroupSampler, and evaluation gets no sampler at all.
    """
    is_train = dataset.which_set == 'train'
    if cfg.distributed:
        if is_train:
            sampler = DistributedGroupSampler(dataset, cfg.data.samples_per_gpu,
                                              cfg.world_size, cfg.rank, seed=cfg.seed)
        else:
            sampler = DistributedSampler(dataset, cfg.world_size, cfg.rank,
                                         shuffle=False, seed=cfg.seed)
    elif is_train:
        sampler = GroupSampler(dataset, cfg.data.samples_per_gpu)
    else:
        sampler = None
    # Seed workers deterministically only when a seed is configured.
    if cfg.seed is None:
        init_fn = None
    else:
        init_fn = partial(worker_init_fn, num_workers=cfg.data.workers_per_gpu,
                          rank=cfg.rank, seed=cfg.seed)
    return DataLoader(
        dataset,
        batch_size=cfg.data.samples_per_gpu,
        sampler=sampler,
        shuffle=False,
        batch_sampler=None,
        num_workers=cfg.data.workers_per_gpu,
        pin_memory=False,
        collate_fn=partial(collate, samples_per_gpu=cfg.data.samples_per_gpu),
        worker_init_fn=init_fn,
        drop_last=False,
        persistent_workers=cfg.distributed,
    )
class HyperbolicArcCore(BezierPath):
    """Accumulates Bezier control points for chains of hyperbolic geodesic
    segments, stitching each new segment onto ``self.path``."""

    def _bezier_path(self, z0, z1, model, first=False):
        """Append the Bezier control points of the geodesic from z0 to z1.

        Plots the geodesic in *model*, extracts its control points (straight
        segments yield a BezierPath, curved ones an arc), orients them from z0
        toward z1, and appends them to ``self.path`` so consecutive segments
        share endpoints.  Tracks ``self.last_plotted`` ('line'/'arc') so the
        next call knows how to splice.  Mutates self; returns None.
        """
        import numpy as np
        from sage.rings.infinity import infinity
        # Tolerance for matching plotted endpoints against z0/z1.
        EPSILON = (10 ** (- 5))
        arc0 = model.get_geodesic(z0, z1).plot()[0]
        # Straight geodesics plot as a BezierPath; curved ones expose their
        # control points via bezier_path().
        if isinstance(arc0, BezierPath):
            points = arc0.vertices
        else:
            points = arc0.bezier_path()[0].vertices
        # Reverse the control points when the plotted arc runs z1 -> z0:
        # either endpoint is at infinity and the finite end matches the other
        # input, or (both finite) the first point does not coincide with z0.
        if (((z0.is_infinity() or (z0 == infinity)) and (abs((CC(points[0][0], points[0][1]) - z1)) < EPSILON)) or ((z1.is_infinity() or (z1 == infinity)) and (abs((CC(points[1][0], points[1][1]) - z0)) < EPSILON)) or ((abs((CC(points[0][0], points[0][1]) - z0)) >= EPSILON) and (not (z0.is_infinity() or (z0 == infinity) or z1.is_infinity() or (z1 == infinity))))):
            points = np.flipud(points)
        if first:
            # Start a fresh path with the initial control-point group.
            self.path.append(points[0:4])
            if isinstance(arc0, BezierPath):
                self.last_plotted = 'line'
            else:
                # Remaining arc control points come in groups of three.
                N = 4
                while (N < len(points)):
                    self.path.append(points[N:(N + 3)])
                    N += 3
                self.last_plotted = 'arc'
        else:
            # Drop the duplicated join point shared with the previous segment.
            points = np.delete(points, 0, 0)
            N = 0
            if isinstance(arc0, BezierPath):
                self.path.append(points[0:1])
                self.last_plotted = 'line'
            elif (self.last_plotted == 'line'):
                # Previous segment was a line: arc groups can start directly.
                while (N < len(points)):
                    self.path.append(points[N:(N + 3)])
                    N += 3
                self.last_plotted = 'arc'
            else:
                # Previous segment was an arc: pad its last control group to
                # three points before appending new groups of three.
                tail = self.path[(- 1)]
                ltail = len(tail)
                while (ltail < 3):
                    self.path[(- 1)].append(points[N])
                    ltail += 1
                    N += 1
                while (N < len(points)):
                    self.path.append(points[N:(N + 3)])
                    N += 3
                self.last_plotted = 'arc'
        return
def pipeline_archetype5():
    """Build the augraphy pipeline reproducing document archetype 5:
    otsu-binarized faxed page with pen scribbles, photocopier edge noise,
    slight rotation/translation and triangle binder clips."""
    ink_phase = [
        Faxify(monochrome=1, monochrome_method='threshold_otsu', halftone=0),
        InkBleed(intensity_range=(0.3, 0.4), kernel_size=(3, 3), severity=(1.0, 1.0)),
        Scribbles(
            scribbles_type='text',
            scribbles_ink='pen',
            scribbles_location=(0.8, 0.8),
            scribbles_size_range=(320, 320),
            scribbles_count_range=(1, 1),
            scribbles_thickness_range=(2, 2),
            scribbles_brightness_change=[0],
            scribbles_color=(0, 0, 0),
            scribbles_text='Dr Architg \n \n Dr Lynn added that \n me to Dereene etan \n with you . He feel that \n we should stay away \n from the mystery that could \n help to .FTC on \n In attend the president \n of the committee \n Cloude ',
            # BUG FIX: the original font-URL string literal was corrupted in
            # extraction (it swallowed the following arguments).  Restored
            # with a handwriting font URL — TODO(review): confirm the
            # original value against upstream augraphy archetypes.
            scribbles_text_font='https://www.fontsquirrel.com/fonts/download/Windsong',
            scribbles_text_rotate_range=(15, 15),
        ),
        BadPhotoCopy(noise_type=2, noise_side='right', noise_iteration=(3, 3),
                     noise_size=(1, 3), noise_value=(0, 1),
                     noise_sparsity=(0.99, 0.99), noise_concentration=(0.3, 0.3),
                     blur_noise=0, wave_pattern=0, edge_effect=1),
        BadPhotoCopy(noise_type=2, noise_side='left', noise_iteration=(2, 2),
                     noise_size=(1, 2), noise_value=(0, 1),
                     noise_sparsity=(0.7, 0.7), noise_concentration=(0.1, 0.1),
                     blur_noise=0, wave_pattern=0, edge_effect=1),
        Geometric(randomize=0, padding=[0, 0.02, 0, 0]),
    ]
    paper_phase = [
        Geometric(padding=[0, 4, 0, 5], padding_type='fill',
                  padding_value=(0, 0, 0), randomize=0),
    ]
    post_phase = [
        Geometric(randomize=0, rotate_range=((- 2), (- 2))),
        Geometric(randomize=0, translation=(0.02, (- 0.05))),
        Geometric(padding=[6, 0, 0, 10], padding_type='fill',
                  padding_value=(0, 0, 0), randomize=0),
        BindingsAndFasteners(overlay_types='min', effect_type='triangle_clips',
                             width_range='random', height_range='random',
                             angle_range=(270, 270), ntimes=(2, 2),
                             nscales=(1.5, 1.5), edge='bottom',
                             edge_offset=(20, 20)),
    ]
    pipeline = AugraphyPipeline(ink_phase=ink_phase, paper_phase=paper_phase,
                                post_phase=post_phase)
    return pipeline
class Transformer(nn.Module):
    """Encoder-decoder transformer operating on batch-first (B, L, D) inputs,
    internally converted to the sequence-first (L, B, D) layout the encoder
    and decoder stacks expect."""

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6,
                 dim_feedforward=2048, dropout=0.1, activation='relu',
                 normalize_before=False, return_intermediate_dec=False):
        super().__init__()
        enc_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
                                            dropout, activation, normalize_before)
        # Pre-norm variants need a final LayerNorm on the encoder output.
        enc_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(enc_layer, num_encoder_layers, enc_norm)
        dec_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
                                            dropout, activation, normalize_before)
        self.decoder = TransformerDecoder(dec_layer, num_decoder_layers,
                                          nn.LayerNorm(d_model),
                                          return_intermediate=return_intermediate_dec)
        self._reset_parameters()
        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        # Xavier-init every weight matrix; dim() > 1 skips biases and norms.
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def forward(self, src, mask, query_embed, pos_embed):
        """Encode src (with positional embeddings), decode learned queries
        against the memory, and return (decoder states, encoder memory)."""
        bs, seq_len, dim = src.shape
        # (B, L, D) -> (L, B, D): sequence-first layout for the stacks.
        src_seq = src.permute(1, 0, 2)
        pos_seq = pos_embed.permute(1, 0, 2)
        # Broadcast the query embeddings across the batch.
        queries = query_embed.unsqueeze(1).repeat(1, bs, 1)
        tgt = torch.zeros_like(queries)
        memory = self.encoder(src_seq, src_key_padding_mask=mask, pos=pos_seq)
        hs = self.decoder(tgt, memory, memory_key_padding_mask=mask,
                          pos=pos_seq, query_pos=queries)
        return (hs.transpose(1, 2), memory.transpose(0, 1))
def rand_data(shape, dtype):
    """Return a random ndarray of *shape* for the dtype name given.

    'float32' draws uniform values in [0, 1); supported integer dtypes draw
    uniformly from [0, 256).  Raises Exception for any other dtype.
    """
    if (dtype == 'float32'):
        return np.random.random(shape).astype(np.float32)
    # BUG FIX: the original tested `dtype == 'int32' or 'uint32' or ...`,
    # which is always truthy (bare non-empty strings), so every dtype —
    # including unsupported ones — silently took the integer branch and the
    # error path below was unreachable.
    if dtype in ('int32', 'uint32', 'int16', 'uint16', 'int8', 'uint8'):
        return np.random.randint(0, 256, size=shape).astype(dtype)
    raise Exception('Not supported data type: {}!'.format(dtype))
class ExperimentWorkspace():
    """Context manager for running an experiment from a zip archive.

    On enter: recreates ./<experiment_name>, unpacks the archive into it,
    optionally pip-installs its requirements, then chdirs into it and adds it
    to sys.path.  On exit: restores the cwd, removes the working directory
    from disk and sys.path, and (optionally) deletes the archive.
    """

    def __init__(self, experiment_name: str, data_file_path: Path,
                 install_requirements: bool=False, remove_archive: bool=True) -> None:
        self.experiment_name = experiment_name
        self.data_file_path = data_file_path
        self.install_requirements = install_requirements
        self.cwd = Path.cwd()
        self.experiment_work_dir = (self.cwd / self.experiment_name)
        self.remove_archive = remove_archive

    def _install_requirements(self):
        """pip-install the unpacked requirements.txt, retrying on failure."""
        requirements_filename = (self.experiment_work_dir / 'requirements.txt')
        if requirements_filename.is_file():
            attempts = 10
            for _ in range(attempts):
                try:
                    check_call([executable, '-m', 'pip', 'install', '-r',
                                requirements_filename], shell=False)
                except Exception as exc:
                    logger.error(f'Failed to install requirements: {exc}')
                    time.sleep(5)
                else:
                    # Installed successfully: stop retrying.
                    break
        else:
            # BUG FIX: the original did `'No ' + requirements_filename + ...`,
            # concatenating str with a Path, which raises TypeError.
            logger.error(f'No {requirements_filename} file found.')

    def __enter__(self):
        # Always start from a clean working directory.
        if self.experiment_work_dir.exists():
            shutil.rmtree(self.experiment_work_dir, ignore_errors=True)
        os.makedirs(self.experiment_work_dir)
        shutil.unpack_archive(self.data_file_path, self.experiment_work_dir, format='zip')
        if self.install_requirements:
            self._install_requirements()
        os.chdir(self.experiment_work_dir)
        sys.path.append(str(self.experiment_work_dir))

    def __exit__(self, exc_type, exc_value, traceback):
        os.chdir(self.cwd)
        shutil.rmtree(self.experiment_work_dir, ignore_errors=True)
        if (str(self.experiment_work_dir) in sys.path):
            sys.path.remove(str(self.experiment_work_dir))
        if self.remove_archive:
            logger.debug(f'Exiting from the workspace context manager for {self.experiment_name} experiment')
            logger.debug(f'Archive still exists: {self.data_file_path.exists()}')
            self.data_file_path.unlink(missing_ok=False)
class LogLevel():
    """Named aliases for the standard :mod:`logging` severity levels."""

    # Bind all five severities in one unpacking so the ordering is explicit.
    (DEBUG, INFO, WARNING, ERROR, CRITICAL) = (
        logging.DEBUG,
        logging.INFO,
        logging.WARNING,
        logging.ERROR,
        logging.CRITICAL,
    )
def random_ndarray(n, p, seed):
    """Return an (n, p) array of uppercase ASCII letters drawn uniformly at random.

    NOTE(review): `random` here must be numpy.random — `choice()` is called
    with a `size=` keyword, which the stdlib random module does not accept.
    Confirm against this module's imports.
    """
    import string
    random.seed(seed)
    letters = list(string.ascii_uppercase)
    return random.choice(letters, size=(n, p))
def process_routing(_obj, _method, /, **kwargs):
    """Validate and route metadata (**kwargs) for *_obj*'s *_method*.

    Returns an object keyed by consumer name whose per-method entries hold
    the metadata dicts to forward.  `_obj` and `_method` are positional-only
    so they can never collide with routed metadata names in **kwargs.

    Raises AttributeError when `_obj` cannot provide routing, and TypeError
    for an unknown `_method`.
    """
    if ((not _routing_enabled()) and (not kwargs)):
        # Fast path: routing disabled and nothing to route — return a stub
        # whose every item/attribute lookup yields empty per-method dicts.
        class EmptyRequest():

            def get(self, name, default=None):
                return (default if default else {})

            def __getitem__(self, name):
                return Bunch(**{method: dict() for method in METHODS})

            def __getattr__(self, name):
                return Bunch(**{method: dict() for method in METHODS})
        return EmptyRequest()
    if (not (hasattr(_obj, 'get_metadata_routing') or isinstance(_obj, MetadataRouter))):
        raise AttributeError(f'The given object ({repr(_obj.__class__.__name__)}) needs to either implement the routing method `get_metadata_routing` or be a `MetadataRouter` instance.')
    if (_method not in METHODS):
        raise TypeError(f'Can only route and process input on these methods: {METHODS}, while the passed method is: {_method}.')
    # Resolve the router for _obj, verify the caller-supplied metadata is
    # requested/allowed, then split it per consumer and per method.
    request_routing = get_routing_for_object(_obj)
    request_routing.validate_metadata(params=kwargs, method=_method)
    routed_params = request_routing.route_params(params=kwargs, caller=_method)
    return routed_params
class SBMPDCSVD(SBMPATTERNEval, BaseSVDModelScheme):
    """Scheme wiring the SBM-PATTERN dataset to the DC-SVD transformer model,
    with class-weighted sparse cross-entropy for the imbalanced labels."""

    def get_default_config(self):
        cfg = super().get_default_config()
        cfg.update(
            dataset_name='sbm_pattern',
            # Per-class node counts used to weight the loss/metric.
            class_sizes=[979220, 209900],
            rlr_monitor='val_xent',
            save_best_monitor='val_xent',
        )
        return cfg

    def get_dataset_config(self, splits=['training', 'validation']):
        dataset_config, _ = super().get_dataset_config()
        return (dataset_config, SVDDataset)

    def get_model_config(self):
        model_config, _ = super().get_model_config()
        return (model_config, DCSVDTransformer)

    def get_loss(self):
        return [WeightedSparseXEntropyLoss(
            class_weights=None,
            class_sizes=self.config.class_sizes,
            from_logits=True,
            name='xentropy',
        )]

    def get_metrics(self):
        xent_metric = WeightedSparseXEntropyMetric(
            class_weights=None,
            class_sizes=self.config.class_sizes,
            from_logits=True,
            name='xent',
        )
        accuracy = metrics.SparseCategoricalAccuracy(name='acc')
        return [xent_metric, accuracy]
def test_dia_fields():
    """A DIA-layout sparse tensor should expose both offset index fields."""
    M, N, nnz, num_diags = (dace.symbol(s) for s in ('M', 'N', 'nnz', 'num_diags'))
    dia_matrix = dace.data.Tensor(
        dace.float32,
        (M, N),
        [
            (dace.data.TensorIndexDense(), num_diags),
            (dace.data.TensorIndexRange(), 0),
            (dace.data.TensorIndexOffset(), 1),
        ],
        nnz,
        'DIA_Matrix',
    )
    expected_fields = ['idx1_offset', 'idx2_offset']
    assert all((key in dia_matrix.members.keys()) for key in expected_fields)