class TorchSumBenchmark(op_bench.TorchBenchmarkBase):

    def init(self, M, N):
        self.input_one = torch.rand(M, N)
        self.set_module_name('sum')

    def jit_forward(self, iters):
        return torch_sumall(self.input_one, iters)
def get_pseudo_label_VTM_for_one_segment(args, node2step, step2node, VNM_matched_nodes, wikihow_step_task_occurrence, howto100m_step_task_occurrence): wikihow_tasks_this_segment = dict() howto100m_tasks_this_segment = dict() for node_id in VNM_matched_nodes: step_ids_this_node = node2step[node_id] for step_id in step_ids_this_node: wikihow_taskids = np.where((wikihow_step_task_occurrence[step_id] > 0))[0] for task_id in wikihow_taskids: if (task_id not in wikihow_tasks_this_segment): wikihow_tasks_this_segment[task_id] = wikihow_step_task_occurrence[(step_id, task_id)] else: wikihow_tasks_this_segment[task_id] = max(wikihow_tasks_this_segment[task_id], wikihow_step_task_occurrence[(step_id, task_id)]) howto100m_taskids = np.where((howto100m_step_task_occurrence[step_id] > 0))[0] for task_id in howto100m_taskids: if (task_id not in howto100m_tasks_this_segment): howto100m_tasks_this_segment[task_id] = howto100m_step_task_occurrence[(step_id, task_id)] else: howto100m_tasks_this_segment[task_id] = max(howto100m_tasks_this_segment[task_id], howto100m_step_task_occurrence[(step_id, task_id)]) howto100m_task_scores_sorted = sorted(howto100m_tasks_this_segment.items(), key=(lambda item: item[1]), reverse=True) results = dict() results['wikihow_tasks'] = list(wikihow_tasks_this_segment.keys()) (matched_tasks, matched_tasks_scores) = find_matching_of_a_segment_given_sorted_val_corres_idx([task_score for (task_id, task_score) in howto100m_task_scores_sorted], [task_id for (task_id, task_score) in howto100m_task_scores_sorted], criteria=args.label_find_tasks_criteria, threshold=args.label_find_tasks_thresh, topK=args.label_find_tasks_topK) results['howto100m_tasks'] = {'indices': matched_tasks, 'values': matched_tasks_scores} return results
class __DisplMixin():

    def displ_item(self, index):
        sample, ann = self.__getitem__(index), self.annotation[index]
        return OrderedDict({
            'file': ann['image'],
            'label': ann['caption'],
            'audio': sample['audio'],
            'audio_path': sample['audio_path'],
            'caption': sample['caption'],
        })
def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args): reset_optimizer = cfg.reset_optimizer reset_lr_scheduler = cfg.reset_lr_scheduler optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides) reset_meters = cfg.reset_meters reset_dataloader = cfg.reset_dataloader if ((cfg.finetune_from_model is not None) and (reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader)): raise ValueError('--finetune-from-model can not be set together with either --reset-optimizer or reset_lr_scheduler or reset_meters or reset_dataloader') checkpoint_paths = [] suffix = cfg.checkpoint_suffix if (cfg.restore_file == 'checkpoint_last.pt'): checkpoint_path = os.path.join(cfg.save_dir, 'checkpoint_last{}.pt'.format(suffix)) first_launch = (not PathManager.exists(checkpoint_path)) if ((cfg.finetune_from_model is not None) and first_launch): finetune_from_models = cfg.finetune_from_model.split(',') if all([PathManager.exists(finetune_from_model) for finetune_from_model in finetune_from_models]): checkpoint_paths = finetune_from_models reset_optimizer = True reset_lr_scheduler = True reset_meters = True reset_dataloader = True logger.info(f'loading pretrained model from {checkpoint_path}: optimizer, lr scheduler, meters, dataloader will be reset') else: raise ValueError(f'--funetune-from-model {cfg.finetune_from_model} does not exist') elif (cfg.model_parallel_size > 1): checkpoint_path = cfg.restore_file.replace('.pt', (suffix + '.pt')) else: checkpoint_path = cfg.restore_file if (len(checkpoint_paths) == 0): checkpoint_paths.append(checkpoint_path) if ((cfg.restore_file != 'checkpoint_last.pt') and cfg.finetune_from_model): raise ValueError(('--finetune-from-model and --restore-file (non-default value) can not be specified together: ' + str(cfg))) for checkpoint_path in checkpoint_paths: extra_state = trainer.load_checkpoint(checkpoint_path, reset_optimizer, reset_lr_scheduler, optimizer_overrides, reset_meters=reset_meters) if ((extra_state is not None) and ('best' in extra_state) and (not reset_optimizer) and (not reset_meters)): save_checkpoint.best = extra_state['best'] if ((extra_state is not None) and (not reset_dataloader)): itr_state = extra_state['train_iterator'] epoch_itr = trainer.get_train_iterator(epoch=itr_state['epoch'], load_dataset=True, **passthrough_args) epoch_itr.load_state_dict(itr_state) else: epoch_itr = trainer.get_train_iterator(epoch=1, load_dataset=True, **passthrough_args) trainer.lr_step(epoch_itr.epoch) return (extra_state, epoch_itr)
class DownloadTestCase(unittest.TestCase):

    def test_download_mirror_list(self):
        tmp = tempfile.NamedTemporaryFile()
        tmp.close()
        progress = StringIO()
        Download(MirrorList.URL, tmp.name, progress=progress).run()
        self.assertEqual(progress.getvalue(), '[]\n')
        with open(tmp.name, 'r') as f:
            content = f.read()
        self.assertTrue(content.startswith('# Sage Mirror List'))

    def test_error(self):
        # URL literal was truncated in extraction; reconstructed from the 404
        # message asserted in assertIsNotFoundError below.
        URL = 'http://files.sagemath.org/sage_bootstrap/this_url_does_not_exist'
        progress = StringIO()
        download = Download(URL, progress=progress)
        log = CapturedLog()

        def action():
            with log:
                download.run()

        self.assertRaises(IOError, action)
        self.assertIsNotFoundError(log.messages())
        self.assertEqual(
            progress.getvalue(),
            '[xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]\n')

    def test_ignore_errors(self):
        URL = 'http://files.sagemath.org/sage_bootstrap/this_url_does_not_exist'
        with CapturedLog() as log:
            Download(URL, progress=False, ignore_errors=True).run()
        self.assertIsNotFoundError(log.messages())

    def assertIsNotFoundError(self, messages):
        self.assertEqual(len(messages), 1)
        self.assertEqual(messages[0][0], 'ERROR')
        self.assertTrue(messages[0][1].startswith('[Errno'))
        self.assertTrue(messages[0][1].endswith(
            "[Errno 404] Not Found: '//files.sagemath.org/sage_bootstrap/this_url_does_not_exist'"))
def encode_dataset(dataset, vocab, tokenizer, test=False): questions = [] sparqls = [] for item in tqdm(dataset): question = item['question'] questions.append(question) if (not test): sparql = item['sparql'] sparqls.append(sparql) sequences = (questions + sparqls) encoded_inputs = tokenizer(sequences, padding=True) print(encoded_inputs.keys()) print(encoded_inputs['input_ids'][0]) print(tokenizer.decode(encoded_inputs['input_ids'][0])) print(tokenizer.decode(encoded_inputs['input_ids'][(- 1)])) max_seq_length = len(encoded_inputs['input_ids'][0]) assert (max_seq_length == len(encoded_inputs['input_ids'][(- 1)])) print(max_seq_length) questions = [] sparqls = [] choices = [] answers = [] for item in tqdm(dataset): question = item['question'] questions.append(question) _ = [vocab['answer_token_to_idx'][w] for w in item['choices']] choices.append(_) if (not test): sparql = item['sparql'] sparqls.append(sparql) answers.append(vocab['answer_token_to_idx'].get(item['answer'])) input_ids = tokenizer.batch_encode_plus(questions, max_length=max_seq_length, pad_to_max_length=True, truncation=True) source_ids = np.array(input_ids['input_ids'], dtype=np.int32) source_mask = np.array(input_ids['attention_mask'], dtype=np.int32) if (not test): target_ids = tokenizer.batch_encode_plus(sparqls, max_length=max_seq_length, pad_to_max_length=True, truncation=True) target_ids = np.array(target_ids['input_ids'], dtype=np.int32) else: target_ids = np.array([], dtype=np.int32) choices = np.array(choices, dtype=np.int32) answers = np.array(answers, dtype=np.int32) return (source_ids, source_mask, target_ids, choices, answers)
def load_xml_info(gt_file, img_info):
    obj = ET.parse(gt_file)
    anno_info = []
    for image in obj.getroot():
        for box in image:
            h = box.attrib['height']
            w = box.attrib['width']
            x = box.attrib['left']
            y = box.attrib['top']
            segs = box[1].text
            pts = segs.strip().split(',')
            pts = [int(x) for x in pts]
            assert len(pts) == 28
            iscrowd = 0
            category_id = 1
            bbox = [int(x), int(y), int(w), int(h)]
            coordinates = np.array(pts).reshape(-1, 2)
            polygon = Polygon(coordinates)
            area = polygon.area
            anno = dict(
                iscrowd=iscrowd,
                category_id=category_id,
                bbox=bbox,
                area=area,
                segmentation=[pts])
            anno_info.append(anno)
    img_info.update(anno_info=anno_info)
    return img_info
def parse_trace(trace_file):
    jobs = []
    arrival_times = []
    with open(trace_file, 'r') as f:
        for line in f:
            (job_type, command, working_directory, num_steps_arg,
             needs_data_dir, total_steps, scale_factor, priority_weight,
             SLO, arrival_time) = line.split('\t')
            assert int(scale_factor) >= 1
            jobs.append(Job(
                job_id=None,
                job_type=job_type,
                command=command,
                working_directory=working_directory,
                needs_data_dir=bool(int(needs_data_dir)),
                num_steps_arg=num_steps_arg,
                total_steps=int(total_steps),
                duration=None,
                scale_factor=int(scale_factor),
                priority_weight=float(priority_weight),
                SLO=float(SLO)))
            arrival_times.append(float(arrival_time))
    return (jobs, arrival_times)
def K0_func(SUK, A, prec=106):
    R = RealField(prec)
    K0 = R(1)
    c3 = c3_func(SUK, prec)
    for v_l in SUK.primes():
        e_l = v_l.residue_class_degree()
        Norm_v_l = v_l.absolute_norm()
        c5_l = c3 / (e_l * R(Norm_v_l).log())
        c8_l = Yu_bound(SUK, v_l, prec)
        K0_l = (2 * c8_l / (e_l * c5_l)) * R(c8_l / (e_l * c5_l)).log()
        K0 = max(K0, K0_l)
    return K0
def augment_parser(): sepp.config.set_main_config_path(os.path.expanduser('~/.sepp/upp.config')) parser = sepp.config.get_parser() parser.description = 'This script runs the UPP algorithm on set of sequences. A backbone alignment and tree can be given as input. If none is provided, a backbone will be automatically generated.' decompGroup = parser.groups['decompGroup'] decompGroup.__dict__['description'] = ' '.join(['These options', 'determine the alignment decomposition size, backbone size, and how to decompose the backbone set.']) decompGroup.add_argument('-A', '--alignmentSize', type=int, dest='alignment_size', metavar='N', default=10, help='max alignment subset size of N [default: 10]') decompGroup.add_argument('-S', '--decomp_strategy', type=valid_decomp_strategy, dest='decomp_strategy', metavar='DECOMP', default='hierarchical', help='decomposition strategy [default: ensemble of HMMs (hierarchical)]') inputGroup = parser.groups['inputGroup'] inputGroup.add_argument('-s', '--sequence_file', type=argparse.FileType('r'), dest='sequence_file', metavar='SEQ', default=None, help='Unaligned sequence file. If no backbone tree and alignment is given, the sequence file will be randomly split into a backbone set (size set to B) and query set (remaining sequences), [default: None]') inputGroup.add_argument('-c', '--config', dest='config_file', metavar='CONFIG', type=argparse.FileType('r'), help='A config file, including options used to run UPP. Options provided as command line arguments overwrite config file values for those options. [default: %(default)s]') inputGroup.add_argument('-t', '--tree', dest='tree_file', metavar='TREE', type=argparse.FileType('r'), help='Input tree file (newick format) [default: %(default)s]') inputGroup.add_argument('-a', '--alignment', dest='alignment_file', metavar='ALIGN', type=argparse.FileType('r'), help='Aligned fasta file [default: %(default)s]') parser.add_argument_group('UPP Options'.upper(), 'These options set settings specific to UPP') seppGroup = parser.add_argument_group('SEPP Options'.upper(), 'These options set settings specific to SEPP and are not used for UPP.') seppGroup.add_argument('-P', '--placementSize', type=int, dest='placement_size', metavar='N', default=None, help='max placement subset size of N [default: 10%% of the total number of taxa]') seppGroup.add_argument('-r', '--raxml', dest='info_file', metavar='RAXML', type=argparse.FileType('r'), help='RAxML_info file including model parameters, generated by RAxML.[default: %(default)s]') seppGroup.add_argument('-f', '--fragment', dest='fragment_file', metavar='FRAG', type=argparse.FileType('r'), help='fragment file [default: %(default)s]')
def test_nested_function_method():
    # Decorators were dropped in extraction and are assumed here:
    # TestClass(5) needs a generated __init__, and nfm.f() implies a dace.program.
    @dataclass
    class TestClass:
        some_field: int

        def some_method(self, q):
            return q * self.some_field

    obj = TestClass(5)

    def nested(a):
        return (a + 1) + obj.some_method(a)

    @dace.program
    def nfm(a: dace.float64[20]):
        return nested(a)

    A = np.random.rand(20)
    ref = nfm.f(A)
    daceres = nfm(A)
    assert np.allclose(ref, daceres)
class FCompiler(CCompiler): distutils_vars = EnvironmentConfig(distutils_section='config_fc', noopt=(None, None, 'noopt', str2bool, False), noarch=(None, None, 'noarch', str2bool, False), debug=(None, None, 'debug', str2bool, False), verbose=(None, None, 'verbose', str2bool, False)) command_vars = EnvironmentConfig(distutils_section='config_fc', compiler_f77=('exe.compiler_f77', 'F77', 'f77exec', None, False), compiler_f90=('exe.compiler_f90', 'F90', 'f90exec', None, False), compiler_fix=('exe.compiler_fix', 'F90', 'f90exec', None, False), version_cmd=('exe.version_cmd', None, None, None, False), linker_so=('exe.linker_so', 'LDSHARED', 'ldshared', None, False), linker_exe=('exe.linker_exe', 'LD', 'ld', None, False), archiver=(None, 'AR', 'ar', None, False), ranlib=(None, 'RANLIB', 'ranlib', None, False)) flag_vars = EnvironmentConfig(distutils_section='config_fc', f77=('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True), f90=('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True), free=('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True), fix=('flags.fix', None, None, flaglist, False), opt=('flags.opt', 'FOPT', 'opt', flaglist, True), opt_f77=('flags.opt_f77', None, None, flaglist, False), opt_f90=('flags.opt_f90', None, None, flaglist, False), arch=('flags.arch', 'FARCH', 'arch', flaglist, False), arch_f77=('flags.arch_f77', None, None, flaglist, False), arch_f90=('flags.arch_f90', None, None, flaglist, False), debug=('flags.debug', 'FDEBUG', 'fdebug', flaglist, True), debug_f77=('flags.debug_f77', None, None, flaglist, False), debug_f90=('flags.debug_f90', None, None, flaglist, False), flags=('self.get_flags', 'FFLAGS', 'fflags', flaglist, True), linker_so=('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True), linker_exe=('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True), ar=('flags.ar', 'ARFLAGS', 'arflags', flaglist, True)) language_map = {'.f': 'f77', '.for': 'f77', '.F': 'f77', '.ftn': 'f77', '.f77': 'f77', '.f90': 'f90', '.F90': 'f90', '.f95': 'f90'} language_order = ['f90', 'f77'] compiler_type = None compiler_aliases = () version_pattern = None possible_executables = [] executables = {'version_cmd': ['f77', '-v'], 'compiler_f77': ['f77'], 'compiler_f90': ['f90'], 'compiler_fix': ['f90', '-fixed'], 'linker_so': ['f90', '-shared'], 'linker_exe': ['f90'], 'archiver': ['ar', '-cr'], 'ranlib': None} suggested_f90_compiler = None compile_switch = '-c' object_switch = '-o ' library_switch = '-o ' module_dir_switch = None module_include_switch = '-I' pic_flags = [] src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] obj_extension = '.o' shared_lib_extension = get_shared_lib_extension() static_lib_extension = '.a' static_lib_format = 'lib%s%s' shared_lib_format = '%s%s' exe_extension = '' _exe_cache = {} _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', 'ranlib'] c_compiler = None extra_f77_compile_args = [] extra_f90_compile_args = [] def __init__(self, *args, **kw): CCompiler.__init__(self, *args, **kw) self.distutils_vars = self.distutils_vars.clone(self._environment_hook) self.command_vars = self.command_vars.clone(self._environment_hook) self.flag_vars = self.flag_vars.clone(self._environment_hook) self.executables = self.executables.copy() for e in self._executable_keys: if (e not in self.executables): self.executables[e] = None self._is_customised = False def __copy__(self): obj = self.__new__(self.__class__) obj.__dict__.update(self.__dict__) obj.distutils_vars = 
obj.distutils_vars.clone(obj._environment_hook) obj.command_vars = obj.command_vars.clone(obj._environment_hook) obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) obj.executables = obj.executables.copy() return obj def copy(self): return self.__copy__() def _command_property(key): def fget(self): assert self._is_customised return self.executables[key] return property(fget=fget) version_cmd = _command_property('version_cmd') compiler_f77 = _command_property('compiler_f77') compiler_f90 = _command_property('compiler_f90') compiler_fix = _command_property('compiler_fix') linker_so = _command_property('linker_so') linker_exe = _command_property('linker_exe') archiver = _command_property('archiver') ranlib = _command_property('ranlib') def set_executable(self, key, value): self.set_command(key, value) def set_commands(self, **kw): for (k, v) in kw.items(): self.set_command(k, v) def set_command(self, key, value): if (not (key in self._executable_keys)): raise ValueError(("unknown executable '%s' for class %s" % (key, self.__class__.__name__))) if is_string(value): value = split_quoted(value) assert ((value is None) or is_sequence_of_strings(value[1:])), (key, value) self.executables[key] = value def find_executables(self): assert self._is_customised exe_cache = self._exe_cache def cached_find_executable(exe): if (exe in exe_cache): return exe_cache[exe] fc_exe = find_executable(exe) exe_cache[exe] = exe_cache[fc_exe] = fc_exe return fc_exe def verify_command_form(name, value): if ((value is not None) and (not is_sequence_of_strings(value))): raise ValueError(('%s value %r is invalid in class %s' % (name, value, self.__class__.__name__))) def set_exe(exe_key, f77=None, f90=None): cmd = self.executables.get(exe_key, None) if (not cmd): return None exe_from_environ = getattr(self.command_vars, exe_key) if (not exe_from_environ): possibles = ([f90, f77] + self.possible_executables) else: possibles = ([exe_from_environ] + self.possible_executables) seen = set() unique_possibles = [] for e in possibles: if (e == '<F77>'): e = f77 elif (e == '<F90>'): e = f90 if ((not e) or (e in seen)): continue seen.add(e) unique_possibles.append(e) for exe in unique_possibles: fc_exe = cached_find_executable(exe) if fc_exe: cmd[0] = fc_exe return fc_exe self.set_command(exe_key, None) return None ctype = self.compiler_type f90 = set_exe('compiler_f90') if (not f90): f77 = set_exe('compiler_f77') if f77: log.warn(('%s: no Fortran 90 compiler found' % ctype)) else: raise CompilerNotFound(('%s: f90 nor f77' % ctype)) else: f77 = set_exe('compiler_f77', f90=f90) if (not f77): log.warn(('%s: no Fortran 77 compiler found' % ctype)) set_exe('compiler_fix', f90=f90) set_exe('linker_so', f77=f77, f90=f90) set_exe('linker_exe', f77=f77, f90=f90) set_exe('version_cmd', f77=f77, f90=f90) set_exe('archiver') set_exe('ranlib') def update_executables(self): pass def get_flags(self): return ([] + self.pic_flags) def _get_command_flags(self, key): cmd = self.executables.get(key, None) if (cmd is None): return [] return cmd[1:] def get_flags_f77(self): return self._get_command_flags('compiler_f77') def get_flags_f90(self): return self._get_command_flags('compiler_f90') def get_flags_free(self): return [] def get_flags_fix(self): return self._get_command_flags('compiler_fix') def get_flags_linker_so(self): return self._get_command_flags('linker_so') def get_flags_linker_exe(self): return self._get_command_flags('linker_exe') def get_flags_ar(self): return self._get_command_flags('archiver') def get_flags_opt(self): return [] 
def get_flags_arch(self): return [] def get_flags_debug(self): return [] get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug def get_libraries(self): return self.libraries[:] def get_library_dirs(self): return self.library_dirs[:] def get_version(self, force=False, ok_status=[0]): assert self._is_customised version = CCompiler.get_version(self, force=force, ok_status=ok_status) if (version is None): raise CompilerNotFound() return version def customize(self, dist=None): log.info(('customize %s' % self.__class__.__name__)) self._is_customised = True self.distutils_vars.use_distribution(dist) self.command_vars.use_distribution(dist) self.flag_vars.use_distribution(dist) self.update_executables() self.find_executables() noopt = self.distutils_vars.get('noopt', False) noarch = self.distutils_vars.get('noarch', noopt) debug = self.distutils_vars.get('debug', False) f77 = self.command_vars.compiler_f77 f90 = self.command_vars.compiler_f90 f77flags = [] f90flags = [] freeflags = [] fixflags = [] if f77: f77 = _shell_utils.NativeParser.split(f77) f77flags = self.flag_vars.f77 if f90: f90 = _shell_utils.NativeParser.split(f90) f90flags = self.flag_vars.f90 freeflags = self.flag_vars.free fix = self.command_vars.compiler_fix if fix: fix = _shell_utils.NativeParser.split(fix) fixflags = (self.flag_vars.fix + f90flags) (oflags, aflags, dflags) = ([], [], []) def get_flags(tag, flags): flags.extend(getattr(self.flag_vars, tag)) this_get = getattr(self, ('get_flags_' + tag)) for (name, c, flagvar) in [('f77', f77, f77flags), ('f90', f90, f90flags), ('f90', fix, fixflags)]: t = ('%s_%s' % (tag, name)) if (c and (this_get is not getattr(self, ('get_flags_' + t)))): flagvar.extend(getattr(self.flag_vars, t)) if (not noopt): get_flags('opt', oflags) if (not noarch): get_flags('arch', aflags) if debug: get_flags('debug', dflags) fflags = (((self.flag_vars.flags + dflags) + oflags) + aflags) if f77: self.set_commands(compiler_f77=((f77 + f77flags) + fflags)) if f90: self.set_commands(compiler_f90=(((f90 + freeflags) + f90flags) + fflags)) if fix: self.set_commands(compiler_fix=((fix + fixflags) + fflags)) linker_so = self.linker_so if linker_so: linker_so_flags = self.flag_vars.linker_so if sys.platform.startswith('aix'): python_lib = get_python_lib(standard_lib=1) ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') python_exp = os.path.join(python_lib, 'config', 'python.exp') linker_so = (([ld_so_aix] + linker_so) + [('-bI:' + python_exp)]) self.set_commands(linker_so=(linker_so + linker_so_flags)) linker_exe = self.linker_exe if linker_exe: linker_exe_flags = self.flag_vars.linker_exe self.set_commands(linker_exe=(linker_exe + linker_exe_flags)) ar = self.command_vars.archiver if ar: arflags = self.flag_vars.ar self.set_commands(archiver=([ar] + arflags)) self.set_library_dirs(self.get_library_dirs()) self.set_libraries(self.get_libraries()) def dump_properties(self): props = [] for key in (list(self.executables.keys()) + ['version', 'libraries', 'library_dirs', 'object_switch', 'compile_switch']): if hasattr(self, key): v = getattr(self, key) props.append((key, None, ('= ' + repr(v)))) props.sort() pretty_printer = FancyGetopt(props) for l in pretty_printer.generate_help(('%s instance properties:' % self.__class__.__name__)): if (l[:4] == ' --'): l = (' ' + l[4:]) print(l) def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): src_flags = {} if (is_f_file(src) and 
(not has_f90_header(src))): flavor = ':f77' compiler = self.compiler_f77 src_flags = get_f77flags(src) extra_compile_args = (self.extra_f77_compile_args or []) elif is_free_format(src): flavor = ':f90' compiler = self.compiler_f90 if (compiler is None): raise DistutilsExecError(('f90 not supported by %s needed for %s' % (self.__class__.__name__, src))) extra_compile_args = (self.extra_f90_compile_args or []) else: flavor = ':fix' compiler = self.compiler_fix if (compiler is None): raise DistutilsExecError(('f90 (fixed) not supported by %s needed for %s' % (self.__class__.__name__, src))) extra_compile_args = (self.extra_f90_compile_args or []) if (self.object_switch[(- 1)] == ' '): o_args = [self.object_switch.strip(), obj] else: o_args = [(self.object_switch.strip() + obj)] assert self.compile_switch.strip() s_args = [self.compile_switch, src] if extra_compile_args: log.info(('extra %s options: %r' % (flavor[1:], ' '.join(extra_compile_args)))) extra_flags = src_flags.get(self.compiler_type, []) if extra_flags: log.info(('using compile options from source: %r' % ' '.join(extra_flags))) command = ((((((compiler + cc_args) + extra_flags) + s_args) + o_args) + extra_postargs) + extra_compile_args) display = ('%s: %s' % ((os.path.basename(compiler[0]) + flavor), src)) try: self.spawn(command, display=display) except DistutilsExecError: msg = str(get_exception()) raise CompileError(msg) def module_options(self, module_dirs, module_build_dir): options = [] if (self.module_dir_switch is not None): if (self.module_dir_switch[(- 1)] == ' '): options.extend([self.module_dir_switch.strip(), module_build_dir]) else: options.append((self.module_dir_switch.strip() + module_build_dir)) else: print(('XXX: module_build_dir=%r option ignored' % module_build_dir)) print('XXX: Fix module_dir_switch for ', self.__class__.__name__) if (self.module_include_switch is not None): for d in ([module_build_dir] + module_dirs): options.append(('%s%s' % (self.module_include_switch, d))) else: print(('XXX: module_dirs=%r option ignored' % module_dirs)) print('XXX: Fix module_include_switch for ', self.__class__.__name__) return options def library_option(self, lib): return ('-l' + lib) def library_dir_option(self, dir): return ('-L' + dir) def link(self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): (objects, output_dir) = self._fix_object_args(objects, output_dir) (libraries, library_dirs, runtime_library_dirs) = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries) if is_string(output_dir): output_filename = os.path.join(output_dir, output_filename) elif (output_dir is not None): raise TypeError("'output_dir' must be a string or None") if self._need_link(objects, output_filename): if (self.library_switch[(- 1)] == ' '): o_args = [self.library_switch.strip(), output_filename] else: o_args = [(self.library_switch.strip() + output_filename)] if is_string(self.objects): ld_args = (objects + [self.objects]) else: ld_args = (objects + self.objects) ld_args = ((ld_args + lib_opts) + o_args) if debug: ld_args[:0] = ['-g'] if extra_preargs: ld_args[:0] = extra_preargs if extra_postargs: ld_args.extend(extra_postargs) self.mkpath(os.path.dirname(output_filename)) if (target_desc == CCompiler.EXECUTABLE): linker = self.linker_exe[:] else: linker = 
self.linker_so[:] command = (linker + ld_args) try: self.spawn(command) except DistutilsExecError: msg = str(get_exception()) raise LinkError(msg) else: log.debug('skipping %s (up-to-date)', output_filename) def _environment_hook(self, name, hook_name): if (hook_name is None): return None if is_string(hook_name): if hook_name.startswith('self.'): hook_name = hook_name[5:] hook = getattr(self, hook_name) return hook() elif hook_name.startswith('exe.'): hook_name = hook_name[4:] var = self.executables[hook_name] if var: return var[0] else: return None elif hook_name.startswith('flags.'): hook_name = hook_name[6:] hook = getattr(self, ('get_flags_' + hook_name)) return hook() else: return hook_name() def can_ccompiler_link(self, ccompiler): return True def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): raise NotImplementedError()
class Scalar(nn.Module):

    def __init__(self, init_value):
        super().__init__()
        self.constant = nn.Parameter(torch.tensor(init_value, dtype=torch.float32))

    def forward(self):
        return self.constant
class RMSNorm(torch.nn.Module):

    def __init__(self, hidden_size, eps=1e-05, device=None, dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.eps = eps
        self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
        self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.ones_(self.weight)

    def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False):
        return rms_norm_fn(
            x,
            self.weight,
            self.bias,
            residual=residual,
            eps=self.eps,
            prenorm=prenorm,
            residual_in_fp32=residual_in_fp32,
        )
def config_init(dataset):
    if dataset == 'mnist':
        return (784, 3000, 10, 0.002, 0.002, 10, 0.9, 0.9, 1, 'sigmoid')
    if dataset == 'reuters10k':
        return (2000, 15, 4, 0.002, 0.002, 5, 0.5, 0.5, 1, 'linear')
    if dataset == 'har':
        return (561, 120, 6, 0.002, 2e-05, 10, 0.9, 0.9, 5, 'linear')
def evaluate_style_transfer(args):
    scorer = StyleTransferScorer(align=args.align)
    scores = []
    for input_sent, hypo in zip(open(args.input_sent).readlines(), open(args.hypo).readlines()):
        input_sent, hypo = input_sent.strip(), hypo.strip()
        if input_sent == '' and hypo == '':
            continue
        scores.append(scorer.score(
            input_sent=input_sent,
            hypo=hypo,
            aspect=args.aspect,
            remove_stopwords=args.remove_stopwords))
    return scores
def count_sgx_standard(pods) -> Tuple[int, int]:
    i1, i2 = tee(pods)
    standard_pods = filterfalse(cluster.pod_requests_sgx, i1)
    sgx_pods = filter(cluster.pod_requests_sgx, i2)
    return (len(list(standard_pods)), len(list(sgx_pods)))
class EnvironmentCommand(BaseTransformersCLICommand): def register_subcommand(parser: ArgumentParser): download_parser = parser.add_parser('env') download_parser.set_defaults(func=info_command_factory) def run(self): safetensors_version = 'not installed' if is_safetensors_available(): import safetensors safetensors_version = safetensors.__version__ elif (importlib.util.find_spec('safetensors') is not None): import safetensors safetensors_version = f'{safetensors.__version__} but is ignored because of PyTorch version too old.' pt_version = 'not installed' pt_cuda_available = 'NA' if is_torch_available(): import torch pt_version = torch.__version__ pt_cuda_available = torch.cuda.is_available() tf_version = 'not installed' tf_cuda_available = 'NA' if is_tf_available(): import tensorflow as tf tf_version = tf.__version__ try: tf_cuda_available = tf.test.is_gpu_available() except AttributeError: tf_cuda_available = bool(tf.config.list_physical_devices('GPU')) flax_version = 'not installed' jax_version = 'not installed' jaxlib_version = 'not installed' jax_backend = 'NA' if is_flax_available(): import flax import jax import jaxlib flax_version = flax.__version__ jax_version = jax.__version__ jaxlib_version = jaxlib.__version__ jax_backend = jax.lib.xla_bridge.get_backend().platform info = {'`transformers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'Huggingface_hub version': huggingface_hub.__version__, 'Safetensors version': f'{safetensors_version}', 'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})', 'Tensorflow version (GPU?)': f'{tf_version} ({tf_cuda_available})', 'Flax version (CPU?/GPU?/TPU?)': f'{flax_version} ({jax_backend})', 'Jax version': f'{jax_version}', 'JaxLib version': f'{jaxlib_version}', 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>'} print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n') print(self.format_dict(info)) return info def format_dict(d): return ('\n'.join([f'- {prop}: {val}' for (prop, val) in d.items()]) + '\n')
def log_memory_usage(sample_interval: float=1.0, log_individual_devices: bool=False): directory = '/dev/shm' if (not os.path.exists(directory)): directory = tempfile.gettempdir() tempfile_name = os.path.join(directory, f'memory_usage_{os.getpid()}.prof') def inner(): import posix import time while True: jax.profiler.save_device_memory_profile(f'{tempfile_name}.new') posix.rename(f'{tempfile_name}.new', tempfile_name) time.sleep(sample_interval) thread = threading.Thread(target=inner, daemon=True) thread.start() def log_memory_usage(step: StepInfo): process = subprocess.run(args=f'go tool pprof -tags {tempfile_name}'.split(' '), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) if (process.returncode != 0): warnings.warn('failed to run pprof. Is go installed?') return output = process.stdout.decode('utf-8') (per_device, by_kind) = output.split('kind: Total ') regex = re.compile('^(\\d+\\.\\d+[a-zA-Z]+)') match = regex.search(by_kind) if match: memory_usage = humanfriendly.parse_size(match.group(1)) wandb.log({'memory/total': (memory_usage / 1000000.0)}, step=step.step) regex = re.compile('([\\d.]+[a-zA-Z]+) \\(([\\d.]+)%\\): ([\\w\\d:_]+)') if log_individual_devices: for match in regex.finditer(per_device): memory_usage = humanfriendly.parse_size(match.group(1)) device_name = match.group(3) wandb.log({f'memory/device/{device_name}': (memory_usage / 1000000.0)}, step=step.step) for match in regex.finditer(by_kind): memory_usage = match.group(1) memory_usage = humanfriendly.parse_size(memory_usage) wandb.log({f'memory/{match.group(3)}': (memory_usage / 1000000.0)}, step=step.step) return log_memory_usage
class SUMOActor(): def __init__(self, actor_id: str, traci): self._state: ActorState = ActorState() self._actor_id: str = actor_id self._outdated: bool = True self.traci = traci def flag_outdated(self) -> None: self._outdated = True def state(self) -> ActorState: if (not self._outdated): return self._state self._outdated = False results = self.traci.vehicle.getSubscriptionResults(self._actor_id) self._state = ActorState(veh_id=self._actor_id, veh_type=results[traci.constants.VAR_TYPE], veh_class=results[traci.constants.VAR_VEHICLECLASS], length=results[traci.constants.VAR_LENGTH], width=results[traci.constants.VAR_WIDTH], height=results[traci.constants.VAR_HEIGHT], location=np.array(list(results[traci.constants.VAR_POSITION3D])), rotation=np.array([results[traci.constants.VAR_SLOPE], results[traci.constants.VAR_ANGLE], 0.0]), lane_position_lat=results[traci.constants.VAR_LANEPOSITION_LAT], velocity=results[traci.constants.VAR_SPEED], velocity_lat=results[traci.constants.VAR_SPEED_LAT], velocity_min=(None if (self._state is None) else self._state.velocity_min), velocity_max=results[traci.constants.VAR_MAXSPEED], accel=results[traci.constants.VAR_ACCELERATION], accel_max=results[traci.constants.VAR_ACCEL], decel_max=results[traci.constants.VAR_DECEL], signals=results[traci.constants.VAR_SIGNALS], extent=np.array([(results[traci.constants.VAR_LENGTH] / 2.0), (results[traci.constants.VAR_WIDTH] / 2.0), (results[traci.constants.VAR_HEIGHT] / 2.0)]), edge_id=results[traci.constants.VAR_ROAD_ID], lane_id=results[traci.constants.VAR_LANE_ID], lane_index=results[traci.constants.VAR_LANE_INDEX], sumo_repr=True).to_universal() return self._state def state(self, new_state: ActorState) -> None: if (new_state.sumo_repr != self._state.sumo_repr): if new_state.sumo_repr: new_state = new_state.to_universal() else: new_state = new_state.to_sumo() assert (new_state.sumo_repr == self._state.sumo_repr) new_state = deepcopy(new_state) new_state_sumo = new_state.to_sumo() own_state_sumo = deepcopy(self.state).to_sumo() for field in fields(new_state): attr_name: str = field.name old_val = getattr(self._state, attr_name) new_val = getattr(new_state, attr_name) if (new_val is None): continue elif (isinstance(new_val, np.ndarray) and np.all((old_val == new_val))): continue elif (isinstance(new_val, float) or (isinstance(new_val, str) and (old_val == new_val))): continue elif isinstance(new_val, List): raise ValueError new_val_sumo = getattr(new_state_sumo, attr_name) if (attr_name == 'veh_id'): continue if (attr_name == 'veh_type'): continue elif (attr_name == 'veh_class'): self.traci.vehicle.setVehicleClass(self._actor_id, new_val_sumo) elif (attr_name == 'length'): self.traci.vehicle.setLength(self._actor_id, new_val_sumo) elif (attr_name == 'width'): self.traci.vehicle.setWidth(self._actor_id, new_val_sumo) elif (attr_name == 'height'): self.traci.vehicle.setHeight(self._actor_id, new_val_sumo) pass elif ((attr_name == 'location') or (attr_name == 'rotation')): location = (new_state_sumo.location if (new_state_sumo.location is not None) else own_state_sumo.location) rotation = (new_state_sumo.rotation if (new_state_sumo.rotation is not None) else own_state_sumo.rotation) edge_id = (new_state_sumo.edge_id if (new_state_sumo.edge_id is not None) else '') lane_index = (new_state_sumo.lane_index if (new_state_sumo.lane_index is not None) else (- 1)) self.traci.vehicle.moveToXY(self._actor_id, edge_id, lane_index, location[0], location[1], angle=rotation[1], keepRoute=1, matchThreshold=200.0) elif (attr_name == 
'velocity'): self.traci.vehicle.setSpeed(self._actor_id, new_val_sumo) elif (attr_name == 'velocity_lat'): pass elif (attr_name == 'velocity_max'): self.traci.vehicle.setMaxSpeed(self._actor_id, new_val_sumo) elif (attr_name == 'accel'): self.traci.vehicle.setAcceleration(self._actor_id, new_val_sumo) elif (attr_name == 'accel_max'): self.traci.vehicle.setAccel(self._actor_id, new_val_sumo) elif (attr_name == 'decel_max'): self.traci.vehicle.setDecel(self._actor_id, new_val_sumo) elif (attr_name == 'signals'): self.traci.vehicle.setSignals(self._actor_id, new_val_sumo) elif (attr_name == 'extent'): pass elif (attr_name == 'edge_id'): pass elif (attr_name == 'lane_id'): pass elif (attr_name == 'lane_index'): pass setattr(self._state, attr_name, new_val)
def add_random(df, mode, arch='MLP'): rules = [2, 4, 8, 16, 32] encs = [32, 64, 128, 256, 512] dims = [128, 256, 512, 1024, 2048] modes = ['last', 'best'] def collapse_metric_worse(prob, rules): p = np.min(np.sum(prob, axis=0)) cmw = (1 - (rules * p)) return cmw def collapse_metric(prob, rules): p = np.sum(prob, axis=0) cm = ((rules * np.sum(np.maximum(((np.ones_like(p) / rules) - p), 0))) / (rules - 1)) return cm def mutual_info(prob): m1 = np.sum(prob, axis=0, keepdims=True) m2 = np.sum(prob, axis=1, keepdims=True) m = (m1 * m2) return np.sum((prob * np.log(((prob / (m + EPS)) + EPS)))) def specialization_score(true_p, empirical_p): true_p.sort() empirical_p.sort() return (np.sum(np.abs((true_p - empirical_p))) / 2.0) def specialization_metric(prob, rules): spec = 0.0 p_ = np.sum(prob, axis=0) for eval_seed in range(100): rng = np.random.RandomState(eval_seed) p = rng.dirichlet(alpha=np.ones(rules)) spec += specialization_score(p, p_) return (spec / 100.0) def hungarian_metric(prob, rules): prob = (prob * rules) cost = (1 - prob) (row_ind, col_ind) = linear_sum_assignment(cost) perm = np.zeros((rules, rules)) perm[(row_ind, col_ind)] = 1.0 hung_score = (np.sum(np.abs((perm - prob))) / (2 * rules)) return hung_score for rule in [2, 4, 8, 16, 32]: prob = (np.ones((rule, rule)) / (rule * rule)) cmw = collapse_metric_worse(prob, rule) cm = collapse_metric(prob, rule) mi = mutual_info(prob) spec = specialization_metric(prob, rule) hung = hungarian_metric(prob, rule) for (enc, dim) in zip(encs, dims): for m in modes: if (arch == 'MLP'): df.loc[(- 1)] = [mode, 'Random', rule, enc, dim, (- 1), (- 1), (- 1), (- 1), (- 1), cm, cmw, spec, mi, hung, m] elif (arch == 'MHA'): df.loc[(- 1)] = [mode, 'Random', rule, 1, enc, dim, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), cm, cmw, spec, mi, hung, m] df.index += 1 df.loc[(- 1)] = [mode, 'Random', rule, 2, enc, dim, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), cm, cmw, spec, mi, hung, m] elif (arch == 'RNN'): df.loc[(- 1)] = [mode, 'Random', rule, enc, dim, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), cm, cmw, spec, mi, hung, m] df.index += 1
class TransformerDecoderLayer(nn.Module): def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = args.decoder_embed_dim self.cross_self_attention = getattr(args, 'cross_self_attention', False) self.self_attn = self.build_self_attention(self.embed_dim, args, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn(activation=getattr(args, 'activation_fn', 'relu')) self.activation_dropout = getattr(args, 'activation_dropout', 0) if (self.activation_dropout == 0): self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = self.build_encoder_attention(self.embed_dim, args) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = self.build_fc1(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = self.build_fc2(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def build_fc1(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_fc2(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_self_attention(self, embed_dim, args, add_bias_kv=False, add_zero_attn=False): return MultiheadAttention(embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=(not getattr(args, 'cross_self_attention', False))) def build_encoder_attention(self, embed_dim, args): return MultiheadAttention(embed_dim, args.decoder_attention_heads, kdim=getattr(args, 'encoder_embed_dim', None), vdim=getattr(args, 'encoder_embed_dim', None), dropout=args.attention_dropout, encoder_decoder_attention=True) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward(self, x, encoder_out: Optional[torch.Tensor]=None, encoder_padding_mask: Optional[torch.Tensor]=None, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]=None, prev_self_attn_state: Optional[List[torch.Tensor]]=None, prev_attn_state: Optional[List[torch.Tensor]]=None, self_attn_mask: Optional[torch.Tensor]=None, self_attn_padding_mask: Optional[torch.Tensor]=None, need_attn: bool=False, need_head_weights: bool=False): if need_head_weights: need_attn = True residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) if (prev_self_attn_state is not None): (prev_key, prev_value) = prev_self_attn_state[:2] saved_state: Dict[(str, Optional[Tensor])] = {'prev_key': prev_key, 'prev_value': prev_value} if (len(prev_self_attn_state) >= 3): saved_state['prev_key_padding_mask'] = prev_self_attn_state[2] assert (incremental_state is not None) self.self_attn._set_input_buffer(incremental_state, saved_state) _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state) if (self.cross_self_attention and (not ((incremental_state is not None) and (_self_attn_input_buffer is not None) and ('prev_key' in _self_attn_input_buffer)))): if (self_attn_mask is not None): assert (encoder_out is not None) self_attn_mask = torch.cat((x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1) if (self_attn_padding_mask is not None): if (encoder_padding_mask is 
None): assert (encoder_out is not None) encoder_padding_mask = self_attn_padding_mask.new_zeros(encoder_out.size(1), encoder_out.size(0)) self_attn_padding_mask = torch.cat((encoder_padding_mask, self_attn_padding_mask), dim=1) assert (encoder_out is not None) y = torch.cat((encoder_out, x), dim=0) else: y = x (x, attn) = self.self_attn(query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask) x = F.dropout(x, p=self.dropout, training=self.training) x = (residual + x) if (not self.normalize_before): x = self.self_attn_layer_norm(x) if (self.encoder_attn is not None): residual = x if self.normalize_before: x = self.encoder_attn_layer_norm(x) if (prev_attn_state is not None): (prev_key, prev_value) = prev_attn_state[:2] saved_state: Dict[(str, Optional[Tensor])] = {'prev_key': prev_key, 'prev_value': prev_value} if (len(prev_attn_state) >= 3): saved_state['prev_key_padding_mask'] = prev_attn_state[2] assert (incremental_state is not None) self.encoder_attn._set_input_buffer(incremental_state, saved_state) (x, attn) = self.encoder_attn(query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(need_attn or ((not self.training) and self.need_attn)), need_head_weights=need_head_weights) x = F.dropout(x, p=self.dropout, training=self.training) x = (residual + x) if (not self.normalize_before): x = self.encoder_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=float(self.activation_dropout), training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = (residual + x) if (not self.normalize_before): x = self.final_layer_norm(x) if (self.onnx_trace and (incremental_state is not None)): saved_state = self.self_attn._get_input_buffer(incremental_state) assert (saved_state is not None) if (self_attn_padding_mask is not None): self_attn_state = [saved_state['prev_key'], saved_state['prev_value'], saved_state['prev_key_padding_mask']] else: self_attn_state = [saved_state['prev_key'], saved_state['prev_value']] return (x, attn, self_attn_state) return (x, attn, None) def make_generation_fast_(self, need_attn: bool=False, **kwargs): self.need_attn = need_attn
def get_input_list_file(subset, trainsplit):
    if subset == 'train':
        if trainsplit == 0:
            return 'ImageSets/480p/train.txt'
        elif trainsplit == 1:
            return 'ImageSets/480p/trainsplit_train.txt'
        elif trainsplit == 2:
            return 'ImageSets/480p/trainsplit2_train.txt'
        elif trainsplit == 3:
            return 'ImageSets/480p/trainsplit3_train.txt'
        else:
            assert False, 'invalid trainsplit'
    elif trainsplit == 0:
        return 'ImageSets/480p/val.txt'
    elif trainsplit == 1:
        return 'ImageSets/480p/trainsplit_val.txt'
    elif trainsplit == 2:
        return 'ImageSets/480p/trainsplit2_val.txt'
    elif trainsplit == 3:
        return 'ImageSets/480p/trainsplit3_val.txt'
    else:
        assert False, 'invalid trainsplit'
class MobileNetV2DeepLabV3Plus(nn.Module):

    def __init__(self, config: MobileNetV2Config) -> None:
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.conv_pool = MobileNetV2ConvLayer(
            config,
            in_channels=apply_depth_multiplier(config, 320),
            out_channels=256,
            kernel_size=1,
            stride=1,
            use_normalization=True,
            use_activation='relu',
            layer_norm_eps=1e-05)
        self.conv_aspp = MobileNetV2ConvLayer(
            config,
            in_channels=apply_depth_multiplier(config, 320),
            out_channels=256,
            kernel_size=1,
            stride=1,
            use_normalization=True,
            use_activation='relu',
            layer_norm_eps=1e-05)
        self.conv_projection = MobileNetV2ConvLayer(
            config,
            in_channels=512,
            out_channels=256,
            kernel_size=1,
            stride=1,
            use_normalization=True,
            use_activation='relu',
            layer_norm_eps=1e-05)
        self.dropout = nn.Dropout2d(config.classifier_dropout_prob)
        self.classifier = MobileNetV2ConvLayer(
            config,
            in_channels=256,
            out_channels=config.num_labels,
            kernel_size=1,
            use_normalization=False,
            use_activation=False,
            bias=True)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        spatial_size = features.shape[-2:]
        features_pool = self.avg_pool(features)
        features_pool = self.conv_pool(features_pool)
        features_pool = nn.functional.interpolate(
            features_pool, size=spatial_size, mode='bilinear', align_corners=True)
        features_aspp = self.conv_aspp(features)
        features = torch.cat([features_pool, features_aspp], dim=1)
        features = self.conv_projection(features)
        features = self.dropout(features)
        features = self.classifier(features)
        return features
class Beamline(object): def __init__(self, srwl_beamline=None): self.propagation_options = [{'optical_elements': [], 'propagation_parameters': []}] if (srwl_beamline is not None): tolal_elements = max(len(srwl_beamline.arProp), len(srwl_beamline.arOpt)) for ti in range(tolal_elements): try: elem = srwl_beamline.arOpt[ti] except IndexError: elem = wpg.optical_elements.Empty() try: pp = srwl_beamline.arProp[ti] except IndexError: pp = None self.append(elem, pp) def __str__(self): res = '' for po in self.propagation_options: tolal_elements = max(len(po['optical_elements']), len(po['propagation_parameters'])) for ti in range(tolal_elements): try: elem = po['optical_elements'][ti] except IndexError: elem = wpg.optical_elements.Empty() try: pp = po['propagation_parameters'][ti] except IndexError: pp = None s1 = elem.__doc__ s2 = 'Prop. parameters = {0}'.format(pp) if isinstance(elem, srwlib.SRWLOpt): s3 = ('\t' + '\n\t'.join(srw_obj2str(elem).split('\n'))) else: s3 = ('\t' + str(elem)) res += '{0}\n{1}\n{2}\n'.format(s1, s2, s3) return res def append(self, optical_element, propagation_parameters): last_pp_opt = self.propagation_options[(- 1)] if (not (len(last_pp_opt['optical_elements']) == len(last_pp_opt['propagation_parameters']))): self.propagation_options.append({'optical_elements': [], 'propagation_parameters': []}) last_pp_opt = self.propagation_options[(- 1)] if (not all([isinstance(o, srwlib.SRWLOpt) for o in last_pp_opt['optical_elements']])): self.propagation_options.append({'optical_elements': [], 'propagation_parameters': []}) if isinstance(optical_element, srwlib.SRWLOpt): opt = optical_element pp = _get_srw_pp(propagation_parameters) last_pp_opt['optical_elements'].append(opt) last_pp_opt['propagation_parameters'].append(pp) if ((optical_element == []) or isinstance(optical_element, wpg.optical_elements.Empty)): pp = _get_srw_pp(propagation_parameters) last_pp_opt['propagation_parameters'].append(pp) def propagate(self, wfr): for propagation_option in self.propagation_options: if all([isinstance(o, srwlib.SRWLOpt) for o in propagation_option['optical_elements']]): srwl_beamline = srwlib.SRWLOptC(propagation_option['optical_elements'], propagation_option['propagation_parameters']) srwl.PropagElecField(wfr._srwl_wf, srwl_beamline) for opt_element in propagation_option['optical_elements']: if isinstance(opt_element, srwlib.SRWLOptD): wfr.params.Mesh.zCoord = (wfr.params.Mesh.zCoord + opt_element.L) else: raise ValueError('Unknown type of propagators')
def parse_multiprocessing_cli(parser):
    parser.add_argument('--nprocs', type=int, default=4,
                        help='How many processes we want to use')
    parser.add_argument('--master_port', type=int, default=29500)
    parser.add_argument('--verbose_comm', action='store_true')
    parser.add_argument('--verbose_comm_from_cmd', action='store_true')
class SawyerFaucetOpenV1Policy(Policy):

    # Decorators assumed: the dangling '_fully_parsed' token in the source
    # suggests @assert_fully_parsed on a staticmethod observation parser.
    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        return {
            'hand_pos': obs[:3],
            'faucet_pos': obs[3:6],
            'unused_info': obs[6:],
        }

    def get_action(self, obs):
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
        action['grab_effort'] = 1.0
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        pos_curr = o_d['hand_pos']
        pos_faucet = o_d['faucet_pos'] + np.array([-0.02, 0.0, 0.0])
        if np.linalg.norm(pos_curr[:2] - pos_faucet[:2]) > 0.04:
            return pos_faucet + np.array([0.0, 0.0, 0.1])
        elif abs(pos_curr[2] - pos_faucet[2]) > 0.04:
            return pos_faucet
        else:
            return pos_faucet + np.array([0.1, 0.05, 0.0])
def _seg_40(): return [(63824, 'M', u''), (63825, 'M', u''), (63826, 'M', u''), (63827, 'M', u''), (63828, 'M', u''), (63829, 'M', u''), (63830, 'M', u''), (63831, 'M', u''), (63832, 'M', u''), (63833, 'M', u''), (63834, 'M', u''), (63835, 'M', u''), (63836, 'M', u''), (63837, 'M', u''), (63838, 'M', u''), (63839, 'M', u''), (63840, 'M', u''), (63841, 'M', u''), (63842, 'M', u''), (63843, 'M', u''), (63844, 'M', u''), (63845, 'M', u''), (63846, 'M', u''), (63847, 'M', u''), (63848, 'M', u''), (63849, 'M', u''), (63850, 'M', u''), (63851, 'M', u''), (63852, 'M', u''), (63853, 'M', u''), (63854, 'M', u''), (63855, 'M', u''), (63856, 'M', u''), (63857, 'M', u''), (63858, 'M', u''), (63859, 'M', u''), (63860, 'M', u''), (63861, 'M', u''), (63862, 'M', u''), (63863, 'M', u''), (63864, 'M', u''), (63865, 'M', u''), (63866, 'M', u''), (63867, 'M', u''), (63868, 'M', u''), (63869, 'M', u''), (63870, 'M', u''), (63871, 'M', u''), (63872, 'M', u''), (63873, 'M', u''), (63874, 'M', u''), (63875, 'M', u''), (63876, 'M', u''), (63877, 'M', u''), (63878, 'M', u''), (63879, 'M', u''), (63880, 'M', u''), (63881, 'M', u''), (63882, 'M', u''), (63883, 'M', u''), (63884, 'M', u''), (63885, 'M', u''), (63886, 'M', u''), (63887, 'M', u''), (63888, 'M', u''), (63889, 'M', u''), (63890, 'M', u''), (63891, 'M', u''), (63892, 'M', u''), (63893, 'M', u''), (63894, 'M', u''), (63895, 'M', u''), (63896, 'M', u''), (63897, 'M', u''), (63898, 'M', u''), (63899, 'M', u''), (63900, 'M', u''), (63901, 'M', u''), (63902, 'M', u''), (63903, 'M', u''), (63904, 'M', u''), (63905, 'M', u''), (63906, 'M', u''), (63907, 'M', u''), (63908, 'M', u''), (63909, 'M', u''), (63910, 'M', u''), (63911, 'M', u''), (63912, 'M', u''), (63913, 'M', u''), (63914, 'M', u''), (63915, 'M', u''), (63916, 'M', u''), (63917, 'M', u''), (63918, 'M', u''), (63919, 'M', u''), (63920, 'M', u''), (63921, 'M', u''), (63922, 'M', u''), (63923, 'M', u'')]
class A006530(SloaneSequence):

    def __init__(self):
        SloaneSequence.__init__(self, offset=1)

    def _repr_(self):
        return 'Largest prime dividing n (with a(1)=1).'

    def _eval(self, n):
        if n == 1:
            return ZZ.one()
        return max(p for p, _ in arith.factor(n))
def log_stop_time(key: str, weight: float = 0.0):
    for agg in get_active_aggregators():
        agg[key].stop(weight)
def safe_np_cat(arrays, **kwargs):
    if all(arr.size == 0 for arr in arrays):
        return np.array([])
    cat_arrays = [arr for arr in arrays if arr.size]
    return np.concatenate(cat_arrays, **kwargs)
def get_printer(msg):
    def printer(tensor):
        if tensor.nelement() == 1:
            print(f'{msg} {tensor}')
        else:
            print(f'{msg} shape: {tensor.shape} max: {tensor.max()} min: {tensor.min()} mean: {tensor.mean()}')
    return printer
class _PyAccess8(PyAccess):

    def _post_init(self, *args, **kwargs):
        self.pixels = self.image8

    def get_pixel(self, x, y):
        return self.pixels[y][x]

    def set_pixel(self, x, y, color):
        try:
            self.pixels[y][x] = min(color, 255)
        except TypeError:
            self.pixels[y][x] = min(color[0], 255)
def predict_labels_multi_scale(images, model_options, eval_scales=(1.0,), add_flipped_images=False): outputs_to_predictions = {output: [] for output in model_options.outputs_to_num_classes} for (i, image_scale) in enumerate(eval_scales): with tf.variable_scope(tf.get_variable_scope(), reuse=(True if i else None)): outputs_to_scales_to_logits = multi_scale_logits(images, model_options=model_options, image_pyramid=[image_scale], is_training=False, fine_tune_batch_norm=False) if add_flipped_images: with tf.variable_scope(tf.get_variable_scope(), reuse=True): outputs_to_scales_to_logits_reversed = multi_scale_logits(tf.reverse_v2(images, [2]), model_options=model_options, image_pyramid=[image_scale], is_training=False, fine_tune_batch_norm=False) for output in sorted(outputs_to_scales_to_logits): scales_to_logits = outputs_to_scales_to_logits[output] logits = tf.image.resize_bilinear(scales_to_logits[MERGED_LOGITS_SCOPE], tf.shape(images)[1:3], align_corners=True) outputs_to_predictions[output].append(tf.expand_dims(tf.nn.softmax(logits), 4)) if add_flipped_images: scales_to_logits_reversed = outputs_to_scales_to_logits_reversed[output] logits_reversed = tf.image.resize_bilinear(tf.reverse_v2(scales_to_logits_reversed[MERGED_LOGITS_SCOPE], [2]), tf.shape(images)[1:3], align_corners=True) outputs_to_predictions[output].append(tf.expand_dims(tf.nn.softmax(logits_reversed), 4)) for output in sorted(outputs_to_predictions): predictions = outputs_to_predictions[output] predictions = tf.reduce_mean(tf.concat(predictions, 4), axis=4) outputs_to_predictions[output] = tf.argmax(predictions, 3) return outputs_to_predictions
def write_text(_text, _file_path):
    f = open(_file_path, 'w')
    f.write(_text + '\n')
    f.close()
class LineProduction(object):

    def __init__(self, id=None, type=None):
        self.id = id
        self.lhs = type
@pytest.fixture(scope='module')
def dataframe_only_item_left_pandas():
    # The decorator name was lost in extraction; the dangling "(scope='module')"
    # is assumed to come from a pytest fixture.
    data_only_item_left = [
        (1, [0, 0, 0, 0, 2], [19842]),
        (1, [0, 0, 0, 2, 4], [19842, 19844]),
        (1, [0, 0, 2, 4, 3], [19842, 19844, 19843]),
        (1, [0, 2, 4, 3, 5], [19842, 19844, 19843, 19845]),
        (1, [2, 4, 3, 5, 6], [19842, 19844, 19843, 19845, 19846]),
        (1, [4, 3, 5, 6, 7], [19842, 19844, 19843, 19845, 19846, 19847]),
        (2, [0, 0, 0, 0, 1], [19841]),
        (2, [0, 0, 0, 1, 2], [19841, 19842]),
        (2, [0, 0, 1, 2, 3], [19841, 19842, 19843]),
        (2, [0, 1, 2, 3, 4], [19841, 19842, 19843, 19844]),
        (3, [0, 0, 0, 0, 10], [19844]),
        (4, [0, 0, 0, 10, 11], [19844, 19843]),
        (4, [0, 0, 10, 11, 12], [19844, 19843, 19845]),
        (10, [0, 0, 0, 0, 1], [19841]),
    ]
    return pd.DataFrame(data_only_item_left, columns=['user_id', 'item_id', 'timestamp'])
@contextlib.contextmanager
def _assert_no_warnings_context(name=None):
    # The contextmanager decorator is assumed: the bare yield only makes sense
    # if this generator is used as a context manager.
    __tracebackhide__ = True
    with warnings.catch_warnings(record=True) as l:
        warnings.simplefilter('always')
        yield
        if len(l) > 0:
            name_str = (' when calling %s' % name) if name is not None else ''
            raise AssertionError('Got warnings%s: %s' % (name_str, l))
class AmazonViewOrderDetails(VirtualFunctionTool): name = 'AmazonViewOrderDetails' summary = 'View the details of an order, including shipment and payment information.' parameters: List[ArgParameter] = [{'name': 'order_id', 'type': 'string', 'description': 'The unique identifier of the order.', 'required': True}] returns: List[ArgReturn] = [{'name': 'order_timestamp', 'type': 'string', 'description': "The time the order was placed. Format: 'YYYY-MM-DD:HH:MM:SS'."}, {'name': 'order_status', 'type': 'string', 'description': "The status of the order, one of ['pending', 'shipped', 'delivered']."}, {'name': 'order_contents', 'type': 'array', 'description': "A list of objects, each containing 'product_id', 'product_name' and 'quantity'."}, {'name': 'shipment_details', 'type': 'object', 'description': "An object containing 'tracking_number', 'carrier', 'arrival_date' (for delivered orders) or 'estimated_arrival_date' (for pending or shipped orders), and 'shipping_address'."}, {'name': 'payment_details', 'type': 'object', 'description': "An object containing 'card_number', and 'amount_paid'."}] exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "The order with the specified 'order_id' was not found."}]
def build_struc_layers(G, opt1=True, opt2=True, opt3=True, until_layer=None, workers=64): if opt3: until_layer = until_layer else: until_layer = None G = struc2vec.Graph(G, False, workers, untilLayer=until_layer) if opt1: G.preprocess_neighbors_with_bfs_compact() else: G.preprocess_neighbors_with_bfs() if opt2: G.create_vectors() G.calc_distances(compactDegree=opt1) else: G.calc_distances_all_vertices(compactDegree=opt1) G.create_distances_network() G.preprocess_parameters_random_walk() return
class DistributedDataParallel(Module): def __init__(self, module): super(DistributedDataParallel, self).__init__() self.warn_on_half = (True if (dist._backend == dist.dist_backend.GLOO) else False) self.module = module self.data_parallel_group = mpu.get_data_parallel_group() src_rank = mpu.get_model_parallel_rank() for p in self.module.parameters(): if torch.is_tensor(p): dist.broadcast(p, src_rank, group=self.data_parallel_group) def allreduce_params(reduce_after=True, no_scale=False, fp32_allreduce=False): if self.needs_reduction: self.needs_reduction = False buckets = {} for (name, param) in self.module.named_parameters(): if (param.requires_grad and (param.grad is not None)): tp = param.data.type() if (tp not in buckets): buckets[tp] = [] buckets[tp].append(param) if self.warn_on_half: if (torch.cuda.HalfTensor in buckets): print(('WARNING: gloo dist backend for half parameters may be extremely slow.' + ' It is recommended to use the NCCL backend in this case.')) self.warn_on_half = False for tp in buckets: bucket = buckets[tp] grads = [param.grad.data for param in bucket] coalesced = _flatten_dense_tensors(grads) if fp32_allreduce: coalesced = coalesced.float() if ((not no_scale) and (not reduce_after)): coalesced /= dist.get_world_size(group=self.data_parallel_group) dist.all_reduce(coalesced, group=self.data_parallel_group) torch.cuda.synchronize() if ((not no_scale) and reduce_after): coalesced /= dist.get_world_size(group=self.data_parallel_group) for (buf, synced) in zip(grads, _unflatten_dense_tensors(coalesced, grads)): buf.copy_(synced) self.hook_handles = [] self.hooks = [] for param in list(self.module.parameters()): def allreduce_hook(*unused): Variable._execution_engine.queue_callback(allreduce_params) self.allreduce_params = allreduce_params def forward(self, *inputs, **kwargs): self.needs_reduction = True return self.module(*inputs, **kwargs) def state_dict(self, destination=None, prefix='', keep_vars=False): sd = self.module.state_dict(destination, prefix, keep_vars) return sd def load_state_dict(self, state_dict, strict=True): self.module.load_state_dict(state_dict, strict=strict) '\n def _sync_buffers(self):\n buffers = list(self.module._all_buffers())\n if len(buffers) > 0:\n # cross-node buffer sync\n flat_buffers = _flatten_dense_tensors(buffers)\n dist.broadcast(flat_buffers, 0)\n for buf, synced in zip(buffers, _unflatten_dense_tensors(flat_buffers, buffers)):\n buf.copy_(synced)\n def train(self, mode=True):\n # Clear NCCL communicator and CUDA event cache of the default group ID,\n # These cache will be recreated at the later call. This is currently a\n # work-around for a potential NCCL deadlock.\n if dist._backend == dist.dist_backend.NCCL:\n dist._clear_group_cache()\n super(DistributedDataParallel, self).train(mode)\n self.module.train(mode)\n '
class Widget(Model): def __init__(self, style=None, data=None): if (WIDGET_ENV == 'jupyter'): self._comms = [] self._queue = [] self._viewcount = 0 def handle_remote_set(name, value): with capture_output(self): self.prop(name).trigger(value) self._recv_from_js_(handle_remote_set) self.style = Property(style) self.data = Property(data) self.write = Trigger() def widget_js(self): return '' def widget_html(self): return f'<div {self.std_attrs()}></div>' def view_id(self): return f'_{id(self)}_{self._viewcount}' def std_attrs(self): return ((f'id="{self.view_id()}"' + style_attr(self.style)) + data_attrs(self.data)) def _repr_html_(self): self._viewcount += 1 json_data = json.dumps({k: v.value for (k, v) in vars(self).items() if isinstance(v, Property)}) json_data = re.sub('</', '<\\/', json_data) std_widget_js = minify(f''' var model = new Model("{id(self)}", {json_data}); var element = document.getElementById("{self.view_id()}"); model.on('write', (ev) => {{ var dummy = document.createElement('div'); dummy.innerHTML = ev.value.trim(); dummy.childNodes.forEach((item) => {{ element.parentNode.insertBefore(item, element); }}); }}); function upd(a) {{ return (e) => {{ for (k in e.value) {{ element[a][k] = e.value[k]; }}}}}} model.on('style', upd('style')); model.on('data', upd('dataset')); ''') return ''.join([self.widget_html(), '<script>(function() {', WIDGET_MODEL_JS, std_widget_js, self.widget_js(), '})();</script>']) def _initprop_(self, name, value): if (not hasattr(self, '_viewcount')): raise ValueError('base Model __init__ must be called') super()._initprop_(name, value) def notify_js(event): self._send_to_js_(id(self), name, event.value) if isinstance(value, Trigger): value.on(notify_js, internal=True) def _send_to_js_(self, *args): if (self._viewcount > 0): if (WIDGET_ENV == 'colab'): colab_output.eval_js(minify(f''' (window.send_{id(self)} = window.send_{id(self)} || new BroadcastChannel("channel_{id(self)}") ).postMessage({json.dumps(args)}); '''), ignore_result=True) elif (WIDGET_ENV == 'jupyter'): if (not self._comms): self._queue.append(args) return for comm in self._comms: comm.send(args) def _recv_from_js_(self, fn): if (WIDGET_ENV == 'colab'): colab_output.register_callback(f'invoke_{id(self)}', fn) elif (WIDGET_ENV == 'jupyter'): def handle_comm(msg): fn(*msg['content']['data']) def handle_close(close_msg): comm_id = close_msg['content']['comm_id'] self._comms = [c for c in self._comms if (c.comm_id != comm_id)] def open_comm(comm, open_msg): self._comms.append(comm) comm.on_msg(handle_comm) comm.on_close(handle_close) comm.send('ok') if self._queue: for args in self._queue: comm.send(args) self._queue.clear() if open_msg['content']['data']: handle_comm(open_msg) cname = ('comm_' + str(id(self))) COMM_MANAGER.register_target(cname, open_comm) def display(self): from IPython.core.display import display display(self) return self
_after(1800) def run_model(ckpt_path, ckpt_args, has_gpu, custom_tasks=None): ckpt_save_dir = ckpt_path.parent model_args = Namespace(**ckpt_args['model_args']) model_args.moco = False transform_args = Namespace(**ckpt_args['transform_args']) data_args = Namespace(**ckpt_args['data_args']) print('in select_ensemble.py: data_args: {}'.format(data_args)) data_args.custom_tasks = custom_tasks if has_gpu: gpu_ids = util.args_to_list(ckpt_args['gpu_ids'], allow_empty=True, arg_type=int, allow_negative=False) else: gpu_ids = [] device = util.setup_gpus(gpu_ids) (model, _) = ModelSaver.load_model(ckpt_path=ckpt_path, gpu_ids=gpu_ids, model_args=model_args, is_training=False) predictor = Predictor(model=model, device=device) loader = get_loader(phase='valid', data_args=data_args, transform_args=transform_args, is_training=False, return_info_dict=False, logger=None) (pred_df, gt_df) = predictor.predict(loader) return (pred_df, gt_df)
@pytest.fixture(scope='session') def comm_nccl_opts(request): if (not request.config.getoption('--test-communicator')): return None import nnabla.communicators as C from nnabla.ext_utils import get_extension_context try: from nnabla_ext import cuda except Exception as e: raise ImportError('Communicator test requires CUDA extension.\n{}'.format(e)) gpus = request.config.getoption('--communicator-gpus') n_devices = cuda.get_device_count() if (gpus is None): devices = list(map(str, range(n_devices))) else: devices = gpus.split(',') try: for d in devices: gid = int(d) if (gid >= n_devices): raise ValueError('') except ValueError as e: raise ValueError('GPU IDs must be comma separated integers of available GPUs. Given {}. Available GPUs are {}.'.format(gpus, n_devices)) extension_module = 'cuda' type_config = request.config.getoption('--type-config') ctx = get_extension_context(extension_module, type_config=type_config) try: comm = C.MultiProcessCommunicator(ctx) except Exception as e: raise RuntimeError("Communicator could not be created. You may haven't build with distributed support.\n{}".format(e)) try: comm.init() except Exception as e: raise RuntimeError('Communicator initialization failed. (Maybe MPI init failure.)\n{}'.format(e)) assert (len(devices) == comm.size), 'Number of cuda devices used are not same as that of processes.' n_devices = comm.size mpi_rank = comm.rank mpi_local_rank = comm.local_rank ctx.device_id = devices[mpi_local_rank] class CommOpts(): pass c = CommOpts() c.comm = comm c.ctx = ctx c.device_id = ctx.device_id c.devices = devices c.mpi_rank = mpi_rank c.mpi_local_rank = mpi_local_rank return c
def __plot_client_goodput(args, circuittype, torperf_dbs, tornet_dbs): if (circuittype == 'onionservice'): torperf_dbs = [] for tornet_db in tornet_dbs: tornet_db['data'] = [[((x * (2 ** 20)) / 1000000.0) for x in ds] for ds in tornet_db['dataset']] for torperf_db in torperf_dbs: client_gput = [(t / 1000000.0) for t in torperf_db['dataset']['client_goodput']] torperf_db['data'] = [client_gput] dbs_to_plot = (torperf_dbs + tornet_dbs) __plot_cdf_figure(args, dbs_to_plot, f'client_goodput.{circuittype}', yscale='taillog', xlabel=f'{circuittype} Client Transfer Goodput (Mbit/s): 0.5 to 1 MiB')
def eval_qg(res_dict, gts_dict, not_print=True): encoder.FLOAT_REPR = (lambda o: format(o, '.4f')) res = defaultdict((lambda : [])) gts = defaultdict((lambda : [])) for key in gts_dict.keys(): res[key] = [res_dict[key].encode('utf-8')] gts[key].append(gts_dict[key].encode('utf-8')) QGEval = QGEvalCap(gts, res) return QGEval.evaluate(not_print)[(- 1)]
def test_sum_add_to_fun(): var1 = optplan.Parameter() var2 = optplan.Parameter() var3 = optplan.Parameter() sum1 = optplan.Sum(functions=[var1, var2]) sum2 = (sum1 + var3) assert isinstance(sum2, optplan.Sum) assert (sum2.functions == [var1, var2, var3])
def conv1d(input_, output_channels, dilation=1, filter_width=1, causal=False, name='dilated_conv'): with tf.variable_scope(name): w = tf.get_variable('w', [1, filter_width, input_.get_shape()[(- 1)], output_channels], initializer=tf.contrib.layers.xavier_initializer_conv2d()) b = tf.get_variable('b', [output_channels], initializer=tf.constant_initializer(0.0)) if causal: padding = [[0, 0], [((filter_width - 1) * dilation), 0], [0, 0]] padded = tf.pad(input_, padding) input_expanded = tf.expand_dims(padded, axis=1) out = (tf.nn.atrous_conv2d(input_expanded, w, rate=dilation, padding='VALID') + b) else: input_expanded = tf.expand_dims(input_, axis=1) out = (tf.nn.atrous_conv2d(input_expanded, w, rate=dilation, padding='SAME') + b) return tf.squeeze(out, [1])
def _requantize(x, multiplier, zero_point, qmin=0, qmax=255, qtype=np.uint8): qx = ((x * multiplier).round() + zero_point) qx = np.clip(qx, qmin, qmax).astype(qtype) return qx
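# Worked example with made-up numbers: scale by the multiplier, round,
# add the zero point, then clip to the uint8 range.
import numpy as np

x = np.array([-30.0, 0.0, 100.0, 600.0])
print(_requantize(x, multiplier=0.5, zero_point=10))  # -> [  0  10  60 255]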
def test_random_sampler_empty_gt(): assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5, ignore_wrt_candidates=False) bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42]]) gt_bboxes = torch.empty(0, 4) gt_labels = torch.empty(0).long() assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels) sampler = RandomSampler(num=10, pos_fraction=0.5, neg_pos_ub=(- 1), add_gt_as_proposals=True) sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels) assert (len(sample_result.pos_bboxes) == len(sample_result.pos_inds)) assert (len(sample_result.neg_bboxes) == len(sample_result.neg_inds))
class EvaluationChunk(sqlalchemy_base): __tablename__ = 'evaluation_chunks' uuid = sqla.Column(sqla.String, primary_key=True) creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now()) evaluation_uuid = sqla.Column(sqla.String, sqla.ForeignKey('evaluations.uuid'), nullable=False) evaluation = sqla.orm.relationship('Evaluation', back_populates='chunks', foreign_keys=[evaluation_uuid]) username = sqla.Column(sqla.String) extra_info = sqla.Column(sqla.JSON) hidden = sqla.Column(sqla.Boolean) def __repr__(self): return f'<EvaluationChunk(uuid="{self.uuid}", evaluation_uuid="{self.evaluation_uuid}")>' def __hash__(self): return hash((hash(self.uuid) + hash(self.name))) def __eq__(self, other): return (hash(self) == hash(other))
def epoch_wrapup(pl_module: LightningModule, mode: str): assert (mode in ['train', 'val', 'test']) value = getattr(pl_module, f'{mode}_loss').compute() if (mode == 'train'): pl_module.log(f'{mode}/loss_epoch', value) getattr(pl_module, f'{mode}_loss').reset() value = getattr(pl_module, f'{mode}_acc').compute() if (mode == 'train'): pl_module.log(f'{mode}/acc_epoch', value) getattr(pl_module, f'{mode}_acc').reset()
def test_raw_tree(): con_sentences = convert_it_vit.read_constituency_sentences(io.StringIO(CON_SAMPLE)) expected_ids = ['#ID=sent_00002', '#ID=sent_00318', '#ID=sent_00589'] expected_trees = ["(ROOT (cp (sp (part negli) (sn (sa (ag ultimi)) (nt anni))) (f (sn (art la) (n dinamica) (spd (partd dei) (sn (n polo) (n di) (n attrazione)))) (ibar (ause e) (ausep stata) (savv (savv (avv sempre)) (avv piu)) (vppt caratterizzata)) (compin (spda (partda dall) (sn (n emergere) (spd (pd di) (sn (art una) (sa (ag crescente)) (n concorrenza) (f2 (rel che) (f (ibar (clit si) (ause e) (avv progressivamente) (vppin spostata)) (compin (spda (partda dalle) (sn (sa (ag singole)) (n imprese))) (sp (part ai) (sn (n sistemi) (sa (coord (ag economici) (cong e) (ag territoriali))))) (fp (punt ,) (sv5 (vgt determinando) (compt (sn (art l') (nf esigenza) (spd (pd di) (sn (art una) (n riconsiderazione) (spd (partd dei) (sn (n rapporti) (sv3 (ppre esistenti) (compin (sp (p tra) (sn (n soggetti) (sa (ag produttivi)))) (cong e) (sn (n ambiente) (f2 (sp (p in) (sn (relob cui))) (f (sn (deit questi)) (ibar (vin operano) (punto .", '(ROOT (dirsp (fc (congf tuttavia) (f (sn (sq (ind qualche)) (n problema)) (ir_infl (vsupir potrebbe) (vcl esserci)) (compc (clit ci) (sp (p per) (sn (art la) (n commissione) (sa (ag esteri)) (f2 (sp (part alla) (relob cui) (sn (n presidenza))) (f (ibar (vc e)) (compc (sn (n candidato)) (sn (art l) (n esponente) (spd (pd di) (sn (mw Alleanza) (npro Nazionale))) (sn (mw Mirko) (nh Tremaglia (dirs :) (f3 (sn (art una) (n candidatura) (sc (q piu) (sa (ppas subita)) (sc (ccong che) (sa (ppas gradita))) (compt (spda (partda dalla) (sn (mw Lega) (npro Nord) (punt ,) (f2 (rel che) (fc (congf tuttavia) (f (ir_infl (vsupir dovrebbe) (vit rispettare)) (compt (sn (art gli) (n accordi (punto .))))', '(ROOT (f (sn (art l) (n ottimismo) (spd (pd di) (sn (nh Kantor)))) (ir_infl (vsupir potrebbe) (congf pero) (vcl rivelarsi)) (compc (sn (in ancora) (art una) (nt volta)) (sa (ag prematuro))) (punto .)))'] assert (len(con_sentences) == 3) for (sentence, expected_id, expected_tree) in zip(con_sentences, expected_ids, expected_trees): assert (sentence[0] == expected_id) tree = convert_it_vit.raw_tree(sentence[1]) assert (str(tree) == expected_tree)
def test_pi_numpy(): def returnpi(result: dace.float64[1]): result[0] = math.pi a = np.random.rand(1) returnpi(a) assert np.allclose(a, np.array(math.pi))
def BModel2MLIR(bmodel_net: BModel): with use_backend(bmodel_net.chip) as context: if isinstance(context, BM1688Context): coeff = bmodel_net.net[0].parameter[0].coeff_mem if (coeff and (context.base_addr[0] != context.base_addr[1])): context.base_addr[1] += len(coeff.data) with atomic_context(bmodel_net, context): atomic_mlir = MlirModule(bmodel_net) return atomic_mlir
def define_E(opt): netE_cls = find_network_using_name('conv', 'encoder') return create_network(netE_cls, opt)
def softmin(input, dim=None, _stacklevel=3): if (dim is None): dim = _get_softmax_dim('softmin', input.dim(), _stacklevel) return (- input).softmax(dim)
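# Quick sanity check (illustrative): softmin(x) should match softmax(-x).
import torch
import torch.nn.functional as F

x = torch.randn(2, 5)
assert torch.allclose(softmin(x, dim=-1), F.softmax(-x, dim=-1))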
class ActionDecoder(nn.Module): def act(self, latent_plan: torch.Tensor, perceptual_emb: torch.Tensor, latent_goal: torch.Tensor) -> torch.Tensor: raise NotImplementedError def loss(self, latent_plan: torch.Tensor, perceptual_emb: torch.Tensor, latent_goal: torch.Tensor, actions: torch.Tensor) -> torch.Tensor: raise NotImplementedError def loss_and_act(self, latent_plan: torch.Tensor, perceptual_emb: torch.Tensor, latent_goal: torch.Tensor, actions: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]: raise NotImplementedError def clear_hidden_state(self) -> None: raise NotImplementedError def _sample(self, *args, **kwargs): raise NotImplementedError def forward(self, latent_plan: torch.Tensor, perceptual_emb: torch.Tensor, latent_goal: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]: raise NotImplementedError
class SetSeedCallback(Callback): def __init__(self, seed=10, is_DDP=False): self.seed = seed self.is_DDP = is_DDP def on_fit_start(self, trainer, pl_module): if self.is_DDP: if (not dist.is_available()): raise RuntimeError('Requires distributed package to be available') seed_everything(((dist.get_rank() + 1) * self.seed)) else: seed_everything(self.seed)
def run_inference(args): if (args.model in ['bridge', 'seq2seq', 'seq2seq.pg']): sp = EncoderDecoderLFramework(args) else: raise NotImplementedError sp.cuda() with torch.set_grad_enabled(False): inference(sp)
def orderNodeList(nodelist): newlist = sorted([n for n in nodelist], key=(lambda x: x.eduspan[1])) return newlist
class SelecSLS(nn.Module): def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'): self.num_classes = num_classes self.drop_rate = drop_rate super(SelecSLS, self).__init__() self.stem = conv_bn(in_chans, 32, stride=2) self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']]) self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']]) self.num_features = cfg['num_features'] self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.fc = nn.Linear((self.num_features * self.global_pool.feat_mult()), num_classes) for (n, m) in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1.0) nn.init.constant_(m.bias, 0.0) def get_classifier(self): return self.fc def reset_classifier(self, num_classes, global_pool='avg'): self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.num_classes = num_classes if num_classes: num_features = (self.num_features * self.global_pool.feat_mult()) self.fc = nn.Linear(num_features, num_classes) else: self.fc = nn.Identity() def forward_features(self, x): x = self.stem(x) x = self.features([x]) x = self.head(x[0]) return x def forward(self, x): x = self.forward_features(x) x = self.global_pool(x).flatten(1) if (self.drop_rate > 0.0): x = F.dropout(x, p=self.drop_rate, training=self.training) x = self.fc(x) return x
class ExpandInplaceOperators(EnvTransform): def visit_InPlaceAssignmentNode(self, node): lhs = node.lhs rhs = node.rhs if lhs.type.is_cpp_class: return node if isinstance(lhs, ExprNodes.BufferIndexNode): return node env = self.current_env() def side_effect_free_reference(node, setting=False): if node.is_name: return (node, []) elif (node.type.is_pyobject and (not setting)): node = LetRefNode(node) return (node, [node]) elif node.is_subscript: (base, temps) = side_effect_free_reference(node.base) index = LetRefNode(node.index) return (ExprNodes.IndexNode(node.pos, base=base, index=index), (temps + [index])) elif node.is_attribute: (obj, temps) = side_effect_free_reference(node.obj) return (ExprNodes.AttributeNode(node.pos, obj=obj, attribute=node.attribute), temps) elif isinstance(node, ExprNodes.BufferIndexNode): raise ValueError("Don't allow things like attributes of buffer indexing operations") else: node = LetRefNode(node) return (node, [node]) try: (lhs, let_ref_nodes) = side_effect_free_reference(lhs, setting=True) except ValueError: return node dup = lhs.__class__(**lhs.__dict__) binop = ExprNodes.binop_node(node.pos, operator=node.operator, operand1=dup, operand2=rhs, inplace=True) lhs.analyse_target_types(env) dup.analyse_types(env) binop.analyse_operation(env) node = Nodes.SingleAssignmentNode(node.pos, lhs=lhs, rhs=binop.coerce_to(lhs.type, env)) let_ref_nodes.reverse() for t in let_ref_nodes: node = LetNode(t, node) return node def visit_ExprNode(self, node): return node
class ATSNmat(SpectralMatrix): def assemble(self, method): (test, trial) = (self.testfunction, self.trialfunction) assert isinstance(test[0], T) assert isinstance(trial[0], SN) N = test[0].N k = np.arange(N, dtype=float) self._keyscale = 1 def _getkey(j): if (j == 0): return ((((((- (k[:(- 2)] ** 2)) / (k[:(- 2)] + 2)) * (((k[:(- 2)] + 2) ** 2) - (k[:(- 2)] ** 2))) * np.pi) / 2.0) * self._keyscale) return (((((k[j:(- 2)] * ((k[j:(- 2)] ** 2) - (k[:(- (j + 2))] ** 2))) - (((k[j:(- 2)] ** 2) / (k[j:(- 2)] + 2)) * (((k[j:(- 2)] + 2) ** 2) - (k[:(- (j + 2))] ** 2)))) * np.pi) / 2.0) * self._keyscale) d = dict.fromkeys(np.arange(0, (N - 2), 2), _getkey) return d
def xml_to_treceval(opt, input_file): overwrite = opt.overwrite res_file = (os.path.splitext(input_file)[0] + '.treceval') if os.path.exists(res_file): if overwrite: logger.info(('%s exists. Overwrite' % res_file)) else: logger.info(('%s exists. Use "--overwrite 1" if you want to overwrite' % res_file)) return res_file tree = ET.parse(input_file) root = tree.getroot() MAX_SCORE = 9999 TEAM = 'RUCMM' newlines = [] for topicResult in root.iter('videoAdhocSearchTopicResult'): qry_id = ('1' + topicResult.attrib['tNum']) itemlist = list(topicResult) for (rank, item) in enumerate(itemlist): assert ((rank + 1) == int(item.attrib['seqNum'])) shot_id = item.attrib['shotId'] score = (MAX_SCORE - rank) newlines.append(('%s 0 %s %d %d %s' % (qry_id, shot_id, (rank + 1), score, TEAM))) fw = open(res_file, 'w') fw.write(('\n'.join(newlines) + '\n')) fw.close() return res_file
def convert_f(args): from .convert import convert convert(args.files, args.dest_dir, args.verbose)
def write_dataset(dataset, out_directory, dataset_name): for (shard, phrases) in zip(SHARDS, dataset): output_file = os.path.join(out_directory, ('%s.%s.json' % (dataset_name, shard))) write_list(output_file, phrases)
@pytest.mark.parametrize('nuclide_name', ['Ni-56', 'Fe-52', 'Cr-48']) def test_activity(gamma_ray_simulation_state, nuclide_name): nuclide = rd.Nuclide(nuclide_name) t_half = (nuclide.half_life() * u.s) decay_constant = (np.log(2) / t_half) time_delta = (1.0 * u.s) composition = gamma_ray_simulation_state.composition cell_masses = composition.calculate_cell_masses(gamma_ray_simulation_state.geometry.volume) isotopic_mass_fractions = gamma_ray_simulation_state.composition.isotopic_mass_fraction isotopic_masses = (isotopic_mass_fractions * cell_masses) test_mass = (isotopic_masses.loc[((nuclide.Z, nuclide.A), 0)] * u.g) iso_dict = create_isotope_dicts(isotopic_mass_fractions, cell_masses) inv_dict = create_inventories_dict(iso_dict) total_decays = calculate_total_decays(inv_dict, time_delta) actual = total_decays[0][(nuclide.Z, nuclide.A)][nuclide_name] isotope_mass = (nuclide.atomic_mass * u.u) number_of_atoms = (test_mass / isotope_mass).to(u.dimensionless_unscaled) expected = (number_of_atoms * (1 - np.exp(((- decay_constant) * time_delta)))) npt.assert_allclose(actual, expected)
def _as_pairs(x, ndim, as_index=False): if (x is None): return (((None, None),) * ndim) x = np.array(x) if as_index: x = np.round(x).astype(np.intp, copy=False) if (x.ndim < 3): if (x.size == 1): x = x.ravel() if (as_index and (x < 0)): raise ValueError("index can't contain negative values") return (((x[0], x[0]),) * ndim) if ((x.size == 2) and (x.shape != (2, 1))): x = x.ravel() if (as_index and ((x[0] < 0) or (x[1] < 0))): raise ValueError("index can't contain negative values") return (((x[0], x[1]),) * ndim) if (as_index and (x.min() < 0)): raise ValueError("index can't contain negative values") return np.broadcast_to(x, (ndim, 2)).tolist()
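# Illustrative calls showing how _as_pairs normalizes pad-width style
# arguments to one (before, after) pair per axis.
import numpy as np

print(_as_pairs(3, ndim=2, as_index=True))       # one (3, 3) pair per axis
print(_as_pairs((1, 2), ndim=3, as_index=True))  # the (1, 2) pair repeated for all three axes
print(_as_pairs([[1, 2], [3, 4]], ndim=2))       # already per-axis, returned as a nested list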
class GetTestInfoTester(unittest.TestCase): def test_get_test_to_tester_mapping(self): bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE) blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE) EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'} EXPECTED_BLIP_MAPPING = {'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester'} self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING) self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING) def test_get_model_to_test_mapping(self): bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE) blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE) EXPECTED_BERT_MAPPING = {'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest']} EXPECTED_BLIP_MAPPING = {'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipTextImageModelTest', 'BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest']} self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING) self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING) def test_get_model_to_tester_mapping(self): bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE) blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE) EXPECTED_BERT_MAPPING = {'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester']} EXPECTED_BLIP_MAPPING = {'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipModelTester', 'BlipTextImageModelsModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester']} self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING) self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
def printer(string, quiet=False, debug=False, error=False, **kwargs): if (debug and (not DEBUG)): return if debug: if sys.stdout.isatty(): out = ('\x1b[1;30mDEBUG: %s\x1b[0m' % string) else: out = ('DEBUG: %s' % string) else: out = string if error: kwargs['file'] = sys.stderr if (not quiet): print_(out, **kwargs)
class DPRReader(metaclass=DummyObject): _backends = ['torch'] def __init__(self, *args, **kwargs): requires_backends(self, ['torch'])
def _impl(array, container, buffer_key, form_key, id_start, backend, byteorder): layout = ak.operations.to_layout(array, allow_record=False, primitive_policy='error') if (backend is not None): backend = regularize_backend(backend) return ak._do.to_buffers(layout, container=container, buffer_key=buffer_key, form_key=form_key, id_start=id_start, backend=backend, byteorder=byteorder)
def test_util(): rd = RecursiveDefaultDict() rd['a']['new']['element'] = 'assigned' Print.set_verbosity(VERBOSITY.VERBOSE) Print.verbosity_region_begin(VERBOSITY.VERBOSE) print(Print.style.header('this is a header!')) Print.warn('This is a warning!') Print.info('This is an informative message!') Print.show('This is a message!') with pytest.raises(Exception): Print.warn('This is a warning!', raises=Exception) Print.set_decorator_info('>', '<') Print.set_decorator_warn('|', '|') Print.set_decorator_error('*', '*') Print.verbosity_region_end()
def PGL_repn(rational_function): if is_Matrix(rational_function): return rational_function K = rational_function.parent() F = K.base_ring() if (not K.is_field()): return matrix(F, 2, [rational_function[1], rational_function[0], 0, 1]) else: f = rational_function.numerator() g = rational_function.denominator() return matrix(F, 2, [f[1], f[0], g[1], g[0]])
def check_output(n_channels: int, labels: np.ndarray): n_labels = len(set(labels[(labels >= 0)])) if ((n_labels > 2) and (n_labels > n_channels)): raise ValueError('The dimension of the output is too small for the number of labels. Please check the `dims` parameter of your GNN or the `labels` parameter.')
def test_residual_normalised_score_pipe() -> None: pipe = Pipeline([('poly', PolynomialFeatures(degree=2)), ('linear', LinearRegression())]) mapie_reg = MapieRegressor(conformity_score=ResidualNormalisedScore(residual_estimator=pipe, split_size=0.2), cv='split', random_state=random_state) mapie_reg.fit(np.concatenate((X_toy, X_toy)), np.concatenate((y_toy, y_toy)))
def ToricCode(P, F): from sage.combinat.tuple import Tuples mset = [x for x in F if (x != 0)] d = len(P[0]) pts = Tuples(mset, d).list() n = len(pts) k = len(P) e = P[0] B = [] for e in P: tmpvar = [prod([(t[i] ** e[i]) for i in range(d)]) for t in pts] B.append(tmpvar) MS = MatrixSpace(F, k, n) return LinearCode(MS(B))
class TokenBlockDataset(torch.utils.data.Dataset): def __init__(self, tokens, sizes, block_size, pad, eos, break_mode=None, include_targets=False): super().__init__() self.tokens = tokens self.total_size = len(tokens) self.pad = pad self.eos = eos self.include_targets = include_targets self.slice_indices = [] if ((break_mode is None) or (break_mode == 'none')): length = math.ceil((len(tokens) / block_size)) def block_at(i): start = (i * block_size) end = min((start + block_size), len(tokens)) return (start, end) self.slice_indices = [block_at(i) for i in range(length)] elif (break_mode == 'complete'): assert ((sizes is not None) and (sum(sizes) == len(tokens))), '{} != {}'.format(sum(sizes), len(tokens)) tok_idx = 0 sz_idx = 0 curr_size = 0 while (sz_idx < len(sizes)): if (((curr_size + sizes[sz_idx]) <= block_size) or (curr_size == 0)): curr_size += sizes[sz_idx] sz_idx += 1 else: self.slice_indices.append((tok_idx, (tok_idx + curr_size))) tok_idx += curr_size curr_size = 0 if (curr_size > 0): self.slice_indices.append((tok_idx, (tok_idx + curr_size))) elif (break_mode == 'eos'): assert ((sizes is not None) and (sum(sizes) == len(tokens))), '{} != {}'.format(sum(sizes), len(tokens)) curr = 0 for sz in sizes: if (sz > 1): self.slice_indices.append((curr, (curr + sz))) curr += sz else: raise ValueError(('Invalid break_mode: ' + break_mode)) self.sizes = np.array([(e - s) for (s, e) in self.slice_indices]) def __getitem__(self, index): (s, e) = self.slice_indices[index] item = torch.LongTensor(self.tokens[s:e]) if self.include_targets: if (s == 0): source = np.concatenate([[self.eos], self.tokens[0:(e - 1)]]) past_target = np.concatenate([[self.pad, self.eos], self.tokens[0:(e - 2)]]) else: source = self.tokens[(s - 1):(e - 1)] if (s == 1): past_target = np.concatenate([[self.eos], self.tokens[0:(e - 2)]]) else: past_target = self.tokens[(s - 2):(e - 2)] return (torch.LongTensor(source), item, torch.LongTensor(past_target)) return item def __len__(self): return len(self.slice_indices)
@pytest.mark.parametrize('access', ['ro', 'rw', 'static_ro', 'static_rw']) def test_property_return_value_policies(access): if (not access.startswith('static')): obj = m.TestPropRVP() else: obj = m.TestPropRVP ref = getattr(obj, (access + '_ref')) assert (ref.value == 1) ref.value = 2 assert (getattr(obj, (access + '_ref')).value == 2) ref.value = 1 copy = getattr(obj, (access + '_copy')) assert (copy.value == 1) copy.value = 2 assert (getattr(obj, (access + '_copy')).value == 1) copy = getattr(obj, (access + '_func')) assert (copy.value == 1) copy.value = 2 assert (getattr(obj, (access + '_func')).value == 1)
class RandomHorizontalFlip(transforms.RandomHorizontalFlip): def __init__(self, p=0.5): super().__init__(p) self._current_state = None def forward(self, x): return self.__call__(x) def __call__(self, x, state=None): if (state is None): self._current_state = (random.random() > (1.0 - self.p)) state = self._current_state if state: x = F.hflip(x) return x def mask_fn(self, x): return self.__call__(x, state=self._current_state)
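# Hedged usage sketch: the stored flip decision lets an image and its mask be
# flipped consistently. The tensors are illustrative placeholders.
import torch

flip = RandomHorizontalFlip(p=0.5)
image = torch.rand(3, 32, 32)
mask = torch.randint(0, 2, (1, 32, 32)).float()

flipped_image = flip(image)        # samples and stores the flip decision
flipped_mask = flip.mask_fn(mask)  # reuses that decision for the mask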
class Scorer(object): __metaclass__ = abc.ABCMeta def __init__(self): self._updated = False self._cached_results = {} def update(self, results): self._updated = True def get_loss(self): pass def _get_results(self): return [] def get_results(self, prefix=''): results = (self._get_results() if self._updated else self._cached_results) self._cached_results = results self._updated = False return [((prefix + k), v) for (k, v) in results] def results_str(self): return ' - '.join(['{:}: {:.2f}'.format(k, v) for (k, v) in self.get_results()])
class GraphemePhonemeEncoder(text_encoder.TextEncoder): def __init__(self, vocab_filename=None, vocab_list=None, separator='', num_reserved_ids=text_encoder.NUM_RESERVED_TOKENS): super(GraphemePhonemeEncoder, self).__init__(num_reserved_ids=num_reserved_ids) if (vocab_filename and os.path.exists(vocab_filename)): self._init_vocab_from_file(vocab_filename) else: assert (vocab_list is not None) self._init_vocab_from_list(vocab_list) self._separator = separator def encode(self, symbols_line): if self._separator: symbols_list = symbols_line.strip().split(self._separator) else: symbols_list = list(symbols_line.strip()) return [self._sym_to_id[sym] for sym in symbols_list if (sym in self._sym_to_id)] def decode(self, ids): return self._separator.join(self.decode_list(ids)) def decode_list(self, ids): return [self._id_to_sym[id_] for id_ in ids] def vocab_size(self): return len(self._id_to_sym) def _init_vocab_from_file(self, filename): def sym_gen(): with tf.gfile.Open(filename) as vocab_file: for line in vocab_file: sym = line.strip() (yield sym) self._init_vocab(sym_gen(), add_reserved_symbols=False) def _init_vocab_from_list(self, vocab_list): def sym_gen(): for sym in vocab_list: if (sym not in text_encoder.RESERVED_TOKENS): (yield sym) self._init_vocab(sym_gen()) def _init_vocab(self, sym_generator, add_reserved_symbols=True): self._id_to_sym = {} non_reserved_start_index = 0 if add_reserved_symbols: self._id_to_sym.update(enumerate(text_encoder.RESERVED_TOKENS)) non_reserved_start_index = len(text_encoder.RESERVED_TOKENS) self._id_to_sym.update(enumerate(sym_generator, start=non_reserved_start_index)) self._sym_to_id = dict(((v, k) for (k, v) in six.iteritems(self._id_to_sym))) def store_to_file(self, filename): with tf.gfile.Open(filename, 'w') as vocab_file: for i in range(len(self._id_to_sym)): vocab_file.write((self._id_to_sym[i] + '\n'))
class DateTimeField(Field): widget = TextInput() def __init__(self, label=None, validators=None, parse_kwargs=None, display_format='%Y-%m-%d %H:%M', **kwargs): super(DateTimeField, self).__init__(label, validators, **kwargs) if (parse_kwargs is None): parse_kwargs = {} self.parse_kwargs = parse_kwargs self.display_format = display_format def _value(self): if self.raw_data: return ' '.join(self.raw_data) else: return ((self.data and self.data.strftime(self.display_format)) or '') def process_formdata(self, valuelist): if valuelist: date_str = ' '.join(valuelist) if (not date_str): self.data = None raise ValidationError(self.gettext('Please input a date/time value')) parse_kwargs = self.parse_kwargs.copy() if ('default' not in parse_kwargs): try: parse_kwargs['default'] = self.default() except TypeError: parse_kwargs['default'] = self.default try: self.data = parser.parse(date_str, **parse_kwargs) except ValueError: self.data = None raise ValidationError(self.gettext('Invalid date/time input')) except TypeError: if (not DATEUTIL_TYPEERROR_ISSUE): raise self.data = None raise ValidationError(self.gettext('Invalid date/time input'))
class Trainer(BaseTrainer): def __init__(self, model, train_criterion, metrics, optimizer, config, data_loader, valid_data_loader=None, test_data_loader=None, lr_scheduler=None, len_epoch=None, val_criterion=None): super().__init__(model, train_criterion, metrics, optimizer, config, val_criterion) self.config = config self.data_loader = data_loader if (len_epoch is None): self.len_epoch = len(self.data_loader) else: self.data_loader = inf_loop(data_loader) self.len_epoch = len_epoch self.valid_data_loader = valid_data_loader self.test_data_loader = test_data_loader self.do_validation = (self.valid_data_loader is not None) self.do_test = (self.test_data_loader is not None) self.lr_scheduler = lr_scheduler self.log_step = int(np.sqrt(data_loader.batch_size)) self.train_loss_list: List[float] = [] self.val_loss_list: List[float] = [] self.test_loss_list: List[float] = [] def _eval_metrics(self, output, label): acc_metrics = np.zeros(len(self.metrics)) for (i, metric) in enumerate(self.metrics): acc_metrics[i] += metric(output, label) self.writer.add_scalar('{}'.format(metric.__name__), acc_metrics[i]) return acc_metrics def _train_epoch(self, epoch): self.model.train() total_loss = 0 total_metrics = np.zeros(len(self.metrics)) with tqdm(self.data_loader) as progress: for (batch_idx, (data, label, indexs, _)) in enumerate(progress): progress.set_description_str(f'Train epoch {epoch}') (data, label) = (data.to(self.device), label.long().to(self.device)) output = self.model(data) loss = self.train_criterion(indexs.cpu().detach().numpy().tolist(), output, label) self.optimizer.zero_grad() loss.backward() self.optimizer.step() self.writer.set_step((((epoch - 1) * self.len_epoch) + batch_idx)) self.writer.add_scalar('loss', loss.item()) self.train_loss_list.append(loss.item()) total_loss += loss.item() total_metrics += self._eval_metrics(output, label) if ((batch_idx % self.log_step) == 0): progress.set_postfix_str(' {} Loss: {:.6f}'.format(self._progress(batch_idx), loss.item())) self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True)) if (batch_idx == self.len_epoch): break log = {'loss': (total_loss / self.len_epoch), 'metrics': (total_metrics / self.len_epoch).tolist(), 'learning rate': self.lr_scheduler.get_lr()} if self.do_validation: val_log = self._valid_epoch(epoch) log.update(val_log) if self.do_test: (test_log, test_meta) = self._test_epoch(epoch) log.update(test_log) else: test_meta = [0, 0] if (self.lr_scheduler is not None): self.lr_scheduler.step() return log def _valid_epoch(self, epoch): self.model.eval() total_val_loss = 0 total_val_metrics = np.zeros(len(self.metrics)) with torch.no_grad(): with tqdm(self.valid_data_loader) as progress: for (batch_idx, (data, label, _, _)) in enumerate(progress): progress.set_description_str(f'Valid epoch {epoch}') (data, label) = (data.to(self.device), label.to(self.device)) output = self.model(data) loss = self.val_criterion(output, label) self.writer.set_step((((epoch - 1) * len(self.valid_data_loader)) + batch_idx), 'valid') self.writer.add_scalar('loss', loss.item()) self.val_loss_list.append(loss.item()) total_val_loss += loss.item() total_val_metrics += self._eval_metrics(output, label) self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True)) for (name, p) in self.model.named_parameters(): self.writer.add_histogram(name, p, bins='auto') return {'val_loss': (total_val_loss / len(self.valid_data_loader)), 'val_metrics': (total_val_metrics / len(self.valid_data_loader)).tolist()} def _test_epoch(self, 
epoch): self.model.eval() total_test_loss = 0 total_test_metrics = np.zeros(len(self.metrics)) results = np.zeros((len(self.test_data_loader.dataset), self.config['num_classes']), dtype=np.float32) tar_ = np.zeros((len(self.test_data_loader.dataset),), dtype=np.float32) with torch.no_grad(): with tqdm(self.test_data_loader) as progress: for (batch_idx, (data, label, indexs, _)) in enumerate(progress): progress.set_description_str(f'Test epoch {epoch}') (data, label) = (data.to(self.device), label.to(self.device)) output = self.model(data) loss = self.val_criterion(output, label) self.writer.set_step((((epoch - 1) * len(self.test_data_loader)) + batch_idx), 'test') self.writer.add_scalar('loss', loss.item()) self.test_loss_list.append(loss.item()) total_test_loss += loss.item() total_test_metrics += self._eval_metrics(output, label) self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True)) results[indexs.cpu().detach().numpy().tolist()] = output.cpu().detach().numpy().tolist() tar_[indexs.cpu().detach().numpy().tolist()] = label.cpu().detach().numpy().tolist() for (name, p) in self.model.named_parameters(): self.writer.add_histogram(name, p, bins='auto') return ({'test_loss': (total_test_loss / len(self.test_data_loader)), 'test_metrics': (total_test_metrics / len(self.test_data_loader)).tolist()}, [results, tar_]) def _warmup_epoch(self, epoch): total_loss = 0 total_metrics = np.zeros(len(self.metrics)) self.model.train() data_loader = self.data_loader with tqdm(data_loader) as progress: for (batch_idx, (data, label, _, indexs, _)) in enumerate(progress): progress.set_description_str(f'Warm up epoch {epoch}') (data, label) = (data.to(self.device), label.long().to(self.device)) self.optimizer.zero_grad() output = self.model(data) out_prob = torch.nn.functional.softmax(output).data.detach() self.train_criterion.update_hist(indexs.cpu().detach().numpy().tolist(), out_prob) loss = torch.nn.functional.cross_entropy(output, label) loss.backward() self.optimizer.step() self.writer.set_step((((epoch - 1) * self.len_epoch) + batch_idx)) self.writer.add_scalar('loss', loss.item()) self.train_loss_list.append(loss.item()) total_loss += loss.item() total_metrics += self._eval_metrics(output, label) if ((batch_idx % self.log_step) == 0): progress.set_postfix_str(' {} Loss: {:.6f}'.format(self._progress(batch_idx), loss.item())) self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True)) if (batch_idx == self.len_epoch): break if hasattr(self.data_loader, 'run'): self.data_loader.run() log = {'loss': (total_loss / self.len_epoch), 'noise detection rate': 0.0, 'metrics': (total_metrics / self.len_epoch).tolist(), 'learning rate': self.lr_scheduler.get_lr()} if self.do_validation: val_log = self._valid_epoch(epoch) log.update(val_log) if self.do_test: (test_log, test_meta) = self._test_epoch(epoch) log.update(test_log) else: test_meta = [0, 0] return log def _progress(self, batch_idx): base = '[{}/{} ({:.0f}%)]' if hasattr(self.data_loader, 'n_samples'): current = (batch_idx * self.data_loader.batch_size) total = self.data_loader.n_samples else: current = batch_idx total = self.len_epoch return base.format(current, total, ((100.0 * current) / total))
def prune_stupid_effect_conditions(var, val, conditions, effects_on_var): if (conditions == [[]]): return False assert (val in [0, 1]) dual_val = (1 - val) dual_fact = (var, dual_val) if (dual_val in effects_on_var): return False simplified = False for condition in conditions: while (dual_fact in condition): simplified = True condition.remove(dual_fact) if (not condition): conditions[:] = [[]] simplified = True break return simplified
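# Illustrative call with made-up facts: occurrences of the dual fact
# (var, 1 - val) are stripped from the effect conditions.
conditions = [[(7, 0), (3, 1)], [(2, 0)]]
changed = prune_stupid_effect_conditions(var=7, val=1, conditions=conditions, effects_on_var=set())
print(changed)     # True; (7, 0) was removed from the first condition
print(conditions)  # [[(3, 1)], [(2, 0)]]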
def add_common_eval_args(group): group.add_argument('--path', metavar='FILE', help='path(s) to model file(s), colon separated') group.add_argument('--remove-bpe', '--post-process', nargs='?', const=' ', default=None, help='remove BPE tokens before scoring (can be set to sentencepiece)') group.add_argument('--quiet', action='store_true', help='only print final scores') group.add_argument('--model-overrides', default='{}', type=str, metavar='DICT', help='a dictionary used to override model args at generation that were used during model training') group.add_argument('--results-path', metavar='RESDIR', type=str, default=None, help='path to save eval results (optional)')
class TimmResNetWrapper(nn.Module): def __init__(self, net): super().__init__() self.net = net def forward(self, x, return_features=True): x = self.net.forward_features(x) embedding = self.net.global_pool(x) if self.net.drop_rate: embedding = torch.nn.functional.dropout(embedding, p=float(self.net.drop_rate), training=self.training) preds = self.net.fc(embedding) return (embedding, preds) if return_features else preds
class DocumentState(object): def __init__(self, key): self.doc_key = key self.sentence_end = [] self.token_end = [] self.tokens = [] self.subtokens = [] self.info = [] self.segments = [] self.subtoken_map = [] self.segment_subtoken_map = [] self.sentence_map = [] self.pronouns = [] self.clusters = collections.defaultdict(list) self.coref_stacks = collections.defaultdict(list) self.speakers = [] self.segment_info = [] def finalize(self): subtoken_idx = 0 for segment in self.segment_info: speakers = [] for (i, tok_info) in enumerate(segment): if ((tok_info is None) and ((i == 0) or (i == (len(segment) - 1)))): speakers.append('[SPL]') elif (tok_info is None): speakers.append(speakers[(- 1)]) else: speakers.append(tok_info[9]) if (tok_info[4] == 'PRP'): self.pronouns.append(subtoken_idx) subtoken_idx += 1 self.speakers += [speakers] first_subtoken_index = (- 1) for (seg_idx, segment) in enumerate(self.segment_info): speakers = [] for (i, tok_info) in enumerate(segment): first_subtoken_index += 1 coref = (tok_info[(- 2)] if (tok_info is not None) else '-') if (coref != '-'): last_subtoken_index = ((first_subtoken_index + tok_info[(- 1)]) - 1) for part in coref.split('|'): if (part[0] == '('): if (part[(- 1)] == ')'): cluster_id = int(part[1:(- 1)]) self.clusters[cluster_id].append((first_subtoken_index, last_subtoken_index)) else: cluster_id = int(part[1:]) self.coref_stacks[cluster_id].append(first_subtoken_index) else: cluster_id = int(part[:(- 1)]) start = self.coref_stacks[cluster_id].pop() self.clusters[cluster_id].append((start, last_subtoken_index)) merged_clusters = [] for c1 in self.clusters.values(): existing = None for m in c1: for c2 in merged_clusters: if (m in c2): existing = c2 break if (existing is not None): break if (existing is not None): print("Merging clusters (shouldn't happen very often.)") existing.update(c1) else: merged_clusters.append(set(c1)) merged_clusters = [list(c) for c in merged_clusters] all_mentions = flatten(merged_clusters) sentence_map = get_sentence_map(self.segments, self.sentence_end) subtoken_map = flatten(self.segment_subtoken_map) assert (len(all_mentions) == len(set(all_mentions))) num_words = len(flatten(self.segments)) assert (num_words == len(flatten(self.speakers))) assert (num_words == len(subtoken_map)), (num_words, len(subtoken_map)) assert (num_words == len(sentence_map)), (num_words, len(sentence_map)) return {'doc_key': self.doc_key, 'sentences': self.segments, 'clusters': merged_clusters, 'sentence_map': sentence_map, 'subtoken_map': subtoken_map}
def debug_print(s, *args): if DEBUG_LOGGING: formatted_args = [format_ops(arg) for arg in args] print(('DEBUG ' + (s % tuple(formatted_args))))
class OneHot(TransformBase): def __init__(self, drop=None, **kwargs): super().__init__() if (drop is None): self.encoder = preprocessing.OneHotEncoder(handle_unknown='ignore', **kwargs) else: self.encoder = preprocessing.OneHotEncoder(drop=drop, **kwargs) def fit(self, x): if isinstance(x, pd.DataFrame): x = x.values self.encoder.fit(x) return self def transform(self, x): if isinstance(x, pd.DataFrame): x = x.values return self.encoder.transform(x).toarray() def invert(self, x): return self.encoder.inverse_transform(x) def categories(self): return self.encoder.categories_ def get_feature_names(self, input_features=None): return self.encoder.get_feature_names(input_features)
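# Hedged usage sketch for the OneHot wrapper; the toy category column is an assumption.
import numpy as np

enc = OneHot().fit(np.array([['red'], ['green'], ['blue'], ['green']]))
print(enc.transform(np.array([['green'], ['red']])))  # dense 0/1 rows, one column per category
print(enc.categories())                               # learned categories, alphabetically ordered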
def train(): memory.train() gnn.train() node_pred.train() memory.reset_state() neighbor_loader.reset_state() total_loss = 0 label_t = dataset.get_label_time() total_score = 0 num_label_ts = 0 for batch in tqdm(train_loader): batch = batch.to(device) optimizer.zero_grad() (src, dst, t, msg) = (batch.src, batch.dst, batch.t, batch.msg) query_t = batch.t[(- 1)] if (query_t > label_t): label_tuple = dataset.get_node_label(query_t) (label_ts, label_srcs, labels) = (label_tuple[0], label_tuple[1], label_tuple[2]) label_t = dataset.get_label_time() label_srcs = label_srcs.to(device) previous_day_mask = (batch.t < label_t) process_edges(src[previous_day_mask], dst[previous_day_mask], t[previous_day_mask], msg[previous_day_mask]) (src, dst, t, msg) = (src[(~ previous_day_mask)], dst[(~ previous_day_mask)], t[(~ previous_day_mask)], msg[(~ previous_day_mask)]) n_id = label_srcs (n_id_neighbors, mem_edge_index, e_id) = neighbor_loader(n_id) assoc[n_id_neighbors] = torch.arange(n_id_neighbors.size(0), device=device) (z, last_update) = memory(n_id_neighbors) z = gnn(z, last_update, mem_edge_index, data.t[e_id].to(device), data.msg[e_id].to(device)) z = z[assoc[n_id]] pred = node_pred(z) loss = criterion(pred, labels.to(device)) np_pred = pred.cpu().detach().numpy() np_true = labels.cpu().detach().numpy() input_dict = {'y_true': np_true, 'y_pred': np_pred, 'eval_metric': [eval_metric]} result_dict = evaluator.eval(input_dict) score = result_dict[eval_metric] total_score += score num_label_ts += 1 loss.backward() optimizer.step() total_loss += float(loss) process_edges(src, dst, t, msg) memory.detach() metric_dict = {'ce': (total_loss / num_label_ts)} metric_dict[eval_metric] = (total_score / num_label_ts) return metric_dict
def register_Ns3Timer_methods(root_module, cls): cls.add_constructor([param('ns3::Timer const &', 'arg0')]) cls.add_constructor([]) cls.add_constructor([param('ns3::Timer::DestroyPolicy', 'destroyPolicy')]) cls.add_method('Cancel', 'void', []) cls.add_method('GetDelay', 'ns3::Time', [], is_const=True) cls.add_method('GetDelayLeft', 'ns3::Time', [], is_const=True) cls.add_method('GetState', 'ns3::Timer::State', [], is_const=True) cls.add_method('IsExpired', 'bool', [], is_const=True) cls.add_method('IsRunning', 'bool', [], is_const=True) cls.add_method('IsSuspended', 'bool', [], is_const=True) cls.add_method('Remove', 'void', []) cls.add_method('Resume', 'void', []) cls.add_method('Schedule', 'void', []) cls.add_method('Schedule', 'void', [param('ns3::Time', 'delay')]) cls.add_method('SetDelay', 'void', [param('ns3::Time const &', 'delay')]) cls.add_method('Suspend', 'void', []) return
@pytest.mark.parametrize('return_text', ['{"error":"Model [model] is currently loading","estimated_time": [delay]}', '{"error":"Model [model] is currently loading"}', '{"error:}', '']) @pytest.mark.parametrize('image_model', ['CompVis/stable-diffusion-v1-4', 'stabilityai/stable-diffusion-2-1']) @pytest.mark.parametrize('delay', [10, 0]) def test_huggingface_fail_request_with_delay(agent: Agent, workspace, image_size, image_model, return_text, delay): return_text = return_text.replace('[model]', image_model).replace('[delay]', str(delay)) with patch('requests.post') as mock_post: if (return_text == ''): mock_post.return_value.status_code = 200 mock_post.return_value.ok = True mock_post.return_value.content = b'bad image' else: mock_post.return_value.status_code = 500 mock_post.return_value.ok = False mock_post.return_value.text = return_text agent.config.image_provider = 'huggingface' agent.config.huggingface_image_model = image_model prompt = 'astronaut riding a horse' with patch('time.sleep') as mock_sleep: result = generate_image(prompt, agent, image_size) assert (result == 'Error creating image.') if ('estimated_time' in return_text): mock_sleep.assert_called_with(delay) else: mock_sleep.assert_not_called()
def test_int_ineq_constraint(rng, u, geometry): bounds = rng.random(2) bounds.sort() lower_bound = bounds[0] upper_bound = bounds[1] int_ineq_constraint_lower = cashocs.InequalityConstraint((u * geometry.dx), lower_bound=lower_bound) int_ineq_constraint_upper = cashocs.InequalityConstraint((u * geometry.dx), upper_bound=upper_bound) int_ineq_constraint_both = cashocs.InequalityConstraint((u * geometry.dx), lower_bound=lower_bound, upper_bound=upper_bound) shift_tol = rng.random() u.vector().vec().set((lower_bound - shift_tol)) u.vector().apply('') assert (np.abs((int_ineq_constraint_lower.constraint_violation() - shift_tol)) < 1e-14) shift_tol = rng.random() u.vector().vec().set((lower_bound + shift_tol)) u.vector().apply('') assert (np.abs((int_ineq_constraint_lower.constraint_violation() - 0.0)) < 1e-14) shift_tol = rng.random() u.vector().vec().set((upper_bound + shift_tol)) u.vector().apply('') assert (np.abs((int_ineq_constraint_upper.constraint_violation() - shift_tol)) < 1e-14) shift_tol = rng.random() u.vector().vec().set((upper_bound - shift_tol)) u.vector().apply('') assert (np.abs((int_ineq_constraint_upper.constraint_violation() - 0.0)) < 1e-14) u.vector().vec().set(((upper_bound + lower_bound) / 2.0)) u.vector().apply('') assert (np.abs((int_ineq_constraint_both.constraint_violation() - 0.0)) < 1e-14) shift_tol = rng.random() u.vector().vec().set((upper_bound + shift_tol)) u.vector().apply('') assert (np.abs((int_ineq_constraint_both.constraint_violation() - shift_tol)) < 1e-14) shift_tol = rng.random() u.vector().vec().set((lower_bound - shift_tol)) u.vector().apply('') assert (np.abs((int_ineq_constraint_both.constraint_violation() - shift_tol)) < 1e-14)
def inference_detector(model, imgs, cfg, device='cuda:0'): img_transform = ImageTransform(size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg) model = model.to(device) model.eval() if (not isinstance(imgs, list)): return _inference_single(model, imgs, img_transform, cfg, device) else: return _inference_generator(model, imgs, img_transform, cfg, device)
def torch_nn_functional_relu(x, inplace=False): if (not inplace): raise ValueError("Don't support in-place functional.relu for MetaTensor analysis") return x
def test_read_write_set(): sdfg = dace.SDFG('graph') sdfg.add_array('A', [10], dace.float64) sdfg.add_array('B', [10], dace.float64) sdfg.add_array('C', [10], dace.float64) state = sdfg.add_state('state') task1 = state.add_tasklet('work1', {'A'}, {'B'}, 'B = A + 1') task2 = state.add_tasklet('work2', {'B'}, {'C'}, 'C = B + 1') read_a = state.add_access('A') rw_b = state.add_access('B') write_c = state.add_access('C') state.add_memlet_path(read_a, task1, dst_conn='A', memlet=dace.Memlet('A[2]')) state.add_memlet_path(task1, rw_b, src_conn='B', memlet=dace.Memlet('B[2]')) state.add_memlet_path(rw_b, task2, dst_conn='B', memlet=dace.Memlet('B[2]')) state.add_memlet_path(task2, write_c, src_conn='C', memlet=dace.Memlet('C[2]')) assert ('B' not in state.read_and_write_sets()[0])
@pytest.mark.parametrize('simulator', [wn.delayed_impact, wn.credit, wn.hiv, wn.lotka_volterra, wn.opioid, wn.world2, wn.world3, wn.zika], ids=['delayed_impact', 'credit', 'hiv', 'lotka_volterra', 'opioid', 'world2', 'world3', 'zika']) def test_dynamics_initial_state(simulator): initial_state = simulator.State() config = simulator.Config() run = simulator.simulate(initial_state, config) assert (len(run.states) == len(run.times)) assert (run.initial_state is initial_state) assert np.allclose(config.delta_t, (run.times[1] - run.times[0]))
class ContextGeneratorEval(object): def __init__(self, context_file): self.ctxs = [] with open(context_file, 'r') as f: ctx_pair = [] for line in f: ctx = line.strip().split() ctx_pair.append(ctx) if (len(ctx_pair) == 2): self.ctxs.append(ctx_pair) ctx_pair = []