code
stringlengths
101
5.91M
def idctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
    """Return the multidimensional inverse DCT of ``x`` along ``axes``.

    The inverse is computed by mapping ``type`` through the inverse-type
    table and delegating to the forward ``dctn`` implementation.
    """
    inverse_type = _inverse_typemap[type]
    resolved_shape = _good_shape(x, shape, axes)
    return _pocketfft.dctn(x, inverse_type, resolved_shape, axes, norm, overwrite_x)
class TestRegistrable(TestCase):
    """Tests for the Registrable registration/lookup mechanism.

    Note: the checked-in version had the class decorators garbled into bare
    string tuples like ``('first_subclass')``; they are restored here as
    ``@MyBaseClass.register(...)`` calls, which the test bodies clearly rely on.
    """

    def test_should_register_subclass(self):
        """by_name returns the subclass registered under the given name."""
        class MyBaseClass(Registrable):
            pass

        @MyBaseClass.register('first_subclass')
        class MyFirstSubclass(MyBaseClass):
            pass

        @MyBaseClass.register('second_subclass')
        class MySecondSubclass(MyBaseClass):
            pass

        my_subclass = MyBaseClass.by_name('second_subclass')
        self.assertEqual(MySecondSubclass, my_subclass)

    def test_should_raise_when_not_registered(self):
        """Looking up an unknown name raises NotRegisteredError."""
        class MyBaseClass(Registrable):
            pass

        with self.assertRaises(NotRegisteredError):
            MyBaseClass.by_name('my_unregistered_subclass')

    def test_should_raise_when_already_registered(self):
        """Registering the same name twice (without override) raises."""
        class MyBaseClass(Registrable):
            pass

        @MyBaseClass.register('my_duplicated_subclass')
        class MySubclass(MyBaseClass):
            pass

        with self.assertRaises(AlreadyRegisteredError):
            @MyBaseClass.register('my_duplicated_subclass')
            class MySecondSubclass(MyBaseClass):
                pass

    def test_should_override_already_registered_subclass(self):
        """override=True replaces a previously registered subclass."""
        class MyBaseClass(Registrable):
            pass

        @MyBaseClass.register('my_subclass')
        class MyOverridenSubclass(MyBaseClass):
            pass

        @MyBaseClass.register('my_subclass', override=True)
        class MySubclass(MyBaseClass):
            pass

        subclass = MyBaseClass.by_name('my_subclass')
        self.assertEqual(MySubclass, subclass)
def load(file, file_format=None, file_client_args=None, **kwargs):
    """Load serialized data from a file path or file-like object.

    ``file_format`` defaults to the path's extension; the matching handler
    from ``file_handlers`` performs the actual deserialization. String paths
    are fetched through a ``FileClient`` (text or bytes depending on the
    handler), file-like objects are read directly.

    Raises:
        TypeError: for an unsupported format, or when ``file`` is neither a
            path string nor an object with a ``read`` method.
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None and is_str(file):
        file_format = file.split('.')[-1]
    if file_format not in file_handlers:
        raise TypeError(f'Unsupported format: {file_format}')
    handler = file_handlers[file_format]

    if is_str(file):
        file_client = FileClient.infer_client(file_client_args, file)
        # Text-like handlers (json/yaml) read a decoded buffer; binary
        # handlers (pickle) read raw bytes.
        if handler.str_like:
            buffer = StringIO(file_client.get_text(file))
        else:
            buffer = BytesIO(file_client.get(file))
        with buffer as stream:
            obj = handler.load_from_fileobj(stream, **kwargs)
    elif hasattr(file, 'read'):
        obj = handler.load_from_fileobj(file, **kwargs)
    else:
        raise TypeError('"file" must be a filepath str or a file-object')
    return obj
class MBartTokenizer(PreTrainedTokenizer):
    """SentencePiece-based MBART tokenizer.

    Sequences are built as ``X </s> <lang_code>``: the language code is
    appended (as a suffix, together with EOS) to both source and target
    sequences — see ``set_src_lang_special_tokens`` /
    ``set_tgt_lang_special_tokens``.

    Fix vs. checked-in version: the ``@property`` decorators on
    ``vocab_size`` / ``src_lang`` and the ``@src_lang.setter`` decorator had
    been stripped (the setter was garbled to a bare ``_lang.setter``, a
    NameError at class-body execution); they are restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']

    # Special tokens added before/after every encoded sequence.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>',
                 cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>',
                 tokenizer_file=None, src_lang=None, tgt_lang=None,
                 sp_model_kwargs: Optional[Dict[str, Any]] = None,
                 additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False)
                      if isinstance(mask_token, str) else mask_token)
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
                         sep_token=sep_token, cls_token=cls_token, pad_token=pad_token,
                         mask_token=mask_token, tokenizer_file=None, src_lang=src_lang,
                         tgt_lang=tgt_lang,
                         additional_special_tokens=additional_special_tokens,
                         sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Mimic fairseq's id layout: control tokens first, then the
        # SentencePiece vocab shifted by ``fairseq_offset``, then the
        # language codes, then <mask> last.
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset
            for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['<mask>'] = (
            len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        )
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add tokens that are not already present.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens
                 if t not in self._additional_special_tokens]
            )
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # The C++ SentencePiece processor is not picklable; serialize its proto.
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Backward compatibility with pickles created before sp_model_kwargs existed.
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        """Total vocabulary size: SP pieces + language codes + offset + <mask>."""
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1

    @property
    def src_lang(self) -> str:
        """Current source language code (e.g. ``en_XX``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(self, token_ids_0: List[int],
                                token_ids_1: Optional[List[int]] = None,
                                already_has_special_tokens: bool = False) -> List[int]:
        """Return a 0/1 mask marking special tokens in an encoded sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1,
                already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + [0] * len(token_ids_0) + suffix_ones
        return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int],
                                         token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Wrap one or two sequences with the current prefix/suffix tokens."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # Pairs are simply concatenated; mBART does not use a separator here.
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int],
                                             token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Return an all-zero token-type list (mBART does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str,
                                  src_lang: Optional[str], tgt_lang: Optional[str],
                                  **extra_kwargs):
        """Encode inputs for translation and set the forced BOS to the target lang."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True,
                      return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token to an id, honoring the fairseq control-token table."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # SentencePiece returns 0 for unknown pieces; map that to unk.
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Convert an id back to a token, honoring the fairseq table."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Join SentencePiece tokens back into a plain string."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str,
                        filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory,
            ((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)
                and os.path.isfile(self.vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Original file is gone; rebuild it from the in-memory proto.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = 'en_XX',
                              tgt_texts: Optional[List[str]] = None,
                              tgt_lang: str = 'ro_RO', **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Use no prefix and ``[eos, src_lang_code]`` as suffix."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Use no prefix and ``[eos, tgt_lang_code]`` as suffix."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
class REDSDataset(data.Dataset):
    """REDS video dataset for training: returns a window of LQ frames, the
    center GT frame, and (optionally) quantized optical flows.

    The intricate interleaving of key filtering, LMDB backend setup and
    temporal-augmentation sampling is left byte-identical; only
    documentation has been added.
    """

    def __init__(self, opt):
        super(REDSDataset, self).__init__()
        self.opt = opt
        (self.gt_root, self.lq_root) = (Path(opt['dataroot_gt']), Path(opt['dataroot_lq']))
        # Flow data is optional; None disables the flow branch everywhere below.
        self.flow_root = (Path(opt['dataroot_flow']) if (opt['dataroot_flow'] is not None) else None)
        assert ((opt['num_frame'] % 2) == 1), f"num_frame should be odd number, but got {opt['num_frame']}"
        self.num_frame = opt['num_frame']
        self.num_half_frames = (opt['num_frame'] // 2)
        # Build "clip/frame" keys from the meta file (one clip per line).
        self.keys = []
        with open(opt['meta_info_file'], 'r') as fin:
            for line in fin:
                (folder, frame_num, _) = line.split(' ')
                self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))])
        # Remove validation clips from the training keys.
        if (opt['val_partition'] == 'REDS4'):
            val_partition = ['000', '011', '015', '020']
        elif (opt['val_partition'] == 'official'):
            val_partition = [f'{v:03d}' for v in range(240, 270)]
        else:
            raise ValueError(f"Wrong validation partition {opt['val_partition']}.Supported ones are ['official', 'REDS4'].")
        self.keys = [v for v in self.keys if (v.split('/')[0] not in val_partition)]
        # File client is created lazily in __getitem__ (worker-process safe).
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.is_lmdb = False
        if (self.io_backend_opt['type'] == 'lmdb'):
            self.is_lmdb = True
            if (self.flow_root is not None):
                self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root, self.flow_root]
                self.io_backend_opt['client_keys'] = ['lq', 'gt', 'flow']
            else:
                self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
                self.io_backend_opt['client_keys'] = ['lq', 'gt']
        # Temporal augmentation: frame-sampling intervals and random reversal.
        self.interval_list = opt['interval_list']
        self.random_reverse = opt['random_reverse']
        interval_str = ','.join((str(x) for x in opt['interval_list']))
        logger = get_root_logger()
        logger.info(f'Temporal augmentation interval list: [{interval_str}]; random reverse is {self.random_reverse}.')

    def __getitem__(self, index):
        """Return a training sample dict with 'lq', 'gt', 'key' (and 'flow')."""
        if (self.file_client is None):
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
        scale = self.opt['scale']
        gt_size = self.opt['gt_size']
        key = self.keys[index]
        (clip_name, frame_name) = key.split('/')
        center_frame_idx = int(frame_name)
        # Resample the center frame until the whole window fits in [0, 99]
        # (REDS clips have 100 frames each).
        interval = random.choice(self.interval_list)
        start_frame_idx = (center_frame_idx - (self.num_half_frames * interval))
        end_frame_idx = (center_frame_idx + (self.num_half_frames * interval))
        while ((start_frame_idx < 0) or (end_frame_idx > 99)):
            center_frame_idx = random.randint(0, 99)
            start_frame_idx = (center_frame_idx - (self.num_half_frames * interval))
            end_frame_idx = (center_frame_idx + (self.num_half_frames * interval))
        frame_name = f'{center_frame_idx:08d}'
        neighbor_list = list(range((center_frame_idx - (self.num_half_frames * interval)), ((center_frame_idx + (self.num_half_frames * interval)) + 1), interval))
        if (self.random_reverse and (random.random() < 0.5)):
            neighbor_list.reverse()
        assert (len(neighbor_list) == self.num_frame), f'Wrong length of neighbor list: {len(neighbor_list)}'
        # Load the GT (center) frame.
        if self.is_lmdb:
            img_gt_path = f'{clip_name}/{frame_name}'
        else:
            img_gt_path = ((self.gt_root / clip_name) / f'{frame_name}.png')
        img_bytes = self.file_client.get(img_gt_path, 'gt')
        img_gt = imfrombytes(img_bytes, float32=True)
        # Load the LQ frame window.
        img_lqs = []
        for neighbor in neighbor_list:
            if self.is_lmdb:
                img_lq_path = f'{clip_name}/{neighbor:08d}'
            else:
                img_lq_path = ((self.lq_root / clip_name) / f'{neighbor:08d}.png')
            img_bytes = self.file_client.get(img_lq_path, 'lq')
            img_lq = imfrombytes(img_bytes, float32=True)
            img_lqs.append(img_lq)
        # Optionally load flows: previous-direction (_p) then next-direction (_n).
        if (self.flow_root is not None):
            img_flows = []
            for i in range(self.num_half_frames, 0, (- 1)):
                if self.is_lmdb:
                    flow_path = f'{clip_name}/{frame_name}_p{i}'
                else:
                    flow_path = ((self.flow_root / clip_name) / f'{frame_name}_p{i}.png')
                img_bytes = self.file_client.get(flow_path, 'flow')
                # dx/dy are stacked vertically in one grayscale image.
                cat_flow = imfrombytes(img_bytes, flag='grayscale', float32=False)
                (dx, dy) = np.split(cat_flow, 2, axis=0)
                flow = dequantize_flow(dx, dy, max_val=20, denorm=False)
                img_flows.append(flow)
            for i in range(1, (self.num_half_frames + 1)):
                if self.is_lmdb:
                    flow_path = f'{clip_name}/{frame_name}_n{i}'
                else:
                    flow_path = ((self.flow_root / clip_name) / f'{frame_name}_n{i}.png')
                img_bytes = self.file_client.get(flow_path, 'flow')
                cat_flow = imfrombytes(img_bytes, flag='grayscale', float32=False)
                (dx, dy) = np.split(cat_flow, 2, axis=0)
                flow = dequantize_flow(dx, dy, max_val=20, denorm=False)
                img_flows.append(flow)
            # Crop LQ frames and flows together so they stay aligned.
            img_lqs.extend(img_flows)
        (img_gt, img_lqs) = paired_random_crop(img_gt, img_lqs, gt_size, scale, img_gt_path)
        if (self.flow_root is not None):
            (img_lqs, img_flows) = (img_lqs[:self.num_frame], img_lqs[self.num_frame:])
        # Append GT so flip/rotate augmentation is applied consistently.
        img_lqs.append(img_gt)
        if (self.flow_root is not None):
            (img_results, img_flows) = augment(img_lqs, self.opt['use_flip'], self.opt['use_rot'], img_flows)
        else:
            img_results = augment(img_lqs, self.opt['use_flip'], self.opt['use_rot'])
        img_results = img2tensor(img_results)
        img_lqs = torch.stack(img_results[0:(- 1)], dim=0)
        img_gt = img_results[(- 1)]
        if (self.flow_root is not None):
            img_flows = img2tensor(img_flows)
            # The center frame has no flow relative to itself: insert zeros.
            img_flows.insert(self.num_half_frames, torch.zeros_like(img_flows[0]))
            img_flows = torch.stack(img_flows, dim=0)
        # img_lqs: (t, c, h, w); img_gt: (c, h, w); key: str
        if (self.flow_root is not None):
            return {'lq': img_lqs, 'flow': img_flows, 'gt': img_gt, 'key': key}
        else:
            return {'lq': img_lqs, 'gt': img_gt, 'key': key}

    def __len__(self):
        return len(self.keys)
class OpenImagesSegChallenge2019Cfg(OpenImagesSegCfg):
    """Config for the OpenImages 2019 segmentation challenge subset."""
    # The challenge subset uses 300 segmentable classes.
    num_classes: int = 300
    # CSV mapping class ids to human-readable descriptions (relative path).
    ann_class_map: str = 'annotations/challenge-2019/challenge-2019-classes-description-segmentable.csv'
    # Per-split option dicts; default_factory so each instance gets fresh dicts.
    splits: Dict[(str, dict)] = field(default_factory=(lambda : dict(train=dict(), val=dict(), test=dict())))
def equalize(image, factor):
    """Histogram-equalize *image* (a numpy array) and return a numpy array.

    ``factor`` is accepted but unused — presumably kept so all augmentation
    ops share one signature; TODO confirm against the op registry.
    """
    equalized = ImageOps.equalize(Image.fromarray(image))
    return np.asarray(equalized)
def record_repetitive_adjacent(graph, node_weight_function, rtol=0.002, do_topo_sort=True):
    """Group topologically-adjacent graph nodes whose weights are ~equal.

    Walks nodes in topo-sort order and collects maximal runs of nodes whose
    weight is within ``rtol`` of the running mean of the current run.

    Returns:
        list[set]: sets of ``topo_sort_id``s, one per run.

    Bug fixes vs. the original:
    - when a run ended, the node that broke it was dropped instead of
      starting the next run;
    - the final run was never appended to the result.
    """
    if do_topo_sort:
        graph.topo_sort(change_graph=False)
    topo_sorted_nodes_to_weight = SortedDict(
        {n.topo_sort_id: node_weight_function(n) for n in graph.non_input_nodes})
    found_sets = []
    cur = None  # running mean weight of the current run
    rsum = 0  # running sum of weights in the current run
    cur_set = set()
    for node, weight in topo_sorted_nodes_to_weight.items():
        if cur is None:
            # First node: start the initial run.
            cur_set = {node}
            cur = rsum = weight
        elif np.allclose(weight, cur, rtol):
            rsum += weight
            cur_set.add(node)
            cur = rsum / len(cur_set)
        else:
            # Close the current run and start a new one *containing* this node.
            if cur_set:
                found_sets.append(cur_set)
            cur_set = {node}
            cur = rsum = weight
    # Flush the trailing run.
    if cur_set:
        found_sets.append(cur_set)
    return found_sets
def handleTimer():
    """Announce this node's URL and reschedule itself every 5 seconds.

    Stores the new Timer in the module-level ``timer`` global so callers can
    cancel the loop. Note: each call creates a fresh one-shot Timer; the
    loop stops as soon as a scheduled call is cancelled.
    """
    global timer
    print('publishing my url')
    timer = threading.Timer(5, handleTimer)
    timer.start()
def _str2bool(value):
    """argparse-friendly bool converter ('false'/'0'/'no' -> False, etc.)."""
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError(f'invalid boolean value: {value!r}')


def get_parser():
    """Build the CLI parser for the train/eval experiment runner.

    Bug fix: ``--clear-only`` previously used ``type=bool``, which returns
    True for ANY non-empty string (including "False"); it now uses a real
    string-to-bool converter, keeping the same ``--clear-only <value>``
    call syntax and the default of False.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--run-type', choices=['train', 'eval'], required=True,
                        help='run type of the experiment (train or eval)')
    parser.add_argument('--exp-config', type=str, required=True,
                        help='path to config yaml containing info about experiment')
    parser.add_argument('--ckpt-path', default=None, type=str,
                        help='full path to a ckpt (for eval or resumption)')
    parser.add_argument('--clear-only', default=False, type=_str2bool)
    # Everything after the known flags is forwarded as config overrides.
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options from command line')
    return parser
def create_py_map(sdfg):
    """Generate and persist the Python mapping for *sdfg*.

    Returns:
        tuple: (build folder, source files of the SDFG, whether the SDFG
        was made with the API).
    """
    mapper = MapPython(sdfg.name)
    made_with_api = mapper.mapper(sdfg)
    build_dir = sdfg.build_folder
    # Persist the computed map next to the SDFG's build artifacts.
    save('py', sdfg.name, mapper.map, build_dir)
    return (build_dir, get_src_files(sdfg), made_with_api)
def get_sampling(im):
    """Return the subsampling id for a JPEG image, or -1 when unknown.

    Images without ``layers`` info, or with 1 (grayscale) / 4 (CMYK-style)
    layers, carry no chroma subsampling and yield -1.
    """
    if not hasattr(im, 'layers') or im.layers in (1, 4):
        return -1
    layer = im.layer
    # Concatenate the (h, v) sampling factors of the first three layers
    # and look the tuple up in the known-samplings table.
    key = layer[0][1:3] + layer[1][1:3] + layer[2][1:3]
    return samplings.get(key, -1)
def _cuda(self, device=None, non_blocking=False, **kwargs):
    """Return a CUDA copy of this tensor-like object on ``device``.

    Fast-path: if the object is already on the requested CUDA device it is
    returned unchanged. Sparse objects are rebuilt from their indices and
    values; dense ones are allocated and copied. Left byte-identical: the
    device-selection and sparse branching is order-sensitive.
    """
    # Translate the legacy 'async' kwarg (if present) into non_blocking.
    non_blocking = _get_async_or_non_blocking('cuda', non_blocking, kwargs)
    if self.is_cuda:
        if (device is None):
            device = torch.cuda.current_device()
        if (self.get_device() == device):
            # Already on the target device: no copy needed.
            return self
    elif (device is None):
        # CPU source with no explicit target: -1 means "current device".
        device = (- 1)
    with torch.cuda.device(device):
        if self.is_sparse:
            # Rebuild a sparse CUDA object of the matching type from its parts.
            new_type = getattr(torch.cuda.sparse, self.__class__.__name__)
            indices = self._indices().cuda(device, non_blocking)
            values = self._values().cuda(device, non_blocking)
            return new_type(indices, values, self.size())
        else:
            new_type = getattr(torch.cuda, self.__class__.__name__)
            return new_type(self.size()).copy_(self, non_blocking)
@DatasetReader.register('span_annotation')
class SpanAnnotationReader(DatasetReader):
    """Reads CoNLL ``gold_conll`` files carrying both phrase-structure
    constituents and SRL annotations, producing span-based instances with
    spans up to ``max_span_width`` wide.

    Fixes vs. the checked-in version: the stripped
    ``@DatasetReader.register('span_annotation')`` decorator and the
    ``@classmethod`` on ``from_params`` are restored, and a leftover
    ``ipdb.set_trace()`` debug trap was removed from ``analyze_span_width``.
    """

    def __init__(self, max_span_width: int,
                 token_indexers: Dict[(str, TokenIndexer)] = None) -> None:
        self.max_span_width = max_span_width
        self._token_indexers = (token_indexers or {'tokens': SingleIdTokenIndexer()})
        # Bookkeeping for corpus statistics (see analyze_* methods).
        self._tag_widths: Dict[(str, List[int])] = {}
        self._total_args = 0
        self._non_constit_args = 0
        self._constits_args = {}

    def calculate_span_size(self, tag_sequence: List[str]):
        """Record the width of every run of identical tags into _tag_widths."""
        def remove_bio(tag):
            return tag[2:] if (tag.startswith('B-') or tag.startswith('I-')) else tag
        sizes = [(remove_bio(tag_sequence[0]), 1)]
        for idx, tag in enumerate(tag_sequence[1:], 1):
            # A new span starts on a tag change or an explicit B- marker.
            if (tag != tag_sequence[idx - 1]) or tag.startswith('B-'):
                sizes.append((remove_bio(tag), 1))
            else:
                last_tag, length = sizes.pop()
                sizes.append((last_tag, length + 1))
        for tag, length in sizes:
            if tag not in self._tag_widths:
                self._tag_widths[tag] = []
            self._tag_widths[tag].append(length)

    def _convert_bio_into_matrix(self, tag_sequence: List[str]) -> List[List[str]]:
        """Convert a BIO tag sequence into an (end-position x width) matrix.

        ``spans[end][width]`` holds the label of the span ending at ``end``
        with that width; '*' marks "no span". Spans wider than
        ``max_span_width`` are broken at the width limit.
        """
        def remove_bio(tag):
            return tag[2:] if (tag.startswith('B-') or tag.startswith('I-')) else tag
        spans = [['*' for _ in range(self.max_span_width)] for _ in range(len(tag_sequence))]
        start_span = 0
        current_tag = tag_sequence[0]
        for pos, tag in enumerate(tag_sequence[1:], 1):
            width = pos - start_span
            if tag.startswith('B-') or ((tag == 'O') and (tag_sequence[pos - 1] != 'O')):
                # Close the span that ended at pos-1.
                width = (pos - 1) - start_span
                spans[pos - 1][width] = remove_bio(current_tag)
                start_span = pos
                current_tag = tag
                width = pos - start_span
            elif width == (self.max_span_width - 1):
                # Force-close spans that hit the width limit.
                spans[pos][width] = remove_bio(current_tag)
                start_span = pos + 1
                if (pos + 1) < len(tag_sequence):
                    current_tag = tag_sequence[pos + 1]
        spans[len(tag_sequence) - 1][(len(tag_sequence) - 1) - start_span] = remove_bio(tag_sequence[-1])
        return spans

    def _process_sentence(self, sentence_tokens: List[str],
                          constits: Dict[(Tuple[(int, int)], str)],
                          verbal_predicates: List[int],
                          predicate_argument_labels: List[List[str]]) -> List[Instance]:
        """Build one Instance per verbal predicate (or one dummy Instance)."""
        default = '*'

        def get_new_label(original: str, newer: str):
            # Stack multiple constituent labels over the same span with '|'.
            return newer if (original == default) else '{}|{}'.format(newer, original)

        constit_matrix = [[default for _ in range(self.max_span_width)] for _ in sentence_tokens]
        for span in constits:
            start, end = span
            diff = end - start
            if diff >= self.max_span_width:
                continue
            constit_matrix[end][diff] = get_new_label(constit_matrix[end][diff], constits[span])
        tokens = [Token(t) for t in sentence_tokens]
        if not verbal_predicates:
            # No predicates: emit a single all-O instance anchored at token 0.
            tags = ['O' for _ in sentence_tokens]
            verb_label = [0 for _ in sentence_tokens]
            srl_args = self._convert_bio_into_matrix(tags)
            dummy_verb_index = 0
            return [self.text_to_instance(tokens, verb_label, dummy_verb_index,
                                          constit_matrix, srl_args)]
        instances = []
        for verb_index, tags in zip(verbal_predicates, predicate_argument_labels):
            verb_label = [0 for _ in sentence_tokens]
            verb_label[verb_index] = 1
            srl_args = self._convert_bio_into_matrix(tags)
            instances.append(self.text_to_instance(tokens, verb_label, verb_index,
                                                   constit_matrix, srl_args))
            self.find_overlap(srl_args, constit_matrix)
        return instances

    def find_overlap(self, srl_args, constit_matrix):
        """Accumulate statistics on SRL-argument / constituent overlap."""
        for j in range(len(srl_args)):
            for diff in range(len(srl_args[0])):
                arg = srl_args[j][diff]
                constit = constit_matrix[j][diff]
                if (arg == '*') or (arg == 'V') or (arg == 'O'):
                    continue
                self._total_args += 1
                if constit == '*':
                    if (j - diff) != 0:
                        self._non_constit_args += 1
                    continue
                if constit not in self._constits_args:
                    self._constits_args[constit] = 0
                self._constits_args[constit] += 1

    def read(self, file_path: str):
        """Walk *file_path* for ``gold_conll`` files and parse all sentences."""
        file_path = cached_path(file_path)
        instances = []
        # Per-sentence state; reset at every blank/comment line.
        sentence: List[str] = []
        open_constits: List[Tuple[(str, int)]] = []
        constits: Dict[(Tuple[(int, int)], str)] = {}
        verbal_predicates: List[int] = []
        predicate_argument_labels: List[List[str]] = []
        current_span_label: List[Optional[str]] = []
        logger.info('Reading phrase-syntax and SRL instances from dataset files at: %s', file_path)
        for root, _, files in tqdm.tqdm(list(os.walk(file_path))):
            for data_file in files:
                if not data_file.endswith('gold_conll'):
                    continue
                with codecs.open(os.path.join(root, data_file), 'r', encoding='utf8') as open_file:
                    for line in open_file:
                        line = line.strip()
                        if (line == '') or line.startswith('#'):
                            # Sentence boundary: flush accumulated state.
                            if not sentence:
                                continue
                            instances.extend(self._process_sentence(
                                sentence, constits, verbal_predicates,
                                predicate_argument_labels))
                            sentence = []
                            open_constits = []
                            constits = {}
                            verbal_predicates = []
                            predicate_argument_labels = []
                            current_span_label = []
                            continue
                        conll_components = line.split()
                        word = conll_components[3]
                        sentence.append(word)
                        word_index = len(sentence) - 1
                        # Column 5 carries bracketed phrase-structure labels.
                        syn_label = conll_components[5]
                        if syn_label != '*':
                            if '(' in syn_label:
                                starts = syn_label.split('(')
                                for con in starts[1:]:
                                    clabel = con.strip(')').strip('*')
                                    open_constits.append((clabel, word_index))
                            if ')' in syn_label:
                                ends = syn_label.count(')')
                                for _ in range(ends):
                                    assert open_constits
                                    clabel, start = open_constits.pop()
                                    if (start, word_index) in constits:
                                        clabel = '{}|{}'.format(constits[(start, word_index)], clabel)
                                    constits[(start, word_index)] = clabel
                                    if clabel not in self._tag_widths:
                                        self._tag_widths[clabel] = []
                                    self._tag_widths[clabel].append((word_index - start) + 1)
                        if word_index == 0:
                            # Columns 11..-1 are one SRL annotation per predicate.
                            predicate_argument_labels = [[] for _ in conll_components[11:(- 1)]]
                            current_span_label = [None for _ in conll_components[11:(- 1)]]
                            prev_span_label = [None for _ in conll_components[11:(- 1)]]
                        num_annotations = len(predicate_argument_labels)
                        is_verbal_predicate = False
                        for annotation_index in range(num_annotations):
                            annotation = conll_components[11 + annotation_index]
                            if '(V' in annotation:
                                is_verbal_predicate = True
                            label = annotation.strip('()*')
                            if '(' in annotation:
                                bio_label = 'B-' + label
                                predicate_argument_labels[annotation_index].append(bio_label)
                                current_span_label[annotation_index] = label
                            elif current_span_label[annotation_index] is not None:
                                bio_label = 'I-' + current_span_label[annotation_index]
                                predicate_argument_labels[annotation_index].append(bio_label)
                            else:
                                predicate_argument_labels[annotation_index].append('O')
                            prev_span_label[annotation_index] = current_span_label[annotation_index]
                            if ')' in annotation:
                                current_span_label[annotation_index] = None
                        if is_verbal_predicate:
                            verbal_predicates.append(word_index)
        if not instances:
            raise ConfigurationError('No instances were read from the given filepath {}. Is the path correct?'.format(file_path))
        logger.info('# instances = %d', len(instances))
        return Dataset(instances)

    def analyze_overlap(self):
        """Log the fraction of SRL args that are / are not constituents."""
        logger.info('%% non-constit SRL args = %f',
                    (self._non_constit_args * 100.0) / self._total_args)
        from collections import OrderedDict
        from operator import itemgetter
        x = {k: (self._constits_args[k] / self._total_args) for k in self._constits_args}
        d = OrderedDict(sorted(x.items(), key=itemgetter(1), reverse=True))
        print(d)

    def analyze_span_width(self):
        """Print the cumulative span-width distribution and average width."""
        total_tag_width = 0.0
        total_spans = 0
        widths = defaultdict(int)
        for tag in self._tag_widths:
            if tag in ['*', 'V', 'O']:
                continue
            total_tag_width += sum(self._tag_widths[tag])
            total_spans += len(self._tag_widths[tag])
            for l in self._tag_widths[tag]:
                widths[l] += 1
        x = []
        for l in sorted(widths):
            if len(x) == 0:
                x.append((l, (widths[l] * 1.0) / total_spans))
            else:
                x.append((l, x[-1][1] + ((widths[l] * 1.0) / total_spans)))
        print(x[-1])
        print('avg tag length = {}'.format(total_tag_width / total_spans))
        # NOTE: removed a leftover `import ipdb; ipdb.set_trace()` debug trap.

    def text_to_instance(self, tokens: List[Token], verb_label: List[int],
                         verb_index: int, constituents: List[List[str]] = None,
                         srl_args: List[List[str]] = None) -> Instance:
        """Assemble the span fields for one (sentence, predicate) pair."""
        text_field = TextField(tokens, token_indexers=self._token_indexers)
        verb_field = SequenceLabelField(verb_label, text_field)
        target_field = IndexField(verb_index, text_field)
        span_starts: List[Field] = []
        span_ends: List[Field] = []
        # One mask entry per (end, width) cell; 0 where the span would
        # start before the sentence.
        span_mask: List[int] = [1 for _ in range(len(tokens) * self.max_span_width)]
        span_labels: Optional[List[str]] = [] if (srl_args is not None) else None
        constit_labels: Optional[List[str]] = [] if (constituents is not None) else None
        for j in range(len(tokens)):
            for diff in range(self.max_span_width):
                width = diff
                if (j - diff) < 0:
                    span_mask[(j * self.max_span_width) + diff] = 0
                    width = j
                span_starts.append(IndexField(j - width, text_field))
                span_ends.append(IndexField(j, text_field))
                if srl_args:
                    span_labels.append(srl_args[j][diff])
                if constituents:
                    constit_labels.append(constituents[j][diff])
        start_fields = ListField(span_starts)
        end_fields = ListField(span_ends)
        span_mask_fields = SequenceLabelField(span_mask, start_fields)
        fields: Dict[(str, Field)] = {'tokens': text_field,
                                      'verb_indicator': verb_field,
                                      'target_index': target_field,
                                      'span_starts': start_fields,
                                      'span_ends': end_fields,
                                      'span_mask': span_mask_fields}
        if srl_args:
            fields['tags'] = SequenceLabelField(span_labels, start_fields)
        if constituents:
            fields['constituents'] = SequenceLabelField(constit_labels, start_fields,
                                                        label_namespace='constit_labels')
        return Instance(fields)

    @classmethod
    def from_params(cls, params: Params) -> 'SpanAnnotationReader':
        """Construct the reader from a Params blob (config-file entry)."""
        token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
        max_span_width = params.pop('max_span_width')
        params.assert_empty(cls.__name__)
        return SpanAnnotationReader(token_indexers=token_indexers,
                                    max_span_width=max_span_width)
class Cache(object):
    """Filesystem cache rooted at a single base directory."""

    def __init__(self, base):
        """Create *base* if missing; warn when it is not user-private."""
        if not os.path.isdir(base):
            os.makedirs(base)
        # 63 == 0o77: any group/other permission bit set means "not private".
        if os.stat(base).st_mode & 63 != 0:
            logger.warning("Directory '%s' is not private", base)
        self.base = os.path.abspath(os.path.normpath(base))

    def prefix_to_dir(self, prefix):
        """Map a resource prefix to its cache subdirectory name."""
        return path_to_cache_dir(prefix)

    def clear(self):
        """Delete everything under the cache root.

        Returns the list of paths that could not be removed.
        """
        not_removed = []
        for entry in os.listdir(self.base):
            full_path = os.path.join(self.base, entry)
            try:
                if os.path.islink(full_path) or os.path.isfile(full_path):
                    os.remove(full_path)
                elif os.path.isdir(full_path):
                    shutil.rmtree(full_path)
            except Exception:
                # Best-effort: report failures instead of raising.
                not_removed.append(full_path)
        return not_removed
def adjust_learning_rate_D(optimizer, i_iter):
    """Apply poly-decayed LR to the discriminator optimizer.

    The first param group gets the base LR; a second group (if present)
    gets 10x that rate.
    """
    lr = lr_poly(args.learning_rate_D, i_iter, args.num_steps, args.power)
    groups = optimizer.param_groups
    groups[0]['lr'] = lr
    if len(groups) > 1:
        groups[1]['lr'] = lr * 10
def from_fraction_field(L, x):
    """Lift the fraction *x* into *L* when its denominator is a unit there.

    Raises:
        TypeError: if the denominator is not invertible in L.
    """
    denom = L(x.denominator())
    if not denom.is_unit():
        raise TypeError('fraction must have unit denominator')
    numer = L(x.numerator())
    return numer * denom.inverse_of_unit()
class MisGANImputationSampler(BaseImputationSampler):
    """Imputation sampler that fills masked entries via a MisGAN imputer."""

    def __init__(self, data_loader, imputer, batch_size=256):
        super().__init__(data_loader)
        self.imputer = imputer
        # Pre-allocated noise buffer fed to the imputer; lazily resized in
        # impute() when a batch with a different shape arrives.
        self.impu_noise = torch.FloatTensor(batch_size, 3, 64, 64).to(device)

    def impute(self, data, mask):
        """Run the imputer on *data*/*mask* with a fresh uniform noise draw."""
        noise = self.impu_noise
        if data.shape[0] != noise.shape[0]:
            noise.resize_(data.shape)
        noise.uniform_()
        return self.imputer(data, mask, noise)
def invweibull_pdf(x, c):
    """Inverse-Weibull (Fréchet) density: c * x**-(c+1) * exp(-x**-c).

    Returns 0.0 outside the support (x <= 0).
    """
    if x <= 0:
        return 0.0
    # Evaluate in log space for the power term, matching the original form.
    log_kernel = -(c + 1) * math.log(x) - x ** (-c)
    return c * math.exp(log_kernel)
class I1Pool(nn.Module):
    """Pool *x* guided by *guide* via the I1PoolFunction autograd op."""

    def forward(self, x, guide):
        # The custom op requires contiguous inputs with matching shapes.
        pooled_input = x.contiguous()
        pooled_guide = guide.expand_as(x).contiguous()
        return I1PoolFunction.apply(pooled_input, pooled_guide)
class RAM():
    """Track energy consumption attributable to this process's RAM use."""

    def __init__(self, ignore_warnings=False):
        self._consumption = 0
        self._ignore_warnings = ignore_warnings
        self._start = time.time()

    def get_consumption(self):
        """Refresh and return the accumulated consumption."""
        self.calculate_consumption()
        return self._consumption

    def _get_memory_used(self):
        """Return the GiB of RAM currently used by this process."""
        current_pid = os.getpid()
        memory_percent = 0
        for proc in psutil.process_iter():
            try:
                pinfo = proc.as_dict(attrs=['name', 'pid', 'memory_percent'])
                if pinfo['pid'] == current_pid:
                    memory_percent = float(pinfo['memory_percent'])
            except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
                # Processes may vanish or be inaccessible while iterating.
                pass
        total_memory = psutil.virtual_memory().total / (1024 ** 3)
        return memory_percent * total_memory / 100

    def calculate_consumption(self):
        """Accumulate energy used since the previous call; return the delta."""
        elapsed = time.time() - self._start
        self._start = time.time()
        # 3/8 is the assumed W-per-GiB power factor — presumably an
        # empirical constant; verify against the project's methodology.
        delta = self._get_memory_used() * (3 / 8) * elapsed / FROM_WATTs_TO_kWATTh
        self._consumption += delta
        return delta
def make_dir(filename):
    """Ensure the parent directory of *filename* exists.

    Bug fixes: uses ``exist_ok=True`` to avoid the check-then-create race,
    and skips creation when ``filename`` has no directory component (the
    original called ``os.makedirs('')`` and crashed on bare filenames).
    """
    folder = os.path.dirname(filename)
    if folder:
        os.makedirs(folder, exist_ok=True)
def _get_lvis_instances_meta_v0_5():
    """Build the metadata dict (``thing_classes``) for LVIS v0.5.

    Validates that the category table has exactly 1230 entries with
    contiguous 1-based ids, then returns the classes ordered by id
    (first synonym used as the class name).
    """
    assert len(LVIS_V0_5_CATEGORIES) == 1230
    cat_ids = [cat['id'] for cat in LVIS_V0_5_CATEGORIES]
    assert min(cat_ids) == 1 and max(cat_ids) == len(cat_ids), \
        'Category ids are not in [1, #categories], as expected'
    ordered = sorted(LVIS_V0_5_CATEGORIES, key=lambda cat: cat['id'])
    return {'thing_classes': [cat['synonyms'][0] for cat in ordered]}
# Fix: the original had its decorators garbled to bare `.parametrize(...)`
# fragments (invalid syntax); the stacked pytest.mark.parametrize decorators
# are restored, producing the version x schema test matrix.
@pytest.mark.parametrize('version', ['1.0.0'])
@pytest.mark.parametrize('schema', ['defs.json', 'measurement.json', 'model.json', 'workspace.json'])
def test_get_schema(version, schema):
    """Every bundled JSON schema must load for each supported version."""
    assert pyhf.schema.load_schema(f'{version}/{schema}')
class ConcatPoincareLayer(nn.Module):
    """Hyperbolic 'concatenation': Möbius-add two hyperbolic linear maps."""

    def __init__(self, d1, d2, d_out, c):
        super(ConcatPoincareLayer, self).__init__()
        self.d1 = d1
        self.d2 = d2
        self.d_out = d_out
        # Each input gets its own bias-free hyperbolic projection into d_out.
        self.l1 = HypLinear(d1, d_out, bias=False, c=c)
        self.l2 = HypLinear(d2, d_out, bias=False, c=c)
        self.c = c

    def forward(self, x1, x2, c=None):
        """Combine the two projected inputs with Möbius addition."""
        curvature = self.c if c is None else c
        return pmath.mobius_add(self.l1(x1), self.l2(x2), c=curvature)

    def extra_repr(self):
        return 'dims {} and {} ---> dim {}'.format(self.d1, self.d2, self.d_out)
def hide_available_pandas(monkeypatch):
    """Patch builtins.__import__ so that importing pandas raises ImportError.

    All other imports are delegated to the real importer. Presumably used
    as a pytest fixture (the decorator is not visible here) — confirm at
    the use site.
    """
    real_import = builtins.__import__

    def fake_import(name, *args, **kwargs):
        if name == 'pandas':
            raise ImportError()
        return real_import(name, *args, **kwargs)

    monkeypatch.setattr(builtins, '__import__', fake_import)
def test_data_frame_ListOffsetArray_NumpyArray():
    """Round-trip a ListOffsetArray(NumpyArray) through the cling generator."""
    offsets = ak.index.Index(np.array([1, 4, 4, 6, 7], np.int64))
    content = ak.contents.numpyarray.NumpyArray(
        np.array([6.6, 1.1, 2.2, 3.3, 4.4, 5.5, 7.7]))
    layout = ak.contents.listoffsetarray.ListOffsetArray(offsets, content)
    generator = ak._connect.cling.togenerator(layout.form, flatlist_as_rvec=False)
    lookup = ak._lookup.Lookup(layout)
    generator.generate(compiler)
    roundtripped = generator.tolayout(lookup, 0, ())
    # The compiled reader must reproduce the array exactly.
    assert layout.to_list() == roundtripped.to_list()
class UnpairedAudioTextConfig(FairseqDataclass):
    """Dataset/task config for unpaired (unsupervised) audio-text training."""
    data: str = field(default=MISSING, metadata={'help': 'path to data directory containing audio'})
    text_data: str = field(default=MISSING, metadata={'help': 'path to data directory containing text'})
    # Optional length cap on examples; None means unlimited — units not
    # visible here (frames vs tokens), TODO confirm at the use site.
    max_length: Optional[int] = None
    labels: Optional[str] = field(default=None, metadata={'help': 'extension of the label file to load, used for fine-tuning'})
    unfiltered: bool = field(default=False, metadata={'help': 'load data with _unfiltered suffix'})
    ctc_eval: bool = field(default=False, metadata={'help': 'eval UER as if computed by CTC'})
    sort_by_length: bool = field(default=True, metadata={'help': 'sort examples by length of audio timesteps'})
    shuffle: bool = field(default=True, metadata={'help': 'shuffle examples'})
    append_eos: bool = field(default=False, metadata={'help': 'append eos'})
    uppercase: Optional[bool] = field(default=False, metadata={'help': 'uppercase for LM score computation'})
    skipwords: Optional[str] = field(default='', metadata={'help': 'comma-separated words to be removed for LM score computation'})
    # Optional path to a KenLM language model used for scoring.
    kenlm_path: Optional[str] = None
    # Exponent applied in vocabulary-usage scoring — presumably a scoring
    # weight; verify against the consumer of this config.
    vocab_usage_power: float = 2
    # Optional Kaldi word-level decoder configuration and word-level LM path.
    word_decoder_config: Optional[KaldiDecoderConfig] = None
    word_kenlm_path: Optional[str] = None
    # NOTE(review): shared mutable default instance; fairseq dataclass
    # machinery presumably copies per-instance — confirm.
    decoding_config: DecodingConfig = DecodingConfig()
def _get_step_context(step):
    """Translate an ExecutionStep's proto into a brewer context call.

    Returns (context_call_or_None, needs_nesting): the first element is a
    `call(...)` describing how the step runs ('loop', 'parallel', 'run_once'),
    the second says whether substeps must be emitted as a nested group.
    """
    proto = step.Proto()
    if proto.should_stop_blob:
        # Condition-driven loop: iterate until the stop blob fires.
        return (call('loop'), False)
    if proto.num_iter and proto.num_iter != 1:
        return (call('loop', [proto.num_iter]), False)
    if proto.num_concurrent_instances > 1:
        multi_substep = len(step.Substeps()) > 1
        return (call('parallel', [('num_instances', proto.num_concurrent_instances)]), multi_substep)
    if proto.concurrent_substeps and len(step.Substeps()) > 1:
        return (call('parallel'), True)
    if proto.report_net:
        return (call('run_once'), False)
    return (None, False)
class CiscoUmbrellaGetLogDetails(VirtualFunctionTool):
    """Declarative tool spec: fetch the details of one Cisco Umbrella security
    log by id. Pure metadata — execution is handled by the framework."""
    name = 'CiscoUmbrellaGetLogDetails'
    summary = 'Get detailed information about a specific security log.'
    # Single required argument identifying the log record.
    parameters: List[ArgParameter] = [{'name': 'log_id', 'type': 'string', 'description': 'The unique identifier of the log.', 'required': True}]
    # Shape of the successful response object.
    returns: List[ArgReturn] = [{'name': 'log_details', 'type': 'object', 'description': "An object containing 'log_id' (string), 'timestamp' (string, formatted as 'YYYY-MM-DD HH:mm:ss'), 'event_type' (string, one of ['allowed', 'blocked']), 'domain' (string), 'user' (string), 'device' (string), 'location' (string)."}]
    # Declared failure modes: malformed id vs. missing record.
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': "The 'log_id' argument is not a valid log identifier."}, {'name': 'NotFoundException', 'description': 'The log does not exist.'}]
def get_attr(model: torch.jit.RecursiveScriptModule, node: torch.Node):
    """Resolve the object a TorchScript graph node refers to.

    Walks a chain of prim::GetAttr nodes back to the graph input
    (prim::Param) and returns ``(resolved_object, dotted_path)``, where
    dotted_path is '' at the root.

    Raises:
        ValueError: for any other node kind. The original code fell off the
        end and implicitly returned None, which made recursive callers fail
        later with an opaque tuple-unpacking TypeError.
    """
    kind = node.kind()
    if kind == 'prim::Param':
        # Root of the attribute chain: the module itself, empty path.
        return (model, '')
    if kind == 'prim::GetAttr':
        name = node.s('name')
        (obj, parent) = get_attr(model, node.input().node())
        # Build 'parent.name', omitting the dot at the root.
        path = ((parent + '.') + name) if len(parent) > 0 else name
        return (getattr(obj, name), path)
    raise ValueError(f'get_attr: unsupported node kind {kind!r}; expected prim::Param or prim::GetAttr')
def CalculateMoranAutoMutability(ProteinSequence):
    """Moran autocorrelation descriptors of a protein sequence computed
    against the relative-mutability amino-acid property scale."""
    return CalculateEachMoranAuto(ProteinSequence, _Mutability, '_Mutability')
def quaternion_linear_rotation_op(input, r_weight, i_weight, j_weight, k_weight, bias, scale, zero_kernel):
    """Apply a quaternion-rotation linear operation to `input`.

    The quaternion (r, i, j, k) weights are normalized and expanded into a
    rotation-matrix-style kernel (optionally scaled), which is then applied
    with matmul/addmm. `zero_kernel` supplies the zero blocks used to pad
    the kernel; bias/scale participation is keyed off their requires_grad
    flags — NOTE(review): using requires_grad as an "is enabled" switch is
    unusual; confirm callers rely on exactly this.
    """
    # Normalize the quaternion; the small epsilon avoids division by zero.
    square_r = (r_weight * r_weight)
    square_i = (i_weight * i_weight)
    square_j = (j_weight * j_weight)
    square_k = (k_weight * k_weight)
    norm = (torch.sqrt((((square_r + square_i) + square_j) + square_k)) + 0.0001)
    r_n_weight = (r_weight / norm)
    i_n_weight = (i_weight / norm)
    j_n_weight = (j_weight / norm)
    k_n_weight = (k_weight / norm)
    norm_factor = 2.0
    # Pre-scaled products for the quaternion -> rotation-matrix entries.
    square_i = (norm_factor * (i_n_weight * i_n_weight))
    square_j = (norm_factor * (j_n_weight * j_n_weight))
    square_k = (norm_factor * (k_n_weight * k_n_weight))
    ri = ((norm_factor * r_n_weight) * i_n_weight)
    rj = ((norm_factor * r_n_weight) * j_n_weight)
    rk = ((norm_factor * r_n_weight) * k_n_weight)
    ij = ((norm_factor * i_n_weight) * j_n_weight)
    ik = ((norm_factor * i_n_weight) * k_n_weight)
    jk = ((norm_factor * j_n_weight) * k_n_weight)
    if scale.requires_grad:
        # Learnable scale: fold it into each rotation row.
        rot_kernel_1 = torch.cat([zero_kernel, (scale * (1.0 - (square_j + square_k))), (scale * (ij - rk)), (scale * (ik + rj))], dim=1)
        rot_kernel_2 = torch.cat([zero_kernel, (scale * (ij + rk)), (scale * (1.0 - (square_i + square_k))), (scale * (jk - ri))], dim=1)
        rot_kernel_3 = torch.cat([zero_kernel, (scale * (ik - rj)), (scale * (jk + ri)), (scale * (1.0 - (square_i + square_j)))], dim=1)
    else:
        rot_kernel_1 = torch.cat([zero_kernel, (1.0 - (square_j + square_k)), (ij - rk), (ik + rj)], dim=1)
        rot_kernel_2 = torch.cat([zero_kernel, (ij + rk), (1.0 - (square_i + square_k)), (jk - ri)], dim=1)
        rot_kernel_3 = torch.cat([zero_kernel, (ik - rj), (jk + ri), (1.0 - (square_i + square_j))], dim=1)
    # Stack a zero row on top of the three rotation rows.
    zero_kernel2 = torch.cat([zero_kernel, zero_kernel, zero_kernel, zero_kernel], dim=1)
    global_rot_kernel = torch.cat([zero_kernel2, rot_kernel_1, rot_kernel_2, rot_kernel_3], dim=0)
    if (input.dim() == 2):
        # 2-D fast path: fused bias+matmul when bias is enabled.
        if bias.requires_grad:
            return torch.addmm(bias, input, global_rot_kernel)
        else:
            return torch.mm(input, global_rot_kernel)
    else:
        # Batched input: broadcasted matmul, bias added separately.
        output = torch.matmul(input, global_rot_kernel)
        if bias.requires_grad:
            return (output + bias)
        else:
            return output
def compute_temporal_iou(pred, gt):
    """Temporal IoU of two [start, end] spans.

    For 1-D intervals the union equals the hull whenever they overlap, and
    when they are disjoint the intersection is 0 — so using the hull as the
    denominator is exact. Returns 0 when the hull is degenerate (both spans
    are the same single point).
    """
    overlap = min(pred[1], gt[1]) - max(pred[0], gt[0])
    if overlap < 0:
        overlap = 0
    hull = max(pred[1], gt[1]) - min(pred[0], gt[0])
    if hull == 0:
        return 0
    return (1.0 * overlap) / hull
def register_Ns3MmWaveMacPduHeader_methods(root_module, cls):
    """Register constructors and methods of ns3::MmWaveMacPduHeader on its
    pybindgen class wrapper (mechanically generated binding code)."""
    # Copy, default and (frameNo, sfNo, slotNo) constructors.
    cls.add_constructor([param('ns3::MmWaveMacPduHeader const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint16_t', 'frameNo'), param('uint8_t', 'sfNo'), param('uint8_t', 'slotNo')])
    cls.add_method('AddSubheader', 'void', [param('ns3::MacSubheader', 'rlcPduInfo')])
    # ns3::Header virtual serialization interface.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'i')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetSubheaders', 'std::vector< ns3::MacSubheader >', [])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'i')], is_const=True, is_virtual=True)
    cls.add_method('SetSubheaders', 'void', [param('std::vector< ns3::MacSubheader >', 'macSubheaderList')])
    return
class Dataset():
    """Thin wrapper around BasicDataset that swaps in a per-field observation
    function before delegating reads; all other attributes fall through to
    the wrapped dataset."""

    def __init__(self, name, t0, t1, dt, precision, obs_func=None):
        self.dataset = BasicDataset(name, t0, t1, dt, precision)
        self.obs_func = obs_func

    def write(self, func, t, name):
        # Writes are delegated untouched.
        self.dataset.write(func, t, name=name)

    def read(self, func, t, name='v'):
        if self.obs_func is not None:
            # obs_func[0] handles the default field, obs_func[1] the 's' field.
            selector = 1 if name == 's' else 0
            self.dataset.obs_func = self.obs_func[selector]
        return self.dataset.read(func, t, name=name)

    def __getattr__(self, item):
        # Anything not defined here comes from the wrapped dataset.
        return getattr(self.dataset, item)
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Register operators, constructors and methods of ns3::Ipv6Prefix on its
    pybindgen class wrapper (mechanically generated binding code)."""
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    # Constructors: default, raw bytes, string, prefix length, copy, pointer.
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t *', 'prefix')])
    cls.add_constructor([param('char const *', 'prefix')])
    cls.add_constructor([param('uint8_t', 'prefix')])
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True)
    cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True)
    cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True)
    cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True)
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    return
class RAND_reg(atomic_reg):
    """ctypes bit-field layout of the RAND atomic command register.

    Each `_fields_` entry is (name, storage type, bit width); field order and
    widths define the wire layout of the 1024-bit command word. The bare
    annotations below mirror the fields for static type checking.
    """
    OP_NAME = 'RAND'
    # NOTE(review): field names follow opdN (operand N) / res (result)
    # conventions; exact hardware semantics of each field are defined by the
    # TPU ISA spec, not visible here.
    _fields_ = [
        ('cmd_short', ctypes.c_uint64, 1), ('op_code', ctypes.c_uint64, 16), ('cmd_id_dep', ctypes.c_uint64, 23), ('dbg_mode', ctypes.c_uint64, 1),
        ('tsk_typ', ctypes.c_uint64, 4), ('tsk_eu_typ', ctypes.c_uint64, 5), ('opt_rq', ctypes.c_uint64, 1), ('tsk_opd_num', ctypes.c_uint64, 2),
        ('pad_mode', ctypes.c_uint64, 2), ('opt_res0_sign', ctypes.c_uint64, 1), ('rsvd0', ctypes.c_uint64, 3), ('pwr_step', ctypes.c_uint64, 4),
        ('intr_en', ctypes.c_uint64, 1), ('opt_res_add', ctypes.c_uint64, 1), ('opt_relu', ctypes.c_uint64, 1), ('opt_left_tran', ctypes.c_uint64, 1),
        ('opt_opd4_const', ctypes.c_uint64, 1), ('opt_kernel_rotate', ctypes.c_uint64, 1), ('opt_opd0_sign', ctypes.c_uint64, 1), ('opt_opd1_sign', ctypes.c_uint64, 1),
        ('opt_opd2_sign', ctypes.c_uint64, 1), ('opt_res0_prec', ctypes.c_uint64, 3), ('opt_opd0_prec', ctypes.c_uint64, 3), ('opt_opd1_prec', ctypes.c_uint64, 3),
        ('opt_opd2_prec', ctypes.c_uint64, 3), ('opt_opd0_const', ctypes.c_uint64, 1), ('opt_opd1_const', ctypes.c_uint64, 1), ('opt_opd2_const', ctypes.c_uint64, 1),
        ('short_res0_str', ctypes.c_uint64, 3), ('short_opd0_str', ctypes.c_uint64, 3), ('short_opd1_str', ctypes.c_uint64, 3), ('short_opd2_str', ctypes.c_uint64, 3),
        ('opt_res_add_sign', ctypes.c_uint64, 1), ('rsvd2', ctypes.c_uint64, 25), ('sym_range', ctypes.c_uint64, 1), ('opt_opd3_const', ctypes.c_uint64, 1),
        ('opt_opd5_const', ctypes.c_uint64, 1), ('opd0_x_ins0', ctypes.c_uint64, 4), ('opd0_y_ins0', ctypes.c_uint64, 4), ('opd1_x_ins0', ctypes.c_uint64, 4),
        ('opd1_y_ins0', ctypes.c_uint64, 4), ('opd0_up_pad', ctypes.c_uint64, 4), ('opd0_dn_pad', ctypes.c_uint64, 4), ('opd0_lf_pad', ctypes.c_uint64, 4),
        ('opd0_rt_pad', ctypes.c_uint64, 4), ('res_op_x_str', ctypes.c_uint64, 4), ('res_op_y_str', ctypes.c_uint64, 4), ('res0_h_shift', ctypes.c_uint64, 4),
        ('res0_w_shift', ctypes.c_uint64, 4), ('opd0_h_shift', ctypes.c_uint64, 4), ('opd0_w_shift', ctypes.c_uint64, 4), ('opd1_h_shift', ctypes.c_uint64, 4),
        ('opd1_w_shift', ctypes.c_uint64, 4), ('tsk_lane_num', ctypes.c_uint64, 64), ('res0_n', ctypes.c_uint64, 16), ('res0_c', ctypes.c_uint64, 16),
        ('res0_h', ctypes.c_uint64, 16), ('res0_w', ctypes.c_uint64, 16), ('opd0_n', ctypes.c_uint64, 16), ('opd0_c', ctypes.c_uint64, 16),
        ('opd0_h', ctypes.c_uint64, 16), ('opd0_w', ctypes.c_uint64, 16), ('opd1_n', ctypes.c_uint64, 16), ('opd1_c', ctypes.c_uint64, 16),
        ('opd1_h', ctypes.c_uint64, 16), ('opd1_w', ctypes.c_uint64, 16), ('res0_n_str', ctypes.c_uint64, 16), ('res0_c_str', ctypes.c_uint64, 16),
        ('opd0_n_str', ctypes.c_uint64, 16), ('opd0_c_str', ctypes.c_uint64, 16), ('opd1_n_str', ctypes.c_uint64, 16), ('opd1_c_str', ctypes.c_uint64, 16),
        ('opd2_n_str', ctypes.c_uint64, 16), ('opd2_c_str', ctypes.c_uint64, 16), ('res0_addr', ctypes.c_uint64, 32), ('opd0_addr', ctypes.c_uint64, 32),
        ('opd1_addr', ctypes.c_uint64, 32), ('opd2_addr', ctypes.c_uint64, 32), ('res0_h_str', ctypes.c_uint64, 32), ('res0_w_str', ctypes.c_uint64, 32),
        ('opd0_h_str', ctypes.c_uint64, 32), ('opd0_w_str', ctypes.c_uint64, 32), ('opd1_h_str', ctypes.c_uint64, 32), ('opd1_w_str', ctypes.c_uint64, 32),
        ('opd2_h_str', ctypes.c_uint64, 32), ('opd2_w_str', ctypes.c_uint64, 32), ('res1_addr', ctypes.c_uint64, 32), ('opd3_addr', ctypes.c_uint64, 32),
    ]
    # Static-typing mirrors of the ctypes fields (same names, same order).
    cmd_short: int
    op_code: int
    cmd_id_dep: int
    dbg_mode: int
    tsk_typ: int
    tsk_eu_typ: int
    opt_rq: int
    tsk_opd_num: int
    pad_mode: int
    opt_res0_sign: int
    rsvd0: int
    pwr_step: int
    intr_en: int
    opt_res_add: int
    opt_relu: int
    opt_left_tran: int
    opt_opd4_const: int
    opt_kernel_rotate: int
    opt_opd0_sign: int
    opt_opd1_sign: int
    opt_opd2_sign: int
    opt_res0_prec: int
    opt_opd0_prec: int
    opt_opd1_prec: int
    opt_opd2_prec: int
    opt_opd0_const: int
    opt_opd1_const: int
    opt_opd2_const: int
    short_res0_str: int
    short_opd0_str: int
    short_opd1_str: int
    short_opd2_str: int
    opt_res_add_sign: int
    rsvd2: int
    sym_range: int
    opt_opd3_const: int
    opt_opd5_const: int
    opd0_x_ins0: int
    opd0_y_ins0: int
    opd1_x_ins0: int
    opd1_y_ins0: int
    opd0_up_pad: int
    opd0_dn_pad: int
    opd0_lf_pad: int
    opd0_rt_pad: int
    res_op_x_str: int
    res_op_y_str: int
    res0_h_shift: int
    res0_w_shift: int
    opd0_h_shift: int
    opd0_w_shift: int
    opd1_h_shift: int
    opd1_w_shift: int
    tsk_lane_num: int
    res0_n: int
    res0_c: int
    res0_h: int
    res0_w: int
    opd0_n: int
    opd0_c: int
    opd0_h: int
    opd0_w: int
    opd1_n: int
    opd1_c: int
    opd1_h: int
    opd1_w: int
    res0_n_str: int
    res0_c_str: int
    opd0_n_str: int
    opd0_c_str: int
    opd1_n_str: int
    opd1_c_str: int
    opd2_n_str: int
    opd2_c_str: int
    res0_addr: int
    opd0_addr: int
    opd1_addr: int
    opd2_addr: int
    res0_h_str: int
    res0_w_str: int
    opd0_h_str: int
    opd0_w_str: int
    opd1_h_str: int
    opd1_w_str: int
    opd2_h_str: int
    opd2_w_str: int
    res1_addr: int
    opd3_addr: int
    # Total command width in bits.
    length: int = 1024
class V2VModel(nn.Module):
    """Encoder-decoder network with a 1x1 convolutional output head.

    The decoder is assumed to emit 128 channels, which the head projects to
    `output_channels`.
    """

    def __init__(self, input_channels, output_channels):
        super().__init__()
        self.encoder_decoder = EncoderDecoder(in_dim=input_channels)
        # 1x1 projection from the decoder's 128 channels to the requested count.
        self.output_layer = nn.Conv2d(128, output_channels, kernel_size=1, stride=1, padding=0)
        self._initialize_weights()

    def forward(self, x):
        return self.output_layer(self.encoder_decoder(x))

    def _initialize_weights(self):
        # Xavier-normal weights and zero biases for every (transposed) conv;
        # the two original branches were identical, so they are merged here.
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.xavier_normal_(module.weight)
                nn.init.constant_(module.bias, 0)
def main(cfg):
    """Train (cfg.train == 1) or evaluate a person re-id model.

    Training warm-starts from 'pre_feat/last_cloth_best.pth' with classifier
    weights dropped; evaluation loads 'pre_feat/checkpoint_best_pre.pth' and
    prints CMC/mAP metrics.
    """
    (dataset, train_loader, test_loader, num_query, num_classes) = make_data_loader(cfg)
    model = build_model(num_classes, 'base', pretrain_choice=True)
    # Multi-GPU wrapper only when CUDA is available.
    model = (torch.nn.DataParallel(model).cuda() if torch.cuda.is_available() else model)
    loss_func = make_loss()
    optimizer = make_optimizer(cfg, model)
    # LR decays by 10x at epochs 40 and 80.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[40, 80], gamma=0.1)
    if (cfg.train == 1):
        start_epoch = 0
        acc_best = 0.0
        # Warm start: copy matching weights from a previous checkpoint,
        # skipping classifier layers (class count may differ).
        last_model_wts = torch.load(os.path.join('pre_feat', 'last_cloth_best.pth'))
        model_dict = model.state_dict()
        checkpoint_dict = {k: v for (k, v) in last_model_wts['state_dict'].items() if ((k in model_dict) and ('classifier' not in k))}
        model_dict.update(checkpoint_dict)
        model.load_state_dict(model_dict)
        do_train(cfg, model, train_loader, test_loader, optimizer, scheduler, loss_func, num_query, start_epoch, acc_best)
    else:
        last_model_wts = torch.load(os.path.join('pre_feat', 'checkpoint_best_pre.pth'))
        model.load_state_dict(last_model_wts['state_dict'])
        (mAP, cmc1, cmc5, cmc10, cmc20, feat_dict) = inference(model, test_loader, num_query, True)
        start_time = datetime.datetime.now()
        start_time = ('%4d:%d:%d-%2d:%2d:%2d' % (start_time.year, start_time.month, start_time.day, start_time.hour, start_time.minute, start_time.second))
        print('{} - Final: cmc1: {:.1%} cmc5: {:.1%} cmc10: {:.1%} cmc20: {:.1%} mAP: {:.1%}\n'.format(start_time, cmc1, cmc5, cmc10, cmc20, mAP))
def convert_extras(extras):
    """Parse a PEP 508 extras suffix (e.g. '[security,tests]') into a set of
    extra names; falsy input yields an empty set."""
    if extras:
        # A dummy project name makes the suffix a parseable requirement.
        return Requirement('placeholder' + extras.lower()).extras
    return set()
class FancyTuple(tuple):
    """Tuple whose repr shows one right-aligned 'index: value' pair per line,
    and whose slices are themselves FancyTuples."""

    def __repr__(self):
        # Pad indices to the width of the largest index.
        width = len(str(len(self) - 1))
        rows = ('{0:>{1}}: {2}'.format(idx, width, value) for (idx, value) in enumerate(self))
        return '\n'.join(rows)

    def __getslice__(self, i, j):
        # Python 2 slicing protocol: route through __getitem__.
        return self.__getitem__(slice(i, j))

    def __getitem__(self, x):
        item = tuple.__getitem__(self, x)
        return FancyTuple(item) if isinstance(x, slice) else item
# NOTE(review): the bare tuple below is almost certainly the argument list of a
# skip decorator (e.g. @unittest.skipIf(not torch, 'no PyTorch')) whose name was
# lost in extraction; as written it is a no-op expression — confirm upstream.
((not torch), 'no PyTorch')
def test_demo_torch_export_to_onnx():
    """Export demos/demo-torch.config to ONNX, then run the seq-lens
    in/out inference check on the exported model."""
    out_onnx_model = _test_torch_export_to_onnx('demos/demo-torch.config')
    _test_torch_onnx_inference_seq_lens_in_out(out_onnx_model)
def get_layers(dims: Union[(int, list)], layer_types: Union[(str, BaseLayer, list)], activations: Union[(str, BaseActivation, list)], use_bias: Union[(bool, list)], normalizations: Union[(str, list)], self_embeddings: Union[(bool, list)], sample_sizes: Union[(int, list)], loss: Union[(str, BaseLoss)]) -> list:
    """Build one layer per entry of `dims`.

    Scalar options are broadcast to every layer via check_param; only the
    final layer additionally receives `loss`.
    """
    check_normalizations(normalizations)
    if not isinstance(dims, list):
        dims = [dims]
    n_layers = len(dims)
    # Broadcast every per-layer option to length n_layers.
    layer_types = check_param(layer_types, n_layers)
    activations = check_param(activations, n_layers)
    use_bias = check_param(use_bias, n_layers)
    normalizations = check_param(normalizations, n_layers)
    self_embeddings = check_param(self_embeddings, n_layers)
    sample_sizes = check_param(sample_sizes, n_layers)
    keys = ['layer', 'out_channels', 'activation', 'use_bias', 'normalization', 'self_embeddings', 'sample_size']
    layers = []
    for idx in range(n_layers):
        values = [layer_types[idx], dims[idx], activations[idx], use_bias[idx], normalizations[idx], self_embeddings[idx], sample_sizes[idx]]
        if idx == (n_layers - 1):
            # Only the output layer gets the loss.
            values.append(loss)
            keys.append('loss')
        layers.append(get_layer(**dict(zip(keys, values))))
    return layers
def dispatchCommand(command, user, args):
    """Route a client command (case-insensitive) to its handler.

    Unknown commands print an error message. `user` and `args` are accepted
    for interface compatibility but not used by the current handlers.
    """
    cmd = command.lower()
    if cmd == 'login':
        loginUser()
    elif cmd == 'retrieve_file':
        sendFile()
    elif cmd == 'list_files':
        listFiles()
    else:
        print('Invalid Command')
class Console(RichConsole):
    """rich Console subclass with logging-style level filtering.

    Messages below `log_level` are dropped; debug/info/warning/error are
    thin wrappers over log() with the corresponding level.
    """
    # Mirror the stdlib logging level constants for convenient access.
    CRITICAL = logging.CRITICAL
    FATAL = logging.FATAL
    ERROR = logging.ERROR
    WARNING = logging.WARNING
    WARN = logging.WARN
    INFO = logging.INFO
    DEBUG = logging.DEBUG
    NOTSET = logging.NOTSET

    def __init__(self, *args, log_level: int=INFO, **kwrags):
        # NOTE(review): 'kwrags' is a typo for 'kwargs' but is positional-opaque
        # to callers, so it is kept as-is here.
        super().__init__(*args, **kwrags)
        self.log_level = log_level

    def status(self, status: str, level: int=INFO) -> LogStatus:
        """Create a LogStatus spinner that is enabled only when `level` passes
        the console's threshold."""
        return LogStatus(status, console=self, level=level, enabled=(level >= self.log_level))

    def log(self, *objects, level: int=logging.INFO, sep: str=' ', end: str='\n', style: Union[(str, Style)]=None, justify: Optional[JustifyMethod]=None, emoji: Optional[bool]=None, markup: Optional[bool]=None, highlight: Optional[bool]=None, log_locals: bool=False, _stack_offset: int=2) -> None:
        """Render `objects` like RichConsole.log, but drop the message when
        `level` is below the console threshold and prefix it with a
        logging-style level label."""
        if (level < self.log_level):
            return
        if (not objects):
            objects = (NewLine(),)
        render_hooks = self._render_hooks[:]
        with self:
            renderables = self._collect_renderables(objects, sep, end, justify=justify, emoji=emoji, markup=markup, highlight=highlight)
            if (style is not None):
                renderables = [Styled(renderable, style) for renderable in renderables]
            # Caller frame info drives the file:line link in the output.
            (filename, line_no, locals) = self._caller_frame_info(_stack_offset)
            link_path = (None if filename.startswith('<') else os.path.abspath(filename))
            path = filename.rpartition(os.sep)[(- 1)]
            if log_locals:
                # Dump the caller's non-dunder locals as a scope panel.
                locals_map = {key: value for (key, value) in locals.items() if (not key.startswith('__'))}
                renderables.append(render_scope(locals_map, title='[i]locals'))
            # Borrow RichHandler's level-text styling via a synthetic record.
            record = logging.LogRecord(name=None, level=level, pathname=None, lineno=None, msg=None, args=None, exc_info=None)
            handler = RichHandler(console=self)
            self._log_render.show_level = True
            renderables = [self._log_render(self, renderables, level=handler.get_level_text(record), log_time=self.get_datetime(), path=path, line_no=line_no, link_path=link_path)]
            for hook in render_hooks:
                renderables = hook.process_renderables(renderables)
            # Render to segments and push them onto the console buffer,
            # cropping to the console width.
            new_segments = []
            extend = new_segments.extend
            render = self.render
            render_options = self.options
            for renderable in renderables:
                extend(render(renderable, render_options))
            buffer_extend = self._buffer.extend
            for line in Segment.split_and_crop_lines(new_segments, self.width, pad=False):
                buffer_extend(line)

    def debug(self, *args, **kwargs):
        self.log(*args, level=self.DEBUG, **kwargs)

    def info(self, *args, **kwargs):
        self.log(*args, level=self.INFO, **kwargs)

    def warning(self, *args, **kwargs):
        self.log(*args, level=self.WARNING, **kwargs)

    def error(self, *args, **kwargs):
        self.log(*args, level=self.ERROR, **kwargs)
def save_args(args, force_overwrite=False):
    """Persist parsed CLI arguments as JSON at <args.log_dir>/config.json.

    Creates the log directory first; pre-existing directories are tolerated
    when args.exist_ok or force_overwrite is set.

    The task collections are excluded from the dump (they are objects, not
    JSON-serializable config). Using pop() with a default instead of del
    keeps this robust when either key is absent from the namespace.
    """
    os.makedirs(args.log_dir, exist_ok=(args.exist_ok or force_overwrite))
    variables = vars(args).copy()
    variables.pop('train_tasks', None)
    variables.pop('val_tasks', None)
    with open(os.path.join(args.log_dir, 'config.json'), 'wt') as f:
        json.dump(variables, f, indent=2)
# NOTE(review): the bare call below looks like a lost decorator
# (e.g. @arg_at(0, assert_vector())); as written it is an expression evaluated
# at import time with no effect on the function — confirm upstream.
(arg_at(0, assert_vector()))
def normalized(vec, eps=0.0):
    """Return `vec` scaled by 1/(norm(vec) + eps).

    With the default eps=0.0 a zero-length vector divides by zero; pass a
    small positive eps to guard against that.
    """
    invlen = (1 / (norm(vec) + eps))
    return (invlen * vec)
class Functional():
    """Accumulates a squared-mismatch objective between a numerical solution
    and sampled observations over solver steps."""

    def __init__(self, sampler):
        self.sampler = sampler
        self.J = 0.0  # running objective value

    def solver_step(self, numerical_solution, t):
        """Add the squared L2 mismatch at time t; no-op when the sampler has
        no observation for this time."""
        obs = self.sampler.get_observation(t)
        if obs is None:
            return
        self.J += assemble((((numerical_solution - obs) ** 2) * dx))
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
    """Normalize a single value into a one-element list.

    Null-like values become [nan]; values that fail validation are handled
    per `errors` ('raise' raises, 'ignore' keeps the raw string, anything
    else yields nan). Valid values are compacted when the output format is
    'compact' or 'standard'; other formats yield an empty list.
    """
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]
    if not validate_lt_pvm(val):
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {val}')
        return [val if errors == 'ignore' else np.nan]
    result: Any = []
    if output_format in {'compact', 'standard'}:
        result = [pvm.compact(val)] + result
    return result
def test_set_output_names_on_inference_model():
    """Redirecting the model output to an interior node should change what
    output_fact reports (uses the local mobilenetv2-7.onnx fixture)."""
    model = tract.onnx().model_for_path('./mobilenetv2-7.onnx')
    # Symbolic batch dimension 'B'.
    model.set_input_fact(0, 'B,3,224,224,f32')
    model.analyse()
    model.set_output_names(['mobilenetv20_output_pred_fwd'])
    assert (str(model.output_fact(0)) == 'B,1000,1,1,F32')
def _ntuple(n):
    """Return a converter that normalizes a value to an n-element form.

    Non-string iterables are passed through unchanged; scalars are repeated
    n times. Strings/bytes are treated as scalars too: they are technically
    Iterable, so without the extra check `parse('same')` would leak the raw
    string through instead of producing an n-tuple (the fix adopted upstream
    in timm/torchvision helpers).
    """
    def parse(x):
        if isinstance(x, collections.abc.Iterable) and not isinstance(x, (str, bytes)):
            return x
        return tuple(repeat(x, n))
    return parse
class RRG(nn.Module):
    """Recursive residual group: n_MRB multi-scale residual blocks followed by
    a 3x3 conv, all wrapped in a long residual connection."""

    def __init__(self, n_feat, n_MRB, height, width, chan_factor, bias=False, groups=1):
        super(RRG, self).__init__()
        blocks = [MRB(n_feat, height, width, chan_factor, bias, groups) for _ in range(n_MRB)]
        # Channel count is preserved end to end.
        blocks.append(nn.Conv2d(n_feat, n_feat, kernel_size=3, stride=1, padding=1, bias=bias))
        self.body = nn.Sequential(*blocks)

    def forward(self, x):
        out = self.body(x)
        out += x  # long skip connection
        return out
# NOTE(review): the bare name below is likely the remnant of a decorator such
# as @scopes.add_arg_scope whose '@'/attribute path was lost in extraction;
# as written it is a no-op expression — confirm upstream.
_arg_scope
def variable(name, shape=None, dtype=tf.float32, initializer=None, regularizer=None, trainable=True, collections=None, device='', restore=True):
    """Create (or fetch) a TF1 variable and register it in the standard
    collections (slim-style helper).

    Every variable joins GraphKeys.VARIABLES and the model-variables
    collection; when `restore` is True it is additionally marked for
    checkpoint restoration. The variable is placed on the device chosen by
    variable_device(device, name).
    """
    collections = list((collections or []))
    collections += [tf.GraphKeys.VARIABLES, MODEL_VARIABLES]
    if restore:
        collections.append(VARIABLES_TO_RESTORE)
    # De-duplicate collection names.
    collections = set(collections)
    with tf.device(variable_device(device, name)):
        return tf.get_variable(name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, trainable=trainable, collections=collections)
def get_generic_df_coefficient_symbol(k1, k2, k3, k4, k5, k6, z1, z2, z3, z4):
    """Build the sympy symbol for a generic distribution-function coefficient,
    named C with six k-subscripts and four z-superscripts (LaTeX '\\,' thin
    spaces between indices)."""
    symbol_name = 'C_{0}\\,{1}\\,{2}\\,{3}\\,{4}\\,{5}^{6}\\,{7}\\,{8}\\,{9}'.format(k1, k2, k3, k4, k5, k6, z1, z2, z3, z4)
    return symbols(symbol_name)
def setup():
    """Parse CLI args, overlay them on the YAML config, prepare output dirs,
    logging and GPU visibility, back up sources, and return the config."""
    args = parse_arguments()
    config = load_yaml(args.config)
    # Only CLI flags that were actually provided override the YAML values.
    update_not_none(config, vars(args))
    setup_dirs(config)
    # Drop TensorFlow's default log handler so our own loggers are authoritative.
    del logging.getLogger('tensorflow').handlers[0]
    setup_loggers(config['log_dir'])
    os.environ['CUDA_VISIBLE_DEVICES'] = config['gpu']
    backup_src(config['src_dir'])
    return config
# NOTE(review): the bare call below is likely a registration decorator (e.g.
# @HEADS.register_module()) whose attribute path was lost in extraction;
# as written it is an expression evaluated at import time — confirm upstream.
_module()
class EMAHead(BaseDecodeHead):
    """Expectation-Maximization Attention decode head (EMANet-style).

    Runs features through a frozen 1x1 conv into the EMA module, applies a
    residual reconstruction, a bottleneck, and an optional concat with the
    transformed inputs before classification.
    """

    def __init__(self, ema_channels, num_bases, num_stages, concat_input=True, momentum=0.1, **kwargs):
        super(EMAHead, self).__init__(**kwargs)
        self.ema_channels = ema_channels
        self.num_bases = num_bases
        self.num_stages = num_stages
        self.concat_input = concat_input
        self.momentum = momentum  # EMA update momentum for the bases
        self.ema_module = EMAModule(self.ema_channels, self.num_bases, self.num_stages, self.momentum)
        self.ema_in_conv = ConvModule(self.in_channels, self.ema_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        # The conv feeding the EMA module is frozen and has no norm/activation.
        self.ema_mid_conv = ConvModule(self.ema_channels, self.ema_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=None, act_cfg=None)
        for param in self.ema_mid_conv.parameters():
            param.requires_grad = False
        self.ema_out_conv = ConvModule(self.ema_channels, self.ema_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=None)
        self.bottleneck = ConvModule(self.ema_channels, self.channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        if self.concat_input:
            self.conv_cat = ConvModule((self.in_channels + self.channels), self.channels, kernel_size=3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Decode-head forward: EMA attention with residual reconstruction."""
        x = self._transform_inputs(inputs)
        feats = self.ema_in_conv(x)
        identity = feats
        feats = self.ema_mid_conv(feats)
        recon = self.ema_module(feats)
        recon = F.relu(recon, inplace=True)
        recon = self.ema_out_conv(recon)
        # Residual connection around the EMA reconstruction.
        output = F.relu((identity + recon), inplace=True)
        output = self.bottleneck(output)
        if self.concat_input:
            output = self.conv_cat(torch.cat([x, output], dim=1))
        output = self.cls_seg(output)
        return output
class DownSample(nn.Module):
    """Downsample by a power-of-two `scale_factor` via repeated Down blocks,
    widening channels by `chan_factor` at each stage."""

    def __init__(self, in_channels, scale_factor, chan_factor=2, kernel_size=3):
        super(DownSample, self).__init__()
        # Number of halving stages, e.g. scale_factor=4 -> 2 stages.
        self.scale_factor = int(np.log2(scale_factor))
        stages = []
        channels = in_channels
        for _ in range(self.scale_factor):
            stages.append(Down(channels, chan_factor))
            channels = int(channels * chan_factor)
        self.body = nn.Sequential(*stages)

    def forward(self, x):
        return self.body(x)
class HFBertMatchingTrainDataset(Dataset):
    """Sentence-pair training dataset backed by the HuggingFace
    shibing624/nli_zh dataset.

    For STS-* subsets the float similarity label is binarized at 2.5;
    other subsets keep their original labels.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, name='STS-B', max_len: int=64):
        self.tokenizer = tokenizer
        self.name = name.upper()
        self.data = load_dataset('shibing624/nli_zh', self.name, split='train')
        self.max_len = max_len

    def __len__(self):
        return len(self.data)

    def text_2_id(self, text_1: str, text_2: str=None):
        # Pair encoding gets a budget of twice the single-sentence max length.
        return self.tokenizer(text_1, text_2, max_length=(self.max_len * 2), truncation=True, padding='max_length', return_tensors='pt')

    def __getitem__(self, index: int):
        row = self.data[index]
        encoded = self.text_2_id(row['sentence1'], row['sentence2'])
        label = int(row['label'] > 2.5) if 'STS' in self.name else row['label']
        return (encoded, label)
def __boost_get_version_file(self, d):
    """Return the node for Boost's version header inside directory `d`,
    or None when `d` is empty or cannot be resolved."""
    if not d:
        return None
    dnode = self.root.find_dir(d)
    if not dnode:
        return None
    return dnode.find_node(BOOST_VERSION_FILE)
class LlavaMetaForCausalLM(ABC):
    """Mixin for LLaVA-style multimodal causal LMs: splices vision-tower image
    features into the text embedding sequence at IMAGE_TOKEN_INDEX positions
    and keeps labels/attention masks aligned."""

    def get_model(self):
        # Abstract hook: concrete subclasses return the underlying LM.
        pass

    def get_vision_tower(self):
        return self.get_model().get_vision_tower()

    def encode_images(self, images):
        """Run images through the vision tower, then project the features into
        the LM embedding space with mm_projector."""
        image_features = self.get_model().get_vision_tower()(images)
        image_features = self.get_model().mm_projector(image_features)
        return image_features

    def prepare_inputs_labels_for_multimodal(self, input_ids, attention_mask, past_key_values, labels, images):
        """Replace image placeholder tokens with projected image features.

        Returns (input_ids, attention_mask, past_key_values, inputs_embeds,
        labels); when images are spliced in, input_ids comes back as None and
        inputs_embeds carries the merged sequence. Image positions in labels
        are masked with IGNORE_INDEX.
        """
        vision_tower = self.get_vision_tower()
        # Text-only fast path (also covers single-token decoding steps).
        if ((vision_tower is None) or (images is None) or (input_ids.shape[1] == 1)):
            if ((past_key_values is not None) and (vision_tower is not None) and (images is not None) and (input_ids.shape[1] == 1)):
                # Decoding with a cache: extend the mask to the cached length + 1.
                attention_mask = torch.ones((attention_mask.shape[0], (past_key_values[(- 1)][(- 1)].shape[(- 2)] + 1)), dtype=attention_mask.dtype, device=attention_mask.device)
            return (input_ids, attention_mask, past_key_values, None, labels)
        if ((type(images) is list) or (images.ndim == 5)):
            # Multiple images per sample: encode as one batch, then split back.
            concat_images = torch.cat([image for image in images], dim=0)
            image_features = self.encode_images(concat_images)
            split_sizes = [image.shape[0] for image in images]
            image_features = torch.split(image_features, split_sizes, dim=0)
            image_features = [x.flatten(0, 1) for x in image_features]
        else:
            image_features = self.encode_images(images)
        new_input_embeds = []
        new_labels = ([] if (labels is not None) else None)
        cur_image_idx = 0
        for (batch_idx, cur_input_ids) in enumerate(input_ids):
            if ((cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0):
                # No image token in this row: plain embedding, but touch the
                # projector with a dummy so its params stay in the graph;
                # the image pointer still advances to stay in sync.
                cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)
                cur_input_embeds = (cur_input_embeds + (0.0 * self.get_model().mm_projector(vision_tower.dummy_feature)).sum())
                new_input_embeds.append(cur_input_embeds)
                if (labels is not None):
                    new_labels.append(labels[batch_idx])
                cur_image_idx += 1
                continue
            image_token_indices = torch.where((cur_input_ids == IMAGE_TOKEN_INDEX))[0]
            cur_new_input_embeds = []
            if (labels is not None):
                cur_labels = labels[batch_idx]
                cur_new_labels = []
                assert (cur_labels.shape == cur_input_ids.shape)
            # Consume image tokens left to right, splicing features in.
            while (image_token_indices.numel() > 0):
                cur_image_features = image_features[cur_image_idx]
                image_token_start = image_token_indices[0]
                if (getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False)):
                    # Adapter-tuning with <im_start>/<im_end>: keep the two
                    # delimiter embeddings trainable, detach the rest.
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:(image_token_start - 1)]).detach())
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[(image_token_start - 1):image_token_start]))
                    cur_new_input_embeds.append(cur_image_features)
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[(image_token_start + 1):(image_token_start + 2)]))
                    if (labels is not None):
                        cur_new_labels.append(cur_labels[:image_token_start])
                        # Image feature positions never contribute to the loss.
                        cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                        cur_new_labels.append(cur_labels[image_token_start:(image_token_start + 1)])
                        cur_labels = cur_labels[(image_token_start + 2):]
                else:
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
                    cur_new_input_embeds.append(cur_image_features)
                    if (labels is not None):
                        cur_new_labels.append(cur_labels[:image_token_start])
                        cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                        cur_labels = cur_labels[(image_token_start + 1):]
                cur_image_idx += 1
                if (getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False)):
                    cur_input_ids = cur_input_ids[(image_token_start + 2):]
                else:
                    cur_input_ids = cur_input_ids[(image_token_start + 1):]
                image_token_indices = torch.where((cur_input_ids == IMAGE_TOKEN_INDEX))[0]
            if (cur_input_ids.numel() > 0):
                # Trailing text after the last image token.
                if (getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False)):
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())
                else:
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))
                if (labels is not None):
                    cur_new_labels.append(cur_labels)
            cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
            cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
            new_input_embeds.append(cur_new_input_embeds)
            if (labels is not None):
                cur_new_labels = torch.cat(cur_new_labels, dim=0)
                new_labels.append(cur_new_labels)
        if any(((x.shape != new_input_embeds[0].shape) for x in new_input_embeds)):
            # Ragged batch: right-pad embeddings with zeros, labels with
            # IGNORE_INDEX, and rebuild the attention mask accordingly.
            max_len = max((x.shape[0] for x in new_input_embeds))
            new_input_embeds_align = []
            for cur_new_embed in new_input_embeds:
                cur_new_embed = torch.cat((cur_new_embed, torch.zeros(((max_len - cur_new_embed.shape[0]), cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
                new_input_embeds_align.append(cur_new_embed)
            new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
            if (labels is not None):
                new_labels_align = []
                _new_labels = new_labels
                for cur_new_label in new_labels:
                    cur_new_label = torch.cat((cur_new_label, torch.full(((max_len - cur_new_label.shape[0]),), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
                    new_labels_align.append(cur_new_label)
                new_labels = torch.stack(new_labels_align, dim=0)
            if (attention_mask is not None):
                # NOTE(review): this branch reads _new_labels and labels.shape,
                # so it implicitly assumes labels is not None when masks need
                # re-padding — confirm callers always satisfy that.
                new_attention_mask = []
                for (cur_attention_mask, cur_new_labels, cur_new_labels_align) in zip(attention_mask, _new_labels, new_labels):
                    # True padding on the left covers the inserted image span;
                    # False padding on the right covers the alignment padding.
                    new_attn_mask_pad_left = torch.full(((cur_new_labels.shape[0] - labels.shape[1]),), True, dtype=attention_mask.dtype, device=attention_mask.device)
                    new_attn_mask_pad_right = torch.full(((cur_new_labels_align.shape[0] - cur_new_labels.shape[0]),), False, dtype=attention_mask.dtype, device=attention_mask.device)
                    cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
                    new_attention_mask.append(cur_new_attention_mask)
                attention_mask = torch.stack(new_attention_mask, dim=0)
                assert (attention_mask.shape == new_labels.shape)
        else:
            # Uniform lengths: plain stacking, mask extended on the left.
            new_input_embeds = torch.stack(new_input_embeds, dim=0)
            if (labels is not None):
                new_labels = torch.stack(new_labels, dim=0)
            if (attention_mask is not None):
                new_attn_mask_pad_left = torch.full((attention_mask.shape[0], (new_input_embeds.shape[1] - input_ids.shape[1])), True, dtype=attention_mask.dtype, device=attention_mask.device)
                attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
                assert (attention_mask.shape == new_input_embeds.shape[:2])
        return (None, attention_mask, past_key_values, new_input_embeds, new_labels)

    def initialize_vision_tokenizer(self, model_args, tokenizer):
        """Add the multimodal special tokens to the tokenizer and resize/seed
        the embedding matrices accordingly."""
        if model_args.mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))
        if model_args.mm_use_im_start_end:
            num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))
            if (num_new_tokens > 0):
                # Initialize new token rows with the mean of the old embeddings.
                input_embeddings = self.get_input_embeddings().weight.data
                output_embeddings = self.get_output_embeddings().weight.data
                input_embeddings_avg = input_embeddings[:(- num_new_tokens)].mean(dim=0, keepdim=True)
                output_embeddings_avg = output_embeddings[:(- num_new_tokens)].mean(dim=0, keepdim=True)
                input_embeddings[(- num_new_tokens):] = input_embeddings_avg
                output_embeddings[(- num_new_tokens):] = output_embeddings_avg
            if model_args.tune_mm_mlp_adapter:
                # Adapter tuning: train input embeddings only.
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = True
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False
            if model_args.pretrain_mm_mlp_adapter:
                # Seed the new input-embedding rows from a pretrained adapter
                # checkpoint; exactly the two start/end tokens are expected.
                mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')
                embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']
                assert (num_new_tokens == 2)
                if (input_embeddings.shape == embed_tokens_weight.shape):
                    input_embeddings[(- num_new_tokens):] = embed_tokens_weight[(- num_new_tokens):]
                elif (embed_tokens_weight.shape[0] == num_new_tokens):
                    input_embeddings[(- num_new_tokens):] = embed_tokens_weight
                else:
                    raise ValueError(f'Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.')
        elif model_args.mm_use_im_patch_token:
            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = False
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False
def model_gen():
    """Build a tiny Keras model: 2x2 conv -> batch-norm -> ReLU on 4x4x3 inputs."""
    image_in = layers.Input(shape=[4, 4, 3])
    features = layers.Conv2D(2, 2, padding='same')(image_in)
    features = layers.BatchNormalization()(features)
    features = layers.Activation('relu')(features)
    return tf.keras.models.Model(inputs=image_in, outputs=features)
# NOTE(review): the decorator below appears truncated in this source -- it
# reads ``.parametrize`` with nothing before the dot (presumably
# ``@pytest.mark.parametrize``); confirm against the original file.
.parametrize('v_inner_boundary, v_outer_boundary', [(3350, 3650), (2900, 3750), (2900, 3850), (2900, 3900), (2950, 3750), (2950, 3850), (2950, 3900), (3050, 3750), (3050, 3850), (3050, 3900), (3150, 3750), (3150, 3850), (3150, 3900)])
def test_plasma_vboundary(config_init_trad_fname, v_inner_boundary, v_outer_boundary, atomic_data_fname):
    """Smoke test: a Simulation builds for various inner/outer boundary velocities."""
    tardis_config = Configuration.from_yaml(config_init_trad_fname)
    tardis_config.atom_data = atomic_data_fname
    # Parametrized values are plain numbers; attach km/s units before use.
    tardis_config.model.structure.v_inner_boundary = ((v_inner_boundary * u.km) / u.s)
    tardis_config.model.structure.v_outer_boundary = ((v_outer_boundary * u.km) / u.s)
    simulation = Simulation.from_config(tardis_config)
class DeterministicMLPRegressor(LayersPowered, Serializable):
    """TF1 MLP regressor trained by a pluggable optimizer (LBFGS by default).

    Optionally whitens inputs with learned mean/std variables before the MLP.
    NOTE(review): recovered from whitespace-collapsed source; the extent of
    the ``tf.variable_scope`` block is reconstructed -- confirm upstream.
    """

    def __init__(self, name, input_shape, output_dim, network=None, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, output_nonlinearity=None, optimizer=None, normalize_inputs=True):
        # Serializable bookkeeping must capture the constructor args first.
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name):
            if (optimizer is None):
                optimizer = LbfgsOptimizer(name='optimizer')
            self.output_dim = output_dim
            self._optimizer = optimizer
            if (network is None):
                network = MLP(input_shape=input_shape, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=output_nonlinearity, name='network')
            l_out = network.output_layer
            LayersPowered.__init__(self, [l_out])
            xs_var = network.input_layer.input_var
            ys_var = tf.placeholder(dtype=tf.float32, shape=[None, output_dim], name='ys')
            # Input-normalization statistics: updated in fit(), applied in-graph.
            x_mean_var = tf.get_variable(name='x_mean', shape=((1,) + input_shape), initializer=tf.constant_initializer(0.0, dtype=tf.float32))
            x_std_var = tf.get_variable(name='x_std', shape=((1,) + input_shape), initializer=tf.constant_initializer(1.0, dtype=tf.float32))
            normalized_xs_var = ((xs_var - x_mean_var) / x_std_var)
            if normalize_inputs:
                fit_ys_var = L.get_output(l_out, {network.input_layer: normalized_xs_var})
            else:
                fit_ys_var = L.get_output(l_out, {network.input_layer: xs_var})
            # Plain mean-squared-error objective.
            loss = tf.reduce_mean(tf.square((fit_ys_var - ys_var)))
            self.f_predict = tensor_utils.compile_function([xs_var], fit_ys_var)
            optimizer_args = dict(loss=loss, target=self, network_outputs=[fit_ys_var])
            optimizer_args['inputs'] = [xs_var, ys_var]
            self._optimizer.update_opt(**optimizer_args)
            self._name = name
            self.l_out = l_out
            self._normalize_inputs = normalize_inputs
            self._x_mean_var = x_mean_var
            self._x_std_var = x_std_var

    def predict_sym(self, xs):
        """Symbolic forward pass on ``xs`` (no normalization is applied here)."""
        return L.get_output(self.l_out, xs)

    def fit(self, xs, ys):
        """Refresh normalization stats (if enabled), then optimize on (xs, ys)."""
        if self._normalize_inputs:
            new_mean = np.mean(xs, axis=0, keepdims=True)
            # Small epsilon keeps the in-graph division well-defined.
            new_std = (np.std(xs, axis=0, keepdims=True) + 1e-08)
            tf.get_default_session().run(tf.group(tf.assign(self._x_mean_var, new_mean), tf.assign(self._x_std_var, new_std)))
        inputs = [xs, ys]
        loss_before = self._optimizer.loss(inputs)
        if self._name:
            prefix = (self._name + '_')
        else:
            prefix = ''
        logger.record_tabular((prefix + 'LossBefore'), loss_before)
        self._optimizer.optimize(inputs)
        loss_after = self._optimizer.loss(inputs)
        logger.record_tabular((prefix + 'LossAfter'), loss_after)
        logger.record_tabular((prefix + 'dLoss'), (loss_before - loss_after))

    def predict(self, xs):
        """Numeric prediction for array-like ``xs``."""
        return self.f_predict(np.asarray(xs))

    def get_param_values(self, **tags):
        # Delegate to LayersPowered explicitly (multiple-inheritance base).
        return LayersPowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        return LayersPowered.set_param_values(self, flattened_params, **tags)
def test_compute_ricci_curvature():
    """Check '1d' and 'augmented' Forman-Ricci curvature on a small graph."""
    graph = nx.Graph()
    graph.add_edges_from([(1, 2), (2, 3), (3, 4), (2, 4)])
    graph.add_node(5)  # isolated node

    frc = FormanRicci(graph, method='1d')
    frc.compute_ricci_curvature()
    edge_vals = list(nx.get_edge_attributes(frc.G, 'formanCurvature').values())
    node_vals = list(nx.get_node_attributes(frc.G, 'formanCurvature').values())
    npt.assert_array_almost_equal(edge_vals, [0.0, -1.0, -1.0, 0.0])
    npt.assert_array_almost_equal(node_vals, [0.0, -0.0, -0.5, -0.5, 0])

    frc_aug = FormanRicci(graph, method='augmented')
    frc_aug.compute_ricci_curvature()
    edge_vals = list(nx.get_edge_attributes(frc_aug.G, 'formanCurvature').values())
    node_vals = list(nx.get_node_attributes(frc_aug.G, 'formanCurvature').values())
    npt.assert_array_almost_equal(edge_vals, [0.0, 2.0, 2.0, 3.0])
    npt.assert_array_almost_equal(node_vals, [0.0, 1.0, 2.5, 2.5, 0])
class UtteranceLevel(nn.Module):
    """Utterance-level downstream head: optional pre-net, pooling over time,
    then a post-net projecting to ``output_dim``."""

    def __init__(self, input_dim, output_dim, pooling='MeanPooling', activation='ReLU', pre_net=None, post_net={'select': 'FrameLevel'}, **kwargs):
        # NOTE(review): ``post_net`` uses a mutable dict as a default argument;
        # it is not mutated here, but all callers share the same object --
        # consider a None sentinel.
        super().__init__()
        latest_dim = input_dim
        # Optional frame-level pre-processing network (built only for a dict config).
        self.pre_net = (get_downstream_model(latest_dim, latest_dim, pre_net) if isinstance(pre_net, dict) else None)
        # HACK: ``eval`` resolves the pooling class by name; safe only when
        # ``pooling`` comes from trusted configuration.
        self.pooling = eval(pooling)(input_dim=latest_dim, activation=activation)
        self.post_net = get_downstream_model(latest_dim, output_dim, post_net)

    def forward(self, hidden_state, features_len=None):
        """Run pre-net (if any), pool over time, project; returns (logit, features_len)."""
        if (self.pre_net is not None):
            (hidden_state, features_len) = self.pre_net(hidden_state, features_len)
        (pooled, features_len) = self.pooling(hidden_state, features_len)
        (logit, features_len) = self.post_net(pooled, features_len)
        return (logit, features_len)
def str_presenter(dumper, data):
    """Represent *data* as a YAML string scalar, switching to literal block
    style (``|``) whenever the value spans multiple lines."""
    is_multiline = len(data.splitlines()) > 1
    if not is_multiline:
        return dumper.represent_scalar('tag:yaml.org,2002:str', data)
    return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
def iterate_multibank_interface_ids(array: dt.Array, interface_ids: Union[(int, List[Tuple[(int, int)]])]):
    """Yield (bank, interface_id) pairs for *array*.

    Multibank arrays with a distributed index carry explicit (bank, id)
    pairs; every other array maps its single id to bank 0.
    """
    if not is_multibank_array_with_distributed_index(array):
        yield (0, interface_ids)
        return
    for (bank_no, if_id) in interface_ids:
        yield (bank_no, if_id)
def ensure_dir(file_path):
    """Create directory *file_path* (and any missing parents) if absent.

    Despite the name, the argument is treated as a directory path, not a
    file path. Idempotent: an existing directory is left untouched.
    """
    # exist_ok avoids the check-then-create race of the original
    # ``os.path.exists()`` + ``os.makedirs()`` sequence.
    os.makedirs(file_path, exist_ok=True)
class ConvBertForMultipleChoice():
    """Placeholder for the PyTorch-backed ConvBertForMultipleChoice.

    Any use raises through ``requires_pytorch`` when torch is unavailable.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): in the upstream dummy-object pattern this is usually
        # a classmethod; as written it can only fire on an instance, which
        # __init__ already prevents from existing.
        requires_pytorch(self)
def aps13_f(x):
    """Return x * exp(-1/x**2), defined as 0 at x == 0.

    When 1/x**2 exceeds ``_MAX_EXPABLE`` the true value underflows to 0
    anyway, so return 0 directly instead of overflowing ``np.exp``.
    """
    if x == 0:
        return 0
    inv_sq = (1 / (x ** 2))
    return 0 if inv_sq > _MAX_EXPABLE else (x / np.exp(inv_sq))
def get_hole_identities(hole_filename, duplicate_filename):
    """Build ``<file>_<row>_<col>`` identity strings from pickled hole data.

    Files listed in *duplicate_filename* and anything under the
    rsbotownversion scripts tree are skipped.

    Returns:
        (hole_identities, duplicate_files) -- the identity strings and the
        stripped duplicate-file list, in file order.
    """
    # Context managers close both files (the original leaked both handles).
    with open(hole_filename, 'rb') as f:
        hole_data = pickle.load(f)
    with open(duplicate_filename, 'r') as f:
        duplicate_files = [line.strip() for line in f]
    # Set lookup turns the per-key membership test from O(n) into O(1).
    duplicates = set(duplicate_files)
    excluded_prefix = 'rule_classifier_data/train/rsbotownversion/trunk/scripts/'
    hole_identities = []
    for (k, v) in hole_data.items():
        if (k in duplicates) or k.startswith(excluded_prefix):
            continue
        for hole in v:
            hole_identities.append(k + '_' + str(hole[0]) + '_' + str(hole[1]))
    return (hole_identities, duplicate_files)
class Credential():
    """A username/password pair.

    Iteration yields username then password, so instances unpack like a
    2-tuple; ``str()`` renders the conventional ``user:pass`` form.
    """

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __iter__(self):
        # Fixed order so ``user, pw = cred`` works.
        for part in (self.username, self.password):
            yield part

    def __str__(self):
        # vars(self) exposes the instance dict to the %-format keys.
        return ('%(username)s:%(password)s' % vars(self))
class SAC(QLearningAlgoBase[(SACImpl, SACConfig)]):
    """Soft Actor-Critic wrapper that assembles its torch implementation."""

    def inner_create_impl(self, observation_shape: Shape, action_size: int) -> None:
        """Build policy, critic ensembles, temperature and optimizers, then
        store the assembled ``SACImpl`` on ``self._impl``."""
        policy = create_normal_policy(observation_shape, action_size, self._config.actor_encoder_factory, device=self._device)
        # Online critic ensemble plus an identically-configured target copy.
        (q_funcs, q_func_forwarder) = create_continuous_q_function(observation_shape, action_size, self._config.critic_encoder_factory, self._config.q_func_factory, n_ensembles=self._config.n_critics, device=self._device)
        (targ_q_funcs, targ_q_func_forwarder) = create_continuous_q_function(observation_shape, action_size, self._config.critic_encoder_factory, self._config.q_func_factory, n_ensembles=self._config.n_critics, device=self._device)
        # Entropy temperature is stored as log(alpha) to keep it positive.
        log_temp = create_parameter((1, 1), math.log(self._config.initial_temperature), device=self._device)
        actor_optim = self._config.actor_optim_factory.create(policy.named_modules(), lr=self._config.actor_learning_rate)
        critic_optim = self._config.critic_optim_factory.create(q_funcs.named_modules(), lr=self._config.critic_learning_rate)
        # Temperature is tuned only when its learning rate is positive.
        if (self._config.temp_learning_rate > 0):
            temp_optim = self._config.temp_optim_factory.create(log_temp.named_modules(), lr=self._config.temp_learning_rate)
        else:
            temp_optim = None
        modules = SACModules(policy=policy, q_funcs=q_funcs, targ_q_funcs=targ_q_funcs, log_temp=log_temp, actor_optim=actor_optim, critic_optim=critic_optim, temp_optim=temp_optim)
        self._impl = SACImpl(observation_shape=observation_shape, action_size=action_size, modules=modules, q_func_forwarder=q_func_forwarder, targ_q_func_forwarder=targ_q_func_forwarder, gamma=self._config.gamma, tau=self._config.tau, device=self._device)

    def get_action_type(self) -> ActionSpace:
        """SAC acts on continuous action spaces."""
        return ActionSpace.CONTINUOUS
class MSMT17(BaseImageDataset):
    """MSMT17 person re-identification dataset loader (list-file splits)."""

    dataset_dir = 'MSMT17_V1'

    def __init__(self, root='/home/haoluo/data', verbose=True, **kwargs):
        super(MSMT17, self).__init__()
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'train')
        self.test_dir = osp.join(self.dataset_dir, 'test')
        # Official split list files shipped with MSMT17.
        self.list_train_path = osp.join(self.dataset_dir, 'list_train.txt')
        self.list_val_path = osp.join(self.dataset_dir, 'list_val.txt')
        self.list_query_path = osp.join(self.dataset_dir, 'list_query.txt')
        self.list_gallery_path = osp.join(self.dataset_dir, 'list_gallery.txt')
        self._check_before_run()
        train = self._process_dir(self.train_dir, self.list_train_path)
        # Query and gallery both live under the test directory.
        query = self._process_dir(self.test_dir, self.list_query_path)
        gallery = self._process_dir(self.test_dir, self.list_gallery_path)
        if verbose:
            print('=> MSMT17 loaded')
            self.print_dataset_statistics(train, query, gallery)
        self.train = train
        self.query = query
        self.gallery = gallery
        (self.num_train_pids, self.num_train_imgs, self.num_train_cams) = self.get_imagedata_info(self.train)
        (self.num_query_pids, self.num_query_imgs, self.num_query_cams) = self.get_imagedata_info(self.query)
        (self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams) = self.get_imagedata_info(self.gallery)

    def _check_before_run(self):
        """Raise RuntimeError if any expected dataset directory is missing."""
        if (not osp.exists(self.dataset_dir)):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))
        if (not osp.exists(self.train_dir)):
            raise RuntimeError("'{}' is not available".format(self.train_dir))
        if (not osp.exists(self.test_dir)):
            raise RuntimeError("'{}' is not available".format(self.test_dir))

    def _process_dir(self, dir_path, list_path):
        """Parse a list file into (img_path, pid, camid) tuples."""
        with open(list_path, 'r') as txt:
            lines = txt.readlines()
        dataset = []
        pid_container = set()
        for (img_idx, img_info) in enumerate(lines):
            # Each list line: "<relative image path> <pid>".
            (img_path, pid) = img_info.split(' ')
            pid = int(pid)
            # Third underscore-separated filename field holds the camera id
            # (presumably per MSMT17 naming -- confirm against the data).
            camid = int(img_path.split('_')[2])
            img_path = osp.join(dir_path, img_path)
            dataset.append((img_path, pid, camid))
            pid_container.add(pid)
        # Sanity check: list files label pids as consecutive ints from 0.
        for (idx, pid) in enumerate(pid_container):
            assert (idx == pid), 'See code comment for explanation'
        return dataset
def find_unclean_onnx_name(model: ONNXModel, name: str) -> str:
    """Invert ``clean_onnx_name``: find the one weight whose cleaned form is *name*.

    Raises ValueError unless exactly one weight matches.
    """
    candidates = [weight_name for weight_name in model.weights
                  if clean_onnx_name(weight_name) == name]
    if len(candidates) != 1:
        raise ValueError(f'Could not find unclean name for name {name}')
    return candidates[0]
def _training_config(proto):
    """Copy ``proto.training_config`` onto a plain attribute-bag object."""

    class TrainingConfig():
        pass

    src = proto.training_config
    config = TrainingConfig()
    config.max_epoch = src.max_epoch
    config.iter_per_epoch = src.iter_per_epoch
    config.save_best = src.save_best
    # Non-positive intervals fall back to the default of 10.
    if src.monitor_interval > 0:
        config.monitor_interval = src.monitor_interval
    else:
        config.monitor_interval = 10
    return config
def max_pool(bottom, ks, stride=1):
    """Caffe NetSpec helper: MAX-pooling layer of kernel size ``ks`` over ``bottom``."""
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)
def get_input_tensors():
    """Hypothesis strategy for a small random 2-D float32 array.

    Dimensions are drawn once (1..9 each); element values are integers
    in [0, 100] cast to float32.
    """
    n_rows = np.random.randint(1, 10)
    n_cols = np.random.randint(1, 10)
    return hu.arrays(dims=[n_rows, n_cols], dtype=np.float32,
                     elements=st.integers(min_value=0, max_value=100))
def download_dataset(dataset_tag):
    """Download and unpack the dataset selected by ``dataset_tag``.

    NOTE(review): the gdown URL arguments below appear truncated in this
    source (an empty quoted fragment precedes each archive name, which is
    not even valid syntax as written); restore the original download URLs
    before using this function.
    """
    print('Downloading dataset...')
    if (dataset_tag == 'zero_dce'):
        gdown.download(' 'Dataset_Part1.rar', quiet=False)
        print('Unpacking Dataset')
        # Requires the ``unrar`` binary on PATH.
        subprocess.run('unrar x Dataset_Part1.rar'.split(' '))
        print('Done!!!')
    elif (dataset_tag == 'dark_face'):
        gdown.download(' 'DarkPair.zip', quiet=False)
        print('Unpacking Dataset')
        # Requires the ``unzip`` binary on PATH.
        subprocess.run('unzip DarkPair.zip'.split(' '))
        print('Done!!!')
    else:
        raise AssertionError('Dataset tag not found')
class Market(object):
    """Market-1501-style re-id dataset: pid/camera parsed from file names."""

    def __init__(self, root):
        self.images_dir = osp.join(root)
        self.train_path = 'bounding_box_train'
        self.gallery_path = 'bounding_box_test'
        self.query_path = 'query'
        (self.train, self.query, self.gallery) = ([], [], [])
        (self.num_train_ids, self.num_query_ids, self.num_gallery_ids) = (0, 0, 0)
        self.load()

    def preprocess(self, path, relabel=True):
        """Collect (fname, pid, cam) tuples from *path*.

        With relabel=True person ids are remapped to 0..N-1 (training);
        otherwise the original ids are kept (query/gallery). Junk images
        with pid == -1 are skipped; camera ids are made 0-based.
        """
        # Filenames look like "<pid>_c<cam>..." -- capture both numbers.
        pattern = re.compile('([-\\d]+)_c(\\d)')
        all_pids = {}
        ret = []
        fpaths = sorted(glob(osp.join(self.images_dir, path, '*.jpg')))
        for fpath in fpaths:
            fname = osp.basename(fpath)
            (pid, cam) = map(int, pattern.search(fname).groups())
            if (pid == (- 1)):
                continue
            if relabel:
                if (pid not in all_pids):
                    all_pids[pid] = len(all_pids)
            elif (pid not in all_pids):
                all_pids[pid] = pid
            pid = all_pids[pid]
            cam -= 1
            ret.append((fname, pid, cam))
        return (ret, int(len(all_pids)))

    def load(self):
        """Populate train/query/gallery and print a summary table."""
        (self.train, self.num_train_ids) = self.preprocess(self.train_path)
        (self.gallery, self.num_gallery_ids) = self.preprocess(self.gallery_path, False)
        (self.query, self.num_query_ids) = self.preprocess(self.query_path, False)
        print(self.__class__.__name__, 'dataset loaded')
        print(' subset | # ids | # images')
        print(' ')
        print(' train | {:5d} | {:8d}'.format(self.num_train_ids, len(self.train)))
        print(' query | {:5d} | {:8d}'.format(self.num_query_ids, len(self.query)))
        print(' gallery | {:5d} | {:8d}'.format(self.num_gallery_ids, len(self.gallery)))
def __add_info_subprocess(available_datasets: List[str], subparsers) -> None:
    """Register the ``info`` subcommand and its dataset-filter arguments."""
    info_text = 'Show info about projects, project versions, and misuses in MUBench.'
    info_parser = subparsers.add_parser('info',
                                        formatter_class=SortingHelpFormatter,
                                        help=info_text,
                                        description=info_text)
    __setup_filter_arguments(info_parser, available_datasets)
def generate_nodes(node_procs, router_names, memo_size):
    """Build one quantum-router node config per name in *router_names*.

    The enumeration index doubles as the per-node RNG seed; the process
    group comes from ``node_procs[name]``.
    """
    nodes = []
    for seed, name in enumerate(router_names):
        nodes.append({
            Topology.NAME: name,
            Topology.TYPE: RouterNetTopo.QUANTUM_ROUTER,
            Topology.SEED: seed,
            RouterNetTopo.MEMO_ARRAY_SIZE: memo_size,
            RouterNetTopo.GROUP: node_procs[name],
        })
    return nodes
def extract_sent_candidates(text_obj):
    """Return each POS-tagged sentence re-joined into a plain word string."""
    candidates = []
    for tagged_sent in text_obj.pos_tagged:
        words = (token for (token, _tag) in tagged_sent)
        candidates.append(' '.join(words))
    return candidates
def _get_filenames_and_classes(dataset_dir):
    """Scan ``<dataset_dir>/images/<class>/*`` for photos and class names.

    Returns (photo_filenames, sorted_class_names); photo order follows
    os.listdir traversal, class names are sorted.
    """
    images_root = os.path.join(dataset_dir, 'images')
    class_names = []
    class_dirs = []
    for entry in os.listdir(images_root):
        candidate = os.path.join(images_root, entry)
        if os.path.isdir(candidate):
            class_dirs.append(candidate)
            class_names.append(entry)
    photo_filenames = []
    for class_dir in class_dirs:
        photo_filenames.extend(os.path.join(class_dir, fn)
                               for fn in os.listdir(class_dir))
    return (photo_filenames, sorted(class_names))
class VarmField(ArrayLikeField):
    """ArrayLikeField pinned to the ``varm`` slot (presumably AnnData's
    ``varm`` mapping -- confirm against ArrayLikeField's docs)."""

    def __init__(self, *args, **kwargs):
        # Force field_type='varm'; everything else passes through unchanged.
        super().__init__(*args, field_type='varm', **kwargs)
class RequestError(PoolError):
    """PoolError that also records the URL of the failed request."""

    def __init__(self, pool, url, message):
        self.url = url
        PoolError.__init__(self, pool, message)

    def __reduce__(self):
        # For pickling: the pool is dropped (pool=None, message=None) and
        # only the URL survives the round-trip.
        return (self.__class__, (None, self.url, None))
def _train_loader_from_config(cfg, mapper, dataset_name=None, *, dataset=None, sampler=None):
    """Translate a detectron2-style ``cfg`` into train-loader keyword args.

    Any of dataset/mapper/sampler may be passed explicitly to override the
    cfg-derived default.
    """
    if (dataset is None):
        dataset = get_detection_dataset_dicts(dataset_name, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, proposal_files=(cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None))
    if (mapper is None):
        # Second argument True selects training-time mapping.
        mapper = DatasetMapper(cfg, True)
    if (sampler is None):
        sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
        logger = logging.getLogger(__name__)
        logger.info('Using training sampler {}'.format(sampler_name))
        # NOTE(review): sampler_name is logged but never dispatched on --
        # only TrainingSampler is ever constructed here.
        sampler = TrainingSampler(len(dataset))
    return {'dataset': dataset, 'sampler': sampler, 'mapper': mapper, 'total_batch_size': cfg.SOLVER.IMS_PER_BATCH, 'aspect_ratio_grouping': cfg.DATALOADER.ASPECT_RATIO_GROUPING, 'num_workers': cfg.DATALOADER.NUM_WORKERS}
def replaces_ufunc(func: Callable[(..., Tuple[str])], name: str):
    """Register *func* as the replacement implementation for ufunc *name*
    in the global ``Replacements`` table, returning *func* unchanged so it
    can be used decorator-style.

    NOTE(review): the ``Callable[(..., Tuple[str])]`` annotation looks
    garbled; confirm the intended signature upstream.
    """
    Replacements._ufunc_rep[name] = func
    return func
class LinearExtensionsOfMobile(LinearExtensionsOfPoset):
    """Linear extensions of a mobile poset, counted via d-complete hook products."""

    def cardinality(self):
        """Return the number of linear extensions.

        Splits the ribbon at its folds, builds d-complete subposets from
        the resulting components, and evaluates a determinant of
        hook-product reciprocals scaled by |P|!.
        """
        import sage.combinat.posets.d_complete as dc
        # Position of the anchor inside the ribbon (or past the end if none).
        if self._poset._anchor:
            anchor_index = self._poset._ribbon.index(self._poset._anchor[0])
        else:
            anchor_index = len(self._poset._ribbon)
        folds_up = []
        folds_down = []
        # Classify consecutive ribbon covers as up- or down-folds depending
        # on which side of the anchor they fall.
        for (ind, r) in enumerate(self._poset._ribbon[:(- 1)]):
            if ((ind < anchor_index) and self._poset.is_greater_than(r, self._poset._ribbon[(ind + 1)])):
                folds_up.append((self._poset._ribbon[(ind + 1)], r))
            elif ((ind >= anchor_index) and self._poset.is_less_than(r, self._poset._ribbon[(ind + 1)])):
                folds_down.append((r, self._poset._ribbon[(ind + 1)]))
        # No folds: the poset is d-complete and has a direct count.
        if ((not folds_up) and (not folds_down)):
            return dc.DCompletePoset(self._poset).linear_extensions().cardinality()
        cr = self._poset.cover_relations()
        # Remove the fold covers; the remaining connected components give
        # the ordered decomposition below.
        foldless_cr = [tuple(c) for c in cr if ((tuple(c) not in folds_up) and (tuple(c) not in folds_down))]
        elmts = list(self._poset._elements)
        poset_components = DiGraph([elmts, foldless_cr])
        ordered_poset_components = [poset_components.connected_component_containing_vertex(f[1], sort=False) for f in folds_up]
        ordered_poset_components.extend((poset_components.connected_component_containing_vertex(f[0], sort=False) for f in folds_down))
        ordered_poset_components.append(poset_components.connected_component_containing_vertex((folds_down[(- 1)][1] if folds_down else folds_up[(- 1)][0]), sort=False))
        folds = folds_up
        folds.extend(folds_down)
        # Build the (len(folds)+1)-square matrix of hook-product reciprocals.
        mat = []
        for i in range((len(folds) + 1)):
            mat_poset = dc.DCompletePoset(self._poset.subposet(ordered_poset_components[i]))
            # Row prefix: i-1 zeros then a single 1 (for rows past the first).
            row = (([0] * ((i - 1) if ((i - 1) > 0) else 0)) + ([1] * (1 if (i >= 1) else 0)))
            row.append((1 / mat_poset.hook_product()))
            # Accumulate slant sums across the remaining folds.
            for (j, f) in enumerate(folds[i:]):
                next_poset = self._poset.subposet(ordered_poset_components[((j + i) + 1)])
                mat_poset = dc.DCompletePoset(next_poset.slant_sum(mat_poset, f[0], f[1]))
                row.append((1 / mat_poset.hook_product()))
            mat.append(row)
        return (matrix(QQ, mat).determinant() * factorial(self._poset.cardinality()))
def clean_last_char(sentences):
    """Trim each sentence pair in place and drop empty entries.

    For every item whose first field is non-empty, strip the last char of
    field 0 and the last three chars of field 1; items with an empty first
    field are removed. Mutates *sentences* in place and returns it.

    Bug fix: the original iterated ``range(len(sentences))`` forward while
    deleting, which shifts later indices -- skipping elements and raising
    IndexError once the list shrinks. Iterating in reverse keeps in-place
    deletion safe and processes every element exactly once.
    """
    for n in range(len(sentences) - 1, -1, -1):
        if sentences[n][0] != '':
            sentences[n][0] = sentences[n][0][:-1]
            sentences[n][1] = sentences[n][1][:-3]
        else:
            del sentences[n]
    return sentences
def test_basic():
    """MapFusion then TaskletFusion applied to (A + 1) * 2 must each fire
    exactly once, leaving the numeric result unchanged."""

    # NOTE(review): this inner function is presumably decorated as a dace
    # program in the original source (the decorator is not visible here) --
    # ``.to_sdfg`` below requires it.
    def test_basic_tf(A: datatype[(5, 5)]):
        B = (A + 1)
        return (B * 2)

    sdfg = test_basic_tf.to_sdfg(simplify=True)
    num_map_fusions = sdfg.apply_transformations(MapFusion)
    assert (num_map_fusions == 1)
    num_tasklet_fusions = sdfg.apply_transformations(TaskletFusion)
    assert (num_tasklet_fusions == 1)
    A = np.ones((5, 5), dtype=np_datatype)
    result = sdfg(A=A)
    assert np.allclose(result, (2 * (A + 1)))
def main(args=None):
    """CLI entry point: parse args, seed RNGs, then train or evaluate."""
    parsed = parse_args(args=args)
    utils.set_random_seed(parsed.seed)
    opts = vars(parsed)
    logger.info('Running MWT expander in {} mode'.format(opts['mode']))
    if opts['mode'] == 'train':
        train(opts)
    else:
        evaluate(opts)
def main(_):
    """Entry point: assemble hyper-parameters/config, then dispatch on mode."""
    # Three HParams groups: preprocessing/runtime flags, model-size
    # overrides, and training-loop settings.
    hparams_center = HParamsCenter(HParams(load_preproc=True, bert_pretrained_dir='none', max_sequence_len=64, src_infer_dir='none', tgt_infer_dir='none', timeout=5.0, use_op_type_constraint=False, ner_dump_dir='save_ner_num', debug_dec=0, num_parallels=4, dump_dir='placeholder', clear_dump_dir=False, kb_mode='online', verbose_test=False, use_filtered_ent=True, use_dump_ner=False), HParams(pretrained_num_layers=(- 1)), HParams(num_epochs=3, num_steps=(- 1), train_batch_size=32, test_batch_size=32, load_model=False, load_path='none', eval_period=500, save_model=False, save_num=3), models_dir='e2e.models')
    cfg = Configs(hparams_center, 'multi_task_sp')
    if (cfg['mode'] == 'train'):
        train(cfg)
    elif (cfg['mode'] == 'infer'):
        # Non-training modes reload the saved model's own config first.
        assert os.path.isdir(cfg['load_path'])
        model_cfg = Configs.load_cfg_from_file(Configs.gen_cfg_path(cfg['load_path']))
        assert (model_cfg is not None)
        infer(model_cfg, cfg)
    elif (cfg['mode'] == 'decoding'):
        assert os.path.isdir(cfg['load_path'])
        model_cfg = Configs.load_cfg_from_file(Configs.gen_cfg_path(cfg['load_path']))
        assert (model_cfg is not None)
        decoding(model_cfg, cfg)
    elif (cfg['mode'] == 'parallel_test'):
        assert os.path.isdir(cfg['load_path'])
        model_cfg = Configs.load_cfg_from_file(Configs.gen_cfg_path(cfg['load_path']))
        assert (model_cfg is not None)
        parallel_test(model_cfg, cfg)
def train(opt, log):
    """Incremental-learning driver: pick a learner from ``opt.il``, then
    train and test task-by-task over ``opt.lan_list``.

    NOTE(review): recovered from whitespace-collapsed source; the loop and
    branch nesting below is a best-effort reconstruction -- verify against
    the original file.
    """
    write_data_log(f''' {opt.exp_name} ''')
    print(f''' {opt.exp_name} ''')
    # One (language) dataset per task; the same list serves train and valid.
    valid_datasets = train_datasets = [lan for lan in opt.lan_list]
    best_scores = []
    ned_scores = []
    valid_datas = []
    char = dict()
    opt_log = ' Options \n'
    args = vars(opt)
    for (k, v) in args.items():
        # The character vocabulary can be huge; summarize rather than dump.
        if ((str(k) == 'character') and (len(str(v)) > 500)):
            opt_log += f'''{str(k)}: So many characters to show all: number of characters: {len(str(v))} '''
    opt_log += '\n'
    log.write(opt_log)
    # Select the incremental-learning strategy.
    if (opt.il == 'lwf'):
        learner = LwF(opt)
    elif (opt.il == 'wa'):
        learner = WA(opt)
    elif (opt.il == 'ewc'):
        learner = EWC(opt)
    elif (opt.il == 'der'):
        learner = DER(opt)
    elif (opt.il == 'mrn'):
        learner = MRN(opt)
    elif ((opt.il == 'joint_mix') or (opt.il == 'joint_loader')):
        learner = JointLearner(opt)
    else:
        learner = BaseLearner(opt)
    data_manager = Dataset_Manager(opt)
    for taski in range(len(train_datasets)):
        for valid_data in opt.valid_datas:
            val_data = os.path.join(valid_data, valid_datasets[taski])
            valid_datas.append(val_data)
        valid_loader = Val_Dataset(valid_datas, opt)
        select_data = opt.select_data
        AlignCollate_valid = AlignCollate(opt, mode='test')
        if ((opt.il == 'joint_loader') or (opt.il == 'joint_mix')):
            # Joint training: gather validation dirs for ALL tasks, train
            # once over everything, then stop.
            valid_datas = []
            char = {}
            for taski in range(len(train_datasets)):
                for val_data in opt.valid_datas:
                    valid_data = os.path.join(val_data, valid_datasets[taski])
                    valid_datas.append(valid_data)
            data_manager.joint_start(opt, select_data, log, taski, len(train_datasets))
            for data_path in opt.select_data:
                (opt.character, char) = load_dict((data_path + f'/{opt.lan_list[taski]}'), char)
                print(len(opt.character))
            (best_scores, ned_scores) = learner.incremental_train(0, opt.character, data_manager, valid_loader, AlignCollate_valid, valid_datas)
            (best_scores, ned_scores) = learner.test(AlignCollate_valid, valid_datas, best_scores, ned_scores, 0)
            break
        if (taski == 0):
            data_manager.init_start(opt, select_data, log, taski)
        train_loader = data_manager
        for data_path in opt.select_data:
            if (data_path == '/'):
                # NOTE(review): this branch assigns load_dict()'s raw return
                # to opt.character while the other branch unpacks a tuple --
                # likely inconsistent; verify load_dict's return value.
                opt.character = load_dict((data_path + f'/{opt.lan_list[taski]}'), char)
            else:
                (opt.character, tmp_char) = load_dict((data_path + f'/{opt.lan_list[taski]}'), char)
        learner.incremental_train(taski, opt.character, train_loader, valid_loader)
        (best_scores, ned_scores) = learner.test(AlignCollate_valid, valid_datas, best_scores, ned_scores, taski)
        learner.after_task()
    write_data_log(f''' {opt.exp_name} ''')
    print(f''' {opt.exp_name} ''')
    # Report averaged accuracies (one or two validation suites).
    if (len(opt.valid_datas) == 1):
        print('ALL Average Incremental Accuracy: {:.2f} \n'.format((sum(best_scores) / len(best_scores))))
        write_data_log('ALL Average Acc: {:.2f} \n'.format((sum(best_scores) / len(best_scores))))
    elif (len(opt.valid_datas) == 2):
        print('ALL Average 17 Acc: {:.2f} \n'.format((sum(best_scores) / len(best_scores))))
        print('ALL Average 19 Acc: {:.2f} \n'.format((sum(ned_scores) / len(ned_scores))))
        write_data_log('ALL 17 Acc: {:.2f} \n'.format((sum(best_scores) / len(best_scores))))
        write_data_log('ALL 19 Acc: {:.2f} \n'.format((sum(ned_scores) / len(ned_scores))))
def test_rank_selection():
    """get_index must return a valid index into the population."""
    strategy = sel.RankSelection()
    pop = [MagicMock(chrom.Chromosome) for _ in range(20)]
    chosen = strategy.get_index(pop)
    assert 0 <= chosen < len(pop)
class PickleObject():
    """Pairs a value with the sage_input expression that rebuilds it.

    ``immutable`` records whether ``_sage_input_`` has been invoked.
    """

    def __init__(self, value, expression):
        self.value = value
        self.expression = expression
        self.immutable = False

    def _sage_input_(self, sib, coerced):
        # Mark the object as consumed by the sage_input machinery before
        # handing back the stored expression.
        self.immutable = True
        return self.expression
class Model(nn.Module):
    """Minimal module holding a single 10 -> 10 linear projection."""

    def __init__(self):
        super().__init__()
        self.projection = nn.Linear(10, 10)

    def forward(self):
        # Intentionally a no-op placeholder.
        pass
# NOTE(review): the decorator below appears truncated -- a bare
# ``(st.floats(...))`` expression with nothing before it, presumably
# ``@given(st.floats(...))`` from hypothesis. Confirm upstream.
(st.floats(min_value=0.0, max_value=float('inf'), exclude_min=False, exclude_max=True))
def test_normalise(value):
    """normalise(v) should equal v / (1 + v) for non-negative finite v."""
    assert (ff.normalise(value) == (value / (1.0 + value)))
class DeepFM(BaseModel):
    """DeepFM: wide (linear) part + FM pairwise interactions + deep DNN tower.

    The three logits are summed and passed through the task-specific output
    activation supplied by BaseModel.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, use_fm=True, dnn_hidden_units=(256, 128), l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary', device='cpu'):
        super(DeepFM, self).__init__(linear_feature_columns, dnn_feature_columns, l2_reg_linear=l2_reg_linear, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task, device=device)
        self.use_fm = use_fm
        # The DNN tower is built only when there are feature columns AND at
        # least one hidden layer.
        self.use_dnn = ((len(dnn_feature_columns) > 0) and (len(dnn_hidden_units) > 0))
        if use_fm:
            self.fm = FM()
        if self.use_dnn:
            self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units, activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn, init_std=init_std, device=device)
            self.dnn_linear = nn.Linear(dnn_hidden_units[(- 1)], 1, bias=False).to(device)
            # L2-regularize DNN weights (batch-norm params excluded) and the
            # final projection.
            self.add_regularization_weight(filter((lambda x: (('weight' in x[0]) and ('bn' not in x[0]))), self.dnn.named_parameters()), l2_reg_dnn)
            self.add_regularization_weight(self.dnn_linear.weight, l2_reg_dnn)
        self.to(device)

    def forward(self, X):
        """Compute predictions for a batch ``X`` of encoded features."""
        (sparse_embedding_list, dense_value_list) = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict)
        # Wide / linear part.
        logit = self.linear_model(X)
        if (self.use_fm and (len(sparse_embedding_list) > 0)):
            fm_input = torch.cat(sparse_embedding_list, dim=1)
            logit += self.fm(fm_input)
        if self.use_dnn:
            dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
            dnn_output = self.dnn(dnn_input)
            dnn_logit = self.dnn_linear(dnn_output)
            logit += dnn_logit
        y_pred = self.out(logit)
        return y_pred