def get_cached_module_file(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False):
    if is_offline_mode() and not local_files_only:
        logger.info('Offline mode: forcing local_files_only=True')
        local_files_only = True
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isdir(pretrained_model_name_or_path):
        submodule = pretrained_model_name_or_path.split(os.path.sep)[-1]
    else:
        submodule = pretrained_model_name_or_path.replace('/', os.path.sep)
    try:
        resolved_module_file = cached_file(pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision)
    except EnvironmentError:
        logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
        raise
    modules_needed = check_imports(resolved_module_file)
    full_submodule = TRANSFORMERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == pretrained_model_name_or_path.split(os.path.sep)[-1]:
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=use_auth_token).sha
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(pretrained_model_name_or_path, f'{module_needed}.py', cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only)
    return os.path.join(full_submodule, module_file)
def main(argv):
    args = parse_args(argv)
    args.ws_dir = args.ws_dir.absolute()
    if args.prefix is not None:
        args.prefix = args.prefix.absolute()
    freetds_archive, iconv_archive = download(args)
    if platform.system() == 'Windows':
        os.environ['PATH'] += f';{args.msys}'
        build_windows(args, freetds_archive, iconv_archive)
    else:
        build(args, freetds_archive)
    args.dist_dir = args.dist_dir.absolute()
    env = os.environ.copy()
    env.update(PYMSSQL_FREETDS=f'{args.prefix}')
    run(f'{sys.executable} -m pip wheel . -w {args.dist_dir}', shell=True, env=env)
    if args.sdist:
        fmt = 'zip' if platform.system() == 'Windows' else 'gztar'
        run(f'{sys.executable} setup.py sdist --formats={fmt} -d {args.dist_dir}', shell=True, env=env)
def resnet_arg_scope(weight_decay=0.0001, batch_norm_decay=0.997, batch_norm_epsilon=1e-05, batch_norm_scale=True, activation_fn=tf.nn.relu, use_batch_norm=True, batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': batch_norm_updates_collections,
        'fused': None,
    }
    with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), weights_initializer=slim.variance_scaling_initializer(), activation_fn=activation_fn, normalizer_fn=slim.batch_norm if use_batch_norm else None, normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
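A hedged usage sketch for the arg scope above: model construction is wrapped in the returned scope so every conv layer picks up the shared regularizer and normalizer settings. The `resnet_v1_50` model function and the `images` tensor are assumed from the surrounding TF-slim codebase, not defined here.

# Hypothetical usage (TF 1.x with tf.contrib.slim); resnet_v1_50 and images are assumptions.
with slim.arg_scope(resnet_arg_scope(weight_decay=1e-4)):
    net, end_points = resnet_v1_50(images, num_classes=1000, is_training=True)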
def load_custom_pretrained(model: nn.Module, pretrained_cfg: Optional[Dict] = None, load_fn: Optional[Callable] = None):
    pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None)
    if not pretrained_cfg:
        _logger.warning('Invalid pretrained config, cannot load weights.')
        return
    load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg)
    if not load_from:
        _logger.warning('No pretrained weights exist for this model. Using random initialization.')
        return
    if load_from == 'hf-hub':
        _logger.warning('Hugging Face hub not currently supported for custom load pretrained models.')
    elif load_from == 'url':
        pretrained_loc = download_cached_file(pretrained_loc, check_hash=_CHECK_HASH, progress=_DOWNLOAD_PROGRESS)
    if load_fn is not None:
        load_fn(model, pretrained_loc)
    elif hasattr(model, 'load_pretrained'):
        model.load_pretrained(pretrained_loc)
    else:
        _logger.warning('Valid function to load pretrained weights is not available, using random initialization.')
def read_platform_numbers(filename, in_upper=False, num_as_int=False):
    out_dict = {}
    with open(filename, 'r') as fid:
        for row in fid:
            if not row.startswith('#'):
                parts = row.split()
                if len(parts) < 2:
                    continue
                platform = ' '.join(parts[:-1])
                num = parts[-1]
                if in_upper:
                    platform = platform.upper()
                if num_as_int:
                    num = int(num)
                out_dict[platform] = num
    return out_dict
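A usage sketch for the parser above. The file name and contents are made up; the format it implies is "platform name (may contain spaces), last whitespace-separated field is the number, '#' lines are comments".

# Hypothetical file 'platforms.txt':
#   # platform        number
#   Raspberry Pi 4    42
#   x86_64            1
mapping = read_platform_numbers('platforms.txt', in_upper=True, num_as_int=True)
# -> {'RASPBERRY PI 4': 42, 'X86_64': 1}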
def generate_random_log_paths(sample_len: int, sample_size: int, mean: float, std: float, leverage: float = 1.0):
    mean = mean * leverage
    std = std * leverage
    # Ito correction: the drift of log returns is mu - sigma^2 / 2
    mean = mean - 0.5 * std * std
    time = np.arange(1, 1 + sample_len)
    returns_vector = np.random.normal(loc=mean, scale=std, size=(sample_len * sample_size, 1))
    returns = np.reshape(returns_vector, (sample_len, sample_size))
    return LogReturnsDataFrame(data=returns, index=time)
def str_dec(string):
    res = ''
    prev_slash = False
    for ch in string:
        if ch == chr(92):  # backslash
            if not prev_slash:
                prev_slash = True
            else:
                res += ch
                prev_slash = False
        else:
            prev_slash = False
            res += ch
    return res
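A quick behavioral check of the function above: it collapses doubled backslashes into one and drops lone backslashes (note that '\\' in Python source is a single backslash at runtime).

assert str_dec('a\\\\b') == 'a\\b'   # two backslashes -> one
assert str_dec('a\\b') == 'ab'       # a lone backslash is dropped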
def _identify_radius(r):
    r = r.replace(' ', '')
    try:
        if r.startswith('(') and r.endswith(')'):
            rx, ry = map(float, r.lstrip('(').rstrip(')').split(','))
        elif r == 'None':
            rx = ry = None
        else:
            rx = ry = float(r)
        return (rx, ry)
    except (ValueError, AttributeError):
        return r
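For reference, a few example inputs and what they normalize to (given the reconstruction above):

_identify_radius('(1.5, 2.0)')  # -> (1.5, 2.0)
_identify_radius('3')           # -> (3.0, 3.0)
_identify_radius('None')        # -> (None, None)
_identify_radius('oops')        # -> 'oops' (unparseable input is returned unchanged)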
def path_logger(result_dir, log_time):
    streamHandler = logging.StreamHandler()
    streamHandler.setLevel(logging.DEBUG)
    global logger
    logger = logging.getLogger('basic')
    logger.setLevel(logging.DEBUG)
    path_logging = os.path.join(result_dir, f'{log_time}')
    fileHandler = logging.FileHandler(path_logging, mode='w')
    fileHandler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(levelno)s - %(filename)s - %(funcName)s - %(message)s')
    streamHandler.setFormatter(formatter)
    fileHandler.setFormatter(formatter)
    logger.addHandler(streamHandler)
    logger.addHandler(fileHandler)
    return logger
class M(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x, w1, w2):
        val1 = torch.neg(w1)
        m1 = torch.cat([val1, w2]).sum()
        val2 = torch.neg(w1)
        m2 = torch.cat([val2, w2]).sum()
        return x + torch.max(m1) + torch.max(m2)
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', pad_token='<pad>', unk_token='<unk>', language_codes='m2m100', sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
        kwargs['additional_special_tokens'] = kwargs.get('additional_special_tokens', [])
        kwargs['additional_special_tokens'] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs['additional_special_tokens']
        ]
        super().__init__(src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs)
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)
        self.lang_token_to_id = {self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else 'en'
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id) + self.num_madeup_words

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return self.sp_model.decode(tokens)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + [0] * len(token_ids_0) + suffix_ones
        return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        vocab_save_path = save_dir / ((filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'])
        spm_save_path = save_dir / ((filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'])
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = 'en', tgt_texts: Optional[List[str]] = None, tgt_lang: str = 'ro', **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    @contextmanager
    def as_target_tokenizer(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)
        yield
        self.set_src_lang_special_tokens(self.src_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def send_new_schedule_invitation_answer(schedule_item, request):
    invitation_admin_url = request.build_absolute_uri(schedule_item.get_invitation_admin_url())
    schedule_item_admin_url = request.build_absolute_uri(schedule_item.get_admin_url())
    submission = schedule_item.submission
    publish_message(
        'NewScheduleInvitationAnswer',
        body={
            'speaker_id': submission.speaker_id,
            'schedule_item_id': schedule_item.id,
            'invitation_admin_url': invitation_admin_url,
            'schedule_item_admin_url': schedule_item_admin_url,
        },
        deduplication_id=str(uuid4()),
    )
class TestLeadAcidLOQS(TestCase):
    def test_well_posed(self):
        options = {'thermal': 'isothermal'}
        model = pybamm.lead_acid.LOQS(options)
        model.check_well_posedness()
        model = pybamm.lead_acid.LOQS(build=False)
        model.build_model()
        model.check_well_posedness()

    def test_default_geometry(self):
        options = {'thermal': 'isothermal'}
        model = pybamm.lead_acid.LOQS(options)
        self.assertNotIn('negative particle', model.default_geometry)
        self.assertIsInstance(model.default_spatial_methods, dict)
        self.assertIsInstance(model.default_spatial_methods['current collector'], pybamm.ZeroDimensionalSpatialMethod)
        self.assertTrue(issubclass(model.default_submesh_types['current collector'], pybamm.SubMesh0D))

    def test_well_posed_with_convection(self):
        options = {'convection': 'uniform transverse'}
        model = pybamm.lead_acid.LOQS(options)
        model.check_well_posedness()
        options = {'dimensionality': 1, 'convection': 'full transverse'}
        model = pybamm.lead_acid.LOQS(options)
        model.check_well_posedness()

    def test_well_posed_1plus1D(self):
        options = {'surface form': 'differential', 'current collector': 'potential pair', 'dimensionality': 1}
        model = pybamm.lead_acid.LOQS(options)
        model.check_well_posedness()
        self.assertIsInstance(model.default_spatial_methods['current collector'], pybamm.FiniteVolume)
        self.assertTrue(issubclass(model.default_submesh_types['current collector'], pybamm.Uniform1DSubMesh))

    def test_well_posed_2plus1D(self):
        options = {'surface form': 'differential', 'current collector': 'potential pair', 'dimensionality': 2}
        model = pybamm.lead_acid.LOQS(options)
        model.check_well_posedness()
        self.assertIsInstance(model.default_spatial_methods['current collector'], pybamm.ScikitFiniteElement)
        self.assertTrue(issubclass(model.default_submesh_types['current collector'], pybamm.ScikitUniform2DSubMesh))
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    image_column_name: Optional[str] = field(default=None, metadata={'help': 'The column name of the images in the files.'})
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(default=0.15, metadata={'help': 'Percent to split off of train for validation.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})

    def __post_init__(self):
        data_files = dict()
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['val'] = self.validation_dir
        self.data_files = data_files if data_files else None
def cast_tensor_type(inputs, src_type, dst_type):
    if isinstance(inputs, torch.Tensor):
        return inputs.to(dst_type)
    elif isinstance(inputs, str):
        return inputs
    elif isinstance(inputs, np.ndarray):
        return inputs
    elif isinstance(inputs, abc.Mapping):
        return type(inputs)({k: cast_tensor_type(v, src_type, dst_type) for k, v in inputs.items()})
    elif isinstance(inputs, abc.Iterable):
        return type(inputs)(cast_tensor_type(item, src_type, dst_type) for item in inputs)
    return inputs
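A small sketch of the recursive cast on a nested container (the sample batch is made up; only torch is needed beyond the function itself):

import torch
batch = {'img': torch.ones(2, 3, dtype=torch.float32), 'meta': ['keep-me'], 'boxes': [torch.zeros(4, dtype=torch.float32)]}
out = cast_tensor_type(batch, torch.float32, torch.half)
# out['img'].dtype == torch.float16; strings and the container structure are preserved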
class Simulator(multiprocessing.Process):
    class State:
        PRE_START = 0
        CALLING = 1
        PLAYING = 2
        END = 3

    templates = load_templates()
    mini_templates = load_mini_templates(templates)

    def __init__(self, idx, hwnd, pipe_sim2exps, pipe_exps2sim, pipe_sim2coord, pipe_coord2sim, pipe_sim2mgr, pipe_mgr2sim, agent_names, exploration, toggle):
        super(Simulator, self).__init__()
        self.name = 'simulator-{}'.format(idx)
        self.sim2exps = pipe_sim2exps
        self.exps2sim = pipe_exps2sim
        self.sim2coord = pipe_sim2coord
        self.coord2sim = pipe_coord2sim
        self.sim2mgr = pipe_sim2mgr
        self.mgr2sim = pipe_mgr2sim
        self.agent_names = agent_names
        self.window_rect = get_window_rect(hwnd)
        self.cxt = [10 + self.window_rect[0], 1150 + self.window_rect[1]]
        self.current_screen = None
        self.win_rates = {n: StatCounter() for n in self.agent_names}
        self.state = Simulator.State.CALLING
        self.current_lord_pos = None
        self.cached_msg = None
        self.exploration = exploration
        self.history = [[], [], []]
        self.predictor = Predictor()
        self.toggle = toggle

    def reset_episode(self):
        self.state = Simulator.State.CALLING
        self.current_lord_pos = None
        self.cached_msg = None
        self.history = [[], [], []]

    def run(self):
        logger.info('simulator main loop')
        context = zmq.Context()
        sim2coord_socket = context.socket(zmq.PUSH)
        sim2coord_socket.setsockopt(zmq.IDENTITY, self.name.encode('utf-8'))
        sim2coord_socket.set_hwm(2)
        sim2coord_socket.connect(self.sim2coord)
        coord2sim_socket = context.socket(zmq.DEALER)
        coord2sim_socket.setsockopt(zmq.IDENTITY, self.name.encode('utf-8'))
        coord2sim_socket.set_hwm(2)
        coord2sim_socket.connect(self.coord2sim)
        sim2exp_sockets = []
        for sim2exp in self.sim2exps:
            sim2exp_socket = context.socket(zmq.PUSH)
            sim2exp_socket.setsockopt(zmq.IDENTITY, self.name.encode('utf-8'))
            sim2exp_socket.set_hwm(2)
            sim2exp_socket.connect(sim2exp)
            sim2exp_sockets.append(sim2exp_socket)
        sim2mgr_socket = context.socket(zmq.PUSH)
        sim2mgr_socket.setsockopt(zmq.IDENTITY, self.name.encode('utf-8'))
        sim2mgr_socket.set_hwm(2)
        sim2mgr_socket.connect(self.sim2mgr)
        mgr2sim_socket = context.socket(zmq.DEALER)
        mgr2sim_socket.setsockopt(zmq.IDENTITY, self.name.encode('utf-8'))
        mgr2sim_socket.set_hwm(2)
        mgr2sim_socket.connect(self.mgr2sim)

        def request_screen():
            sim2mgr_socket.send(dumps([self.name, SimulatorManager.MSG_TYPE.SCREEN, []]))
            return loads(mgr2sim_socket.recv(copy=False).bytes)

        def request_click(bbox):
            sim2mgr_socket.send(dumps([self.name, SimulatorManager.MSG_TYPE.CLICK, [(bbox[0] + bbox[2]) // 2 + self.window_rect[0] + 6, (bbox[1] + bbox[3]) // 2 + self.window_rect[1] + 46]]))
            return loads(mgr2sim_socket.recv(copy=False).bytes)

        def request_lock():
            sim2mgr_socket.send(dumps([self.name, SimulatorManager.MSG_TYPE.LOCK, []]))
            return loads(mgr2sim_socket.recv(copy=False).bytes)

        def request_unlock():
            sim2mgr_socket.send(dumps([self.name, SimulatorManager.MSG_TYPE.UNLOCK, []]))
            return loads(mgr2sim_socket.recv(copy=False).bytes)

        def spin_lock_on_button():
            act = dict()
            while not act:
                self.current_screen = request_screen()
                cv2.imwrite('debug.png', self.current_screen)
                act = get_current_button_action(self.current_screen)
                if self.toggle.value == 0:
                    break
            return act

        def discard(act, bboxes, idxs):
            def diff(idxs, cards):
                # Indices whose on-screen selected state does not match the desired selection
                res = []
                for i in range(len(cards)):
                    if cards[i] is not None:
                        if i in idxs:
                            res.append(i)
                    elif i not in idxs:
                        res.append(i)
                return res

            differences = diff(idxs, get_cards_bboxes(request_screen(), self.templates, bboxes=bboxes)[0])
            print(differences)
            request_lock()
            while len(differences) > 0:
                for d in differences:
                    request_click(bboxes[d])
                differences = diff(idxs, get_cards_bboxes(request_screen(), self.templates, bboxes=bboxes)[0])
                print(differences)
            if 'chupai' in act:
                request_click(act['chupai'])
            elif 'alone_chupai' in act:
                request_click(act['alone_chupai'])
            elif 'ming_chupai' in act:
                request_click(act['ming_chupai'])
            request_unlock()

        game_cnt = 0
        while True:
            import psutil
            if self.toggle.value == 0:
                time.sleep(0.2)
                continue
            print('new round')
            self.current_screen = request_screen()
            act = spin_lock_on_button()
            if not act:
                continue
            print(act)
            if 'start' in act:
                request_click(act['start'])
                continue
            if self.state == Simulator.State.CALLING:
                if 'reverse' in act:
                    self.state = Simulator.State.PLAYING
                    self.current_lord_pos = who_is_lord(self.current_screen)
                    while self.current_lord_pos < 0:
                        self.current_screen = request_screen()
                        self.current_lord_pos = who_is_lord(self.current_screen)
                        print('current lord pos ', self.current_lord_pos)
                        if self.toggle.value == 0:
                            break
                    continue
                if 'continuous defeat' in act:
                    request_click(act['continuous defeat'])
                    continue
                print('calling', act)
                handcards, _ = get_cards_bboxes(self.current_screen, self.templates, 0)
                cards_value, _ = CEnv.get_cards_value(Card.char2color(handcards))
                print('cards value: ', cards_value)
                if cards_value < 10:
                    request_click(act['bujiao'])
                else:
                    request_click(act['jiaodizhu'])
            elif self.state == Simulator.State.PLAYING:
                if 'defeat' in act or 'victory' in act:
                    request_click(act['defeat'] if 'defeat' in act else act['victory'])
                    if self.cached_msg is None:
                        print('other player wins in one step!!!')
                        continue
                    win = is_win(self.current_screen)
                    state, action, fine_mask = self.cached_msg
                    if win:
                        sim2exp_sockets[self.current_lord_pos].send(dumps([[state, state], action, 1, True, False, [fine_mask, fine_mask]]))
                        self.win_rates[self.agent_names[self.current_lord_pos]].feed(1.0)
                    else:
                        sim2exp_sockets[self.current_lord_pos].send(dumps([[state, state], action, -1, True, False, [fine_mask, fine_mask]]))
                        self.win_rates[self.agent_names[self.current_lord_pos]].feed(0.0)
                    game_cnt += 1
                    if game_cnt % 100 == 0:
                        for agent in self.agent_names:
                            if self.win_rates[agent].count > 0:
                                logger.info('[last-100]{} win rate: {}'.format(agent, self.win_rates[agent].average))
                                self.win_rates[agent].reset()
                    self.reset_episode()
                    continue
                print('playing', act)
                left_cards, _ = get_cards_bboxes(self.current_screen, self.mini_templates, 1)
                right_cards, _ = get_cards_bboxes(self.current_screen, self.mini_templates, 2)
                if None in left_cards or None in right_cards:
                    request_click(act['buchu'])
                    time.sleep(1.0)
                    continue
                assert None not in left_cards
                assert None not in right_cards
                self.history[1].extend(right_cards)
                self.history[2].extend(left_cards)
                total_cards = np.ones([60])
                total_cards[53:56] = 0
                total_cards[57:60] = 0
                handcards, bboxes = get_cards_bboxes(self.current_screen, self.templates, 0)
                handcards = [card for card in handcards if card is not None]
                remain_cards = total_cards - Card.char2onehot60(handcards + self.history[0] + self.history[1] + self.history[2])
                print('current handcards: ', handcards)
                left_cnt = 17 - len(self.history[2])
                right_cnt = 17 - len(self.history[1])
                if self.current_lord_pos == 1:
                    left_cnt += 3
                if self.current_lord_pos == 2:
                    right_cnt += 3
                right_prob_state = remain_cards * (right_cnt / (left_cnt + right_cnt))
                left_prob_state = remain_cards * (left_cnt / (left_cnt + right_cnt))
                prob_state = np.concatenate([right_prob_state, left_prob_state])
                intention, buffer_comb, buffer_fine = self.predictor.predict(handcards, [left_cards, right_cards], prob_state, self, sim2coord_socket, coord2sim_socket)
                if self.cached_msg is not None:
                    state, action, fine_mask = self.cached_msg
                    sim2exp_sockets[self.current_lord_pos].send(dumps([[state, buffer_comb[0]], action, 0, False, False, [fine_mask, buffer_comb[2]]]))
                    sim2exp_sockets[self.current_lord_pos].send(dumps([[buffer_comb[0], buffer_fine[0]], buffer_comb[1], 0, False, True, [buffer_comb[2], buffer_fine[2]]]))
                self.cached_msg = buffer_fine
                self.history[0].extend(intention)
                print('intention is: ', intention)
                intention.sort(key=lambda k: Card.cards_to_value[k])
                if len(intention) == 0:
                    request_click(act['buchu'])
                else:
                    i = 0
                    j = 0
                    to_click = []
                    to_click_idxs = []
                    while j < len(intention):
                        if handcards[i] == intention[j]:
                            to_click_idxs.append(i)
                            to_click.append(bboxes[i])
                            i += 1
                            j += 1
                        else:
                            i += 1
                    for bbox in to_click:
                        request_click(bbox)
                        time.sleep(0.5)
                    request_click([1310, 760, 1310, 760])
                    time.sleep(1.0)
class ModelSaverBase(object):
    def __init__(self, base_path, model, model_opt, fields, optim, keep_checkpoint=-1):
        self.base_path = base_path
        self.model = model
        self.model_opt = model_opt
        self.fields = fields
        self.optim = optim
        self.last_saved_step = None
        self.keep_checkpoint = keep_checkpoint
        if keep_checkpoint > 0:
            self.checkpoint_queue = deque([], maxlen=keep_checkpoint)

    def save(self, step, moving_average=None):
        if self.keep_checkpoint == 0 or step == self.last_saved_step:
            return
        save_model = self.model
        if moving_average:
            # Temporarily swap in the averaged parameters for saving
            model_params_data = []
            for avg, param in zip(moving_average, save_model.parameters()):
                model_params_data.append(param.data)
                param.data = avg.data
        chkpt, chkpt_name = self._save(step, save_model)
        self.last_saved_step = step
        if moving_average:
            # Restore the original parameters
            for param_data, param in zip(model_params_data, save_model.parameters()):
                param.data = param_data
        if self.keep_checkpoint > 0:
            if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:
                todel = self.checkpoint_queue.popleft()
                self._rm_checkpoint(todel)
            self.checkpoint_queue.append(chkpt_name)

    def _save(self, step, model):
        # Signature matches the call in save() above; subclasses must implement.
        raise NotImplementedError()

    def _rm_checkpoint(self, name):
        raise NotImplementedError()
class Assigning:
    def __init__(self, value: int, name: str) -> None:
        self.value = value
        self.name = name

    def new_attr(self, newvalue: int, newname: str) -> None:
        self = newvalue
        self.name = newname

    @classmethod
    def new_cls(cls, newtype: type) -> type:
        cls = newtype
        return cls
def all_values_full(args: list[Register], blocks: list[BasicBlock]) -> list[Value]:
    values: list[Value] = list(args)
    seen_registers = set(args)
    for block in blocks:
        for op in block.ops:
            for source in op.sources():
                if isinstance(source, Register) and source not in seen_registers:
                    values.append(source)
                    seen_registers.add(source)
            if not isinstance(op, ControlOp):
                if isinstance(op, (Assign, AssignMulti)):
                    if op.dest not in seen_registers:
                        values.append(op.dest)
                        seen_registers.add(op.dest)
                elif op.is_void:
                    continue
                else:
                    values.append(op)
    return values
def test_validate_helm_oci_manifest():
    manifest_bytes = """{
  "schemaVersion":2,
  "config":{
    "mediaType":"application/vnd.cncf.helm.config.v1+json",
    "digest":"sha256:65a07b841ece031e6d0ec5eb948eacb17aa6d7294cdeb01d5348e",
    "size":141
  },
  "layers": [
    {
      "mediaType":"application/tar+gzip",
      "digest":"sha256:d84c9c29e0899862a0fa0f73da4d9f8c8c38e2da5d3258764aa7ba74bb914718",
      "size":3562
    }
  ]
}"""
    HELM_CHART_CONFIG_TYPE = 'application/vnd.cncf.helm.config.v1+json'
    HELM_CHART_LAYER_TYPES = ['application/tar+gzip']
    register_artifact_type(HELM_CHART_CONFIG_TYPE, HELM_CHART_LAYER_TYPES)
    manifest = OCIManifest(Bytes.for_string_or_unicode(manifest_bytes))
def send_nsca(code, message, nscahost, hostname=None, service=None, nscabin='send_nsca', nscaconf=None):
    if not hostname:
        hostname = platform.node()
    command = [nscabin, '-H', nscahost]
    if nscaconf:
        command += ['-c', nscaconf]
    code = str(code)
    if service:
        input_string = '\t'.join([hostname, service, code, message]) + '\n'
    else:
        input_string = '\t'.join([hostname, code, message]) + '\n'
    if not six.PY2 and not isinstance(input_string, six.binary_type):
        input_string = input_string.encode()
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    stdout, stderr = proc.communicate(input=input_string)
    stdout = bytes2str(stdout)
    stderr = bytes2str(stderr)
    result = (proc.returncode, stdout, stderr)
    return result
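A hedged call sketch for the helper above. The monitoring host and service name are made up; it requires the send_nsca binary on PATH.

# Submit an OK (0) service check result for host "web01" to a hypothetical NSCA server.
returncode, out, err = send_nsca(0, 'disk usage nominal', 'nagios.example.com', hostname='web01', service='disk')
if returncode != 0:
    print('send_nsca failed:', err)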
def densenet121(num_classes, loss, pretrained='imagenet', **kwargs):
    model = DenseNet(num_classes=num_classes, loss=loss, num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), fc_dims=None, dropout_p=None, **kwargs)
    if pretrained == 'imagenet':
        init_pretrained_weights(model, model_urls['densenet121'])
    return model
@bsp_config
def test_bsp_window_focus_cycle(manager):
    manager.test_window('one')
    manager.test_window('two')
    manager.test_window('float1')
    manager.c.window.toggle_floating()
    manager.test_window('float2')
    manager.c.window.toggle_floating()
    manager.test_window('three')
    assert manager.c.layout.info()['clients'] == ['one', 'three', 'two']
    assert_focused(manager, 'three')
    assert_focus_path(manager, 'two', 'float1', 'float2', 'one', 'three')
class XMLHelperTestCases(unittest.TestCase):
    def tearDown(self):
        os.unlink('__unittests.xml')

    def assertReadWriteSame(self, props):
        WriteDialogToFile('__unittests.xml', props)
        read_props = ReadPropertiesFromFile('__unittests.xml')
        self.assertEqual(props, read_props)

    def testOneUnicode(self):
        props = [dict(test=u'hiya')]
        self.assertReadWriteSame(props)

    def testOneString(self):
        props = [dict(test='hiya')]
        self.assertReadWriteSame(props)

    def testSomeEscapes(self):
        test_string = []
        for i in range(0, 50000):
            test_string.append(six.unichr(i))
        test_string = ''.join(test_string)
        props = [dict(test=test_string)]
        self.assertReadWriteSame(props)

    def testOneBool(self):
        props = [dict(test=True)]
        self.assertReadWriteSame(props)

    def testOneList(self):
        props = [dict(test=[1, 2, 3, 4, 5, 6])]
        self.assertReadWriteSame(props)

    def testOneDict(self):
        props = [dict(test_value=dict(test=1))]
        self.assertReadWriteSame(props)

    def testOneLong(self):
        props = [dict(test=1)]
        self.assertReadWriteSame(props)

    def testLOGFONTW(self):
        font = LOGFONTW()
        font.lfWeight = 23
        font.lfFaceName = u'wowow'
        props = [dict(test=font)]
        self.assertReadWriteSame(props)

    def testRECT(self):
        props = [dict(test=RECT(1, 2, 3, 4))]
        self.assertReadWriteSame(props)

    def testTwoLong(self):
        props = [dict(test=1), dict(test_blah=2)]
        self.assertReadWriteSame(props)

    def testEmptyList(self):
        props = [dict(test=[])]
        self.assertReadWriteSame(props)

    def testEmptyDict(self):
        props = [dict(test={})]
        self.assertReadWriteSame(props)
def _start_kernel():
    if sys._ipython_app and sys._ipython_kernel_running:
        return sys._ipython_app
    import IPython
    from ipykernel.kernelapp import IPKernelApp
    from zmq.eventloop import ioloop

    def _IPKernelApp_start(self):
        if self.poller is not None:
            self.poller.start()
        self.kernel.start()
        loop = ioloop.IOLoop.instance()

        def poll_ioloop(timer_id, time):
            # If the shell is exiting, run the loop once more and stop polling
            if self.kernel.shell.exit_now:
                _log.debug('IPython kernel stopping (%s)' % self.connection_file)
                timer.kill_timer(timer_id)
                loop.start()
                sys._ipython_kernel_running = False
                return
            # Otherwise run the event loop, stopping as soon as pending events are handled
            loop.add_timeout(0, lambda: loop.add_callback(loop.stop))
            loop.start()

        sys._ipython_kernel_running = True
        timer.set_timer(100, poll_ioloop)

    IPKernelApp.start = _IPKernelApp_start
    sys.__stdout__ = sys.stdout
    sys.__stderr__ = sys.stderr
    IPython.embed_kernel()
    ipy = IPKernelApp.instance()
    sys._ipython_app = ipy
    setattr(ipy.shell.__class__, 'user_global_ns', property(lambda self: self.user_ns))
    from IPython.terminal.ipapp import TerminalIPythonApp
    TerminalIPythonApp.instance = lambda: ipy
    __builtins__['get_ipython'] = lambda: ipy.shell.__class__
    mpl = ipy.shell.find_magic('matplotlib')
    if mpl:
        mpl('inline')
    return ipy
class Resolver:
    def __init__(self, callback, resolve_on_error, time_to_live=60 * 30):
        self.callback = callback
        self.resolve_on_error = resolve_on_error
        self._cache = {}
        self._time_to_live = time_to_live
        self._cache_ttl = defaultdict(set)
        self._clear_every = 2
        self._cached_modules = {'pandas', 'numpy', 'tensorflow', 'matplotlib'}

    @property
    def cached_modules(self):
        return self._cached_modules

    @cached_modules.setter
    def cached_modules(self, new_value):
        self._cached_modules = set(new_value)

    def clear_outdated(self):
        now = self.time_key()
        to_clear = [timestamp for timestamp in self._cache_ttl if timestamp < now]
        for time_key in to_clear:
            for key in self._cache_ttl[time_key]:
                del self._cache[key]
            del self._cache_ttl[time_key]

    def time_key(self):
        return int(time() / self._time_to_live)

    def get_or_create(self, completion: Completion):
        if not completion.full_name:
            use_cache = False
        else:
            module_parts = completion.full_name.split('.')
            use_cache = module_parts and module_parts[0] in self._cached_modules
        if use_cache:
            key = self._create_completion_id(completion)
            if key not in self._cache:
                if self.time_key() % self._clear_every == 0:
                    self.clear_outdated()
                self._cache[key] = self.resolve(completion)
                self._cache_ttl[self.time_key()].add(key)
            return self._cache[key]
        return self.resolve(completion)

    def _create_completion_id(self, completion: Completion):
        return (completion.full_name, completion.module_path, completion.line, completion.column, self.time_key())

    def resolve(self, completion):
        try:
            sig = completion.get_signatures()
            return self.callback(completion, sig)
        except Exception as e:
            log.warning(f'Something went wrong when resolving label for {completion}: {e}')
            return self.resolve_on_error
def test_max_iterations():
    this_dir = os.path.dirname(os.path.realpath(__file__))
    file = os.path.join(this_dir, 'temp_test_max_iterations_file.txt')
    mem = memory_usage((write_line, (file,), dict()), max_usage=True, max_iterations=1)
    # Close the file before removing it (the bare open() leaked a handle)
    with open(file) as fid:
        n_lines = sum(1 for line in fid)
    os.remove(file)
    assert n_lines == 1
class TestArgComplete:
    @pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
    def test_compare_with_compgen(self, tmp_path: Path, monkeypatch: MonkeyPatch) -> None:
        from _pytest._argcomplete import FastFilesCompleter
        ffc = FastFilesCompleter()
        fc = FilesCompleter()
        monkeypatch.chdir(tmp_path)
        assert equal_with_bash('', ffc, fc, out=sys.stdout)
        tmp_path.cwd().joinpath('data').touch()
        for x in ['d', 'data', 'doesnotexist', '']:
            assert equal_with_bash(x, ffc, fc, out=sys.stdout)

    @pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
    def test_remove_dir_prefix(self):
        from _pytest._argcomplete import FastFilesCompleter
        ffc = FastFilesCompleter()
        fc = FilesCompleter()
        for x in '/usr/'.split():
            assert not equal_with_bash(x, ffc, fc, out=sys.stdout)
def set_rp_acl(rp, entry_list=None, default_entry_list=None, map_names=1):
    assert rp.conn is Globals.local_connection, 'Set ACLs of path should only be done locally not over {conn}.'.format(conn=rp.conn)
    if entry_list:
        acl = _list_to_acl(entry_list, map_names)
    else:
        acl = posix1e.ACL()
    try:
        acl.applyto(rp.path)
    except OSError as exc:
        log.Log("Unable to set ACL on path {pa} due to exception '{ex}'".format(pa=rp, ex=exc), log.INFO)
        return
    if rp.isdir():
        if default_entry_list:
            def_acl = _list_to_acl(default_entry_list, map_names)
        else:
            def_acl = posix1e.ACL()
        def_acl.applyto(rp.path, posix1e.ACL_TYPE_DEFAULT)
def save_model(model: nn.Module, iteration: int, suffix: str) -> None:
    os.makedirs(args.save_folder, exist_ok=True)
    save_path = os.path.join(args.save_folder, '{}_{}_{}_kd{}_size{}_anchor{}_{}_{}.pth'.format(args.dataset, args.neck, args.backbone, args.kd, args.image_size, args.anchor_size, 'MG' if args.mutual_guide else 'Retina', suffix))
    tosave = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'iteration': iteration}
    print('Saving to {}'.format(save_path))
    torch.save(tosave, save_path)
class GaussianMLPRegressor(LayersPowered, Serializable):
    def __init__(self, name, input_shape, output_dim, mean_network=None, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, optimizer=None, use_trust_region=True, step_size=0.01, learn_std=True, init_std=1.0, adaptive_std=False, std_share_network=False, std_hidden_sizes=(32, 32), std_nonlinearity=None, normalize_inputs=True, normalize_outputs=True, subsample_factor=1.0):
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name):
            if optimizer is None:
                if use_trust_region:
                    optimizer = PenaltyLbfgsOptimizer('optimizer')
                else:
                    optimizer = LbfgsOptimizer('optimizer')
            self._optimizer = optimizer
            self._subsample_factor = subsample_factor
            if mean_network is None:
                mean_network = MLP(name='mean_network', input_shape=input_shape, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=None)
            l_mean = mean_network.output_layer
            if adaptive_std:
                l_log_std = MLP(name='log_std_network', input_shape=input_shape, input_var=mean_network.input_layer.input_var, output_dim=output_dim, hidden_sizes=std_hidden_sizes, hidden_nonlinearity=std_nonlinearity, output_nonlinearity=None).output_layer
            else:
                l_log_std = L.ParamLayer(mean_network.input_layer, num_units=output_dim, param=tf.constant_initializer(np.log(init_std)), name='output_log_std', trainable=learn_std)
            LayersPowered.__init__(self, [l_mean, l_log_std])
            xs_var = mean_network.input_layer.input_var
            ys_var = tf.placeholder(dtype=tf.float32, name='ys', shape=(None, output_dim))
            old_means_var = tf.placeholder(dtype=tf.float32, name='ys', shape=(None, output_dim))
            old_log_stds_var = tf.placeholder(dtype=tf.float32, name='old_log_stds', shape=(None, output_dim))
            x_mean_var = tf.Variable(np.zeros((1,) + input_shape, dtype=np.float32), name='x_mean')
            x_std_var = tf.Variable(np.ones((1,) + input_shape, dtype=np.float32), name='x_std')
            y_mean_var = tf.Variable(np.zeros((1, output_dim), dtype=np.float32), name='y_mean')
            y_std_var = tf.Variable(np.ones((1, output_dim), dtype=np.float32), name='y_std')
            normalized_xs_var = (xs_var - x_mean_var) / x_std_var
            normalized_ys_var = (ys_var - y_mean_var) / y_std_var
            normalized_means_var = L.get_output(l_mean, {mean_network.input_layer: normalized_xs_var})
            normalized_log_stds_var = L.get_output(l_log_std, {mean_network.input_layer: normalized_xs_var})
            means_var = normalized_means_var * y_std_var + y_mean_var
            log_stds_var = normalized_log_stds_var + tf.log(y_std_var)
            normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var
            normalized_old_log_stds_var = old_log_stds_var - tf.log(y_std_var)
            dist = self._dist = DiagonalGaussian(output_dim)
            normalized_dist_info_vars = dict(mean=normalized_means_var, log_std=normalized_log_stds_var)
            mean_kl = tf.reduce_mean(dist.kl_sym(dict(mean=normalized_old_means_var, log_std=normalized_old_log_stds_var), normalized_dist_info_vars))
            loss = -tf.reduce_mean(dist.log_likelihood_sym(normalized_ys_var, normalized_dist_info_vars))
            self._f_predict = tensor_utils.compile_function([xs_var], means_var)
            self._f_pdists = tensor_utils.compile_function([xs_var], [means_var, log_stds_var])
            self._l_mean = l_mean
            self._l_log_std = l_log_std
            optimizer_args = dict(loss=loss, target=self, network_outputs=[normalized_means_var, normalized_log_stds_var])
            if use_trust_region:
                optimizer_args['leq_constraint'] = (mean_kl, step_size)
                optimizer_args['inputs'] = [xs_var, ys_var, old_means_var, old_log_stds_var]
            else:
                optimizer_args['inputs'] = [xs_var, ys_var]
            self._optimizer.update_opt(**optimizer_args)
            self._use_trust_region = use_trust_region
            self._name = name
            self._normalize_inputs = normalize_inputs
            self._normalize_outputs = normalize_outputs
            self._mean_network = mean_network
            self._x_mean_var = x_mean_var
            self._x_std_var = x_std_var
            self._y_mean_var = y_mean_var
            self._y_std_var = y_std_var

    def fit(self, xs, ys):
        if self._subsample_factor < 1:
            num_samples_tot = xs.shape[0]
            idx = np.random.randint(0, num_samples_tot, int(num_samples_tot * self._subsample_factor))
            xs, ys = xs[idx], ys[idx]
        sess = tf.get_default_session()
        if self._normalize_inputs:
            sess.run([tf.assign(self._x_mean_var, np.mean(xs, axis=0, keepdims=True)), tf.assign(self._x_std_var, np.std(xs, axis=0, keepdims=True) + 1e-08)])
        if self._normalize_outputs:
            sess.run([tf.assign(self._y_mean_var, np.mean(ys, axis=0, keepdims=True)), tf.assign(self._y_std_var, np.std(ys, axis=0, keepdims=True) + 1e-08)])
        if self._use_trust_region:
            old_means, old_log_stds = self._f_pdists(xs)
            inputs = [xs, ys, old_means, old_log_stds]
        else:
            inputs = [xs, ys]
        loss_before = self._optimizer.loss(inputs)
        if self._name:
            prefix = self._name + '_'
        else:
            prefix = ''
        logger.record_tabular(prefix + 'LossBefore', loss_before)
        self._optimizer.optimize(inputs)
        loss_after = self._optimizer.loss(inputs)
        logger.record_tabular(prefix + 'LossAfter', loss_after)
        if self._use_trust_region:
            logger.record_tabular(prefix + 'MeanKL', self._optimizer.constraint_val(inputs))
        logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)

    def predict(self, xs):
        return self._f_predict(xs)

    def sample_predict(self, xs):
        means, log_stds = self._f_pdists(xs)
        return self._dist.sample(dict(mean=means, log_std=log_stds))

    def predict_log_likelihood(self, xs, ys):
        means, log_stds = self._f_pdists(xs)
        return self._dist.log_likelihood(ys, dict(mean=means, log_std=log_stds))

    def log_likelihood_sym(self, x_var, y_var):
        normalized_xs_var = (x_var - self._x_mean_var) / self._x_std_var
        normalized_means_var, normalized_log_stds_var = L.get_output([self._l_mean, self._l_log_std], {self._mean_network.input_layer: normalized_xs_var})
        means_var = normalized_means_var * self._y_std_var + self._y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(self._y_std_var)
        return self._dist.log_likelihood_sym(y_var, dict(mean=means_var, log_std=log_stds_var))

    def get_param_values(self, **tags):
        return LayersPowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        LayersPowered.set_param_values(self, flattened_params, **tags)
class CustomMapping(object):
    def __init__(self, *args, **kwargs):
        self._d = dict(*args, **kwargs)

    def __getitem__(self, key):
        return self._d[key]

    def __setitem__(self, key, val):
        self._d[key] = val

    def __delitem__(self, key):
        del self._d[key]

    def __iter__(self):
        return iter(self._d)

    def __len__(self):
        return len(self._d)

    def __contains__(self, key):
        return key in self._d

    def __eq__(self, other):
        return isinstance(other, CustomMapping) and self._d == other._d

    def __ne__(self, other):
        return not isinstance(other, CustomMapping) or self._d != other._d

    def keys(self):
        return self._d.keys()

    def values(self):
        return self._d.values()

    def items(self):
        return self._d.items()

    def update(self, *args, **kwargs):
        self._d.update(*args, **kwargs)
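A brief sketch exercising the dict-like protocol the class implements (pure standard library, no assumptions beyond the class itself):

m = CustomMapping(a=1)
m['b'] = 2
assert 'a' in m and len(m) == 2
assert sorted(m.items()) == [('a', 1), ('b', 2)]
assert m == CustomMapping(a=1, b=2)
del m['a']
assert list(m) == ['b']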
def train(train_data, test_data, user_size, item_size):
    with tf.Session() as sess:
        iterator = tf.data.Iterator.from_structure(train_data.output_types, train_data.output_shapes)
        model = NCF.NCF(FLAGS.embedding_size, user_size, item_size, FLAGS.lr, FLAGS.optim, FLAGS.initializer, FLAGS.loss_func, FLAGS.activation, FLAGS.regularizer, iterator, FLAGS.topK, FLAGS.dropout, is_training=True)
        model.build()
        ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)
        if ckpt:
            print('Reading model parameters from %s' % ckpt.model_checkpoint_path)
            model.saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Creating model with fresh parameters.')
            sess.run(tf.global_variables_initializer())
        count = 0
        for epoch in range(FLAGS.epochs):
            sess.run(model.iterator.make_initializer(train_data))
            model.is_training = True
            model.get_data()
            start_time = time.time()
            try:
                while True:
                    model.step(sess, count)
                    count += 1
            except tf.errors.OutOfRangeError:
                print('Epoch %d training ' % epoch + 'Took: ' + time.strftime('%H: %M: %S', time.gmtime(time.time() - start_time)))
            sess.run(model.iterator.make_initializer(test_data))
            model.is_training = False
            model.get_data()
            start_time = time.time()
            HR, MRR, NDCG = [], [], []
            prediction, label = model.step(sess, None)
            try:
                while True:
                    prediction, label = model.step(sess, None)
                    label = int(label[0])
                    HR.append(metrics.hit(label, prediction))
                    MRR.append(metrics.mrr(label, prediction))
                    NDCG.append(metrics.ndcg(label, prediction))
            except tf.errors.OutOfRangeError:
                hr = np.array(HR).mean()
                mrr = np.array(MRR).mean()
                ndcg = np.array(NDCG).mean()
                print('Epoch %d testing ' % epoch + 'Took: ' + time.strftime('%H: %M: %S', time.gmtime(time.time() - start_time)))
                print('HR is %.3f, MRR is %.3f, NDCG is %.3f' % (hr, mrr, ndcg))
        checkpoint_path = os.path.join(FLAGS.model_dir, 'NCF.ckpt')
        model.saver.save(sess, checkpoint_path)
class ZeroDimensionalSpatialMethod(pybamm.SpatialMethod):
    def __init__(self, options=None):
        super().__init__(options)

    def build(self, mesh):
        self._mesh = mesh

    def boundary_value_or_flux(self, symbol, discretised_child, bcs=None):
        return discretised_child

    def mass_matrix(self, symbol, boundary_conditions):
        return pybamm.Matrix(np.ones((1, 1)))

    def indefinite_integral(self, child, discretised_child, direction):
        if direction == 'forward':
            return discretised_child
        elif direction == 'backward':
            return -discretised_child

    def integral(self, child, discretised_child, integration_dimension):
        return discretised_child
def transform(obj: object, cls: Type[T], *, mapper: Callable[[Dict[str, Any]], Dict[str, Any]] = None, dump_cls: type = None, dump_args: List[Any] = None, dump_kwargs: List[Dict[str, Any]] = None, **kwargs) -> T:
    dump_args_ = dump_args or []
    dump_kwargs_ = dump_kwargs or {}
    dumped = dump(obj, dump_cls, *dump_args_, **dump_kwargs_)
    mapper_ = mapper or (lambda x: x)
    dumped_mapped = mapper_(dumped)
    return load(dumped_mapped, cls, **kwargs)
class UniF_AGRU(nn.Module):
    def __init__(self, emodict, worddict, embedding, args):
        super(UniF_AGRU, self).__init__()
        self.num_classes = emodict.n_words
        self.embeddings = embedding
        self.gpu = args.gpu
        self.hops = args.hops
        self.wind_1 = args.wind1
        self.utt_gru = GRUencoder(args.d_word_vec, args.d_h1, num_layers=1)
        self.lin_1 = nn.Linear(200, 100)
        self.dropout_in = nn.Dropout(0.3)
        self.cont_gru = nn.GRU(100, 100, num_layers=1, bidirectional=False)
        self.dropout_mid = nn.Dropout(0.3)
        self.AttGRU = nn.ModuleList(AttGRU(d_model=100) for hop in range(self.hops))
        self.classifier = nn.Linear(100, self.num_classes)

    def init_hidden(self, num_directs, num_layers, batch_size, d_model):
        return Variable(torch.zeros(num_directs * num_layers, batch_size, d_model))

    def forward(self, sents, lengths):
        if len(sents.size()) < 2:
            sents = sents.unsqueeze(0)
        w_embed = self.embeddings(sents)
        w_gru = self.utt_gru(w_embed, lengths)
        maxpl = torch.max(w_gru, dim=1)[0]
        s_utt = F.tanh(self.lin_1(maxpl))
        s_utt = self.dropout_in(s_utt)
        s_out = []
        cont_inp = s_utt.unsqueeze(1)
        s_out.append(cont_inp[:1])
        attn_weights = []
        if sents.size()[0] > 1:
            batches = []
            masks = []
            for i in range(1, sents.size()[0]):
                pad = max(self.wind_1 - i, 0)
                i_st = 0 if i < (self.wind_1 + 1) else i - self.wind_1
                m_pad = F.pad(cont_inp[i_st:i], (0, 0, 0, 0, pad, 0), mode='constant', value=0)
                batches.append(m_pad)
                mask = [0] * pad + [1] * (self.wind_1 - pad)
                masks.append(mask)
            batches_tensor = torch.cat(batches, dim=1)
            masks_tensor = torch.tensor(masks).long().to(sents.device)
            query_mask = torch.ones(masks_tensor.size()[0], 1).long().to(sents.device)
            attn_mask = get_attn_pad_mask(query_mask, masks_tensor)
            mem_out = self.cont_gru(batches_tensor)[0]
            mem_bank = (batches_tensor + mem_out).transpose(0, 1).contiguous()
            mem_bank = self.dropout_mid(mem_bank)
            query = cont_inp[1:]
            eps_mem = query
            for hop in range(self.hops):
                attn_hid = self.init_hidden(1, 1, masks_tensor.size()[0], 100).to(sents.device)
                attn_out, attn_weight = self.AttGRU[0](eps_mem, mem_bank, attn_hid, attn_mask)
                attn_weights.append(attn_weight.squeeze(1))
                eps_mem = eps_mem + attn_out
                eps_mem = self.dropout_mid(eps_mem)
            s_out.append(eps_mem)
        s_cont = torch.cat(s_out, dim=0).squeeze(1)
        s_output = self.classifier(s_cont)
        pred_s = F.log_softmax(s_output, dim=1)
        return (pred_s, attn_weights)
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    eps = 1e-12
    pos_weights = gaussian_target.eq(1)
    neg_weights = (1 - gaussian_target).pow(gamma)
    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_loss = -((1 - pred) + eps).log() * pred.pow(alpha) * neg_weights
    return pos_loss + neg_loss
class TestParametersCLI(TestCase):
    def test_error(self):
        with self.assertRaisesRegex(NotImplementedError, 'deprecated'):
            pybamm.add_parameter()
        with self.assertRaisesRegex(NotImplementedError, 'deprecated'):
            pybamm.edit_parameter()
        with self.assertRaisesRegex(NotImplementedError, 'deprecated'):
            pybamm.remove_parameter()
class DirListAction(Action, ProcessableAction):
    mandatoryparams = ['directory']
    optionalparams = ['dirmask']
    varexpansion = ['directory', 'filename']

    def __init__(self, execparams, parent):
        Action.__init__(self, execparams=execparams, parent=parent)

    def Execute(self):
        Action.Execute(self)
        self.path = self.execparams['directory']
        mask = self.execparams.get('dirmask', '*')
        self.result = GetDirList(mask=mask, path=self.path)
        return self.result is not None

    def GetVariable(self, varname):
        ret = None
        if varname == 'filename':
            ret = os.path.join(self.path, self.result[0])
        elif varname == 'directory':
            ret = self.path
        return ret

    def process(self, params):
        filename = params.get('filename', None)
        if filename is None:
            raise AttributeError("You must specify 'filename='!")
        # list() is needed on Python 3, where filter() returns an iterator without len()
        data = list(filter(lambda x: x == filename, self.result))
        if len(data) == 0:
            data = None
        else:
            data = os.path.join(self.execparams['path'], data[0])
        result = data
        return result
class Effect5243(BaseEffect):
    type = 'passive'

    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredChargeBoost(lambda mod: mod.charge.requiresSkill('Rockets') or mod.charge.requiresSkill('Light Missiles'), 'emDamage', ship.getModifiedItemAttr('shipBonusCF2'), skill='Caldari Frigate', **kwargs)
def prepare_z_y(G_batch_size, dim_z, nclasses, device='cuda', fp16=False, z_var=1.0, N_target_cate=100):
    z_ = Distribution(torch.randn(G_batch_size, dim_z, requires_grad=False))
    z_.init_distribution('normal', mean=0, var=z_var)
    z_ = z_.to(device, torch.float16 if fp16 else torch.float32)
    if fp16:
        z_ = z_.half()
    y_ = Distribution(torch.zeros(G_batch_size, requires_grad=False))
    y_.init_distribution('categorical', num_categories=nclasses, N_target_cate=N_target_cate)
    y_ = y_.to(device, torch.int64)
    return (z_, y_)
def compute_cov_g(g, classname, layer_info, fast_cnn):
    batch_size = g.size(0)
    if classname == 'Conv2d':
        if fast_cnn:
            g = g.view(g.size(0), g.size(1), -1)
            g = g.sum(-1)
        else:
            g = g.transpose(1, 2).transpose(2, 3).contiguous()
            g = g.view(-1, g.size(-1)).mul_(g.size(1)).mul_(g.size(2))
    elif classname == 'AddBias':
        g = g.view(g.size(0), g.size(1), -1)
        g = g.sum(-1)
    g_ = g * batch_size
    # The matrix-multiplication operator was garbled out of the original text
    return g_.t() @ (g_ / g.size(0))
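A quick shape sanity check for the fallback path (a classname that triggers neither the Conv2d nor the AddBias reshaping); only torch is assumed:

import torch
g = torch.randn(8, 16)  # gradients for a linear-like layer: (batch, out_features)
cov = compute_cov_g(g, 'Linear', None, fast_cnn=False)
assert cov.shape == (16, 16)  # (out_features, out_features) covariance factor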
def run(model_args, data_args, training_args, additional_training_args):
    setup_logging(training_args)
    set_seed(training_args.seed)
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
    text_column_name = 'text'
    label_column_name = 'label'
    label_list = datasets['train'].features[label_column_name].names
    num_labels = len(label_list)
    label_to_id = {i: i for i in range(num_labels)}
    config = AutoConfig.from_pretrained(model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name)
    if model_args.model_name_or_path == 'neuropark/sahajBERT':
        tokenizer = AlbertBengaliTokenizerFast.from_pretrained(model_args.model_name_or_path)
    else:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, use_fast=True)
    model = AutoModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, config=config)
    padding = 'max_length' if data_args.pad_to_max_length else False
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_function(examples):
        result = tokenizer(examples[text_column_name], padding=padding, max_length=max_seq_length, truncation=True)
        if label_to_id is not None and 'label' in examples:
            result['label'] = [label_to_id[l] if l != -1 else -1 for l in examples['label']]
        return result

    train_dataset = datasets['train']
    train_dataset = train_dataset.map(preprocess_function, batched=True)
    valid_dataset = datasets['validation']
    valid_dataset = valid_dataset.map(preprocess_function, batched=True)
    test_dataset = datasets['test']
    test_dataset = test_dataset.map(preprocess_function, batched=True)
    data_collator = default_data_collator if data_args.pad_to_max_length else None
    metric = load_metric('accuracy')

    def compute_metrics(p):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        result = metric.compute(predictions=preds, references=p.label_ids)
        if len(result) > 1:
            result['combined_score'] = np.mean(list(result.values())).item()
        return result

    early_stopping = EarlyStoppingCallback(early_stopping_patience=additional_training_args.early_stopping_patience, early_stopping_threshold=additional_training_args.early_stopping_threshold)
    callbacks = [early_stopping]
    trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=valid_dataset, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, callbacks=callbacks)
    train_result = trainer.train()
    metrics = train_result.metrics
    metrics['train_samples'] = len(train_dataset)
    trainer.save_model()
    trainer.log_metrics('train', metrics)
    trainer.save_metrics('train', metrics)
    trainer.save_state()
    logger.info('*** Evaluate ***')
    metrics = trainer.evaluate(eval_dataset=test_dataset)
    metrics['eval_samples'] = len(test_dataset)
    trainer.log_metrics('eval', metrics)
    trainer.save_metrics('eval', metrics)
def get_resnet_v1_s(input_x, scope='resnet50_v1s', bottleneck_nums=[3, 4, 6, 3], base_channels=[64, 128, 256, 512], freeze=[True, False, False, False, False], is_training=True, freeze_norm=False, num_cls=1000, dropout=False):
    net, fet_dict = get_resnet_v1_s_base(input_x=input_x, scope=scope, bottleneck_nums=bottleneck_nums, base_channels=base_channels, freeze=freeze, is_training=is_training, freeze_norm=freeze_norm)
    with tf.variable_scope(scope):
        if DATA_FORMAT.strip() == 'NCHW':
            net = tf.reduce_mean(net, axis=[2, 3], name='global_avg_pooling', keep_dims=True)
        elif DATA_FORMAT.strip() == 'NHWC':
            net = tf.reduce_mean(net, axis=[1, 2], name='global_avg_pooling', keep_dims=True)
        else:
            raise ValueError('Data Format Error...')
        net = slim.flatten(net, scope='flatten')
        if dropout:
            net = slim.dropout(net, keep_prob=0.5, is_training=is_training)
        logits = slim.fully_connected(net, num_outputs=num_cls, activation_fn=None, scope='logits')
        return logits
class TestBackBone(unittest.TestCase):
    def test_resnet_scriptability(self):
        cfg = get_cfg()
        resnet = build_resnet_backbone(cfg, ShapeSpec(channels=3))
        scripted_resnet = torch.jit.script(resnet)
        inp = torch.rand(2, 3, 100, 100)
        out1 = resnet(inp)['res4']
        out2 = scripted_resnet(inp)['res4']
        self.assertTrue(torch.allclose(out1, out2))

    def test_fpn_scriptability(self):
        cfg = model_zoo.get_config('Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml')
        bb = build_resnet_fpn_backbone(cfg, ShapeSpec(channels=3))
        bb_s = torch.jit.script(bb)
        inp = torch.rand(2, 3, 128, 128)
        out1 = bb(inp)['p5']
        out2 = bb_s(inp)['p5']
        self.assertTrue(torch.allclose(out1, out2))
class ExpReplay(DataFlow, Callback): def __init__(self, predictor_io_names, player, state_shape, num_actions, batch_size, memory_size, init_memory_size, init_exploration, update_frequency, encoding_file='../AutoEncoder/encoding.npy'): init_memory_size = int(init_memory_size) for (k, v) in locals().items(): if (k != 'self'): setattr(self, k, v) self.exploration = init_exploration self.num_actions = num_actions self.encoding = np.load(encoding_file) logger.info('Number of Legal actions: {}'.format(self.num_actions)) self.rng = get_rng(self) self._init_memory_flag = threading.Event() self._populate_job_queue = queue.Queue(maxsize=5) self.mem = ReplayMemory(memory_size, state_shape) self.player.reset() self.player.prepare() self._current_ob = self.get_state() self._player_scores = StatCounter() self._current_game_score = StatCounter() def get_state(self): def cards_char2embedding(cards_char): test = (action_space_onehot60 == Card.char2onehot60(cards_char)) test = np.all(test, axis=1) target = np.where(test)[0] return self.encoding[target[0]] s = self.player.get_state_prob() s = np.concatenate([Card.val2onehot60(self.player.get_curr_handcards()), s]) last_two_cards_char = self.player.get_last_two_cards() last_two_cards_char = [to_char(c) for c in last_two_cards_char] return np.concatenate([s, cards_char2embedding(last_two_cards_char[0]), cards_char2embedding(last_two_cards_char[1])]) def get_simulator_thread(self): def populate_job_func(): self._populate_job_queue.get() for _ in range(self.update_frequency): self._populate_exp() th = ShareSessionThread(LoopThread(populate_job_func, pausable=False)) th.name = 'SimulatorThread' return th def _init_memory(self): logger.info('Populating replay memory with epsilon={} ...'.format(self.exploration)) with get_tqdm(total=self.init_memory_size) as pbar: while (len(self.mem) < self.init_memory_size): self._populate_exp() pbar.update() self._init_memory_flag.set() def _populate_exp(self): old_s = self._current_ob mask = get_mask(to_char(self.player.get_curr_handcards()), action_space, to_char(self.player.get_last_outcards())) if (self.rng.rand() <= self.exploration): act = self.rng.choice(range(self.num_actions)) else: q_values = self.predictor(old_s[(None, ...)])[0][0] q_values[(mask == 0)] = np.nan act = np.nanargmax(q_values) assert (act < self.num_actions) (reward, isOver, _) = self.player.step_manual(to_value(action_space[act])) while ((not isOver) and (self.player.get_role_ID() != ROLE_ID_TO_TRAIN)): (_, reward, _) = self.player.step_auto() isOver = (reward != 0) if (ROLE_ID_TO_TRAIN == 2): reward = (- reward) self._current_game_score.feed(reward) if isOver: self._player_scores.feed(self._current_game_score.sum) while True: self.player.reset() self.player.prepare() early_stop = False while (self.player.get_role_ID() != ROLE_ID_TO_TRAIN): (_, reward, _) = self.player.step_auto() isOver = (reward != 0) if isOver: print('prestart ends too early! now resetting env') early_stop = True break if early_stop: continue self._current_ob = self.get_state() break self._current_game_score.reset() self._current_ob = self.get_state() self.mem.append(Experience(old_s, mask, act, reward, isOver)) def debug(self, cnt=100000): with get_tqdm(total=cnt) as pbar: for i in range(cnt): self.mem.append(Experience(np.zeros(self.state_shape), np.zeros(self.num_actions, dtype='bool'), 0, 0, False)) pbar.update() def get_data(self): self._init_memory_flag.wait() while True: idx = self.rng.randint((self._populate_job_queue.maxsize * self.update_frequency), (len(self.mem) - 1), size=self.batch_size) batch_exp = [self.mem.sample(i) for i in idx] (yield self._process_batch(batch_exp)) self._populate_job_queue.put(1) def _process_batch(self, batch_exp): state = np.asarray([e[0] for e in batch_exp], dtype='float32') next_mask = np.asarray([e[1] for e in batch_exp], dtype='bool') action = np.asarray([e[2] for e in batch_exp], dtype='int32') reward = np.asarray([e[3] for e in batch_exp], dtype='float32') isOver = np.asarray([e[4] for e in batch_exp], dtype='bool') return [state, next_mask, action, reward, isOver] def _setup_graph(self): self.predictor = self.trainer.get_predictor(*self.predictor_io_names) def _before_train(self): while (self.player.get_role_ID() != ROLE_ID_TO_TRAIN): self.player.step_auto() self._current_ob = self.get_state() self._init_memory() self._simulator_th = self.get_simulator_thread() self._simulator_th.start() def _trigger(self): v = self._player_scores try: (mean, max) = (v.average, v.max) self.trainer.monitors.put_scalar('expreplay/mean_score', mean) self.trainer.monitors.put_scalar('expreplay/max_score', max) except Exception: logger.exception('Cannot log training scores.') v.reset()
class TestRaises(TestCase): def test_simple(self): self.assertRaisesRegex(RuntimeError, 'text .* match', someFunc) def test_simple_with_newlines(self): self.assertRaisesRegex(RuntimeError, 'text .* match', someFunc) def test_args(self): self.assertRaisesRegex(RuntimeError, 'text .* match', someFunc, 1, 2, 3) def test_kwargs(self): self.assertRaisesRegex(RuntimeError, 'text .* match', someFunc, foo=42, bar=43) def test_context_manager(self): with self.assertRaisesRegex(RuntimeError, 'text .* match'): someFunc('foo', None) def test_args_kwargs(self): self.assertRaisesRegex(RuntimeError, 'text .* match', someFunc, 1, 2, 3, foo=42, bar=43) def test_args_kwargs_with_newlines(self): self.assertRaisesRegex(RuntimeError, 'text .* match', someFunc, 1, 2, 3, foo=42, bar=43) def test_lambda(self): self.assertRaises(RuntimeError, (lambda : error(1, 2))) self.assertRaises(RuntimeError, (lambda : (error(1, 2) or error()))) def test_atom(self): self.assertRaisesRegex(RuntimeError, 'foobar', someFunc, 1, 2, 3) def test_expr(self): self.assertRaisesRegex(RuntimeError, ('foo' + 'bar'), someFunc, 1, 2, 3)
class Migration(migrations.Migration): dependencies = [('options', '0033_option_help')] operations = [migrations.AddField(model_name='option', name='view_text_lang1', field=models.TextField(blank=True, help_text='The view text for this option in the primary language.', null=True, verbose_name='View text (primary)')), migrations.AddField(model_name='option', name='view_text_lang2', field=models.TextField(blank=True, help_text='The view text for this option in the secondary language.', null=True, verbose_name='View text (secondary)')), migrations.AddField(model_name='option', name='view_text_lang3', field=models.TextField(blank=True, help_text='The view text for this option in the tertiary language.', null=True, verbose_name='View text (tertiary)')), migrations.AddField(model_name='option', name='view_text_lang4', field=models.TextField(blank=True, help_text='The view text for this option in the quaternary language.', null=True, verbose_name='View text (quaternary)')), migrations.AddField(model_name='option', name='view_text_lang5', field=models.TextField(blank=True, help_text='The view text for this option in the quinary language.', null=True, verbose_name='View text (quinary)'))]
def read_files(doc_file, keys_file): doc_dict = OrderedDict() keys_dict = OrderedDict() with open(doc_file) as f_doc: for line in f_doc: line_json = json.loads(line) doc_dict[line_json['docid']] = line_json['text'] with open(keys_file) as f_keys: contents = f_keys.read() contents = contents.split('%%%') for content in contents: if (not content): continue content = json.loads(content) (key, doc_id) = (content[0][0], content[0][1]) if (key == 'message_id'): if (doc_id not in keys_dict): keys_dict[doc_id] = OrderedDict() for key in key2role: keys_dict[doc_id][key] = list() for key in key2role: for keyval in content[1:]: (key_c, val) = (keyval[0], keyval[1]) if (key_c == key): if val: entity = list() for val_str in val['strings']: entity.append(val_str) if (entity not in keys_dict[doc_id][key]): keys_dict[doc_id][key].append(entity) doc_keys = OrderedDict() for doc_id in doc_dict.keys(): doc_keys[doc_id] = OrderedDict() doc_keys[doc_id]['doc'] = doc_dict[doc_id] doc_keys[doc_id]['roles'] = keys_dict[doc_id] return doc_keys
class PackedData(): class _NoData(): pass NO_DATA = _NoData() def __init__(self, node, data): self.left = self.NO_DATA self.right = self.NO_DATA if data: if (node.left is not None): self.left = data[0] if (len(data) > 1): self.right = data[1] else: self.right = data[0]
def setup_environment(): global _ENV_SETUP_DONE if _ENV_SETUP_DONE: return _ENV_SETUP_DONE = True _configure_libraries() custom_module_path = os.environ.get('DETECTRON2_ENV_MODULE') if custom_module_path: setup_custom_environment(custom_module_path) else: pass
class TestCase(): def __init__(self, test_name: str, model, input_shape, valid_hx=False, sequence_lens=None, device='cpu'): self.test_name = test_name self.model = model.to(device) self.input_shape = input_shape self.valid_hx = valid_hx self.device = device self.sequence_lens = sequence_lens
def _test(): import torch pretrained = False models = [pyramidnet101_a360] for model in models: net = model(pretrained=pretrained) net.eval() weight_count = _calc_width(net) print('m={}, {}'.format(model.__name__, weight_count)) assert ((model != pyramidnet101_a360) or (weight_count == 42455070)) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000))
def test_ki_with_broken_threads() -> None: thread = threading.main_thread() original = threading._active[thread.ident] try: del threading._active[thread.ident] @_core.enable_ki_protection async def inner() -> None: assert (signal.getsignal(signal.SIGINT) != signal.default_int_handler) _core.run(inner) finally: threading._active[thread.ident] = original
def test_setup_cfg_no_version(tmp_path): setup_cfg = (tmp_path / 'setup.cfg') setup_cfg.write_text(dedent('\n [tool:isort]\n profile = black\n ')) decl = PatternVersionDeclaration(setup_cfg.resolve(), '^version = (?P<version>.*)$') assert (decl.parse() == set())
@pytest.mark.skipif((not is_py39_plus), reason='literals and annotated are 3.9+') def test_type_names_with_quotes(): from typing import Annotated, Literal, Union converter = Converter() assert (converter.structure({1: 1}, Dict[(Annotated[(int, "'")], int)]) == {1: 1}) converter.register_structure_hook_func((lambda t: (t is Union[(Literal[('a', 2, 3)], Literal[4])])), (lambda v, _: v)) assert (converter.structure({2: 'a'}, Dict[(Union[(Literal[('a', 2, 3)], Literal[4])], str)]) == {2: 'a'})
class FactorVaeTest(absltest.TestCase): def test_metric(self): ground_truth_data = dummy_data.IdentityObservationsData() representation_function = (lambda x: x) random_state = np.random.RandomState(0) scores = factor_vae.compute_factor_vae(ground_truth_data, representation_function, random_state, None, 5, 3000, 2000, 2500) self.assertBetween(scores['train_accuracy'], 0.9, 1.0) self.assertBetween(scores['eval_accuracy'], 0.9, 1.0) def test_bad_metric(self): ground_truth_data = dummy_data.IdentityObservationsData() representation_function = np.zeros_like random_state = np.random.RandomState(0) scores = factor_vae.compute_factor_vae(ground_truth_data, representation_function, random_state, None, 5, 3000, 2000, 2500) self.assertBetween(scores['train_accuracy'], 0.0, 0.2) self.assertBetween(scores['eval_accuracy'], 0.0, 0.2)
def aug_args_with_log(args): id_process = os.getpid() time_current = datetime.datetime.now().isoformat() args.Version = torch.__version__ args.ID = id_process args.TIME = time_current path_storage = os.path.abspath(args.PathStorage) args.PathDomain = os.path.join(path_storage, 'domains', args.Domain) dict_args = vars(args) path_logs = os.path.join(args.PathDomain, 'Logs', args.FolderName) assert os.path.exists(path_logs) args.PathLog = os.path.join(path_logs, 'log.txt') log = LogReader(args.PathLog) saved_args = log.getArgs() args.PathSave = os.path.join(path_logs, 'saved_model') copyfile(args.PathLog, os.path.join(path_logs, 'log_bak.txt')) copyfile(args.PathSave, os.path.join(path_logs, 'saved_model_bak')) copyfile((args.PathSave + '_idx_and_dim.pkl'), (args.PathSave + '_idx_and_dim_bak.pkl')) args.Database = saved_args['Database'] args.CheckPoint = saved_args['CheckPoint'] args.BatchSize = saved_args['BatchSize'] args.TrackPeriod = saved_args['TrackPeriod'] args.Multiplier = saved_args['Multiplier'] args.DevMultiplier = saved_args['DevMultiplier'] args.TrainRatio = saved_args['TrainRatio'] args.DevRatio = saved_args['DevRatio'] args.TrainDownSampleMode = saved_args['TrainDownSampleMode'] args.TrainDownSampleSize = saved_args['TrainDownSampleSize'] args.DevDownSampleMode = saved_args['DevDownSampleMode'] args.DevDownSampleSize = saved_args['DevDownSampleSize'] args.LSTMPool = saved_args['LSTMPool'] args.UpdateMode = saved_args['UpdateMode'] args.TimeEmbeddingDim = saved_args['TimeEmbeddingDim'] args.TimeEmbeddingMode = saved_args['TimeEmbeddingMode'] args.UseGPU = saved_args['UseGPU'] args.Seed = saved_args['Seed'] args.Layer = saved_args['Layer'] args.MemorySize = saved_args['MemorySize'] if ('IntensityComputationMode' in saved_args): args.IntensityComputationMode = saved_args['IntensityComputationMode'] if ('AttentionTemperature' in saved_args): args.AttentionTemperature = saved_args['AttentionTemperature'] if ('WeightDecay' in saved_args): args.WeightDecay = saved_args['WeightDecay'] else: args.WeightDecay = 0 args.NumProcess = 1 if (args.NumThread < 1): args.NumThread = 1 print(f'mp num threads in torch : {torch.get_num_threads()}') if (torch.get_num_threads() != args.NumThread): print(f'not equal to NumThread arg ({args.NumThread})') torch.set_num_threads(args.NumThread) print(f'set to {args.NumThread}') assert (torch.get_num_threads() == args.NumThread), 'not set yet?!'
class TracesFileCache(object): caches = {} def __init__(self, cachedir): self.cachedir = cachedir self.dircaches = {} self.modified = set() util.ensuredir(self.cachedir) def get(self, abspath): dircache = self._get_dircache_for(abspath) if (abspath in dircache): return dircache[abspath] return None def put(self, abspath, tfile): cachepath = self._dircachepath(abspath) dircache = self._get_dircache(cachepath) dircache[abspath] = tfile self.modified.add(cachepath) def dump_modified(self): for cachepath in self.modified: self._dump_dircache(self.dircaches[cachepath], cachepath) self.modified = set() def clean(self): self.dump_modified() for fn in os.listdir(self.cachedir): if (len(fn) == 40): cache = self._load_dircache(pjoin(self.cachedir, fn)) self._dump_dircache(cache, pjoin(self.cachedir, fn)) def _get_dircache_for(self, abspath): return self._get_dircache(self._dircachepath(abspath)) def _get_dircache(self, cachepath): if (cachepath not in self.dircaches): if os.path.isfile(cachepath): self.dircaches[cachepath] = self._load_dircache(cachepath) else: self.dircaches[cachepath] = {} return self.dircaches[cachepath] def _dircachepath(self, abspath): cachefn = ehash(os.path.dirname(abspath)) return pjoin(self.cachedir, cachefn) def _load_dircache(self, cachefilename): with open(cachefilename, 'rb') as f: cache = pickle.load(f) for fn in list(cache.keys()): if (not os.path.isfile(fn)): del cache[fn] time_float = util.get_time_float() for v in cache.values(): v.trees_from_content(v.traces) for tr in v.traces: tr.file = v if (not isinstance(tr.station, str)): tr.prune_from_reuse_cache() tr.set_codes(str(tr.network), str(tr.station), str(tr.location), str(tr.channel)) tr.tmin = time_float(tr.tmin) tr.tmax = time_float(tr.tmax) v.data_use_count = 0 v.data_loaded = False v.fix_unicode_codes() return cache def _dump_dircache(self, cache, cachefilename): if (not cache): if os.path.exists(cachefilename): os.remove(cachefilename) return cache_copy = {} for fn in cache.keys(): trf = copy.copy(cache[fn]) trf.parent = None trf.by_tmin = None trf.by_tmax = None trf.by_tlen = None trf.by_mtime = None trf.data_use_count = 0 trf.data_loaded = False traces = [] for tr in trf.traces: tr = tr.copy(data=False) tr.ydata = None tr.meta = None tr.file = trf traces.append(tr) trf.traces = traces cache_copy[fn] = trf tmpfn = (cachefilename + ('.%i.tmp' % os.getpid())) with open(tmpfn, 'wb') as f: pickle.dump(cache_copy, f, protocol=2) if (is_windows and os.path.exists(cachefilename)): os.unlink(cachefilename) os.rename(tmpfn, cachefilename)
def virtual_scane_one_model(model_dir, worker_id): print(('Scanning ' + model_dir)) tmp_model_name = (('tmp' + str(worker_id)) + '.ply') TMP_DATA_PATH = ('./tmp' + str(worker_id)) TMP_PLY_POINTCLOUD_PATH = (('./tmp' + str(worker_id)) + '.ply_output') if (not os.path.exists(TMP_DATA_PATH)): os.makedirs(TMP_DATA_PATH) clean_dir(TMP_PLY_POINTCLOUD_PATH) (cam_view_points, cam_target_points) = generate_camera_view_target_points() model_filename = os.path.join(model_dir, 'models/model_normalized.obj') if (not os.path.exists(model_filename)): print(('File not found: %s' % model_filename)) return model_basename = os.path.basename(model_dir) prev_clean_output_filename = os.path.join(PREV_OUTPUT_DATA_PATH, (model_basename + '_clean.ply')) if os.path.exists(prev_clean_output_filename): print('Previously scanned, skip.', prev_clean_output_filename) return ply_tmp_name = os.path.join(TMP_DATA_PATH, tmp_model_name) mesh_util.convert_obj2ply(model_filename, ply_tmp_name, recenter=True, center_mode='box_center') cmd_str = ((((EXE_VIRTUAL_SCANNER + ' ') + ply_tmp_name) + ' ') + CMD_POSTFIX.format(','.join((str(e) for e in cam_view_points)), ','.join((str(e) for e in cam_target_points)))) os.system(cmd_str) all_xyz = [] pcd_files = glob.glob((TMP_PLY_POINTCLOUD_PATH + '/*.pcd')) for pf in pcd_files: xyz = pc_util.read_pcd(pf) all_xyz.extend(xyz) all_points = np.array(all_xyz) print('Collected #points:', all_points.shape) if (all_points.shape[0] < 2048): print('Failed to scan sufficient points! Move to next model.') return all_points = pc_util.remove_duplicated_points(all_points) print(('Total points after merge: %d' % all_points.shape[0])) clean_output_filename = os.path.join(OUTPUT_DATA_PATH, (model_basename + '_clean.ply')) pc_util.write_ply(all_points, clean_output_filename) print(('Save point cloud to ' + clean_output_filename)) return
class DPM_Solver(): def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.0): self.model = model_fn self.noise_schedule = noise_schedule self.predict_x0 = predict_x0 self.thresholding = thresholding self.max_val = max_val def noise_prediction_fn(self, x, t): return self.model(x, t) def data_prediction_fn(self, x, t): noise = self.noise_prediction_fn(x, t) dims = x.dim() (alpha_t, sigma_t) = (self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)) x0 = ((x - (expand_dims(sigma_t, dims) * noise)) / expand_dims(alpha_t, dims)) if self.thresholding: p = 0.995 s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], (- 1))), p, dim=1) s = expand_dims(torch.maximum(s, (self.max_val * torch.ones_like(s).to(s.device))), dims) x0 = (torch.clamp(x0, (- s), s) / s) return x0 def model_fn(self, x, t): if self.predict_x0: return self.data_prediction_fn(x, t) else: return self.noise_prediction_fn(x, t) def get_time_steps(self, skip_type, t_T, t_0, N, device): if (skip_type == 'logSNR'): lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device)) lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device)) logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), (N + 1)).to(device) return self.noise_schedule.inverse_lambda(logSNR_steps) elif (skip_type == 'time_uniform'): return torch.linspace(t_T, t_0, (N + 1)).to(device) elif (skip_type == 'time_quadratic'): t_order = 2 t = torch.linspace((t_T ** (1.0 / t_order)), (t_0 ** (1.0 / t_order)), (N + 1)).pow(t_order).to(device) return t else: raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type)) def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): if (order == 3): K = ((steps // 3) + 1) if ((steps % 3) == 0): orders = (([3] * (K - 2)) + [2, 1]) elif ((steps % 3) == 1): orders = (([3] * (K - 1)) + [1]) else: orders = (([3] * (K - 1)) + [2]) elif (order == 2): if ((steps % 2) == 0): K = (steps // 2) orders = ([2] * K) else: K = ((steps // 2) + 1) orders = (([2] * (K - 1)) + [1]) elif (order == 1): K = 1 orders = ([1] * steps) else: raise ValueError("'order' must be '1' or '2' or '3'.") if (skip_type == 'logSNR'): timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) else: timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor(([0] + orders)), dim=0).to(device)] return (timesteps_outer, orders) def denoise_to_zero_fn(self, x, s): return self.data_prediction_fn(x, s) def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False): ns = self.noise_schedule dims = x.dim() (lambda_s, lambda_t) = (ns.marginal_lambda(s), ns.marginal_lambda(t)) h = (lambda_t - lambda_s) (log_alpha_s, log_alpha_t) = (ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)) (sigma_s, sigma_t) = (ns.marginal_std(s), ns.marginal_std(t)) alpha_t = torch.exp(log_alpha_t) if self.predict_x0: phi_1 = torch.expm1((- h)) if (model_s is None): model_s = self.model_fn(x, s) x_t = ((expand_dims((sigma_t / sigma_s), dims) * x) - (expand_dims((alpha_t * phi_1), dims) * model_s)) if return_intermediate: return (x_t, {'model_s': model_s}) else: return x_t else: phi_1 = torch.expm1(h) if (model_s is None): model_s = self.model_fn(x, s) x_t = ((expand_dims(torch.exp((log_alpha_t - log_alpha_s)), dims) * x) - (expand_dims((sigma_t * phi_1), dims) * model_s)) if return_intermediate: return (x_t, {'model_s': model_s}) else: return x_t def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpm_solver'): if (solver_type not in ['dpm_solver', 'taylor']): raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) if (r1 is None): r1 = 0.5 ns = self.noise_schedule dims = x.dim() (lambda_s, lambda_t) = (ns.marginal_lambda(s), ns.marginal_lambda(t)) h = (lambda_t - lambda_s) lambda_s1 = (lambda_s + (r1 * h)) s1 = ns.inverse_lambda(lambda_s1) (log_alpha_s, log_alpha_s1, log_alpha_t) = (ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t)) (sigma_s, sigma_s1, sigma_t) = (ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)) (alpha_s1, alpha_t) = (torch.exp(log_alpha_s1), torch.exp(log_alpha_t)) if self.predict_x0: phi_11 = torch.expm1(((- r1) * h)) phi_1 = torch.expm1((- h)) if (model_s is None): model_s = self.model_fn(x, s) x_s1 = ((expand_dims((sigma_s1 / sigma_s), dims) * x) - (expand_dims((alpha_s1 * phi_11), dims) * model_s)) model_s1 = self.model_fn(x_s1, s1) if (solver_type == 'dpm_solver'): x_t = (((expand_dims((sigma_t / sigma_s), dims) * x) - (expand_dims((alpha_t * phi_1), dims) * model_s)) - (((0.5 / r1) * expand_dims((alpha_t * phi_1), dims)) * (model_s1 - model_s))) elif (solver_type == 'taylor'): x_t = (((expand_dims((sigma_t / sigma_s), dims) * x) - (expand_dims((alpha_t * phi_1), dims) * model_s)) + (((1.0 / r1) * expand_dims((alpha_t * (((torch.exp((- h)) - 1.0) / h) + 1.0)), dims)) * (model_s1 - model_s))) else: phi_11 = torch.expm1((r1 * h)) phi_1 = torch.expm1(h) if (model_s is None): model_s = self.model_fn(x, s) x_s1 = ((expand_dims(torch.exp((log_alpha_s1 - log_alpha_s)), dims) * x) - (expand_dims((sigma_s1 * phi_11), dims) * model_s)) model_s1 = self.model_fn(x_s1, s1) if (solver_type == 'dpm_solver'): x_t = (((expand_dims(torch.exp((log_alpha_t - log_alpha_s)), dims) * x) - (expand_dims((sigma_t * phi_1), dims) * model_s)) - (((0.5 / r1) * expand_dims((sigma_t * phi_1), dims)) * (model_s1 - model_s))) elif (solver_type == 'taylor'): x_t = (((expand_dims(torch.exp((log_alpha_t - log_alpha_s)), dims) * x) - (expand_dims((sigma_t * phi_1), dims) * model_s)) - (((1.0 / r1) * expand_dims((sigma_t * (((torch.exp(h) - 1.0) / h) - 1.0)), dims)) * (model_s1 - model_s))) if return_intermediate: return (x_t, {'model_s': model_s, 'model_s1': model_s1}) else: return x_t def singlestep_dpm_solver_third_update(self, x, s, t, r1=(1.0 / 3.0), r2=(2.0 / 3.0), model_s=None, model_s1=None, return_intermediate=False, solver_type='dpm_solver'): if (solver_type not in ['dpm_solver', 'taylor']): raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) if (r1 is None): r1 = (1.0 / 3.0) if (r2 is None): r2 = (2.0 / 3.0) ns = self.noise_schedule dims = x.dim() (lambda_s, lambda_t) = (ns.marginal_lambda(s), ns.marginal_lambda(t)) h = (lambda_t - lambda_s) lambda_s1 = (lambda_s + (r1 * h)) lambda_s2 = (lambda_s + (r2 * h)) s1 = ns.inverse_lambda(lambda_s1) s2 = ns.inverse_lambda(lambda_s2) (log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t) = (ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)) (sigma_s, sigma_s1, sigma_s2, sigma_t) = (ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t)) (alpha_s1, alpha_s2, alpha_t) = (torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)) if self.predict_x0: phi_11 = torch.expm1(((- r1) * h)) phi_12 = torch.expm1(((- r2) * h)) phi_1 = torch.expm1((- h)) phi_22 = ((torch.expm1(((- r2) * h)) / (r2 * h)) + 1.0) phi_2 = ((phi_1 / h) + 1.0) phi_3 = ((phi_2 / h) - 0.5) if (model_s is None): model_s = self.model_fn(x, s) if (model_s1 is None): x_s1 = ((expand_dims((sigma_s1 / sigma_s), dims) * x) - (expand_dims((alpha_s1 * phi_11), dims) * model_s)) model_s1 = self.model_fn(x_s1, s1) x_s2 = (((expand_dims((sigma_s2 / sigma_s), dims) * x) - (expand_dims((alpha_s2 * phi_12), dims) * model_s)) + (((r2 / r1) * expand_dims((alpha_s2 * phi_22), dims)) * (model_s1 - model_s))) model_s2 = self.model_fn(x_s2, s2) if (solver_type == 'dpm_solver'): x_t = (((expand_dims((sigma_t / sigma_s), dims) * x) - (expand_dims((alpha_t * phi_1), dims) * model_s)) + (((1.0 / r2) * expand_dims((alpha_t * phi_2), dims)) * (model_s2 - model_s))) elif (solver_type == 'taylor'): D1_0 = ((1.0 / r1) * (model_s1 - model_s)) D1_1 = ((1.0 / r2) * (model_s2 - model_s)) D1 = (((r2 * D1_0) - (r1 * D1_1)) / (r2 - r1)) D2 = ((2.0 * (D1_1 - D1_0)) / (r2 - r1)) x_t = ((((expand_dims((sigma_t / sigma_s), dims) * x) - (expand_dims((alpha_t * phi_1), dims) * model_s)) + (expand_dims((alpha_t * phi_2), dims) * D1)) - (expand_dims((alpha_t * phi_3), dims) * D2)) else: phi_11 = torch.expm1((r1 * h)) phi_12 = torch.expm1((r2 * h)) phi_1 = torch.expm1(h) phi_22 = ((torch.expm1((r2 * h)) / (r2 * h)) - 1.0) phi_2 = ((phi_1 / h) - 1.0) phi_3 = ((phi_2 / h) - 0.5) if (model_s is None): model_s = self.model_fn(x, s) if (model_s1 is None): x_s1 = ((expand_dims(torch.exp((log_alpha_s1 - log_alpha_s)), dims) * x) - (expand_dims((sigma_s1 * phi_11), dims) * model_s)) model_s1 = self.model_fn(x_s1, s1) x_s2 = (((expand_dims(torch.exp((log_alpha_s2 - log_alpha_s)), dims) * x) - (expand_dims((sigma_s2 * phi_12), dims) * model_s)) - (((r2 / r1) * expand_dims((sigma_s2 * phi_22), dims)) * (model_s1 - model_s))) model_s2 = self.model_fn(x_s2, s2) if (solver_type == 'dpm_solver'): x_t = (((expand_dims(torch.exp((log_alpha_t - log_alpha_s)), dims) * x) - (expand_dims((sigma_t * phi_1), dims) * model_s)) - (((1.0 / r2) * expand_dims((sigma_t * phi_2), dims)) * (model_s2 - model_s))) elif (solver_type == 'taylor'): D1_0 = ((1.0 / r1) * (model_s1 - model_s)) D1_1 = ((1.0 / r2) * (model_s2 - model_s)) D1 = (((r2 * D1_0) - (r1 * D1_1)) / (r2 - r1)) D2 = ((2.0 * (D1_1 - D1_0)) / (r2 - r1)) x_t = ((((expand_dims(torch.exp((log_alpha_t - log_alpha_s)), dims) * x) - (expand_dims((sigma_t * phi_1), dims) * model_s)) - (expand_dims((sigma_t * phi_2), dims) * D1)) - (expand_dims((sigma_t * phi_3), dims) * D2)) if return_intermediate: return (x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}) else: return x_t def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'): if (solver_type not in ['dpm_solver', 'taylor']): raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) ns = self.noise_schedule dims = x.dim() (model_prev_1, model_prev_0) = model_prev_list[(- 2):] (t_prev_1, t_prev_0) = t_prev_list[(- 2):] (lambda_prev_1, lambda_prev_0, lambda_t) = (ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)) (log_alpha_prev_0, log_alpha_t) = (ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)) (sigma_prev_0, sigma_t) = (ns.marginal_std(t_prev_0), ns.marginal_std(t)) alpha_t = torch.exp(log_alpha_t) h_0 = (lambda_prev_0 - lambda_prev_1) h = (lambda_t - lambda_prev_0) r0 = (h_0 / h) D1_0 = (expand_dims((1.0 / r0), dims) * (model_prev_0 - model_prev_1)) if self.predict_x0: if (solver_type == 'dpm_solver'): x_t = (((expand_dims((sigma_t / sigma_prev_0), dims) * x) - (expand_dims((alpha_t * (torch.exp((- h)) - 1.0)), dims) * model_prev_0)) - ((0.5 * expand_dims((alpha_t * (torch.exp((- h)) - 1.0)), dims)) * D1_0)) elif (solver_type == 'taylor'): x_t = (((expand_dims((sigma_t / sigma_prev_0), dims) * x) - (expand_dims((alpha_t * (torch.exp((- h)) - 1.0)), dims) * model_prev_0)) + (expand_dims((alpha_t * (((torch.exp((- h)) - 1.0) / h) + 1.0)), dims) * D1_0)) else: if (solver_type == 'dpm_solver'): x_t = (((expand_dims(torch.exp((log_alpha_t - log_alpha_prev_0)), dims) * x) - (expand_dims((sigma_t * (torch.exp(h) - 1.0)), dims) * model_prev_0)) - ((0.5 * expand_dims((sigma_t * (torch.exp(h) - 1.0)), dims)) * D1_0)) elif (solver_type == 'taylor'): x_t = (((expand_dims(torch.exp((log_alpha_t - log_alpha_prev_0)), dims) * x) - (expand_dims((sigma_t * (torch.exp(h) - 1.0)), dims) * model_prev_0)) - (expand_dims((sigma_t * (((torch.exp(h) - 1.0) / h) - 1.0)), dims) * D1_0)) return x_t def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'): ns = self.noise_schedule dims = x.dim() (model_prev_2, model_prev_1, model_prev_0) = model_prev_list (t_prev_2, t_prev_1, t_prev_0) = t_prev_list (lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t) = (ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)) (log_alpha_prev_0, log_alpha_t) = (ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)) (sigma_prev_0, sigma_t) = (ns.marginal_std(t_prev_0), ns.marginal_std(t)) alpha_t = torch.exp(log_alpha_t) h_1 = (lambda_prev_1 - lambda_prev_2) h_0 = (lambda_prev_0 - lambda_prev_1) h = (lambda_t - lambda_prev_0) (r0, r1) = ((h_0 / h), (h_1 / h)) D1_0 = (expand_dims((1.0 / r0), dims) * (model_prev_0 - model_prev_1)) D1_1 = (expand_dims((1.0 / r1), dims) * (model_prev_1 - model_prev_2)) D1 = (D1_0 + (expand_dims((r0 / (r0 + r1)), dims) * (D1_0 - D1_1))) D2 = (expand_dims((1.0 / (r0 + r1)), dims) * (D1_0 - D1_1)) if self.predict_x0: x_t = ((((expand_dims((sigma_t / sigma_prev_0), dims) * x) - (expand_dims((alpha_t * (torch.exp((- h)) - 1.0)), dims) * model_prev_0)) + (expand_dims((alpha_t * (((torch.exp((- h)) - 1.0) / h) + 1.0)), dims) * D1)) - (expand_dims((alpha_t * ((((torch.exp((- h)) - 1.0) + h) / (h ** 2)) - 0.5)), dims) * D2)) else: x_t = ((((expand_dims(torch.exp((log_alpha_t - log_alpha_prev_0)), dims) * x) - (expand_dims((sigma_t * (torch.exp(h) - 1.0)), dims) * model_prev_0)) - (expand_dims((sigma_t * (((torch.exp(h) - 1.0) / h) - 1.0)), dims) * D1)) - (expand_dims((sigma_t * ((((torch.exp(h) - 1.0) - h) / (h ** 2)) - 0.5)), dims) * D2)) return x_t def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, r2=None): if (order == 1): return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate) elif (order == 2): return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1) elif (order == 3): return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2) else: raise ValueError('Solver order must be 1 or 2 or 3, got {}'.format(order)) def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'): if (order == 1): return self.dpm_solver_first_update(x, t_prev_list[(- 1)], t, model_s=model_prev_list[(- 1)]) elif (order == 2): return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) elif (order == 3): return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) else: raise ValueError('Solver order must be 1 or 2 or 3, got {}'.format(order)) def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-05, solver_type='dpm_solver'): ns = self.noise_schedule s = (t_T * torch.ones((x.shape[0],)).to(x)) lambda_s = ns.marginal_lambda(s) lambda_0 = ns.marginal_lambda((t_0 * torch.ones_like(s).to(x))) h = (h_init * torch.ones_like(s).to(x)) x_prev = x nfe = 0 if (order == 2): r1 = 0.5 lower_update = (lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)) higher_update = (lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs)) elif (order == 3): (r1, r2) = ((1.0 / 3.0), (2.0 / 3.0)) lower_update = (lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type)) higher_update = (lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs)) else: raise ValueError('For adaptive step size solver, order must be 2 or 3, got {}'.format(order)) while (torch.abs((s - t_0)).mean() > t_err): t = ns.inverse_lambda((lambda_s + h)) (x_lower, lower_noise_kwargs) = lower_update(x, s, t) x_higher = higher_update(x, s, t, **lower_noise_kwargs) delta = torch.max((torch.ones_like(x).to(x) * atol), (rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))) norm_fn = (lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], (- 1)))).mean(dim=(- 1), keepdim=True))) E = norm_fn(((x_higher - x_lower) / delta)).max() if torch.all((E <= 1.0)): x = x_higher s = t x_prev = x_lower lambda_s = ns.marginal_lambda(s) h = torch.min(((theta * h) * torch.float_power(E, ((- 1.0) / order)).float()), (lambda_0 - lambda_s)) nfe += order print('adaptive solver nfe', nfe) return x def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform', method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', atol=0.0078, rtol=0.05): t_0 = ((1.0 / self.noise_schedule.total_N) if (t_end is None) else t_end) t_T = (self.noise_schedule.T if (t_start is None) else t_start) device = x.device if (method == 'adaptive'): with torch.no_grad(): x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type) elif (method == 'multistep'): assert (steps >= order) timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) assert ((timesteps.shape[0] - 1) == steps) with torch.no_grad(): vec_t = timesteps[0].expand(x.shape[0]) model_prev_list = [self.model_fn(x, vec_t)] t_prev_list = [vec_t] for init_order in range(1, order): vec_t = timesteps[init_order].expand(x.shape[0]) x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, solver_type=solver_type) model_prev_list.append(self.model_fn(x, vec_t)) t_prev_list.append(vec_t) for step in range(order, (steps + 1)): vec_t = timesteps[step].expand(x.shape[0]) if (lower_order_final and (steps < 15)): step_order = min(order, ((steps + 1) - step)) else: step_order = order x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, solver_type=solver_type) for i in range((order - 1)): t_prev_list[i] = t_prev_list[(i + 1)] model_prev_list[i] = model_prev_list[(i + 1)] t_prev_list[(- 1)] = vec_t if (step < steps): model_prev_list[(- 1)] = self.model_fn(x, vec_t) elif (method in ['singlestep', 'singlestep_fixed']): if (method == 'singlestep'): (timesteps_outer, orders) = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device) elif (method == 'singlestep_fixed'): K = (steps // order) orders = ([order] * K) timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) for (i, order) in enumerate(orders): (t_T_inner, t_0_inner) = (timesteps_outer[i], timesteps_outer[(i + 1)]) timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), N=order, device=device) lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) (vec_s, vec_t) = (t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])) h = (lambda_inner[(- 1)] - lambda_inner[0]) r1 = (None if (order <= 1) else ((lambda_inner[1] - lambda_inner[0]) / h)) r2 = (None if (order <= 2) else ((lambda_inner[2] - lambda_inner[0]) / h)) x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2) if denoise_to_zero: x = self.denoise_to_zero_fn(x, (torch.ones((x.shape[0],)).to(device) * t_0)) return x
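A minimal usage sketch for the DPM_Solver class above. The toy linear-beta schedule and the zero-output noise predictor below are stand-ins invented for illustration only (the real NoiseSchedule wrapper ships with the DPM-Solver codebase); the sketch assumes the module-level expand_dims helper the class references is in scope, and only implements the marginal_*/inverse_lambda interface the class actually calls.

import torch

class ToyLinearSchedule:
    # Stand-in VP-style schedule (linear beta); not the official NoiseSchedule class.
    def __init__(self, beta_0=0.1, beta_1=20.0):
        self.beta_0, self.beta_1 = beta_0, beta_1
        self.T, self.total_N = 1.0, 1000

    def marginal_log_mean_coeff(self, t):
        # log alpha_t for a linear beta schedule
        return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0

    def marginal_alpha(self, t):
        return torch.exp(self.marginal_log_mean_coeff(t))

    def marginal_std(self, t):
        return torch.sqrt(1.0 - torch.exp(2.0 * self.marginal_log_mean_coeff(t)))

    def marginal_lambda(self, t):
        # lambda_t = log(alpha_t / sigma_t), the half-logSNR
        log_alpha = self.marginal_log_mean_coeff(t)
        return log_alpha - 0.5 * torch.log(1.0 - torch.exp(2.0 * log_alpha))

    def inverse_lambda(self, lamb):
        # closed-form inverse of marginal_lambda for the linear schedule
        tmp = 2.0 * (self.beta_1 - self.beta_0) * torch.logaddexp(-2.0 * lamb, torch.zeros_like(lamb))
        delta = self.beta_0 ** 2 + tmp
        return tmp / (torch.sqrt(delta) + self.beta_0) / (self.beta_1 - self.beta_0)

model_fn = lambda x, t: torch.zeros_like(x)  # dummy noise predictor, for wiring only
solver = DPM_Solver(model_fn, ToyLinearSchedule(), predict_x0=True)
x_T = torch.randn(2, 3, 32, 32)  # start from Gaussian noise
x_0 = solver.sample(x_T, steps=12, order=2, method='multistep')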
def load_data(args): (train, validate, test) = process_data.get_data(args.text_only) word_vector_path = '../Data/weibo/word_embedding.pickle' with open(word_vector_path, 'rb') as f: weight = pickle.load(f) (W, W2, word_idx_map, vocab, max_len) = (weight[0], weight[1], weight[2], weight[3], weight[4]) args.vocab_size = len(vocab) args.sequence_len = max_len print('translate data to embedding') (word_embedding, mask) = word2vec(validate['post_text'], word_idx_map, W) validate['post_text'] = word_embedding validate['mask'] = mask print('translate test data to embedding') (word_embedding, mask) = word2vec(test['post_text'], word_idx_map, W) test['post_text'] = word_embedding test['mask'] = mask (word_embedding, mask) = word2vec(train['post_text'], word_idx_map, W) train['post_text'] = word_embedding train['mask'] = mask print(('sequence length ' + str(args.sequence_len))) print(('Train Data Size is ' + str(len(train['post_text'])))) print('Finished loading data ') return (train, validate, test, W)
def get_public_key(message: bytes, signature: Signature, hasher: Callable[([bytes], bytes)]=eth_sign_sha3) -> PublicKey: hashed_message = hasher(message) if (signature[(- 1)] >= 27): signature = Signature((signature[:(- 1)] + bytes([(signature[(- 1)] - 27)]))) try: sig = keys.Signature(signature_bytes=signature) public_key = keys.ecdsa_recover(message_hash=hashed_message, signature=sig) except (BadSignature, ValidationError) as e: raise InvalidSignature() from e return public_key
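A quick round-trip sketch for the recovery helper above, using eth-keys (the same keys module the helper calls into). The hard-coded private key is a throwaway value for illustration, and Signature here is the byte-string alias the snippet's module imports:

from eth_keys import keys

priv = keys.PrivateKey(b'\x01' * 32)  # throwaway key; never hard-code keys in real code
message = b'payload to authenticate'
# Sign the eth_sign-style hash and serialize to the 65-byte r||s||v layout
# that get_public_key expects (eth-keys emits v in {0, 1}, so no -27 shift occurs).
signature = Signature(priv.sign_msg_hash(eth_sign_sha3(message)).to_bytes())
assert get_public_key(message, signature) == priv.public_key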
class FusedQdqLinear(torch.autograd.Function): @staticmethod def forward(ctx, inp, weight, bias, weight_encoding_min, weight_encoding_max, weight_quantizer): ctx.save_for_backward(inp, weight, bias, weight_encoding_min, weight_encoding_max) ctx.weight_quantizer = weight_quantizer (qdq_weight, _) = ste.calculate_forward_pass(weight, weight_quantizer, weight_encoding_min, weight_encoding_max) return F.linear(inp, qdq_weight, bias) @staticmethod def backward(ctx, grad): (inp, weight, bias, weight_encoding_min, weight_encoding_max) = ctx.saved_tensors qdq_weight = intermediate_result = None if (inp.requires_grad or weight.requires_grad or weight_encoding_min.requires_grad or weight_encoding_max.requires_grad): (qdq_weight, intermediate_result) = ste.calculate_forward_pass(weight, ctx.weight_quantizer, weight_encoding_min, weight_encoding_max) dloss_by_dx = None if inp.requires_grad: assert (qdq_weight is not None) dloss_by_dx = torch.matmul(grad, qdq_weight) del qdq_weight dloss_by_dWq = None if (weight.requires_grad or weight_encoding_min.requires_grad or weight_encoding_max.requires_grad): dloss_by_dWq = torch.matmul(grad.view(grad.shape[(- 1)], (- 1)), inp.view((- 1), inp.shape[(- 1)])) dloss_by_dW = None if weight.requires_grad: assert (dloss_by_dWq is not None) assert (intermediate_result is not None) dloss_by_dW = (dloss_by_dWq * intermediate_result.mask_tensor) dloss_by_dmin = dloss_by_dmax = None if (weight_encoding_min.requires_grad or weight_encoding_max.requires_grad): assert (dloss_by_dWq is not None) assert (intermediate_result is not None) (dloss_by_dmin, dloss_by_dmax) = ste.calculate_gradients(weight, dloss_by_dWq, intermediate_result, ctx.weight_quantizer.channel_axis) del dloss_by_dWq del intermediate_result dloss_by_db = None if (isinstance(bias, torch.Tensor) and bias.requires_grad): dloss_by_db = grad.sum(dim=0) return (dloss_by_dx, dloss_by_dW, dloss_by_db, dloss_by_dmin, dloss_by_dmax, None)
def _parse_speaker_info(data_root): speaker_info_path = join(data_root, 'gender_f0range.txt') if (not exists(speaker_info_path)): raise RuntimeError("File {} doesn't exist".format(speaker_info_path)) speaker_info = OrderedDict() terms = ['speaker', 'Male_or_Female', 'minf0[Hz]', 'maxf0[Hz]'] with open(speaker_info_path, 'r', encoding='utf8') as file_: for line in file_: fields = line.strip().split() if (fields[0] == terms[0]): continue assert (len(fields) == 4) (speaker, gender, minf0, maxf0) = fields speaker_info[speaker] = {} speaker_info[speaker]['gender'] = gender speaker_info[speaker]['minf0'] = minf0 speaker_info[speaker]['maxf0'] = maxf0 return speaker_info
class StandardTransform(object): def __init__(self, transform=None, target_transform=None): self.transform = transform self.target_transform = target_transform def __call__(self, input, target): if (self.transform is not None): input = self.transform(input) if (self.target_transform is not None): target = self.target_transform(target) return (input, target) def _format_transform_repr(self, transform, head): lines = transform.__repr__().splitlines() return (['{}{}'.format(head, lines[0])] + ['{}{}'.format((' ' * len(head)), line) for line in lines[1:]]) def __repr__(self): body = [self.__class__.__name__] if (self.transform is not None): body += self._format_transform_repr(self.transform, 'Transform: ') if (self.target_transform is not None): body += self._format_transform_repr(self.target_transform, 'Target transform: ') return '\n'.join(body)
def test_check_output_with_called_process_error(tmp_path: Path, tmp_venv: VirtualEnv, mocker: MockerFixture) -> None: mocker.patch('subprocess.check_output', side_effect=subprocess.CalledProcessError(42, 'some_command', 'some output', 'some error')) with pytest.raises(EnvCommandError) as error: tmp_venv.run('python', '-') subprocess.check_output.assert_called_once() assert ('some output' in str(error.value)) assert ('some error' in str(error.value))
class DataCollatorForMaskedLM(DataCollatorForLanguageModeling): def torch_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any]=None) -> Tuple[(Any, Any)]: import torch labels = inputs.clone() probability_matrix = torch.full(labels.shape, self.mlm_probability) if (special_tokens_mask is None): special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()] special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool) else: special_tokens_mask = special_tokens_mask.bool() probability_matrix.masked_fill_(special_tokens_mask, value=0.0) masked_indices = torch.bernoulli(probability_matrix).bool() labels[(~ masked_indices)] = (- 100) indices_replaced = (torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices) inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) indices_random = ((torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices) & (~ indices_replaced)) random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long) inputs[indices_random] = random_words[indices_random] return (inputs, labels)
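A short sketch of exercising the collator above with a stock tokenizer; the checkpoint name is just an example, and the batch is built directly rather than through a Dataset for brevity:

import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
collator = DataCollatorForMaskedLM(tokenizer=tokenizer, mlm_probability=0.15)
encoded = tokenizer(['hello world', 'masked language modeling'], padding=True, return_tensors='pt')
# torch_mask_tokens mutates its input, so pass a clone of the ids.
inputs, labels = collator.torch_mask_tokens(encoded['input_ids'].clone())
print((labels != -100).sum().item(), 'tokens selected for the MLM loss')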
class UnexpectedEOF(ParseError, UnexpectedInput): expected: 'List[Token]' def __init__(self, expected, state=None, terminals_by_name=None): super(UnexpectedEOF, self).__init__() self.expected = expected self.state = state from .lexer import Token self.token = Token('<EOF>', '') self.pos_in_stream = (- 1) self.line = (- 1) self.column = (- 1) self._terminals_by_name = terminals_by_name def __str__(self): message = 'Unexpected end-of-input. ' message += self._format_expected(self.expected) return message
def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups): ndim = 2 weight_shape = tuple(weight_shape) stride = _tuple_of_ints(stride, ndim) padding = _tuple_of_ints(padding, ndim) output_padding = _tuple_of_ints(output_padding, ndim) dilation = _tuple_of_ints(dilation, ndim) key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) if (key in _conv2d_gradfix_cache): return _conv2d_gradfix_cache[key] assert (groups >= 1) assert (len(weight_shape) == (ndim + 2)) assert all(((stride[i] >= 1) for i in range(ndim))) assert all(((padding[i] >= 0) for i in range(ndim))) assert all(((dilation[i] >= 0) for i in range(ndim))) if (not transpose): assert all(((output_padding[i] == 0) for i in range(ndim))) else: assert all(((0 <= output_padding[i] < max(stride[i], dilation[i])) for i in range(ndim))) common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups) def calc_output_padding(input_shape, output_shape): if transpose: return [0, 0] return [(((input_shape[(i + 2)] - ((output_shape[(i + 2)] - 1) * stride[i])) - (1 - (2 * padding[i]))) - (dilation[i] * (weight_shape[(i + 2)] - 1))) for i in range(ndim)] class Conv2d(torch.autograd.Function): @staticmethod def forward(ctx, input, weight, bias): assert (weight.shape == weight_shape) ctx.save_for_backward((input if weight.requires_grad else _null_tensor), (weight if input.requires_grad else _null_tensor)) ctx.input_shape = input.shape if ((weight_shape[2:] == stride == dilation == (1, 1)) and (padding == (0, 0)) and (torch.cuda.get_device_capability(input.device) < (8, 0))): a = weight.reshape(groups, (weight_shape[0] // groups), weight_shape[1]) b = input.reshape(input.shape[0], groups, (input.shape[1] // groups), (- 1)) c = ((a.transpose(1, 2) if transpose else a) @ b.permute(1, 2, 0, 3).flatten(2)) c = c.reshape((- 1), input.shape[0], *input.shape[2:]).transpose(0, 1) c = (c if (bias is None) else (c + bias.unsqueeze(0).unsqueeze(2).unsqueeze(3))) return c.contiguous(memory_format=(torch.channels_last if (input.stride(1) == 1) else torch.contiguous_format)) if transpose: return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs) return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) @staticmethod def backward(ctx, grad_output): (input, weight) = ctx.saved_tensors input_shape = ctx.input_shape grad_input = None grad_weight = None grad_bias = None if ctx.needs_input_grad[0]: p = calc_output_padding(input_shape=input_shape, output_shape=grad_output.shape) op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs) grad_input = op.apply(grad_output, weight, None) assert (grad_input.shape == input_shape) if (ctx.needs_input_grad[1] and (not weight_gradients_disabled)): grad_weight = Conv2dGradWeight.apply(grad_output, input) assert (grad_weight.shape == weight_shape) if ctx.needs_input_grad[2]: grad_bias = grad_output.sum([0, 2, 3]) return (grad_input, grad_weight, grad_bias) class Conv2dGradWeight(torch.autograd.Function): @staticmethod def forward(ctx, grad_output, input): ctx.save_for_backward((grad_output if input.requires_grad else _null_tensor), (input if grad_output.requires_grad else _null_tensor)) ctx.grad_output_shape = grad_output.shape ctx.input_shape = input.shape if ((weight_shape[2:] == stride == dilation == (1, 1)) and (padding == (0, 0))): a = grad_output.reshape(grad_output.shape[0], groups, (grad_output.shape[1] // groups), (- 1)).permute(1, 2, 0, 3).flatten(2) b = input.reshape(input.shape[0], groups, (input.shape[1] // groups), (- 1)).permute(1, 2, 0, 3).flatten(2) c = ((b @ a.transpose(1, 2)) if transpose else (a @ b.transpose(1, 2))).reshape(weight_shape) return c.contiguous(memory_format=(torch.channels_last if (input.stride(1) == 1) else torch.contiguous_format)) name = ('aten::cudnn_convolution_transpose_backward_weight' if transpose else 'aten::cudnn_convolution_backward_weight') flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32] return torch._C._jit_get_operation(name)(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags) @staticmethod def backward(ctx, grad2_grad_weight): (grad_output, input) = ctx.saved_tensors grad_output_shape = ctx.grad_output_shape input_shape = ctx.input_shape grad2_grad_output = None grad2_input = None if ctx.needs_input_grad[0]: grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None) assert (grad2_grad_output.shape == grad_output_shape) if ctx.needs_input_grad[1]: p = calc_output_padding(input_shape=input_shape, output_shape=grad_output_shape) op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs) grad2_input = op.apply(grad_output, grad2_grad_weight, None) assert (grad2_input.shape == input_shape) return (grad2_grad_output, grad2_input) _conv2d_gradfix_cache[key] = Conv2d return Conv2d
class AwaitExpr(Expression): __slots__ = ('expr',) __match_args__ = ('expr',) expr: Expression def __init__(self, expr: Expression) -> None: super().__init__() self.expr = expr def accept(self, visitor: ExpressionVisitor[T]) -> T: return visitor.visit_await_expr(self)
@click.command(short_help='Run commands within project environments') @click.argument('args', required=True, nargs=-1) @click.option('--env', '-e', 'env_names', multiple=True, help='The environments to target') @click.option('--include', '-i', 'included_variable_specs', multiple=True, help='The matrix variables to include') @click.option('--exclude', '-x', 'excluded_variable_specs', multiple=True, help='The matrix variables to exclude') @click.option('--filter', '-f', 'filter_json', help='The JSON data used to select environments') @click.option('--force-continue', is_flag=True, help='Run every command and if there were any errors exit with the first code') @click.option('--ignore-compat', is_flag=True, help='Ignore incompatibility when selecting specific environments') @click.pass_obj def run(app, args, env_names, included_variable_specs, excluded_variable_specs, filter_json, force_continue, ignore_compat): app.ensure_environment_plugin_dependencies() project = app.project included_variables = {} excluded_variables = {} for (specs, variables, display_type) in ((included_variable_specs, included_variables, 'included'), (excluded_variable_specs, excluded_variables, 'excluded')): for spec in specs: (variable, values) = parse_variable_spec(spec) if (variable in variables): app.abort(f'Duplicate {display_type} variable: {variable}') variables[variable] = values if (not env_names): env_names = [app.env] elif ('system' in env_names): project.config.config['envs'] = {'system': {'type': 'system', 'skip-install': True, 'scripts': project.config.scripts}} env_names = list({env_name: None for env_name in env_names}) environments = [] matrix_selected = False for env_name in env_names: if (env_name in project.config.matrices): matrix_selected = True env_data = project.config.matrices[env_name]['envs'] if (not env_data): app.abort(f'No variables defined for matrix: {env_name}') environments.extend(select_matrix_environments(env_data, included_variables, excluded_variables)) else: environments.append(env_name) if filter_json: import json filter_data = json.loads(filter_json) if (not isinstance(filter_data, dict)): app.abort('The --filter/-f option must be a JSON mapping') environments[:] = filter_environments(project.config.envs, filter_data) if (not environments): app.abort('No environments were selected') elif ((not matrix_selected) and (included_variables or excluded_variables)): app.abort(f"Variable selection is unsupported for non-matrix environments: {', '.join(env_names)}") should_display_header = (app.verbose or matrix_selected or (len(environments) > 1)) any_compatible = False incompatible = {} with project.location.as_cwd(): for env_name in environments: environment = app.get_environment(env_name) if (not environment.exists()): try: environment.check_compatibility() except Exception as e: if (ignore_compat or matrix_selected): incompatible[environment.name] = str(e) continue app.abort(f'Environment `{env_name}` is incompatible: {e}') any_compatible = True if should_display_header: app.display_header(environment.name) if (env_name == 'system'): environment.exists = (lambda : True) app.prepare_environment(environment) app.run_shell_commands(environment, [environment.join_command_args(args)], force_continue=force_continue, show_code_on_error=False) if incompatible: num_incompatible = len(incompatible) padding = ('\n' if any_compatible else '') app.display_warning(f"{padding}Skipped {num_incompatible} incompatible environment{('s' if (num_incompatible > 1) else '')}:") for (env_name, reason) in incompatible.items(): app.display_warning(f'{env_name} -> {reason}')
def compute_output_dims_lengths(array_name: str, loop_orders, sub) -> str: dims_c_code = '' for (i, candidates) in enumerate(zip(*loop_orders)): for (j, candidate) in enumerate(candidates): if (candidate != 'x'): var = sub[f'lv{int(j)}'] dims_c_code += f'''{array_name}[{i}] = {var}_n{candidate}; ''' break else: dims_c_code += f'''{array_name}[{i}] = 1; ''' return dims_c_code
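For illustration, a direct call to the helper above with made-up loop orders and substitution names shows the C code it emits: each output dimension takes its length from the first input that is not broadcast ('x') along it, falling back to 1 when every input broadcasts.

sub = {'lv0': 'V3', 'lv1': 'V5'}
# One loop order per input; 'x' marks a broadcasted (size-1) dimension.
print(compute_output_dims_lengths('dims', [(0, 'x'), ('x', 1)], sub))
# dims[0] = V3_n0;
# dims[1] = V5_n1;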
def rolling_volatility(returns, benchmark=None, period=126, period_label='6-Months', periods_per_year=252, lw=1.5, fontname='Arial', grayscale=False, figsize=(10, 3), ylabel='Volatility', subtitle=True, savefig=None, show=True): returns = _stats.rolling_volatility(returns, period, periods_per_year) if (benchmark is not None): benchmark = _utils._prepare_benchmark(benchmark, returns.index) benchmark = _stats.rolling_volatility(benchmark, period, periods_per_year, prepare_returns=False) fig = _core.plot_rolling_stats(returns, benchmark, hline=returns.mean(), hlw=1.5, ylabel=ylabel, title=('Rolling Volatility (%s)' % period_label), fontname=fontname, grayscale=grayscale, lw=lw, figsize=figsize, subtitle=subtitle, savefig=savefig, show=show) if (not show): return fig
def get_num_synset_2012_images(path, synsets_2012, files_to_skip=None): if path: logging.info('Attempting to read number of leaf images from %s...', path) if tf.io.gfile.exists(path): with tf.io.gfile.GFile(path, 'r') as f: num_synset_2012_images = json.load(f) logging.info('Successful.') return num_synset_2012_images logging.info('Unsuccessful. Deriving number of leaf images...') if (files_to_skip is None): files_to_skip = set() num_synset_2012_images = {} for s_2012 in synsets_2012: synset_dir = os.path.join(FLAGS.ilsvrc_2012_data_root, s_2012.wn_id) num_synset_2012_images[s_2012.wn_id] = len((set(tf.io.gfile.listdir(synset_dir)) - files_to_skip)) if path: with tf.io.gfile.GFile(path, 'w') as f: json.dump(num_synset_2012_images, f, indent=2) return num_synset_2012_images
class GradCAMElementWise(BaseCAM): def __init__(self, model, target_layers, use_cuda=False, reshape_transform=None): super(GradCAMElementWise, self).__init__(model, target_layers, use_cuda, reshape_transform) def get_cam_image(self, input_tensor, target_layer, target_category, activations, grads, eigen_smooth): elementwise_activations = np.maximum((grads * activations), 0) if eigen_smooth: cam = get_2d_projection(elementwise_activations) else: cam = elementwise_activations.sum(axis=1) return cam
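A plausible usage sketch for the subclass above, assuming the surrounding pytorch-grad-cam package layout (BaseCAM's __call__ and the ClassifierOutputTarget helper) is available; the model and target class index are arbitrary examples:

import torch
from torchvision.models import resnet18
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget

model = resnet18(weights=None).eval()
# Element-wise Grad-CAM over the last residual block.
cam = GradCAMElementWise(model=model, target_layers=[model.layer4[-1]])
input_tensor = torch.randn(1, 3, 224, 224)
grayscale_cam = cam(input_tensor=input_tensor, targets=[ClassifierOutputTarget(281)])
print(grayscale_cam.shape)  # (1, 224, 224), one heatmap per batch element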
def make_variable_state_initializer(**kwargs): def variable_state_initializer(shape, batch_size, dtype, index): args = kwargs.copy() if args.get('name'): args['name'] = ((args['name'] + '_') + str(index)) else: args['name'] = ('init_state_' + str(index)) args['shape'] = shape args['dtype'] = dtype var = tf.get_variable(**args) var = tf.expand_dims(var, 0) var = tf.tile(var, tf.stack(([batch_size] + ([1] * len(shape))))) var.set_shape(_state_size_with_prefix(shape, prefix=[None])) return var return variable_state_initializer
class Stream(StreamWriter): def __init__(self, reader: StreamReader, writer: StreamWriter) -> None: super().__init__(writer._transport, writer._protocol, writer._reader, writer._loop) self.remote = self.get_extra_info('peername') async def read(self, n: int=(- 1)) -> bytes: return await self._reader.read(n) async def readline(self) -> bytes: return await self._reader.readline() async def readexactly(self, n: int) -> bytes: return await self._reader.readexactly(n) async def readuntil(self, separator: bytes=b'\n') -> bytes: return await self._reader.readuntil(separator)
@pytest.mark.skipif((not hasattr(m, 'load_variant')), reason='no <variant>') def test_variant(doc): assert (m.load_variant(1) == 'int') assert (m.load_variant('1') == 'std::string') assert (m.load_variant(1.0) == 'double') assert (m.load_variant(None) == 'std::nullptr_t') assert (m.load_variant_2pass(1) == 'int') assert (m.load_variant_2pass(1.0) == 'double') assert (m.cast_variant() == (5, 'Hello')) assert (doc(m.load_variant) == 'load_variant(arg0: Union[int, str, float, None]) -> str')
class ID3OptionParser(OptionParser):
    def __init__(self):
        mutagen_version = '.'.join(map(str, mutagen.version))
        my_version = '.'.join(map(str, VERSION))
        version = ('mid3iconv %s\nUses Mutagen %s' % (my_version, mutagen_version))
        OptionParser.__init__(self, version=version, usage='%prog [OPTION] [FILE]...',
                              description=('Mutagen-based replacement for the id3iconv '
                                           'utility, which converts ID3 tags from legacy '
                                           'encodings to Unicode and stores them using '
                                           'the ID3v2 format.'))

    def format_help(self, *args, **kwargs):
        text = OptionParser.format_help(self, *args, **kwargs)
        return (text + '\nFiles are updated in-place, so use --dry-run first.\n')
class XTSECalendarTestCase(ExchangeCalendarTestBase, TestCase):
    answer_key_filename = 'xtse'
    calendar_class = XTSEExchangeCalendar
    MAX_SESSION_HOURS = 6.5

    def test_2012(self):
        expected_holidays_2012 = [
            pd.Timestamp('2012-01-02', tz=UTC),
            pd.Timestamp('2012-02-20', tz=UTC),
            pd.Timestamp('2012-04-06', tz=UTC),
            pd.Timestamp('2012-05-21', tz=UTC),
            pd.Timestamp('2012-07-02', tz=UTC),
            pd.Timestamp('2012-08-06', tz=UTC),
            pd.Timestamp('2012-09-03', tz=UTC),
            pd.Timestamp('2012-10-08', tz=UTC),
            pd.Timestamp('2012-12-25', tz=UTC),
            pd.Timestamp('2012-12-26', tz=UTC),
        ]
        for session_label in expected_holidays_2012:
            self.assertNotIn(session_label, self.calendar.all_sessions)
        early_closes_2012 = [pd.Timestamp('2012-12-24', tz=UTC)]
        for early_close_session_label in early_closes_2012:
            self.assertIn(early_close_session_label, self.calendar.early_closes)

    def test_special_holidays(self):
        self.assertNotIn(pd.Period('9/11/2001'), self.calendar.all_sessions)
        self.assertNotIn(pd.Period('9/12/2001'), self.calendar.all_sessions)

    def test_new_years(self):
        start_session = pd.Timestamp('2012-01-02', tz=UTC)
        end_session = pd.Timestamp('2013-12-31', tz=UTC)
        sessions = self.calendar.sessions_in_range(start_session, end_session)
        day_after_new_years_sunday = pd.Timestamp('2012-01-02', tz=UTC)
        self.assertNotIn(
            day_after_new_years_sunday, sessions,
            'If NYE falls on a weekend, {0} the Monday after is a holiday.'.format(
                day_after_new_years_sunday))
        first_trading_day_after_new_years_sunday = pd.Timestamp('2012-01-03', tz=UTC)
        self.assertIn(
            first_trading_day_after_new_years_sunday, sessions,
            'If NYE falls on a weekend, {0} the Tuesday after is the first trading day.'.format(
                first_trading_day_after_new_years_sunday))
        new_years_day = pd.Timestamp('2013-01-01', tz=UTC)
        self.assertNotIn(
            new_years_day, sessions,
            'If NYE falls during the week, e.g. {0}, it is a holiday.'.format(new_years_day))
        first_trading_day_after_new_years = pd.Timestamp('2013-01-02', tz=UTC)
        self.assertIn(
            first_trading_day_after_new_years, sessions,
            'If the day after NYE falls during the week, {0} is the first trading day.'.format(
                first_trading_day_after_new_years))

    def test_christmas_eve_half_day(self):
        christmas_eve09 = pd.Timestamp('2009-12-24')
        christmas_eve09_close = self.calendar.next_close(christmas_eve09)
        self.assertEqual(christmas_eve09_close.tz_convert('America/Toronto'),
                         pd.Timestamp('2009-12-24 4:00 PM', tz='America/Toronto'))
        christmas_eve10 = pd.Timestamp('2010-12-24')
        christmas_eve10_close = self.calendar.next_close(christmas_eve10)
        self.assertEqual(christmas_eve10_close.tz_convert('America/Toronto'),
                         pd.Timestamp('2010-12-24 1:00 PM', tz='America/Toronto'))
        for year in ['2012', '2013', '2014', '2015']:
            christmas_eve = pd.Timestamp('{}-12-24'.format(year))
            christmas_eve_close = self.calendar.next_close(christmas_eve)
            self.assertEqual(christmas_eve_close.tz_convert('America/Toronto'),
                             pd.Timestamp('{}-12-24 1:00 PM'.format(year),
                                          tz='America/Toronto'))

    def test_christmas(self):
        christmas = pd.Timestamp('2015-12-25', tz=UTC)
        boxing_day_observed = pd.Timestamp('2015-12-28', tz=UTC)
        self.assertNotIn(christmas, self.calendar.all_sessions)
        self.assertNotIn(boxing_day_observed, self.calendar.all_sessions)
        christmas_observed = pd.Timestamp('2016-12-26', tz=UTC)
        boxing_day_observed = pd.Timestamp('2016-12-27', tz=UTC)
        self.assertNotIn(christmas_observed, self.calendar.all_sessions)
        self.assertNotIn(boxing_day_observed, self.calendar.all_sessions)

    def test_victoria_day(self):
        self.assertIn(pd.Timestamp('2015-05-25'), self.calendar.all_sessions)
        self.assertNotIn(pd.Timestamp('2015-05-18'), self.calendar.all_sessions)
class CyberpunkAWS():
    def __init__(self, expert_env, novice_env, horizon, itrs, trajs, imsize,
                 expert_pkl, **kwargs):
        self.expert_env = expert_env
        self.novice_env = novice_env
        self.horizon = horizon
        self.itrs = itrs
        self.trajs = trajs
        self.expert_pkl = expert_pkl
        self.imsize = imsize

    def train(self):
        expert_env = TfEnv(self.expert_env)
        novice_env = TfEnv(self.novice_env)
        expert_fail_pol = RandomPolicy(expert_env.spec)
        policy = GaussianMLPPolicy(name='novice_policy', env_spec=novice_env.spec,
                                   init_std=10, hidden_sizes=(32, 32))
        baseline = LinearFeatureBaseline(env_spec=expert_env.spec)
        algo = TRPO(env=novice_env, policy=policy, baseline=baseline,
                    batch_size=(50 * 500), max_path_length=self.horizon,
                    n_itr=self.itrs, discount=0.99, step_size=0.01,
                    optimizer=ConjugateGradientOptimizer(
                        hvp_approach=FiniteDifferenceHvp(base_eps=1e-05)))
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            algo.n_itr = 0
            algo.start_itr = 0
            algo.train(sess=sess)
            im_height = self.imsize[0]
            im_width = self.imsize[1]
            im_channels = 3
            dim_input = [im_height, im_width, im_channels]
            disc = DomainConfusionVelocityDiscriminator(input_dim=dim_input,
                                                        output_dim_class=2,
                                                        output_dim_dom=2, tf_sess=sess)
            with open(self.expert_pkl, 'rb') as pfile:
                expert_policy = pickle.load(pfile)
            algo.n_itr = self.itrs
            trainer = CyberPunkTrainer(disc=disc, novice_policy_env=novice_env,
                                       expert_fail_pol=expert_fail_pol,
                                       expert_env=expert_env, novice_policy=policy,
                                       novice_policy_opt_algo=algo,
                                       expert_success_pol=expert_policy,
                                       im_width=im_width, im_height=im_height,
                                       im_channels=im_channels, tf_sess=sess,
                                       horizon=self.horizon)
            iterations = self.itrs
            for iter_step in range(0, iterations):
                logger.record_tabular('Iteration', iter_step)
                trainer.take_iteration(n_trajs_cost=self.trajs,
                                       n_trajs_policy=self.trajs)
                logger.dump_tabular(with_prefix=False)
            trainer.log_and_finish()
def test_unittest_not_shown_in_traceback(pytester: Pytester) -> None:
    pytester.makepyfile(
        """
        import unittest
        class t(unittest.TestCase):
            def test_hello(self):
                x = 3
                self.assertEqual(x, 4)
        """
    )
    res = pytester.runpytest()
    res.stdout.no_fnmatch_line('*failUnlessEqual*')
class Client(QDialog):
    def __init__(self, parent=None):
        super(Client, self).__init__(parent)
        self.blockSize = 0
        self.currentFortune = None
        hostLabel = QLabel('&Server name:')
        self.hostLineEdit = QLineEdit('fortune')
        hostLabel.setBuddy(self.hostLineEdit)
        self.statusLabel = QLabel('This example requires that you run the '
                                  'Fortune Server example as well.')
        self.statusLabel.setWordWrap(True)
        self.getFortuneButton = QPushButton('Get Fortune')
        self.getFortuneButton.setDefault(True)
        quitButton = QPushButton('Quit')
        buttonBox = QDialogButtonBox()
        buttonBox.addButton(self.getFortuneButton, QDialogButtonBox.ActionRole)
        buttonBox.addButton(quitButton, QDialogButtonBox.RejectRole)
        self.socket = QLocalSocket()
        self.hostLineEdit.textChanged.connect(self.enableGetFortuneButton)
        self.getFortuneButton.clicked.connect(self.requestNewFortune)
        quitButton.clicked.connect(self.close)
        self.socket.readyRead.connect(self.readFortune)
        self.socket.error.connect(self.displayError)
        mainLayout = QGridLayout()
        mainLayout.addWidget(hostLabel, 0, 0)
        mainLayout.addWidget(self.hostLineEdit, 0, 1)
        mainLayout.addWidget(self.statusLabel, 2, 0, 1, 2)
        mainLayout.addWidget(buttonBox, 3, 0, 1, 2)
        self.setLayout(mainLayout)
        self.setWindowTitle('Fortune Client')
        self.hostLineEdit.setFocus()

    def requestNewFortune(self):
        self.getFortuneButton.setEnabled(False)
        self.blockSize = 0
        self.socket.abort()
        self.socket.connectToServer(self.hostLineEdit.text())

    def readFortune(self):
        ins = QDataStream(self.socket)
        ins.setVersion(QDataStream.Qt_4_0)
        if (self.blockSize == 0):
            if (self.socket.bytesAvailable() < 2):
                return
            self.blockSize = ins.readUInt16()
        if ins.atEnd():
            return
        nextFortune = ins.readQString()
        if (nextFortune == self.currentFortune):
            QTimer.singleShot(0, self.requestNewFortune)
            return
        self.currentFortune = nextFortune
        self.statusLabel.setText(self.currentFortune)
        self.getFortuneButton.setEnabled(True)

    def displayError(self, socketError):
        errors = {
            QLocalSocket.ServerNotFoundError:
                'The host was not found. Please check the host name and port settings.',
            QLocalSocket.ConnectionRefusedError:
                'The connection was refused by the peer. Make sure the fortune server '
                'is running, and check that the host name and port settings are correct.',
            QLocalSocket.PeerClosedError: None,
        }
        msg = errors.get(socketError,
                         ('The following error occurred: %s.' % self.socket.errorString()))
        if (msg is not None):
            QMessageBox.information(self, 'Fortune Client', msg)
        self.getFortuneButton.setEnabled(True)

    def enableGetFortuneButton(self):
        self.getFortuneButton.setEnabled((self.hostLineEdit.text() != ''))
def test_no_initial_bytes(rgb_data_and_profile):
    (data, profile) = rgb_data_and_profile
    with MemoryFile() as memfile:
        with memfile.open(**profile) as dst:
            dst.write(data)
        view = memfile.getbuffer()
        assert (view.size > 1000000)
        data = bytes(bytearray(view))
    with MemoryFile(data) as memfile:
        with memfile.open() as src:
            assert (sorted(src.profile.items()) == sorted(profile.items()))
class PageViewSet(ModelViewSet):
    permission_classes = ((HasModelPermission | HasObjectPermission),)
    serializer_class = PageSerializer
    filter_backends = (SearchFilter, DjangoFilterBackend)
    search_fields = ('uri', 'title')
    filterset_fields = ('attribute', 'uri', 'uri_prefix', 'uri_path', 'comment',
                        'is_collection')

    def get_queryset(self):
        queryset = Page.objects.all()
        if (self.action in ['index']):
            return queryset
        elif (self.action in ['nested', 'export', 'detail_export']):
            return queryset.prefetch_elements().select_related('attribute')
        else:
            return queryset.prefetch_related(
                'conditions', 'sections', 'editors',
                'page_questionsets__questionset',
                'page_questions__question').select_related('attribute')

    @action(detail=True)
    def nested(self, request, pk):
        serializer = PageNestedSerializer(self.get_object(), context={'request': request})
        return Response(serializer.data)

    @action(detail=False)
    def index(self, request):
        queryset = self.filter_queryset(self.get_queryset())
        serializer = PageIndexSerializer(queryset, many=True)
        return Response(serializer.data)

    @action(detail=False, url_path='export(/(?P<export_format>[a-z]+))?')
    def export(self, request, export_format='xml'):
        queryset = self.filter_queryset(self.get_queryset())
        if (export_format == 'xml'):
            serializer = PageExportSerializer(queryset, many=True)
            xml = PageRenderer().render(serializer.data,
                                        context=self.get_export_renderer_context(request))
            return XMLResponse(xml, name='pages')
        else:
            return render_to_format(self.request, export_format, 'questions',
                                    'questions/export/pages.html', {'pages': queryset})

    @action(detail=True, url_path='export(/(?P<export_format>[a-z]+))?')
    def detail_export(self, request, pk=None, export_format='xml'):
        if (export_format == 'xml'):
            serializer = PageExportSerializer(self.get_object())
            xml = PageRenderer().render([serializer.data],
                                        context=self.get_export_renderer_context(request))
            return XMLResponse(xml, name=self.get_object().uri_path)
        else:
            return render_to_format(self.request, export_format,
                                    self.get_object().uri_path,
                                    'questions/export/pages.html',
                                    {'pages': [self.get_object()]})

    def get_export_renderer_context(self, request):
        full = is_truthy(request.GET.get('full'))
        return {
            'questionsets': (full or is_truthy(request.GET.get('questionsets', True))),
            'questions': (full or is_truthy(request.GET.get('questions', True))),
            'attributes': (full or is_truthy(request.GET.get('attributes'))),
            'optionsets': (full or is_truthy(request.GET.get('optionsets'))),
            'options': (full or is_truthy(request.GET.get('options'))),
            'conditions': (full or is_truthy(request.GET.get('conditions'))),
        }
class FontConfigSearchPattern(FontConfigPattern):
    def __init__(self, fontconfig):
        super(FontConfigSearchPattern, self).__init__(fontconfig)
        self.name = None
        self.bold = False
        self.italic = False
        self.size = None

    def match(self):
        self._prepare_search_pattern()
        result_pattern = self._get_match()
        if result_pattern:
            return FontConfigSearchResult(self._fontconfig, result_pattern)
        else:
            return None

    def _prepare_search_pattern(self):
        self._create()
        self._set_string(FC_FAMILY, self.name)
        self._set_double(FC_SIZE, self.size)
        self._set_integer(FC_WEIGHT, self._bold_to_weight(self.bold))
        self._set_integer(FC_SLANT, self._italic_to_slant(self.italic))
        self._substitute_defaults()

    def _substitute_defaults(self):
        assert self._pattern
        assert self._fontconfig
        self._fontconfig.FcConfigSubstitute(None, self._pattern, FcMatchPattern)
        self._fontconfig.FcDefaultSubstitute(self._pattern)

    def _get_match(self):
        assert self._pattern
        assert self._fontconfig
        match_result = FcResult()
        match_pattern = self._fontconfig.FcFontMatch(0, self._pattern, byref(match_result))
        if _handle_fcresult(match_result.value):
            return match_pattern
        else:
            return None

    def dispose(self):
        self._destroy()
def test_retry_exec_iteration_handlederror_with_stopon():
    rd = RetryDecorator({'max': 3, 'stopOn': '{k1}'})
    context = Context({'k1': ['KeyError', 'ArbError']})
    mock = MagicMock()
    err = HandledError()
    err.__cause__ = ValueError('arb')
    mock.side_effect = err
    with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
        with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
            assert (not rd.exec_iteration(2, context, mock, 3))
    assert (context['retryCounter'] == 2)
    assert (rd.retry_counter == 2)
    assert (len(context) == 2)
    mock.assert_called_once_with({'k1': ['KeyError', 'ArbError'], 'retryCounter': 2})
    mock_logger_error.assert_called_once_with(
        'retry: ignoring error because retryCounter < max.\nValueError: arb')
    mock_logger_debug.assert_any_call('ValueError not in stopOn. Continue.')
def test_transform_multiply(test, device, n):
    a = wp.transform((0.0, 1.0, 0.0), wp.utils.quat_identity())
    x = []
    for i in range(10):
        x.append(wp.utils.transform_identity())
    xforms = wp.array(x, dtype=wp.transform, device=device)
    wp.launch(transform_multiply, dim=n, inputs=[xforms, a], device=device)
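# The transform_multiply kernel launched above is not part of the snippet; a
# plausible Warp kernel matching the launch signature might look like this
# (hypothetical sketch, assuming the built-in wp.transform_multiply):
import warp as wp

@wp.kernel
def transform_multiply(xforms: wp.array(dtype=wp.transform), a: wp.transform):
    tid = wp.tid()
    # compose each stored transform with the constant transform a
    xforms[tid] = wp.transform_multiply(xforms[tid], a)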
@pytest.mark.parametrize('version,parts,expected', [
    ('3.4.5', dict(major=2, minor=5), '2.5.5'),
    ('3.4.5', dict(major=2, minor=5, patch=10), '2.5.10'),
    ('3.4.5-alpha.1.2', dict(major=2), '2.4.5-alpha.1.2'),
    ('3.4.5-alpha.1.2', dict(build='x1'), '3.4.5-alpha.1.2+x1'),
    ('3.4.5+build1', dict(major=2), '2.4.5+build1'),
])
def test_should_return_versioninfo_with_replaced_parts(version, parts, expected):
    assert (Version.parse(version).replace(**parts) == Version.parse(expected))
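# Hedged illustration of the replace() semantics exercised by the test above,
# assuming the python-semver API (semver >= 3 exposes Version; older releases
# call it VersionInfo): parts not passed to replace() carry over unchanged.
from semver import Version

v = Version.parse('3.4.5-alpha.1.2')
assert v.replace(major=2) == Version.parse('2.4.5-alpha.1.2')
assert v.replace(build='x1') == Version.parse('3.4.5-alpha.1.2+x1')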
class WildcardPattern(BasePattern):
    def __init__(self, content=None, min=0, max=HUGE, name=None):
        assert (0 <= min <= max <= HUGE), (min, max)
        if (content is not None):
            content = tuple(map(tuple, content))
            assert len(content), repr(content)
            for alt in content:
                assert len(alt), repr(alt)
        self.content = content
        self.min = min
        self.max = max
        self.name = name

    def optimize(self):
        subpattern = None
        if ((self.content is not None) and (len(self.content) == 1)
                and (len(self.content[0]) == 1)):
            subpattern = self.content[0][0]
        if ((self.min == 1) and (self.max == 1)):
            if (self.content is None):
                return NodePattern(name=self.name)
            if ((subpattern is not None) and (self.name == subpattern.name)):
                return subpattern.optimize()
        if ((self.min <= 1) and isinstance(subpattern, WildcardPattern)
                and (subpattern.min <= 1) and (self.name == subpattern.name)):
            return WildcardPattern(subpattern.content,
                                   (self.min * subpattern.min),
                                   (self.max * subpattern.max),
                                   subpattern.name)
        return self

    def match(self, node, results=None):
        return self.match_seq([node], results)

    def match_seq(self, nodes, results=None):
        for (c, r) in self.generate_matches(nodes):
            if (c == len(nodes)):
                if (results is not None):
                    results.update(r)
                    if self.name:
                        results[self.name] = list(nodes)
                return True
        return False

    def generate_matches(self, nodes):
        if (self.content is None):
            for count in range(self.min, (1 + min(len(nodes), self.max))):
                r = {}
                if self.name:
                    r[self.name] = nodes[:count]
                (yield (count, r))
        elif (self.name == 'bare_name'):
            (yield self._bare_name_matches(nodes))
        else:
            if hasattr(sys, 'getrefcount'):
                save_stderr = sys.stderr
                sys.stderr = StringIO()
            try:
                for (count, r) in self._recursive_matches(nodes, 0):
                    if self.name:
                        r[self.name] = nodes[:count]
                    (yield (count, r))
            except RuntimeError:
                for (count, r) in self._iterative_matches(nodes):
                    if self.name:
                        r[self.name] = nodes[:count]
                    (yield (count, r))
            finally:
                if hasattr(sys, 'getrefcount'):
                    sys.stderr = save_stderr

    def _iterative_matches(self, nodes):
        nodelen = len(nodes)
        if (0 >= self.min):
            (yield (0, {}))
        results = []
        for alt in self.content:
            for (c, r) in generate_matches(alt, nodes):
                (yield (c, r))
                results.append((c, r))
        while results:
            new_results = []
            for (c0, r0) in results:
                if ((c0 < nodelen) and (c0 <= self.max)):
                    for alt in self.content:
                        for (c1, r1) in generate_matches(alt, nodes[c0:]):
                            if (c1 > 0):
                                r = {}
                                r.update(r0)
                                r.update(r1)
                                (yield ((c0 + c1), r))
                                new_results.append(((c0 + c1), r))
            results = new_results

    def _bare_name_matches(self, nodes):
        count = 0
        r = {}
        done = False
        max = len(nodes)
        while ((not done) and (count < max)):
            done = True
            for leaf in self.content:
                if leaf[0].match(nodes[count], r):
                    count += 1
                    done = False
                    break
        r[self.name] = nodes[:count]
        return (count, r)

    def _recursive_matches(self, nodes, count):
        assert (self.content is not None)
        if (count >= self.min):
            (yield (0, {}))
        if (count < self.max):
            for alt in self.content:
                for (c0, r0) in generate_matches(alt, nodes):
                    for (c1, r1) in self._recursive_matches(nodes[c0:], (count + 1)):
                        r = {}
                        r.update(r0)
                        r.update(r1)
                        (yield ((c0 + c1), r))
def bench_all(repeats, dict_path=None):
    logger.debug('loading MorphAnalyzer...')
    morph = MorphAnalyzer(dict_path)
    morph_plain = MorphAnalyzer(dict_path, result_type=None)
    logger.debug('loading benchmark data...')
    words = load_words()
    total_usages = get_total_usages(words)
    logger.debug('Words: %d, usages: %d', len(words), total_usages)
    start_time = datetime.datetime.now()
    logger.info('\nbenchmarking MorphAnalyzer():')
    bench_parse(morph, words, total_usages, repeats)
    bench_tag(morph, words, total_usages, repeats)
    logger.info('\nbenchmarking MorphAnalyzer(result_type=None):')
    bench_parse(morph_plain, words, total_usages, repeats)
    end_time = datetime.datetime.now()
    logger.info(('----\nDone in %s.\n' % (end_time - start_time)))
def _format_fallback_interval(start: _Instant, end: _Instant, skeleton: (str | None),
                              tzinfo: (datetime.tzinfo | None),
                              locale: ((Locale | str) | None)=LC_TIME) -> str:
    if (skeleton in locale.datetime_skeletons):
        format = (lambda dt: format_skeleton(skeleton, dt, tzinfo, locale=locale))
    elif all(((isinstance(d, datetime.date) and (not isinstance(d, datetime.datetime)))
              for d in (start, end))):
        format = (lambda dt: format_date(dt, locale=locale))
    elif all(((isinstance(d, datetime.time) and (not isinstance(d, datetime.date)))
              for d in (start, end))):
        format = (lambda dt: format_time(dt, tzinfo=tzinfo, locale=locale))
    else:
        format = (lambda dt: format_datetime(dt, tzinfo=tzinfo, locale=locale))
    formatted_start = format(start)
    formatted_end = format(end)
    if (formatted_start == formatted_end):
        return format(start)
    return locale.interval_formats.get(None, '{0}-{1}') \
        .replace('{0}', formatted_start) \
        .replace('{1}', formatted_end)
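# Hedged usage via Babel's public wrapper, which delegates to the fallback
# helper above when a locale has no matching interval pattern (assuming the
# babel.dates.format_interval API).
import datetime
from babel.dates import format_interval

start = datetime.date(2016, 1, 15)
end = datetime.date(2016, 1, 17)
# e.g. '1/15/2016 - 1/17/2016'; the exact separator comes from locale data
print(format_interval(start, end, 'yMd', locale='en'))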
class DetectionLoss(nn.Module):
    __constants__ = ['num_classes']

    def __init__(self, config):
        super(DetectionLoss, self).__init__()
        self.config = config
        self.num_classes = config.num_classes
        self.alpha = config.alpha
        self.gamma = config.gamma
        self.delta = config.delta
        self.box_loss_weight = config.box_loss_weight
        self.label_smoothing = config.label_smoothing
        self.new_focal = config.new_focal
        self.use_jit = config.jit_loss

    def forward(self, cls_outputs: List[torch.Tensor], box_outputs: List[torch.Tensor],
                cls_targets: List[torch.Tensor], box_targets: List[torch.Tensor],
                num_positives: torch.Tensor
                ) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
        l_fn = loss_fn
        if ((not torch.jit.is_scripting()) and self.use_jit):
            l_fn = loss_jit
        return l_fn(cls_outputs, box_outputs, cls_targets, box_targets, num_positives,
                    num_classes=self.num_classes, alpha=self.alpha, gamma=self.gamma,
                    delta=self.delta, box_loss_weight=self.box_loss_weight,
                    label_smoothing=self.label_smoothing, new_focal=self.new_focal)
class LabelSmoothSoftmaxCE(nn.Module):
    def __init__(self, lb_pos=0.9, lb_neg=0.005, reduction='mean', lb_ignore=255):
        super(LabelSmoothSoftmaxCE, self).__init__()
        self.lb_pos = lb_pos
        self.lb_neg = lb_neg
        self.reduction = reduction
        self.lb_ignore = lb_ignore
        self.log_softmax = nn.LogSoftmax(1)

    def forward(self, logits, label):
        logs = self.log_softmax(logits)
        ignore = (label.data.cpu() == self.lb_ignore)
        n_valid = (ignore == 0).sum()
        label = label.clone()
        label[ignore] = 0
        lb_one_hot = logits.data.clone().zero_().scatter_(1, label.unsqueeze(1), 1)
        label = ((self.lb_pos * lb_one_hot) + (self.lb_neg * (1 - lb_one_hot)))
        ignore = ignore.nonzero()
        (_, M) = ignore.size()
        (a, *b) = ignore.chunk(M, dim=1)
        label[[a, torch.arange(label.size(1)), *b]] = 0
        if (self.reduction == 'mean'):
            loss = ((- torch.sum(torch.sum((logs * label), dim=1))) / n_valid)
        elif (self.reduction == 'none'):
            loss = (- torch.sum((logs * label), dim=1))
        return loss
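# Quick smoke test for the loss above with hypothetical segmentation-style
# shapes: logits (N, C, H, W), integer labels (N, H, W), 255 marks ignored pixels.
import torch

criterion = LabelSmoothSoftmaxCE(lb_pos=0.9, lb_neg=0.005)
logits = torch.randn(2, 19, 8, 8, requires_grad=True)
labels = torch.randint(0, 19, (2, 8, 8))
labels[0, 0, 0] = 255  # this position is excluded from the loss
loss = criterion(logits, labels)
loss.backward()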
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
    if (not working_directory):
        return list(flags)
    new_flags = []
    make_next_absolute = False
    path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
    for flag in flags:
        new_flag = flag
        if make_next_absolute:
            make_next_absolute = False
            if (not flag.startswith('/')):
                new_flag = os.path.join(working_directory, flag)
        for path_flag in path_flags:
            if (flag == path_flag):
                make_next_absolute = True
                break
            if flag.startswith(path_flag):
                path = flag[len(path_flag):]
                new_flag = (path_flag + os.path.join(working_directory, path))
                break
        if new_flag:
            new_flags.append(new_flag)
    return new_flags
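# Worked example for the helper above: a bare path flag consumes the next
# argument, an attached path flag is rewritten in place, and absolute paths
# pass through unchanged.
flags = ['-I', 'include', '-isystem/usr/include', '-Wall']
print(MakeRelativePathsInFlagsAbsolute(flags, '/home/user/project'))
# ['-I', '/home/user/project/include', '-isystem/usr/include', '-Wall']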
class FileParser():
    def __init__(self, vim: VimClient):
        self._vim = vim

    async def parse_file_structure(self, file_name: str, vim_patterns: Dict) -> Tree[Position]:
        patterns = self._convert_patterns(vim_patterns)
        logger.fdebug('Converted pattern {vim_patterns} to {patterns}')
        with open(file_name, 'r') as test_file:
            lines = test_file.readlines()
        (res, _) = self._parse_position_tree(file_name, patterns['test'],
                                             patterns['namespace'], lines)
        x = Tree[Position].from_list(
            [File(id=file_name, name=file_name, file=file_name, running=0), *res])
        return x

    def _convert_patterns(self, vim_patterns: Dict[(str, List[str])]
                          ) -> Dict[(str, List[Pattern])]:
        tests = [self._convert_regex(pattern)
                 for pattern in vim_patterns.get('test', '')]
        namespaces = [self._convert_regex(pattern)
                      for pattern in vim_patterns.get('namespace', '')]
        return {'test': tests, 'namespace': namespaces}

    def _convert_regex(self, vim_regex: str) -> Pattern:
        regex = vim_regex
        for (pattern, repl) in REGEX_CONVERSIONS.items():
            regex = re.sub(pattern, repl, regex)
        return re.compile(regex)

    def _parse_position_tree(self, file_name: str, test_patterns: List[Pattern],
                             namespace_patterns: List[Pattern], lines: List[str],
                             init_line: int=1, init_indent: int=(- 1),
                             current_namespaces: Optional[List[str]]=None,
                             last_test_indent=(- 1)) -> Tuple[(List[PosList], int)]:
        positions = []
        current_namespaces = (current_namespaces or [])
        line_no = init_line
        while ((line_no - init_line) < len(lines)):
            line = lines[(line_no - init_line)]
            test_name = self._find_match(line, test_patterns)
            namespace_name = self._find_match(line, namespace_patterns)
            if test_name:
                cls = Test
                name = test_name
                children = None
            elif namespace_name:
                cls = Namespace
                name = namespace_name
            else:
                line_no += 1
                continue
            current_indent = INDENT_PATTERN.match(line)
            if (current_indent and (len(current_indent[1]) <= init_indent)):
                consumed = max(((line_no - 1) - init_line), 1)
                return (positions, consumed)
            if (cls is Test):
                last_test_indent = len(current_indent[1])
            id_suffix = hash((file_name, ' '.join(current_namespaces)))
            position = cls(id=self._clean_id((name + str(id_suffix))), file=file_name,
                           line=line_no, col=1, name=name, running=0,
                           namespaces=current_namespaces)
            if (cls is Namespace):
                (children, lines_consumed) = self._parse_position_tree(
                    file_name, test_patterns, namespace_patterns,
                    lines[((line_no - init_line) + 1):], init_line=(line_no + 1),
                    init_indent=len(current_indent[1]),
                    current_namespaces=[*current_namespaces, position.id],
                    last_test_indent=last_test_indent)
                lines_consumed += 1
                if (children and ((last_test_indent == (- 1))
                                  or (last_test_indent >= len(current_indent[1])))):
                    positions.append([position, *children])
            else:
                lines_consumed = 1
                positions.append(position)
            line_no += lines_consumed
        return (positions, line_no)

    def _clean_id(self, id: str) -> str:
        return re.subn('[.\'\\" \\\\/]', '_', id)[0]

    def _find_match(self, line: str, patterns: List[Pattern]) -> Optional[str]:
        for pattern in patterns:
            matched = pattern.match(line)
            if matched:
                return matched[1]
        return None
class Comal80Lexer(RegexLexer):
    name = 'COMAL-80'
    url = 'https://en.wikipedia.org/wiki/COMAL'
    aliases = ['comal', 'comal80']
    filenames = ['*.cml', '*.comal']
    version_added = ''
    flags = re.IGNORECASE
    _suffix = "\\b(?!['\\[\\]\\\\])"
    _identifier = "[a-z]['\\[\\]\\\\\\w]*"

    tokens = {
        'root': [
            ('//.*\\n', Comment.Single),
            ('\\s+', Whitespace),
            (':[=+-]|\\<\\>|[-+*/^<>=]', Operator),
            (('(and +then|or +else)' + _suffix), Operator.Word),
            (words(['and', 'bitand', 'bitor', 'bitxor', 'div', 'in', 'mod', 'not',
                    'or'], suffix=_suffix), Operator.Word),
            (words(['append', 'at', 'case', 'chain', 'close', 'copy', 'create',
                    'cursor', 'data', 'delete', 'dir', 'do', 'elif', 'else', 'end',
                    'endcase', 'endif', 'endfor', 'endloop', 'endtrap', 'endwhile',
                    'exec', 'exit', 'file', 'for', 'goto', 'handler', 'if', 'input',
                    'let', 'loop', 'mount', 'null', 'of', 'open', 'otherwise',
                    'output', 'page', 'pass', 'poke', 'print', 'random', 'read',
                    'repeat', 'report', 'return', 'rename', 'restore', 'select',
                    'step', 'stop', 'sys', 'then', 'to', 'trap', 'unit', 'unit$',
                    'until', 'using', 'when', 'while', 'write', 'zone'],
                   suffix=_suffix), Keyword.Reserved),
            (words(['closed', 'dim', 'endfunc', 'endproc', 'external', 'func',
                    'import', 'proc', 'ref', 'use'], suffix=_suffix),
             Keyword.Declaration),
            (words(['abs', 'atn', 'chr$', 'cos', 'eod', 'eof', 'err', 'errfile',
                    'errtext', 'esc', 'exp', 'int', 'key$', 'len', 'log', 'ord',
                    'peek', 'randomize', 'rnd', 'sgn', 'sin', 'spc$', 'sqr',
                    'status$', 'str$', 'tab', 'tan', 'time', 'val'],
                   suffix=_suffix), Name.Builtin),
            (words(['false', 'pi', 'true'], suffix=_suffix), Keyword.Constant),
            ('"', String, 'string'),
            ((_identifier + ':(?=[ \\n/])'), Name.Label),
            ((_identifier + '[$#]?'), Name),
            ('%[01]+', Number.Bin),
            ('\\$[0-9a-f]+', Number.Hex),
            ('\\d*\\.\\d*(e[-+]?\\d+)?', Number.Float),
            ('\\d+', Number.Integer),
            ('[(),:;]', Punctuation),
        ],
        'string': [
            ('[^"]+', String),
            ('"[0-9]*"', String.Escape),
            ('"', String, '#pop'),
        ],
    }
def _extension_extra_sources():
    extra_sources = {
        'qutip.core.data.matmul': [
            'qutip/core/data/src/matmul_csr_vector.cpp',
            'qutip/core/data/src/matmul_diag_vector.cpp',
        ],
    }
    out = collections.defaultdict(list)
    for (module, sources) in extra_sources.items():
        out[module] = [str(pathlib.Path(source)) for source in sources]
    return out
def initialise_pretrained_embedding(doc_vocab_size, embedding_dim, embedding_placeholder,
                                    name='embedding', trainable=True):
    with tf.name_scope(name):
        if trainable:
            print('init pretrained embds')
            embedding_matrix = tf.Variable(embedding_placeholder, trainable=True,
                                           name='W', dtype=tf.float32)
        else:
            W = tf.Variable(tf.constant(0.0, shape=[doc_vocab_size, embedding_dim]),
                            trainable=False, name='W')
            embedding_matrix = W.assign(embedding_placeholder)
        return embedding_matrix
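# Sketch of wiring the non-trainable branch above in a TF1 session; the
# vocabulary size and dimensions are hypothetical. Running the returned
# assign op copies the pretrained matrix into W exactly once.
import numpy as np
import tensorflow as tf  # TF 1.x API assumed

pretrained = np.random.rand(10000, 300).astype('float32')  # stand-in matrix
embedding_placeholder = tf.placeholder(tf.float32, [10000, 300])
embedding = initialise_pretrained_embedding(10000, 300, embedding_placeholder,
                                            trainable=False)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(embedding, feed_dict={embedding_placeholder: pretrained})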