code
stringlengths
281
23.7M
def remove_trivial(types: Iterable[Type]) -> list[Type]:
    """Drop trivial members from *types*, preserving first-seen order.

    * ``UninhabitedType`` members are discarded.
    * ``NoneType`` members are discarded when strict-optional is off.
    * A ``builtins.object`` instance absorbs every other member.
    * Duplicates (compared by proper type) are collapsed.

    Falls back to ``[NoneType()]`` when only Nones were removed, and to
    ``[UninhabitedType()]`` when nothing at all survives.
    """
    dropped_none = False
    seen: set = set()
    kept = []
    for item in types:
        proper = get_proper_type(item)
        if isinstance(proper, UninhabitedType):
            continue
        if isinstance(proper, NoneType) and not state.strict_optional:
            dropped_none = True
            continue
        # `object` is a supertype of everything: it swallows the union.
        if isinstance(proper, Instance) and proper.type.fullname == 'builtins.object':
            return [proper]
        if proper in seen:
            continue
        seen.add(proper)
        kept.append(item)
    if kept:
        return kept
    return [NoneType()] if dropped_none else [UninhabitedType()]
def plot_overlap(logger, names=None):
    """Plot each named metric series from *logger* on the current axes.

    Parameters
    ----------
    logger : object exposing ``names`` (list), ``numbers`` (name -> series
        mapping) and ``title``.
    names : optional subset of series names; defaults to ``logger.names``.

    Returns
    -------
    list of legend labels of the form ``"<title>(<name>)"``.
    """
    # Bug fix: compare against None with `is`, not `==` (PEP 8; `==` may
    # invoke an arbitrary __eq__ on list-like arguments).
    names = logger.names if names is None else names
    numbers = logger.numbers
    for name in names:  # the enumerate() index was never used
        series = np.asarray(numbers[name])
        plt.plot(np.arange(len(series)), series)
    return [f'{logger.title}({name})' for name in names]
class VecVideoRecorder(VecEnvWrapper):
    """Wrap a vectorized env and periodically record rollout videos.

    A recording starts whenever ``record_video_trigger(step_id)`` returns
    True and stops after ``video_length`` captured frames (or on
    reset/close). Files are written to *directory* with names of the form
    ``vecenv.video.<pid>.video<step_id>``.
    """

    def __init__(self, venv, directory, record_video_trigger, video_length=200):
        # record_video_trigger: callable(step_id) -> bool deciding when a
        # new recording should begin.
        VecEnvWrapper.__init__(self, venv)
        self.record_video_trigger = record_video_trigger
        self.video_recorder = None
        self.directory = os.path.abspath(directory)
        if (not os.path.exists(self.directory)):
            # NOTE(review): mkdir is not recursive; the parent must exist.
            os.mkdir(self.directory)
        self.file_prefix = 'vecenv'
        self.file_infix = '{}'.format(os.getpid())
        self.step_id = 0
        self.video_length = video_length  # max frames per video
        self.recording = False
        self.recorded_frames = 0

    def reset(self):
        """Reset the wrapped env and immediately start a fresh recording."""
        obs = self.venv.reset()
        self.start_video_recorder()
        return obs

    def start_video_recorder(self):
        """Close any active recorder and open a new one, capturing frame 0."""
        self.close_video_recorder()
        base_path = os.path.join(self.directory, '{}.video.{}.video{:06}'.format(self.file_prefix, self.file_infix, self.step_id))
        self.video_recorder = video_recorder.VideoRecorder(env=self.venv, base_path=base_path, metadata={'step_id': self.step_id})
        self.video_recorder.capture_frame()
        self.recorded_frames = 1
        self.recording = True

    def _video_enabled(self):
        # Delegate the "should we record now?" decision to the trigger.
        return self.record_video_trigger(self.step_id)

    def step_wait(self):
        """Step the wrapped env; capture a frame or start recording as needed."""
        (obs, rews, dones, infos) = self.venv.step_wait()
        self.step_id += 1
        if self.recording:
            self.video_recorder.capture_frame()
            self.recorded_frames += 1
            if (self.recorded_frames > self.video_length):
                # NOTE(review): logger.info is called print-style with two
                # positional args — confirm this logger accepts varargs.
                logger.info('Saving video to ', self.video_recorder.path)
                self.close_video_recorder()
        elif self._video_enabled():
            self.start_video_recorder()
        return (obs, rews, dones, infos)

    def close_video_recorder(self):
        """Finalize the current video file, if any, and reset counters."""
        if self.recording:
            self.video_recorder.close()
        self.recording = False
        self.recorded_frames = 0

    def close(self):
        VecEnvWrapper.close(self)
        self.close_video_recorder()

    def __del__(self):
        # Best-effort cleanup so an unclosed video is still flushed.
        self.close()
class Strength(object):
    """Constraint strength with a total order: lower rank = stronger.

    The named strength singletons (REQUIRED ... WEAKEST) are populated on
    the class attributes elsewhere in the module; they are None until then.
    """

    REQUIRED = None
    STRONG_PREFERRED = None
    PREFERRED = None
    STRONG_DEFAULT = None
    NORMAL = None
    WEAK_DEFAULT = None
    WEAKEST = None

    def __init__(self, strength, name):
        super(Strength, self).__init__()
        self.strength = strength  # numeric rank: 0 (strongest) upward
        self.name = name

    # Bug fix: these helpers take `cls` but were missing @classmethod, so
    # calling them on the class (Strength.stronger(s1, s2)) raised
    # TypeError because `cls` consumed the first strength argument.
    @classmethod
    def stronger(cls, s1, s2):
        """True when s1 is strictly stronger (lower rank) than s2."""
        return s1.strength < s2.strength

    @classmethod
    def weaker(cls, s1, s2):
        """True when s1 is strictly weaker (higher rank) than s2."""
        return s1.strength > s2.strength

    @classmethod
    def weakest_of(cls, s1, s2):
        """Return whichever of s1/s2 is weaker (s2 on ties)."""
        if cls.weaker(s1, s2):
            return s1
        return s2

    @classmethod
    def strongest(cls, s1, s2):
        """Return whichever of s1/s2 is stronger (s2 on ties)."""
        if cls.stronger(s1, s2):
            return s1
        return s2

    def next_weaker(self):
        # Map each rank to the next-weaker named strength singleton.
        strengths = {
            0: self.__class__.WEAKEST,
            1: self.__class__.WEAK_DEFAULT,
            2: self.__class__.NORMAL,
            3: self.__class__.STRONG_DEFAULT,
            4: self.__class__.PREFERRED,
            5: self.__class__.REQUIRED,
        }
        return strengths[self.strength]
def assert_balance_proof(token_network_address: TokenNetworkAddress, app0: RaidenService, app1: RaidenService, saved_state0: SavedState, saved_state1: SavedState) -> None:
    """Assert that app0 (sender) and app1 (recipient) agree on the latest
    balance proof of their shared channel.

    Raises AssertionError with a diagnostic message describing whether the
    message was never sent, not yet received, received-but-rejected, or
    received with corrupted/mismatching fields.
    """
    assert app0.wal
    assert app1.wal
    assert (app0.address == saved_state0.state.our_address)
    assert (app1.address == saved_state1.state.our_address)
    # Each node's own view of the same channel.
    channel0 = views.get_channelstate_by_token_network_and_partner(saved_state0.state, token_network_address, app1.address)
    channel1 = views.get_channelstate_by_token_network_and_partner(saved_state1.state, token_network_address, app0.address)
    assert channel0
    assert channel1
    balanceproof0 = cast(BalanceProofUnsignedState, channel0.our_state.balance_proof)
    balanceproof1 = cast(BalanceProofSignedState, channel1.partner_state.balance_proof)
    if (balanceproof0 is None):
        msg = 'Bug detected. The sender does not have a balance proof, but the recipient does.'
        assert (balanceproof1 is None), msg
        return
    if (balanceproof1 is not None):
        nonce1 = balanceproof1.nonce
    else:
        nonce1 = 0
    # Only the owner of a channel end can bump its nonce, so the recipient
    # can never be ahead of the sender.
    if (balanceproof0.nonce < nonce1):
        msg = "This is a bug, it should never happen. The nonce updates **always** start with the owner of the channel's end. This means for a channel A-B, only A can increase its nonce, same thing with B. At this point, the assertion is failling because this rule was broken, and the partner node has a larger nonce than the sending partner."
        raise AssertionError(msg)
    if (balanceproof0.nonce > nonce1):
        # Sender is ahead; determine whether the message was never sent,
        # sent but not received, or received and rejected.
        sent_balance_proof = get_event_with_balance_proof_by_balance_hash(storage=app0.wal.storage, canonical_identifier=balanceproof0.canonical_identifier, balance_hash=balanceproof0.balance_hash, recipient=app1.address)
        received_balance_proof = get_state_change_with_balance_proof_by_locksroot(storage=app1.wal.storage, canonical_identifier=balanceproof0.canonical_identifier, locksroot=balanceproof0.locksroot, sender=app0.address)
        if (received_balance_proof is not None):
            # Re-run the recipient-side validation to learn why it failed.
            state_change_type = type(received_balance_proof.data)
            if (state_change_type == ReceiveTransferRefund):
                is_valid = False
                innermsg = 'Message is a refund'
            elif (state_change_type == ReceiveUnlock):
                assert isinstance(received_balance_proof, ReceiveUnlock), MYPY_ANNOTATION
                (is_valid, _, innermsg) = channel.handle_unlock(channel_state=channel1, unlock=received_balance_proof)
            elif (state_change_type == ReceiveLockExpired):
                assert isinstance(received_balance_proof, ReceiveLockExpired), MYPY_ANNOTATION
                (is_valid, innermsg, _) = channel.is_valid_lock_expired(state_change=received_balance_proof, channel_state=channel1, sender_state=channel1.partner_state, receiver_state=channel1.our_state, block_number=saved_state1.state.block_number)
            elif (state_change_type == ActionInitMediator):
                assert isinstance(received_balance_proof, ActionInitMediator), MYPY_ANNOTATION
                (is_valid, _, innermsg) = channel.handle_receive_lockedtransfer(channel_state=channel1, mediated_transfer=received_balance_proof.from_transfer)
            elif (state_change_type == ActionInitTarget):
                assert isinstance(received_balance_proof, ActionInitTarget), MYPY_ANNOTATION
                (is_valid, _, innermsg) = channel.handle_receive_lockedtransfer(channel_state=channel1, mediated_transfer=received_balance_proof.from_transfer)
            # NOTE(review): if state_change_type matches none of the above
            # branches, is_valid/innermsg are unbound and the next line
            # raises NameError — confirm all reachable types are covered.
            if (not is_valid):
                msg = f"Node1 received the node0's message but rejected it. This is likely a Raiden bug. reason={innermsg} state_change={received_balance_proof}"
            else:
                msg = f"Node1 received the node0's message at that time it was rejected, this is likely a race condition, node1 has to process the message again. reason={innermsg} state_change={received_balance_proof}"
        elif (sent_balance_proof is None):
            msg = 'Node0 did not send a message with the latest balanceproof, this is likely a Raiden bug.'
        else:
            msg = "Node0 sent the latest balanceproof but Node1 didn't receive, likely the test is missing proper synchronization amongst the nodes."
        msg = f'{msg}. node1={to_checksum_address(app1.address)} node0={to_checksum_address(app0.address)} state_change_id0={saved_state0.state_change_id} state_change_id1={saved_state1.state_change_id}.'
        raise AssertionError(msg)
    # Nonces are equal: every balance-proof field must agree.
    is_equal = ((balanceproof0.nonce == balanceproof1.nonce) and (balanceproof0.transferred_amount == balanceproof1.transferred_amount) and (balanceproof0.locked_amount == balanceproof1.locked_amount) and (balanceproof0.locksroot == balanceproof1.locksroot) and (balanceproof0.canonical_identifier == balanceproof1.canonical_identifier) and (balanceproof0.balance_hash == balanceproof1.balance_hash))
    if (not is_equal):
        msg = f'The balance proof seems corrupted, the recipient has different values than the sender. node1={to_checksum_address(app1.address)} node0={to_checksum_address(app0.address)} state_change_id0={saved_state0.state_change_id} state_change_id1={saved_state1.state_change_id}.'
        raise AssertionError(msg)
class NColors():
    """Numbered curses color pairs used across the UI.

    Constructing an instance registers every pair with curses. The ``i``
    prefixed constants are the inverse (colored-background) variants.
    When *color_filter* is true, red is remapped to blue
    (NOTE(review): presumably a color-blind filter — confirm intent).
    """

    RED = 1
    GREEN = 2
    YELLOW = 3
    BLUE = 4
    MAGENTA = 5
    CYAN = 6
    iRED = 7
    iGREEN = 8
    iYELLOW = 9
    iBLUE = 10
    iMAGENTA = 11
    iCYAN = 12

    def __init__(self, color_filter):
        # Register all foreground-on-black pairs, then the inverse pairs.
        curses.init_pair(NColors.RED, (curses.COLOR_RED if (not color_filter) else curses.COLOR_BLUE), curses.COLOR_BLACK)
        curses.init_pair(NColors.GREEN, curses.COLOR_GREEN, curses.COLOR_BLACK)
        curses.init_pair(NColors.YELLOW, curses.COLOR_YELLOW, curses.COLOR_BLACK)
        curses.init_pair(NColors.BLUE, curses.COLOR_BLUE, curses.COLOR_BLACK)
        curses.init_pair(NColors.MAGENTA, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
        curses.init_pair(NColors.CYAN, curses.COLOR_CYAN, curses.COLOR_BLACK)
        curses.init_pair(NColors.iRED, curses.COLOR_WHITE, (curses.COLOR_RED if (not color_filter) else curses.COLOR_BLUE))
        curses.init_pair(NColors.iGREEN, curses.COLOR_WHITE, curses.COLOR_GREEN)
        curses.init_pair(NColors.iYELLOW, curses.COLOR_BLACK, curses.COLOR_YELLOW)
        curses.init_pair(NColors.iBLUE, curses.COLOR_WHITE, curses.COLOR_BLUE)
        curses.init_pair(NColors.iMAGENTA, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
        curses.init_pair(NColors.iCYAN, curses.COLOR_WHITE, curses.COLOR_CYAN)

    # Idiom fix: these helpers take no self/cls and were missing
    # @staticmethod, so calling them on an *instance* raised TypeError.
    @staticmethod
    def init_grey(num):
        init_colorscale_pair(num, 240, curses.COLOR_BLACK)

    @staticmethod
    def italic():
        # Fall back gracefully on curses builds without A_ITALIC.
        return (curses.A_ITALIC if hasattr(curses, 'A_ITALIC') else curses.A_NORMAL)

    @staticmethod
    def red():
        return curses.color_pair(NColors.RED)

    @staticmethod
    def green():
        return curses.color_pair(NColors.GREEN)

    @staticmethod
    def yellow():
        return curses.color_pair(NColors.YELLOW)

    @staticmethod
    def blue():
        return curses.color_pair(NColors.BLUE)

    @staticmethod
    def magenta():
        return curses.color_pair(NColors.MAGENTA)

    @staticmethod
    def cyan():
        return curses.color_pair(NColors.CYAN)

    @staticmethod
    def ired():
        return curses.color_pair(NColors.iRED)

    @staticmethod
    def igreen():
        return curses.color_pair(NColors.iGREEN)

    @staticmethod
    def iyellow():
        return curses.color_pair(NColors.iYELLOW)

    @staticmethod
    def iblue():
        return curses.color_pair(NColors.iBLUE)

    @staticmethod
    def imagenta():
        return curses.color_pair(NColors.iMAGENTA)

    @staticmethod
    def icyan():
        return curses.color_pair(NColors.iCYAN)
def sync_grad(params):
    """Average the gradients of *params* across all distributed workers.

    No-op when not running distributed. Launches asynchronous SUM
    all-reduces first (so transfers overlap), then waits on each and
    divides by the world size to obtain the mean gradient.
    """
    if not is_distributed():
        return
    pending = [
        (param, torch.distributed.all_reduce(
            param.grad.data, op=torch.distributed.ReduceOp.SUM, async_op=True))
        for param in params
        if param.grad is not None
    ]
    for param, work in pending:
        work.wait()
        param.grad.data /= world_size()
def align_eyes(landmarks, size):
    """Build the rotation matrix that levels the eyes in a face image.

    Parameters
    ----------
    landmarks : 2-D matrix-like of (x, y) landmark coordinates indexed via
        FACIAL_LANDMARKS_IDXS. NOTE(review): indexing uses ``[(0, 1)]``,
        so the eye-center means are assumed to be 1xN matrix rows
        (np.matrix-style) — confirm against the caller.
    size : desired output face side length; kept for API compatibility
        (it was only assigned to unused locals in the original).

    Returns
    -------
    2x3 affine rotation matrix from ``cv2.getRotationMatrix2D``.
    """
    # Dead locals removed: desiredLeftEye / desiredFaceWidth /
    # desiredFaceHeight were computed but never used.
    (lStart, lEnd) = FACIAL_LANDMARKS_IDXS['left_eye']
    (rStart, rEnd) = FACIAL_LANDMARKS_IDXS['right_eye']
    leftEyeCenter = landmarks[lStart:lEnd].mean(axis=0).astype('int')
    rightEyeCenter = landmarks[rStart:rEnd].mean(axis=0).astype('int')
    # Angle of the inter-eye line; the -180 flips it so the rotation
    # brings the face upright.
    dY = (rightEyeCenter[(0, 1)] - leftEyeCenter[(0, 1)])
    dX = (rightEyeCenter[(0, 0)] - leftEyeCenter[(0, 0)])
    angle = (np.degrees(np.arctan2(dY, dX)) - 180)
    # Rotate about the midpoint between the eyes. Cast to plain ints:
    # newer OpenCV releases reject numpy integer scalars in the center.
    eyesCenter = (
        int((leftEyeCenter[(0, 0)] + rightEyeCenter[(0, 0)]) // 2),
        int((leftEyeCenter[(0, 1)] + rightEyeCenter[(0, 1)]) // 2),
    )
    return cv2.getRotationMatrix2D(eyesCenter, angle, 1.0)
@torch.no_grad()
def update_bn_stats(model, data_loader, num_iters=200, logger=None):
    """Recompute BatchNorm running statistics as true averages.

    Runs *num_iters* forward passes and replaces each BN layer's
    running_mean/running_var with the average of the per-batch statistics,
    restoring the original momenta afterwards ("precise BN").

    NOTE(review): the leading decorator was mangled to ``_grad()`` in this
    dump; restored as ``@torch.no_grad()``, consistent with the no-grad
    forward below — confirm against upstream.

    Args:
        model: the (possibly parallel-wrapped) model; left in train mode.
        data_loader: iterable of batches accepted as ``model(**data)``.
        num_iters: number of batches to average over.
        logger: optional logger passed through to ``print_log``.
    """
    model.train()
    assert (len(data_loader) >= num_iters), f'length of dataloader {len(data_loader)} must be greater than iteration number {num_iters}'
    # Forward through the parallel wrapper but inspect BN on the bare module.
    if is_parallel_module(model):
        parallel_module = model
        model = model.module
    else:
        parallel_module = model
    bn_layers = [m for m in model.modules() if (m.training and isinstance(m, _BatchNorm))]
    if (len(bn_layers) == 0):
        print_log('No BN found in model', logger=logger, level=logging.WARNING)
        return
    print_log(f'{len(bn_layers)} BN found', logger=logger)
    for m in model.modules():
        if (m.training and isinstance(m, (_InstanceNorm, GroupNorm))):
            print_log('IN/GN stats will be updated like training.', logger=logger, level=logging.WARNING)
    # momentum=1.0 makes each BN's running stats equal to the *current*
    # batch stats; we average those ourselves across iterations.
    momentum_actual = [bn.momentum for bn in bn_layers]
    for bn in bn_layers:
        bn.momentum = 1.0
    running_mean = [torch.zeros_like(bn.running_mean) for bn in bn_layers]
    running_var = [torch.zeros_like(bn.running_var) for bn in bn_layers]
    finish_before_loader = False
    prog_bar = mmcv.ProgressBar(len(data_loader))
    for (ind, data) in enumerate(data_loader):
        with torch.no_grad():
            parallel_module(**data, return_loss=False)
        prog_bar.update()
        for (i, bn) in enumerate(bn_layers):
            # Incremental mean: m += (x - m) / (iteration + 1).
            running_mean[i] += ((bn.running_mean - running_mean[i]) / (ind + 1))
            running_var[i] += ((bn.running_var - running_var[i]) / (ind + 1))
        if ((ind + 1) >= num_iters):
            finish_before_loader = True
            break
    assert finish_before_loader, f'Dataloader stopped before iteration {num_iters}'
    # Install the averaged statistics and restore the original momenta.
    for (i, bn) in enumerate(bn_layers):
        bn.running_mean = running_mean[i]
        bn.running_var = running_var[i]
        bn.momentum = momentum_actual[i]
class DNSAddress(DNSRecord):
    """A DNS address (A/AAAA) record.

    *address* holds the raw packed binary address bytes; *scope_id* is the
    optional IPv6 scope. The hash is precomputed in ``__init__`` because
    these records are immutable and heavily used as dict/set keys.
    """

    __slots__ = ('_hash', 'address', 'scope_id')

    def __init__(self, name: str, type_: int, class_: int, ttl: int, address: bytes, scope_id: Optional[int]=None, created: Optional[float]=None) -> None:
        super().__init__(name, type_, class_, ttl, created)
        self.address = address
        self.scope_id = scope_id
        # All fields participating in equality are fixed at construction.
        self._hash = hash((self.key, type_, self.class_, address, scope_id))

    def write(self, out: 'DNSOutgoing') -> None:
        """Serialize the packed address into the outgoing message."""
        out.write_string(self.address)

    def __eq__(self, other: Any) -> bool:
        return (isinstance(other, DNSAddress) and self._eq(other))

    def _eq(self, other) -> bool:
        # Equality requires address, scope id and the base DNS-entry fields.
        return ((self.address == other.address) and (self.scope_id == other.scope_id) and self._dns_entry_matches(other))

    def __hash__(self) -> int:
        return self._hash

    def __repr__(self) -> str:
        try:
            # Render the packed bytes as a human-readable IPv4/IPv6 literal.
            return self.to_string(socket.inet_ntop((socket.AF_INET6 if _is_v6_address(self.address) else socket.AF_INET), self.address))
        except (ValueError, OSError):
            # Malformed address bytes: fall back to the raw bytes repr.
            return self.to_string(str(self.address))
def get_train_overlap(docs_by_task_set, ngrams_path, limit):
    """Find task documents whose n-grams overlap the pretraining n-gram dump.

    Builds (or loads cached) ngram -> {doc_id} lookups per (task, set),
    merges them, then streams every ``*.sorted.zst`` n-gram file under
    *ngrams_path*, marking each document that shares at least one n-gram.
    Overlap sets are cached as ``.overlaps`` pickles under ``data/<task>/``.

    Returns a dict mapping task_name -> set of overlapping doc ids.
    NOTE(review): tasks appearing in several sets overwrite each other in
    the returned dict (keyed by task_name only) — confirm intended.
    """
    info_dict_path = os.path.join(ngrams_path, 'info.json')
    # NOTE(review): file handles from bare open() calls throughout this
    # function are never explicitly closed.
    info_dict = json.load(open(info_dict_path, 'r'))
    ngrams_n_size = info_dict['ngram_size']
    janitor = Janitor()
    print('Building Lookups...')
    start = time.perf_counter()

    def get_overlaps_dump_path(task_name, task_set, ngrams_n_size, limit):
        # Cache file for the final per-(task, set) overlap result.
        return f'data/{task_name}/{task_set}_{ngrams_n_size}grams_limit{limit}.overlaps'

    lookups = {}
    duplicates = {}
    sets_to_decontaminate = len(docs_by_task_set.keys())
    for ((task_name, task_set), docs) in docs_by_task_set.items():
        if (not os.path.exists(f'data/{task_name}')):
            os.mkdir(f'data/{task_name}')
        # Reuse a previously computed overlap set when available.
        overlaps_dump_path = get_overlaps_dump_path(task_name, task_set, ngrams_n_size, limit)
        if os.path.exists(overlaps_dump_path):
            duplicates[(task_name, task_set)] = pickle.load(open(overlaps_dump_path, 'rb'))
            sets_to_decontaminate -= 1
            continue
        else:
            duplicates[(task_name, task_set)] = set()
        # Build (or load) the ngram -> {doc ids} lookup for this task set.
        task_set_lookup_path = f'data/{task_name}/{task_set}_{ngrams_n_size}grams_limit{limit}.lookup'
        if os.path.exists(task_set_lookup_path):
            print(f'{task_set_lookup_path} available, loading...')
            lookups[(task_name, task_set)] = pickle.load(open(task_set_lookup_path, 'rb'))
        else:
            print(f'{task_set_lookup_path} not available, building...')
            lookup = collections.defaultdict(set)
            for (doc_id, document) in enumerate(docs):
                ngrams = word_ngrams(janitor.normalize_string(document), ngrams_n_size)
                for ngram in ngrams:
                    lookup[ngram].add(doc_id)
            pickle.dump(lookup, open(task_set_lookup_path, 'wb'))
            lookups[(task_name, task_set)] = lookup
    elapsed = (time.perf_counter() - start)
    print(f'Building lookups took {elapsed:0.5f} seconds.')
    matched_ngrams = []
    if (sets_to_decontaminate > 0):
        print('Merging lookups...')
        start = time.perf_counter()
        # ngram -> [(task_name, task_set, doc_ids), ...] across all sets.
        merged_lookup = collections.defaultdict(list)
        for ((task_name, task_set), lookup) in lookups.items():
            for (ngram, doc_ids) in lookup.items():
                merged_lookup[ngram].append((task_name, task_set, doc_ids))
        elapsed = (time.perf_counter() - start)
        print(f'Merging lookups took {elapsed:0.5f} seconds.')
        print(f'{ngrams_n_size} grams files found in {ngrams_path}:')
        files = glob.glob(os.path.join(ngrams_path, f'*.sorted.zst'))
        print(files)
        for file in files:
            start = time.perf_counter()
            print(f'Scanning {file}')
            reader = ZStdTextReader(file)
            total_ngrams = 0
            unique_ngrams = 0
            matching_unique = 0
            non_matching_unique = 0
            current_ngram = ''
            for line in reader.read_tqdm():
                total_ngrams += 1
                # Each line is "<ngram> <document_id>"; the file is sorted
                # by ngram, so repeats of the same ngram are adjacent.
                [ngram, document_id] = line.rsplit(' ', 1)
                if (ngram != current_ngram):
                    unique_ngrams += 1
                    current_ngram = ngram
                    if (ngram in merged_lookup):
                        matched_ngrams.append(ngram)
                        matching_unique += 1
                        for (task_name, task_set, doc_ids) in merged_lookup[ngram]:
                            task_doc_set = duplicates[(task_name, task_set)]
                            for doc_id in doc_ids:
                                task_doc_set.add(doc_id)
                        # A unique ngram only needs to be matched once.
                        del merged_lookup[ngram]
                    else:
                        non_matching_unique += 1
            print(f'Total Ngrams: {total_ngrams}')
            print(f'Unique Ngrams: {unique_ngrams}')
            print(f'Unique Matching: {matching_unique}')
            print(f'Unique Non Matching: {non_matching_unique}')
            print('Matched ngrams:')
            for ngram in matched_ngrams:
                print(ngram)
            elapsed = (time.perf_counter() - start)
            print(f'Read took {elapsed:0.5f} seconds.')
            print(f'Speed: {((os.path.getsize(file) / 1000000.0) / elapsed)}MB/second')
    print(duplicates)
    # Persist the overlap sets so subsequent runs can skip the scan.
    for ((task_name, task_set), doc_ids) in duplicates.items():
        overlaps_dump_path = get_overlaps_dump_path(task_name, task_set, ngrams_n_size, limit)
        pickle.dump(doc_ids, open(overlaps_dump_path, 'wb'))
    return {task_name: doc_ids for ((task_name, task_set), doc_ids) in duplicates.items()}
def _get_nargs_pattern_wrapper(self: argparse.ArgumentParser, action: argparse.Action) -> str:
    """Build the nargs regex pattern for actions carrying a custom range.

    Actions without an attached nargs range fall back to the original
    argparse implementation.
    """
    bounds = action.get_nargs_range()
    if bounds is None:
        return orig_argument_parser_get_nargs_pattern(self, action)
    lower, upper = bounds
    # An unbounded upper limit is expressed as an open-ended {m,} quantifier.
    upper_text = '' if upper == constants.INFINITY else upper
    pattern = f'(-*A{{{lower},{upper_text}}}-*)'
    if action.option_strings:
        # Optionals must not consume dash-prefixed tokens.
        pattern = pattern.replace('-*', '').replace('-', '')
    return pattern
class LightSource(TutorialObject):
    """A burnable light source (wood splinter) with a limited burn time."""

    def at_init(self):
        # A lit torch cannot survive a server reload (its delay timer is
        # lost), so remove it rather than leave a phantom light source.
        if self.db.is_giving_light:
            self.delete()

    def at_object_creation(self):
        """Set burn parameters, description and the light command set."""
        super().at_object_creation()
        self.db.tutorial_info = 'This object can be lit to create light. It has a timeout for how long it burns.'
        self.db.is_giving_light = False
        self.db.burntime = (60 * 3)  # seconds of light per lighting
        self.db.desc = 'A splinter of wood with remnants of resin on it, enough for burning.'
        self.cmdset.add_default(CmdSetLight, permanent=True)

    def _burnout(self):
        """Timer callback: extinguish, notify onlookers and delete self."""
        self.db.is_giving_light = False
        try:
            # Held by a character standing in a room.
            self.location.location.msg_contents(("%s's %s flickers and dies." % (self.location, self.key)), exclude=self.location)
            self.location.msg(('Your %s flickers and dies.' % self.key))
            self.location.location.check_light_state()
        except AttributeError:
            try:
                # Lying directly on the room floor.
                self.location.msg_contents(('A %s on the floor flickers and dies.' % self.key))
                self.location.location.check_light_state()
            except AttributeError:
                pass
        self.delete()

    def light(self):
        """Light the source; returns False if it is already burning."""
        if self.db.is_giving_light:
            return False
        self.db.is_giving_light = True
        try:
            self.location.location.check_light_state()
        except AttributeError:
            try:
                self.location.check_light_state()
            except AttributeError:
                pass
        finally:
            # Bug fix: honor the configured burntime instead of a
            # hard-coded 60 * 3 duplicating its default value.
            self.deferred = delay(self.db.burntime, self._burnout)
        return True
def test_do_class_cleanups_on_setupclass_failure(pytester: Pytester) -> None:
    """A failing setUpClass must still run registered class cleanups once.

    The unittest case errors (setUpClass asserts False), while the
    companion plain test function confirms the cleanup ran exactly once.
    NOTE(review): the embedded setUpClass lacks @classmethod and the
    embedded indentation looks collapsed — confirm against upstream.
    """
    testpath = pytester.makepyfile('\n import unittest\n class MyTestCase(unittest.TestCase):\n values = []\n \n def setUpClass(cls):\n def cleanup():\n cls.values.append(1)\n cls.addClassCleanup(cleanup)\n assert False\n def test_one(self):\n pass\n def test_cleanup_called_exactly_once():\n assert MyTestCase.values == [1]\n ')
    reprec = pytester.inline_run(testpath)
    (passed, skipped, failed) = reprec.countoutcomes()
    assert (failed == 1)  # MyTestCase.test_one fails via setUpClass
    assert (passed == 1)  # test_cleanup_called_exactly_once passes
def create_stairs_split(bm, face, prop):
    """Create the quad face from which the stairs will be extruded.

    The new face has width ``prop.size_offset.size.x`` and height
    ``prop.step_height``, is offset vertically so the configured number of
    steps fits below it, and is pushed inward from *face* by
    ``prop.depth_offset`` along the face normal. Returns the new face with
    its normal aligned to *face*.
    """
    xyz = local_xyz(face)
    size = Vector((prop.size_offset.size.x, prop.step_height))
    h_height = (calc_face_dimensions(face)[1] / 2)
    # Shift down so step_count steps (plus half a step of clearance) fit.
    f = create_face(bm, size, (prop.size_offset.offset - Vector((0, (h_height - (prop.step_height * (prop.step_count + 0.5)))))), xyz)
    bmesh.ops.translate(bm, verts=f.verts, vec=(face.calc_center_bounds() - (face.normal * prop.depth_offset)))
    # Keep the new face's winding consistent with the parent face.
    if (not vec_equal(f.normal, face.normal)):
        bmesh.ops.reverse_faces(bm, faces=[f])
    return f
class RankingScorer():
    """Score every (qid, pid) pair of a ranking with a distillation Scorer.

    ``run()`` writes one JSON line per query — ``[qid, [(score, pid), ...]]``
    — to ``distillation_scores.json`` inside the current Run, plus a
    ``.meta`` sidecar carrying metadata and provenance.
    """

    def __init__(self, scorer: Scorer, ranking: Ranking):
        self.scorer = scorer
        self.ranking = ranking.tolist()
        self.__provenance = Provenance()
        print_message(f'#> Loaded ranking with {len(self.ranking)} qid--pid pairs!')

    def provenance(self):
        return self.__provenance

    def run(self):
        """Score all pairs, group scores by qid, persist, return output path."""
        print_message(f'#> Starting..')
        # Ranking rows are (qid, pid, ...); any extra columns are ignored.
        (qids, pids, *_) = zipstar(self.ranking)
        distillation_scores = self.scorer.launch(qids, pids)
        scores_by_qid = defaultdict(list)
        for (qid, pid, score) in tqdm.tqdm(zip(qids, pids, distillation_scores)):
            scores_by_qid[qid].append((score, pid))
        with Run().open('distillation_scores.json', 'w') as f:
            for qid in tqdm.tqdm(scores_by_qid):
                obj = (qid, scores_by_qid[qid])
                f.write((ujson.dumps(obj) + '\n'))
            output_path = f.name
            print_message(f'#> Saved the distillation_scores to {output_path}')
        with Run().open(f'{output_path}.meta', 'w') as f:
            d = {}
            d['metadata'] = get_metadata_only()
            d['provenance'] = self.provenance()
            line = ujson.dumps(d, indent=4)
            f.write(line)
        return output_path
# NOTE(review): the decorator line was truncated in this dump to
# "_processor('multi_hot_answer_from_vocab')"; restored as the MMF-style
# registry registration below — confirm against upstream.
@registry.register_processor('multi_hot_answer_from_vocab')
class MultiHotAnswerFromVocabProcessor(VQAAnswerProcessor):
    """VQA answer processor producing a multi-hot target over the vocab."""

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)

    def compute_answers_scores(self, answers_indices):
        """Return a multi-hot vector: 1 at each answered index, UNK zeroed.

        Unlike soft-score variants, every present answer gets full weight;
        the UNK index is explicitly cleared so unknown answers contribute
        nothing to the target.
        """
        scores = torch.zeros(self.get_vocab_size(), dtype=torch.float)
        scores[answers_indices] = 1
        scores[self.answer_vocab.UNK_INDEX] = 0
        return scores
class TestNumericalQEOMESCCalculation(QiskitChemistryTestCase):
    """Excited-states (QEOM / eigensolver) tests on the H2 molecule.

    Reference energies are the H2/STO-3G ground state plus known
    excitation gaps; each solver's computed energies must match to 4
    decimal places.
    """

    def setUp(self):
        super().setUp()
        aqua_globals.random_seed = 8
        try:
            self.driver = PySCFDriver(atom='H .0 .0 .0; H .0 .0 0.75', unit=UnitsType.ANGSTROM, charge=0, spin=0, basis='sto3g')
        except QiskitChemistryError:
            self.skipTest('PYSCF driver does not appear to be installed')
        # Ground-state energy plus three excited-state offsets.
        self.reference_energies = [(- 1.8427016), ((- 1.8427016) + 0.5943372), ((- 1.8427016) + 0.), ((- 1.8427016) + 1.5969296)]
        self.transformation = FermionicTransformation(qubit_mapping=FermionicQubitMappingType.JORDAN_WIGNER)
        solver = NumPyEigensolver()
        self.ref = solver
        self.quantum_instance = QuantumInstance(BasicAer.get_backend('statevector_simulator'), seed_transpiler=90, seed_simulator=12)

    def test_numpy_mes(self):
        """QEOM on top of the exact NumPy minimum-eigensolver ground state."""
        solver = NumPyMinimumEigensolver()
        gsc = GroundStateEigensolver(self.transformation, solver)
        esc = QEOM(gsc, 'sd')
        results = esc.solve(self.driver)
        for (idx, _) in enumerate(self.reference_energies):
            self.assertAlmostEqual(results.computed_energies[idx], self.reference_energies[idx], places=4)

    def test_vqe_mes(self):
        """QEOM on top of a VQE-UCCSD ground state."""
        solver = VQEUCCSDFactory(self.quantum_instance)
        gsc = GroundStateEigensolver(self.transformation, solver)
        esc = QEOM(gsc, 'sd')
        results = esc.solve(self.driver)
        for (idx, _) in enumerate(self.reference_energies):
            self.assertAlmostEqual(results.computed_energies[idx], self.reference_energies[idx], places=4)

    def test_numpy_factory(self):
        """Direct excited-states solve, filtered to two-particle states."""

        def filter_criterion(eigenstate, eigenvalue, aux_values):
            # Keep only eigenstates whose particle number is 2.
            return np.isclose(aux_values[0][0], 2.0)

        solver = NumPyEigensolverFactory(filter_criterion=filter_criterion)
        esc = ExcitedStatesEigensolver(self.transformation, solver)
        results = esc.solve(self.driver)
        # Collapse (near-)degenerate energies before comparing.
        computed_energies = [results.computed_energies[0]]
        for comp_energy in results.computed_energies[1:]:
            if (not np.isclose(comp_energy, computed_energies[(- 1)])):
                computed_energies.append(comp_energy)
        for (idx, _) in enumerate(self.reference_energies):
            self.assertAlmostEqual(computed_energies[idx], self.reference_energies[idx], places=4)
def test_step_not_match(sentence, expected_not_matching_step, steps):
    """Check that *sentence* does NOT match the given step (or any step).

    Prints a progress line, then returns True when the expectation holds
    and False (after reporting the failure) when the sentence matched the
    step it was not supposed to.
    """
    step_label = (
        colorful.cyan(expected_not_matching_step)
        if expected_not_matching_step
        else 'ANY'
    )
    sys.stdout.write('{0} STEP "{1}" SHOULD NOT MATCH {2} '.format(
        colorful.yellow('>>'), colorful.cyan(sentence), step_label))
    match = match_step(sentence, steps)
    # A match is only a failure when no specific step was expected, or the
    # matched function is exactly the forbidden one.
    if match and (not expected_not_matching_step or match.func.__name__ == expected_not_matching_step):
        output_failure(None, ["Expected sentence did match {0} but it shouldn't".format(expected_not_matching_step)])
        return False
    print(str(colorful.bold_green('')))
    return True
def entry_point_move_plans_between_datasets():
    """CLI entry point: copy a plans file from one nnU-Net dataset to another."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', type=str, required=True,
                        help='Source dataset name or id')
    parser.add_argument('-t', type=str, required=True,
                        help='Target dataset name or id')
    parser.add_argument('-sp', type=str, required=True,
                        help='Source plans identifier. If your plans are named "nnUNetPlans.json" then the identifier would be nnUNetPlans')
    parser.add_argument('-tp', type=str, required=False, default=None,
                        help='Target plans identifier. Default is None meaning the source plans identifier will be kept. Not recommended if the source plans identifier is a default nnU-Net identifier such as nnUNetPlans!!!')
    cli_args = parser.parse_args()
    move_plans_between_datasets(cli_args.s, cli_args.t, cli_args.sp, cli_args.tp)
class Conv2d(nn.Conv2d, RelProp):
    """Conv2d with layer-wise relevance propagation (LRP) rules.

    ``relprop`` implements the alpha-beta rule (with the z^B rule for the
    3-channel pixel layer); ``RAP_relprop`` implements Relative Attributing
    Propagation. ``self.X`` — the stored forward input — and ``gradprop``
    are provided by the RelProp base class.
    """

    def gradprop2(self, DY, weight):
        # Transposed convolution matching this layer's geometry, with the
        # output_padding required to recover the exact input spatial size.
        Z = self.forward(self.X)
        output_padding = (self.X.size()[2] - ((((Z.size()[2] - 1) * self.stride[0]) - (2 * self.padding[0])) + self.kernel_size[0]))
        return F.conv_transpose2d(DY, weight, stride=self.stride, padding=self.padding, output_padding=output_padding)

    def relprop(self, R, alpha):
        """Propagate relevance *R* backward through this convolution."""
        if (self.X.shape[1] == 3):
            # First (pixel) layer: z^B rule bounded by the per-sample min
            # (L) and max (H) of the input.
            pw = torch.clamp(self.weight, min=0)
            nw = torch.clamp(self.weight, max=0)
            X = self.X
            L = ((self.X * 0) + torch.min(torch.min(torch.min(self.X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3, keepdim=True)[0])
            H = ((self.X * 0) + torch.max(torch.max(torch.max(self.X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3, keepdim=True)[0])
            # Small epsilon avoids division by zero in S = R / Za.
            Za = (((torch.conv2d(X, self.weight, bias=None, stride=self.stride, padding=self.padding) - torch.conv2d(L, pw, bias=None, stride=self.stride, padding=self.padding)) - torch.conv2d(H, nw, bias=None, stride=self.stride, padding=self.padding)) + 1e-09)
            S = (R / Za)
            C = (((X * self.gradprop2(S, self.weight)) - (L * self.gradprop2(S, pw))) - (H * self.gradprop2(S, nw)))
            R = C
        else:
            # Hidden layers: alpha-beta rule over the sign combinations of
            # weights and activations (beta = alpha - 1 keeps conservation).
            beta = (alpha - 1)
            pw = torch.clamp(self.weight, min=0)
            nw = torch.clamp(self.weight, max=0)
            px = torch.clamp(self.X, min=0)
            nx = torch.clamp(self.X, max=0)

            def f(w1, w2, x1, x2):
                # Relevance through the (w1, x1) and (w2, x2) branches.
                Z1 = F.conv2d(x1, w1, bias=None, stride=self.stride, padding=self.padding)
                Z2 = F.conv2d(x2, w2, bias=None, stride=self.stride, padding=self.padding)
                S1 = safe_divide(R, Z1)
                S2 = safe_divide(R, Z2)
                C1 = (x1 * self.gradprop(Z1, x1, S1)[0])
                C2 = (x2 * self.gradprop(Z2, x2, S2)[0])
                return (C1 + C2)
            activator_relevances = f(pw, nw, px, nx)
            inhibitor_relevances = f(nw, pw, px, nx)
            R = ((alpha * activator_relevances) - (beta * inhibitor_relevances))
        return R

    def RAP_relprop(self, R_p):
        """Relative Attributing Propagation backward through this conv."""

        def shift_rel(R, R_val):
            # Redistribute R_val evenly over R's non-zero positions so the
            # total relevance stays conserved.
            R_nonzero = torch.ne(R, 0).type(R.type())
            shift = (safe_divide(R_val, torch.sum(R_nonzero, dim=[1, 2, 3], keepdim=True)) * torch.ne(R, 0).type(R.type()))
            K = (R - shift)
            return K

        def pos_prop(R, Za1, Za2, x1):
            # Split relevance by sign and propagate through both pre-
            # activation maps, then re-balance with shift_rel.
            R_pos = torch.clamp(R, min=0)
            R_neg = torch.clamp(R, max=0)
            # NOTE(review): safe_divide((Za1+Za2), (Za1+Za2)) is ~1 except
            # where the sum is zero — confirm this masking is intended.
            S1 = safe_divide((R_pos * safe_divide((Za1 + Za2), (Za1 + Za2))), Za1)
            C1 = (x1 * self.gradprop(Za1, x1, S1)[0])
            S1n = safe_divide((R_neg * safe_divide((Za1 + Za2), (Za1 + Za2))), Za2)
            C1n = (x1 * self.gradprop(Za2, x1, S1n)[0])
            S2 = safe_divide((R_pos * safe_divide(Za2, (Za1 + Za2))), Za2)
            C2 = (x1 * self.gradprop(Za2, x1, S2)[0])
            S2n = safe_divide((R_neg * safe_divide(Za2, (Za1 + Za2))), Za2)
            C2n = (x1 * self.gradprop(Za2, x1, S2n)[0])
            Cp = (C1 + C2)
            Cn = (C2n + C1n)
            C = (Cp + Cn)
            C = shift_rel(C, (C.sum(dim=[1, 2, 3], keepdim=True) - R.sum(dim=[1, 2, 3], keepdim=True)))
            return C

        def f(R, w1, w2, x1, x2):
            # Restrict propagation to positions that received relevance.
            R_nonzero = R.ne(0).type(R.type())
            Za1 = (F.conv2d(x1, w1, bias=None, stride=self.stride, padding=self.padding) * R_nonzero)
            Za2 = ((- F.conv2d(x1, w2, bias=None, stride=self.stride, padding=self.padding)) * R_nonzero)
            Zb1 = ((- F.conv2d(x2, w1, bias=None, stride=self.stride, padding=self.padding)) * R_nonzero)
            Zb2 = (F.conv2d(x2, w2, bias=None, stride=self.stride, padding=self.padding) * R_nonzero)
            C1 = pos_prop(R, Za1, Za2, x1)
            C2 = pos_prop(R, Zb1, Zb2, x2)
            return (C1 + C2)

        def backward(R_p, px, nx, pw, nw):
            Rp = f(R_p, pw, nw, px, nx)
            return Rp

        def final_backward(R_p, pw, nw, X1):
            # z^B-style rule for the first (pixel) layer, as in relprop.
            X = X1
            L = ((X * 0) + torch.min(torch.min(torch.min(X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3, keepdim=True)[0])
            H = ((X * 0) + torch.max(torch.max(torch.max(X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3, keepdim=True)[0])
            Za = ((torch.conv2d(X, self.weight, bias=None, stride=self.stride, padding=self.padding) - torch.conv2d(L, pw, bias=None, stride=self.stride, padding=self.padding)) - torch.conv2d(H, nw, bias=None, stride=self.stride, padding=self.padding))
            Sp = safe_divide(R_p, Za)
            Rp = (((X * self.gradprop2(Sp, self.weight)) - (L * self.gradprop2(Sp, pw))) - (H * self.gradprop2(Sp, nw)))
            return Rp
        pw = torch.clamp(self.weight, min=0)
        nw = torch.clamp(self.weight, max=0)
        px = torch.clamp(self.X, min=0)
        nx = torch.clamp(self.X, max=0)
        # 3 input channels signals the pixel layer -> bounded z^B rule.
        if (self.X.shape[1] == 3):
            Rp = final_backward(R_p, pw, nw, self.X)
        else:
            Rp = backward(R_p, px, nx, pw, nw)
        return Rp
class GetCrtcInfo(rq.ReplyRequest):
    """RandR GetCrtcInfo request (minor opcode 20): query a CRTC's state.

    The reply carries the CRTC geometry (x, y, width, height), the active
    mode and rotation, the rotations it supports, and both the currently
    connected outputs and all outputs it could drive.
    """
    # Request: extension opcode + RRGetCrtcInfo, the CRTC id and the
    # config timestamp the client believes is current.
    _request = rq.Struct(rq.Card8('opcode'), rq.Opcode(20), rq.RequestLength(), rq.Card32('crtc'), rq.Card32('config_timestamp'))
    # Reply: status, geometry/mode/rotation fields, then the two
    # variable-length output lists prefixed by their lengths.
    _reply = rq.Struct(rq.ReplyCode(), rq.Card8('status'), rq.Card16('sequence_number'), rq.ReplyLength(), rq.Card32('timestamp'), rq.Int16('x'), rq.Int16('y'), rq.Card16('width'), rq.Card16('height'), rq.Card32('mode'), rq.Card16('rotation'), rq.Card16('possible_rotations'), rq.LengthOf('outputs', 2), rq.LengthOf('possible_outputs', 2), rq.List('outputs', rq.Card32Obj), rq.List('possible_outputs', rq.Card32Obj))
# NOTE(review): the bare class-level annotations together with
# field(init=False) imply this class carries a @dataclass decorator in the
# original source; it is not visible in this dump — confirm.
class YieldInfo():
    """Bundle a ``yield`` expression with its enclosing statement and source.

    Used when rewriting generator-style code: knows which source lines the
    statement spans and how to split an assignment target / yielded value
    into matching element lists.
    """

    # The yield expression node itself.
    yield_node: ast.Yield
    # The statement containing the yield (Expr or Assign in simple cases).
    statement_node: ast.stmt
    # Full source lines of the file the nodes came from.
    lines: List[str]
    # [start, end] line range of statement_node, computed after init.
    line_range: List[int] = field(init=False)

    def __post_init__(self) -> None:
        self.line_range = get_line_range_for_node(self.statement_node, self.lines)

    def is_assign_or_expr(self) -> bool:
        """True when the yield is the entire value of an Expr/Assign."""
        if (not isinstance(self.statement_node, (ast.Expr, ast.Assign))):
            return False
        return (self.statement_node.value is self.yield_node)

    def get_indentation(self) -> int:
        # Indentation of the statement's first source line.
        return get_indentation(self.lines[(self.statement_node.lineno - 1)])

    def target_and_value(self) -> Tuple[(List[ast.AST], List[ast.AST])]:
        """Return parallel lists of assignment targets and yielded values.

        Tuple targets are paired element-wise with tuple (or ``tuple(...)``
        call) values; otherwise single-element lists are returned.
        """
        assert (self.yield_node.value is not None)
        if isinstance(self.statement_node, ast.Assign):
            if (not isinstance(self.statement_node.targets[0], ast.Tuple)):
                # Single target: x = yield value
                return ([self.statement_node.targets[0]], [self.yield_node.value])
            elif (isinstance(self.yield_node.value, ast.Call) and isinstance(self.yield_node.value.func, ast.Name) and (self.yield_node.value.func.id == 'tuple') and isinstance(self.yield_node.value.args[0], ast.Tuple)):
                # a, b = yield tuple((x, y)) -> pair elements directly.
                return (self.statement_node.targets[0].elts, self.yield_node.value.args[0].elts)
            elif isinstance(self.yield_node.value, ast.Tuple):
                # a, b = yield (x, y) -> pair elements directly.
                return (self.statement_node.targets[0].elts, self.yield_node.value.elts)
            return ([self.statement_node.targets[0]], [self.yield_node.value])
        else:
            assert False, f'Unrecognized node {self.statement_node}'
def test_make_vdom_constructor():
    """make_vdom_constructor builds VDOM dicts and honors allow_children."""
    elmt = make_vdom_constructor('some-tag')
    # Attributes and nested children land under their own keys.
    assert (elmt({'data': 1}, [elmt()]) == {'tagName': 'some-tag', 'children': [{'tagName': 'some-tag'}], 'attributes': {'data': 1}})
    no_children = make_vdom_constructor('no-children', allow_children=False)
    # Passing children to a childless element must raise TypeError.
    with pytest.raises(TypeError, match='cannot have children'):
        no_children([1, 2, 3])
    assert (no_children() == {'tagName': 'no-children'})
class SerialAdapter(Adapter):
    """Adapter wrapping pyserial for instrument communication.

    *port* may be an already-open ``serial.Serial`` instance (reused
    as-is) or a port name that is passed to ``serial.Serial`` together
    with **kwargs.
    """

    def __init__(self, port, preprocess_reply=None, write_termination='', read_termination='', **kwargs):
        super().__init__(preprocess_reply=preprocess_reply)
        if isinstance(port, serial.SerialBase):
            self.connection = port
        else:
            self.connection = serial.Serial(port, **kwargs)
        self.write_termination = write_termination
        self.read_termination = read_termination

    def _write(self, command, **kwargs):
        """Append the write terminator, encode and send *command*."""
        command += self.write_termination
        self._write_bytes(command.encode(), **kwargs)

    def _write_bytes(self, content, **kwargs):
        self.connection.write(content, **kwargs)

    def _read(self, **kwargs):
        """Read until the termination character (or timeout) and strip it."""
        read = self._read_bytes((- 1), break_on_termchar=True, **kwargs).decode()
        if self.read_termination:
            return read.split(self.read_termination)[0]
        else:
            return read

    def _read_bytes(self, count, break_on_termchar, **kwargs):
        """Read bytes: until the termchar, a fixed count, or until timeout.

        A negative *count* without a termination character reads everything
        until a read times out.
        """
        if (break_on_termchar and self.read_termination):
            return self.connection.read_until(self.read_termination.encode(), (count if (count > 0) else None), **kwargs)
        elif (count >= 0):
            return self.connection.read(count, **kwargs)
        else:
            # NOTE(review): kwargs are not forwarded here — confirm whether
            # _read_bytes_until_timeout should receive them.
            return self._read_bytes_until_timeout()

    def _read_bytes_until_timeout(self, chunk_size=256, **kwargs):
        # A short read (< chunk_size) signals the port timed out -> done.
        data = bytes()
        while True:
            chunk = self.connection.read(chunk_size, **kwargs)
            data += chunk
            if (len(chunk) < chunk_size):
                return data

    def flush_read_buffer(self):
        """Discard anything pending in the input buffer."""
        self.connection.reset_input_buffer()

    def __repr__(self):
        return ("<SerialAdapter(port='%s')>" % self.connection.port)
def get_criteo_dataset(params):
    """Build (or load from cache) the train/test splits for a Criteo-style
    delayed-feedback dataset named by ``params['dataset']``.

    The dataset name selects a labeling scheme (baseline, dfm, tn/dp,
    bidefuse, fsiw, ...) via substring matching; ``cut_hour``/``cd`` style
    hyper-parameters are parsed out of the name itself.

    Returns a dict with 'train' and 'test' sub-dicts of numpy arrays.
    """
    name = params['dataset']
    print('loading datasest {}'.format(name))
    cache_path = os.path.join(params['data_cache_path'], '{}.pkl'.format(name))
    # The cache path sentinel is the literal string 'None', not the None object.
    if (params['data_cache_path'] != 'None') and os.path.isfile(cache_path):
        print('cache_path {}'.format(cache_path))
        print('\nloading from dataset cache')
        with open(cache_path, 'rb') as f:
            data = pickle.load(f)
        train_data = data['train']
        test_data = data['test']
    else:
        print('\nbuilding dataset')
        df, click_ts, pay_ts = get_data_df(params)
        # Datasets with '1d' in the name use a one-day attribution window.
        if '1d' in name:
            data = DataDF(df, click_ts, pay_ts, attr_win=SECONDS_A_DAY)
        else:
            data = DataDF(df, click_ts, pay_ts)
        # NOTE: branch order matters below -- more specific names are
        # matched before names they contain as substrings.
        if name == 'baseline_prtrain':
            train_data = data.sub_days(0, 30).shuffle()
            # Unconverted clicks get a fake pay time past the training window.
            mask = train_data.pay_ts < 0
            train_data.pay_ts[mask] = (30 * SECONDS_A_DAY) + train_data.click_ts[mask]
            test_data = data.sub_days(30, 60)
        elif 'baseline_pretrain_v2' in name:
            cut_hour = parse_float_arg(name, 'cut_hour')
            ob_win = int(SECONDS_AN_HOUR * cut_hour)
            train_data = data.sub_days_v2(0, 30, ob_win).shuffle()
            mask = train_data.pay_ts < 0
            train_data.pay_ts[mask] = (30 * SECONDS_A_DAY) + train_data.click_ts[mask]
            test_data = data.sub_days(30, 60)
        elif 'baseline_pretrain_1d' in name:
            cut_hour = parse_float_arg(name, 'cut_hour')
            ob_win = int(SECONDS_AN_HOUR * cut_hour)
            attr_win = SECONDS_A_DAY
            train_data = data.sub_days_v2(0, 30, ob_win, attr_win).shuffle()
            mask = train_data.pay_ts < 0
            train_data.pay_ts[mask] = (30 * SECONDS_A_DAY) + train_data.click_ts[mask]
            test_data = data.sub_days(30, 60)
        elif name == 'dfm_prtrain':
            train_data = data.sub_days(0, 30).shuffle()
            train_data.pay_ts[train_data.pay_ts < 0] = SECONDS_A_DAY * 30
            # Second label column is the normalized click-to-pay delay.
            delay = np.reshape(train_data.pay_ts - train_data.click_ts, (-1, 1)) / SECONDS_DELAY_NORM
            train_data.labels = np.reshape(train_data.labels, (-1, 1))
            train_data.labels = np.concatenate([train_data.labels, delay], axis=1)
            test_data = data.sub_days(30, 60)
        elif 'tn_dp_mask30d_pretrain_1d' in name:
            print('preprocess mask30d')
            cut_hour = parse_float_arg(name, 'cut_hour')
            ob_win = int(SECONDS_AN_HOUR * cut_hour)
            mask_sec = int(SECONDS_A_DAY * 30)
            attr_win = SECONDS_A_DAY
            train_data = data.sub_days(0, 30).shuffle()
            train_diff = train_data.pay_ts - train_data.click_ts
            # Columns: true-negative flag, delayed-positive flag, positive flag.
            train_label_tn = np.reshape(np.logical_or(train_data.pay_ts < 0, np.logical_or(train_data.pay_ts > mask_sec, train_diff > attr_win)), (-1, 1))
            train_label_dp = np.reshape(np.logical_and(train_data.pay_ts < mask_sec, np.logical_and(train_diff > ob_win, train_diff < attr_win)), (-1, 1))
            train_label = np.reshape(np.logical_and(train_data.pay_ts < mask_sec, np.logical_and(train_data.pay_ts > 0, train_diff < attr_win)), (-1, 1))
            train_data.labels = np.concatenate([train_label_tn, train_label_dp, train_label], axis=1)
            test_data = data.sub_days(30, 60)
            test_label_tn = np.reshape(test_data.pay_ts < 0, (-1, 1))
            test_label_dp = np.reshape((test_data.pay_ts - test_data.click_ts) > ob_win, (-1, 1))
            test_label = np.reshape(test_data.pay_ts > 0, (-1, 1))
            test_data.labels = np.concatenate([test_label_tn, test_label_dp, test_label], axis=1)
        elif 'tn_dp_mask30d_pretrain' in name:
            print('preprocess mask30d')
            cut_hour = parse_float_arg(name, 'cut_hour')
            ob_win = int(SECONDS_AN_HOUR * cut_hour)
            mask_sec = int(SECONDS_A_DAY * 30)
            train_data = data.sub_days(0, 30).shuffle()
            train_label_tn = np.reshape(np.logical_or(train_data.pay_ts < 0, train_data.pay_ts > mask_sec), (-1, 1))
            train_label_dp = np.reshape(np.logical_and(train_data.pay_ts < mask_sec, (train_data.pay_ts - train_data.click_ts) > ob_win), (-1, 1))
            train_label = np.reshape(train_data.pay_ts > 0, (-1, 1))
            train_data.labels = np.concatenate([train_label_tn, train_label_dp, train_label], axis=1)
            test_data = data.sub_days(30, 60)
            test_label_tn = np.reshape(test_data.pay_ts < 0, (-1, 1))
            test_label_dp = np.reshape((test_data.pay_ts - test_data.click_ts) > ob_win, (-1, 1))
            test_label = np.reshape(test_data.pay_ts > 0, (-1, 1))
            test_data.labels = np.concatenate([test_label_tn, test_label_dp, test_label], axis=1)
        elif 'bidefuse_pretrain' in name:
            cut_hour = parse_float_arg(name, 'cut_hour')
            ob_win = int(SECONDS_AN_HOUR * cut_hour)
            print('ob_win: {}'.format(cut_hour))
            train_data = data.sub_days_v2(0, 30, ob_win).shuffle()
            # Columns: converted inside window, converted outside window, any.
            train_label_inw = np.reshape(train_data.labels - train_data.delay_labels, (-1, 1))
            train_label_outw = np.reshape(train_data.delay_labels, (-1, 1))
            train_label = np.reshape(train_data.labels > 0, (-1, 1))
            train_data.labels = np.concatenate([train_label_inw, train_label_outw, train_label], axis=1)
            test_data = data.sub_days(30, 60)
            test_label_inw = np.reshape(test_data.labels - test_data.delay_labels, (-1, 1))
            test_label_outw = np.reshape(test_data.delay_labels, (-1, 1))
            test_label = np.reshape(test_data.labels > 0, (-1, 1))
            test_data.labels = np.concatenate([test_label_inw, test_label_outw, test_label], axis=1)
        elif 'tn_dp_pretrain' in name:
            cut_hour = parse_float_arg(name, 'cut_hour')
            ob_win = int(SECONDS_AN_HOUR * cut_hour)
            train_data = data.sub_days_v2(0, 60, ob_win).shuffle()
            train_label_tn = np.reshape(train_data.pay_ts < 0, (-1, 1))
            train_label_dp = np.reshape((train_data.pay_ts - train_data.click_ts) > ob_win, (-1, 1))
            train_label = np.reshape(train_data.pay_ts > 0, (-1, 1))
            train_data.labels = np.concatenate([train_label_tn, train_label_dp, train_label], axis=1)
            test_data = data.sub_days(30, 60)
            test_label_tn = np.reshape(test_data.pay_ts < 0, (-1, 1))
            test_label_dp = np.reshape((test_data.pay_ts - test_data.click_ts) > ob_win, (-1, 1))
            test_label = np.reshape(test_data.pay_ts > 0, (-1, 1))
            test_data.labels = np.concatenate([test_label_tn, test_label_dp, test_label], axis=1)
        elif 'dp_pretrain' in name:
            cut_hour = parse_float_arg(name, 'cut_hour')
            ob_win = int(SECONDS_AN_HOUR * cut_hour)
            train_data = data.sub_days(0, 30).mask_rn_v2().shuffle()
            train_label_dp = np.reshape((train_data.pay_ts - train_data.click_ts) > ob_win, (-1, 1))
            train_label = np.reshape(train_data.pay_ts > 0, (-1, 1))
            # NOTE(review): train_label is computed but intentionally(?) not
            # included in the training labels here, unlike the test labels
            # below -- mirrors the original code; confirm before changing.
            train_data.labels = np.concatenate([train_label_dp], axis=1)
            test_data = data.sub_days(30, 60).mask_rn_v2()
            print('len of test_data : {}'.format(len(test_data.labels)))
            test_label_dp = np.reshape((test_data.pay_ts - test_data.click_ts) > ob_win, (-1, 1))
            test_label = np.reshape(test_data.pay_ts > 0, (-1, 1))
            test_data.labels = np.concatenate([test_label_dp, test_label], axis=1)
        elif 'dp_v2_1d_pretrain' in name:
            cut_hour = parse_float_arg(name, 'cut_hour')
            ob_win = int(SECONDS_AN_HOUR * cut_hour)
            attr_win = SECONDS_A_DAY
            train_data = data.sub_days(0, 30).mask_rn_v2().shuffle()
            print('len of train_data : {}'.format(len(train_data.labels)))
            train_diff = train_data.pay_ts - train_data.click_ts
            train_label_dp = np.reshape(np.logical_and(train_diff > ob_win, train_diff < attr_win), (-1, 1))
            train_label = np.reshape(np.logical_and(train_data.pay_ts > 0, train_diff < attr_win), (-1, 1))
            train_data.labels = np.concatenate([train_label_dp], axis=1)
            test_data = data.sub_days(30, 60).mask_rn_v2()
            print('len of test_data : {}'.format(len(test_data.labels)))
            test_label_dp = np.reshape((test_data.pay_ts - test_data.click_ts) > ob_win, (-1, 1))
            test_label = np.reshape(test_data.pay_ts > 0, (-1, 1))
            test_data.labels = np.concatenate([test_label_dp, test_label], axis=1)
        elif 'fsiw1' in name:
            cd = parse_float_arg(name, 'cd')
            print('cd {}'.format(cd))
            train_data = data.sub_days(0, 30).shuffle()
            test_data = data.sub_days(30, 60)
            train_data = train_data.to_fsiw_1(cd=cd * SECONDS_A_DAY, T=30 * SECONDS_A_DAY)
            test_data = test_data.to_fsiw_1(cd=cd * SECONDS_A_DAY, T=60 * SECONDS_A_DAY)
        elif 'fsiw0' in name:
            cd = parse_float_arg(name, 'cd')
            train_data = data.sub_days(0, 30).shuffle()
            test_data = data.sub_days(30, 60)
            train_data = train_data.to_fsiw_0(cd=cd * SECONDS_A_DAY, T=30 * SECONDS_A_DAY)
            test_data = test_data.to_fsiw_0(cd=cd * SECONDS_A_DAY, T=60 * SECONDS_A_DAY)
        else:
            raise NotImplementedError('{} dataset does not exist'.format(name))
        # Cache the freshly built splits for subsequent runs.
        if params['data_cache_path'] != 'None':
            with open(cache_path, 'wb') as f:
                pickle.dump({'train': train_data, 'test': test_data}, f)
    return {
        'train': {
            'x': train_data.x,
            'click_ts': train_data.click_ts,
            'pay_ts': train_data.pay_ts,
            'sample_ts': train_data.sample_ts,
            'labels': train_data.labels,
            'delay_labels': train_data.delay_labels,
        },
        'test': {
            'x': test_data.x,
            'click_ts': test_data.click_ts,
            'pay_ts': test_data.pay_ts,
            # BUGFIX: previously returned train_data.sample_ts here.
            'sample_ts': test_data.sample_ts,
            'labels': test_data.labels,
        },
    }
class ServiceBrowser(_ServiceBrowserBase, threading.Thread):
    """Threaded browser for mDNS services of the given type(s).

    Protocol work runs on the Zeroconf asyncio event loop; state-change
    events are handed to this worker thread through a queue, so listener
    callbacks never run on the event loop itself.
    """

    def __init__(
        self,
        zc: 'Zeroconf',
        type_: Union[str, list],
        handlers: Optional[Union[ServiceListener, List[Callable[..., None]]]] = None,
        listener: Optional[ServiceListener] = None,
        addr: Optional[str] = None,
        port: int = _MDNS_PORT,
        delay: int = _BROWSER_TIME,
        question_type: Optional[DNSQuestionType] = None,
    ) -> None:
        # The browser requires a running asyncio loop to schedule its queries.
        assert (zc.loop is not None)
        if (not zc.loop.is_running()):
            raise RuntimeError('The event loop is not running')
        threading.Thread.__init__(self)
        super().__init__(zc, type_, handlers, listener, addr, port, delay, question_type)
        # Events produced on the event loop are consumed by run() below.
        self.queue: queue.SimpleQueue = queue.SimpleQueue()
        self.daemon = True
        self.start()
        # Start the async browsing machinery from the event loop thread.
        zc.loop.call_soon_threadsafe(self._async_start)
        # Thread name lists the browsed types (minus the '.local.' suffix).
        self.name = 'zeroconf-ServiceBrowser-{}-{}'.format(
            '-'.join([type_[:(- 7)] for type_ in self.types]),
            getattr(self, 'native_id', self.ident),
        )

    def cancel(self) -> None:
        """Stop browsing: unblock run(), cancel the async side, join the thread."""
        assert (self.zc.loop is not None)
        self.queue.put(None)  # sentinel that makes run() return
        self.zc.loop.call_soon_threadsafe(self._async_cancel)
        self.join()

    def run(self) -> None:
        # Dispatch queued state-change events until the None sentinel arrives.
        while True:
            event = self.queue.get()
            if (event is None):
                return
            self._fire_service_state_changed_event(event)

    def async_update_records_complete(self) -> None:
        # Runs on the event loop: hand all pending handler events to the
        # worker thread, then reset the pending set.
        for pending in self._pending_handlers.items():
            self.queue.put(pending)
        self._pending_handlers.clear()

    def __enter__(self) -> 'ServiceBrowser':
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> Optional[bool]:
        # Context-manager exit always cancels; exceptions are not suppressed.
        self.cancel()
        return None
@require_torch
@require_vision
class VideoMAEImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for VideoMAEImageProcessor over PIL, numpy and torch inputs.

    NOTE(review): the class decorators were mangled to ``_torch _vision`` by
    file flattening; restored as transformers' conventional ``@require_torch``
    / ``@require_vision`` -- TODO confirm against the original module. The
    stripped ``@property`` on ``image_processor_dict`` is likewise restored
    (the attribute is consumed via ``**self.image_processor_dict`` below).
    """

    image_processing_class = VideoMAEImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VideoMAEImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes the expected configuration attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        """from_dict honors size/crop_size kwargs overriding the dict values."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        # Covered by the mixin; nothing VideoMAE-specific to check.
        pass

    def test_call_pil(self):
        """Single video and batched videos of PIL frames produce the expected shapes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)
        # Single video: batch dimension of 1.
        encoded_videos = image_processing(video_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels,
             self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
        # Batched videos.
        encoded_videos = image_processing(video_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_frames,
             self.image_processor_tester.num_channels,
             self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))

    def test_call_numpy(self):
        """Single video and batched videos of numpy frames produce the expected shapes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)
        encoded_videos = image_processing(video_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels,
             self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
        encoded_videos = image_processing(video_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_frames,
             self.image_processor_tester.num_channels,
             self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))

    def test_call_pytorch(self):
        """Single video and batched videos of torch frames produce the expected shapes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)
        encoded_videos = image_processing(video_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels,
             self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
        encoded_videos = image_processing(video_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_frames,
             self.image_processor_tester.num_channels,
             self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
def backward(x0, sc, c, phi, theta, psi, orientation, sigma_c, sigma_l, sigma_d, t1, bm):
    """Recursively trace backward from voxel *x0* along the rotated search
    cone *sc*, marking accepted voxels in the boolean map *bm*.

    At each step the cone is rotated to the local orientation, candidate
    voxels inside it are scored with ``similarity``, and tracing continues
    from the best-scoring voxel while its score exceeds threshold *t1*.

    NOTE(review): recursion depth equals the traced path length; very long
    paths could hit Python's recursion limit -- confirm expected path sizes.
    """
    angles = (phi[x0], theta[x0], psi[x0])
    sc_curr = GR.rotate(sc, angle=angles, default_val=0.0)
    # Anchor of the rotated cone: location of its minimum value.
    x0_loc = convert(np.where(sc_curr == sc_curr.min()))
    sc_mask = sc_curr > 0
    search_result = np.full(sc_mask.shape, -1.0)
    for idx, val in np.ndenumerate(sc_mask):
        # BUGFIX: this previously tested `val is True`, which is always False
        # for the numpy bool_ scalars yielded by np.ndenumerate, so no
        # candidate was ever scored.  Plain truthiness is correct here.
        if val:
            x = (x0[0] - x0_loc[0] + idx[0],
                 x0[1] - x0_loc[1] + idx[1],
                 x0[2] - x0_loc[2] + idx[2])
            # BUGFIX: also guard the lower bound; negative coordinates would
            # otherwise wrap around via Python's negative indexing.
            if (0 <= x[0] <= c.shape[0] - 1) and (0 <= x[1] <= c.shape[1] - 1) and (0 <= x[2] <= c.shape[2] - 1):
                s = similarity(x0, x, c[x], sigma_c, sigma_l, sigma_d, orientation)
                if s > t1:
                    search_result[idx] = s
    if search_result.max() > t1:
        # Continue from the best-scoring candidate voxel.
        x = convert(np.where(search_result == search_result.max()))
        x = (x0[0] - x0_loc[0] + x[0],
             x0[1] - x0_loc[1] + x[1],
             x0[2] - x0_loc[2] + x[2])
        bm[x] = True
        return backward(x, sc, c, phi, theta, psi, orientation, sigma_c, sigma_l, sigma_d, t1, bm)
    else:
        return bm
class SendVideoNote():
    """Mixin providing ``Client.send_video_note`` (round video messages)."""

    async def send_video_note(
        self: 'pyrogram.Client',
        chat_id: Union[int, str],
        video_note: Union[str, BinaryIO],
        duration: int = 0,
        length: int = 1,
        thumb: Union[str, BinaryIO] = None,
        disable_notification: bool = None,
        reply_to_message_id: int = None,
        schedule_date: datetime = None,
        protect_content: bool = None,
        reply_markup: Union['types.InlineKeyboardMarkup', 'types.ReplyKeyboardMarkup', 'types.ReplyKeyboardRemove', 'types.ForceReply'] = None,
        progress: Callable = None,
        progress_args: tuple = ()
    ) -> Optional['types.Message']:
        """Send a round video note.

        *video_note* may be a local file path, a Telegram file_id string, or
        a file-like object.  Returns the sent Message, or None if the upload
        was aborted via the progress callback (StopTransmission).
        """
        file = None
        try:
            if isinstance(video_note, str):
                if os.path.isfile(video_note):
                    # Local file path: upload thumb + video as a new document.
                    thumb = (await self.save_file(thumb))
                    file = (await self.save_file(video_note, progress=progress, progress_args=progress_args))
                    media = raw.types.InputMediaUploadedDocument(mime_type=(self.guess_mime_type(video_note) or 'video/mp4'), file=file, thumb=thumb, attributes=[raw.types.DocumentAttributeVideo(round_message=True, duration=duration, w=length, h=length)])
                else:
                    # Non-path string: treat it as an existing file_id.
                    media = utils.get_input_media_from_file_id(video_note, FileType.VIDEO_NOTE)
            else:
                # File-like object: upload its contents.
                thumb = (await self.save_file(thumb))
                file = (await self.save_file(video_note, progress=progress, progress_args=progress_args))
                media = raw.types.InputMediaUploadedDocument(mime_type=(self.guess_mime_type(video_note.name) or 'video/mp4'), file=file, thumb=thumb, attributes=[raw.types.DocumentAttributeVideo(round_message=True, duration=duration, w=length, h=length)])
            # Retry loop: FilePartMissing triggers re-upload of the missing
            # chunk, after which the send is attempted again.
            while True:
                try:
                    r = (await self.invoke(raw.functions.messages.SendMedia(peer=(await self.resolve_peer(chat_id)), media=media, silent=(disable_notification or None), reply_to_msg_id=reply_to_message_id, random_id=self.rnd_id(), schedule_date=utils.datetime_to_timestamp(schedule_date), noforwards=protect_content, reply_markup=((await reply_markup.write(self)) if reply_markup else None), message='')))
                except FilePartMissing as e:
                    # Re-upload only the part the server reported missing.
                    (await self.save_file(video_note, file_id=file.id, file_part=e.value))
                else:
                    # Return the first new-message update as a parsed Message.
                    for i in r.updates:
                        if isinstance(i, (raw.types.UpdateNewMessage, raw.types.UpdateNewChannelMessage, raw.types.UpdateNewScheduledMessage)):
                            return (await types.Message._parse(self, i.message, {i.id: i for i in r.users}, {i.id: i for i in r.chats}, is_scheduled=isinstance(i, raw.types.UpdateNewScheduledMessage)))
        except StopTransmission:
            # Upload aborted from the progress callback.
            return None
def createHTMLDeviceSummary(testruns, htmlfile, title):
    """Write an HTML summary of per-device suspend/resume times across runs.

    For every device (grouped by callback type) the worst time, total time,
    call count and the host/url of the worst run are aggregated; devices
    whose average exceeds 1 ms are emitted as table rows.

    Returns the aggregated {type: {device: stats}} dict.
    """
    html = summaryCSS('Device Summary - SleepGraph', False)
    devall = dict()
    # Merge device timing data from all test runs.
    for data in testruns:
        host, url = data['host'], data['url']
        for dev_type in data['devlist']:
            mdevlist = devall.setdefault(dev_type, dict())
            devlist = data['devlist'][dev_type]
            for name in devlist:
                length = devlist[name]
                if name not in mdevlist:
                    mdevlist[name] = {'name': name, 'host': host,
                                      'worst': length, 'total': length,
                                      'count': 1, 'url': url}
                else:
                    # Track which run produced the worst time for this device.
                    if length > mdevlist[name]['worst']:
                        mdevlist[name]['worst'] = length
                        mdevlist[name]['url'] = url
                        mdevlist[name]['host'] = host
                    mdevlist[name]['total'] += length
                    mdevlist[name]['count'] += 1
    th = '\t<th>{0}</th>\n'
    td = '\t<td align=center>{0}</td>\n'
    tdr = '\t<td align=right>{0}</td>\n'
    tdlink = '\t<td align=center><a href="{0}">html</a></td>\n'
    limit = 1  # ms; devices averaging below this are omitted
    for dev_type in sorted(devall, reverse=True):
        num = 0
        devlist = devall[dev_type]
        html += ('<div class="stamp">%s (%s devices > %d ms)</div><table>\n' %
                 (title, dev_type.upper(), limit))
        html += ('<tr>\n' + '<th align=right>Device Name</th>' +
                 th.format('Average Time') + th.format('Count') +
                 th.format('Worst Time') + th.format('Host (worst time)') +
                 th.format('Link (worst time)') + '</tr>\n')
        # Sort worst-first, then by total time and name.
        for name in sorted(devlist, key=lambda k: (devlist[k]['worst'], devlist[k]['total'], devlist[k]['name']), reverse=True):
            data = devall[dev_type][name]
            data['average'] = data['total'] / data['count']
            if data['average'] < limit:
                continue
            # Alternate row styling via the 'alt' class.
            rcls = ['alt'] if (num % 2) == 1 else []
            html += ('<tr class="' + ' '.join(rcls) + '">\n') if len(rcls) > 0 else '<tr>\n'
            html += tdr.format(data['name'])
            html += td.format('%.3f ms' % data['average'])
            html += td.format(data['count'])
            html += td.format('%.3f ms' % data['worst'])
            html += td.format(data['host'])
            html += tdlink.format(data['url'])
            html += '</tr>\n'
            num += 1
        html += '</table>\n'
    # BUGFIX: use a context manager so the file is closed even on error.
    with open(htmlfile, 'w') as hf:
        hf.write(html + '</body>\n</html>\n')
    return devall
@register_model
def convformer_b36_in21k(pretrained=False, **kwargs):
    """ConvFormer-B36 (MetaFormer with SepConv token mixers), ImageNet-21k config.

    NOTE(review): the decorator was mangled to ``_model`` by file flattening;
    restored as the timm-style ``@register_model`` -- TODO confirm.

    :param pretrained: when True, download and load the pretrained weights
        from the URL in the model's default_cfg.
    :param kwargs: forwarded to the MetaFormer constructor.
    """
    model = MetaFormer(
        depths=[3, 12, 18, 3],
        dims=[128, 256, 512, 768],
        token_mixers=SepConv,
        head_fn=MlpHead,
        **kwargs,
    )
    model.default_cfg = default_cfgs['convformer_b36_in21k']
    if pretrained:
        state_dict = torch.hub.load_state_dict_from_url(
            url=model.default_cfg['url'], map_location='cpu', check_hash=True)
        model.load_state_dict(state_dict)
    return model
def are_typed_dicts_overlapping(left: TypedDictType, right: TypedDictType, *, ignore_promotions: bool = False, prohibit_none_typevar_overlap: bool = False) -> bool:
    """Return True if the two TypedDicts could describe the same value.

    Every required key on either side must exist on the other side with an
    overlapping item type; the flags are forwarded to is_overlapping_types.
    """
    # Every required key of `left` must be present and overlapping in `right`.
    for key in left.required_keys:
        if key not in right.items:
            return False
        if not is_overlapping_types(left.items[key], right.items[key],
                                    ignore_promotions=ignore_promotions,
                                    prohibit_none_typevar_overlap=prohibit_none_typevar_overlap):
            return False
    # Symmetric check for `right`'s required keys.
    for key in right.required_keys:
        if key not in left.items:
            return False
        # CONSISTENCY FIX: this call previously omitted
        # prohibit_none_typevar_overlap, making the check asymmetric with the
        # loop above.
        if not is_overlapping_types(left.items[key], right.items[key],
                                    ignore_promotions=ignore_promotions,
                                    prohibit_none_typevar_overlap=prohibit_none_typevar_overlap):
            return False
    return True
def unevaluatedItems_draft2019(validator, unevaluatedItems, instance, schema):
    """Validate draft 2019-09 ``unevaluatedItems``: every array element not
    covered by another applicator keyword must be rejected."""
    if not validator.is_type(instance, 'array'):
        return
    evaluated = find_evaluated_item_indexes_by_schema(validator, instance, schema)
    leftovers = [
        item
        for index, item in enumerate(instance)
        if index not in evaluated
    ]
    if leftovers:
        message = 'Unevaluated items are not allowed (%s %s unexpected)'
        yield ValidationError(message % _utils.extras_msg(leftovers))
def get_precision_at_k(args, preds_path, gold_data_path):
    """Compute and log precision@k between predicted and gold provenance.

    Each line in both files is a tab-separated list of provenance ids; the
    first ``args.k`` predictions are compared against the gold set.

    NOTE(review): raises ZeroDivisionError on empty input files, as before --
    confirm whether an explicit guard is wanted.
    """
    k = args.k
    # BUGFIX: the original opened both files without ever closing them.
    with open(preds_path, 'r') as f:
        hypos = [line.strip() for line in f]
    with open(gold_data_path, 'r') as f:
        references = [line.strip() for line in f]
    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split('\t')[:k])
        ref_provenance = set(reference.split('\t'))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    em = 100.0 * em / total
    logger.info(f'{k}: {em: .2f}')
@v1_bp.route('/images/<image_id>/checksum', methods=['PUT'])
@process_auth
@extract_namespace_repo_from_session
@check_v1_push_enabled()
@ensure_namespace_enabled
@check_repository_state
@anon_protect
@check_readonly
def put_image_checksum(namespace, repository, image_id):
    """V1 registry endpoint: store or validate the checksum of an uploaded layer.

    NOTE(review): the decorator stack above was mangled by file flattening
    (residue such as ``_bp.route``, ``_auth``, ``_protect``); the names were
    reconstructed from Quay's v1 registry conventions -- confirm each against
    the original module before relying on them.
    """
    logger.debug('Checking repo permissions')
    permission = ModifyRepositoryPermission(namespace, repository)
    if not permission.can():
        abort(403)
    repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
    if repository_ref is None:
        abort(403)
    # V1 clients send the checksum in one of two headers depending on version.
    old_checksum = request.headers.get('X-Docker-Checksum')
    new_checksum = request.headers.get('X-Docker-Checksum-Payload')
    checksum = new_checksum or old_checksum
    if not checksum:
        abort(400, 'Missing checksum for image %(image_id)s', issue='missing-checksum', image_id=image_id)
    logger.debug('Checking for image in manifest builder')
    builder = lookup_manifest_builder(repository_ref, session.get('manifest_builder'), store, docker_v2_signing_key)
    if builder is None:
        abort(400)
    layer = builder.lookup_layer(image_id)
    if layer is None:
        abort(404)
    if old_checksum:
        # Old-style checksums cannot be validated server-side; store as given.
        builder.save_precomputed_checksum(layer, checksum)
        return make_response('true', 200)
    if not builder.validate_layer_checksum(layer, checksum):
        logger.debug('put_image_checksum: Wrong checksum. Given: %s and expected: %s', checksum, builder.get_layer_checksums(layer))
        abort(400, 'Checksum mismatch for image: %(image_id)s', issue='checksum-mismatch', image_id=image_id)
    return make_response('true', 200)
def remove_old_tests(client: APIClient):
    """Remove the freshenv system-test container and image if they exist.

    Missing objects (404) are treated as success; any other failure aborts
    the program with a non-zero exit code.
    """
    try:
        client.remove_container(container='freshenv_system_test', force=True)
        client.remove_image(image=freshenv_test_image, force=True)
        print(':heavy_check_mark: Test images removed.')
    except errors.APIError as e:
        if e.status_code == 404:
            print(':heavy_check_mark: No test images found. Moving on...')
        else:
            # BUGFIX: non-404 API errors were previously swallowed silently;
            # report them and fail like the generic handler below.
            print(':cross_mark_button: Could not remove freshenv test image. A freshenv test environment maybe still running.')
            exit(1)
    except Exception:
        print(':cross_mark_button: Could not remove freshenv test image. A freshenv test environment maybe still running.')
        exit(1)
def getDefaultHeatTransferSolverSettings():
    """Return the default solver settings for a buoyant heat-transfer case."""
    settings = dict(
        # solver execution / physics switches
        parallel=False,
        compressible=False,
        nonNewtonian=False,
        transonic=False,
        porous=False,
        dynamicMeshing=False,
        # buoyancy-driven heat transfer with standard gravity along -y
        buoyant=True,
        gravity=(0, (- 9.81), 0),
        transient=False,
        turbulenceModel='kEpsilon',
        potentialInit=False,
        heatTransfering=True,
        conjugate=False,
        radiationModel='noRadiation',
    )
    return settings
class TestNfsCollector(CollectorTestCase):
    """Tests for NfsCollector against captured /proc/net/rpc/nfs fixtures.

    NOTE(review): the mock decorators were mangled by file flattening into
    bare tuples such as ``(Collector, 'publish')``; they are restored below
    as the conventional ``@patch`` / ``@patch.object`` stack (without them
    the test methods' publish_mock/open_mock parameters could never be
    supplied by the test runner) -- confirm against the original module.
    """

    def setUp(self):
        config = get_collector_config('NfsCollector', {'interval': 1})
        self.collector = NfsCollector(config, None)

    def test_import(self):
        self.assertTrue(NfsCollector)

    @patch('__builtin__.open')
    @patch('os.access', Mock(return_value=True))
    @patch.object(Collector, 'publish')
    def test_should_open_proc_stat(self, publish_mock, open_mock):
        """The collector reads exactly /proc/net/rpc/nfs."""
        open_mock.return_value = StringIO('')
        self.collector.collect()
        open_mock.assert_called_once_with('/proc/net/rpc/nfs')

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data_rhel5(self, publish_mock):
        """Counter deltas between two RHEL5 fixtures publish the expected metrics."""
        NfsCollector.PROC = self.getFixturePath('rhel5-1')
        self.collector.collect()
        # First sample only primes the counters; nothing is published yet.
        self.assertPublishedMany(publish_mock, {})
        NfsCollector.PROC = self.getFixturePath('rhel5-2')
        self.collector.collect()
        metrics = {
            'net.packets': 0.0, 'net.tcpcnt': 0.0, 'net.tcpconn': 0.0,
            'net.udpcnt': 0.0, 'rpc.authrefrsh': 0.0, 'rpc.calls': 8042864.0,
            'rpc.retrans': 0.0, 'v2.create': 0.0, 'v2.fsstat': 0.0,
            'v2.getattr': 0.0, 'v2.link': 0.0, 'v2.lookup': 0.0,
            'v2.mkdir': 0.0, 'v2.null': 0.0, 'v2.read': 0.0,
            'v2.readdir': 0.0, 'v2.readlink': 0.0, 'v2.remove': 0.0,
            'v2.rename': 0.0, 'v2.rmdir': 0.0, 'v2.root': 0.0,
            'v2.setattr': 0.0, 'v2.symlink': 0.0, 'v2.wrcache': 0.0,
            'v2.write': 0.0, 'v3.access': 40672.0, 'v3.commit': 0.0,
            'v3.create': 91.0, 'v3.fsinfo': 0.0, 'v3.fsstat': 20830.0,
            'v3.getattr': 162507.0, 'v3.link': 0.0, 'v3.lookup': 89.0,
            'v3.mkdir': 0.0, 'v3.mknod': 0.0, 'v3.null': 0.0,
            'v3.pathconf': 0.0, 'v3.read': 6093419.0, 'v3.readdir': 4002.0,
            'v3.readdirplus': 0.0, 'v3.readlink': 0.0, 'v3.remove': 9.0,
            'v3.rename': 0.0, 'v3.rmdir': 0.0, 'v3.setattr': 8640.0,
            'v3.symlink': 0.0, 'v3.write': 1712605.0, 'v4.access': 0.0,
            'v4.close': 0.0, 'v4.commit': 0.0, 'v4.confirm': 0.0,
            'v4.create': 0.0, 'v4.delegreturn': 0.0, 'v4.fs_locations': 0.0,
            'v4.fsinfo': 0.0, 'v4.getacl': 0.0, 'v4.getattr': 0.0,
            'v4.link': 0.0, 'v4.lock': 0.0, 'v4.lockt': 0.0, 'v4.locku': 0.0,
            'v4.lookup': 0.0, 'v4.lookup_root': 0.0, 'v4.null': 0.0,
            'v4.open': 0.0, 'v4.open_conf': 0.0, 'v4.open_dgrd': 0.0,
            'v4.open_noat': 0.0, 'v4.pathconf': 0.0, 'v4.read': 0.0,
            'v4.readdir': 0.0, 'v4.readlink': 0.0, 'v4.rel_lkowner': 0.0,
            'v4.remove': 0.0, 'v4.rename': 0.0, 'v4.renew': 0.0,
            'v4.server_caps': 0.0, 'v4.setacl': 0.0, 'v4.setattr': 0.0,
            'v4.setclntid': 0.0, 'v4.statfs': 0.0, 'v4.symlink': 0.0,
            'v4.write': 0.0,
        }
        self.assertPublishedMany(publish_mock, metrics)
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data_rhel6(self, publish_mock):
        """Counter deltas between two RHEL6 fixtures publish the expected metrics."""
        NfsCollector.PROC = self.getFixturePath('rhel6-1')
        self.collector.collect()
        self.assertPublishedMany(publish_mock, {})
        NfsCollector.PROC = self.getFixturePath('rhel6-2')
        self.collector.collect()
        metrics = {
            'net.packets': 0.0, 'net.tcpcnt': 0.0, 'net.tcpconn': 0.0,
            'net.udpcnt': 0.0, 'rpc.authrefrsh': 32.0, 'rpc.calls': 32.0,
            'rpc.retrans': 0.0, 'v2.create': 0.0, 'v2.fsstat': 0.0,
            'v2.getattr': 0.0, 'v2.link': 0.0, 'v2.lookup': 0.0,
            'v2.mkdir': 0.0, 'v2.null': 0.0, 'v2.read': 0.0,
            'v2.readdir': 0.0, 'v2.readlink': 0.0, 'v2.remove': 0.0,
            'v2.rename': 0.0, 'v2.rmdir': 0.0, 'v2.root': 0.0,
            'v2.setattr': 0.0, 'v2.symlink': 0.0, 'v2.wrcache': 0.0,
            'v2.write': 0.0, 'v3.access': 6.0, 'v3.commit': 0.0,
            'v3.create': 0.0, 'v3.fsinfo': 0.0, 'v3.fsstat': 17.0,
            'v3.getattr': 7.0, 'v3.link': 0.0, 'v3.lookup': 0.0,
            'v3.mkdir': 0.0, 'v3.mknod': 0.0, 'v3.null': 0.0,
            'v3.pathconf': 0.0, 'v3.read': 0.0, 'v3.readdir': 0.0,
            'v3.readdirplus': 0.0, 'v3.readlink': 0.0, 'v3.remove': 0.0,
            'v3.rename': 0.0, 'v3.rmdir': 0.0, 'v3.setattr': 1.0,
            'v3.symlink': 0.0, 'v3.write': 1.0, 'v4.access': 0.0,
            'v4.close': 0.0, 'v4.commit': 0.0, 'v4.confirm': 0.0,
            'v4.create': 0.0, 'v4.create_ses': 0.0, 'v4.delegreturn': 0.0,
            'v4.destroy_ses': 0.0, 'v4.ds_write': 0.0, 'v4.exchange_id': 0.0,
            'v4.fs_locations': 0.0, 'v4.fsinfo': 0.0, 'v4.get_lease_t': 0.0,
            'v4.getacl': 0.0, 'v4.getattr': 0.0, 'v4.getdevinfo': 0.0,
            'v4.getdevlist': 0.0, 'v4.layoutcommit': 0.0, 'v4.layoutget': 0.0,
            'v4.layoutreturn': 0.0, 'v4.link': 0.0, 'v4.lock': 0.0,
            'v4.lockt': 0.0, 'v4.locku': 0.0, 'v4.lookup': 0.0,
            'v4.lookup_root': 0.0, 'v4.null': 0.0, 'v4.open': 0.0,
            'v4.open_conf': 0.0, 'v4.open_dgrd': 0.0, 'v4.open_noat': 0.0,
            'v4.pathconf': 0.0, 'v4.read': 0.0, 'v4.readdir': 0.0,
            'v4.readlink': 0.0, 'v4.reclaim_comp': 0.0, 'v4.rel_lkowner': 0.0,
            'v4.remove': 0.0, 'v4.rename': 0.0, 'v4.renew': 0.0,
            'v4.sequence': 0.0, 'v4.server_caps': 0.0, 'v4.setacl': 0.0,
            'v4.setattr': 0.0, 'v4.setclntid': 0.0, 'v4.statfs': 0.0,
            'v4.symlink': 0.0, 'v4.write': 0.0,
        }
        self.assertPublishedMany(publish_mock, metrics)
class STM32F4xxRccV2(STM32F4xxRcc):
    """Alternate RCC (reset and clock control) register layout for STM32F4xx.

    NOTE(review): register names and ordering match the STM32F4 reference
    manual's RCC map; the RESERVEDn members pad the unused gaps so that each
    field lands at its documented byte offset -- confirm against the exact
    part's datasheet before changing.
    """

    class Type(ctypes.Structure):
        # One 32-bit word per register, in address order.
        _fields_ = [
            ('CR', ctypes.c_uint32),
            ('PLLCFGR', ctypes.c_uint32),
            ('CFGR', ctypes.c_uint32),
            ('CIR', ctypes.c_uint32),
            ('AHB1RSTR', ctypes.c_uint32),
            ('AHB2RSTR', ctypes.c_uint32),
            ('AHB3RSTR', ctypes.c_uint32),
            ('RESERVED0', ctypes.c_uint32),
            ('APB1RSTR', ctypes.c_uint32),
            ('APB2RSTR', ctypes.c_uint32),
            ('RESERVED1', (ctypes.c_uint32 * 2)),
            ('AHB1ENR', ctypes.c_uint32),
            ('AHB2ENR', ctypes.c_uint32),
            ('AHB3ENR', ctypes.c_uint32),
            ('RESERVED2', ctypes.c_uint32),
            ('APB1ENR', ctypes.c_uint32),
            ('APB2ENR', ctypes.c_uint32),
            ('RESERVED3', (ctypes.c_uint32 * 2)),
            ('AHB1LPENR', ctypes.c_uint32),
            ('AHB2LPENR', ctypes.c_uint32),
            ('AHB3LPENR', ctypes.c_uint32),
            ('RESERVED4', ctypes.c_uint32),
            ('APB1LPENR', ctypes.c_uint32),
            ('APB2LPENR', ctypes.c_uint32),
            ('RESERVED5', (ctypes.c_uint32 * 2)),
            ('BDCR', ctypes.c_uint32),
            ('CSR', ctypes.c_uint32),
            ('RESERVED6', (ctypes.c_uint32 * 2)),
            ('SSCGR', ctypes.c_uint32),
            ('PLLI2SCFGR', ctypes.c_uint32),
        ]
class net():
    """MC-dropout regression network.

    NOTE(review): written against the Keras 1.x API (``W_regularizer``,
    ``nb_epoch``) and assumes ``Input``, ``Dropout``, ``Dense``, ``Model``,
    ``l2``, ``logsumexp``, ``np`` and ``time`` are imported elsewhere in
    this file -- confirm before porting to Keras 2+.
    """

    def __init__(self, X_train, y_train, n_hidden, n_epochs=40, normalize=False, tau=1.0, dropout=0.05):
        # Optionally standardize inputs; zero-variance columns get std 1 so
        # the division below is safe.
        if normalize:
            self.std_X_train = np.std(X_train, 0)
            self.std_X_train[(self.std_X_train == 0)] = 1
            self.mean_X_train = np.mean(X_train, 0)
        else:
            self.std_X_train = np.ones(X_train.shape[1])
            self.mean_X_train = np.zeros(X_train.shape[1])
        X_train = ((X_train - np.full(X_train.shape, self.mean_X_train)) / np.full(X_train.shape, self.std_X_train))
        # Targets are standardized too; statistics kept for de-normalization.
        self.mean_y_train = np.mean(y_train)
        self.std_y_train = np.std(y_train)
        y_train_normalized = ((y_train - self.mean_y_train) / self.std_y_train)
        y_train_normalized = np.array(y_train_normalized, ndmin=2).T
        N = X_train.shape[0]
        batch_size = 128
        lengthscale = 0.01
        # Weight-decay strength derived from the dropout-as-Bayesian-
        # approximation relation between lengthscale, dropout rate and tau.
        reg = (((lengthscale ** 2) * (1 - dropout)) / ((2.0 * N) * tau))
        inputs = Input(shape=(X_train.shape[1],))
        # training=True keeps dropout active at prediction time (MC dropout).
        inter = Dropout(dropout)(inputs, training=True)
        inter = Dense(n_hidden[0], activation='relu', W_regularizer=l2(reg))(inter)
        for i in range((len(n_hidden) - 1)):
            inter = Dropout(dropout)(inter, training=True)
            inter = Dense(n_hidden[(i + 1)], activation='relu', W_regularizer=l2(reg))(inter)
        inter = Dropout(dropout)(inter, training=True)
        outputs = Dense(y_train_normalized.shape[1], W_regularizer=l2(reg))(inter)
        model = Model(inputs, outputs)
        model.compile(loss='mean_squared_error', optimizer='adam')
        start_time = time.time()
        model.fit(X_train, y_train_normalized, batch_size=batch_size, nb_epoch=n_epochs, verbose=0)
        self.model = model
        self.tau = tau
        self.running_time = (time.time() - start_time)

    def predict(self, X_test, y_test):
        """Return (rmse of the deterministic mean prediction, MC-dropout rmse,
        mean predictive test log-likelihood) on the given test set."""
        X_test = np.array(X_test, ndmin=2)
        y_test = np.array(y_test, ndmin=2).T
        # Apply the training-set normalization to the test inputs.
        X_test = ((X_test - np.full(X_test.shape, self.mean_X_train)) / np.full(X_test.shape, self.std_X_train))
        model = self.model
        standard_pred = model.predict(X_test, batch_size=500, verbose=1)
        standard_pred = ((standard_pred * self.std_y_train) + self.mean_y_train)
        rmse_standard_pred = (np.mean(((y_test.squeeze() - standard_pred.squeeze()) ** 2.0)) ** 0.5)
        # Monte-Carlo sampling: dropout stays active, so each forward pass
        # draws a different network.
        T = 10000
        Yt_hat = np.array([model.predict(X_test, batch_size=500, verbose=0) for _ in range(T)])
        Yt_hat = ((Yt_hat * self.std_y_train) + self.mean_y_train)
        MC_pred = np.mean(Yt_hat, 0)
        rmse = (np.mean(((y_test.squeeze() - MC_pred.squeeze()) ** 2.0)) ** 0.5)
        # Predictive log-likelihood via a numerically stable logsumexp over
        # the T Gaussian components with precision tau.
        ll = (((logsumexp((((- 0.5) * self.tau) * ((y_test[None] - Yt_hat) ** 2.0)), 0) - np.log(T)) - (0.5 * np.log((2 * np.pi)))) + (0.5 * np.log(self.tau)))
        test_ll = np.mean(ll)
        return (rmse_standard_pred, rmse, test_ll)
def test_direct_junction_minimum_connection_suc_pred(direct_junction_both_lane_fixture):
    """Linking both roads to the junction yields one connection with two lane links."""
    main_road, small_road, creator = direct_junction_both_lane_fixture
    main_road.add_successor(xodr.ElementType.junction, creator.id)
    small_road.add_predecessor(xodr.ElementType.junction, creator.id)
    creator.add_connection(main_road, small_road)

    connections = creator.junction.connections
    assert len(connections) == 1
    links = connections[0].links
    assert len(links) == 2
    assert links[0] == (-1, -1)
    assert links[1] == (1, 1)
# BUG fixed: the decorator in SOURCE began with a bare `.parametrize(` —
# the `@pytest.mark` prefix was missing, which is a syntax error.
@pytest.mark.parametrize(
    'repo_name, extended_repo_names, expected_status',
    [
        pytest.param('x' * 255, False, 201, id='Maximum allowed length'),
        pytest.param('x' * 255, True, 201, id='Maximum allowed length'),
        pytest.param('x' * 256, False, 400, id='Over allowed length'),
        pytest.param('x' * 256, True, 400, id='Over allowed length'),
        pytest.param('a|b', False, 400, id='Invalid name'),
        pytest.param('a|b', True, 400, id='Invalid name'),
        pytest.param('UpperCase', False, 400, id='Uppercase Not Allowed'),
        pytest.param('UpperCase', True, 400, id='Uppercase Not Allowed'),
        pytest.param('testrepo/nested', False, 400, id='Slashes Not Allowed'),
        pytest.param('testrepo/nested', True, 201, id='Slashes Allowed'),
        pytest.param('testrepo/' + ('x' * 247), True, 400, id='Slashes Allowed But Too Long'),
        pytest.param('devtable/' + ('x' * 246), True, 201, id='Slashes Allowed Max Allowed'),
        pytest.param('devtable/nested1/nested2', False, 400, id='Slashes Allowed Multiple Levels'),
        pytest.param('devtable/nested1/nested2', True, 201, id='Slashes Allowed Multiple Levels'),
    ],
)
def test_create_repository(repo_name, extended_repo_names, expected_status, app):
    """Repository creation enforces name length/charset rules; nested (slashed)
    names are only accepted when the EXTENDED_REPOSITORY_NAMES feature is on."""
    with patch('features.EXTENDED_REPOSITORY_NAMES',
               FeatureNameValue('EXTENDED_REPOSITORY_NAMES', extended_repo_names)):
        with client_with_identity('devtable', app) as cl:
            body = {'namespace': 'devtable', 'repository': repo_name,
                    'visibility': 'public', 'description': 'foo'}
            result = conduct_api_call(cl, RepositoryList, 'post', None, body,
                                      expected_code=expected_status).json
            if expected_status == 201:
                assert result['name'] == repo_name
                assert model.repository.get_repository('devtable', repo_name).name == repo_name
def recv_param(learner_ip, actor_id, param_queue):
    """Subscribe to the learner's parameter stream and forward every update.

    Runs forever. Each received payload is unpickled and pushed onto
    ``param_queue``; CONFLATE keeps only the newest message so slow actors
    always see the latest parameters.
    """
    context = zmq.Context()
    subscriber = context.socket(zmq.SUB)
    subscriber.setsockopt(zmq.SUBSCRIBE, b'')
    subscriber.setsockopt(zmq.CONFLATE, 1)
    connect_param_socket(context, subscriber, learner_ip, actor_id)
    while True:
        payload = subscriber.recv(copy=False)
        param_queue.put(pickle.loads(payload))
def test_030_parseTime_legal():
    """A day/hour/minute-only observation time resolves against today's date."""
    report = Metar.Metar('KEWR 101651Z')
    assert report.decode_completed
    obs_time = report.time
    assert obs_time.day == 10
    assert obs_time.hour == 16
    assert obs_time.minute == 51
    # Month/year are only checkable when the observation is unambiguously
    # in the current month/year relative to "today".
    if (today.day > 10) or (today.hour > 16 and today.day == 10):
        assert obs_time.month == today.month
    if (today.month > 1) or (today.day > 10):
        assert obs_time.year == today.year
class ResNet50bn(ResNetD):
    """ResNet-D variant fixed to the '50_bn' configuration."""

    def __init__(self, n_classes: int, n_input_channels: int = 3,
                 input_dimension: int = 2, final_layer_dropout: float = 0.0,
                 stochastic_depth_p: float = 0.0, squeeze_excitation: bool = False,
                 squeeze_excitation_rd_ratio: float = (1.0 / 16)):
        # Every argument is forwarded verbatim; only the config string is fixed.
        super().__init__(
            n_classes,
            n_input_channels,
            config='50_bn',
            input_dimension=input_dimension,
            final_layer_dropout=final_layer_dropout,
            stochastic_depth_p=stochastic_depth_p,
            squeeze_excitation=squeeze_excitation,
            squeeze_excitation_rd_ratio=squeeze_excitation_rd_ratio,
        )
def test_quant_scheme_percentile():
    """Percentile values are rejected under the tf scheme and accepted after
    switching each quantizer to post_training_percentile."""
    if version.parse(tf.version.VERSION) >= version.parse('2.00'):
        model = dense_functional()
        sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf,
                                   default_param_bw=16, default_output_bw=16)
        _, _, output_quantizers = sim._get_quantizer_list()
        # Setting a percentile under a non-percentile scheme must raise.
        with pytest.raises(RuntimeError):
            for out_quantizer in output_quantizers:
                out_quantizer.set_percentile_value(99.99)
        for out_quantizer in output_quantizers:
            out_quantizer.quant_scheme = QuantScheme.post_training_percentile
            out_quantizer.set_percentile_value(99.99)
            assert np.allclose(out_quantizer.get_percentile_value(), 99.99)
def execute_benchmark(config: Config):
    """Run the benchmark against an existing scheduler or a freshly created client."""
    args = config.args
    if args.multiprocessing_method == 'forkserver':
        # Warm up the forkserver before dask spawns any workers.
        import multiprocessing.forkserver as forkserver
        forkserver.ensure_running()
    mp_setting = {'distributed.worker.multiprocessing-method': args.multiprocessing_method}
    with dask.config.set(mp_setting):
        use_existing = (args.scheduler_file is not None) or (args.scheduler_address is not None)
        if use_existing:
            run_client_from_existing_scheduler(args, config)
        else:
            run_create_client(args, config)
def gamma_dicom(dicom_dataset_ref, dicom_dataset_eval, dose_percent_threshold,
                distance_mm_threshold, **kwargs):
    """Compute a gamma comparison between two DICOM dose datasets.

    Extra keyword arguments are forwarded to ``gamma_shell``.
    """
    reference = zyx_and_dose_from_dataset(dicom_dataset_ref)
    evaluation = zyx_and_dose_from_dataset(dicom_dataset_eval)
    # reference/evaluation each unpack to (axes, dose), preserving the
    # positional order gamma_shell expects.
    return gamma_shell(*reference, *evaluation,
                       dose_percent_threshold, distance_mm_threshold, **kwargs)
def _get_code_for_demoing_a_gate(gate_func: Callable, vertical: bool) -> str:
    """Render the demo snippet for a gate, optionally requesting vertical layout."""
    lines, obj_expression = _get_lines_for_constructing_an_object(gate_func)
    vert_str = ', vertical=True' if vertical else ''
    return _GATE_DISPLAY.format(lines='\n'.join(lines),
                                obj_expression=obj_expression,
                                vert_str=vert_str)
class AttrVI_ATTR_USB_CLASS(RangeAttribute):
    """Range attribute for VISA's VI_ATTR_USB_CLASS (ViInt16, 0-255, read-only)."""

    resources = [(constants.InterfaceType.usb, 'RAW')]
    py_name = ''
    visa_name = 'VI_ATTR_USB_CLASS'
    visa_type = 'ViInt16'
    default = NotAvailable
    # Access flags: readable, not writable, not local-only.
    read = True
    write = False
    local = False
    # Valid range for the attribute value; no enumerated value set.
    min_value = 0
    max_value = 255
    values = None
def delimited_list(expr: Union[(str, ParserElement)], delim: Union[(str, ParserElement)]=',', combine: bool=False, min: typing.Optional[int]=None, max: typing.Optional[int]=None, *, allow_trailing_delim: bool=False) -> ParserElement:
    """Compatibility wrapper: build a ``DelimitedList`` from the given arguments.

    All parameters are forwarded unchanged; see ``DelimitedList`` for their
    semantics (delimiter, optional combining, element count bounds, and
    whether a trailing delimiter is tolerated).
    """
    return DelimitedList(expr, delim, combine, min, max, allow_trailing_delim=allow_trailing_delim)
def nonempty_intersection_answer_by_order(sets):
    """Group index-sets of overlapping combinations by their size.

    For every combination of two or more members of ``sets`` whose
    intersection is non-empty, record the frozenset of those members'
    indices in ``sets``; return a mapping from combination size to the set
    of such index-sets.
    NOTE(review): ``sets.index`` maps duplicate members to their first
    occurrence — confirm inputs are distinct.
    """
    overlapping = []
    for combination in utils.powerset(sets, nonempty=True, max_size=None):
        if len(combination) < 2:
            continue
        if frozenset.intersection(*combination):
            overlapping.append(frozenset(sets.index(member) for member in combination))
    by_size = {}
    for size in set(map(len, overlapping)):
        by_size[size] = {idx_set for idx_set in overlapping if len(idx_set) == size}
    return by_size
class ScheduleItem():
    """GraphQL type for a conference schedule item (talk, keynote, …).

    NOTE(review): the methods below read like strawberry field resolvers but
    carry no ``@strawberry.field`` decorators, and the class itself has no
    ``@strawberry.type`` — they may have been lost; confirm against the
    original module. Several resolvers also read attributes not listed in
    the annotations (``attendees_total_capacity``, ``attendees``,
    ``keynote_id``, ``slot``) — presumably provided by the backing Django
    model; verify.
    """

    id: strawberry.ID
    conference: Annotated[('Conference', strawberry.lazy('api.conferences.types'))]
    title: str
    start: datetime
    end: datetime
    status: str
    submission: Optional[Submission]
    slug: str
    description: str
    type: str
    duration: Optional[int]
    highlight_color: Optional[str]
    language: Language
    audience_level: Optional[Annotated[('AudienceLevel', strawberry.lazy('api.conferences.types'))]]
    youtube_video_id: Optional[str]

    def has_limited_capacity(self) -> bool:
        # A None capacity means "unlimited".
        return (self.attendees_total_capacity is not None)

    def has_spaces_left(self) -> bool:
        # Unlimited capacity always has space.
        if (self.attendees_total_capacity is None):
            return True
        return ((self.attendees_total_capacity - self.attendees.count()) > 0)

    def spaces_left(self) -> int:
        # NOTE(review): returns 0 for unlimited capacity — callers must pair
        # this with has_limited_capacity(); confirm that is the intent.
        if (self.attendees_total_capacity is None):
            return 0
        return (self.attendees_total_capacity - self.attendees.count())

    def user_has_spot(self, info) -> bool:
        # True when the requesting user is already registered as an attendee.
        user_id = info.context.request.user.id
        return self.attendees.filter(user_id=user_id).exists()

    def speakers(self) -> List[ScheduleItemUser]:
        # Wrap each speaker record in the lightweight GraphQL user type.
        speakers = []
        for speaker in self.speakers:
            speakers.append(ScheduleItemUser(id=speaker.id, fullname=speaker.fullname, full_name=speaker.full_name, conference_code=self.conference.code))
        return speakers

    def keynote(self) -> Optional[Annotated[('Keynote', strawberry.lazy('api.conferences.types'))]]:
        # Local import avoids a circular dependency with the conferences module.
        from api.conferences.types import Keynote
        if (not self.keynote_id):
            return None
        return Keynote.from_django_model(self.keynote)

    def rooms(self, info) -> List[Room]:
        return self.rooms.all()

    def image(self, info) -> Optional[str]:
        # Absolute URL so clients need no knowledge of the media host.
        if (not self.image):
            return None
        return info.context.request.build_absolute_uri(self.image.url)

    def slido_url(self, info) -> str:
        # Prefer a per-item override; otherwise fall back to the URL
        # configured for this item's room on its day.
        if self.slido_url:
            return self.slido_url
        return self.slot.day.added_rooms.get(room_id=self.rooms.first().id).slido_url
class HelloGLWidget(QOpenGLWidget):
    """Rotatable 'Hello GL' logo rendered with legacy fixed-function OpenGL.

    Fixes applied to the garbled SOURCE:
    - the property getters read ``return self.xRot (int)``, i.e. they *called*
      the stored int — the ``(int)`` belongs to ``pyqtProperty(int, ...)``,
      not the return statement;
    - ``def`` and ``resizeGL`` were split across a line break (syntax error);
    - ``Pi = 3.`` was truncated (restored to the Qt example's constant —
      confirm against the upstream example);
    - ``glViewport`` offsets used float division; integer division is
      required for pixel coordinates.
    """

    xRotationChanged = pyqtSignal(int)
    yRotationChanged = pyqtSignal(int)
    zRotationChanged = pyqtSignal(int)

    def __init__(self, parent=None):
        super(HelloGLWidget, self).__init__(parent)
        self.object = 0
        # Rotations are stored in 16ths of a degree (Qt convention).
        self.xRot = 0
        self.yRot = 0
        self.zRot = 0
        self.lastPos = QPoint()
        self.trolltechGreen = QColor.fromCmykF(0.4, 0.0, 1.0, 0.0)
        self.trolltechPurple = QColor.fromCmykF(0.39, 0.39, 0.0, 0.0)
        self.setWindowTitle('Hello GL')

    def getXRotation(self):
        return self.xRot

    def setXRotation(self, angle):
        angle = self.normalizeAngle(angle)
        if angle != self.xRot:
            self.xRot = angle
            self.xRotationChanged.emit(angle)
            self.update()

    xRotation = pyqtProperty(int, getXRotation, setXRotation)

    def getYRotation(self):
        return self.yRot

    def setYRotation(self, angle):
        angle = self.normalizeAngle(angle)
        if angle != self.yRot:
            self.yRot = angle
            self.yRotationChanged.emit(angle)
            self.update()

    yRotation = pyqtProperty(int, getYRotation, setYRotation)

    def getZRotation(self):
        return self.zRot

    def setZRotation(self, angle):
        angle = self.normalizeAngle(angle)
        if angle != self.zRot:
            self.zRot = angle
            self.zRotationChanged.emit(angle)
            self.update()

    zRotation = pyqtProperty(int, getZRotation, setZRotation)

    def minimumSizeHint(self):
        return QSize(50, 50)

    def sizeHint(self):
        return QSize(200, 200)

    def initializeGL(self):
        self.gl = self.context().versionFunctions()
        self.gl.initializeOpenGLFunctions()
        self.setClearColor(self.trolltechPurple.darker())
        self.object = self.makeObject()
        self.gl.glShadeModel(self.gl.GL_SMOOTH)
        self.gl.glEnable(self.gl.GL_DEPTH_TEST)
        self.gl.glEnable(self.gl.GL_CULL_FACE)

    def paintGL(self):
        self.gl.glClear(self.gl.GL_COLOR_BUFFER_BIT | self.gl.GL_DEPTH_BUFFER_BIT)
        self.gl.glLoadIdentity()
        self.gl.glTranslated(0.0, 0.0, -10.0)
        # Stored rotations are 16ths of a degree; glRotated wants degrees.
        self.gl.glRotated(self.xRot / 16.0, 1.0, 0.0, 0.0)
        self.gl.glRotated(self.yRot / 16.0, 0.0, 1.0, 0.0)
        self.gl.glRotated(self.zRot / 16.0, 0.0, 0.0, 1.0)
        self.gl.glCallList(self.object)

    def resizeGL(self, width, height):
        # Keep a square viewport centered in the widget.
        side = min(width, height)
        self.gl.glViewport((width - side) // 2, (height - side) // 2, side, side)
        self.gl.glMatrixMode(self.gl.GL_PROJECTION)
        self.gl.glLoadIdentity()
        self.gl.glOrtho(-0.5, +0.5, +0.5, -0.5, 4.0, 15.0)
        self.gl.glMatrixMode(self.gl.GL_MODELVIEW)

    def mousePressEvent(self, event):
        self.lastPos = QPoint(event.pos())

    def mouseMoveEvent(self, event):
        dx = event.x() - self.lastPos.x()
        dy = event.y() - self.lastPos.y()
        # Left drag: rotate about X/Y; right drag: rotate about X/Z.
        if event.buttons() & Qt.LeftButton:
            self.setXRotation(self.xRot + 8 * dy)
            self.setYRotation(self.yRot + 8 * dx)
        elif event.buttons() & Qt.RightButton:
            self.setXRotation(self.xRot + 8 * dy)
            self.setZRotation(self.zRot + 8 * dx)
        self.lastPos = QPoint(event.pos())

    def makeObject(self):
        """Build the logo as a GL display list and return its id."""
        genList = self.gl.glGenLists(1)
        self.gl.glNewList(genList, self.gl.GL_COMPILE)
        self.gl.glBegin(self.gl.GL_QUADS)
        x1 = +0.06
        y1 = -0.14
        x2 = +0.14
        y2 = -0.06
        x3 = +0.08
        y3 = +0.0
        x4 = +0.3
        y4 = +0.22
        self.quad(x1, y1, x2, y2, y2, x2, y1, x1)
        self.quad(x3, y3, x4, y4, y4, x4, y3, x3)
        self.extrude(x1, y1, x2, y2)
        self.extrude(x2, y2, y2, x2)
        self.extrude(y2, x2, y1, x1)
        self.extrude(y1, x1, x1, y1)
        self.extrude(x3, y3, x4, y4)
        self.extrude(x4, y4, y4, x4)
        self.extrude(y4, x4, y3, x3)
        # Restored from the truncated `Pi = 3.` in SOURCE.
        Pi = 3.14159265358979323846
        NumSectors = 200
        # Annulus between radii 0.2 and 0.3, tessellated into quads.
        for i in range(NumSectors):
            angle1 = (i * 2 * Pi) / NumSectors
            x5 = 0.3 * math.sin(angle1)
            y5 = 0.3 * math.cos(angle1)
            x6 = 0.2 * math.sin(angle1)
            y6 = 0.2 * math.cos(angle1)
            angle2 = ((i + 1) * 2 * Pi) / NumSectors
            x7 = 0.2 * math.sin(angle2)
            y7 = 0.2 * math.cos(angle2)
            x8 = 0.3 * math.sin(angle2)
            y8 = 0.3 * math.cos(angle2)
            self.quad(x5, y5, x6, y6, x7, y7, x8, y8)
            self.extrude(x6, y6, x7, y7)
            self.extrude(x8, y8, x5, y5)
        self.gl.glEnd()
        self.gl.glEndList()
        return genList

    def quad(self, x1, y1, x2, y2, x3, y3, x4, y4):
        # Front and back faces of a quad at z = ±0.05.
        self.setColor(self.trolltechGreen)
        self.gl.glVertex3d(x1, y1, -0.05)
        self.gl.glVertex3d(x2, y2, -0.05)
        self.gl.glVertex3d(x3, y3, -0.05)
        self.gl.glVertex3d(x4, y4, -0.05)
        self.gl.glVertex3d(x4, y4, +0.05)
        self.gl.glVertex3d(x3, y3, +0.05)
        self.gl.glVertex3d(x2, y2, +0.05)
        self.gl.glVertex3d(x1, y1, +0.05)

    def extrude(self, x1, y1, x2, y2):
        # Side wall between the two faces, shaded by x position.
        self.setColor(self.trolltechGreen.darker(250 + int(100 * x1)))
        self.gl.glVertex3d(x1, y1, +0.05)
        self.gl.glVertex3d(x2, y2, +0.05)
        self.gl.glVertex3d(x2, y2, -0.05)
        self.gl.glVertex3d(x1, y1, -0.05)

    def normalizeAngle(self, angle):
        # Wrap into [0, 360) degrees expressed in 16ths.
        while angle < 0:
            angle += 360 * 16
        while angle > 360 * 16:
            angle -= 360 * 16
        return angle

    def setClearColor(self, c):
        self.gl.glClearColor(c.redF(), c.greenF(), c.blueF(), c.alphaF())

    def setColor(self, c):
        self.gl.glColor4f(c.redF(), c.greenF(), c.blueF(), c.alphaF())
class SawyerReachWallV2Policy(Policy):
    """Scripted policy for the reach-wall task: drive the hand to the goal.

    BUG fixed: ``_parse_obs`` and ``_desired_pos`` took no ``self`` yet were
    invoked as ``self._parse_obs(obs)`` / ``self._desired_pos(o_d)``, which
    would pass the instance as the observation and raise a TypeError.
    Restored the ``@staticmethod`` decorators.
    """

    @staticmethod
    def _parse_obs(obs):
        # Fixed observation layout: hand (3), unused (1), puck (3),
        # padding, goal (last 3).
        return {
            'hand_pos': obs[:3],
            'unused_1': obs[3],
            'puck_pos': obs[4:7],
            'unused_2': obs[7:-3],
            'goal_pos': obs[-3:],
        }

    def get_action(self, obs):
        """Return the action array steering the hand toward the desired position."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=5.0)
        # Reaching needs no gripper force.
        action['grab_effort'] = 0.0
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        pos_hand = o_d['hand_pos']
        pos_goal = o_d['goal_pos']
        # Inside the wall region and below clearance height: aim above the
        # goal first so the hand lifts over the wall.
        if ((-0.1 <= pos_hand[0] <= 0.3) and (0.6 <= pos_hand[1] <= 0.8)
                and (pos_hand[2] < 0.25)):
            return pos_goal + np.array([0.0, 0.0, 1.0])
        return pos_goal
def test_biorbd_model_import():
    """BiorbdModel accepts a path string or a biorbd.Model and rejects anything else."""
    from bioptim.examples.getting_started import pendulum as ocp_module

    bioptim_folder = os.path.dirname(ocp_module.__file__)
    full_path = bioptim_folder + '/models/pendulum.bioMod'
    # Both construction routes must succeed.
    BiorbdModel(full_path)
    BiorbdModel(biorbd.Model(full_path))
    # Any other argument type is rejected with the same message.
    for bad_model in (1, []):
        with pytest.raises(ValueError, match="The model should be of type 'str' or 'biorbd.Model'"):
            BiorbdModel(bad_model)
class TerminusSendStringCommand(TerminusFindTerminalMixin, sublime_plugin.WindowCommand):
    """Send a string to a running terminal, surfacing its panel or view first."""

    def run(self, string, tag=None, visible_only=False, bracketed=False):
        terminal = self.find_terminal(self.window, tag=tag, visible_only=visible_only)
        # Guard clauses: a live terminal is required before pasting.
        if not terminal:
            raise Exception('no terminal found')
        if not terminal.process.isalive():
            raise Exception('process is terminated')
        if terminal.show_in_panel:
            panel = 'output.{}'.format(terminal.panel_name)
            self.window.run_command('show_panel', {'panel': panel})
        else:
            self.bring_view_to_topmost(terminal.view)
        terminal.view.run_command('terminus_paste_text',
                                  {'text': string, 'bracketed': bracketed})

    def bring_view_to_topmost(self, view):
        """Raise view to the top of its group without stealing cross-group focus."""
        if view_is_visible(view):
            return
        window = view.window()
        if not window:
            return
        previously_active = window.active_view()
        window.focus_view(view)
        group, _ = window.get_view_index(view)
        # If the raised view lives in a different group, give focus back.
        if window.get_view_index(previously_active)[0] != group:
            window.focus_view(previously_active)
class Logger(logging.Logger):
    """Process-wide singleton logger with optional colorized and file output."""

    NAME = 'SingletonLogger'

    @classmethod
    def get(cls, file_path=None, level='INFO', colorize=True, track_code=False):
        """Return the shared logger, (re)building its handlers as needed.

        BUG fixed: this factory reads ``cls`` but was not declared a
        classmethod, so ``Logger.get(...)`` raised a TypeError.
        """
        # Temporarily install this class so getLogger builds a Logger instance.
        logging.setLoggerClass(cls)
        logger = logging.getLogger(cls.NAME)
        logging.setLoggerClass(logging.Logger)
        logger.setLevel(level)
        if logger.hasHandlers():
            # Two handlers == fully configured (stream + file); anything else
            # is a partial setup and gets rebuilt.
            if len(logger.handlers) == 2:
                return logger
            logger.handlers.clear()
        log_format = '%(levelname)s %(asctime)s | %(message)s'
        if track_code:
            log_format = '%(levelname)s::%(asctime)s | [%(filename)s] [%(funcName)s:%(lineno)d] %(message)s'
        date_format = '%m/%d %H:%M:%S'
        if colorize:
            formatter = ColorFormatter(log_format, date_format)
        else:
            formatter = logging.Formatter(log_format, date_format)
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
        if file_path:
            file_handler = logging.FileHandler(file_path)
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
        logger.propagate = False
        return logger

    def nofmt(self, msg, *args, level='INFO', **kwargs):
        """Log ``msg`` with all formatting stripped, then restore the formatters."""
        level = levelize(level)
        formatters = self.remove_formats()
        super().log(level, msg, *args, **kwargs)
        self.set_formats(formatters)

    def remove_formats(self):
        """Swap every handler to a bare '%(message)s' format; return the old formatters."""
        formatters = []
        for handler in self.handlers:
            formatters.append(handler.formatter)
            handler.setFormatter(logging.Formatter('%(message)s'))
        return formatters

    def set_formats(self, formatters):
        """Reinstall previously captured formatters, paired with handlers in order."""
        for handler, formatter in zip(self.handlers, formatters):
            handler.setFormatter(formatter)

    def set_file_handler(self, file_path):
        """Attach a file handler that mirrors the first handler's format."""
        file_handler = logging.FileHandler(file_path)
        file_handler.setFormatter(self.handlers[0].formatter)
        self.addHandler(file_handler)
def activate(locale: (str | None and str)=None, path: (str | None)=None) -> gettext_module.NullTranslations:
    """Activate the given locale for humanize, loading its catalog on demand."""
class BITMAPV5HEADER(Structure):
    """ctypes mirror of the Win32 BITMAPV5HEADER structure (wingdi.h).

    Field names and order must match the C layout exactly; do not reorder.
    """

    _fields_ = [
        ('bV5Size', DWORD),
        ('bV5Width', LONG),
        ('bV5Height', LONG),
        ('bV5Planes', WORD),
        ('bV5BitCount', WORD),
        ('bV5Compression', DWORD),
        ('bV5SizeImage', DWORD),
        ('bV5XPelsPerMeter', LONG),
        ('bV5YPelsPerMeter', LONG),
        ('bV5ClrUsed', DWORD),
        ('bV5ClrImportant', DWORD),
        ('bV5RedMask', DWORD),
        ('bV5GreenMask', DWORD),
        ('bV5BlueMask', DWORD),
        ('bV5AlphaMask', DWORD),
        ('bV5CSType', DWORD),
        ('bV5Endpoints', CIEXYZTRIPLE),
        ('bV5GammaRed', DWORD),
        ('bV5GammaGreen', DWORD),
        ('bV5GammaBlue', DWORD),
        ('bV5Intent', DWORD),
        ('bV5ProfileData', DWORD),
        ('bV5ProfileSize', DWORD),
        ('bV5Reserved', DWORD),
    ]
def get_payee_channel(channelidentifiers_to_channels: Dict[(ChannelID, NettingChannelState)], transfer_pair: MediationPairState) -> Optional[NettingChannelState]:
    """Return the channel carrying the payee transfer, or None if unknown."""
    channel_id = transfer_pair.payee_transfer.balance_proof.channel_identifier
    return channelidentifiers_to_channels.get(channel_id)
class MethodRenamedBase():
    """Test helper: assert a renamed keyword argument behaves like the old one.

    NOTE(review): ``run_method`` has no ``self``/``cls`` parameter and no
    ``@staticmethod`` decorator — one of the two was likely lost; confirm
    against the original test module before calling it on an instance.
    """

    def run_method(method: Callable, old: Union[(dict, str)], new: Union[(dict, str)], required: Union[(List[str], str, dict)]=None):
        """Call ``method`` once with the old kwarg (expecting a deprecation
        warning) and once with the new kwarg (expecting none), then assert
        both produce identical request parameters."""
        # Normalise `required` into a kwargs dict.
        if (required is None):
            required = {}
        if isinstance(required, str):
            required = [required]
        if isinstance(required, list):
            required = {key: 'value' for key in required}
        # Avoid passing the new kwarg twice.
        if (new in required):
            del required[new]
        # The old spelling must emit the deprecation warning.
        with pytest.warns(RemovedInPAM11Warning):
            if isinstance(old, dict):
                old_kwargs = old
            else:
                old_kwargs = {old: 'dummy'}
            old_params = method(**required, **old_kwargs)
        # The new spelling must emit no warning at all.
        with warnings.catch_warnings():
            warnings.simplefilter('error')
            if isinstance(new, dict):
                new_kwargs = new
            else:
                new_kwargs = {new: 'dummy'}
            new_params = method(**required, **new_kwargs)
        # Strip request boilerplate (signatures, timestamps) that legitimately
        # differs between the two calls.
        common_params = ['AWSAccessKeyId', 'MWSAuthToken', 'Signature', 'SignatureMethod', 'SignatureVersion', 'Timestamp']
        for key in common_params:
            if (key in old_params):
                del old_params[key]
            if (key in new_params):
                del new_params[key]
        assert (old_params == new_params)
def test_input_toggling_lambda_condition(qtbot):
    """x is visible only while toggle_par satisfies the lambda group condition."""

    class TestProcedure(Procedure):
        toggle_par = IntegerParameter('toggle', default=100)
        x = Parameter('X', default='value', group_by='toggle_par',
                      group_condition=(lambda v: (50 < v < 90)))

    widget = InputsWidget(TestProcedure, inputs=('toggle_par', 'x'))
    qtbot.addWidget(widget)
    # Default 100 is outside (50, 90), so x starts hidden.
    assert widget.x.isVisibleTo(widget) is False
    widget.toggle_par.setValue(80)
    assert widget.x.isVisibleTo(widget) is True
    widget.toggle_par.setValue(40)
    assert widget.x.isVisibleTo(widget) is False
class CIFAR10Policy(object):
    """AutoAugment policy learned for CIFAR-10.

    Each SubPolicy pairs two operations, each with an application
    probability and a magnitude index; __call__ applies one sub-policy
    chosen uniformly at random.
    """

    def __init__(self, fillcolor=(128, 128, 128)):
        # 25 sub-policies: (p1, op1, mag1, p2, op2, mag2, fill).
        self.policies = [
            SubPolicy(0.1, 'invert', 7, 0.2, 'contrast', 6, fillcolor),
            SubPolicy(0.7, 'rotate', 2, 0.3, 'translateX', 9, fillcolor),
            SubPolicy(0.8, 'sharpness', 1, 0.9, 'sharpness', 3, fillcolor),
            SubPolicy(0.5, 'shearY', 8, 0.7, 'translateY', 9, fillcolor),
            SubPolicy(0.5, 'autocontrast', 8, 0.9, 'equalize', 2, fillcolor),
            SubPolicy(0.2, 'shearY', 7, 0.3, 'posterize', 7, fillcolor),
            SubPolicy(0.4, 'color', 3, 0.6, 'brightness', 7, fillcolor),
            SubPolicy(0.3, 'sharpness', 9, 0.7, 'brightness', 9, fillcolor),
            SubPolicy(0.6, 'equalize', 5, 0.5, 'equalize', 1, fillcolor),
            SubPolicy(0.6, 'contrast', 7, 0.6, 'sharpness', 5, fillcolor),
            SubPolicy(0.7, 'color', 7, 0.5, 'translateX', 8, fillcolor),
            SubPolicy(0.3, 'equalize', 7, 0.4, 'autocontrast', 8, fillcolor),
            SubPolicy(0.4, 'translateY', 3, 0.2, 'sharpness', 6, fillcolor),
            SubPolicy(0.9, 'brightness', 6, 0.2, 'color', 8, fillcolor),
            SubPolicy(0.5, 'solarize', 2, 0.0, 'invert', 3, fillcolor),
            SubPolicy(0.2, 'equalize', 0, 0.6, 'autocontrast', 0, fillcolor),
            SubPolicy(0.2, 'equalize', 8, 0.8, 'equalize', 4, fillcolor),
            SubPolicy(0.9, 'color', 9, 0.6, 'equalize', 6, fillcolor),
            SubPolicy(0.8, 'autocontrast', 4, 0.2, 'solarize', 8, fillcolor),
            SubPolicy(0.1, 'brightness', 3, 0.7, 'color', 0, fillcolor),
            SubPolicy(0.4, 'solarize', 5, 0.9, 'autocontrast', 3, fillcolor),
            SubPolicy(0.9, 'translateY', 9, 0.7, 'translateY', 9, fillcolor),
            SubPolicy(0.9, 'autocontrast', 2, 0.8, 'solarize', 3, fillcolor),
            SubPolicy(0.8, 'equalize', 8, 0.1, 'invert', 3, fillcolor),
            SubPolicy(0.7, 'translateY', 9, 0.9, 'autocontrast', 1, fillcolor),
        ]

    def __call__(self, img):
        """Apply one uniformly chosen sub-policy to ``img`` and return the result."""
        policy_idx = random.randint(0, (len(self.policies) - 1))
        return self.policies[policy_idx](img)

    def __repr__(self):
        return 'AutoAugment CIFAR10 Policy'
class BartTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BART tokenizer.

    Fixes applied to the garbled SOURCE: the ``mask_token`` getter lacked its
    ``@property`` decorator and the setter decorator was mangled to
    ``_token.setter`` — restored to the standard property/setter pair so
    ``tokenizer.mask_token`` works as attribute access again.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BartTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None,
                 errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>',
                 cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>',
                 add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file,
                         errors=errors, bos_token=bos_token, eos_token=eos_token,
                         sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
                         pad_token=pad_token, mask_token=mask_token,
                         add_prefix_space=add_prefix_space, trim_offsets=trim_offsets,
                         **kwargs)
        # Keep the backend pre-tokenizer's add_prefix_space in sync with ours.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # Mirror add_prefix_space/trim_offsets on the post-processor, if any.
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The serialized state stores these as lists; the constructor
            # expects tuples.
            if 'sep' in state:
                state['sep'] = tuple(state['sep'])
            if 'cls' in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token as a string, or None (logged) when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # lstrip=True so "<mask>" absorbs the preceding space and
        # "x <mask>" tokenizes like "x<mask>".
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the backend model's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence: <s> A </s>; pair: <s> A </s> B </s>.
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # BART does not use token type ids; return all zeros of the right length.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
class Latexify():
    """Render a pybamm model's equations as LaTeX (sympy-backed).

    NOTE(review): several LaTeX literals here are non-raw strings — e.g.
    ``'\quad'`` survives only because ``\q`` is not a Python escape, and the
    boundary-condition literal appears to have had its ``\\text`` collapsed
    into a tab + ``ext``. These likely should be raw strings upstream;
    confirm against the original module before touching them.
    """

    def __init__(self, model, filename=None, newline=True):
        # filename=None means "return the sympy objects"; otherwise render
        # to .tex/.pdf/image via sympy.preview.
        self.model = model
        self.filename = filename
        self.newline = newline

    def _get_geometry_displays(self, var):
        """Return LaTeX range strings (e.g. '\\quad 0 < x < L') for var's domain."""
        geo = []
        if (not var.domain):
            return geo
        rng_min = None
        rng_max = None
        name = None
        # Min bound and coordinate name come from the first subdomain ...
        for (var_name, rng) in self.model.default_geometry[var.domain[0]].items():
            name = re.findall('(.)_*.*', str(var_name))[0]
            rng_min = get_rng_min_max_name(rng, 'min')
        # ... and the max bound from the last subdomain.
        for (var_name, rng) in self.model.default_geometry[var.domain[(- 1)]].items():
            rng_max = get_rng_min_max_name(rng, 'max')
        geo_latex = f'\quad {rng_min} < {name} < {rng_max}'
        geo.append(geo_latex)
        return geo

    def _get_bcs_displays(self, var):
        """Return sympy equations describing var's boundary conditions, if any."""
        sympy = have_optional_dependency('sympy')
        bcs_eqn_list = []
        bcs = self.model.boundary_conditions.get(var, None)
        if bcs:
            var_name = next(iter(self.model.default_geometry[var.domain[0]].keys()))
            rng_left = next(iter(self.model.default_geometry[var.domain[0]].values()))
            rng_right = next(iter(self.model.default_geometry[var.domain[(- 1)]].values()))
            # Reduce e.g. 'x_n' to its base coordinate letter 'x'.
            var_name = re.findall('(.)_*.*', str(var_name))[0]
            rng_min = get_rng_min_max_name(rng_left, 'min')
            rng_max = get_rng_min_max_name(rng_right, 'max')
            for (side, rng) in [('left', rng_min), ('right', rng_max)]:
                (bc_value, bc_type) = bcs[side]
                bcs_side = sympy.latex(bc_value.to_equation())
                # NOTE(review): '\quad ext' appears to be a tab-mangled
                # '\\text' — verify upstream.
                bcs_side_latex = (bcs_side + f'\quad \text{{at }} {var_name} = {rng}')
                # Dirichlet conditions constrain the variable itself;
                # otherwise its gradient.
                if (bc_type == 'Dirichlet'):
                    lhs = sympy.Symbol(var.print_name)
                else:
                    lhs = sympy.Symbol(('\\nabla ' + var.print_name))
                bcs_eqn = sympy.Eq(lhs, sympy.Symbol(bcs_side_latex), evaluate=False)
                bcs_eqn_list.append(bcs_eqn)
        return bcs_eqn_list

    def _get_param_var(self, node):
        """DFS the expression tree; return (param equations, variable equations)
        mapping each named node's print_name to its definition."""
        sympy = have_optional_dependency('sympy')
        param_list = []
        var_list = []
        dfs_nodes = [node]
        while dfs_nodes:
            node = dfs_nodes.pop()
            if (getattr(node, 'print_name', None) is not None):
                # Re-render the node without its print_name so the definition
                # (not the alias) appears on the right-hand side.
                node_copy = copy.copy(node)
                node_copy.print_name = None
                node_copy_eqn = node_copy.to_equation()
                # Plain alphanumeric expressions are wrapped as \text to
                # avoid sympy re-interpreting them.
                if re.search('(^[0-9a-zA-Z-\\s.-\\[\\]()]*$)', str(node_copy_eqn)):
                    node_copy_latex = (('\\text{' + str(node_copy_eqn)) + '}')
                else:
                    node_copy_latex = sympy.latex(node_copy_eqn)
                node_latex = sympy.Eq(sympy.Symbol(node.print_name), sympy.Symbol(node_copy_latex))
                if isinstance(node_copy, (pybamm.Parameter, pybamm.Variable, pybamm.FunctionParameter, pybamm.Scalar)):
                    var_list.append(node_latex)
                else:
                    param_list.append(node_latex)
            dfs_nodes.extend(node.children)
        return (param_list, var_list)

    def latexify(self, output_variables=None):
        """Assemble the full LaTeX document (equations, initial/boundary
        conditions, parameters) and return or render it per self.filename."""
        sympy = have_optional_dependency('sympy')
        if (output_variables is None):
            # Default to showing the voltage when the model defines it.
            if ('Voltage [V]' in self.model.variables):
                output_variables = ['Voltage [V]']
            else:
                output_variables = []
        eqn_list = []
        param_list = []
        var_list = []
        eqn_list.append(sympy.Symbol((('\\large{\\underline{\\textbf{' + self.model.name) + ' Equations}}}')))
        for eqn_type in ['rhs', 'algebraic']:
            for (var, eqn) in getattr(self.model, eqn_type).items():
                var_symbol = sympy.Symbol(var.print_name)
                eqn_list.append(sympy.Symbol((('\\\\ \\textbf{' + str(var)) + '}')))
                # rhs entries are ODEs (d var/dt = ...); algebraic are 0 = ...
                ddt = sympy.Derivative(var_symbol, 't')
                if (eqn_type == 'rhs'):
                    lhs = ddt
                else:
                    lhs = 0
                # Spatially distributed ODEs print as partial derivatives.
                if ((len(var.domain) != 0) and (var.domain != 'current collector') and (eqn_type == 'rhs')):
                    lhs.force_partial = True
                bcs = self._get_bcs_displays(var)
                geo = self._get_geometry_displays(var)
                if geo:
                    rhs = sympy.latex(sympy.nsimplify(eqn.to_equation()))
                    rhs = sympy.Symbol((rhs + ','.join(geo)))
                else:
                    rhs = sympy.nsimplify(eqn.to_equation())
                if (not (eqn_type == 'algebraic')):
                    init = self.model.initial_conditions.get(var, None)
                    init_eqn = sympy.Eq(var_symbol, init.to_equation(), evaluate=False)
                    init_eqn = sympy.Symbol((sympy.latex(init_eqn) + '\\quad \\text{at}\\; t=0'))
                lhs_rhs = sympy.Eq(lhs, rhs, evaluate=False)
                sympy.init_printing(use_latex=True, latex_mode='plain', latex_printer=custom_print_func, use_unicode='True')
                eqn_list.append(lhs_rhs)
                if (not (eqn_type == 'algebraic')):
                    eqn_list.extend([init_eqn])
                eqn_list.extend(bcs)
                (list1, list2) = self._get_param_var(eqn)
                param_list.extend(list1)
                var_list.extend(list2)
        # Append requested output variables (e.g. voltage) as V = <expr>.
        for var_name in output_variables:
            var = self.model.variables[var_name].to_equation()
            var_eqn = sympy.Eq(sympy.Symbol('V'), var, evaluate=False)
            eqn_list.append(sympy.Symbol((('\\\\ \\textbf{' + var_name) + '}')))
            eqn_list.extend([var_eqn])
        # De-duplicate while preserving first-seen order.
        param_list = list(dict.fromkeys(param_list))
        var_list = list(dict.fromkeys(var_list))
        eqn_list.append(sympy.Symbol('\\\\ \\textbf{Parameters and Variables}'))
        eqn_list.extend(var_list)
        eqn_list.extend(param_list)
        eqn_new_line = sympy.Symbol(''.join(map(custom_print_func, eqn_list)))
        # Output selection: return objects, write .tex, write .pdf, or
        # render an image (retrying at default resolution on failure).
        if (self.filename is None):
            if (self.newline is True):
                return eqn_new_line
            else:
                return eqn_list
        elif self.filename.endswith('.tex'):
            return sympy.preview(eqn_new_line, outputTexFile=self.filename)
        elif (self.filename is not None):
            if self.filename.endswith('.pdf'):
                return sympy.preview(eqn_new_line, output='pdf', viewer='file', filename=self.filename, euler=False)
            else:
                try:
                    return sympy.preview(eqn_new_line, viewer='file', filename=self.filename, dvioptions=['-D', '900'], euler=False)
                except RuntimeError:
                    warnings.warn('RuntimeError - Setting the output resolution to default')
                    return sympy.preview(eqn_new_line, viewer='file', filename=self.filename, euler=False)
class HIDBS1(FinTS3Segment):
    """FinTS segment HIDBS1 (SEPA direct-debit order status/parameters).

    Field descriptions (``_d``) are the official German FinTS terms and are
    part of the wire definition — do not translate them in code.
    """

    # International account connection.
    account = DataElementGroupField(type=KTI1, _d='Kontoverbindung international')
    # Identifies the SEPA pain message schema in use.
    sepa_descriptor = DataElementField(type='an', max_length=256, _d='SEPA Descriptor')
    # Raw SEPA pain message payload.
    sepa_pain_message = DataElementField(type='bin', _d='SEPA pain message')
    # Order identification; optional.
    task_id = DataElementField(type='an', max_length=99, required=False, _d='Auftragsidentifikation')
    # Whether the order may be deleted ("Auftrag löschbar"); optional flag.
    task_cancelable = DataElementField(type='jn', required=False, _d='Auftrag loschbar')
    # Whether the order may be modified ("Auftrag änderbar"); optional flag.
    task_changeable = DataElementField(type='jn', required=False, _d='Auftrag anderbar')
def update_view_markers(view=None):
    """Refresh gutter markers (current gdb position, callstack, breakpoints)
    for the given view, defaulting to the active one."""
    global gdb_last_cursor_view
    if view is None:
        view = sublime.active_window().active_view()
    file_name = view.file_name()
    if file_name is not None:
        file_name = normalize(file_name)
    pos_scope = get_setting('position_scope', 'entity.name.class')
    pos_icon = get_setting('position_icon', 'bookmark')
    cursor_regions = []
    # Only mark the current line when this view shows the file gdb stopped in.
    if file_name == gdb_cursor and gdb_cursor_position != 0:
        point = view.text_point(gdb_cursor_position - 1, 0)
        cursor_regions.append(view.full_line(point))
    # Clear the marker from whichever view held it previously.
    if gdb_last_cursor_view is not None:
        gdb_last_cursor_view.erase_regions('sublimegdb.position')
    gdb_last_cursor_view = view
    view.add_regions('sublimegdb.position', cursor_regions, pos_scope, pos_icon, sublime.HIDDEN)
    gdb_callstack_view.update_marker(pos_scope, pos_icon)
    gdb_threads_view.update_marker(pos_scope, pos_icon)
    gdb_breakpoint_view.update_marker(view)
def test_zip_file_object_read(path_zip_file):
    """A VRT inside a zip opened from a file object exposes the expected bands."""
    with open(path_zip_file, 'rb') as zip_fileobj:
        with ZipMemoryFile(zip_fileobj) as zip_memfile:
            with zip_memfile.open('white-gemini-iv.vrt') as src:
                assert src.driver == 'VRT'
                assert src.count == 3
                assert src.dtypes == ('uint8', 'uint8', 'uint8')
                assert src.read().shape == (3, 768, 1024)
class Latency(commands.Cog):
    """Cog reporting bot command latency, site health and Discord gateway latency."""

    def __init__(self, bot: Bot) -> None:
        self.bot = bot

    # NOTE(review): the decorator line below is garbled in this file — it
    # looks like the remains of a command decorator plus a channel/role
    # whitelist override; restore from version control.
    () _whitelist(channels=(Channels.bot_commands,), roles=STAFF_PARTNERS_COMMUNITY_ROLES)
    async def ping(self, ctx: commands.Context) -> None:
        """Send an embed with bot, site and Discord-gateway latency readings."""
        # Time between the invoking message's creation and now, in ms.
        bot_ping = ((arrow.utcnow() - ctx.message.created_at).total_seconds() * 1000)
        if (bot_ping <= 0):
            # A negative delta means local clock skew; report that instead.
            bot_ping = 'Your clock is out of sync, could not calculate ping.'
        else:
            bot_ping = f'{bot_ping:.{ROUND_LATENCY}f} ms'
        try:
            # NOTE(review): the attribute access after `self.bot.` is truncated
            # in this file — presumably an HTTP GET of the site's health
            # endpoint via the bot's aiohttp session; restore from VCS.
            async with self.bot. as request:
                request.raise_for_status()
                site_status = 'Healthy'
        except client_exceptions.ClientResponseError as e:
            site_status = f'The site returned an error in the response: ({e.status}) {e}'
        except client_exceptions.ClientConnectionError:
            site_status = 'Could not establish connection with the site.'
        # Websocket heartbeat latency reported by the discord library, in ms.
        discord_ping = f'{(self.bot.latency * 1000):.{ROUND_LATENCY}f} ms'
        embed = Embed(title='Pong!')
        for (desc, latency) in zip(DESCRIPTIONS, [bot_ping, site_status, discord_ping], strict=True):
            embed.add_field(name=desc, value=latency, inline=False)
        (await ctx.send(embed=embed))
def get_markdown_docstring_lines(cls: Type) -> List[str]:
    """Render *cls*'s Google-style docstring as markdown lines, prefixed
    with a ``## `ClassName` `` heading; ``:py:func:`` roles become plain
    backtick code spans."""
    raw = cls.__doc__ or ''
    converter = _GoogleDocstringToMarkdown(inspect.cleandoc(raw), config=Config(), what='class')
    heading = f'## `{cls.__name__}`'
    role = re.compile(r':py:func:`(\w+)`')
    return [role.sub(r'`\1`', line) for line in [heading, *converter.lines()]]
class MetafileLister(ScriptBase):
    """CLI command that lists or dumps one or more torrent metafiles."""

    ARGS_HELP = '<metafile>...'

    def add_options(self):
        """Register the command-line options for this command."""
        self.add_bool_option('--reveal', help='show full announce URL including keys')
        self.add_bool_option('--raw', help="print the metafile's raw content in all detail")
        self.add_bool_option('--json', help='print the unmasked metafile, serialized to JSON')
        self.add_bool_option('-V', '--skip-validation', help='show broken metafiles with an invalid structure')
        self.add_value_option('-o', '--output', 'KEY,KEY1.KEY2,...', action='append', default=[], help='select fields to print, output is separated by TABs; note that __file__ is the path to the metafile, __hash__ is the info hash, and __size__ is the data size in bytes')

    def mainloop(self):
        """Process each metafile argument and print the selected view
        (raw/JSON dump, selected fields, or the default masked listing)."""
        if (not self.args):
            self.parser.print_help()
            self.parser.exit()
        for (idx, filename) in enumerate(self.args):
            torrent = metafile.Metafile(filename)
            # Visual separator between multiple files (not for field output).
            if (idx and (not self.options.output)):
                print('')
                print(('~' * 79))
            try:
                try:
                    # Load and (unless -V) validate the metafile structure.
                    data = metafile.checked_open(filename, log=(self.LOG if self.options.skip_validation else None), quiet=(self.options.quiet and (self.options.output or self.options.raw)))
                except EnvironmentError as exc:
                    self.fatal(("Can't read '%s' (%s)" % (filename, str(exc).replace((": '%s'" % filename), ''))))
                    # fatal() may not exit in all contexts; re-raise to be safe.
                    raise
                listing = None
                if (self.options.raw or self.options.json):
                    if ((not self.options.reveal) and ('info' in data)):
                        # Replace the bulky piece-hash blob with a summary.
                        data['info']['pieces'] = ('<%d piece hashes>' % (len(data['info']['pieces']) / len(hashlib.sha1().digest())))
                    if self.options.json:
                        listing = json.dumps(data, default=repr, indent=4, sort_keys=True)
                    else:
                        # Masked printer hides sensitive values unless --reveal.
                        pprinter = (pprint.PrettyPrinter if self.options.reveal else metafile.MaskingPrettyPrinter)()
                        listing = pprinter.pformat(data)
                elif self.options.output:
                    def splitter(fields):
                        # Flatten repeated -o options and comma-separated lists.
                        for flist in fields:
                            for field in flist.split(','):
                                (yield field.strip())
                    # Synthetic fields available in addition to raw keys.
                    data['__file__'] = filename
                    if ('info' in data):
                        data['__hash__'] = metafile.info_hash(data)
                        data['__size__'] = metafile.data_size(data)
                    values = []
                    for field in splitter(self.options.output):
                        try:
                            val = data
                            # Dotted names traverse nested dicts (e.g. info.name).
                            for key in field.split('.'):
                                val = val[key]
                        except KeyError as exc:
                            self.LOG.error(('%s: Field %r not found (%s)' % (filename, field, exc)))
                            # break skips the for-else, so nothing is printed.
                            break
                        else:
                            values.append(('%s' % val))
                    else:
                        listing = '\t'.join((fmt.to_utf8(x) for x in values))
                else:
                    listing = '\n'.join(torrent.listing(masked=(not self.options.reveal)))
            except (ValueError, KeyError, bencode.BencodeError) as exc:
                if self.options.debug:
                    raise
                self.LOG.warning(('Bad metafile %r (%s: %s)' % (filename, type(exc).__name__, exc)))
            else:
                if (listing is not None):
                    print(fmt.to_utf8(listing))
def _imagenet32(split: str) -> Dataset:
    """Build the 32x32 downsampled ImageNet dataset for the given split.

    Args:
        split: ``'train'`` (random crop + horizontal flip augmentation) or
            ``'test'`` (plain ``ToTensor``).

    Returns:
        The corresponding ``ImageNetDS`` dataset rooted at
        ``$PT_DATA_DIR/Imagenet32`` (default ``datasets/Imagenet32``).

    Raises:
        ValueError: for any other split name (the previous version silently
            fell through and returned ``None``).
    """
    dataset_path = os.path.join(os.getenv('PT_DATA_DIR', 'datasets'), 'Imagenet32')
    if split == 'train':
        transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ])
        return ImageNetDS(dataset_path, 32, train=True, transform=transform)
    if split == 'test':
        return ImageNetDS(dataset_path, 32, train=False, transform=transforms.ToTensor())
    raise ValueError(f"unknown split: {split!r} (expected 'train' or 'test')")
class TriviallyDoubleCommutesDualBasisTest(unittest.TestCase):
    """Tests for ``trivially_double_commutes_dual_basis``.

    Each case supplies three single-term dual-basis FermionOperators
    (a, b, c) and checks whether the cheap structural test alone can
    certify that the double commutator [a, [b, c]] vanishes (True) or
    cannot (False). Method names describe the operator pattern exercised.
    """

    def test_trivially_double_commutes_no_intersection(self):
        self.assertTrue(trivially_double_commutes_dual_basis(FermionOperator('3^ 4'), FermionOperator('3^ 2^ 3 2'), FermionOperator('4^ 1')))

    def test_no_trivial_double_commute_with_intersection(self):
        self.assertFalse(trivially_double_commutes_dual_basis(FermionOperator('4^ 2'), FermionOperator('2^ 1'), FermionOperator('5^ 2^ 5 2')))

    def test_trivially_double_commutes_both_single_number_operators(self):
        self.assertTrue(trivially_double_commutes_dual_basis(FermionOperator('4^ 3'), FermionOperator('3^ 3'), FermionOperator('3^ 3')))

    def test_trivially_double_commutes_nonintersecting_single_number_ops(self):
        self.assertTrue(trivially_double_commutes_dual_basis(FermionOperator('3^ 2'), FermionOperator('2^ 2'), FermionOperator('3^ 3')))

    def test_trivially_double_commutes_both_double_number_operators(self):
        self.assertTrue(trivially_double_commutes_dual_basis(FermionOperator('4^ 3'), FermionOperator('3^ 2^ 3 2'), FermionOperator('3^ 1^ 3 1')))

    def test_trivially_double_commutes_one_double_number_operators(self):
        self.assertTrue(trivially_double_commutes_dual_basis(FermionOperator('4^ 3'), FermionOperator('3^ 2^ 3 2'), FermionOperator('3^ 3')))

    def test_no_trivial_double_commute_right_hopping_operator(self):
        self.assertFalse(trivially_double_commutes_dual_basis(FermionOperator('4^ 3'), FermionOperator('3^ 1^ 3 1'), FermionOperator('3^ 2')))

    def test_no_trivial_double_commute_left_hopping_operator(self):
        self.assertFalse(trivially_double_commutes_dual_basis(FermionOperator('4^ 3'), FermionOperator('3^ 2'), FermionOperator('3^ 3')))

    def test_trivially_double_commutes_both_hopping_create_same_mode(self):
        self.assertTrue(trivially_double_commutes_dual_basis(FermionOperator('3^ 3'), FermionOperator('3^ 2'), FermionOperator('3^ 1')))

    def test_trivially_double_commutes_both_hopping_annihilate_same_mode(self):
        self.assertTrue(trivially_double_commutes_dual_basis(FermionOperator('1^ 1'), FermionOperator('4^ 1'), FermionOperator('3^ 1')))

    def test_trivially_double_commutes_hopping_and_number_on_same_modes(self):
        self.assertTrue(trivially_double_commutes_dual_basis(FermionOperator('4^ 3'), FermionOperator('4^ 1'), FermionOperator('4^ 1^ 4 1')))

    def test_trivially_double_commutes_no_intersection_a_with_bc(self):
        self.assertTrue(trivially_double_commutes_dual_basis(FermionOperator('5^ 2'), FermionOperator('3^ 1'), FermionOperator('4^ 1^ 4 1')))

    # NOTE(review): the next two cases repeat the operator triple of the one
    # above; presumably they were meant to vary the operators — confirm.
    def test_trivially_double_commutes_double_create_in_a_and_b(self):
        self.assertTrue(trivially_double_commutes_dual_basis(FermionOperator('5^ 2'), FermionOperator('3^ 1'), FermionOperator('4^ 1^ 4 1')))

    def test_trivially_double_commutes_double_annihilate_in_a_and_c(self):
        self.assertTrue(trivially_double_commutes_dual_basis(FermionOperator('5^ 2'), FermionOperator('3^ 1'), FermionOperator('4^ 1^ 4 1')))

    def test_no_trivial_double_commute_double_annihilate_with_create(self):
        self.assertFalse(trivially_double_commutes_dual_basis(FermionOperator('5^ 2'), FermionOperator('2^ 1'), FermionOperator('4^ 2')))

    def test_trivially_double_commutes_excess_create(self):
        self.assertTrue(trivially_double_commutes_dual_basis(FermionOperator('5^ 2'), FermionOperator('5^ 5'), FermionOperator('5^ 1')))

    def test_trivially_double_commutes_excess_annihilate(self):
        self.assertTrue(trivially_double_commutes_dual_basis(FermionOperator('5^ 2'), FermionOperator('3^ 2'), FermionOperator('2^ 2')))
def stream_run(params):
    """Streaming train/evaluate loop for delayed-feedback conversion models.

    Loads the pretrained model(s) required by ``params['method']``, then for
    each (train, test) chunk pair in the Criteo stream: trains on the chunk
    and evaluates, printing per-epoch and size-weighted moving-average
    metrics (AUC, PR-AUC, log loss).
    """
    (train_stream, test_stream) = get_criteo_dataset_stream(params)
    # Warm-start the main model from the checkpoint matching the method family.
    if (params['method'] == 'DFM'):
        model = get_model('MLP_EXP_DELAY', params)
        model.load_weights(params['pretrain_dfm_model_ckpt_path'])
    else:
        model = get_model('MLP_SIG', params)
        model.load_weights(params['pretrain_baseline_model_ckpt_path'])
    models = {'model': model}
    # Auxiliary correction models, selected by method.
    if (params['method'] in ['ES-DFM', 'DEFUSE', 'ES-DFM_1d', 'DEFUSE_1d', 'DEFUSE_3d', 'DEFUSE_7d', 'DEFUSE_14d']):
        esdfm_model = get_model('MLP_tn_dp', params)
        esdfm_model.load_weights(params['pretrain_esdfm_model_ckpt_path'])
        models['esdfm'] = esdfm_model
    elif (params['method'] in ['DEFER', 'DEFER_unbiased']):
        defer_model = get_model('MLP_dp', params)
        defer_model.load_weights(params['pretrain_defer_model_ckpt_path'])
        models['defer'] = defer_model
        if (params['method'] == 'DEFER_unbiased'):
            esdfm_model = get_model('MLP_tn_dp', params)
            esdfm_model.load_weights(params['pretrain_esdfm_model_ckpt_path'])
            models['esdfm'] = esdfm_model
    elif (params['method'] == 'DFM'):
        # NOTE(review): this reloads the same DFM checkpoint already loaded
        # at the top and overwrites models['model'] with an equivalent model;
        # looks redundant — confirm before removing.
        dfm_model = get_model('MLP_EXP_DELAY', params)
        dfm_model.load_weights(params['pretrain_dfm_model_ckpt_path'])
        models['model'] = dfm_model
    optimizer = get_optimizer(params['optimizer'], params)
    # Moving averages weighted by each epoch's test-chunk size.
    auc_ma = ScalarMovingAverage()
    nll_ma = ScalarMovingAverage()
    prauc_ma = ScalarMovingAverage()
    for (ep, (train_dataset, test_dataset)) in enumerate(zip(train_stream, test_stream)):
        # FNW/FNC variants train without delay/in-window labels.
        if (params['method'] in ['FNW_1d', 'FNC_1d']):
            train_data = tf.data.Dataset.from_tensor_slices((dict(train_dataset['x']), train_dataset['labels']))
        else:
            train_data = tf.data.Dataset.from_tensor_slices((dict(train_dataset['x']), train_dataset['labels'], train_dataset['delay_labels'], train_dataset['inw_labels']))
        train_data = train_data.batch(params['batch_size']).prefetch(1)
        train(models, optimizer, train_data, params)
        test_batch_size = test_dataset['x'].shape[0]
        test_data = tf.data.Dataset.from_tensor_slices((dict(test_dataset['x']), test_dataset['labels']))
        test_data = test_data.batch(params['batch_size']).prefetch(1)
        (auc, prauc, llloss) = test(model, test_data, params)
        print('epoch {}, auc {}, prauc {}, llloss {}'.format(ep, auc, prauc, llloss))
        # Weight each epoch's metric by its test size for the running average.
        auc_ma.add((auc * test_batch_size), test_batch_size)
        nll_ma.add((llloss * test_batch_size), test_batch_size)
        prauc_ma.add((prauc * test_batch_size), test_batch_size)
        print('epoch {}, auc_ma {}, prauc_ma {}, llloss_ma {}'.format(ep, auc_ma.get(), prauc_ma.get(), nll_ma.get()))
# NOTE(review): the line below looks like the argument list of a FastAPI
# route decorator whose head (e.g. `@router.get`) was lost from this file —
# restore from version control; as written this is not valid Python.
('/reservations/{reservation_number}', status_code=status.HTTP_200_OK, responses={status.HTTP_200_OK: {'model': ReservationResponse}, status.HTTP_404_NOT_FOUND: {'model': BaseResponse}})
def get_reservation(reservation_number: str, reservation_query: ReservationQueryUseCase=Depends(Provide[AppContainer.reception.reservation_query])) -> ReservationResponse:
    """Look up a reservation by its number; responds 404 when it does not exist."""
    try:
        reservation: Reservation = reservation_query.get_reservation(reservation_number=reservation_number)
    except ReservationNotFoundException as e:
        # Map the domain-level "not found" onto an HTTP 404.
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=e.message)
    return ReservationResponse(detail='ok', result=ReservationSchema.build(reservation=reservation))
def infer_numpy_ndarray(node, context: (InferenceContext | None)=None):
    """Astroid inference tip for ``numpy.ndarray``.

    Builds the source of a stub ``ndarray`` class whose attributes and
    methods return plausible placeholder values (mostly fresh ndarrays) so
    that static inference of ndarray expressions succeeds, parses it with
    ``extract_node`` and returns an iterator over the inferred class.
    """
    # NOTE(review): the indentation inside this stub-source string appears to
    # have been collapsed to single spaces in this copy; extract_node needs a
    # validly indented class body — compare against upstream astroid's
    # brain_numpy_ndarray before relying on it.
    ndarray = '\n class ndarray(object):\n def __init__(self, shape, dtype=float, buffer=None, offset=0,\n strides=None, order=None):\n self.T = numpy.ndarray([0, 0])\n self.base = None\n self.ctypes = None\n self.data = None\n self.dtype = None\n self.flags = None\n # Should be a numpy.flatiter instance but not available for now\n # Putting an array instead so that iteration and indexing are authorized\n self.flat = np.ndarray([0, 0])\n self.imag = np.ndarray([0, 0])\n self.itemsize = None\n self.nbytes = None\n self.ndim = None\n self.real = np.ndarray([0, 0])\n self.shape = numpy.ndarray([0, 0])\n self.size = None\n self.strides = None\n\n def __abs__(self): return numpy.ndarray([0, 0])\n def __add__(self, value): return numpy.ndarray([0, 0])\n def __and__(self, value): return numpy.ndarray([0, 0])\n def __array__(self, dtype=None): return numpy.ndarray([0, 0])\n def __array_wrap__(self, obj): return numpy.ndarray([0, 0])\n def __contains__(self, key): return True\n def __copy__(self): return numpy.ndarray([0, 0])\n def __deepcopy__(self, memo): return numpy.ndarray([0, 0])\n def __divmod__(self, value): return (numpy.ndarray([0, 0]), numpy.ndarray([0, 0]))\n def __eq__(self, value): return numpy.ndarray([0, 0])\n def __float__(self): return 0.\n def __floordiv__(self): return numpy.ndarray([0, 0])\n def __ge__(self, value): return numpy.ndarray([0, 0])\n def __getitem__(self, key): return uninferable\n def __gt__(self, value): return numpy.ndarray([0, 0])\n def __iadd__(self, value): return numpy.ndarray([0, 0])\n def __iand__(self, value): return numpy.ndarray([0, 0])\n def __ifloordiv__(self, value): return numpy.ndarray([0, 0])\n def __ilshift__(self, value): return numpy.ndarray([0, 0])\n def __imod__(self, value): return numpy.ndarray([0, 0])\n def __imul__(self, value): return numpy.ndarray([0, 0])\n def __int__(self): return 0\n def __invert__(self): return numpy.ndarray([0, 0])\n def __ior__(self, value): return numpy.ndarray([0, 0])\n def __ipow__(self, value): return numpy.ndarray([0, 0])\n def __irshift__(self, value): return numpy.ndarray([0, 0])\n def __isub__(self, value): return numpy.ndarray([0, 0])\n def __itruediv__(self, value): return numpy.ndarray([0, 0])\n def __ixor__(self, value): return numpy.ndarray([0, 0])\n def __le__(self, value): return numpy.ndarray([0, 0])\n def __len__(self): return 1\n def __lshift__(self, value): return numpy.ndarray([0, 0])\n def __lt__(self, value): return numpy.ndarray([0, 0])\n def __matmul__(self, value): return numpy.ndarray([0, 0])\n def __mod__(self, value): return numpy.ndarray([0, 0])\n def __mul__(self, value): return numpy.ndarray([0, 0])\n def __ne__(self, value): return numpy.ndarray([0, 0])\n def __neg__(self): return numpy.ndarray([0, 0])\n def __or__(self, value): return numpy.ndarray([0, 0])\n def __pos__(self): return numpy.ndarray([0, 0])\n def __pow__(self): return numpy.ndarray([0, 0])\n def __repr__(self): return str()\n def __rshift__(self): return numpy.ndarray([0, 0])\n def __setitem__(self, key, value): return uninferable\n def __str__(self): return str()\n def __sub__(self, value): return numpy.ndarray([0, 0])\n def __truediv__(self, value): return numpy.ndarray([0, 0])\n def __xor__(self, value): return numpy.ndarray([0, 0])\n def all(self, axis=None, out=None, keepdims=False): return np.ndarray([0, 0])\n def any(self, axis=None, out=None, keepdims=False): return np.ndarray([0, 0])\n def argmax(self, axis=None, out=None): return np.ndarray([0, 0])\n def argmin(self, axis=None, out=None): return np.ndarray([0, 0])\n def argpartition(self, kth, axis=-1, kind=\'introselect\', order=None): return np.ndarray([0, 0])\n def argsort(self, axis=-1, kind=\'quicksort\', order=None): return np.ndarray([0, 0])\n def astype(self, dtype, order=\'K\', casting=\'unsafe\', subok=True, copy=True): return np.ndarray([0, 0])\n def byteswap(self, inplace=False): return np.ndarray([0, 0])\n def choose(self, choices, out=None, mode=\'raise\'): return np.ndarray([0, 0])\n def clip(self, min=None, max=None, out=None): return np.ndarray([0, 0])\n def compress(self, condition, axis=None, out=None): return np.ndarray([0, 0])\n def conj(self): return np.ndarray([0, 0])\n def conjugate(self): return np.ndarray([0, 0])\n def copy(self, order=\'C\'): return np.ndarray([0, 0])\n def cumprod(self, axis=None, dtype=None, out=None): return np.ndarray([0, 0])\n def cumsum(self, axis=None, dtype=None, out=None): return np.ndarray([0, 0])\n def diagonal(self, offset=0, axis1=0, axis2=1): return np.ndarray([0, 0])\n def dot(self, b, out=None): return np.ndarray([0, 0])\n def dump(self, file): return None\n def dumps(self): return str()\n def fill(self, value): return None\n def flatten(self, order=\'C\'): return np.ndarray([0, 0])\n def getfield(self, dtype, offset=0): return np.ndarray([0, 0])\n def item(self, *args): return uninferable\n def itemset(self, *args): return None\n def max(self, axis=None, out=None): return np.ndarray([0, 0])\n def mean(self, axis=None, dtype=None, out=None, keepdims=False): return np.ndarray([0, 0])\n def min(self, axis=None, out=None, keepdims=False): return np.ndarray([0, 0])\n def newbyteorder(self, new_order=\'S\'): return np.ndarray([0, 0])\n def nonzero(self): return (1,)\n def partition(self, kth, axis=-1, kind=\'introselect\', order=None): return None\n def prod(self, axis=None, dtype=None, out=None, keepdims=False): return np.ndarray([0, 0])\n def ptp(self, axis=None, out=None): return np.ndarray([0, 0])\n def put(self, indices, values, mode=\'raise\'): return None\n def ravel(self, order=\'C\'): return np.ndarray([0, 0])\n def repeat(self, repeats, axis=None): return np.ndarray([0, 0])\n def reshape(self, shape, order=\'C\'): return np.ndarray([0, 0])\n def resize(self, new_shape, refcheck=True): return None\n def round(self, decimals=0, out=None): return np.ndarray([0, 0])\n def searchsorted(self, v, side=\'left\', sorter=None): return np.ndarray([0, 0])\n def setfield(self, val, dtype, offset=0): return None\n def setflags(self, write=None, align=None, uic=None): return None\n def sort(self, axis=-1, kind=\'quicksort\', order=None): return None\n def squeeze(self, axis=None): return np.ndarray([0, 0])\n def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): return np.ndarray([0, 0])\n def sum(self, axis=None, dtype=None, out=None, keepdims=False): return np.ndarray([0, 0])\n def swapaxes(self, axis1, axis2): return np.ndarray([0, 0])\n def take(self, indices, axis=None, out=None, mode=\'raise\'): return np.ndarray([0, 0])\n def tobytes(self, order=\'C\'): return b\'\'\n def tofile(self, fid, sep="", format="%s"): return None\n def tolist(self, ): return []\n def tostring(self, order=\'C\'): return b\'\'\n def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): return np.ndarray([0, 0])\n def transpose(self, *axes): return np.ndarray([0, 0])\n def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): return np.ndarray([0, 0])\n def view(self, dtype=None, type=None): return np.ndarray([0, 0])\n '
    if numpy_supports_type_hints():
        # Newer numpy allows `ndarray[...]` subscripting; expose it on the stub.
        ndarray += '\n \n def __class_getitem__(cls, value):\n return cls\n '
    node = extract_node(ndarray)
    return node.infer(context=context)
class TestStatCall(unittest.TestCase):
    """Verify that file_info._stat_call returns SoX's raw `stat` report for a known file."""

    def test_stat_call(self):
        report = 'Samples read: 627456\nLength (seconds): 14.228027\nScaled by: .0\nMaximum amplitude: 0.010895\nMinimum amplitude: -0.004883\nMidline amplitude: 0.003006\nMean norm: 0.000137\nMean amplitude: -0.000062\nRMS amplitude: 0.000200\nMaximum delta: 0.015778\nMinimum delta: 0.000000\nMean delta: 0.000096\nRMS delta: 0.000124\nRough frequency: 4349\nVolume adjustment: 91.787\n'
        self.assertEqual(report, file_info._stat_call(SILENT_FILE))
def test_creating_simple_scenario():
    """A bare Scenario gets its constructor values plus sane defaults
    (empty tags/preconditions, no parent or background)."""
    scenario = Scenario(
        1, 'Scenario', 'I am a Scenario', 'foo.feature', 1,
        parent=None, tags=None, preconditions=None, background=None,
    )
    assert (scenario.id, scenario.keyword, scenario.sentence) == (1, 'Scenario', 'I am a Scenario')
    assert (scenario.path, scenario.line) == ('foo.feature', 1)
    assert scenario.parent is None
    assert scenario.background is None
    # None tags/preconditions are normalised to empty lists.
    assert scenario.tags == []
    assert scenario.preconditions == []
def user_action_for_spam(user, threshold):
    """Deactivate a user whose spam-flagged proposal comments reach
    *threshold*; re-activate an inactive user once the count is below it."""
    # Number of this user's comments that moderators marked as spam.
    total_spam = ProposalComment.objects.filter(commenter=user, is_spam=True).count()
    if (total_spam >= threshold):
        if (user.is_active is True):
            user.is_active = False
            user.save()
    elif (user.is_active is False):
        # NOTE(review): nesting reconstructed from a whitespace-mangled
        # source; this reads as "re-activate when back below threshold" —
        # confirm against version control.
        user.is_active = True
        user.save()
def download_from_google(token_id, filename):
    """Download a Google-Drive-hosted ``.tar.gz`` archive and unpack it so
    *filename* becomes the extracted dataset directory.

    Args:
        token_id: Google Drive file id of the archive.
        filename: target directory path; its basename must match the
            top-level directory inside the archive.
    """
    print('Downloading %s ...' % os.path.basename(filename))
    # NOTE(review): the endpoint literal was truncated in this file; this is
    # the canonical Google Drive direct-download endpoint used with the
    # get_confirm_token/save_response_content idiom — confirm.
    url = 'https://docs.google.com/uc?export=download'
    destination = filename + '.tar.gz'
    session = requests.Session()
    response = session.get(url, params={'id': token_id, 'confirm': 't'}, stream=True)
    # Large files answer with a confirmation token instead of content;
    # repeat the request with that token when present.
    token = get_confirm_token(response)
    if token:
        response = session.get(url, params={'id': token_id, 'confirm': token}, stream=True)
    save_response_content(response, destination)
    print('Extracting %s ...' % destination)
    # `with` guarantees the archive handle is closed (the original leaked it).
    with tarfile.open(destination, 'r:gz') as archive:
        archive.extractall(filename)
    os.remove(destination)
    # The archive contains a directory named like the target; hoist its
    # contents up one level so *filename* is the dataset root itself.
    tmp = filename + '_tmp'
    os.rename(filename, tmp)
    os.rename(os.path.join(tmp, os.path.basename(filename)), filename)
    os.rmdir(tmp)
class RCC_APB2LPENR(IntEnum):
    """Bit masks for the STM32 RCC APB2 peripheral-clock-enable-in-
    low-power-mode register (RCC_APB2LPENR).

    Each member is a single-bit mask at the peripheral's bit position;
    gaps correspond to reserved bits. Presumably targets an STM32F4-class
    part given the peripheral set — confirm against the reference manual.
    """
    TIM1LPEN = (1 << 0)     # TIM1 timer clock enable during Sleep mode
    USART1LPEN = (1 << 4)   # USART1
    USART6LPEN = (1 << 5)   # USART6
    ADC1LPEN = (1 << 8)     # ADC1
    SDIOLPEN = (1 << 11)    # SDIO
    SPI1LPEN = (1 << 12)    # SPI1
    SPI4LPEN = (1 << 13)    # SPI4
    SYSCFGLPEN = (1 << 14)  # SYSCFG controller
    TIM9LPEN = (1 << 16)    # TIM9
    TIM10LPEN = (1 << 17)   # TIM10
    TIM11LPEN = (1 << 18)   # TIM11
    SPI5LPEN = (1 << 20)    # SPI5
def override_services(config, override_services):
    """Apply per-service autostart overrides to *config* in place.

    Args:
        config: mapping of service name -> settings dict; a matched service
            gets its 'autostart' key set to 'true' or 'false'.
        override_services: iterable of '<service>=true' / '<service>=false'
            strings. A falsy value (empty list or None) applies nothing —
            the original crashed on None.

    Notes:
        If both '<s>=true' and '<s>=false' are present, 'true' wins
        (matches the original elif ordering).
    """
    if not override_services:
        return
    # One set build instead of an O(n) list scan per service.
    overrides = set(override_services)
    for service, settings in config.items():
        if (service + '=true') in overrides:
            settings['autostart'] = 'true'
        elif (service + '=false') in overrides:
            settings['autostart'] = 'false'
class GreetExecutor(ActionExecutor):
    """Executes the "greet" script action: valid only when the target graph
    node is a person; greeting itself changes no environment state."""

    def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo, char_index, modify=True, in_place=False):
        # Generator protocol: yields the next state on success; yields
        # nothing on failure (errors are recorded on `info`).
        current_line = script[0]
        info.set_current_line(current_line)
        node = state.get_state_node(current_line.object())
        if (node is None):
            info.object_found_error()
        elif self.check_if_person(state, node, info):
            if modify:
                # Greeting has no effect on the graph; advance with an
                # empty changelist.
                (yield state.change_state([], in_place=in_place))
            else:
                (yield state)

    def check_if_person(self, state: EnvironmentState, node: GraphNode, info: ExecutionInfo):
        """Return True iff *node* has the PERSON property; otherwise record
        an error on *info* and return False."""
        if (Property.PERSON not in node.properties):
            info.error('{} is not person', node)
            return False
        return True
class Logger():
    """Dual-destination logger for rdiff-backup.

    Writes verbosity-gated messages to the terminal and, once a logfile is
    opened, to that file — either locally or by forwarding over the remote
    connection that owns the file. Verbosity is numeric and compared
    against the module's ERROR/INFO/DEBUG constants.
    """

    def __init__(self):
        self.log_file_open = None   # truthy while any connection has the logfile open
        self.log_file_local = None  # truthy when this process owns the logfile handle
        # File and terminal verbosity both default from the environment.
        self.verbosity = self.term_verbosity = int(os.getenv('RDIFF_BACKUP_VERBOSITY', '3'))
        self.termverbset = None     # set once terminal verbosity was set explicitly

    def __call__(self, message, verbosity):
        """Log *message* to file and/or terminal when *verbosity* passes the gates."""
        if ((verbosity > self.verbosity) and (verbosity > self.term_verbosity)):
            return
        if (not isinstance(message, (bytes, str))):
            raise TypeError('You can only log bytes or str, and not {lt}'.format(lt=type(message)))
        if (verbosity <= self.verbosity):
            self.log_to_file(message, verbosity)
        if (verbosity <= self.term_verbosity):
            self.log_to_term(message, verbosity)

    def log_to_file(self, message, verbosity=None):
        """Write to the logfile — locally when owned here, else forward to the owner."""
        if self.log_file_open:
            if self.log_file_local:
                tmpstr = self._format(message, self.verbosity, verbosity)
                self.logfp.write(_to_bytes(tmpstr))
                self.logfp.flush()
            else:
                self.log_file_conn.log.Log.log_to_file(message, verbosity)

    def log_to_term(self, message, verbosity):
        """Write to stderr (errors/warnings or server mode) or stdout."""
        if ((verbosity <= 2) or Globals.server):
            termfp = sys.stderr
        else:
            termfp = sys.stdout
        tmpstr = self._format(message, self.term_verbosity, verbosity)
        # Wrap single-line messages to the terminal width in this mode.
        if ((self.verbosity <= DEBUG) and ('\n' not in tmpstr[:(- 1)])):
            termfp.write((textwrap.fill(tmpstr, subsequent_indent=(' ' * 9), break_long_words=False, break_on_hyphens=False, width=(shutil.get_terminal_size().columns - 1)) + '\n'))
        else:
            termfp.write(tmpstr)

    def conn(self, direction, result, req_num):
        """Log one remote-connection request/response exchange at DEBUG level
        (skipped unless terminal verbosity is above DEBUG)."""
        if (self.term_verbosity <= DEBUG):
            return
        if (type(result) is bytes):
            result_repr = repr(result)
        else:
            result_repr = str(result)
        if Globals.server:
            conn_str = 'Server'
        else:
            conn_str = 'Client'
        self.log_to_term('{cs} {di} ({rn}): {rr}'.format(cs=conn_str, di=direction, rn=req_num, rr=result_repr), DEBUG)

    def FatalError(self, message, return_code=1):
        """Print a fatal error to the terminal and exit the process."""
        self.log_to_term('Fatal Error: {em}'.format(em=message), ERROR)
        sys.exit(return_code)

    def exception(self, only_terminal=0, verbosity=INFO):
        """Log the active exception with traceback.

        only_terminal: 0 = normal logging (file+terminal), 1 = normal only
        if a logfile is open else terminal, 2 = terminal only.
        """
        assert (only_terminal in (0, 1, 2)), "Variable only_terminal '{ot}' must be one of [012]".format(ot=only_terminal)
        if ((only_terminal == 0) or ((only_terminal == 1) and self.log_file_open)):
            logging_func = self.__call__
        else:
            logging_func = self.log_to_term
            # NOTE(review): nesting reconstructed from a whitespace-mangled
            # source; this early return matching upstream rdiff-backup —
            # confirm against version control.
            if (verbosity >= self.term_verbosity):
                return
        exception_string = self._exception_to_string()
        try:
            logging_func(exception_string, verbosity)
        except OSError:
            print('OS error while trying to log exception!')
            print(exception_string)

    def setverbosity(self, verbosity_string):
        """Set file verbosity from a string; mirrors to the terminal unless
        terminal verbosity was set explicitly."""
        try:
            self.verbosity = int(verbosity_string)
        except ValueError:
            Log.FatalError("Verbosity must be a number, received '{vs}' instead".format(vs=verbosity_string))
        if (not self.termverbset):
            self.term_verbosity = self.verbosity

    def setterm_verbosity(self, termverb_string):
        """Set terminal verbosity explicitly from a string."""
        try:
            self.term_verbosity = int(termverb_string)
        except ValueError:
            Log.FatalError("Terminal verbosity must be a number, received '{tv}' instead".format(tv=termverb_string))
        self.termverbset = 1

    def open_logfile(self, log_rp):
        """Open *log_rp* as the logfile: locally on its owning connection,
        and announce the owner to every connection."""
        assert (not self.log_file_open), "Can't open an already opened logfile"
        log_rp.conn.log.Log.open_logfile_local(log_rp)
        for conn in Globals.connections:
            conn.log.Log.open_logfile_allconn(log_rp.conn)

    def open_logfile_allconn(self, log_file_conn):
        # Record which connection owns the logfile so writes can be forwarded.
        self.log_file_open = 1
        self.log_file_conn = log_file_conn

    def open_logfile_local(self, log_rp):
        """Open the logfile handle on the local connection (append, binary)."""
        assert (log_rp.conn is Globals.local_connection), 'Action only foreseen locally and not over {conn}'.format(conn=log_rp.conn)
        try:
            self.logfp = log_rp.open('ab')
        except OSError as exc:
            raise LoggerError("Unable to open logfile {lf} due to exception '{ex}'".format(lf=log_rp, ex=exc))
        self.log_file_local = 1

    def close_logfile(self):
        """Close the logfile on every connection, then on the owner."""
        if self.log_file_open:
            for conn in Globals.connections:
                conn.log.Log.close_logfile_allconn()
            self.log_file_conn.log.Log.close_logfile_local()

    def close_logfile_allconn(self):
        self.log_file_open = None

    def close_logfile_local(self):
        assert (self.log_file_conn is Globals.local_connection), 'Action only foreseen locally and not over {lc}'.format(lc=self.log_file_conn)
        self.logfp.close()
        self.log_file_local = None

    def _exception_to_string(self):
        # Render the active exception plus traceback into one string.
        # (Pre-existing: shadows the builtin `type` inside this method.)
        (type, value, tb) = sys.exc_info()
        s = ("Exception '%s' raised of class '%s':\n%s" % (value, type, ''.join(traceback.format_tb(tb))))
        return s

    def _format(self, message, verbosity, msg_verbosity):
        """Render one log line: compact prefix form at low verbosity,
        timestamped role/pid form otherwise."""
        if (verbosity <= DEBUG):
            if ((msg_verbosity == INFO) and ('\n' in message[:(- 1)])):
                return '{msg}\n'.format(msg=message)
            else:
                return '{pre:<9}{msg}\n'.format(pre=_LOG_PREFIX[msg_verbosity], msg=message)
        else:
            timestamp = datetime.datetime.now(datetime.timezone.utc).astimezone().strftime('%F %H:%M:%S.%f %z')
            if Globals.server:
                role = 'SERVER'
            else:
                role = 'CLIENT'
            return '{time} <{role}-{pid}> {pre} {msg}\n'.format(time=timestamp, role=role, pid=os.getpid(), pre=_LOG_PREFIX[msg_verbosity], msg=message)
# NOTE(review): the decorator head is garbled in this file — `_grad()` is
# most likely the tail of `@torch.no_grad()` (inference without autograd);
# confirm against version control.
_grad()
def evaluate(parts):
    """Run the model over each dataset part and return (metrics, predictions).

    Relies on enclosing-scope names defined elsewhere in the script:
    model, X_num, X_cat, Y, D, args, y_info, device, lib.
    """
    model.eval()
    metrics = {}
    predictions = {}
    for part in parts:
        # Batched forward pass over index chunks, concatenated into one array.
        predictions[part] = torch.cat([
            model(
                (None if (X_num is None) else X_num[part][idx]),
                (None if (X_cat is None) else X_cat[part][idx]),
            )
            for idx in lib.IndexLoader(D.size(part), args['training']['eval_batch_size'], False, device)
        ]).cpu().numpy()
        try:
            metrics[part] = lib.calculate_metrics(D.info['task_type'], Y[part].numpy(), predictions[part], 'logits', y_info)
        except ValueError as err:
            # Degenerate multiclass case: roc_auc is undefined when a class
            # is absent from this part; record a sentinel score instead.
            assert ('Target scores need to be probabilities for multiclass roc_auc' in str(err))
            metrics[part] = {'score': (- .0)}
    for (part, part_metrics) in metrics.items():
        print(f'[{part:<5}]', lib.make_summary(part_metrics))
    return (metrics, predictions)
def downsample_mask(mask, max_n, seed=0):
    """Cap the number of True entries in a boolean *mask* at *max_n*.

    When *max_n* is None or the mask already has at most *max_n* True
    entries, the original mask object is returned unchanged. Otherwise a
    new mask is built by sampling exactly int(max_n) of the True positions
    uniformly without replacement (deterministic for a given *seed*).
    """
    if max_n is None or np.sum(mask) <= max_n:
        # Nothing to trim: hand back the original mask as-is.
        return mask
    keep = int(max_n)
    true_positions = np.nonzero(mask)[0]
    rng = np.random.default_rng(seed=seed)
    # Sample indices into the list of True positions, not raw positions,
    # so only currently-True entries can survive.
    chosen = rng.choice(len(true_positions), size=keep, replace=False)
    result = np.zeros_like(mask)
    result[true_positions[chosen]] = True
    assert np.sum(result) == keep
    return result
def createDelexData():
    """Delexicalise the raw MultiWOZ dialogues and write data/multi-woz/delex.json.

    Returns:
        dict mapping dialogue name -> delexicalised dialogue (also written
        to disk as JSON).
    """
    loadData()
    dic = delexicalize.prepareSlotValuesIndependent()
    # `open` replaces the Python-2-only `file()` builtin and the context
    # managers close the handles (the original leaked both).
    with open('data/multi-woz/data.json') as fin1:
        data = json.load(fin1)
    with open('data/multi-woz/dialogue_acts.json') as fin2:
        data2 = json.load(fin2)
    # NOTE(review): the original referenced `delex_data` without any local
    # initialisation; initialising per call here — confirm no module-level
    # global was relied upon elsewhere.
    delex_data = {}
    digitpat = re.compile('\\d+')  # hoisted: the pattern is loop-invariant
    for dialogue_name in tqdm(data):
        dialogue = data[dialogue_name]
        idx_acts = 1
        for (idx, turn) in enumerate(dialogue['log']):
            # Normalise, delexicalise slot values and reference numbers,
            # then mask any remaining digit runs.
            sent = normalize(turn['text'])
            words = sent.split()
            sent = delexicalize.delexicalise(' '.join(words), dic)
            sent = delexicaliseReferenceNumber(sent, turn)
            sent = re.sub(digitpat, '[value_count]', sent)
            dialogue['log'][idx]['text'] = sent
            if ((idx % 2) == 1):
                # System turns (odd indices): attach DB/booking pointers to
                # the preceding user turn.
                pointer_vector = addDBPointer(turn)
                pointer_vector = addBookingPointer(dialogue, turn, pointer_vector)
                dialogue['log'][(idx - 1)]['db_pointer'] = pointer_vector.tolist()
            dialogue = fixDelex(dialogue_name, dialogue, data2, idx, idx_acts)
            idx_acts += 1
        delex_data[dialogue_name] = dialogue
    with open('data/multi-woz/delex.json', 'w') as outfile:
        json.dump(delex_data, outfile)
    return delex_data
# NOTE(review): the decorator head is garbled in this file — presumably
# `@pytest.mark.parametrize('env', ...)`; restore from version control.
.parametrize('env', ((), ('TOX_ENV_DIR', '/tox_env_dir')))
def test_cache_reportheader(env, pytester: Pytester, monkeypatch: MonkeyPatch) -> None:
    """The cachedir shown in pytest's report header honours TOX_ENV_DIR."""
    pytester.makepyfile('def test_foo(): pass')
    if env:
        # Simulate running under tox: the cache lives inside the tox env dir.
        monkeypatch.setenv(*env)
        expected = os.path.join(env[1], '.pytest_cache')
    else:
        monkeypatch.delenv('TOX_ENV_DIR', raising=False)
        expected = '.pytest_cache'
    result = pytester.runpytest('-v')
    result.stdout.fnmatch_lines([('cachedir: %s' % expected)])
def apply_regularization(regularizer, weights_list=None):
    """Sum ``regularizer(w)`` over the weights, record the total under the
    REGULARIZATION_LOSSES collection and return it as a scalar tensor.

    Falls back to the WEIGHTS collection when *weights_list* is empty or
    None; raises ValueError when there is nothing to regularize or a
    regularizer returns a non-scalar tensor.
    """
    if not weights_list:
        weights_list = ops.get_collection(ops.GraphKeys.WEIGHTS)
    if not weights_list:
        raise ValueError('No weights to regularize.')
    with ops.name_scope('get_regularization_penalty', values=weights_list) as scope:
        penalties = []
        for weight in weights_list:
            penalty = regularizer(weight)
            if penalty is None:
                # A regularizer may decline a weight; contribute zero instead.
                penalty = constant_op.constant(0.0)
            penalties.append(penalty)
        for penalty in penalties:
            rank = penalty.get_shape().ndims
            if rank != 0:
                raise ValueError('regularizer must return a scalar Tensor instead of a Tensor with rank %d.' % rank)
        total = math_ops.add_n(penalties, name=scope)
        ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, total)
        return total
class Block(nn.Module):
    """Transformer block: pre-norm attention and MLP with residual
    connections, optionally followed by a sequence downsample plus a fresh
    learned positional embedding."""

    def __init__(self, seq_len, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, downsample=None, **kwargs):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.downsample = downsample
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, **kwargs)
        # Stochastic depth on both residual branches; identity when rate is 0.
        self.drop_path = (DropPath(drop_path) if (drop_path > 0.0) else nn.Identity())
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int((dim * mlp_ratio))
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        if downsample:
            # A new positional embedding is learned for the downsampled
            # sequence (seq_len tokens after downsampling — TODO confirm).
            self.pos_embed = nn.Parameter(torch.zeros(1, seq_len, dim))
            trunc_normal_(self.pos_embed, std=0.02)

    # NOTE(review): the decorator below is garbled in this file — `.ignore`
    # is most likely the tail of `@torch.jit.ignore`; restore from VCS.
    .ignore
    def no_weight_decay(self):
        # Parameter names the optimizer setup should exclude from weight decay.
        return {'pos_embed'}

    def forward(self, x):
        # assumes x is (batch, seq, dim) — TODO confirm against Attention/Mlp
        x = (x + self.drop_path(self.attn(self.norm1(x))))
        x = (x + self.drop_path(self.mlp(self.norm2(x))))
        if (self.downsample is not None):
            # The downsample module operates channel-first, hence the transposes.
            x = self.downsample(x.transpose(2, 1)).transpose(1, 2)
            x = (x + self.pos_embed)
        return x
def run(args=None):
    """Beaver entry point: build config/logging, spawn the tail-manager worker
    process, and keep it alive (restarting or refreshing it as configured).

    NOTE(review): Python 2 code — `signal.__dict__.iteritems()` and the bare
    `Queue.Full` reference will not work on Python 3.
    """
    logger = setup_custom_logger('beaver', args)
    beaver_config = BeaverConfig(args, logger=logger)
    # Re-create the logger now that config-driven settings are available.
    logger = setup_custom_logger('beaver', args, config=beaver_config)
    if (beaver_config.get('logstash_version') not in [0, 1]):
        raise LookupError('Invalid logstash_version')
    queue = multiprocessing.JoinableQueue(beaver_config.get('max_queue_size'))
    manager_proc = None
    ssh_tunnel = create_ssh_tunnel(beaver_config, logger=logger)

    # Small closures so the queue handle doesn't have to be passed around.
    def queue_put(*args):
        return queue.put(*args)

    def queue_put_nowait(*args):
        return queue.put_nowait(*args)

    def cleanup(signalnum, frame):
        # Signal handler AND explicit teardown hook (called with signalnum=None
        # for the worker-refresh path, which must NOT exit the process).
        if (signalnum is not None):
            # Reverse-map the signal number to its name for the log message.
            sig_name = tuple((v for (v, k) in signal.__dict__.iteritems() if (k == signalnum)))[0]
            logger.info('{0} detected'.format(sig_name))
            logger.info('Shutting down. Please wait...')
        else:
            logger.info('Worker process cleanup in progress...')
        try:
            # Best effort: tell the consumer to exit; a full queue is ignored.
            queue_put_nowait(('exit', ()))
        except Queue.Full:
            pass
        if (manager_proc is not None):
            try:
                manager_proc.terminate()
                manager_proc.join()
            except RuntimeError:
                pass
        if (ssh_tunnel is not None):
            logger.info('Closing ssh tunnel...')
            ssh_tunnel.close()
        if (signalnum is not None):
            logger.info('Shutdown complete.')
            # Hard exit from the signal handler; skips atexit/finally blocks.
            return os._exit(signalnum)

    signal.signal(signal.SIGTERM, cleanup)
    signal.signal(signal.SIGINT, cleanup)
    signal.signal(signal.SIGQUIT, cleanup)

    def create_queue_consumer():
        # Spawn a process draining the queue (run_queue) and return it.
        process_args = (queue, beaver_config, logger)
        proc = multiprocessing.Process(target=run_queue, args=process_args)
        logger.info('Starting queue consumer')
        proc.start()
        return proc

    def create_queue_producer():
        # Runs inside the worker process: tails files and feeds the queue.
        manager = TailManager(beaver_config=beaver_config, queue_consumer_function=create_queue_consumer, callback=queue_put, logger=logger)
        manager.run()

    while 1:
        try:
            if REOPEN_FILES:
                logger.debug('Detected non-linux platform. \nFiles will be reopened for tailing')
            t = time.time()
            while True:
                # (Re)start the worker whenever it is missing or has died.
                if ((manager_proc is None) or (not manager_proc.is_alive())):
                    logger.info('Starting worker...')
                    t = time.time()
                    manager_proc = multiprocessing.Process(target=create_queue_producer)
                    manager_proc.start()
                    logger.info('Working...')
                # Wake every 10s to check liveness / refresh deadline.
                manager_proc.join(10)
                if beaver_config.get('refresh_worker_process'):
                    if (beaver_config.get('refresh_worker_process') < (time.time() - t)):
                        logger.info('Worker has exceeded refresh limit. Terminating process...')
                        # signalnum=None: tear down the worker without exiting.
                        cleanup(None, None)
        except KeyboardInterrupt:
            # SIGINT is handled by the signal handler; swallow the exception here.
            pass
def tor_reconnect(self):
    """Request a fresh Tor circuit via NEWNYM, then pause for `tor_delay`.

    If the controller rejects the signal, Tor usage is disabled for the
    rest of the session instead of raising.
    """
    if not self.using_tor:
        return
    try:
        self.tor_controller.signal(Signal.NEWNYM)
        self.logger.info('New Tor connection processing')
        # Give Tor time to actually build the new circuit.
        time.sleep(self.tor_delay)
    except (InvalidArguments, ProtocolError):
        self.logger.error("couldn't establish new tor connection, disabling tor")
        self.using_tor = False
def get_cmdclass(cmdclass=None):
    """Return a dict of versioneer-aware setuptools command classes.

    Starts from `cmdclass` (or an empty dict), then wraps/overrides the
    commands that need the generated version written into build products:
    version, build_py, build_ext, build_exe (cx_Freeze), py2exe, egg_info,
    and sdist.
    """
    if ('versioneer' in sys.modules):
        # Ensure a stale in-memory copy of versioneer isn't reused.
        del sys.modules['versioneer']
    cmds = ({} if (cmdclass is None) else cmdclass.copy())
    from setuptools import Command

    class cmd_version(Command):
        # `setup.py version`: print the computed version and metadata.
        description = 'report generated version string'
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            vers = get_versions(verbose=True)
            print(('Version: %s' % vers['version']))
            print((' full-revisionid: %s' % vers.get('full-revisionid')))
            print((' dirty: %s' % vers.get('dirty')))
            print((' date: %s' % vers.get('date')))
            if vers['error']:
                print((' error: %s' % vers['error']))
    cmds['version'] = cmd_version
    # Wrap whichever build_py is already configured, else setuptools' default.
    if ('build_py' in cmds):
        _build_py = cmds['build_py']
    else:
        from setuptools.command.build_py import build_py as _build_py

    class cmd_build_py(_build_py):

        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            if getattr(self, 'editable_mode', False):
                # Editable installs use the source tree directly; nothing to write.
                return
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
                print(('UPDATING %s' % target_versionfile))
                write_to_version_file(target_versionfile, versions)
    cmds['build_py'] = cmd_build_py
    if ('build_ext' in cmds):
        _build_ext = cmds['build_ext']
    else:
        from setuptools.command.build_ext import build_ext as _build_ext

    class cmd_build_ext(_build_ext):

        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_ext.run(self)
            if self.inplace:
                # build_ext --inplace compiles next to the sources; the version
                # file there is maintained by build_py instead.
                return
            target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
            if (not os.path.exists(target_versionfile)):
                print(f'Warning: {target_versionfile} does not exist, skipping version update. This can happen if you are running build_ext without first running build_py.')
                return
            print(('UPDATING %s' % target_versionfile))
            write_to_version_file(target_versionfile, versions)
    cmds['build_ext'] = cmd_build_ext
    if ('cx_Freeze' in sys.modules):
        from cx_Freeze.dist import build_exe as _build_exe

        class cmd_build_exe(_build_exe):

            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print(('UPDATING %s' % target_versionfile))
                # Temporarily replace the source version file with the frozen
                # version, then restore the template afterwards.
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, 'w') as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write((LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source}))
        cmds['build_exe'] = cmd_build_exe
        # cx_Freeze drives build_py itself; the wrapped one must not run too.
        del cmds['build_py']
    if ('py2exe' in sys.modules):
        from py2exe.distutils_buildexe import py2exe as _py2exe

        class cmd_py2exe(_py2exe):

            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print(('UPDATING %s' % target_versionfile))
                # Same temporary-replace-then-restore dance as build_exe above.
                write_to_version_file(target_versionfile, versions)
                _py2exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, 'w') as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write((LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source}))
        cmds['py2exe'] = cmd_py2exe
    # NOTE(review): `_sdist` is (re)used here as the variable name for the
    # egg_info base class; only the binding matters, but the name is misleading.
    if ('egg_info' in cmds):
        _sdist = cmds['egg_info']
    else:
        from setuptools.command.egg_info import egg_info as _egg_info

    class cmd_egg_info(_egg_info):

        def find_sources(self):
            super().find_sources()
            root = get_root()
            cfg = get_config_from_root(root)
            # Ensure versioneer.py and the version file ship in sdists.
            self.filelist.append('versioneer.py')
            if cfg.versionfile_source:
                self.filelist.append(cfg.versionfile_source)
            self.filelist.sort()
            self.filelist.remove_duplicates()
            # Rewrite SOURCES.txt with forward slashes for portability.
            from setuptools import unicode_utils
            normalized = [unicode_utils.filesys_decode(f).replace(os.sep, '/') for f in self.filelist.files]
            manifest_filename = os.path.join(self.egg_info, 'SOURCES.txt')
            with open(manifest_filename, 'w') as fobj:
                fobj.write('\n'.join(normalized))
    cmds['egg_info'] = cmd_egg_info
    if ('sdist' in cmds):
        _sdist = cmds['sdist']
    else:
        from setuptools.command.sdist import sdist as _sdist

    class cmd_sdist(_sdist):

        def run(self):
            versions = get_versions()
            # Remember the versions so make_release_tree can write them later.
            self._versioneer_generated_versions = versions
            self.distribution.metadata.version = versions['version']
            return _sdist.run(self)

        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # The sdist gets a static version file (no VCS info inside a tarball).
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print(('UPDATING %s' % target_versionfile))
            write_to_version_file(target_versionfile, self._versioneer_generated_versions)
    cmds['sdist'] = cmd_sdist
    return cmds
def _runner(init, shape, target_mean=None, target_std=None, target_max=None, target_min=None):
    """Materialize `init(shape)` through the backend and check its statistics.

    Each non-None target (std, mean, max, min) must match the corresponding
    statistic of the realized tensor to within an absolute tolerance of 0.03.
    """
    variable = K.variable(init(shape))
    output = K.get_value(variable)
    lim = 0.03
    # (target, actual) pairs, checked in the same order as before.
    checks = (
        (target_std, output.std()),
        (target_mean, output.mean()),
        (target_max, output.max()),
        (target_min, output.min()),
    )
    for expected, actual in checks:
        if expected is not None:
            assert abs(actual - expected) < lim
def _init_weights(module, name, zero_init_last=False): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu') if (module.bias is not None): nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): nn.init.normal_(module.weight, mean=0.0, std=0.01) if (module.bias is not None): nn.init.zeros_(module.bias) elif (zero_init_last and hasattr(module, 'zero_init_last')): module.zero_init_last()
# NOTE(review): all three decorators were garbled to bare `.parametrize(...)`
# (syntax errors); restored to `@pytest.mark.parametrize` in the original order.
# Assumes the file imports `pytest` at the top — confirm.
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('snapshot_id', snapshots)
def test_snapshot_rollback_get(db, client, username, password, project_id, snapshot_id):
    """GET on snapshot_rollback: 200 for permitted users, 403 for other
    authenticated users, 302 (login redirect) for anonymous, 404 when the
    snapshot does not belong to the project."""
    client.login(username=username, password=password)
    project = Project.objects.get(pk=project_id)
    project_snapshots = list(project.snapshots.values_list('id', flat=True))
    url = reverse('snapshot_rollback', args=[project_id, snapshot_id])
    response = client.get(url)
    if (snapshot_id in project_snapshots):
        if (project_id in rollback_snapshot_permission_map.get(username, [])):
            assert (response.status_code == 200)
        elif password:
            # Authenticated but not permitted on this project.
            assert (response.status_code == 403)
        else:
            # Anonymous: redirected to login.
            assert (response.status_code == 302)
    else:
        assert (response.status_code == 404)
def convert_standalone_batchnorms(model: tf.keras.Model, folded_bns: set) -> List[tf.keras.layers.BatchNormalization]:
    """Convert the parameters of every standalone BatchNormalization layer.

    A layer is "standalone" when it was not already folded into a preceding
    layer (i.e. it is absent from `folded_bns`). Returns the list of layers
    that were converted, in model order.
    """
    converted = []
    for candidate in model.layers:
        if not isinstance(candidate, tf.keras.layers.BatchNormalization):
            continue
        if candidate in folded_bns:
            # Already folded elsewhere; leave untouched.
            continue
        convert_batchnorm_parameters(candidate)
        _logger.debug('%s weights got converted', candidate.name)
        converted.append(candidate)
    return converted
class OpenCLSSASimulator(SSABase):
    """Stochastic simulation (SSA) of a PySB-style model on OpenCL devices.

    Renders an OpenCL kernel from a Mako template at construction time and
    compiles it lazily on the first `run()` call; simulations are split evenly
    across all devices of the chosen OpenCL context.
    """
    # Capability flags consumed by the SSABase machinery.
    _supports = {'multi_initials': True, 'multi_param_values': True}

    def __init__(self, model, verbose=False, tspan=None, precision=np.float64, **kwargs):
        # `precision` selects the floating-point type compiled into the kernel.
        if (cl is None):
            raise ImportError('pyopencl library required for {}'.format(self.__class__.__name__))
        super(OpenCLSSASimulator, self).__init__(model, verbose, **kwargs)
        generate_equations(self._model)
        self.tspan = tspan
        self.verbose = verbose
        # Lazy-compilation guard: _setup()/_compile() run on the first run() only.
        self._step_0 = True
        self._dtype = precision
        template_code = Template(filename=os.path.join(os.path.dirname(__file__), 'templates', 'opencl_ssa.cl'))
        args = self._get_template_args()
        if (self._dtype == np.float32):
            args['prec'] = '#define USE_SINGLE_PRECISION'
            self._logger.warn('Should be cautious using single precision.')
        else:
            args['prec'] = '#define USE_DOUBLE_PRECISION'
        # Map numpy integer dtypes to their OpenCL C type names.
        _d = {np.uint32: 'uint', np.int32: 'int', np.int16: 'ushort', np.int64: 'long', np.uint64: 'unsigned long'}
        self._dtype_species = np.int32
        args['spc_type'] = _d[self._dtype_species]
        # Kernel-side verbosity is compiled in via preprocessor defines.
        if (verbose == 2):
            args['verbose'] = '#define VERBOSE'
        elif (verbose > 3):
            args['verbose'] = '#define VERBOSE_MAX'
        else:
            args['verbose'] = ''
        self._logger.info('Initialized OpenCLSSASimulator class')
        self._code = template_code.render(**args)

    def _compile(self):
        """Create the OpenCL context and build the rendered kernel source."""
        if self.verbose:
            # Dump the generated kernel for debugging.
            self._logger.info('Output OpenCl file to ssa_opencl_code.cl')
            with open('ssa_opencl_code.cl', 'w') as source_file:
                source_file.write(self._code)
        # Interactive=True may prompt for a platform/device choice.
        self.context = cl.create_some_context(True)
        devices = self.context.devices
        self._device_name = devices[0].name
        self._n_devices = self.context.num_devices
        # Work-group size heuristic: CPU -> 1, NVIDIA/CUDA -> 32, otherwise 64
        # (presumably matched to warp/wavefront sizes — TODO confirm).
        if (devices[0].type == device_type.CPU):
            self._local_work_size = (1, 1)
        elif ('CUDA' in devices[0].platform.name.upper()):
            self._local_work_size = (32, 1)
        else:
            self._local_work_size = (64, 1)
        self._logger.info(f'Using device {self._device_name}')
        self.devices = devices
        self.program = cl.Program(self.context, self._code).build(options=['-cl-denorms-are-zero', '-cl-no-signed-zeros', '-cl-finite-math-only', '-cl-mad-enable', '-I {}'.format(os.path.join(os.path.dirname(__file__)))])

    def run(self, tspan=None, param_values=None, initials=None, number_sim=0, random_seed=0):
        """Run `number_sim` stochastic simulations over `tspan`.

        Returns a SimulationResult built from the per-simulation species
        trajectories. Raises if no tspan was given here or at construction.
        """
        super(OpenCLSSASimulator, self).run(tspan=tspan, initials=initials, param_values=param_values, number_sim=number_sim)
        if (tspan is None):
            if (self.tspan is None):
                raise Exception('Please provide tspan')
            else:
                tspan = self.tspan
        t_out = np.array(tspan, dtype=self._dtype)
        if self._step_0:
            # First call: compile the kernel and flip the guard.
            self._setup()
        self._logger.info('Creating content on device')
        # Split the workload evenly across devices; each device rounds its
        # share up to a whole number of work-groups.
        n_sim_per_device = (self.num_sim // self._n_devices)
        local_work_size = self._local_work_size
        (blocks, threads) = self.get_blocks(n_sim_per_device, local_work_size[0])
        total_threads = int((blocks * threads))
        timer_start = time.time()
        # One host thread per device; `call` launches the kernel on device i.
        with ThreadPoolExecutor(self._n_devices) as executor:
            sim_partial = partial(call, ocl_instance=self, n_sim_per_device=n_sim_per_device, t_out=t_out, total_threads=total_threads, local_work_size=local_work_size, random_seed=random_seed)
            results = [executor.submit(sim_partial, i) for i in range(self._n_devices)]
            traj = [r.result() for r in results]
        traj = [r.reshape((total_threads, len(t_out), self._n_species)) for r in traj]
        traj = np.vstack(traj)
        # Drop the padding simulations added by work-group rounding.
        traj = traj[:self.num_sim]
        self._time = (time.time() - timer_start)
        self._logger.info('{} simulations in {:.4f}s'.format(self.num_sim, self._time))
        tout = np.array(([tspan] * self.num_sim))
        return SimulationResult(self, tout, traj)

    def _setup(self):
        # Compile once, then mark setup as done.
        self._compile()
        self._step_0 = False