code
stringlengths
281
23.7M
class MaximumLikelihoodAmplitudeEstimation(AmplitudeEstimationAlgorithm):
    """Maximum Likelihood Amplitude Estimation (MLAE).

    Instead of quantum phase estimation, this algorithm runs a schedule of
    Grover-operator powers ([0, 1, 2, 4, ..., 2**(m-1)]), measures the
    objective qubits of each circuit, and recovers the amplitude
    ``a = sin^2(theta)`` by maximizing the combined likelihood of the
    observed '1'-counts.  Supports both the current circuit-based interface
    and the deprecated ``CircuitFactory`` interface.
    """

    def __init__(self, num_oracle_circuits: int,
                 state_preparation: Optional[Union[(QuantumCircuit, CircuitFactory)]]=None,
                 grover_operator: Optional[Union[(QuantumCircuit, CircuitFactory)]]=None,
                 objective_qubits: Optional[List[int]]=None,
                 post_processing: Optional[Callable[([float], float)]]=None,
                 a_factory: Optional[CircuitFactory]=None,
                 q_factory: Optional[CircuitFactory]=None,
                 i_objective: Optional[int]=None,
                 likelihood_evals: Optional[int]=None,
                 quantum_instance: Optional[Union[(QuantumInstance, BaseBackend, Backend)]]=None) -> None:
        """
        Args:
            num_oracle_circuits: Number of non-trivial Grover-power circuits;
                the evaluation schedule becomes
                ``[0] + [2**j for j in range(num_oracle_circuits)]``.
            state_preparation: Circuit preparing the input state
                (a ``CircuitFactory`` here is rerouted to ``a_factory``).
            grover_operator: The Grover/amplification operator
                (a ``CircuitFactory`` here is rerouted to ``q_factory``).
            objective_qubits: Qubits whose measurement defines a 'good' state
                (an ``int`` here is rerouted to ``i_objective``).
            post_processing: Map from the raw amplitude to the target quantity.
            a_factory: Deprecated state-preparation factory.
            q_factory: Deprecated Grover-operator factory.
            i_objective: Deprecated single objective qubit index.
            likelihood_evals: Grid size for the likelihood optimization;
                defaults to a size scaled with the largest Grover power.
            quantum_instance: Backend / quantum instance to execute on.
        """
        validate_min('num_oracle_circuits', num_oracle_circuits, 1)
        # Deprecation shims: CircuitFactory / int arguments are rerouted to
        # the legacy a_factory / q_factory / i_objective parameters.
        if isinstance(state_preparation, CircuitFactory):
            a_factory = state_preparation
            state_preparation = None
        if isinstance(grover_operator, CircuitFactory):
            q_factory = grover_operator
            grover_operator = None
        if isinstance(objective_qubits, int):
            i_objective = objective_qubits
            objective_qubits = None
        super().__init__(state_preparation=state_preparation,
                         grover_operator=grover_operator,
                         objective_qubits=objective_qubits,
                         post_processing=post_processing,
                         a_factory=a_factory,
                         q_factory=q_factory,
                         i_objective=i_objective,
                         quantum_instance=quantum_instance)
        # Powers of the Grover operator to evaluate: [0, 1, 2, 4, ..., 2**(m-1)].
        self._evaluation_schedule = ([0] + [(2 ** j) for j in range(num_oracle_circuits)])
        self._likelihood_evals = likelihood_evals
        if (likelihood_evals is None):
            # Default grid resolution, scaled with the largest Grover power so
            # the fast-oscillating likelihood is still resolved.
            default = 10000
            self._likelihood_evals = np.maximum(default, int((((np.pi / 2) * 1000) * (2 ** num_oracle_circuits))))
        self._circuits = []
        self._ret = {}

    def construct_circuits(self, measurement: bool=False) -> List[QuantumCircuit]:
        """Build one circuit per entry of the evaluation schedule.

        Args:
            measurement: When True, add classical registers and measure the
                objective qubits.

        Returns:
            The list of constructed circuits (also cached in ``self._circuits``).
        """
        self._ret['num_oracle_queries'] = 0
        self._circuits = []
        if (self.state_preparation is not None):
            # Modern path: compose state preparation, then k Grover powers.
            num_qubits = max(self.state_preparation.num_qubits, self.grover_operator.num_qubits)
            q = QuantumRegister(num_qubits, 'q')
            qc_0 = QuantumCircuit(q, name='qc_a')
            if measurement:
                c = ClassicalRegister(len(self.objective_qubits))
                qc_0.add_register(c)
            qc_0.compose(self.state_preparation, inplace=True)
            for k in self._evaluation_schedule:
                qc_k = qc_0.copy(name=('qc_a_q_%s' % k))
                if (k != 0):
                    qc_k.compose(self.grover_operator.power(k), inplace=True)
                if measurement:
                    qc_k.barrier()
                    # NOTE(review): `*c` unpacks the classical register into
                    # individual clbits here — confirm this matches the
                    # measure() overload expected by this Qiskit version.
                    qc_k.measure(self.objective_qubits, *c)
                self._circuits += [qc_k]
        else:
            # Deprecated path: build circuits via the a_factory / q_factory API.
            q = QuantumRegister(self._a_factory.num_target_qubits, 'q')
            qc_0 = QuantumCircuit(q, name='qc_a')
            # Accessing self.q_factory may emit a DeprecationWarning; silence
            # it for this one read, then restore warning behavior.
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            q_factory = self.q_factory
            warnings.filterwarnings('always', category=DeprecationWarning)
            num_ancillas = np.maximum(self._a_factory.required_ancillas(), q_factory.required_ancillas())
            q_aux = None
            if (num_ancillas > 0):
                q_aux = QuantumRegister(num_ancillas, 'aux')
                qc_0.add_register(q_aux)
            if measurement:
                c = ClassicalRegister(len(self.objective_qubits))
                qc_0.add_register(c)
            self._a_factory.build(qc_0, q, q_aux)
            for k in self._evaluation_schedule:
                qc_k = qc_0.copy(name=('qc_a_q_%s' % k))
                if (k != 0):
                    q_factory.build_power(qc_k, q, k, q_aux)
                if measurement:
                    qc_k.barrier()
                    qc_k.measure([q[obj] for obj in self.objective_qubits], c)
                self._circuits += [qc_k]
        return self._circuits

    def _evaluate_statevectors(self, statevectors: Union[(List[List[complex]], List[np.ndarray])]) -> List[float]:
        """Sum the probabilities of all 'good' basis states for each statevector."""
        if (self._circuits is None):
            raise AquaError('Before calling _evaluate_statevector_results the construct_circuit method must be called, which sets the internal _circuit variable required in this method.')
        num_qubits = self._circuits[0].num_qubits
        probabilities = []
        for statevector in statevectors:
            p_k = 0.0
            for (i, amplitude) in enumerate(statevector):
                probability = (np.abs(amplitude) ** 2)
                # Bitstring of basis state i, reversed into qubit (little-endian) order.
                bitstr = ('{:0%db}' % num_qubits).format(i)[::(- 1)]
                if self.is_good_state(bitstr):
                    p_k += probability
            probabilities += [p_k]
        return probabilities

    def _get_hits(self) -> Tuple[(List[float], List[int])]:
        """Return per-circuit ('1'-hits, total shots).

        For statevector runs the 'hits' are exact probabilities with one
        'shot' per circuit; for shot-based runs they are raw counts.

        Raises:
            AquaError: If run() has not populated the result cache yet.
        """
        one_hits = []
        all_hits: List = []
        try:
            if self.quantum_instance.is_statevector:
                probabilities = self._evaluate_statevectors(self._ret['statevectors'])
                one_hits = probabilities
                all_hits = np.ones_like(one_hits)
            else:
                for c in self._ret['counts']:
                    one_hits += [c.get('1', 0)]
                    all_hits += [sum(c.values())]
        except KeyError as ex:
            raise AquaError('Call run() first!') from ex
        return (one_hits, all_hits)

    def _safe_min(self, array, default=0):
        """np.min that tolerates an empty array by returning *default*."""
        if (len(array) == 0):
            return default
        return np.min(array)

    def _safe_max(self, array, default=(np.pi / 2)):
        """np.max that tolerates an empty array by returning *default*."""
        if (len(array) == 0):
            return default
        return np.max(array)

    def _compute_fisher_information(self, a: Optional[float]=None, num_sum_terms: Optional[int]=None, observed: bool=False) -> float:
        """Fisher information at amplitude *a* (expected, or observed from the hits).

        Args:
            a: Amplitude; defaults to the stored MLE value.
            num_sum_terms: Truncate the evaluation schedule to this many terms.
            observed: Use the observed information (squared score) instead of
                the expected information.
        """
        if (a is None):
            try:
                a = self._ret['value']
            except KeyError as ex:
                raise KeyError('Call run() first!') from ex
        theta_a = np.arcsin(np.sqrt(np.real(a)))
        (one_hits, all_hits) = self._get_hits()
        evaluation_schedule = self._evaluation_schedule
        if (num_sum_terms is not None):
            evaluation_schedule = evaluation_schedule[:num_sum_terms]
        fisher_information = None
        if observed:
            # Observed information: square of the log-likelihood derivative.
            d_loglik = 0
            for (shots_k, h_k, m_k) in zip(all_hits, one_hits, evaluation_schedule):
                tan = np.tan((((2 * m_k) + 1) * theta_a))
                d_loglik += (((2 * m_k) + 1) * ((h_k / tan) + ((shots_k - h_k) * tan)))
            d_loglik /= np.sqrt((a * (1 - a)))
            fisher_information = ((d_loglik ** 2) / len(all_hits))
        else:
            # Expected information: sum_k N_k (2 m_k + 1)^2 / (a (1 - a)).
            fisher_information = sum(((shots_k * (((2 * m_k) + 1) ** 2)) for (shots_k, m_k) in zip(all_hits, evaluation_schedule)))
            fisher_information /= (a * (1 - a))
        return fisher_information

    def _fisher_confint(self, alpha: float=0.05, observed: bool=False) -> List[float]:
        """Normal-approximation confidence interval from the (observed) Fisher information."""
        fisher_information = None
        try:
            fisher_information = self._ret['fisher_information']
        except KeyError as ex:
            # NOTE(review): raises AssertionError here while sibling methods
            # raise AquaError for the same condition — confirm this is intended.
            raise AssertionError('Call run() first!') from ex
        if observed:
            fisher_information = self._compute_fisher_information(observed=True)
        normal_quantile = norm.ppf((1 - (alpha / 2)))
        confint = (np.real(self._ret['value']) + ((normal_quantile / np.sqrt(fisher_information)) * np.array([(- 1), 1])))
        # Map the raw amplitude bounds through the user-supplied post-processing.
        mapped_confint = [self.post_processing(bound) for bound in confint]
        return mapped_confint

    def _likelihood_ratio_confint(self, alpha: float=0.05, nevals: Optional[int]=None) -> List[float]:
        """Likelihood-ratio confidence interval via a grid scan of theta in (0, pi/2)."""
        if (nevals is None):
            nevals = self._likelihood_evals

        def loglikelihood(theta, one_counts, all_counts):
            # Binomial log-likelihood summed over the evaluation schedule.
            loglik = 0
            for (i, k) in enumerate(self._evaluation_schedule):
                loglik += (np.log((np.sin((((2 * k) + 1) * theta)) ** 2)) * one_counts[i])
                loglik += (np.log((np.cos((((2 * k) + 1) * theta)) ** 2)) * (all_counts[i] - one_counts[i]))
            return loglik

        (one_counts, all_counts) = self._get_hits()
        eps = 1e-15  # keep away from 0 and pi/2 where log(sin/cos) diverges
        thetas = np.linspace((0 + eps), ((np.pi / 2) - eps), nevals)
        values = np.zeros(len(thetas))
        for (i, theta) in enumerate(thetas):
            values[i] = loglikelihood(theta, one_counts, all_counts)
        loglik_mle = loglikelihood(self._ret['theta'], one_counts, all_counts)
        chi2_quantile = chi2.ppf((1 - alpha), df=1)
        thres = (loglik_mle - (chi2_quantile / 2))
        # Interval = hull of all thetas whose likelihood stays above the
        # chi-square threshold around the MLE.
        above_thres = thetas[(values >= thres)]
        confint = [self._safe_min(above_thres, default=0), self._safe_max(above_thres, default=(np.pi / 2))]
        mapped_confint = [self.post_processing((np.sin(bound) ** 2)) for bound in confint]
        return mapped_confint

    def confidence_interval(self, alpha: float, kind: str='fisher') -> List[float]:
        """Return the (1 - alpha) confidence interval computed by *kind*.

        Raises:
            AquaError: If run() has not been called yet.
            NotImplementedError: For unknown *kind* values.
        """
        if ('estimation' not in self._ret.keys()):
            raise AquaError('Call run() first!')
        # Statevector results are exact — the interval collapses to a point.
        if self._quantum_instance.is_statevector:
            return (2 * [self._ret['estimation']])
        if (kind in ['likelihood_ratio', 'lr']):
            return self._likelihood_ratio_confint(alpha)
        if (kind in ['fisher', 'fi']):
            return self._fisher_confint(alpha, observed=False)
        if (kind in ['observed_fisher', 'observed_information', 'oi']):
            return self._fisher_confint(alpha, observed=True)
        raise NotImplementedError('CI `{}` is not implemented.'.format(kind))

    def _compute_mle_safe(self):
        """Grid-search ('brute') maximizer of the log-likelihood over theta in (0, pi/2)."""
        (one_hits, all_hits) = self._get_hits()
        eps = 1e-15  # avoid the log-likelihood singularities at the interval ends
        search_range = [(0 + eps), ((np.pi / 2) - eps)]

        def loglikelihood(theta):
            # Negative log-likelihood (brute minimizes).
            loglik = 0
            for (i, k) in enumerate(self._evaluation_schedule):
                loglik += (np.log((np.sin((((2 * k) + 1) * theta)) ** 2)) * one_hits[i])
                loglik += (np.log((np.cos((((2 * k) + 1) * theta)) ** 2)) * (all_hits[i] - one_hits[i]))
            return (- loglik)

        est_theta = brute(loglikelihood, [search_range], Ns=self._likelihood_evals)[0]
        return est_theta

    def _run_mle(self) -> float:
        """Hook so subclasses can override the MLE routine."""
        return self._compute_mle_safe()

    def _run(self) -> 'MaximumLikelihoodAmplitudeEstimationResult':
        """Execute the circuits, run the MLE fit, and assemble the result object."""
        if (self.state_preparation is None):
            if (self._a_factory is None):
                raise AquaError('Either the state_preparation variable or the a_factory (deprecated) must be set to run the algorithm.')
        if self._quantum_instance.is_statevector:
            self.construct_circuits(measurement=False)
            ret = self._quantum_instance.execute(self._circuits)
            statevectors = [np.asarray(ret.get_statevector(circuit)) for circuit in self._circuits]
            self._ret['statevectors'] = statevectors
            # Statevector simulation has no sampling: count one 'shot'.
            shots = 1
        else:
            self.construct_circuits(measurement=True)
            ret = self._quantum_instance.execute(self._circuits)
            self._ret['counts'] = [ret.get_counts(circuit) for circuit in self._circuits]
            shots = self._quantum_instance._run_config.shots
        self._ret['theta'] = self._run_mle()
        self._ret['value'] = (np.sin(self._ret['theta']) ** 2)
        self._ret['estimation'] = self.post_processing(self._ret['value'])
        self._ret['fisher_information'] = self._compute_fisher_information()
        # Oracle queries = shots per circuit times the summed Grover powers.
        self._ret['num_oracle_queries'] = (shots * sum((k for k in self._evaluation_schedule)))
        confidence_interval = self._fisher_confint(alpha=0.05)
        self._ret['95%_confidence_interval'] = confidence_interval
        ae_result = AmplitudeEstimationAlgorithmResult()
        ae_result.a_estimation = self._ret['value']
        ae_result.estimation = self._ret['estimation']
        ae_result.num_oracle_queries = self._ret['num_oracle_queries']
        ae_result.confidence_interval = self._ret['95%_confidence_interval']
        result = MaximumLikelihoodAmplitudeEstimationResult()
        result.combine(ae_result)
        if ('statevectors' in self._ret):
            result.circuit_results = self._ret['statevectors']
        elif ('counts' in self._ret):
            result.circuit_results = self._ret['counts']
        result.theta = self._ret['theta']
        result.fisher_information = self._ret['fisher_information']
        return result
def test_rpcs():
    """Round-trip check: RPC.to_dict() must echo the source mapping exactly."""
    rpcs = RPC(**TEST_RPCS_NATIVE_PYTHON)
    for key, value in rpcs.to_dict().items():
        assert key in TEST_RPCS_NATIVE_PYTHON.keys()
        assert TEST_RPCS_NATIVE_PYTHON[key] == value
        assert isinstance(value, (float, list))
        if not isinstance(value, list):
            continue
        # List-valued entries are fixed-length float vectors.
        assert len(value) == 20
        assert isinstance(value[0], float)
def validate_and_save(cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, valid_subsets: List[str], end_of_epoch: bool) -> Tuple[(List[Optional[float]], bool)]:
    """Decide whether to validate and/or checkpoint now, and do both if due.

    Returns:
        (valid_losses, should_stop): validation losses (``[None]`` when no
        validation ran) and whether training should stop.
    """
    num_updates = trainer.get_num_updates()
    max_update = (cfg.optimization.max_update or math.inf)
    should_stop = False
    # Stop condition 1: update budget exhausted.
    if (num_updates >= max_update):
        should_stop = True
        logger.info(f'Stopping training due to num_updates: {num_updates} >= max_update: {max_update}')
    training_time_hours = (trainer.cumulative_training_time() / (60 * 60))
    # Stop condition 2: wall-clock budget exhausted.
    if ((cfg.optimization.stop_time_hours > 0) and (training_time_hours > cfg.optimization.stop_time_hours)):
        should_stop = True
        logger.info(f'Stopping training due to cumulative_training_time: {training_time_hours} > stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)')
    # Save at end of epoch (per save_interval), when stopping, or every
    # save_interval_updates once past the validate_after_updates warm-up.
    do_save = ((end_of_epoch and ((epoch_itr.epoch % cfg.checkpoint.save_interval) == 0))
               or should_stop
               or ((cfg.checkpoint.save_interval_updates > 0)
                   and (num_updates > 0)
                   and ((num_updates % cfg.checkpoint.save_interval_updates) == 0)
                   and (num_updates >= cfg.dataset.validate_after_updates)))
    # Validate whenever we save mid-epoch, on the epoch validation interval,
    # when stopping, or on the update-based validation interval — unless
    # validation is disabled or still in the warm-up window.
    do_validate = ((((not end_of_epoch) and do_save)
                    or (end_of_epoch and ((epoch_itr.epoch % cfg.dataset.validate_interval) == 0))
                    or should_stop
                    or ((cfg.dataset.validate_interval_updates > 0)
                        and (num_updates > 0)
                        and ((num_updates % cfg.dataset.validate_interval_updates) == 0)))
                   and (not cfg.dataset.disable_validation)
                   and (num_updates >= cfg.dataset.validate_after_updates))
    valid_losses = [None]
    if do_validate:
        logger.info('Training time: {0:.2f}'.format(training_time_hours))
        valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)
    # Stop condition 3: early stopping on the first validation loss.
    should_stop |= should_stop_early(cfg, valid_losses[0])
    if (do_save or should_stop):
        checkpoint_utils.save_checkpoint(cfg.checkpoint, trainer, epoch_itr, valid_losses[0])
    return (valid_losses, should_stop)
def find_closest_psnr(target, img, fmt='jpeg'):
    """Binary-search the codec quality (0..100) whose reconstruction PSNR is
    closest to *target*.

    Args:
        target: Desired PSNR in dB.
        img: Source image (anything ``pillow_encode`` accepts).
        fmt: Codec name passed to ``pillow_encode``.

    Returns:
        (reconstruction, bpp, psnr) from the last probed quality.
    """
    lower = 0
    upper = 100
    prev_mid = upper

    def _psnr(a, b):
        a = np.asarray(a).astype(np.float32)
        b = np.asarray(b).astype(np.float32)
        mse = np.mean(np.square(a - b))
        if mse == 0:
            # Identical images: PSNR is unbounded. The original computed
            # log10(0) here, which raises a ValueError.
            return float('inf')
        return 20 * math.log10(255.0) - 10.0 * math.log10(mse)

    # At most 10 bisection steps over the integer quality range; stop early
    # once the integer midpoint no longer moves.
    for _ in range(10):
        mid = (upper - lower) / 2 + lower
        if int(mid) == int(prev_mid):
            break
        prev_mid = mid
        rec, bpp = pillow_encode(img, fmt=fmt, quality=int(mid))
        psnr_val = _psnr(rec, img)
        if psnr_val > target:
            upper = mid - 1
        else:
            lower = mid
    return rec, bpp, psnr_val
class Encoder(nn.Module):
    """Stack of EncoderLayer blocks; returns final states plus per-layer
    self-attention maps."""

    def __init__(self, d_model, d_ff, d_k, d_v, n_layers, n_heads, len_q):
        super(Encoder, self).__init__()
        blocks = [EncoderLayer(d_model, d_ff, d_k, d_v, n_heads, len_q)
                  for _ in range(n_layers)]
        self.layers = nn.ModuleList(blocks)

    def forward(self, enc_inputs):
        """Run the input through every layer, collecting attention maps."""
        hidden = enc_inputs
        attn_maps = []
        for block in self.layers:
            hidden, attn = block(hidden)
            attn_maps.append(attn)
        return hidden, attn_maps
class CoverPluginHandler(PluginHandler):
    """Tracks enabled cover-source plugins and exposes them (plus the
    built-in sources) ordered by priority."""

    def __init__(self, use_built_in=True):
        self.providers = set()
        self.built_in = ({built_in.EmbeddedCover, built_in.FilesystemCover}
                         if use_built_in else set())

    def plugin_handle(self, plugin):
        """Only cover-source plugins belong to this handler."""
        return issubclass(plugin.cls, CoverSourcePlugin)

    def plugin_enable(self, plugin):
        self.providers.add(plugin)
        print_d(f'Registered {plugin.cls.__name__} cover source')

    def plugin_disable(self, plugin):
        self.providers.remove(plugin)
        print_d(f'Unregistered {plugin.cls.__name__} cover source')

    def sources(self):
        """Yield all source classes, highest priority first."""
        everything = chain((p.cls for p in self.providers), self.built_in)
        yield from sorted(everything, reverse=True, key=lambda x: x.priority())
def service_installed(service: str) -> bool:
    """Return True when systemd has a unit file for *service*.

    The '.service' suffix is appended if missing.
    """
    if not service.endswith('.service'):
        service += '.service'
    try:
        listing = subprocess.check_output(
            ['systemctl', 'list-unit-files', service], text=True)
    except subprocess.CalledProcessError:
        return False
    # list-unit-files prints a header and footer even with no matches; a real
    # hit adds at least one body line, pushing the output past three lines.
    return len(listing.splitlines()) > 3
class Mode(ItemAttrShortcut, HandledItem):
    """A ship tactical mode: wraps a 'Ship Modifiers' item and applies its
    effects to a fit."""

    def __init__(self, item, owner=None):
        if (item.group.name != 'Ship Modifiers'):
            raise ValueError(('Passed item "%s" (category: (%s)) is not a Ship Modifier' % (item.name, item.category.name)))
        self.owner = owner
        self.__item = item
        self.__itemModifiedAttributes = ModifiedAttributeDict()
        self.__itemModifiedAttributes.original = self.item.attributes
        self.__itemModifiedAttributes.overrides = self.item.overrides

    # Restored @property: the rest of this class reads `self.item.<attr>` and
    # `self.itemModifiedAttributes.clear()` without calling them, so plain
    # methods here cannot be correct — the decorators were lost in transit.
    @property
    def item(self):
        return self.__item

    @property
    def itemModifiedAttributes(self):
        return self.__itemModifiedAttributes

    def fits(self, fit):
        raise NotImplementedError()

    def clear(self):
        """Reset all modified attributes."""
        self.itemModifiedAttributes.clear()

    def calculateModifiedAttributes(self, fit, runTime, forceProjected=False):
        """Run every default-active effect of this item for the given runTime."""
        if self.item:
            for effect in self.item.effects.values():
                if ((effect.runTime == runTime) and effect.activeByDefault):
                    effect.handler(fit, self, ('module',), None, effect=effect)

    def __deepcopy__(self, memo):
        # A fresh Mode around the same (immutable) item is a sufficient copy.
        copy = Mode(self.item)
        return copy

    def __repr__(self):
        return 'Mode(ID={}, name={}) at {}'.format(self.item.ID, self.item.name, hex(id(self)))
class Effect5918(BaseEffect):
    """Projected beacon effect: multiplies bomb thermal damage by the
    beacon's smartbomb damage multiplier."""

    # Effect metadata consumed by the fit engine.
    runTime = 'early'
    type = ('projected', 'passive')

    # NOTE(review): `handler` takes no `self`; presumably the effect machinery
    # looks it up on the class (or BaseEffect makes it static) — confirm
    # before refactoring to an instance method.
    def handler(fit, beacon, context, projectionRange, **kwargs):
        # Apply the multiplier to charges requiring the Bomb Deployment skill,
        # stacking-penalized in the 'postMul' group.
        fit.modules.filteredChargeMultiply((lambda mod: mod.charge.requiresSkill('Bomb Deployment')), 'thermalDamage', beacon.getModifiedItemAttr('smartbombDamageMultiplier'), stackingPenalties=True, penaltyGroup='postMul', **kwargs)
class KeithleyBuffer():
    """Mixin adding buffered-measurement helpers (SCPI :TRAC commands) for
    Keithley instruments."""

    # Configured number of buffer points (2..1024). This reflects the
    # configuration register, not how many readings are currently stored.
    buffer_points = Instrument.control(':TRAC:POIN?', ':TRAC:POIN %d', ' An integer property that controls the number of buffer points. This\n does not represent actual points in the buffer, but the configuration\n value instead. ', validator=truncated_range, values=[2, 1024], cast=int)

    def config_buffer(self, points=64, delay=0):
        """Arm the buffer for *points* readings with *delay* seconds between
        triggers. Command order matters: clear status, clear trace, size the
        buffer, then route SENSE data into it."""
        self.write(':STAT:PRES;*CLS;*SRE 1;:STAT:MEAS:ENAB 512;')
        self.write(':TRAC:CLEAR;')
        self.buffer_points = points
        self.trigger_count = points
        self.trigger_delay = delay
        self.write(':TRAC:FEED SENSE;:TRAC:FEED:CONT NEXT;')
        self.check_errors()

    def is_buffer_full(self):
        """True when the status byte reports the buffer-full pattern (65)."""
        status_bit = int(self.ask('*STB?'))
        return (status_bit == 65)

    def wait_for_buffer(self, should_stop=(lambda : False), timeout=60, interval=0.1):
        """Poll until the buffer is full, *should_stop* returns True, or
        *timeout* seconds elapse (then raises)."""
        t = time()
        while (not self.is_buffer_full()):
            sleep(interval)
            if should_stop():
                return
            if ((time() - t) > timeout):
                raise Exception('Timed out waiting for Keithley buffer to fill.')

    def buffer_data(self):
        """Read the whole buffer back (ASCII transfer) as a float64 array."""
        self.write(':FORM:DATA ASCII')
        return np.array(self.values(':TRAC:DATA?'), dtype=np.float64)

    def start_buffer(self):
        """Start acquisition into the buffer."""
        self.write(':INIT')

    def reset_buffer(self):
        """Clear status and trace data and re-arm the buffer feed."""
        self.write(':STAT:PRES;*CLS;:TRAC:CLEAR;:TRAC:FEED:CONT NEXT;')

    def stop_buffer(self):
        """Abort acquisition (device clear on Prologix adapters, :ABOR otherwise)."""
        if (type(self.adapter) is PrologixAdapter):
            self.write('++clr')
        else:
            self.write(':ABOR')

    def disable_buffer(self):
        """Stop the buffer from accepting further readings."""
        self.write(':TRAC:FEED:CONT NEV')
def test_asking_qu_questions():
    """The first question sent for a QU query must have the unicast bit set."""
    type_ = '_quservice._tcp.local.'
    zeroconf = r.Zeroconf(interfaces=['127.0.0.1'])
    old_send = zeroconf.async_send
    first_outgoing = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        # Capture the first outgoing packet, then forward as usual.
        nonlocal first_outgoing
        if first_outgoing is None:
            first_outgoing = out
        old_send(out, addr=addr, port=port)

    with patch.object(zeroconf, 'async_send', send):
        zeroconf.get_service_info(f'name.{type_}', type_, 500,
                                  question_type=r.DNSQuestionType.QU)
        assert first_outgoing.questions[0].unicast is True
    zeroconf.close()
def getFiles(folder, suffix='.json', exclude=('results.json',)):
    """Recursively collect files under *folder* whose names end with *suffix*,
    skipping basenames listed in *exclude*.

    Args:
        folder: Root directory to walk.
        suffix: Required filename suffix.
        exclude: Container of basenames to skip. The original used a mutable
            list default; a tuple default avoids cross-call aliasing and still
            accepts any container from callers.

    Returns:
        Sorted list of matching paths.
    """
    file_list = []
    for root, _, filenames in os.walk(folder):
        for name in filenames:
            if name.endswith(suffix) and name not in exclude:
                file_list.append(os.path.join(root, name))
    file_list.sort()
    return file_list
class FakeNetCDF4FileHandler2(FakeNetCDF4FileHandler):
    """Fake SST file handler: fabricates in-memory netCDF-like content for
    reader tests (no real file I/O)."""

    def get_test_content(self, filename, filename_info, filetype_info):
        """Build the dict of global attrs, lat/lon arrays, data variables and
        flags that the real handler would read from a file."""
        # Deterministic default start time when the filename carries none.
        dt = filename_info.get('start_time', datetime(2016, 1, 1, 12, 0, 0))
        (sat, inst) = {'VIIRS_NPP': ('NPP', 'VIIRS'), 'VIIRS_N20': ('N20', 'VIIRS')}[filename_info['sensor_id']]
        file_content = {'/attr/platform': sat,
                        '/attr/sensor': inst,
                        '/attr/spatial_resolution': '742 m at nadir',
                        '/attr/time_coverage_start': dt.strftime('%Y%m%dT%H%M%SZ'),
                        '/attr/time_coverage_end': (dt + timedelta(minutes=6)).strftime('%Y%m%dT%H%M%SZ')}
        # Geolocation variables with CF-style attributes.
        file_content['lat'] = DEFAULT_LAT_DATA
        file_content['lat/attr/comment'] = 'Latitude of retrievals'
        file_content['lat/attr/long_name'] = 'latitude'
        file_content['lat/attr/standard_name'] = 'latitude'
        file_content['lat/attr/units'] = 'degrees_north'
        file_content['lat/attr/valid_min'] = (- 90.0)
        file_content['lat/attr/valid_max'] = 90.0
        file_content['lat/shape'] = DEFAULT_FILE_SHAPE
        file_content['lon'] = DEFAULT_LON_DATA
        file_content['lon/attr/comment'] = 'Longitude of retrievals'
        file_content['lon/attr/long_name'] = 'longitude'
        file_content['lon/attr/standard_name'] = 'longitude'
        file_content['lon/attr/units'] = 'degrees_east'
        file_content['lon/attr/valid_min'] = (- 180.0)
        file_content['lon/attr/valid_max'] = 180.0
        file_content['lon/shape'] = DEFAULT_FILE_SHAPE
        # All data variables share the same dummy data and attribute set.
        for k in ['sea_surface_temperature', 'satellite_zenith_angle', 'sea_ice_fraction', 'wind_speed']:
            file_content[k] = DEFAULT_FILE_DATA[(None, ...)]
            file_content[(k + '/attr/scale_factor')] = 1.1
            file_content[(k + '/attr/add_offset')] = 0.1
            file_content[(k + '/attr/units')] = 'some_units'
            file_content[(k + '/attr/comment')] = 'comment'
            file_content[(k + '/attr/standard_name')] = 'standard_name'
            file_content[(k + '/attr/long_name')] = 'long_name'
            file_content[(k + '/attr/valid_min')] = 0
            file_content[(k + '/attr/valid_max')] = 65534
            file_content[(k + '/attr/_FillValue')] = 65534
            file_content[(k + '/shape')] = (1, DEFAULT_FILE_SHAPE[0], DEFAULT_FILE_SHAPE[1])
        # Quality flags: all zero (nothing flagged).
        file_content['l2p_flags'] = np.zeros((1, DEFAULT_FILE_SHAPE[0], DEFAULT_FILE_SHAPE[1]), dtype=np.uint16)
        # Convert raw arrays into DataArrays with (time, nj, ni) dims in place.
        convert_file_content_to_data_array(file_content, dims=('time', 'nj', 'ni'))
        return file_content
def test_sampling_no_nodata_masked_beyond_bounds(data):
    """With nodata unset, sampling outside the dataset bounds must still
    yield a fully masked result."""
    filename = str(data.join('RGB.byte.tif'))
    # Erase nodata so any masking must come from the bounds logic alone.
    with rasterio.open(filename, 'r+') as src:
        src.nodata = None
    with rasterio.open(filename) as src:
        # Renamed from `data` — the original shadowed the fixture parameter.
        sample = next(src.sample([(0.0, 0.0)], masked=True))
    assert numpy.ma.is_masked(sample)
    # The original wrote `all(sample.mask == True)`; `.all()` is the direct form.
    assert sample.mask.all()
class TestResamplerRegistryManipulation():
    """Mutation tests for the future-resampler registry, run against an
    empty, mocked-out registry."""

    def setup_method(self):
        # Touch the registry once so lazy population happens before we mock it away.
        _ = list_resamplers()
        self.mock_reg = mock.patch('pyresample.future.resamplers.registry.RESAMPLER_REGISTRY', {})
        self.mock_reg.start()

    def teardown_method(self):
        self.mock_reg.stop()

    def test_no_builtins_warning(self):
        """An empty registry should warn the user to reinstall."""
        import warnings
        with warnings.catch_warnings(record=True) as w:
            avail_resamplers = list_resamplers()
            assert (not avail_resamplers)
            assert_warnings_contain(w, 'reinstall')

    def test_manual_resampler_registration(self):
        """Register/unregister round-trip leaves the registry clean."""
        rname = 'my_resampler'
        _register_resampler_class(rname, Resampler)
        unregister_resampler(rname)
        with _ignore_no_builtin_resamplers():
            assert (rname not in list_resamplers())

    # NOTE(review): this arrived as a bare `.parametrize(...)` call — restored
    # as a pytest mark, the only form that parses and matches its usage.
    @pytest.mark.parametrize('new_resampler', [Resampler, _custom_resampler_class()])
    def test_multiple_registration_warning_same_class(self, new_resampler):
        """Re-registering an existing name must raise unless explicitly allowed."""
        rname = 'my_resampler'
        _register_resampler_class(rname, Resampler)
        with pytest.raises(ValueError):
            _register_resampler_class(rname, new_resampler, no_exist=False)
        unregister_resampler(rname)
        _register_resampler_class(rname, Resampler)
def test_cannot_update_a_grant_if_grants_are_closed(graphql_client, user, conference_factory, grant_factory):
    """Updating a grant must fail with GrantErrors once the conference's
    grant window is closed."""
    graphql_client.force_login(user)
    closed_conference = conference_factory(active_grants=False)
    grant = grant_factory(conference=closed_conference, user_id=user.id)
    response = _update_grant(graphql_client, grant, name='Marcotte')
    update = response['data']['updateGrant']
    assert update['__typename'] == 'GrantErrors'
    assert update['errors']['nonFieldErrors'] == ['The grants form is not open!']
class Section():
    """One binary section: copies metadata off a load command *lc* and slices
    the section's raw bytes out of *data*."""

    def __init__(self, lc, data):
        self.name = lc.section_name
        self.segment_name = lc.segment_name
        self.address = lc.address
        self.size = lc.size
        self.offset = lc.offset
        self.align = lc.alignment
        self.rel_offset = lc.relocations_offset
        self.rel_num = lc.number_of_relocations
        self.flags = lc.flags
        # Raw content: `size` bytes starting at `offset` within the file image.
        start = self.offset
        self.content = data[start:start + self.size]
def cfstring_to_string(cfstring):
    """Decode a CoreFoundation CFString into a Python str.

    Returns None when CFStringGetCString reports failure.
    """
    length = cf.CFStringGetLength(cfstring)
    max_size = cf.CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8)
    # +1 for the trailing NUL the C API writes.
    buf = c_buffer(max_size + 1)
    ok = cf.CFStringGetCString(cfstring, buf, len(buf), kCFStringEncodingUTF8)
    if not ok:
        return None
    return str(buf.value, 'utf-8')
def DS_format_to_bert(pretrained_path, args):
    """Convert DS-format dialogue JSON splits into BERT-ready tensors and save
    one ``<split>.bert.bin`` file per split (train/val/test) via torch.save.

    Args:
        pretrained_path: Path of the pretrained BERT model for BertData.
        args: Namespace with at least ``raw_path`` and ``save_path``.
    """
    corpora = {'train': [], 'val': [], 'test': []}
    bert = BertData(pretrained_path, args)
    read_root_path = Path(args.raw_path)
    for corpus_type in corpora:
        save_root_path = (Path(args.save_path) / corpus_type)
        save_root_path.mkdir(exist_ok=True, parents=True)
        # Test splits keep extra info during preprocessing.
        is_test = (corpus_type[:4] == 'test')
        read_path = (read_root_path / f'{corpus_type}.json')
        save_path = (save_root_path / f'{corpus_type}.bert.bin')
        logger.info(f'Processing {read_path.stem}')
        jobs = json.load(read_path.open('r', encoding='utf-8'))
        for d in tqdm(jobs):
            b_data = bert.preprocess(d, use_bert_basic_tokenizer=True, is_test=is_test)
            # preprocess returns None for instances it drops (e.g. too short).
            if (b_data is None):
                continue
            (src_subtoken_idxs, tgt_idxs, segments_ids, cls_ids, src_txt, tgt_txts, role_masks) = b_data
            # tgt_idxs/tgt_txts hold (user, agent) targets in that order.
            b_data_dict = {'src': src_subtoken_idxs, 'tgt_user': tgt_idxs[0], 'tgt_agent': tgt_idxs[1], 'segs': segments_ids, 'clss': cls_ids, 'src_txt': src_txt, 'tgt_txt_user': tgt_txts[0], 'tgt_txt_agent': tgt_txts[1], 'role_mask': role_masks}
            corpora[corpus_type].append(b_data_dict)
        logger.info(('Processed instances %d' % len(corpora[corpus_type])))
        logger.info(('Saving to %s' % save_path))
        torch.save(corpora[corpus_type], save_path)
class Artist():
    """Lightweight MusicBrainz artist record (name, sort name, MBID)."""

    def __init__(self, name, sort_name, id_):
        self.name = name
        self.sort_name = sort_name
        self.id = id_

    def is_various(self):
        """True when this is the special 'Various Artists' MBID."""
        return (self.id == VARIOUS_ARTISTS_ARTISTID)

    @classmethod
    def from_credit(cls, mbcredit):
        """Build a list of Artists from a MusicBrainz artist-credit list.

        Credit lists interleave artist dicts with join-phrase strings; the
        TypeError raised by indexing a plain string with 'artist' is how
        non-dict entries are skipped. Restored the missing ``@classmethod``:
        the first parameter is ``cls`` and no instance state is used.
        """
        artists = []
        for credit in mbcredit:
            try:
                artist = credit['artist']
            except TypeError:
                # Join phrase (plain string) — not an artist entry.
                pass
            else:
                artists.append(Artist(artist['name'], artist['sort-name'], artist['id']))
        return artists
class TestReportInfo():
    """Tests for pytest item ``reportinfo()`` (path, line, message)."""

    def test_itemreport_reportinfo(self, pytester: Pytester) -> None:
        """A custom Function subclass's reportinfo drives item.location."""
        pytester.makeconftest('\n import pytest\n class MyFunction(pytest.Function):\n def reportinfo(self):\n return "ABCDE", 42, "custom"\n def pytest_pycollect_makeitem(collector, name, obj):\n if name == "test_func":\n return MyFunction.from_parent(name=name, parent=collector)\n ')
        item = pytester.getitem('def test_func(): pass')
        item.config.pluginmanager.getplugin('runner')
        assert (item.location == ('ABCDE', 42, 'custom'))

    def test_func_reportinfo(self, pytester: Pytester) -> None:
        """Default reportinfo for a plain function: own path, line 0, name."""
        item = pytester.getitem('def test_func(): pass')
        (path, lineno, modpath) = item.reportinfo()
        assert (os.fspath(path) == str(item.path))
        assert (lineno == 0)
        assert (modpath == 'test_func')

    def test_class_reportinfo(self, pytester: Pytester) -> None:
        """Class collectors report the class's defining line and name."""
        modcol = pytester.getmodulecol('\n # lineno 0\n class TestClass(object):\n def test_hello(self): pass\n ')
        classcol = pytester.collect_by_name(modcol, 'TestClass')
        assert isinstance(classcol, Class)
        (path, lineno, msg) = classcol.reportinfo()
        assert (os.fspath(path) == str(modcol.path))
        assert (lineno == 1)
        assert (msg == 'TestClass')

    # NOTE(review): this arrived as a bare `.filterwarnings(...)` call —
    # restored as a pytest mark, the only form that parses here.
    @pytest.mark.filterwarnings('ignore:usage of Generator.Function is deprecated, please use pytest.Function instead')
    def test_reportinfo_with_nasty_getattr(self, pytester: Pytester) -> None:
        """reportinfo must survive classes whose __getattr__ lies."""
        modcol = pytester.getmodulecol('\n # lineno 0\n class TestClass:\n def __getattr__(self, name):\n return "this is not an int"\n\n def __class_getattr__(cls, name):\n return "this is not an int"\n\n def intest_foo(self):\n pass\n\n def test_bar(self):\n pass\n ')
        classcol = pytester.collect_by_name(modcol, 'TestClass')
        assert isinstance(classcol, Class)
        (path, lineno, msg) = classcol.reportinfo()
        func = list(classcol.collect())[0]
        assert isinstance(func, Function)
        (path, lineno, msg) = func.reportinfo()
def _create_post_title(config, show, episode): if show.name_en: title = config.post_title_with_en else: title = config.post_title if ((episode.number == show.length) and config.post_title_postfix_final): title += (' ' + config.post_title_postfix_final) return title
# NOTE(review): the two leading `.filterwarnings` / `.parametrize` calls
# arrived without their `@pytest.mark.` prefix; restored as pytest marks,
# the only form that parses and matches their usage.
@pytest.mark.filterwarnings('error:Duplicate name')
@pytest.mark.parametrize('build_tag_arg, existing_build_tag, filename',
                         [(None, None, 'test-1.0-py2.py3-none-any.whl'),
                          ('2b', None, 'test-1.0-2b-py2.py3-none-any.whl'),
                          (None, '3', 'test-1.0-3-py2.py3-none-any.whl'),
                          ('', '3', 'test-1.0-py2.py3-none-any.whl')],
                         ids=['nobuildnum', 'newbuildarg', 'oldbuildnum', 'erasebuildnum'])
def test_pack(tmp_path_factory, tmp_path, build_tag_arg, existing_build_tag, filename):
    """`pack` must round-trip RECORD unchanged and apply/erase the build tag
    in both the WHEEL metadata and the output filename."""
    unpack_dir = tmp_path_factory.mktemp('wheeldir')
    with ZipFile(TESTWHEEL_PATH) as zf:
        # RECORD lines, minus the WHEEL entry whose hash legitimately changes.
        old_record = zf.read('test-1.0.dist-info/RECORD')
        old_record_lines = sorted((line.rstrip() for line in old_record.split(b'\n') if (line and (not line.startswith(b'test-1.0.dist-info/WHEEL,')))))
        zf.extractall(str(unpack_dir))
    if existing_build_tag:
        # Inject a pre-existing Build number into the unpacked WHEEL metadata.
        wheel_file_path = unpack_dir.joinpath('test-1.0.dist-info').joinpath('WHEEL')
        wheel_file_content = wheel_file_path.read_bytes()
        assert (b'Build' not in wheel_file_content)
        wheel_file_content += b'Build: 3\r\n'
        wheel_file_path.write_bytes(wheel_file_content)
    pack(str(unpack_dir), str(tmp_path), build_tag_arg)
    new_wheel_path = tmp_path.joinpath(filename)
    assert new_wheel_path.is_file()
    with ZipFile(str(new_wheel_path)) as zf:
        new_record = zf.read('test-1.0.dist-info/RECORD')
        new_record_lines = sorted((line.rstrip() for line in new_record.split(b'\n') if (line and (not line.startswith(b'test-1.0.dist-info/WHEEL,')))))
        parser = BytesParser(policy=email.policy.compat32)
        new_wheel_file_content = parser.parsebytes(zf.read('test-1.0.dist-info/WHEEL'))
    assert (new_record_lines == old_record_lines)
    # Expected WHEEL metadata after repacking.
    expected_wheel_content = Message()
    expected_wheel_content['Wheel-Version'] = '1.0'
    expected_wheel_content['Generator'] = 'bdist_wheel (0.30.0)'
    expected_wheel_content['Root-Is-Purelib'] = 'false'
    expected_wheel_content['Tag'] = 'py2-none-any'
    expected_wheel_content['Tag'] = 'py3-none-any'
    # CLI arg wins over an existing build number; '' erases it.
    expected_build_num = (build_tag_arg if (build_tag_arg is not None) else existing_build_tag)
    if expected_build_num:
        expected_wheel_content['Build'] = expected_build_num
    assert (sorted(new_wheel_file_content.items()) == sorted(expected_wheel_content.items()))
class ews_input_long(unittest.TestCase):
    """CLI regression test: `--ews 01 1379 500` must print exactly this
    time-conversion table."""

    def test(self):
        # The expected text is compared byte-for-byte (including spacing and
        # trailing newline); do not reflow it.
        run_test(self, ['--ews', '01 1379 500'], ' Month/Day/Year H:M:S 06/11/2006 00:08:20 GPS\n Modified Julian Date 53897. GPS\n GPSweek DayOfWeek SecOfWeek 355 0 500.000000\n FullGPSweek Zcount 1379 333\n Year DayOfYear SecondOfDay 2006 162 500.000000\n Unix: Second Microsecond 0\n Zcount: 29-bit (32-bit) ()\n')
def ss_multmodel_factory(nsamples, data_mods, data_lhs, idx=None):
    """Build an SGD sampler over the summed exact marginal log-likelihood of
    several GP data models that share one latent spectral-density GP.

    Args:
        nsamples: Number of SGD samples to draw.
        data_mods: GP models (each exposing covar_module.latent_lh/_mod).
        data_lhs: Matching likelihoods, one per model.
        idx: Unused here — presumably kept for interface compatibility with
            sibling factories; confirm before removing.
    """
    for dm in data_mods:
        dm.train()
    for dlh in data_lhs:
        dlh.train()
    mll_list = [gpytorch.ExactMarginalLogLikelihood(dlh, dm) for (dlh, dm) in zip(data_lhs, data_mods)]
    # All data models share one latent GP — take it from the first model.
    latent_lh = data_mods[0].covar_module.latent_lh
    latent_mod = data_mods[0].covar_module.latent_mod

    def ss_ell_builder(latent_mod, latent_lh, data_mods, data_lhs):
        # Recompute the total ELL; called afresh for every SGD evaluation.
        latent_lh.train()
        latent_mod.train()
        loss = 0.0
        for i in range(len(data_mods)):
            # Re-anchor the shared latent model on this model's
            # (omega, demeaned log-density) training pair before scoring.
            demeaned_logdens = data_mods[i].covar_module.latent_params
            omega = data_mods[i].covar_module.omega
            latent_mod.set_train_data(inputs=omega, targets=demeaned_logdens.detach(), strict=False)
            loss = (loss + mll_list[i](data_mods[i](*mll_list[i].model.train_inputs), mll_list[i].model.train_targets))
        return loss
    # The sampler passes a hyperparameter argument `h` that this ELL ignores.
    ell_func = (lambda h: ss_ell_builder(latent_mod, latent_lh, data_mods, data_lhs))
    # Flatten all models' parameters into one optimization vector.
    data_par_list = [list(dm.parameters()) for dm in data_mods]
    optim_pars = [par for sublist in data_par_list for par in sublist]
    return SGD(optim_pars, ell_func, n_samples=nsamples, lr=0.1)
def find_dps(graph: DataPipeGraph, dp_type: Type[DataPipe]) -> List[DataPipe]:
    """Walk a (possibly nested) datapipe graph and collect every pipe whose
    exact type is *dp_type* (subclasses are deliberately excluded)."""
    found: List[DataPipe] = []
    seen: Set[int] = set()

    def walk(subgraph) -> None:
        for node_id, (node, src_graph) in subgraph.items():
            if node_id in seen:
                # Shared upstream pipes appear in several sub-graphs; visit once.
                continue
            seen.add(node_id)
            if type(node) is dp_type:
                found.append(node)
            walk(src_graph)

    walk(graph)
    return found
class NegativeSampling(Strategy):
    """Training strategy that scores positive triples against negatives and
    applies optional L2/L3 regularization."""

    def __init__(self, model=None, loss=None, batch_size=256, regul_rate=0.0, l3_regul_rate=0.0):
        super(NegativeSampling, self).__init__()
        self.model = model
        self.loss = loss
        self.batch_size = batch_size
        self.regul_rate = regul_rate
        self.l3_regul_rate = l3_regul_rate

    def _get_positive_score(self, score):
        # The first batch_size entries are positives; reshape to (batch, n).
        positives = score[:self.batch_size]
        return positives.view(-1, self.batch_size).permute(1, 0)

    def _get_negative_score(self, score):
        # Everything after the positives is negatives; reshape to (batch, n).
        negatives = score[self.batch_size:]
        return negatives.view(-1, self.batch_size).permute(1, 0)

    def forward(self, data):
        score = self.model(data)
        loss_res = self.loss(self._get_positive_score(score),
                             self._get_negative_score(score))
        if self.regul_rate != 0:
            loss_res += self.regul_rate * self.model.regularization(data)
        if self.l3_regul_rate != 0:
            loss_res += self.l3_regul_rate * self.model.l3_regularization()
        return loss_res
class TestGVARFloat(unittest.TestCase):
    """make_gvar_float decodes GOES GVAR 32-bit floats: IBM-style hex float
    (0x41100000 == 1.0) with negatives as the two's complement of the
    positive encoding."""

    def test_fun(self):
        # NOTE(review): the source arrived corrupted — one expected value was
        # missing entirely and one byte string was a byte short. Both were
        # reconstructed from the encoding evidenced by the intact pairs
        # (0x402A0000 == 0.1640625; negatives are two's complements, e.g.
        # -1.0 == ~0x41100000 + 1 == 0xBEF00000). Confirm against the
        # original fixture if available.
        test_data = [
            ((- 1.0), b'\xbe\xf0\x00\x00'),
            ((- 0.1640625), b'\xbf\xd6\x00\x00'),
            (0.0, b'\x00\x00\x00\x00'),
            (0.1640625, b'@*\x00\x00'),    # was b'*\x00\x00' (3 bytes) in the source
            (1.0, b'A\x10\x00\x00'),
            (100.1640625, b'Bd*\x00'),     # expected value was missing in the source
        ]
        for (expected, str_val) in test_data:
            val = np.frombuffer(str_val, dtype='>i4')
            assert (expected == make_gvar_float(val))
class FixedPropertyData(PropertyData):
    """Property data whose encoded payload is always exactly ``size`` bytes."""

    def __init__(self, name, size):
        PropertyData.__init__(self, name)
        # Total payload size in bytes, independent of the unit format.
        self.size = size

    def parse_binary_value(self, data, display, length, format):
        # Ignore the caller-provided length: derive the unit count from the
        # fixed byte size and the per-unit width (``format`` is in bits).
        units = self.size // (format // 8)
        return PropertyData.parse_binary_value(self, data, display, units, format)

    def pack_value(self, value):
        data, dlen, fmt = PropertyData.pack_value(self, value)
        # Enforce the fixed-size contract before handing the bytes back.
        if len(data) != self.size:
            raise BadDataError('Wrong data length for FixedPropertyData: %s' % (value,))
        return (data, dlen, fmt)
# BUG FIX: the two decorators below were mangled to bare ``.parametrize(...)``
# expressions (a SyntaxError); restored to ``@pytest.mark.parametrize``.
@pytest.mark.parametrize('type_', _data.to.dtypes)
@pytest.mark.parametrize(['operator', 'dispatch'], [
    pytest.param((lambda data, number: (data * number)), _data.mul, id='mul'),
    pytest.param((lambda data, number: (number * data)), _data.mul, id='rmul'),
    pytest.param((lambda data, number: (data / number)),
                 (lambda data, number: _data.mul(data, (1 / number))),
                 id='div'),
])
def test_data_scalar_operator(type_, operator, dispatch):
    """Scalar ``*``, ``/`` and reflected ``*`` on a data-layer object must agree
    with the dispatched ``_data.mul`` implementation for every registered dtype."""
    data = qutip.qeye(2, dtype=type_).data
    number = 3
    numpy.testing.assert_allclose(operator(data, number).to_array(),
                                  dispatch(data, number).to_array(), rtol=1e-15)
class ExpectedEnvVars():
    """Matcher asserting that a mapping contains at least the given env vars.

    Equality holds when every ``(key, value)`` pair in ``env_vars`` is present
    in the compared mapping; extra keys in the other mapping are ignored.
    """

    def __init__(self, env_vars: dict):
        self.env_vars = env_vars

    def __eq__(self, other):
        # Subset check: each expected key must exist in ``other`` with the
        # same value (rewritten from the original double negation).
        return all(
            key in other and other[key] == value
            for key, value in self.env_vars.items()
        )

    def __hash__(self):
        # BUG FIX: the original returned ``hash(self.env_vars)``, which always
        # raises TypeError because dicts are unhashable.  Hash an immutable,
        # order-independent view of the items instead.
        return hash(frozenset(self.env_vars.items()))
def test_is_currency():
    """Only an exact, upper-case, known ISO currency code is accepted."""
    assert is_currency('EUR')
    # Wrong case, unknown codes, padding, empty and non-string inputs all fail.
    for bad in ('eUr', 'FUU', '', None, ' EUR ', ' ', [], set()):
        assert not is_currency(bad)
class NoDuplicateOptWarningFilter(logging.Filter):
    """Logging filter that drops repeated 'Optimization Warning: ...' records.

    Messages without the optimization-warning prefix always pass; each
    distinct optimization warning is emitted once and suppressed afterwards.
    """

    def __init__(self, name: str = ''):
        super().__init__(name)
        # BUG FIX: ``prev_msgs`` used to be a mutable class attribute, so all
        # filter instances silently shared one suppression set.  Track seen
        # messages per instance instead.
        self.prev_msgs: set = set()

    def filter(self, record):
        """Return True to let *record* through, False to suppress it."""
        msg = record.getMessage()
        if not msg.startswith('Optimization Warning: '):
            return True
        if msg in self.prev_msgs:
            return False
        self.prev_msgs.add(msg)
        return True
# NOTE(review): the registration decorator was mangled to a bare
# ``_loss('sum_arbitrary')`` in the original; restored below.
@register_loss('sum_arbitrary')
class SumArbitraryLoss(ClassyLoss):
    """Weighted sum of an arbitrary list of ClassyLoss instances.

    ``weights`` defaults to all-ones, i.e. a plain sum.
    """

    def __init__(self, losses: List[ClassyLoss], weights: Optional[Tensor] = None) -> None:
        super().__init__()
        if weights is None:
            weights = torch.ones(len(losses))
        self.losses = losses
        self.weights = weights

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> 'SumArbitraryLoss':
        """Build from a config dict with keys ``losses`` (required) and
        ``weights`` (optional, same length as ``losses``).

        BUG FIX: this is invoked on the class and uses ``cls``, so it needs
        the previously missing ``@classmethod`` decorator.
        """
        assert (type(config['losses']) == list) and (len(config['losses']) > 0), \
            'losses must be a list of registered losses with length > 0'
        # BUG FIX: the original unconditionally indexed config['weights'],
        # raising KeyError when weights were (legitimately) omitted.
        if config.get('weights') is not None:
            assert (type(config['weights']) == list) and (len(config['weights']) == len(config['losses'])), \
                'weights must be None or a list and have same length as losses'
        loss_modules = [build_loss(loss_config) for loss_config in config['losses']]
        assert all(isinstance(loss_module, ClassyLoss) for loss_module in loss_modules), \
            'All losses must be registered, valid ClassyLosses'
        return cls(losses=loss_modules, weights=config.get('weights', None))

    def forward(self, prediction, target):
        # BUG FIX: the original never applied weights[0] to the first loss
        # term; apply every weight uniformly.
        total_loss = None
        for weight, loss in zip(self.weights, self.losses):
            term = weight * loss(prediction, target)
            total_loss = term if total_loss is None else total_loss + term
        return total_loss
# Pygments lexer for DOS/Windows batch files.  The module-level regex
# fragments (_nl, _punct, _ws, _variable, ...) encode cmd.exe's tokenisation
# quirks — caret escapes, %var%/!var! expansion, compound statements in
# parentheses — and the _make_*_state helpers generate parallel state
# machines for top-level and parenthesised ("compound") contexts, which are
# then assembled into the ``tokens`` table.
# NOTE(review): the ``url = ' version_added = '0.7'`` fragment below looks
# truncated by the source extraction (the URL string literal appears fused
# with the ``version_added`` assignment); confirm against upstream pygments.
class BatchLexer(RegexLexer): name = 'Batchfile' aliases = ['batch', 'bat', 'dosbatch', 'winbatch'] filenames = ['*.bat', '*.cmd'] mimetypes = ['application/x-dos-batch'] url = ' version_added = '0.7' flags = (re.MULTILINE | re.IGNORECASE) _nl = '\\n\\x1a' _punct = '&<>|' _ws = '\\t\\v\\f\\r ,;=\\xa0' _nlws = '\\s\\x1a\\xa0,;=' _space = ('(?:(?:(?:\\^[%s])?[%s])+)' % (_nl, _ws)) _keyword_terminator = ('(?=(?:\\^[%s]?)?[%s+./:[\\\\\\]]|[%s%s(])' % (_nl, _ws, _nl, _punct)) _token_terminator = ('(?=\\^?[%s]|[%s%s])' % (_ws, _punct, _nl)) _start_label = ('((?:(?<=^[^:])|^[^:]?)[%s]*)(:)' % _ws) _label = ('(?:(?:[^%s%s+:^]|\\^[%s]?[\\w\\W])*)' % (_nlws, _punct, _nl)) _label_compound = ('(?:(?:[^%s%s+:^)]|\\^[%s]?[^)])*)' % (_nlws, _punct, _nl)) _number = ('(?:-?(?:0[0-7]+|0x[\\da-f]+|\\d+)%s)' % _token_terminator) _opword = '(?:equ|geq|gtr|leq|lss|neq)' _string = ('(?:"[^%s"]*(?:"|(?=[%s])))' % (_nl, _nl)) _variable = ('(?:(?:%%(?:\\*|(?:~[a-z]*(?:\\$[^:]+:)?)?\\d|[^%%:%s]+(?::(?:~(?:-?\\d+)?(?:,(?:-?\\d+)?)?|(?:[^%%%s^]|\\^[^%%%s])[^=%s]*=(?:[^%%%s^]|\\^[^%%%s])*)?)?%%))|(?:\\^?![^!:%s]+(?::(?:~(?:-?\\d+)?(?:,(?:-?\\d+)?)?|(?:[^!%s^]|\\^[^!%s])[^=%s]*=(?:[^!%s^]|\\^[^!%s])*)?)?\\^?!))' % (_nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl)) _core_token = ('(?:(?:(?:\\^[%s]?)?[^"%s%s])+)' % (_nl, _nlws, _punct)) _core_token_compound = ('(?:(?:(?:\\^[%s]?)?[^"%s%s)])+)' % (_nl, _nlws, _punct)) _token = ('(?:[%s]+|%s)' % (_punct, _core_token)) _token_compound = ('(?:[%s]+|%s)' % (_punct, _core_token_compound)) _stoken = ('(?:[%s]+|(?:%s|%s|%s)+)' % (_punct, _string, _variable, _core_token)) def _make_begin_state(compound, _core_token=_core_token, _core_token_compound=_core_token_compound, _keyword_terminator=_keyword_terminator, _nl=_nl, _punct=_punct, _string=_string, _space=_space, _start_label=_start_label, _stoken=_stoken, _token_terminator=_token_terminator, _variable=_variable, _ws=_ws): rest = ('(?:%s|%s|[^"%%%s%s%s])*' % (_string, _variable, _nl, _punct, 
(')' if compound else ''))) rest_of_line = ('(?:(?:[^%s^]|\\^[%s]?[\\w\\W])*)' % (_nl, _nl)) rest_of_line_compound = ('(?:(?:[^%s^)]|\\^[%s]?[^)])*)' % (_nl, _nl)) set_space = ('((?:(?:\\^[%s]?)?[^\\S\\n])*)' % _nl) suffix = '' if compound: _keyword_terminator = ('(?:(?=\\))|%s)' % _keyword_terminator) _token_terminator = ('(?:(?=\\))|%s)' % _token_terminator) suffix = '/compound' return [(('\\)', Punctuation, '#pop') if compound else (('\\)((?=\\()|%s)%s' % (_token_terminator, rest_of_line)), Comment.Single)), (('(?=%s)' % _start_label), Text, ('follow%s' % suffix)), (_space, using(this, state='text')), include(('redirect%s' % suffix)), (('[%s]+' % _nl), Text), ('\\(', Punctuation, 'root/compound'), ('+', Punctuation), (('((?:for|if|rem)(?:(?=(?:\\^[%s]?)?/)|(?:(?!\\^)|(?<=m))(?:(?=\\()|%s)))(%s?%s?(?:\\^[%s]?)?/(?:\\^[%s]?)?\\?)' % (_nl, _token_terminator, _space, (_core_token_compound if compound else _core_token), _nl, _nl)), bygroups(Keyword, using(this, state='text')), ('follow%s' % suffix)), (('(goto%s)(%s(?:\\^[%s]?)?/(?:\\^[%s]?)?\\?%s)' % (_keyword_terminator, rest, _nl, _nl, rest)), bygroups(Keyword, using(this, state='text')), ('follow%s' % suffix)), (words(('assoc', 'break', 'cd', 'chdir', 'cls', 'color', 'copy', 'date', 'del', 'dir', 'dpath', 'echo', 'endlocal', 'erase', 'exit', 'ftype', 'keys', 'md', 'mkdir', 'mklink', 'move', 'path', 'pause', 'popd', 'prompt', 'pushd', 'rd', 'ren', 'rename', 'rmdir', 'setlocal', 'shift', 'start', 'time', 'title', 'type', 'ver', 'verify', 'vol'), suffix=_keyword_terminator), Keyword, ('follow%s' % suffix)), (('(call)(%s?)(:)' % _space), bygroups(Keyword, using(this, state='text'), Punctuation), ('call%s' % suffix)), (('call%s' % _keyword_terminator), Keyword), (('(for%s(?!\\^))(%s)(/f%s)' % (_token_terminator, _space, _token_terminator)), bygroups(Keyword, using(this, state='text'), Keyword), ('for/f', 'for')), (('(for%s(?!\\^))(%s)(/l%s)' % (_token_terminator, _space, _token_terminator)), bygroups(Keyword, 
using(this, state='text'), Keyword), ('for/l', 'for')), (('for%s(?!\\^)' % _token_terminator), Keyword, ('for2', 'for')), (('(goto%s)(%s?)(:?)' % (_keyword_terminator, _space)), bygroups(Keyword, using(this, state='text'), Punctuation), ('label%s' % suffix)), (('(if(?:(?=\\()|%s)(?!\\^))(%s?)((?:/i%s)?)(%s?)((?:not%s)?)(%s?)' % (_token_terminator, _space, _token_terminator, _space, _token_terminator, _space)), bygroups(Keyword, using(this, state='text'), Keyword, using(this, state='text'), Keyword, using(this, state='text')), ('(?', 'if')), (('rem(((?=\\()|%s)%s?%s?.*|%s%s)' % (_token_terminator, _space, _stoken, _keyword_terminator, (rest_of_line_compound if compound else rest_of_line))), Comment.Single, ('follow%s' % suffix)), (('(set%s)%s(/a)' % (_keyword_terminator, set_space)), bygroups(Keyword, using(this, state='text'), Keyword), ('arithmetic%s' % suffix)), (('(set%s)%s((?:/p)?)%s((?:(?:(?:\\^[%s]?)?[^"%s%s^=%s]|\\^[%s]?[^"=])+)?)((?:(?:\\^[%s]?)?=)?)' % (_keyword_terminator, set_space, set_space, _nl, _nl, _punct, (')' if compound else ''), _nl, _nl)), bygroups(Keyword, using(this, state='text'), Keyword, using(this, state='text'), using(this, state='variable'), Punctuation), ('follow%s' % suffix)), default(('follow%s' % suffix))] def _make_follow_state(compound, _label=_label, _label_compound=_label_compound, _nl=_nl, _space=_space, _start_label=_start_label, _token=_token, _token_compound=_token_compound, _ws=_ws): suffix = ('/compound' if compound else '') state = [] if compound: state.append(('(?=\\))', Text, '#pop')) state += [(('%s([%s]*)(%s)(.*)' % (_start_label, _ws, (_label_compound if compound else _label))), bygroups(Text, Punctuation, Text, Name.Label, Comment.Single)), include(('redirect%s' % suffix)), (('(?=[%s])' % _nl), Text, '#pop'), ('\\|\\|?|&&?', Punctuation, '#pop'), include('text')] return state def _make_arithmetic_state(compound, _nl=_nl, _punct=_punct, _string=_string, _variable=_variable, _ws=_ws, _nlws=_nlws): op = '=+\\-*/!~' 
state = [] if compound: state.append(('(?=\\))', Text, '#pop')) state += [('0[0-7]+', Number.Oct), ('0x[\\da-f]+', Number.Hex), ('\\d+', Number.Integer), ('[(),]+', Punctuation), (('([%s]|%%|\\^\\^)+' % op), Operator), (('(%s|%s|(\\^[%s]?)?[^()%s%%\\^"%s%s]|\\^[%s]?%s)+' % (_string, _variable, _nl, op, _nlws, _punct, _nlws, ('[^)]' if compound else '[\\w\\W]'))), using(this, state='variable')), ('(?=[\\x00|&])', Text, '#pop'), include('follow')] return state def _make_call_state(compound, _label=_label, _label_compound=_label_compound): state = [] if compound: state.append(('(?=\\))', Text, '#pop')) state.append((('(:?)(%s)' % (_label_compound if compound else _label)), bygroups(Punctuation, Name.Label), '#pop')) return state def _make_label_state(compound, _label=_label, _label_compound=_label_compound, _nl=_nl, _punct=_punct, _string=_string, _variable=_variable): state = [] if compound: state.append(('(?=\\))', Text, '#pop')) state.append((('(%s?)((?:%s|%s|\\^[%s]?%s|[^"%%^%s%s%s])*)' % ((_label_compound if compound else _label), _string, _variable, _nl, ('[^)]' if compound else '[\\w\\W]'), _nl, _punct, (')' if compound else ''))), bygroups(Name.Label, Comment.Single), '#pop')) return state def _make_redirect_state(compound, _core_token_compound=_core_token_compound, _nl=_nl, _punct=_punct, _stoken=_stoken, _string=_string, _space=_space, _variable=_variable, _nlws=_nlws): stoken_compound = ('(?:[%s]+|(?:%s|%s|%s)+)' % (_punct, _string, _variable, _core_token_compound)) return [(('((?:(?<=[%s])\\d)?)(>>?&|<&)([%s]*)(\\d)' % (_nlws, _nlws)), bygroups(Number.Integer, Punctuation, Text, Number.Integer)), (('((?:(?<=[%s])(?<!\\^[%s])\\d)?)(>>?|<)(%s?%s)' % (_nlws, _nl, _space, (stoken_compound if compound else _stoken))), bygroups(Number.Integer, Punctuation, using(this, state='text')))] tokens = {'root': _make_begin_state(False), 'follow': _make_follow_state(False), 'arithmetic': _make_arithmetic_state(False), 'call': _make_call_state(False), 'label': 
_make_label_state(False), 'redirect': _make_redirect_state(False), 'root/compound': _make_begin_state(True), 'follow/compound': _make_follow_state(True), 'arithmetic/compound': _make_arithmetic_state(True), 'call/compound': _make_call_state(True), 'label/compound': _make_label_state(True), 'redirect/compound': _make_redirect_state(True), 'variable-or-escape': [(_variable, Name.Variable), (('%%%%|\\^[%s]?(\\^!|[\\w\\W])' % _nl), String.Escape)], 'string': [('"', String.Double, '#pop'), (_variable, Name.Variable), ('\\^!|%%', String.Escape), (('[^"%%^%s]+|[%%^]' % _nl), String.Double), default('#pop')], 'sqstring': [include('variable-or-escape'), ('[^%]+|%', String.Single)], 'bqstring': [include('variable-or-escape'), ('[^%]+|%', String.Backtick)], 'text': [('"', String.Double, 'string'), include('variable-or-escape'), (('[^"%%^%s%s\\d)]+|.' % (_nlws, _punct)), Text)], 'variable': [('"', String.Double, 'string'), include('variable-or-escape'), (('[^"%%^%s]+|.' % _nl), Name.Variable)], 'for': [(('(%s)(in)(%s)(\\()' % (_space, _space)), bygroups(using(this, state='text'), Keyword, using(this, state='text'), Punctuation), '#pop'), include('follow')], 'for2': [('\\)', Punctuation), (('(%s)(do%s)' % (_space, _token_terminator)), bygroups(using(this, state='text'), Keyword), '#pop'), (('[%s]+' % _nl), Text), include('follow')], 'for/f': [(('(")((?:%s|[^"])*?")([%s]*)(\\))' % (_variable, _nlws)), bygroups(String.Double, using(this, state='string'), Text, Punctuation)), ('"', String.Double, ('#pop', 'for2', 'string')), (("('(?:%%%%|%s|[\\w\\W])*?')([%s]*)(\\))" % (_variable, _nlws)), bygroups(using(this, state='sqstring'), Text, Punctuation)), (('(`(?:%%%%|%s|[\\w\\W])*?`)([%s]*)(\\))' % (_variable, _nlws)), bygroups(using(this, state='bqstring'), Text, Punctuation)), include('for2')], 'for/l': [('-?\\d+', Number.Integer), include('for2')], 'if': [(('((?:cmdextversion|errorlevel)%s)(%s)(\\d+)' % (_token_terminator, _space)), bygroups(Keyword, using(this, state='text'), 
Number.Integer), '#pop'), (('(defined%s)(%s)(%s)' % (_token_terminator, _space, _stoken)), bygroups(Keyword, using(this, state='text'), using(this, state='variable')), '#pop'), (('(exist%s)(%s%s)' % (_token_terminator, _space, _stoken)), bygroups(Keyword, using(this, state='text')), '#pop'), (('(%s%s)(%s)(%s%s)' % (_number, _space, _opword, _space, _number)), bygroups(using(this, state='arithmetic'), Operator.Word, using(this, state='arithmetic')), '#pop'), (_stoken, using(this, state='text'), ('#pop', 'if2'))], 'if2': [(('(%s?)(==)(%s?%s)' % (_space, _space, _stoken)), bygroups(using(this, state='text'), Operator, using(this, state='text')), '#pop'), (('(%s)(%s)(%s%s)' % (_space, _opword, _space, _stoken)), bygroups(using(this, state='text'), Operator.Word, using(this, state='text')), '#pop')], '(?': [(_space, using(this, state='text')), ('\\(', Punctuation, ('#pop', 'else?', 'root/compound')), default('#pop')], 'else?': [(_space, using(this, state='text')), (('else%s' % _token_terminator), Keyword, '#pop'), default('#pop')]}
class StataDarkStyle(Style):
    """Dark color scheme for highlighting Stata source code."""

    name = 'stata-dark'

    background_color = '#232629'
    highlight_color = '#49483e'

    # Token type -> style string ('' keeps the default foreground).
    styles = {
        Token: '#cccccc',
        Whitespace: '#bbbbbb',
        Error: 'bg:#e3d2d2 #a61717',
        String: '#51cc99',
        Number: '#4FB8CC',
        Operator: '',
        Name.Function: '#6a6aff',
        Name.Other: '#e2828e',
        Keyword: 'bold #7686bb',
        Keyword.Constant: '',
        Comment: 'italic #777777',
        Name.Variable: 'bold #7AB4DB',
        Name.Variable.Global: 'bold #BE646C',
        Generic.Prompt: '#ffffff',
    }
# Staff-only report that ranks publishers by paid ad performance over a date
# range.  Query params: ?sort= (any report total key, or 'created'),
# ?force_revshare= (override revenue-share %, default 70.0) and
# ?revenue_share_percentage= (filter publishers by their configured share).
# Builds one report per publisher with views > 0, aggregate totals
# (clicks/views/revenue/our revenue, CTR, eCPM), and a per-day breakdown with
# per-publisher view/click maps for the template's charts.
class StaffPublisherReportView(BaseReportView): force_revshare = 70.0 impression_model = PublisherPaidImpression report = OptimizedPublisherPaidReport template_name = 'adserver/reports/staff-publishers.html' def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) sort = self.request.GET.get('sort', '') force_revshare = self.request.GET.get('force_revshare', self.force_revshare) impressions = self.get_queryset(start_date=context['start_date'], end_date=context['end_date']) publishers = Publisher.objects.filter(id__in=impressions.values('publisher')) if (sort == 'created'): publishers = publishers.order_by('-created') revenue_share_percentage = self.request.GET.get('revenue_share_percentage', '') if revenue_share_percentage: try: publishers = publishers.filter(revenue_share_percentage=float(revenue_share_percentage)) except ValueError: pass publishers_and_reports = [] report = None sort_options = None for publisher in publishers: queryset = self.get_queryset(publisher=publisher, start_date=context['start_date'], end_date=context['end_date']) report = self.report(queryset, force_revshare=force_revshare) report.generate() if (report.total['views'] > 0): publishers_and_reports.append((publisher, report)) if (publishers_and_reports and report): sort_options = list(report.total.keys()) if (sort and (sort in sort_options)): publishers_and_reports = sorted(publishers_and_reports, key=(lambda obj: obj[1].total[sort]), reverse=True) sort_options.append('created') total_clicks = sum((report.total['clicks'] for (_, report) in publishers_and_reports)) total_views = sum((report.total['views'] for (_, report) in publishers_and_reports)) total_revenue = sum((report.total['revenue'] for (_, report) in publishers_and_reports)) our_total_revenue = (total_revenue - sum((report.total['revenue_share'] for (_, report) in publishers_and_reports))) days = {} for (publisher, report) in publishers_and_reports: for day in report.results: if (day['date'] not in days): 
days[day['date']] = collections.defaultdict(int) days[day['date']]['views_by_publisher'] = {} days[day['date']]['clicks_by_publisher'] = {} days[day['date']]['date'] = day['date'] days[day['date']]['views'] += day['views'] days[day['date']]['clicks'] += day['clicks'] days[day['date']]['views_by_publisher'][publisher.name] = day['views'] days[day['date']]['clicks_by_publisher'][publisher.name] = day['clicks'] days[day['date']]['revenue'] += float(day['revenue']) days[day['date']]['our_revenue'] += float(day['our_revenue']) days[day['date']]['ctr'] = calculate_ctr(days[day['date']]['clicks'], days[day['date']]['views']) days[day['date']]['ecpm'] = calculate_ecpm(days[day['date']]['revenue'], days[day['date']]['views']) days = sorted(days.values(), key=(lambda obj: obj['date']), reverse=True) context.update({'publishers': [p for (p, _) in publishers_and_reports], 'publishers_and_reports': publishers_and_reports, 'total_clicks': total_clicks, 'total_revenue': total_revenue, 'our_total_revenue': our_total_revenue, 'days': days, 'total_views': total_views, 'total_ctr': calculate_ctr(total_clicks, total_views), 'total_ecpm': calculate_ecpm(total_revenue, total_views), 'revshare_options': set((str(pub.revenue_share_percentage) for pub in Publisher.objects.all())), 'revenue_share_percentage': revenue_share_percentage, 'sort': sort, 'sort_options': sort_options, 'metabase_total_revenue': settings.METABASE_QUESTIONS.get('TOTAL_REVENUE')}) return context
# Serialization round-trip and error-path tests for elliptic-curve public
# keys: PEM/DER/OpenSSH encodings, X9.62 compressed/uncompressed points, and
# rejection of invalid encodings, formats, lengths and off-curve points.
# NOTE(review): the bare ``.parametrize(...)`` expressions below appear to be
# ``@pytest.mark.parametrize`` decorators whose ``@pytest.mark`` prefix was
# lost during extraction; confirm against the upstream test suite.
class TestEllipticCurvePEMPublicKeySerialization(): .parametrize(('key_path', 'loader_func', 'encoding'), [(os.path.join('asymmetric', 'PEM_Serialization', 'ec_public_key.pem'), serialization.load_pem_public_key, serialization.Encoding.PEM), (os.path.join('asymmetric', 'DER_Serialization', 'ec_public_key.der'), serialization.load_der_public_key, serialization.Encoding.DER)]) def test_public_bytes_match(self, key_path, loader_func, encoding, backend): _skip_curve_unsupported(backend, ec.SECP256R1()) key_bytes = load_vectors_from_file(key_path, (lambda pemfile: pemfile.read()), mode='rb') key = loader_func(key_bytes, backend) serialized = key.public_bytes(encoding, serialization.PublicFormat.SubjectPublicKeyInfo) assert (serialized == key_bytes) def test_public_bytes_openssh(self, backend): _skip_curve_unsupported(backend, ec.SECP192R1()) _skip_curve_unsupported(backend, ec.SECP256R1()) key_bytes = load_vectors_from_file(os.path.join('asymmetric', 'PEM_Serialization', 'ec_public_key.pem'), (lambda pemfile: pemfile.read()), mode='rb') key = serialization.load_pem_public_key(key_bytes, backend) ssh_bytes = key.public_bytes(serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH) assert (ssh_bytes == b'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCS8827s9rUZyxZTi/um01+oIlWrwLHOjQxRU9CDAndom00zVAw5BRrIKtHB+SWD4P+sVJTARSq1mHt8kOIWrPc=') key = ec.generate_private_key(ec.SECP192R1(), backend).public_key() with pytest.raises(ValueError): key.public_bytes(serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH) def test_public_bytes_invalid_encoding(self, backend): _skip_curve_unsupported(backend, ec.SECP256R1()) key = load_vectors_from_file(os.path.join('asymmetric', 'PEM_Serialization', 'ec_public_key.pem'), (lambda pemfile: serialization.load_pem_public_key(pemfile.read().encode(), backend))) with pytest.raises(TypeError): key.public_bytes('notencoding', serialization.PublicFormat.SubjectPublicKeyInfo) 
.parametrize(('encoding', 'fmt'), (list(itertools.product([serialization.Encoding.Raw, serialization.Encoding.X962, serialization.Encoding.PEM, serialization.Encoding.DER], [serialization.PublicFormat.Raw])) + list(itertools.product([serialization.Encoding.Raw], [serialization.PublicFormat.SubjectPublicKeyInfo, serialization.PublicFormat.PKCS1, serialization.PublicFormat.UncompressedPoint, serialization.PublicFormat.CompressedPoint])))) def test_public_bytes_rejects_invalid(self, encoding, fmt, backend): _skip_curve_unsupported(backend, ec.SECP256R1()) key = ec.generate_private_key(ec.SECP256R1(), backend).public_key() with pytest.raises(ValueError): key.public_bytes(encoding, fmt) def test_public_bytes_invalid_format(self, backend): _skip_curve_unsupported(backend, ec.SECP256R1()) key = load_vectors_from_file(os.path.join('asymmetric', 'PEM_Serialization', 'ec_public_key.pem'), (lambda pemfile: serialization.load_pem_public_key(pemfile.read().encode(), backend))) with pytest.raises(TypeError): key.public_bytes(serialization.Encoding.PEM, 'invalidformat') def test_public_bytes_pkcs1_unsupported(self, backend): _skip_curve_unsupported(backend, ec.SECP256R1()) key = load_vectors_from_file(os.path.join('asymmetric', 'PEM_Serialization', 'ec_public_key.pem'), (lambda pemfile: serialization.load_pem_public_key(pemfile.read().encode(), backend))) with pytest.raises(ValueError): key.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.PKCS1) .parametrize('vector', load_vectors_from_file(os.path.join('asymmetric', 'EC', 'compressed_points.txt'), load_nist_vectors)) def test_from_encoded_point_compressed(self, vector, backend): curve = {b'SECP256R1': ec.SECP256R1(), b'SECP256K1': ec.SECP256K1()}[vector['curve']] _skip_curve_unsupported(backend, curve) point = binascii.unhexlify(vector['point']) pn = ec.EllipticCurvePublicKey.from_encoded_point(curve, point) public_num = pn.public_numbers() assert (public_num.x == int(vector['x'], 16)) assert (public_num.y == 
int(vector['y'], 16)) def test_from_encoded_point_notoncurve(self): uncompressed_point = binascii.unhexlify('a9edf2197c2f8eb3d39aed9c34a66e45d918a07dc7684c42c9b37ac686699ececc4f5f0d756d3c450708a0694eb0a07a68b805070b40b058d27271f6e') with pytest.raises(ValueError): ec.EllipticCurvePublicKey.from_encoded_point(ec.SECP256R1(), uncompressed_point) def test_from_encoded_point_uncompressed(self): uncompressed_point = binascii.unhexlify('a9edf2197c2f8eb3d39aed9c34a66e45d918a07dc7684c42c9b37ac686699ececc4f5f0d756d3c450708a0694eb0a07a68b805070b40b058d27271f6d') pn = ec.EllipticCurvePublicKey.from_encoded_point(ec.SECP256R1(), uncompressed_point) assert (pn.public_numbers().x == int('7399336a9edf2197c2f8eb3d39aed9c34a66e45d918a07dc7684c42c9b37ac68', 16)) assert (pn.public_numbers().y == int('6699ececc4f5f0d756d3c450708a0694eb0a07a68b805070b40b058d27271f6d', 16)) def test_from_encoded_point_invalid_length(self): bad_data = binascii.unhexlify('a9edf2197c2f8eb3d39aed9c34a66e45d918a07dc7684c42c9b37ac686699ececc4f5f0d756d3c450708a0694eb0a07a68b805070b40b058d27271f6d') with pytest.raises(ValueError): ec.EllipticCurvePublicKey.from_encoded_point(ec.SECP384R1(), bad_data) def test_from_encoded_point_empty_byte_string(self): with pytest.raises(ValueError): ec.EllipticCurvePublicKey.from_encoded_point(ec.SECP384R1(), b'') def test_from_encoded_point_not_a_curve(self): with pytest.raises(TypeError): ec.EllipticCurvePublicKey.from_encoded_point('notacurve', b'\x04data') def test_from_encoded_point_unsupported_encoding(self): unsupported_type = binascii.unhexlify('a9edf2197c2f8eb3d39aed9c34a66e45d918a07dc7684c42c9b37ac68') with pytest.raises(ValueError): ec.EllipticCurvePublicKey.from_encoded_point(ec.SECP256R1(), unsupported_type) .parametrize('vector', load_vectors_from_file(os.path.join('asymmetric', 'EC', 'compressed_points.txt'), load_nist_vectors)) def test_serialize_point(self, vector, backend): curve = {b'SECP256R1': ec.SECP256R1(), b'SECP256K1': ec.SECP256K1()}[vector['curve']] 
_skip_curve_unsupported(backend, curve) point = binascii.unhexlify(vector['point']) key = ec.EllipticCurvePublicKey.from_encoded_point(curve, point) key2 = ec.EllipticCurvePublicKey.from_encoded_point(curve, key.public_bytes(serialization.Encoding.X962, serialization.PublicFormat.UncompressedPoint)) assert (key.public_bytes(serialization.Encoding.X962, serialization.PublicFormat.CompressedPoint) == point) assert (key2.public_bytes(serialization.Encoding.X962, serialization.PublicFormat.CompressedPoint) == point)
class _ExpectFeedback(_Feedback):
    """Feedback source that evaluates ``<oper>`` on the solver's current state.

    Accepts either a state of length ``N`` or a column-stacked density matrix
    of shape ``(N**2, 1)``, which is unstacked before the expectation value is
    computed.
    """

    def __init__(self, oper, default=0.0):
        self.oper = QobjEvo(oper)
        # Hilbert-space dimension and its square, used in __call__ to tell
        # the two supported state layouts apart.
        self.N = oper.shape[1]
        self.N2 = oper.shape[1] ** 2
        self.default = default

    def check_consistency(self, dims):
        compatible = (
            self.oper._dims == dims
            or self.oper._dims[1] == dims
            or self.oper._dims == dims[0]
        )
        if not compatible:
            raise ValueError(f'Dimensions of the expect operator ({self.oper.dims}) does not match the operator ({dims}).')

    def __call__(self, t, state):
        if state.shape[0] == self.N:
            # Plain state: compute the expectation value directly.
            return self.oper.expect_data(t, state)
        if state.shape[0] == self.N2 and state.shape[1] == 1:
            # Column-stacked density matrix: restore the square shape first.
            return self.oper.expect_data(t, _data.column_unstack(state, self.N))
        raise ValueError(f'Shape of the expect operator ({self.oper.shape}) does not match the state ({state.shape}).')

    def __repr__(self):
        return 'ExpectFeedback'
class ctx(object):
    """Context manager installing key/value pairs into the module-level
    ``_ctx`` registry for the duration of a ``with`` block.

    Pairs may be supplied either as one dict (``kwd_dict``) or as keyword
    arguments; the registry is cleared on both entry and exit.
    """

    def __init__(self, kwd_dict=None, **kwds):
        # An explicit (truthy) dict argument takes precedence over **kwds.
        self.kwds = kwd_dict or kwds

    def __enter__(self):
        _ctx.clear()
        for key, value in self.kwds.items():
            _ctx.add(key, value)
        return self

    def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
        # Drop our reference and leave the registry empty.
        self.kwds = None
        _ctx.clear()
def test_customizations(ansi_io: BufferedIO) -> None:
    """A fully customised bar renders with the configured characters/format."""
    bar = ProgressBar(ansi_io, 10, 0)
    bar.set_bar_width(10)
    bar.set_bar_character('_')
    bar.set_empty_bar_character(' ')
    bar.set_progress_character('/')
    bar.set_format(' %current%/%max% [%bar%] %percent:3s%%')

    bar.start()
    bar.advance()

    # One snapshot per redraw: the initial state, then after a single step.
    expected = generate_output([' 0/10 [/ ] 0%', ' 1/10 [_/ ] 10%'])
    assert expected == ansi_io.fetch_error()
def _is_property_decorator(decorator):
    """Return True if *decorator* (an astroid node) can be inferred to the
    builtin ``property`` class or to a class inheriting from it."""

    def _is_property_class(class_node):
        # The builtin ``property`` lives in the root of the builtins module.
        return class_node.name == 'property' and class_node.root().name == builtins.__name__

    for inferred in decorator.infer():
        if not isinstance(inferred, astroid.nodes.ClassDef):
            continue
        # Direct match first; only scan ancestors when it is not property itself.
        if _is_property_class(inferred) or any(
            _is_property_class(ancestor) for ancestor in inferred.ancestors()
        ):
            return True
    return False
def events_for_onchain_secretreveal(channel_state: NettingChannelState, secret: Secret, expiration: BlockExpiration, block_hash: BlockHash) -> List[Event]:
    """Return the on-chain secret-reveal event for *channel_state*, if any.

    Revealing on-chain is only emitted while the channel is at most closed;
    for later states an empty event list is returned.
    """
    typecheck(secret, T_Secret)

    if get_status(channel_state) not in CHANNEL_STATES_UP_TO_CLOSED:
        return []

    reveal = ContractSendSecretReveal(
        expiration=expiration,
        secret=secret,
        triggered_by_block_hash=block_hash,
    )
    return [reveal]
def total_norm_constraint(tensor_vars, max_norm, epsilon=1e-07, return_norm=False):
    """Rescale *tensor_vars* so their joint L2 norm does not exceed *max_norm*.

    Returns the scaled tensors; when ``return_norm`` is true, also returns the
    pre-clipping joint norm.
    """
    # Joint norm over every element of every tensor.
    norm = pt.sqrt(sum(pt.sum(t ** 2) for t in tensor_vars))
    dtype = np.dtype(pytensor.config.floatX).type
    # Clip the target norm at max_norm and scale each tensor by
    # target/actual; epsilon guards against division by zero.
    target_norm = pt.clip(norm, 0, dtype(max_norm))
    multiplier = target_norm / (dtype(epsilon) + norm)
    scaled = [t * multiplier for t in tensor_vars]
    if return_norm:
        return (scaled, norm)
    return scaled
def test_exception_lookup_last_except_handler_wins() -> None:
    """The name bound by ``as`` infers to the exception type of the handler
    that bound it last (later try statements / later handlers win)."""
    cases = [
        # Two consecutive try statements reusing the same name.
        ('\n try:\n 1/0\n except ValueError as exc:\n pass\n try:\n 1/0\n except OSError as exc:\n exc #\n ', 'OSError'),
        # One try with two handlers; the marked use sits in the second.
        ('\n try:\n 1/0\n except ZeroDivisionError as exc:\n pass\n except ValueError as exc:\n exc #\n ', 'ValueError'),
    ]
    for code, expected_name in cases:
        node = extract_node(code)
        assert isinstance(node, nodes.NodeNG)
        inferred = node.inferred()
        assert len(inferred) == 1
        inferred_exc = inferred[0]
        assert isinstance(inferred_exc, Instance)
        assert inferred_exc.name == expected_name
def bin_bloq_counts(bloq: Bloq) -> Dict[(str, int)]:
    """Bucket the T-gate cost of ``bloq``'s direct callees by category.

    Splits/joins/allocations/frees are bookkeeping and are skipped.  Every
    other callee contributes ``num_calls * t_count`` to exactly one bucket;
    callees with no T-count reported are ignored.
    """
    classified_bloqs = defaultdict(int)
    # BUG FIX: the loop variable used to shadow the ``bloq`` parameter, which
    # works only by accident of evaluation order; renamed to ``sub_bloq``.
    for sub_bloq, num_calls in bloq.bloq_counts().items():
        if isinstance(sub_bloq, (Split, Join, Allocate, Free)):
            continue
        # T-count from the callee's own call graph; None when it has no T gates.
        num_t = sub_bloq.call_graph(generalizer=GENERALIZERS)[1].get(TGate())
        if num_t is None:
            continue
        weight = num_calls * int(num_t)
        if isinstance(sub_bloq, bloq_comparators):
            classified_bloqs['comparator'] += weight
        elif isinstance(sub_bloq, MultiControlPauli):
            # Only a multi-controlled Z counts as a reflection.
            if isinstance(sub_bloq.target_gate, cirq.ops.common_gates.ZPowGate):
                classified_bloqs['reflections'] += weight
        elif isinstance(sub_bloq, (SelectSwapQROM, QROM)):
            classified_bloqs['qrom'] += weight
        elif isinstance(sub_bloq, CSwap):
            classified_bloqs['controlled_swaps'] += weight
        elif isinstance(sub_bloq, rotation_bloqs):
            classified_bloqs['rotation'] += weight
        elif isinstance(sub_bloq, ToContiguousIndex):
            classified_bloqs['contiguous_register'] += weight
        else:
            classified_bloqs['other'] += weight
    return classified_bloqs
class VkKeyboard(object):
    """Builder for the VK bot-message keyboard JSON payload."""

    __slots__ = ('one_time', 'lines', 'keyboard', 'inline')

    def __init__(self, one_time=False, inline=False):
        self.one_time = one_time
        self.inline = inline
        # ``lines`` is aliased inside ``keyboard``, so buttons appended later
        # show up in the serialised payload automatically.
        self.lines = [[]]
        self.keyboard = {
            'one_time': self.one_time,
            'inline': self.inline,
            'buttons': self.lines,
        }

    def get_keyboard(self):
        """Serialise the keyboard to the JSON string expected by the VK API."""
        return sjson_dumps(self.keyboard)

    @classmethod
    def get_empty_keyboard(cls):
        """Return the JSON for a keyboard with no buttons (hides the keyboard).

        BUG FIX: this method calls ``cls()`` and is meant to be invoked on
        the class; the ``@classmethod`` decorator was missing.
        """
        keyboard = cls()
        keyboard.keyboard['buttons'] = []
        return keyboard.get_keyboard()

    @staticmethod
    def _serialize_payload(payload):
        # VK expects a JSON string; pass through None and pre-encoded strings.
        if payload is not None and not isinstance(payload, str):
            return sjson_dumps(payload)
        return payload

    def _open_line(self):
        # Current line, checked against the per-line button limit.
        current_line = self.lines[-1]
        if len(current_line) >= MAX_BUTTONS_ON_LINE:
            raise ValueError(f'Max {MAX_BUTTONS_ON_LINE} buttons on a line')
        return current_line

    def _open_empty_line(self):
        # Current line, which must be empty for full-width button types.
        current_line = self.lines[-1]
        if len(current_line) != 0:
            raise ValueError('This type of button takes the entire width of the line')
        return current_line

    def _add_colored_button(self, button_type, label, color, payload):
        # Shared implementation for TEXT and CALLBACK buttons.
        current_line = self._open_line()
        color_value = color.value if isinstance(color, VkKeyboardColor) else color
        payload = self._serialize_payload(payload)
        current_line.append({
            'color': color_value,
            'action': {'type': button_type, 'payload': payload, 'label': label},
        })

    def add_button(self, label, color=VkKeyboardColor.SECONDARY, payload=None):
        """Append a regular text button to the current line."""
        self._add_colored_button(VkKeyboardButton.TEXT.value, label, color, payload)

    def add_callback_button(self, label, color=VkKeyboardColor.SECONDARY, payload=None):
        """Append a callback button to the current line."""
        self._add_colored_button(VkKeyboardButton.CALLBACK.value, label, color, payload)

    def add_location_button(self, payload=None):
        """Append a send-location button; it must occupy a whole line."""
        current_line = self._open_empty_line()
        payload = self._serialize_payload(payload)
        current_line.append({'action': {'type': VkKeyboardButton.LOCATION.value, 'payload': payload}})

    def add_vkpay_button(self, hash, payload=None):
        """Append a VK Pay button; it must occupy a whole line."""
        current_line = self._open_empty_line()
        payload = self._serialize_payload(payload)
        current_line.append({'action': {'type': VkKeyboardButton.VKPAY.value, 'payload': payload, 'hash': hash}})

    def add_vkapps_button(self, app_id, owner_id, label, hash, payload=None):
        """Append a VK Mini Apps button; it must occupy a whole line."""
        current_line = self._open_empty_line()
        payload = self._serialize_payload(payload)
        current_line.append({'action': {'type': VkKeyboardButton.VKAPPS.value, 'app_id': app_id, 'owner_id': owner_id, 'label': label, 'payload': payload, 'hash': hash}})

    def add_openlink_button(self, label, link, payload=None):
        """Append an open-link button to the current line."""
        current_line = self._open_line()
        payload = self._serialize_payload(payload)
        current_line.append({'action': {'type': VkKeyboardButton.OPENLINK.value, 'link': link, 'label': label, 'payload': payload}})

    def add_line(self):
        """Start a new empty button line, enforcing the per-keyboard limit."""
        max_lines = MAX_INLINE_LINES if self.inline else MAX_DEFAULT_LINES
        if len(self.lines) >= max_lines:
            kind = 'inline' if self.inline else 'default'
            raise ValueError(f'Max {max_lines} lines for {kind} keyboard')
        self.lines.append([])
# FIX: the parametrize decorator had been reduced to a bare ``.parametrize``
# expression, which is a syntax/semantics error; restore the pytest marker
# (the fixture-style ``echoes_resource_database`` argument confirms pytest).
@pytest.mark.parametrize(
    ('damage', 'items', 'requirement'),
    [
        (50, [], _arr_req('and', [_json_req(50)])),
        (MAX_DAMAGE, [], _arr_req('and', [_json_req(1, 'Dark', ResourceType.ITEM)])),
        (80, [], _arr_req('and', [_json_req(50), _json_req(30)])),
        (30, [], _arr_req('or', [_json_req(50), _json_req(30)])),
        (50, [], _arr_req('or', [_json_req(50), _json_req(1, 'Dark', ResourceType.ITEM)])),
        (0, ['Dark'], _arr_req('or', [_json_req(50), _json_req(1, 'Dark', ResourceType.ITEM)])),
        (100, [], _arr_req('or', [_json_req(100), _arr_req('and', [_json_req(50), _json_req(1, 'Dark', ResourceType.ITEM)])])),
        (50, ['Dark'], _arr_req('or', [_json_req(100), _arr_req('and', [_json_req(50), _json_req(1, 'Dark', ResourceType.ITEM)])])),
        (150, [], _arr_req('and', [_json_req(100), _arr_req('or', [_json_req(50), _json_req(1, 'Dark', ResourceType.ITEM)])])),
        (100, ['Dark'], _arr_req('and', [_json_req(100), _arr_req('or', [_json_req(50), _json_req(1, 'Dark', ResourceType.ITEM)])])),
        (200, [], _arr_req('and', [_json_req(100), _json_req(100, 'DarkWorld1')])),
        (121, ['DarkSuit'], _arr_req('and', [_json_req(100), _json_req(100, 'DarkWorld1')])),
        (100, ['LightSuit'], _arr_req('and', [_json_req(100), _json_req(100, 'DarkWorld1')])),
    ],
)
def test_requirement_damage(damage, items, requirement, echoes_resource_database):
    """Parsed damage requirements report the expected damage for a given inventory."""
    req = data_reader.read_requirement(requirement, echoes_resource_database)
    # Inventory with exactly one copy of each named item.
    collection = ResourceCollection.from_dict(
        echoes_resource_database,
        {echoes_resource_database.get_item(item): 1 for item in items},
    )
    assert req.damage(collection, echoes_resource_database) == damage
def test_hashgrid_query(test, device):
    """Cross-check hash-grid neighbor counts against a brute-force O(n^2) pass.

    NOTE(review): `dim_x/dim_y/dim_z`, `num_runs`, `print_enabled`,
    `num_points`, `scale`, `cell_radius` and `query_radius` are module-level
    config not visible here — presumably defined alongside the kernels.
    """
    grid = wp.HashGrid(dim_x, dim_y, dim_z, device)
    for i in range(num_runs):
        if print_enabled:
            print(f'Run: {(i + 1)}')
            print('')
        # Fixed seed so every run draws the same point cloud.
        np.random.seed(532)
        # NOTE(review): this value is immediately overwritten below, but the
        # rand() call still advances the RNG stream — removing it would change
        # the jitter drawn inside particle_grid(). Left in place deliberately.
        points = ((np.random.rand(num_points, 3) * scale) - (np.array((scale, scale, scale)) * 0.5))

        def particle_grid(dim_x, dim_y, dim_z, lower, radius, jitter):
            # Regular lattice of particles spaced 2*radius apart, offset to
            # `lower`, with uniform jitter of up to `jitter*radius` per axis.
            points = np.meshgrid(np.linspace(0, dim_x, dim_x), np.linspace(0, dim_y, dim_y), np.linspace(0, dim_z, dim_z))
            points_t = (((np.array((points[0], points[1], points[2])).T * radius) * 2.0) + np.array(lower))
            points_t = (points_t + ((np.random.rand(*points_t.shape) * radius) * jitter))
            return points_t.reshape(((- 1), 3))

        points = particle_grid(16, 32, 16, (0.0, 0.3, 0.0), (cell_radius * 0.25), 0.1)
        points_arr = wp.array(points, dtype=wp.vec3, device=device)
        counts_arr = wp.zeros(len(points), dtype=int, device=device)
        counts_arr_ref = wp.zeros(len(points), dtype=int, device=device)
        # Reference: all-pairs neighbor counting (one thread per pair).
        with wp.ScopedTimer('brute', active=print_enabled):
            wp.launch(kernel=count_neighbors_reference, dim=(len(points) * len(points)), inputs=[query_radius, points_arr, counts_arr_ref, len(points)], device=device)
            wp.synchronize()
        with wp.ScopedTimer('grid build', active=print_enabled):
            grid.build(points_arr, cell_radius)
            wp.synchronize()
        with wp.ScopedTimer('grid query', active=print_enabled):
            wp.launch(kernel=count_neighbors, dim=len(points), inputs=[grid.id, query_radius, points_arr, counts_arr], device=device)
            wp.synchronize()
        counts = counts_arr.numpy()
        counts_ref = counts_arr_ref.numpy()
        if print_enabled:
            print(f'Grid min: {np.min(counts)} max: {np.max(counts)} avg: {np.mean(counts)}')
            print(f'Ref min: {np.min(counts_ref)} max: {np.max(counts_ref)} avg: {np.mean(counts_ref)}')
            print(f'Passed: {np.array_equal(counts, counts_ref)}')
        # Exact equality: both kernels count integer neighbor totals.
        test.assertTrue(np.array_equal(counts, counts_ref))
class Effect1007(BaseEffect):
    """Passive skill effect: boosts damage of small autocannons that require
    the 'Small Autocannon Specialization' skill, scaled by skill level."""

    type = 'passive'

    def handler(fit, skill, context, projectionRange, **kwargs):
        # Total bonus is the per-level attribute times the trained level.
        bonus = skill.getModifiedItemAttr('damageMultiplierBonus') * skill.level

        def affects(mod):
            return mod.item.requiresSkill('Small Autocannon Specialization')

        fit.modules.filteredItemBoost(affects, 'damageMultiplier', bonus, **kwargs)
def create_labels(num_rows: int, num_classes: int=2, dtype: Optional[np.dtype]=None):
    """Generate a random label Series named 'label'.

    ``num_classes == 0`` means regression-style labels: uniform floats in
    [0, 1), default dtype float32. Otherwise integer class labels in
    [0, num_classes), default dtype int32. An explicit ``dtype`` overrides
    the default in either case.
    """
    if num_classes == 0:
        values = np.random.uniform(0, 1, size=num_rows)
        return pd.Series(values, dtype=dtype or np.float32, name='label')
    values = np.random.randint(0, num_classes, size=num_rows)
    return pd.Series(values, dtype=dtype or np.int32, name='label')
def post_callback(request):
    """Per-frame callback: pretty-print frame metadata into the info tab and
    mirror auto-exposure/AWB values into the controls when they are in auto
    mode (disabled widgets).

    NOTE(review): ``info_tab``, ``aec_tab`` and ``vid_tab`` are module-level
    GUI widgets defined elsewhere in this file.
    """
    metadata = request.get_metadata()
    # Sort keys alphabetically, but push the Awb* entries to the end by
    # prefixing their sort key with 'Z'.
    sorted_metadata = sorted(metadata.items(), key=(lambda x: (x[0] if ('Awb' not in x[0]) else f'Z{x[0]}')))
    pretty_metadata = []
    for (k, v) in sorted_metadata:
        row = ''
        try:
            # EAFP: iterable values are rendered as tuples/matrices,
            # scalars fall through to the TypeError branch below.
            iter(v)
            if (k == 'ColourCorrectionMatrix'):
                # Show the CCM as a 3-column matrix rounded to 2 decimals.
                matrix = np.around(np.reshape(v, ((- 1), 3)), decimals=2)
                row = f'''{k}: {matrix}'''
            else:
                row_data = [(f'{x:.2f}' if (type(x) is float) else f'{x}') for x in v]
                row = f"{k}: ({', '.join(row_data)})"
        except TypeError:
            if (type(v) is float):
                row = f'{k}: {v:.2f}'
            else:
                row = f'{k}: {v}'
        pretty_metadata.append(row)
    info_tab.setText('\n'.join(pretty_metadata))
    # Disabled controls mean "auto" — reflect the camera-chosen values.
    if (not aec_tab.exposure_time.isEnabled()):
        aec_tab.exposure_time.setValue(metadata['ExposureTime'])
        aec_tab.analogue_gain.setValue(metadata['AnalogueGain'])
    if (not aec_tab.colour_gain_r.isEnabled()):
        aec_tab.colour_gain_r.setValue(metadata.get('ColourGains', [1.0, 1.0])[0])
        aec_tab.colour_gain_b.setValue(metadata.get('ColourGains', [1.0, 1.0])[1])
    vid_tab.frametime = metadata['FrameDuration']
class TTPE2(TestCase):
    """Tests unsynchronisation handling when parsing TPE2 (band/orchestra) frames."""

    def test_unsynch(self):
        header = ID3Header()
        header.version = (2, 4, 0)
        # Tag-level unsynch flag set: the 0xFF 0x00 pair is collapsed.
        header._flags = 128
        badsync = b'\x00\xff\x00ab\x00'
        # FIX: assertEquals is a deprecated alias of assertEqual and was
        # removed in Python 3.12.
        self.assertEqual(TPE2._fromData(header, 0, badsync), [u'yab'])
        # Tag flag clear, frame-level unsynch flag (0x02) set instead.
        header._flags = 0
        self.assertEqual(TPE2._fromData(header, 2, badsync), [u'yab'])
        # No unsynch anywhere: the embedded 0x00 splits the value in two.
        tag = TPE2._fromData(header, 0, badsync)
        self.assertEqual(tag, [u'y', u'ab'])
class SignalsRegister(metaclass=abc.ABCMeta):
    """Abstract store for Signal objects produced during a backtest."""

    def save_signals(self, signals: List[Signal]):
        """Persist a batch of signals."""
        raise NotImplementedError()

    def get_signals(self) -> QFDataFrame:
        """Return every stored signal as a data frame."""
        raise NotImplementedError()

    def get_signals_for_ticker(self, ticker: Optional[Ticker], alpha_model=None) -> QFSeries:
        """Return signals for one ticker, optionally narrowed to one alpha model."""
        raise NotImplementedError()

    def _generate_ticker_name(self, signal: Signal) -> str:
        # Column name is the ticker name followed by the alpha model's string
        # representation (empty when no model is attached).
        suffix = '' if signal.alpha_model is None else str(signal.alpha_model)
        return signal.ticker.name + suffix
def get_values(hive, key):
    """Run a 'registryquery' against the given hive/key and return the
    discovered registry values as a {value_name: value_data} dict (empty on
    command failure)."""
    cmd = ops.cmd.getDszCommand('registryquery')
    cmd.hive = hive
    cmd.key = key
    result = cmd.execute()
    if not cmd.success:
        return {}
    # Flatten every value under every returned key; later duplicates win,
    # matching the original accumulation order.
    return {
        reg_value.name: reg_value.value
        for reg_key in result.key
        for reg_value in reg_key.value
    }
# FIX: the mock decorator had been reduced to a bare tuple expression;
# restore unittest.mock.patch (the injected ``mock_rewriter`` argument and
# the spec'd target string identify it unambiguously).
@patch('pypyr.steps.dsl.fileinoutrewriter.ObjectRewriter', spec=ObjectRewriter)
def test_objectrewriterstep_run_step_no_out(mock_rewriter):
    """ObjectRewriterStep with no 'out' path rewrites the input file in place."""
    context = Context({'root': {'in': 'inpathhere'}})
    obj = ObjectRewriterStep('blah.name', 'root', context)
    assert obj.path_in == 'inpathhere'
    assert not obj.path_out
    assert obj.context == context
    assert obj.logger.name == 'blah.name'
    mock_representer = Mock(spec=ObjectRepresenter)
    obj.run_step(mock_representer)
    # Rewriter constructed with the context formatter and the representer,
    # default encodings.
    assert mock_rewriter.mock_calls[0] == call(context.get_formatted_value, mock_representer, encoding_in=None, encoding_out=None)
    # out_path=None means "overwrite in place".
    mock_rewriter.return_value.files_in_to_out.assert_called_once_with(in_path='inpathhere', out_path=None)
def create_lmdb_for_div2k():
    """Create LMDB files for DIV2K training data.

    Builds one LMDB per dataset variant: the HR sub-images and the bicubic
    LR sub-images at scales X2/X3/X4. (Refactored from four copy-pasted
    stanzas into a single data-driven loop.)
    """
    # (source image folder, destination lmdb path)
    datasets = [
        ('datasets/DIV2K/DIV2K_train_HR_sub',
         'datasets/DIV2K/DIV2K_train_HR_sub.lmdb'),
        ('datasets/DIV2K/DIV2K_train_LR_bicubic/X2_sub',
         'datasets/DIV2K/DIV2K_train_LR_bicubic_X2_sub.lmdb'),
        ('datasets/DIV2K/DIV2K_train_LR_bicubic/X3_sub',
         'datasets/DIV2K/DIV2K_train_LR_bicubic_X3_sub.lmdb'),
        ('datasets/DIV2K/DIV2K_train_LR_bicubic/X4_sub',
         'datasets/DIV2K/DIV2K_train_LR_bicubic_X4_sub.lmdb'),
    ]
    for folder_path, lmdb_path in datasets:
        img_path_list, keys = prepare_keys_div2k(folder_path)
        make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
class DictObjectModelTest(unittest.TestCase):
    """Checks astroid's inference model for builtin dict instances."""

    def test__class__(self) -> None:
        node = builder.extract_node('{}.__class__')
        klass = next(node.infer())
        self.assertIsInstance(klass, astroid.ClassDef)
        self.assertEqual(klass.name, 'dict')

    def test_attributes_inferred_as_methods(self) -> None:
        # Unbound attribute access on a dict literal infers to bound methods.
        nodes = builder.extract_node('\n {}.values #\n {}.items #\n {}.keys #\n ')
        for node in nodes:
            self.assertIsInstance(next(node.infer()), astroid.BoundMethod)

    def test_wrapper_objects_for_dict_methods_python3(self) -> None:
        # Calling keys()/values()/items() infers to the dedicated view objects.
        nodes = builder.extract_node('\n {1:1, 2:3}.values() #\n {1:1, 2:3}.keys() #\n {1:1, 2:3}.items() #\n ')
        assert isinstance(nodes, list)
        values = next(nodes[0].infer())
        self.assertIsInstance(values, objects.DictValues)
        self.assertEqual([item.value for item in values.elts], [1, 3])
        keys = next(nodes[1].infer())
        self.assertIsInstance(keys, objects.DictKeys)
        self.assertEqual([item.value for item in keys.elts], [1, 2])
        items = next(nodes[2].infer())
        self.assertIsInstance(items, objects.DictItems)
def test_args_refcount():
    """Verify that pybind11 *args/**kwargs handling does not leak references.

    Compares the refcount of ``myval`` before and after each call style.
    NOTE(review): ordering matters — each assertion re-checks the baseline,
    so the statements must not be reordered.
    """
    refcount = m.arg_refcount_h
    myval = 54321
    expected = refcount(myval)
    # Borrowed handle: no extra reference.
    assert (m.arg_refcount_h(myval) == expected)
    # Owned object: exactly one extra reference while held.
    assert (m.arg_refcount_o(myval) == (expected + 1))
    assert (m.arg_refcount_h(myval) == expected)
    assert (refcount(myval) == expected)
    # Mixed positional + *args / **kwargs round-trips, refcount unchanged after.
    assert (m.mixed_plus_args(1, 2.0, 'a', myval) == (1, 2.0, ('a', myval)))
    assert (refcount(myval) == expected)
    assert (m.mixed_plus_kwargs(3, 4.0, a=1, b=myval) == (3, 4.0, {'a': 1, 'b': myval}))
    assert (refcount(myval) == expected)
    assert (m.args_function((- 1), myval) == ((- 1), myval))
    assert (refcount(myval) == expected)
    assert (m.mixed_plus_args_kwargs(5, 6.0, myval, a=myval) == (5, 6.0, (myval,), {'a': myval}))
    assert (refcount(myval) == expected)
    assert (m.args_kwargs_function(7, 8, myval, a=1, b=myval) == ((7, 8, myval), {'a': 1, 'b': myval}))
    assert (refcount(myval) == expected)
    # Passing the same object three times: refcount observed inside the call.
    exp3 = refcount(myval, myval, myval)
    assert (m.args_refcount(myval, myval, myval) == (exp3, exp3, exp3))
    assert (refcount(myval) == expected)
    # The mixed variant holds three extra temporary references during the call.
    assert (m.mixed_args_refcount(myval, myval, myval) == ((exp3 + 3), (exp3 + 3), (exp3 + 3)))
class ProjectForm(forms.ModelForm):
    """Create/update form for a Project; the catalog and parent choices are
    injected by the view via the ``catalogs`` and ``projects`` kwargs."""

    use_required_attribute = False

    def __init__(self, *args, **kwargs):
        catalogs = kwargs.pop('catalogs')
        projects = kwargs.pop('projects')
        super().__init__(*args, **kwargs)
        self.fields['title'].widget.attrs.update({'autofocus': True})
        # Radio-select catalog: no empty choice, first catalog preselected.
        catalog_field = self.fields['catalog']
        catalog_field.queryset = catalogs
        catalog_field.empty_label = None
        catalog_field.initial = catalogs.first()
        if settings.NESTED_PROJECTS:
            self.fields['parent'].queryset = projects

    class Meta:
        model = Project
        fields = ['title', 'description', 'catalog']
        # The parent field only exists when nested projects are enabled.
        if settings.NESTED_PROJECTS:
            fields += ['parent']
        field_classes = {'catalog': CatalogChoiceField}
        widgets = {'catalog': forms.RadioSelect()}
class RemoteGraphicsView(QtWidgets.QWidget):
    """Widget that displays a pyqtgraph scene rendered in a separate process.

    The remote process renders into a shared-memory buffer; this widget mmaps
    that buffer, wraps it in a QImage and paints it. Input events are proxied
    to the remote renderer.
    """

    def __init__(self, parent=None, *args, **kwds):
        self._img = None
        self._imgReq = None
        self._sizeHint = (640, 480)
        QtWidgets.QWidget.__init__(self)
        # Options consumed by the remote renderer rather than the QtProcess.
        remoteKwds = {}
        for kwd in ['useOpenGL', 'background']:
            if (kwd in kwds):
                remoteKwds[kwd] = kwds.pop(kwd)
        self._proc = mp.QtProcess(**kwds)
        self.pg = self._proc._import('pyqtgraph')
        self.pg.setConfigOptions(**CONFIG_OPTIONS)
        rpgRemote = self._proc._import('pyqtgraph.widgets.RemoteGraphicsView')
        self._view = rpgRemote.Renderer(*args, **remoteKwds)
        # Defer attribute fetches so proxy getattr calls are batched.
        self._view._setProxyOptions(deferGetattr=True)
        self.setFocusPolicy(QtCore.Qt.FocusPolicy.StrongFocus)
        self.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)
        self.setMouseTracking(True)
        self.shm = None
        shmFileName = self._view.shmFileName()
        if (sys.platform == 'win32'):
            # O_TEMPORARY so the backing file is cleaned up on close (Windows).
            opener = (lambda path, flags: os.open(path, (flags | os.O_TEMPORARY)))
        else:
            opener = None
        self.shmFile = open(shmFileName, 'rb', opener=opener)
        # Proxy the signal so the callback runs in this process.
        self._view.sceneRendered.connect(mp.proxy(self.remoteSceneChanged))
        # Forward selected methods straight to the remote view.
        for method in ['scene', 'setCentralItem']:
            setattr(self, method, getattr(self._view, method))

    def resizeEvent(self, ev):
        ret = super().resizeEvent(ev)
        # Fire-and-forget: no need to wait for the remote resize.
        self._view.resize(self.size(), _callSync='off')
        return ret

    def sizeHint(self):
        return QtCore.QSize(*self._sizeHint)

    def remoteSceneChanged(self, data):
        # Called (via proxy) whenever the remote process finished a frame.
        (w, h, size) = data
        if ((self.shm is None) or (self.shm.size != size)):
            if (self.shm is not None):
                self.shm.close()
            # (Re)map the shared-memory file at the new frame size.
            self.shm = mmap.mmap(self.shmFile.fileno(), size, access=mmap.ACCESS_READ)
        # Copy so the QImage stays valid after the buffer is rewritten.
        self._img = QtGui.QImage(self.shm, w, h, QtGui.QImage.Format.Format_RGB32).copy()
        self.update()

    def paintEvent(self, ev):
        if (self._img is None):
            return
        p = QtGui.QPainter(self)
        p.drawImage(self.rect(), self._img, self._img.rect())
        p.end()

    def mousePressEvent(self, ev):
        # Serialize the event and forward it asynchronously to the renderer.
        self._view.mousePressEvent(MouseEvent(ev), _callSync='off')
        ev.accept()
        return super().mousePressEvent(ev)

    def mouseReleaseEvent(self, ev):
        self._view.mouseReleaseEvent(MouseEvent(ev), _callSync='off')
        ev.accept()
        return super().mouseReleaseEvent(ev)

    def mouseMoveEvent(self, ev):
        self._view.mouseMoveEvent(MouseEvent(ev), _callSync='off')
        ev.accept()
        return super().mouseMoveEvent(ev)

    def wheelEvent(self, ev):
        self._view.wheelEvent(WheelEvent(ev), _callSync='off')
        ev.accept()
        return super().wheelEvent(ev)

    def enterEvent(self, ev):
        self._view.enterEvent(EnterEvent(ev), _callSync='off')
        return super().enterEvent(ev)

    def leaveEvent(self, ev):
        self._view.leaveEvent(LeaveEvent(ev), _callSync='off')
        return super().leaveEvent(ev)

    def remoteProcess(self):
        """Return the remote process; use to create/manipulate remote objects."""
        return self._proc

    def close(self):
        """Disconnect the render signal and shut down the remote process."""
        self._view.sceneRendered.disconnect()
        self._proc.close()
class Bubble(object):
    """Decorative bubble that drifts and reflects off a bounding rectangle."""

    def __init__(self, position, radius, velocity):
        self.position = position
        self.vel = velocity
        self.radius = radius
        self.innerColor = self.randomColor()
        self.outerColor = self.randomColor()
        self.updateBrush()

    def updateBrush(self):
        """Rebuild the radial-gradient brush from the current colors."""
        center = QPointF(self.radius, self.radius)
        focal = QPointF(self.radius * 0.5, self.radius * 0.5)
        gradient = QRadialGradient(center, self.radius, focal)
        gradient.setColorAt(0, QColor(255, 255, 255, 255))
        gradient.setColorAt(0.25, self.innerColor)
        gradient.setColorAt(1, self.outerColor)
        self.brush = QBrush(gradient)

    def drawBubble(self, painter):
        painter.save()
        # Paint in bubble-local coordinates with the origin at the top-left.
        painter.translate(self.position.x() - self.radius, self.position.y() - self.radius)
        painter.setBrush(self.brush)
        diameter = int(2 * self.radius)
        painter.drawEllipse(0, 0, diameter, diameter)
        painter.restore()

    def randomColor(self):
        """Return a pale, semi-transparent random color."""
        red = random.randrange(205, 256)
        green = random.randrange(205, 256)
        blue = random.randrange(205, 256)
        alpha = random.randrange(91, 192)
        return QColor(red, green, blue, alpha)

    def move(self, bbox):
        """Advance one step and mirror position/velocity at any crossed edge."""
        self.position += self.vel
        overshoot_left = self.position.x() - self.radius - bbox.left()
        overshoot_right = self.position.x() + self.radius - bbox.right()
        overshoot_top = self.position.y() - self.radius - bbox.top()
        overshoot_bottom = self.position.y() + self.radius - bbox.bottom()
        if overshoot_left < 0.0:
            self.position.setX(self.position.x() - 2 * overshoot_left)
            self.vel.setX(-self.vel.x())
        elif overshoot_right > 0.0:
            self.position.setX(self.position.x() - 2 * overshoot_right)
            self.vel.setX(-self.vel.x())
        if overshoot_top < 0.0:
            self.position.setY(self.position.y() - 2 * overshoot_top)
            self.vel.setY(-self.vel.y())
        elif overshoot_bottom > 0.0:
            self.position.setY(self.position.y() - 2 * overshoot_bottom)
            self.vel.setY(-self.vel.y())

    def rect(self):
        """Bounding rectangle of the bubble in scene coordinates."""
        return QRectF(self.position.x() - self.radius, self.position.y() - self.radius,
                      2 * self.radius, 2 * self.radius)
class bdist_wheel(Command):
    """Create a wheel (.whl) binary distribution.

    FIX: ``wheel_dist_name`` and ``license_paths`` lost their ``@property``
    decorators; both are used as attributes inside this class
    (``self.wheel_dist_name + '.data'`` and ``for ... in self.license_paths``),
    which raises TypeError without the decorator. Restored.
    """

    description = 'create a wheel distribution'

    supported_compressions = {'stored': ZIP_STORED, 'deflated': ZIP_DEFLATED}

    user_options = [
        ('bdist-dir=', 'b', 'temporary directory for creating the distribution'),
        ('plat-name=', 'p', ('platform name to embed in generated filenames (default: %s)' % get_platform(None))),
        ('keep-temp', 'k', 'keep the pseudo-installation tree around after creating the distribution archive'),
        ('dist-dir=', 'd', 'directory to put final built distributions in'),
        ('skip-build', None, 'skip rebuilding everything (for testing/debugging)'),
        ('relative', None, 'build the archive using relative paths (default: false)'),
        ('owner=', 'u', 'Owner name used when creating a tar file [default: current user]'),
        ('group=', 'g', 'Group name used when creating a tar file [default: current group]'),
        ('universal', None, 'make a universal wheel (default: false)'),
        ('compression=', None, "zipfile compression (one of: {}) (default: 'deflated')".format(', '.join(supported_compressions))),
        ('python-tag=', None, ("Python implementation compatibility tag (default: '%s')" % python_tag())),
        ('build-number=', None, 'Build number for this particular version. As specified in PEP-0427, this must start with a digit. \n[default: None]'),
        ('py-limited-api=', None, 'Python tag (cp32|cp33|cpNN) for abi3 wheel tag (default: false)'),
    ]

    boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']

    def initialize_options(self):
        """Set every option to its pre-finalization default."""
        self.bdist_dir = None
        self.data_dir = None
        self.plat_name = None
        self.plat_tag = None
        self.format = 'zip'
        self.keep_temp = False
        self.dist_dir = None
        self.egginfo_dir = None
        self.root_is_pure = None
        self.skip_build = None
        self.relative = False
        self.owner = None
        self.group = None
        self.universal = False
        self.compression = 'deflated'
        self.python_tag = python_tag()
        self.build_number = None
        self.py_limited_api = False
        self.plat_name_supplied = False

    def finalize_options(self):
        """Resolve defaults, validate compression/build-number/py-limited-api."""
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'wheel')
        egg_info = self.distribution.get_command_obj('egg_info')
        # Needed so the wheel name is computed from up-to-date metadata.
        egg_info.ensure_finalized()
        self.data_dir = self.wheel_dist_name + '.data'
        # Remember whether --plat-name was given before 'bdist' fills it in.
        self.plat_name_supplied = self.plat_name is not None
        try:
            self.compression = self.supported_compressions[self.compression]
        except KeyError:
            raise ValueError(f'Unsupported compression: {self.compression}') from None
        need_options = ('dist_dir', 'plat_name', 'skip_build')
        self.set_undefined_options('bdist', *zip(need_options, need_options))
        self.root_is_pure = not (self.distribution.has_ext_modules() or self.distribution.has_c_libraries())
        if self.py_limited_api and not re.match(PY_LIMITED_API_PATTERN, self.py_limited_api):
            raise ValueError("py-limited-api must match '%s'" % PY_LIMITED_API_PATTERN)
        # Support the legacy [wheel] config section with a deprecation warning.
        wheel = self.distribution.get_option_dict('wheel')
        if 'universal' in wheel:
            log.warning('The [wheel] section is deprecated. \nUse [bdist_wheel] instead.')
            val = wheel['universal'][1].strip()
            if val.lower() in ('1', 'true', 'yes'):
                self.universal = True
        if (self.build_number is not None) and (not self.build_number[:1].isdigit()):
            raise ValueError('Build tag (build-number) must start with a digit.')

    @property
    def wheel_dist_name(self):
        """Return the name-version[-build] stem used for the wheel filename."""
        components = (safer_name(self.distribution.get_name()), safer_version(self.distribution.get_version()))
        if self.build_number:
            components += (self.build_number,)
        return '-'.join(components)

    def get_tag(self):
        """Compute the (implementation, abi, platform) wheel tag triple."""
        if self.plat_name_supplied:
            plat_name = self.plat_name
        elif self.root_is_pure:
            plat_name = 'any'
        else:
            if self.plat_name and not self.plat_name.startswith('macosx'):
                plat_name = self.plat_name
            else:
                plat_name = get_platform(self.bdist_dir)
            # A 32-bit interpreter on a 64-bit kernel must not claim a
            # 64-bit platform tag.
            if _is_32bit_interpreter():
                if plat_name in ('linux-x86_64', 'linux_x86_64'):
                    plat_name = 'linux_i686'
                if plat_name in ('linux-aarch64', 'linux_aarch64'):
                    plat_name = 'linux_armv7l'
        plat_name = plat_name.lower().replace('-', '_').replace('.', '_').replace(' ', '_')
        if self.root_is_pure:
            if self.universal:
                impl = 'py2.py3'
            else:
                impl = self.python_tag
            tag = (impl, 'none', plat_name)
        else:
            impl_name = tags.interpreter_name()
            impl_ver = tags.interpreter_version()
            impl = impl_name + impl_ver
            # abi3: stable-ABI wheels tagged for the requested minimum CPython.
            if self.py_limited_api and (impl_name + impl_ver).startswith('cp3'):
                impl = self.py_limited_api
                abi_tag = 'abi3'
            else:
                abi_tag = str(get_abi_tag()).lower()
            tag = (impl, abi_tag, plat_name)
        # Sanity check: refuse to build a wheel the current platform cannot claim.
        supported_tags = [(t.interpreter, t.abi, plat_name) for t in tags.sys_tags()]
        assert tag in supported_tags, f'would build wheel with unsupported tag {tag}'
        return tag

    def run(self):
        """Build, pseudo-install into a temp tree, and zip it up as a wheel."""
        build_scripts = self.reinitialize_command('build_scripts')
        build_scripts.executable = 'python'
        build_scripts.force = True
        build_ext = self.reinitialize_command('build_ext')
        build_ext.inplace = False
        if not self.skip_build:
            self.run_command('build')
        install = self.reinitialize_command('install', reinit_subcommands=True)
        install.root = self.bdist_dir
        install.compile = False
        install.skip_build = self.skip_build
        install.warn_dir = False
        # Entry-point scripts are generated by the installer, not the wheel.
        install_scripts = self.reinitialize_command('install_scripts')
        install_scripts.no_ep = True
        # Non-purelib categories land under the <name>.data/ directory.
        for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
            setattr(install, 'install_' + key, os.path.join(self.data_dir, key))
        basedir_observed = ''
        if os.name == 'nt':
            # win32 barfs if any of these are ''; use the value that means
            # "the directory the .data directory lives in".
            basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..'))
            self.install_libbase = self.install_lib = basedir_observed
            setattr(install, 'install_purelib' if self.root_is_pure else 'install_platlib', basedir_observed)
        log.info(f'installing to {self.bdist_dir}')
        self.run_command('install')
        impl_tag, abi_tag, plat_tag = self.get_tag()
        archive_basename = f'{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}'
        if not self.relative:
            archive_root = self.bdist_dir
        else:
            archive_root = os.path.join(self.bdist_dir, self._ensure_relative(install.install_base))
        self.set_undefined_options('install_egg_info', ('target', 'egginfo_dir'))
        distinfo_dirname = '{}-{}.dist-info'.format(safer_name(self.distribution.get_name()), safer_version(self.distribution.get_version()))
        distinfo_dir = os.path.join(self.bdist_dir, distinfo_dirname)
        self.egg2dist(self.egginfo_dir, distinfo_dir)
        self.write_wheelfile(distinfo_dir)
        if not os.path.exists(self.dist_dir):
            os.makedirs(self.dist_dir)
        wheel_path = os.path.join(self.dist_dir, archive_basename + '.whl')
        with WheelFile(wheel_path, 'w', self.compression) as wf:
            wf.write_files(archive_root)
        # Register with dist_files so e.g. 'upload' can find the artifact.
        getattr(self.distribution, 'dist_files', []).append(('bdist_wheel', '{}.{}'.format(*sys.version_info[:2]), wheel_path))
        if not self.keep_temp:
            log.info(f'removing {self.bdist_dir}')
            if not self.dry_run:
                # shutil.rmtree renamed onerror -> onexc in 3.12.
                if sys.version_info < (3, 12):
                    rmtree(self.bdist_dir, onerror=remove_readonly)
                else:
                    rmtree(self.bdist_dir, onexc=remove_readonly_exc)

    def write_wheelfile(self, wheelfile_base, generator=(('bdist_wheel (' + wheel_version) + ')')):
        """Write the WHEEL metadata file (one Tag header per expanded tag)."""
        from email.message import Message
        msg = Message()
        msg['Wheel-Version'] = '1.0'
        msg['Generator'] = generator
        msg['Root-Is-Purelib'] = str(self.root_is_pure).lower()
        if self.build_number is not None:
            msg['Build'] = self.build_number
        impl_tag, abi_tag, plat_tag = self.get_tag()
        # A compressed tag like py2.py3 expands to one Tag header per combo.
        for impl in impl_tag.split('.'):
            for abi in abi_tag.split('.'):
                for plat in plat_tag.split('.'):
                    msg['Tag'] = '-'.join((impl, abi, plat))
        wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
        log.info(f'creating {wheelfile_path}')
        with open(wheelfile_path, 'wb') as f:
            BytesGenerator(f, maxheaderlen=0).flatten(msg)

    def _ensure_relative(self, path):
        # Strip the drive and a leading separator so the path can be joined
        # under bdist_dir.
        drive, path = os.path.splitdrive(path)
        if path[0:1] == os.sep:
            path = drive + path[1:]
        return path

    @property
    def license_paths(self):
        """Return the set/sequence of license files to copy into .dist-info."""
        if setuptools_major_version >= 57:
            # Setuptools has already resolved license_files for us.
            return self.distribution.metadata.license_files or ()
        files = set()
        metadata = self.distribution.get_option_dict('metadata')
        if setuptools_major_version >= 42:
            # Setuptools recognizes the license_files option but does not
            # do globbing yet.
            patterns = self.distribution.metadata.license_files
        elif 'license_files' in metadata:
            patterns = metadata['license_files'][1].split()
        else:
            patterns = ()
        if 'license_file' in metadata:
            warnings.warn('The "license_file" option is deprecated. \nUse "license_files" instead.', DeprecationWarning, stacklevel=2)
            files.add(metadata['license_file'][1])
        if (not files) and (not patterns) and (not isinstance(patterns, list)):
            # Nothing configured at all: fall back to the conventional names.
            patterns = ('LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*')
        for pattern in patterns:
            for path in iglob(pattern):
                if path.endswith('~'):
                    log.debug(f'ignoring license file "{path}" as it looks like a backup')
                    continue
                if (path not in files) and os.path.isfile(path):
                    log.info(f'adding license file "{path}" (matched pattern "{pattern}")')
                    files.add(path)
        return files

    def egg2dist(self, egginfo_path, distinfo_path):
        """Convert an .egg-info directory (or PKG-INFO file) into .dist-info."""
        def adios(p):
            """Appropriately delete directory, file or link."""
            if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
                shutil.rmtree(p)
            elif os.path.exists(p):
                os.unlink(p)

        adios(distinfo_path)
        if not os.path.exists(egginfo_path):
            # Try to help the user diagnose a misnamed metadata directory.
            import glob
            pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
            possible = glob.glob(pat)
            err = f'Egg metadata expected at {egginfo_path} but not found'
            if possible:
                alt = os.path.basename(possible[0])
                err += f' ({alt} found - possible misnamed archive file?)'
            raise ValueError(err)
        if os.path.isfile(egginfo_path):
            # .egg-info is a single PKG-INFO file.
            pkginfo_path = egginfo_path
            pkg_info = pkginfo_to_metadata(egginfo_path, egginfo_path)
            os.mkdir(distinfo_path)
        else:
            # .egg-info is a directory: copy it minus egg-specific files.
            pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
            pkg_info = pkginfo_to_metadata(egginfo_path, pkginfo_path)
            shutil.copytree(egginfo_path, distinfo_path, ignore=(lambda x, y: {'PKG-INFO', 'requires.txt', 'SOURCES.txt', 'not-zip-safe'}))
        # Drop dependency_links.txt when it is effectively empty.
        dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
        with open(dependency_links_path, encoding='utf-8') as dependency_links_file:
            dependency_links = dependency_links_file.read().strip()
        if not dependency_links:
            adios(dependency_links_path)
        pkg_info_path = os.path.join(distinfo_path, 'METADATA')
        # utf8 + no line-length folding keeps METADATA byte-faithful.
        serialization_policy = EmailPolicy(utf8=True, mangle_from_=False, max_line_length=0)
        with open(pkg_info_path, 'w', encoding='utf-8') as out:
            Generator(out, policy=serialization_policy).flatten(pkg_info)
        for license_path in self.license_paths:
            filename = os.path.basename(license_path)
            shutil.copy(license_path, os.path.join(distinfo_path, filename))
        adios(egginfo_path)
class DropoutWrapper(nn.Module):
    """Dropout that, for 3-D inputs (batch, seq, features), samples one mask
    per (batch, feature) pair and shares it across the sequence dimension
    (variational dropout). Other shapes fall back to standard dropout."""

    def __init__(self, dropout_p=0, enable_vbp=True):
        super(DropoutWrapper, self).__init__()
        self.enable_variational_dropout = enable_vbp
        self.dropout_p = dropout_p

    def forward(self, x):
        # No-op in eval mode or when dropout is disabled.
        # (was: `self.training == False` — compare truthiness, not `== False`)
        if (not self.training) or self.dropout_p == 0:
            return x
        if x.dim() == 3:
            keep_prob = 1 - self.dropout_p
            # One Bernoulli(keep_prob) mask of shape (batch, features),
            # inverted-scaled so the expected activation is unchanged.
            # (was wrapped in the deprecated torch.autograd.Variable;
            # detach() gives the same no-grad semantics.)
            mask = (torch.bernoulli(x.new_full((x.size(0), x.size(2)), keep_prob)) / keep_prob).detach()
            # Broadcast the same mask over every sequence position.
            return mask.unsqueeze(1).expand_as(x) * x
        return F.dropout(x, p=self.dropout_p, training=self.training)
class Stem(nn.Module):
    """Two-convolution stem with an overall stride of 2.

    Layout: conv(stride 2) -> norm+activation -> conv(stride 1). Note the
    second conv's output is intentionally left un-normalized here.
    """

    def __init__(self, in_chs: int, out_chs: int, kernel_size: int=3, padding: str='',
                 bias: bool=False, act_layer: str='gelu', norm_layer: str='batchnorm2d',
                 norm_eps: float=1e-05):
        super().__init__()
        # Allow a single int for out_chs; expand to (mid, out) channel counts.
        if not isinstance(out_chs, (list, tuple)):
            out_chs = to_2tuple(out_chs)
        norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps)
        self.out_chs = out_chs[-1]
        self.stride = 2
        self.conv1 = create_conv2d(in_chs, out_chs[0], kernel_size, stride=2, padding=padding, bias=bias)
        self.norm1 = norm_act_layer(out_chs[0])
        self.conv2 = create_conv2d(out_chs[0], out_chs[1], kernel_size, stride=1, padding=padding, bias=bias)

    def init_weights(self, scheme=''):
        # Delegate conv initialization to the module-level _init_conv helper.
        named_apply(partial(_init_conv, scheme=scheme), self)

    def forward(self, x):
        return self.conv2(self.norm1(self.conv1(x)))
def _populate_kernel_cache(np_type, k_type):
    """Compile and memoize the CUDA unpack kernel for the given numpy dtype.

    Raises ValueError for unsupported dtypes; a cache hit is a no-op.
    """
    if np_type not in _SUPPORTED_TYPES:
        raise ValueError("Datatype {} not found for '{}'".format(np_type, k_type))
    cache_key = (str(np_type), k_type)
    if cache_key not in _cupy_kernel_cache:
        _cupy_kernel_cache[cache_key] = _get_function('/io/_reader.fatbin', '_cupy_unpack_' + str(np_type))
# FIX: the registration decorator was written as a bare call
# ``_combinator('and')`` — restore the missing ``@`` so the class is
# actually registered as the 'and' combinator.
@_combinator('and')
class AndFilter(BaseFilter):
    """Logical AND of the two filters on top of the parse stack."""

    def __init__(self, stack):
        # Consume the two operand filters pushed before this combinator and
        # push the combined filter back in their place.
        self.subfilters = [stack[-2], stack[-1]]
        stack.pop()
        stack.pop()
        stack.append(self)

    def __call__(self, fobj):
        # A file is accepted only when every subfilter accepts it.
        return accept_file(fobj, self.subfilters)

    def __str__(self):
        return '<Filter: {comp}>'.format(comp=' and '.join(map(str, self.subfilters)))

    def decompose(self):
        """Return the constituent filters."""
        return self.subfilters
class GaussianStatePreparationCircuitTest(unittest.TestCase):
    """Verify gaussian_state_preparation_circuit prepares the ground state of
    random quadratic Hamiltonians."""

    def setUp(self):
        self.n_qubits_range = range(3, 6)

    def test_ground_state_particle_conserving(self):
        """Particle-conserving case: Givens rotations alone reach the ground state."""
        for n_qubits in self.n_qubits_range:
            # FIX: removed leftover debug print() calls that polluted test output.
            quadratic_hamiltonian = random_quadratic_hamiltonian(n_qubits, True, True)
            sparse_operator = get_sparse_operator(quadratic_hamiltonian)
            ground_energy, _ = get_ground_state(sparse_operator)
            circuit_description, start_orbitals = gaussian_state_preparation_circuit(quadratic_hamiltonian)
            state = jw_configuration_state(start_orbitals, n_qubits)
            for parallel_ops in circuit_description:
                for op in parallel_ops:
                    # No particle-hole transformation should ever be emitted here.
                    self.assertTrue(op != 'pht')
                    i, j, theta, phi = op
                    state = jw_sparse_givens_rotation(i, j, theta, phi, n_qubits).dot(state)
            # The prepared state must be an eigenstate with the ground energy.
            difference = sparse_operator * state - ground_energy * state
            discrepancy = numpy.amax(numpy.abs(difference))
            self.assertAlmostEqual(discrepancy, 0)

    def test_ground_state_particle_nonconserving(self):
        """Non-conserving case: circuit may include the particle-hole transform."""
        for n_qubits in self.n_qubits_range:
            quadratic_hamiltonian = random_quadratic_hamiltonian(n_qubits, False, True)
            sparse_operator = get_sparse_operator(quadratic_hamiltonian)
            ground_energy, _ = get_ground_state(sparse_operator)
            circuit_description, start_orbitals = gaussian_state_preparation_circuit(quadratic_hamiltonian)
            state = jw_configuration_state(start_orbitals, n_qubits)
            particle_hole_transformation = jw_sparse_particle_hole_transformation_last_mode(n_qubits)
            for parallel_ops in circuit_description:
                for op in parallel_ops:
                    if op == 'pht':
                        state = particle_hole_transformation.dot(state)
                    else:
                        i, j, theta, phi = op
                        state = jw_sparse_givens_rotation(i, j, theta, phi, n_qubits).dot(state)
            difference = sparse_operator * state - ground_energy * state
            discrepancy = numpy.amax(numpy.abs(difference))
            self.assertAlmostEqual(discrepancy, 0)

    def test_bad_input(self):
        """Non-Hamiltonian input raises ValueError."""
        with self.assertRaises(ValueError):
            gaussian_state_preparation_circuit('a')
def get_map(Hist) -> np.ndarray:
    """Build a histogram-equalization lookup table.

    Given a 256-bin intensity histogram, returns a 256-entry integer array
    mapping input level m to round(255 * CDF(m)), where CDF is the
    normalized cumulative histogram.

    (Rewritten from a manual running-sum loop to np.cumsum; results are
    identical since int() truncation of a non-negative value plus 0.5 is
    round-half-up.)
    """
    hist = np.asarray(Hist, dtype=np.float64)
    cdf = np.cumsum(hist / hist.sum())
    return np.floor(255 * cdf[:256] + 0.5).astype(int)
def partition_all(n, seq):
    """Yield tuples of up to ``n`` items from ``seq``; only the final tuple
    may be shorter.

    Implementation: zip n references to the same iterator, padding the last
    tuple with the ``no_pad`` sentinel (module-level), then trim the padding
    off the final tuple before yielding it.
    """
    args = ([iter(seq)] * n)
    it = zip_longest(*args, fillvalue=no_pad)
    try:
        prev = next(it)
    except StopIteration:
        # Empty input: yield nothing.
        return
    for item in it:
        (yield prev)
        prev = item
    # ``prev`` is the last tuple; padding, if any, is a suffix of no_pad.
    if (prev[(- 1)] is no_pad):
        try:
            # Fast path: seq supports len(), so the tail length is len % n.
            (yield prev[:(len(seq) % n)])
        except TypeError:
            # seq is a generator/iterator with no len(): binary-search for
            # the first no_pad entry (real items never compare `is no_pad`).
            (lo, hi) = (0, n)
            while (lo < hi):
                mid = ((lo + hi) // 2)
                if (prev[mid] is no_pad):
                    hi = mid
                else:
                    lo = (mid + 1)
            (yield prev[:lo])
    else:
        # Input length was an exact multiple of n: no trimming needed.
        (yield prev)
def pylsp_references(document, position, exclude_declaration):
    """Collect LSP reference locations for the symbol at *position*."""
    code_position = _utils.position_to_jedi_linecolumn(document, position)
    usages = document.jedi_script().get_references(**code_position)
    if exclude_declaration:
        # Keep only usage sites, dropping the definition itself.
        usages = [usage for usage in usages if not usage.is_definition()]
    results = []
    for usage in usages:
        if usage.in_builtin_module():
            continue
        if usage.module_path:
            uri = uris.uri_with(document.uri, path=str(usage.module_path))
        else:
            uri = document.uri
        line = usage.line - 1  # jedi lines are 1-based; LSP is 0-based
        results.append({
            'uri': uri,
            'range': {
                'start': {'line': line, 'character': usage.column},
                'end': {'line': line, 'character': usage.column + len(usage.name)},
            },
        })
    return results
def main():
    """Entry point: build CUB data loaders, create the model, resume from a
    checkpoint if one exists, and launch training.

    Relies on module-level configuration values (input_size, root, batch_size,
    model_type, proposalN, num_classes, channels, model_path, model_name,
    end_epoch, init_lr, lr_milestones, lr_decay_rate, weight_decay,
    save_interval) — presumably imported from config.py, which is snapshotted
    below.
    """
    print('Loading CUB trainset')
    trainset = CUB(input_size=input_size, root=root, is_train=True, model_type=model_type)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=8, drop_last=False)
    print('Loading CUB testset')
    testset = CUB(input_size=input_size, root=root, is_train=False, model_type=model_type)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=8, drop_last=False)
    # Two supported variants: knowledge-fusion network vs gaze augmentation.
    if (model_type == 'kfn'):
        print('Knowledge Fusion Network')
        model = TwoBranch(num_classes=num_classes, channels=channels)
    else:
        print('Gaze Augmentation Training')
        model = MainNet(proposalN=proposalN, num_classes=num_classes, channels=channels)
    criterion = nn.CrossEntropyLoss()
    parameters = model.parameters()
    save_path = os.path.join(model_path, model_name)
    if os.path.exists(save_path):
        # Resume from the latest checkpoint found in save_path.
        (start_epoch, lr) = auto_load_resume(model, save_path, status='train')
        assert (start_epoch < end_epoch)
    else:
        os.makedirs(save_path)
        start_epoch = 0
        lr = init_lr
    if (not os.path.exists(os.path.join(save_path, 'best'))):
        os.makedirs(os.path.join(save_path, 'best'))
    optimizer = torch.optim.SGD(parameters, lr=lr, momentum=0.9, weight_decay=weight_decay)
    model = model.cuda()
    scheduler = MultiStepLR(optimizer, milestones=lr_milestones, gamma=lr_decay_rate)
    # Snapshot the config used for this run alongside the checkpoints.
    time_str = time.strftime('%Y%m%d-%H%M%S')
    shutil.copy('./config.py', os.path.join(save_path, '{}config.py'.format(time_str)))
    train(model=model, model_type=model_type, trainloader=trainloader, testloader=testloader, criterion=criterion, optimizer=optimizer, scheduler=scheduler, save_path=save_path, start_epoch=start_epoch, end_epoch=end_epoch, save_interval=save_interval)
class CuArray():
    """Numpy-backed buffer exposing its raw data pointer for C/CUDA interop.

    Keeps a host numpy array (``_arr``) plus a ``c_void_p`` (``_ptr``) to its
    first element so the memory can be handed to native code.
    """

    def __init__(self, shape, dtype=np.float32, init='empty', grow_only=False):
        self._ptr = c_void_p()          # null until resize() allocates
        self.grow_only = grow_only      # NOTE(review): stored but never read here
        self.resize(shape, dtype, init)

    def resize(self, shape=None, dtype=None, init='empty'):
        """Update shape/dtype metadata and (re)allocate when *init* asks for it.

        ``shape=None`` / ``dtype=None`` keep the current value.  With *init*
        not in ('zeros', 'empty') only the metadata is updated and no
        allocation happens (used by setArray/fromArray, which adopt an
        existing buffer).
        """
        shape_tuple = shape if isinstance(shape, tuple) else (shape,)
        self.shape = self.shape if shape is None else shape_tuple
        self.dtype = self.dtype if dtype is None else np.dtype(dtype)
        # BUG FIX: size must come from the (possibly retained) self.shape, not
        # the raw `shape` argument, which may be None on a dtype-only resize.
        self.size = int(np.prod(self.shape))
        self.itemsize = self.dtype.itemsize
        self.nbytes = self.itemsize * self.size
        if init not in ('zeros', 'empty'):
            return
        elif init == 'empty':
            self._arr = np.empty(self.shape, dtype=self.dtype)
        elif init == 'zeros':
            self._arr = np.zeros(self.shape, dtype=self.dtype)
        self._ptr = c_void_p(self._arr.ctypes.data)

    @staticmethod
    def fromArray(arr):
        """Alternate constructor wrapping an existing numpy array (no copy).

        Declared @staticmethod (the original omitted the decorator, which made
        instance-level calls mis-bind `self` as `arr`; class-level calls keep
        working exactly as before).
        """
        obj = CuArray(arr.shape, arr.dtype, init=None)
        obj.setArray(arr)
        return obj

    def zeroFill(self):
        """Zero the buffer in place."""
        self.getArray()[:] = 0

    def setArray(self, arr):
        """Adopt *arr* as the backing store (no copy), refreshing metadata if needed."""
        params_changed = (arr.shape != self.shape) or (arr.dtype != self.dtype)
        uninitialized_memory = self._ptr.value is None
        if params_changed or uninitialized_memory:
            self.resize(arr.shape, arr.dtype, None)
        self._ptr = c_void_p(arr.ctypes.data)
        self._arr = arr  # keep a reference so the memory stays alive

    def getArray(self):
        """Return a numpy view over the raw pointer (no copy)."""
        # Pick a ctypes element type of the right width, then view as dtype.
        my_ctype = {1: c_char, 2: c_short, 4: c_int, 8: c_longlong}[self.itemsize]
        my_cptr = cast(self._ptr.value, POINTER(my_ctype))
        arr = np.ctypeslib.as_array(my_cptr, self.shape).view(self.dtype)
        return arr
class Nest(nn.Module):
    """Nested Transformer (NesT).

    A hierarchy of ``num_levels`` levels; each level partitions the feature
    map into non-overlapping blocks processed by transformer layers, with
    pooling between levels.

    NOTE(review): this class was corrupted in the source file — several bare
    ``.ignore`` tokens preceded methods and an assert message literal was
    split across lines.  The decorators are reconstructed here as
    ``@torch.jit.ignore`` (the standard timm idiom); confirm against the
    upstream model definition.
    """

    def __init__(self, img_size=224, in_chans=3, patch_size=4, num_levels=3,
                 embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20),
                 num_classes=1000, mlp_ratio=4.0, qkv_bias=True, drop_rate=0.0,
                 attn_drop_rate=0.0, drop_path_rate=0.5, norm_layer=None,
                 act_layer=None, pad_type='', weight_init='', global_pool='avg'):
        super().__init__()
        # Per-level hyper-parameters are either scalars (broadcast below) or
        # sequences of length num_levels.
        for param_name in ['embed_dims', 'num_heads', 'depths']:
            param_value = locals()[param_name]
            if isinstance(param_value, collections.abc.Sequence):
                assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`'
        embed_dims = to_ntuple(num_levels)(embed_dims)
        num_heads = to_ntuple(num_levels)(num_heads)
        depths = to_ntuple(num_levels)(depths)
        self.num_classes = num_classes
        self.num_features = embed_dims[-1]
        self.feature_info = []
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-06)
        act_layer = act_layer or nn.GELU
        self.drop_rate = drop_rate
        self.num_levels = num_levels
        if isinstance(img_size, collections.abc.Sequence):
            assert img_size[0] == img_size[1], 'Model only handles square inputs'
            img_size = img_size[0]
        assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly'
        self.patch_size = patch_size
        # Blocks per level: 4**(num_levels-1), ..., 4, 1 (coarsest last).
        self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist()
        assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \
            "First level blocks don't fit evenly. Check `img_size`, `patch_size`, and `num_levels`"
        self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0]))
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans,
            embed_dim=embed_dims[0], flatten=False)
        self.num_patches = self.patch_embed.num_patches
        self.seq_length = self.num_patches // self.num_blocks[0]
        levels = []
        # Stochastic-depth rates, split so each level gets its own schedule.
        dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
        prev_dim = None
        curr_stride = 4
        for i in range(len(self.num_blocks)):
            dim = embed_dims[i]
            levels.append(NestLevel(
                self.num_blocks[i], self.block_size, self.seq_length, num_heads[i],
                depths[i], dim, prev_dim, mlp_ratio, qkv_bias, drop_rate,
                attn_drop_rate, dp_rates[i], norm_layer, act_layer, pad_type=pad_type))
            self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')]
            prev_dim = dim
            curr_stride *= 2
        self.levels = nn.Sequential(*levels)
        self.norm = norm_layer(embed_dims[-1])
        (self.global_pool, self.head) = create_classifier(
            self.num_features, self.num_classes, pool_type=global_pool)
        self.init_weights(weight_init)

    @torch.jit.ignore
    def init_weights(self, mode=''):
        """Initialize weights; mode 'nlhb' biases the head by -log(num_classes)."""
        assert mode in ('nlhb', '')
        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.0
        for level in self.levels:
            trunc_normal_(level.pos_embed, std=0.02, a=-2, b=2)
        named_apply(partial(_init_nest_weights, head_bias=head_bias), self)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Positional embeddings are excluded from weight decay.
        return {f'level.{i}.pos_embed' for i in range(len(self.levels))}

    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        """Parameter-group regexes for layerwise LR decay / freezing."""
        matcher = dict(
            stem='^patch_embed',
            blocks=[
                (('^levels\\.(\\d+)' if coarse else '^levels\\.(\\d+)\\.transformer_encoder\\.(\\d+)'), None),
                ('^levels\\.(\\d+)\\.(?:pool|pos_embed)', (0,)),
                ('^norm', (99999,)),
            ])
        return matcher

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        for l in self.levels:
            l.grad_checkpointing = enable

    @torch.jit.ignore
    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool='avg'):
        """Swap the classification head for a new class count / pooling."""
        self.num_classes = num_classes
        (self.global_pool, self.head) = create_classifier(
            self.num_features, self.num_classes, pool_type=global_pool)

    def forward_features(self, x):
        x = self.patch_embed(x)
        x = self.levels(x)
        # NCHW -> NHWC so LayerNorm normalizes channels, then back to NCHW.
        x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        return x

    def forward_head(self, x, pre_logits: bool = False):
        x = self.global_pool(x)
        if self.drop_rate > 0.0:
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        return x if pre_logits else self.head(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x
class FC3Handler(BaseHandler):
    """Kickstart syntax handler for Fedora Core 3.

    ``commandMap`` routes each kickstart command keyword to the class that
    parses it; ``dataMap`` routes data-object names to their classes.  Note
    that several keywords alias the same implementation (halt/poweroff/
    reboot/shutdown, auth/authconfig, part/partition, cmdline/graphical/text).
    """
    version = FC3
    commandMap = {
        # Authentication / security
        'auth': commands.authconfig.FC3_Authconfig,
        'authconfig': commands.authconfig.FC3_Authconfig,
        'selinux': commands.selinux.FC3_SELinux,
        'rootpw': commands.rootpw.FC3_RootPw,
        'firewall': commands.firewall.FC3_Firewall,
        # Partitioning / storage
        'autopart': commands.autopart.FC3_AutoPart,
        'bootloader': commands.bootloader.FC3_Bootloader,
        'clearpart': commands.clearpart.FC3_ClearPart,
        'ignoredisk': commands.ignoredisk.FC3_IgnoreDisk,
        'logvol': commands.logvol.FC3_LogVol,
        'part': commands.partition.FC3_Partition,
        'partition': commands.partition.FC3_Partition,
        'raid': commands.raid.FC3_Raid,
        'volgroup': commands.volgroup.FC3_VolGroup,
        'zerombr': commands.zerombr.FC3_ZeroMbr,
        'zfcp': commands.zfcp.FC3_ZFCP,
        # Installation sources / method
        'cdrom': commands.cdrom.FC3_Cdrom,
        'harddrive': commands.harddrive.FC3_HardDrive,
        'nfs': commands.nfs.FC3_NFS,
        'url': commands.url.FC3_Url,
        'method': commands.method.FC3_Method,
        'install': commands.upgrade.FC3_Upgrade,
        'upgrade': commands.upgrade.FC3_Upgrade,
        # Display / interaction modes (all map to the same DisplayMode class)
        'cmdline': commands.displaymode.FC3_DisplayMode,
        'graphical': commands.displaymode.FC3_DisplayMode,
        'text': commands.displaymode.FC3_DisplayMode,
        'interactive': commands.interactive.FC3_Interactive,
        'autostep': commands.autostep.FC3_AutoStep,
        'skipx': commands.skipx.FC3_SkipX,
        'vnc': commands.vnc.FC3_Vnc,
        'xconfig': commands.xconfig.FC3_XConfig,
        'monitor': commands.monitor.FC3_Monitor,
        # Devices / drivers
        'device': commands.device.FC3_Device,
        'deviceprobe': commands.deviceprobe.FC3_DeviceProbe,
        'driverdisk': commands.driverdisk.FC3_DriverDisk,
        'mouse': commands.mouse.FC3_Mouse,
        'keyboard': commands.keyboard.FC3_Keyboard,
        # Locale / misc
        'firstboot': commands.firstboot.FC3_Firstboot,
        'lang': commands.lang.FC3_Lang,
        'langsupport': commands.langsupport.FC3_LangSupport,
        'timezone': commands.timezone.FC3_Timezone,
        'network': commands.network.FC3_Network,
        # Legacy bootloader
        'lilo': commands.bootloader.FC3_Lilo,
        'lilocheck': commands.lilocheck.FC3_LiloCheck,
        # End-of-install actions (all aliases of Reboot)
        'halt': commands.reboot.FC3_Reboot,
        'poweroff': commands.reboot.FC3_Reboot,
        'reboot': commands.reboot.FC3_Reboot,
        'shutdown': commands.reboot.FC3_Reboot,
    }
    dataMap = {
        'DriverDiskData': commands.driverdisk.FC3_DriverDiskData,
        'LogVolData': commands.logvol.FC3_LogVolData,
        'NetworkData': commands.network.FC3_NetworkData,
        'PartData': commands.partition.FC3_PartData,
        'RaidData': commands.raid.FC3_RaidData,
        'VolGroupData': commands.volgroup.FC3_VolGroupData,
        'ZFCPData': commands.zfcp.FC3_ZFCPData,
    }
class VanEncoder(nn.Module):
    """Sequence of VAN stages applied one after another."""

    def __init__(self, config: VanConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # One stochastic-depth rate per block, spread linearly over all blocks.
        drop_path_rates = [rate.item() for rate in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        per_stage = zip(config.patch_sizes, config.strides, config.hidden_sizes,
                        config.depths, config.mlp_ratios, drop_path_rates)
        for num_stage, (patch_size, stride, hidden_size, depth, mlp_expantion, drop_path_rate) in enumerate(per_stage):
            # First stage consumes the raw image channels; later stages
            # consume the previous stage's hidden size.
            if num_stage == 0:
                in_channels = config.num_channels
            else:
                in_channels = config.hidden_sizes[num_stage - 1]
            self.stages.append(VanStage(
                config, in_channels, hidden_size,
                patch_size=patch_size, stride=stride, depth=depth,
                mlp_ratio=mlp_expantion, drop_path_rate=drop_path_rate))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            hidden_state = stage_module(hidden_state)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, all_hidden_states] if v is not None)
        return VanEncoderOutput(last_hidden_state=hidden_state, hidden_states=all_hidden_states)
def _spin_hamiltonian(N):
    """Heisenberg-type spin-chain Hamiltonian on N qubits (returns a qutip Qobj)."""
    from qutip.core import tensor, qeye, sigmax, sigmay, sigmaz
    # Uniform transverse field and exchange couplings.
    h = 2 * np.pi * 1.0 * np.ones(N)
    Jz = 2 * np.pi * 0.1 * np.ones(N)
    Jx = 2 * np.pi * 0.1 * np.ones(N)
    Jy = 2 * np.pi * 0.1 * np.ones(N)
    identity = qeye(2)

    def embed(single_site_op, site):
        # Operator acting on `site`, identity on every other qubit.
        factors = [identity] * N
        factors[site] = single_site_op
        return tensor(factors)

    sx_list = [embed(sigmax(), n) for n in range(N)]
    sy_list = [embed(sigmay(), n) for n in range(N)]
    sz_list = [embed(sigmaz(), n) for n in range(N)]

    H = 0
    # Local field terms.
    for n in range(N):
        H += -0.5 * h[n] * sz_list[n]
    # Nearest-neighbour exchange terms.
    for n in range(N - 1):
        H += -0.5 * Jx[n] * sx_list[n] * sx_list[n + 1]
        H += -0.5 * Jy[n] * sy_list[n] * sy_list[n + 1]
        H += -0.5 * Jz[n] * sz_list[n] * sz_list[n + 1]
    return H
class BosonicBath(Bath):
    """Bosonic bath described by exponential-series correlation functions.

    The real and imaginary parts of the bath correlation function are each
    given as sums of exponentials with coefficients ``ck`` and rates ``vk``;
    each term becomes one BathExponent coupled through the operator ``Q``.
    """

    def _check_cks_and_vks(self, ck_real, vk_real, ck_imag, vk_imag):
        # Each coefficient needs a matching exponent rate.
        if ((len(ck_real) != len(vk_real)) or (len(ck_imag) != len(vk_imag))):
            raise ValueError('The bath exponent lists ck_real and vk_real, and ck_imag and vk_imag must be the same length.')

    def _check_coup_op(self, Q):
        if (not isinstance(Q, Qobj)):
            raise ValueError('The coupling operator Q must be a Qobj.')

    def __init__(self, Q, ck_real, vk_real, ck_imag, vk_imag, combine=True, tag=None):
        self._check_cks_and_vks(ck_real, vk_real, ck_imag, vk_imag)
        self._check_coup_op(Q)
        exponents = []
        exponents.extend((BathExponent('R', None, Q, ck, vk, tag=tag) for (ck, vk) in zip(ck_real, vk_real)))
        exponents.extend((BathExponent('I', None, Q, ck, vk, tag=tag) for (ck, vk) in zip(ck_imag, vk_imag)))
        if combine:
            exponents = self.combine(exponents)
        super().__init__(exponents)

    @classmethod
    def combine(cls, exponents, rtol=1e-05, atol=1e-07):
        """Merge exponents with (approximately) equal rates and equal coupling
        operators into single R/I/RI exponents.

        BUG FIX: restored the missing ``@classmethod`` decorator — the first
        parameter is named ``cls`` and no instance state is used, so calling
        via the class (``BosonicBath.combine(exps)``) previously mis-bound
        the arguments.  Instance calls (``self.combine(...)``) behave the
        same as before.
        """
        # Group exponents sharing a rate (vk) and coupling operator (Q).
        groups = []
        remaining = exponents[:]
        while remaining:
            e1 = remaining.pop(0)
            group = [e1]
            for e2 in remaining[:]:
                if (np.isclose(e1.vk, e2.vk, rtol=rtol, atol=atol) and _isequal(e1.Q, e2.Q, tol=atol)):
                    group.append(e2)
                    remaining.remove(e2)
            groups.append(group)
        new_exponents = []
        for combine in groups:
            exp1 = combine[0]
            if ((exp1.type != exp1.types.RI) and all(((exp2.type == exp1.type) for exp2 in combine))):
                # All-same pure type (R or I): coefficients simply add.
                ck = sum((exp.ck for exp in combine))
                new_exponents.append(BathExponent(exp1.type, None, exp1.Q, ck, exp1.vk, tag=exp1.tag))
            else:
                # Mixed types collapse into one combined RI exponent.
                ck_R = (sum((exp.ck for exp in combine if (exp.type == exp.types.R))) + sum((exp.ck for exp in combine if (exp.type == exp.types.RI))))
                ck_I = (sum((exp.ck for exp in combine if (exp.type == exp.types.I))) + sum((exp.ck2 for exp in combine if (exp.type == exp.types.RI))))
                new_exponents.append(BathExponent('RI', None, exp1.Q, ck_R, exp1.vk, ck2=ck_I, tag=exp1.tag))
        return new_exponents
def append_call_sample_docstring(model_class, checkpoint, output_type, config_class, mask=None):
    """Attach an auto-generated usage example to ``model_class.__call__``.

    The method is copied first so the docstring decoration does not mutate a
    function object shared with other classes.  ``mask`` is accepted for
    signature compatibility but unused here.
    """
    call_fn = copy_func(model_class.__call__)
    decorator = add_code_sample_docstrings(
        checkpoint=checkpoint,
        output_type=output_type,
        config_class=config_class,
        model_cls=model_class.__name__,
    )
    model_class.__call__ = decorator(call_fn)
def save_datasets(ds_train: List[ContextualizedExample], ds_dev: List[ContextualizedExample], output_dir):
    """Serialize the train and dev example lists as JSONL under *output_dir*.

    Files are written as ``train.jsonl`` and ``dev.jsonl``; the previously
    duplicated per-split code is factored into ``_save_split``.
    """
    utils.IO.ensure_dir(output_dir)
    _save_split(ds_train, output_dir, 'train', 'Train')
    _save_split(ds_dev, output_dir, 'dev', 'Dev')


def _save_split(examples, output_dir, split, label):
    """Write one split to ``<output_dir>/<split>.jsonl`` and log its size."""
    output_file = Path(output_dir, f'{split}.jsonl')
    logger.warning(f'Saving to {output_file}')
    output_objs = [example_to_dict(example) for example in examples]
    utils.JsonL.save(output_file, output_objs)
    logger.info(f'{label} set size {len(output_objs)}')
def test_jac_method_grad():
    """Gradient-check jac(): both mv (J @ v) and rmv (J^T @ v) products must
    be differentiable w.r.t. the function parameters and the neural-network
    parameters, to first and second order."""
    na = 3
    params = getfnparams(na)
    nnparams = getnnparams(na)
    num_nnparams = len(nnparams)
    jacs = jac(func2(*nnparams), params)
    nout = jacs[0].shape[(- 2)]  # NOTE(review): computed but unused below

    def fcnr(i, v, *allparams):
        # Right (transposed) product v -> J_i^T v; the Jacobian is rebuilt
        # from the flattened parameter list so gradcheck can perturb each one.
        nnparams = allparams[:num_nnparams]
        params = allparams[num_nnparams:]
        jacs = jac(func2(*nnparams), params)
        return jacs[i].rmv(v.view((- 1)))

    def fcnl(i, v, *allparams):
        # Left product v -> J_i v.
        nnparams = allparams[:num_nnparams]
        params = allparams[num_nnparams:]
        jacs = jac(func2(*nnparams), params)
        return jacs[i].mv(v.view((- 1)))

    v = torch.rand((na,), dtype=dtype, requires_grad=True)
    w = [torch.rand_like(p).requires_grad_() for p in params]
    for i in range(len(jacs)):
        # First- and second-order checks for both product directions.
        gradcheck(fcnr, (i, v, *nnparams, *params))
        gradgradcheck(fcnr, (i, v, *nnparams, *params))
        gradcheck(fcnl, (i, w[i], *nnparams, *params))
        gradgradcheck(fcnl, (i, w[i], *nnparams, *params))
class TestNoMaterial(TestWavefront):
    """An .obj without a material library should fall back to a default material."""

    def setUp(self):
        self.mesh_names = ['Simple', 'SimpleB']
        self.material_names = ['default0']
        self.meshes = pywavefront.Wavefront(fixture('simple_no_mtl.obj'))

    def testMeshMaterialVertices(self):
        first_mesh = self.meshes.meshes[self.mesh_names[0]]
        default_material = first_mesh.materials[0]
        # The fixture's first mesh is expected to expand to 48 vertex values.
        self.assertEqual(len(default_material.vertices), 48)
class DropDB(ProductionCommand):
    """Command that drops the project database, after an optional confirmation."""
    keyword = 'dropdb'

    def assemble(self):
        super().assemble()
        self.parser.add_argument('-y', '--yes', action='store_true', dest='yes',
                                 help='automatically answers yes on prompts')
        self.parser.add_argument('-U', '--super-user-name', dest='super_user_name', default=None,
                                 help='the name of the priviledged user who may perform this operation')

    def execute(self, args):
        super().execute(args)
        # --yes skips the interactive prompt entirely.
        confirmed = args.yes
        if not confirmed:
            answer = input('Are you sure? (y/N)? ').strip().lower()
            confirmed = answer.startswith('y')
        if not confirmed:
            return
        return self.sys_control.drop_database(super_user_name=args.super_user_name)
class ResNet(nn.Module):
    """CIFAR-style ResNet with three stages (32x32 inputs).

    ``depth`` must be 6n+2 for BasicBlock or 9n+2 for Bottleneck;
    ``num_filters`` gives the channel widths [stem, stage1, stage2, stage3].
    ``forward(..., is_feat=True)`` additionally returns per-stage features,
    used by knowledge-distillation code via get_feat_modules /
    get_bn_before_relu.
    """

    def __init__(self, depth, num_filters, block_name='BasicBlock', num_classes=10):
        super(ResNet, self).__init__()
        # Derive blocks-per-stage (n) from the total depth.
        if (block_name.lower() == 'basicblock'):
            assert (((depth - 2) % 6) == 0), 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = ((depth - 2) // 6)
            block = BasicBlock
        elif (block_name.lower() == 'bottleneck'):
            assert (((depth - 2) % 9) == 0), 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = ((depth - 2) // 9)
            block = Bottleneck
        else:
            raise ValueError('block_name shoule be Basicblock or Bottleneck')
        self.inplanes = num_filters[0]
        self.conv1 = nn.Conv2d(3, num_filters[0], kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_filters[0])
        self.relu = nn.ReLU(inplace=True)
        # Three stages; stages 2 and 3 halve the spatial resolution.
        self.layer1 = self._make_layer(block, num_filters[1], n)
        self.layer2 = self._make_layer(block, num_filters[2], n, stride=2)
        self.layer3 = self._make_layer(block, num_filters[3], n, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((num_filters[3] * block.expansion), num_classes)
        # He init for convs; unit weight / zero bias for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of `blocks` residual blocks; the first may downsample.

        Mutates self.inplanes so subsequent stages see the widened channels.
        """
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Projection shortcut to match shape when stride/width changes.
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = list([])
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def get_feat_modules(self):
        """Feature-producing modules in forward order (for distillation hooks)."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.relu)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m

    def get_bn_before_relu(self):
        """Last BatchNorm of each stage (the one feeding the stage's final ReLU)."""
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[(- 1)].bn3
            bn2 = self.layer2[(- 1)].bn3
            bn3 = self.layer3[(- 1)].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[(- 1)].bn2
            bn2 = self.layer2[(- 1)].bn2
            bn3 = self.layer3[(- 1)].bn2
        else:
            raise NotImplementedError('ResNet unknown block error !!!')
        return [bn1, bn2, bn3]

    def forward(self, x, is_feat=False):
        """Forward pass; with is_feat=True also return stage features [f1,f2,f3]."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        f0 = x
        x = self.layer1(x)
        f1 = x
        x = self.layer2(x)
        f2 = x
        x = self.layer3(x)
        f3 = x
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        f4 = x
        x = self.fc(x)
        if is_feat:
            return ([f1, f2, f3], x)
        else:
            return x
# NOTE(review): the decorator lines of this command were corrupted in the
# original file — each lost its leading `@name` portion, leaving bare call
# expressions such as `(epilog=merge_epilog)` and `_obj`.  They are
# reconstructed below from the standard click idiom; confirm the helper names
# (`command`, `add_multiple_databases_parameters`, `pass_obj`) against the
# project's CLI module.
@command(epilog=merge_epilog)
@click.option('--title', help='Title to use for the output file')
@click.option('--output', '-o', 'output_filename', default=None, type=click.Path(), help='The output database filename (the default is "merged.mmpdb")')
@add_multiple_databases_parameters()
@click.option('--verify', type=click.Choice(['off', 'options', 'constants', 'all']), default='all', help='level of consistency checking')
@pass_obj
def merge(reporter, databases_options, title, output_filename, verify):
    """Merge several mmpdb databases into one output database."""
    start_time = time.time()
    # NOTE(review): the passed-in reporter is unconditionally replaced by a
    # verbose one — looks like a debugging leftover; confirm before relying
    # on the `reporter` parameter.
    reporter = reporters.get_reporter('verbose')
    reporter.quiet = False
    databases = databases_options.databases
    if (not databases):
        die('Must specify at least one mmpdb database.')
    if (title is None):
        title = f'Merged MMPs from {len(databases)} files'
    if (output_filename is None):
        output_filename = 'merged.mmpdb'
        reporter.warning(f'No --output file specified, using {output_filename!r}')
    # Check that the inputs were built with compatible fragment/index options.
    (fragment_options, index_options) = verify_consistency(verify, databases, reporter)
    create_output_database(output_filename=output_filename, title=title, fragment_options=fragment_options, index_options=index_options)
    # Stage the merge in an in-memory database attached to the output file;
    # durability pragmas are off because everything can be regenerated.
    working_db = sqlite3.connect(':memory:')
    schema.create_schema_for_sqlite(working_db)
    working_db.execute('PRAGMA synchronous=off')
    working_db.execute('PRAGMA journal_mode=off')
    c = working_db.cursor()
    c.execute('ATTACH DATABASE ? AS new', (output_filename,))
    filenames = databases
    # Copy each table family across all inputs.
    process_compound_tables(c, filenames, reporter)
    process_rule_smiles_tables(c, filenames, reporter)
    process_rule_tables(c, filenames, reporter)
    process_environment_fingerprint_tables(c, filenames, reporter)
    process_rule_environment_tables(c, filenames, reporter)
    process_pair_tables(c, filenames, reporter)
    try:
        c.execute('COMMIT')
    except sqlite3.OperationalError:
        # No transaction open — nothing to commit.
        pass
    c.execute('DETACH DATABASE new')
    c.close()
    working_db.close()
    reporter.update('[Stage 7/7] Indexing and analyzing...')
    output_db = sqlite3.connect(output_filename)
    start_index_time = time.time()
    with transaction(output_db.cursor()) as c:
        schema.create_index(c)
        index_writers.update_counts(c)
        c.execute('PRAGMA analysis_limit=1000')
        c.execute('ANALYZE')
    end_index_time = time.time()
    reporter.report(f'[Stage 7/7] Indexed and analyzed the merged records in {SECS(start_index_time, end_index_time)}.')
    end_time = time.time()
    reporter.report(f'Merged {len(filenames)} files in {SECS(start_time, end_time)}.')
def install_emc(args):
    """Create fake EMC (memory-controller) debugfs/sysfs entries for testing.

    Populates <FAKE_DIRECTORY>/kernel/debug/bpmp/debug/clk/emc with clock-rate
    files and <FAKE_DIRECTORY>/kernel/actmon_avg_activity with activity files.
    The previously duplicated directory-creation code is factored into
    ``_ensure_dir``.
    """
    emc_path = os.path.join(FAKE_DIRECTORY, 'kernel/debug', 'bpmp/debug/clk/emc')
    _ensure_dir(emc_path)
    write_on_file(os.path.join(emc_path, 'rate'), '4000000')
    write_on_file(os.path.join(emc_path, 'max_rate'), '')
    write_on_file(os.path.join(emc_path, 'min_rate'), '0')
    write_on_file(os.path.join(emc_path, 'mrq_rate_locked'), '')
    path_activity = os.path.join(FAKE_DIRECTORY, 'kernel/actmon_avg_activity')
    _ensure_dir(path_activity)
    write_on_file(os.path.join(path_activity, 'mc_all'), '0')


def _ensure_dir(path):
    """Create *path* (and parents) if missing, logging like the original code."""
    if not os.path.isdir(path):
        print('The directory {path} is not present. Creating a new one..'.format(path=path))
        os.makedirs(path)
def check_input_dir(fs_dir, user_dir, vis_type, freesurfer_install_required=True):
    """Validate the --fs_dir / --user_dir combination and select the input dir.

    Exactly one of *fs_dir* / *user_dir* must be given; Freesurfer-dependent
    vis types additionally require *fs_dir*.  Returns (in_dir,
    type_of_features) where type_of_features is 'freesurfer' or 'generic'.
    """
    in_dir = fs_dir
    # Guard clauses: reject impossible argument combinations first.
    if fs_dir is None and user_dir is None:
        raise ValueError('At least one of --fs_dir or --user_dir must be specified.')
    if fs_dir is not None:
        if user_dir is not None:
            raise ValueError('Only one of --fs_dir or --user_dir can be specified.')
        if freesurfer_install_required and not freesurfer_installed():
            raise EnvironmentError('Freesurfer functionality is requested (e.g. visualizing annotations), but is not installed!')
    if fs_dir is None and vis_type in freesurfer_vis_types:
        raise ValueError('vis_type depending on Freesurfer organization is specified, but --fs_dir is not provided.')
    # Pick whichever directory was supplied and tag the feature type.
    if user_dir is None:
        if not pexists(fs_dir):
            raise IOError('Freesurfer directory specified does not exist!')
        in_dir = fs_dir
        type_of_features = 'freesurfer'
    elif fs_dir is None:
        if not pexists(user_dir):
            raise IOError('User-specified input directory does not exist!')
        in_dir = user_dir
        type_of_features = 'generic'
    if not pexists(in_dir):
        raise IOError('Invalid specification - check proper combination of --fs_dir and --user_dir')
    return (in_dir, type_of_features)
class ViewSwitchIpDetail(db.Model):
    """Per-(switch, domain, ISP) record of a DNS/IP switch operation's
    before/after state.

    NOTE(review): `id`, `domain_name`, and `isp` are all flagged
    primary_key=True — SQLAlchemy treats this as a composite primary key;
    confirm that is intended rather than `id` alone.
    """
    __tablename__ = 'tb_view_switch_ip_detail'
    id = db.Column(db.Integer, primary_key=True)
    # Foreign reference to the switch operation this detail row belongs to.
    switch_id = db.Column(db.Integer, nullable=False)
    domain_name = db.Column(db.String(256), nullable=False, primary_key=True)
    # JSON-encoded list of the server rooms enabled before the switch.
    before_enabled_server_rooms = db.Column(db.String(256), default='[]')
    isp = db.Column(db.String(256), primary_key=True)
    before_state = db.Column(db.String(32), default='disabled')
    after_state = db.Column(db.String(32), default='disabled')

    def __init__(self, switch_id, domain_name, before_enabled_server_rooms, isp, before_state, after_state):
        self.switch_id = switch_id
        self.domain_name = domain_name
        # Callers pass the raw list; it is stored serialized as JSON.
        self.before_enabled_server_rooms = json.dumps(before_enabled_server_rooms)
        self.isp = isp
        self.before_state = before_state
        self.after_state = after_state
class OpTypePattern(Pattern):
    """Pattern that matches an op by type, with optional sub-patterns for its inputs."""

    def __init__(self, op_type, name=None, inputs=None, ordered_inputs=True):
        self._op_type = op_type
        self._name = name
        if inputs is None:
            inputs = []
        if len(inputs) > 8:
            raise ValueError('Only < 8 inputs are allowed when ordered_inputs is False.')
        # Bare strings are promoted to OpTypePattern sub-patterns.
        self._inputs = [
            pattern if isinstance(pattern, Pattern) else OpTypePattern(pattern)
            for pattern in inputs
        ]
        self._ordered_inputs = ordered_inputs

    def name(self):
        """Name given to this pattern (may be None)."""
        return self._name

    def match(self, op, tensor):
        """Return a MatchResult if *op*/*tensor* match this pattern, else None."""
        # '*' is a wildcard; otherwise op.type must be one of the
        # '|'-separated alternatives.
        if self._op_type != '*' and op.type not in self._op_type.split('|'):
            return None
        match_result = MatchResult()
        match_result.add(self, op, tensor)
        if not self._inputs:
            return match_result
        if len(op.inputs) != len(self._inputs):
            return None
        # Unordered inputs: try every permutation of the sub-patterns.
        if self._ordered_inputs:
            candidate_orderings = [self._inputs]
        else:
            candidate_orderings = list(itertools.permutations(self._inputs))
        for ordering in candidate_orderings:
            ordering_matched = True
            for input_tensor, input_pattern in zip(op.inputs, ordering):
                sub_result = input_pattern.match(input_tensor.op, input_tensor)
                if sub_result is None:
                    ordering_matched = False
                    break
                # Note: merges accumulate into the shared result even when a
                # later input in this ordering fails (preserved behavior).
                match_result.merge_from(sub_result)
            if ordering_matched:
                return match_result
        return None
def plot_slit(w, I=None, wunit='', plot_unit='same', Iunit=None, warnings=True, ls='-', title=None, waveunit=None):
    """Plot a slit function annotated with its FWHM, effective FWHM and area.

    Parameters
    ----------
    w, I:
        slit wavespace and intensity arrays; when *w* is a filename and *I*
        is None the two columns are loaded with np.loadtxt.
    wunit: 'nm', 'cm-1' or ''
        unit of *w*.
    plot_unit: 'same', 'nm', 'cm-1' or ''
        unit to plot in; converted from *wunit* when they differ.
    Iunit: str, optional
        intensity unit, appended to the y-axis label.
    warnings: bool
        if True, warn about non-centered slits and non-zero edges.
    ls: str
        matplotlib line style for the slit curve.
    title: str, optional
        figure title.
    waveunit:
        deprecated alias for *wunit*.

    Returns
    -------
    (fig, ax): the matplotlib Figure and Axes.
    """
    # Backwards compatibility for the renamed parameter.
    if (waveunit is not None):
        warn('`waveunit=` parameter in convolve_with_slit is now named `wunit=`', DeprecationWarning)
        wunit = waveunit
    import matplotlib.pyplot as plt
    from radis.misc.plot import set_style
    set_style()
    try:
        from radis.plot.toolbar import add_tools
        add_tools()
    except:
        # NOTE(review): bare except makes the extra toolbar best-effort — any
        # import or runtime error is silently ignored; consider narrowing.
        pass
    # Accept a filename instead of arrays.
    if (isinstance(w, str) and (I is None)):
        (w, I) = np.loadtxt(w).T
    assert (len(w) == len(I))
    if anynan(I):
        warn('Slit function has nans')
        w = w[(~ np.isnan(I))]
        I = I[(~ np.isnan(I))]
    assert (len(I) > 0)
    # Normalize unit spellings, then convert w to the requested plot unit.
    wunit = cast_waveunit(wunit, force_match=False)
    plot_unit = cast_waveunit(plot_unit, force_match=False)
    if (plot_unit == 'same'):
        plot_unit = wunit
    elif ((wunit == 'cm-1') and (plot_unit == 'nm')):
        w = cm2nm(w)
        wunit = 'nm'
    elif ((wunit == 'nm') and (plot_unit == 'cm-1')):
        w = nm2cm(w)
        wunit = 'cm-1'
    elif (wunit == plot_unit):
        pass
    elif (plot_unit == ''):
        pass
    else:
        raise ValueError('Unknown plot unit: {0}'.format(plot_unit))
    # Characteristic widths used for the legend and the guide lines.
    (FWHM, xmin, xmax) = get_FWHM(w, I, return_index=True)
    FWHM_eff = get_effective_FWHM(w, I)
    if (plot_unit == 'nm'):
        xlabel = 'Wavelength (nm)'
    elif (plot_unit == 'cm-1'):
        xlabel = 'Wavenumber (cm-1)'
    elif (plot_unit == ''):
        xlabel = 'Wavespace'
    else:
        raise ValueError('Unknown unit for plot_unit: {0}'.format(plot_unit))
    ylabel = 'Slit function'
    if (Iunit is not None):
        ylabel += ' ({0})'.format(Iunit)
    (fig, ax) = plt.subplots()
    # Raw samples in grey, connected curve in black; widths go in the label.
    ax.plot(w, I, 'o', color='lightgrey')
    ax.plot(w, I, 'k', ls=ls, label=(('FWHM: {0:.3f} {1}'.format(FWHM, plot_unit) + '\nEff. FWHM: {0:.3f} {1}'.format(FWHM_eff, plot_unit)) + '\nArea: {0:.3f}'.format(abs(np.trapz(I, x=w)))))
    # Guide lines: array center, FWHM center, FWHM edges and half maximum.
    plt.axvline(w[(len(w) // 2)], ls='-', lw=2, color='lightgrey')
    plt.axvline(w[((xmin + xmax) // 2)], ls='--', color='k', lw=0.5)
    plt.axvline(w[xmin], ls='--', color='k', lw=0.5)
    plt.axvline(w[xmax], ls='--', color='k', lw=0.5)
    plt.axhline((I.max() / 2), ls='--', color='k', lw=0.5)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    plt.legend(loc='best', prop={'size': 16})
    if title:
        plt.title(title)
    fig.tight_layout()
    # Add a small margin around the data.
    (xlmin, xlmax) = ax.get_xlim()
    ax.set_xlim(((xlmin - 0.5), (xlmax + 0.5)))
    if warnings:
        # A slit whose FWHM center is off the array center induces offsets
        # when used for convolution.
        if (w[((xmin + xmax) // 2)] != w[(len(w) // 2)]):
            warn(('Slit function doesnt seem centered: center measured with FWHM' + ' is not the array center (shift: {0:.3f}{1}): This can induce offsets!'.format(abs((w[((xmin + xmax) // 2)] - w[(len(w) // 2)])), wunit)))
        if ((I[0] != 0) or (I[(- 1)] != 0)):
            warn('Slit function should have zeros on both sides')
    return (fig, ax)
class IgnoredUnusedAttributes(StringSequenceOption):
    """Option listing attribute names the unused-attribute check skips by
    default: Python/ABC machinery and dunders that exist on most classes
    without ever being referenced explicitly."""
    name = 'ignored_unused_attributes'
    default_value = ['_abc_cache', '_abc_negative_cache', '__abstractmethods__', '_abc_negative_cache_version', '_abc_registry', '__module__', '__doc__', '__init__', '__dict__', '__weakref__', '__enter__', '__exit__', '__metaclass__']
def build_train_valid_test_data_iterators(build_train_valid_test_datasets_provider):
    """Build train/valid/test data iterators for Megatron-style training.

    Only model-parallel rank 0 builds the actual datasets; the resulting
    do_train / do_valid / do_test flags are broadcast so every model-parallel
    rank agrees on which phases will run.  Samplers are fast-forwarded to
    args.iteration so training resumes mid-epoch correctly.
    """
    args = get_args()
    (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)
    print_rank_0('> building train, validation, and test datasets ...')
    if (mpu.get_model_parallel_rank() == 0):
        # Global batch size across all data-parallel replicas.
        data_parallel_size = mpu.get_data_parallel_world_size()
        global_batch_size = (args.batch_size * data_parallel_size)
        train_iters = args.train_iters
        # One extra eval pass to cover the final partial interval.
        eval_iters = (((train_iters // args.eval_interval) + 1) * args.eval_iters)
        test_iters = args.eval_iters
        # Minimum number of samples each split must provide.
        train_val_test_num_samples = [(train_iters * global_batch_size), (eval_iters * global_batch_size), (test_iters * global_batch_size)]
        print_rank_0(' > datasets target sizes (minimum size):')
        print_rank_0(' train: {}'.format(train_val_test_num_samples[0]))
        print_rank_0(' validation: {}'.format(train_val_test_num_samples[1]))
        print_rank_0(' test: {}'.format(train_val_test_num_samples[2]))
        (train_ds, valid_ds, test_ds) = build_train_valid_test_datasets_provider(train_val_test_num_samples)
        train_dataloader = make_data_loader(train_ds)
        valid_dataloader = make_data_loader(valid_ds)
        test_dataloader = make_data_loader(test_ds)
        do_train = ((train_dataloader is not None) and (args.train_iters > 0))
        do_valid = ((valid_dataloader is not None) and (args.eval_iters > 0))
        do_test = ((test_dataloader is not None) and (args.eval_iters > 0))
        # Pack the phase flags for broadcast to the other model-parallel ranks.
        flags = torch.cuda.LongTensor([int(do_train), int(do_valid), int(do_test)])
    else:
        flags = torch.cuda.LongTensor([0, 0, 0])
    torch.distributed.broadcast(flags, mpu.get_model_parallel_src_rank(), group=mpu.get_model_parallel_group())
    args.do_train = flags[0].item()
    args.do_valid = flags[1].item()
    args.do_test = flags[2].item()
    # Fast-forward samplers when resuming from a checkpointed iteration.
    if (train_dataloader is not None):
        train_dataloader.batch_sampler.start_iter = (args.iteration % len(train_dataloader))
        print_rank_0('setting training data start iteration to {}'.format(train_dataloader.batch_sampler.start_iter))
    if (valid_dataloader is not None):
        start_iter_val = ((args.iteration // args.eval_interval) * args.eval_iters)
        valid_dataloader.batch_sampler.start_iter = (start_iter_val % len(valid_dataloader))
        print_rank_0('setting validation data start iteration to {}'.format(valid_dataloader.batch_sampler.start_iter))
    if (train_dataloader is not None):
        train_data_iterator = iter(train_dataloader)
    else:
        train_data_iterator = None
    if (valid_dataloader is not None):
        valid_data_iterator = iter(valid_dataloader)
    else:
        valid_data_iterator = None
    if (test_dataloader is not None):
        test_data_iterator = iter(test_dataloader)
    else:
        test_data_iterator = None
    return (train_data_iterator, valid_data_iterator, test_data_iterator)
class SectionArgspathWrapper(Dataset):
    """Dataset wrapper tagging every item with its section and args path."""

    def __init__(self, dataset, section, args_path):
        self.dataset = dataset
        self.section = section
        self.args_path = args_path

    def __getitem__(self, index):
        # Annotate the underlying item in place (items are dict-like).
        sample = self.dataset[index]
        sample['section'] = self.section
        sample['arg_path'] = self.args_path
        return sample

    def __len__(self):
        return len(self.dataset)
class SpatialDropout1D(Dropout):
    """Spatial 1D dropout: drops entire feature channels rather than single
    elements, which helps when adjacent timesteps are strongly correlated.

    Fix: removed a stray bare ``_spatialdropout1d_support`` token that raised
    NameError at class creation.  NOTE(review): it was almost certainly a
    mangled ``@interfaces.legacy_spatialdropout1d_support`` decorator on
    ``__init__`` — restore it if the legacy-interfaces module is imported in
    this file.
    """

    def __init__(self, rate, **kwargs):
        super(SpatialDropout1D, self).__init__(rate, **kwargs)
        # Expect (batch, timesteps, channels) inputs.
        self.input_spec = InputSpec(ndim=3)

    def _get_noise_shape(self, inputs):
        input_shape = K.shape(inputs)
        # Broadcast the dropout mask across the time dimension so whole
        # channels are dropped together.
        noise_shape = (input_shape[0], 1, input_shape[2])
        return noise_shape
def squad_convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, padding_strategy='max_length', return_dataset=False, threads=1, tqdm_enabled=True):
    """Convert SQuAD examples into model-ready features, optionally packaging
    them as a PyTorch or TensorFlow dataset.

    Args:
        examples: list of SQuAD example objects to convert.
        tokenizer: tokenizer shared with worker processes via the pool
            initializer.
        max_seq_length: maximum total input sequence length.
        doc_stride: stride between overlapping document spans.
        max_query_length: maximum question length in tokens.
        is_training: whether start/end positions are available and required.
        padding_strategy: padding mode forwarded to the per-example converter.
        return_dataset: False (plain feature list), 'pt' (features +
            TensorDataset) or 'tf' (tf.data.Dataset).
        threads: number of worker processes (capped at cpu_count()).
        tqdm_enabled: whether to display progress bars.

    Returns:
        Features list, or (features, dataset) for 'pt', or a tf.data.Dataset
        for 'tf'.

    Raises:
        RuntimeError: if 'pt'/'tf' is requested but torch/tensorflow is not
            installed.
    """
    features = []
    threads = min(threads, cpu_count())
    # Fan the per-example conversion out over a process pool; the tokenizer is
    # handed to each worker once through the pool initializer.
    with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
        annotate_ = partial(squad_convert_example_to_features, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, padding_strategy=padding_strategy, is_training=is_training)
        features = list(tqdm(p.imap(annotate_, examples, chunksize=32), total=len(examples), desc='convert squad examples to features', disable=(not tqdm_enabled)))
    # Flatten the per-example feature lists and assign sequential ids:
    # example_index counts source examples, unique_id counts features.
    new_features = []
    unique_id = example_index = 0
    for example_features in tqdm(features, total=len(features), desc='add example index and unique id', disable=(not tqdm_enabled)):
        if (not example_features):
            # Example produced no features (e.g. conversion failed) — skip it.
            continue
        for example_feature in example_features:
            example_feature.example_index = example_index
            example_feature.unique_id = unique_id
            new_features.append(example_feature)
            unique_id += 1
        example_index += 1
    features = new_features
    del new_features
    if (return_dataset == 'pt'):
        if (not is_torch_available()):
            raise RuntimeError('PyTorch must be installed to return a PyTorch dataset.')
        # Stack every per-feature field into one tensor each.
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
        all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
        all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)
        if (not is_training):
            # Evaluation: no answer positions, but keep a feature index so
            # predictions can be mapped back to features.
            all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
            dataset = TensorDataset(all_input_ids, all_attention_masks, all_token_type_ids, all_feature_index, all_cls_index, all_p_mask)
        else:
            all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
            all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
            dataset = TensorDataset(all_input_ids, all_attention_masks, all_token_type_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask, all_is_impossible)
        return (features, dataset)
    elif (return_dataset == 'tf'):
        if (not is_tf_available()):
            raise RuntimeError('TensorFlow must be installed to return a TensorFlow dataset.')

        def gen():
            # Generator feeding tf.data; emits (inputs, targets) dicts, with
            # token_type_ids included only when the feature carries them.
            for (i, ex) in enumerate(features):
                if (ex.token_type_ids is None):
                    (yield ({'input_ids': ex.input_ids, 'attention_mask': ex.attention_mask, 'feature_index': i, 'qas_id': ex.qas_id}, {'start_positions': ex.start_position, 'end_positions': ex.end_position, 'cls_index': ex.cls_index, 'p_mask': ex.p_mask, 'is_impossible': ex.is_impossible}))
                else:
                    (yield ({'input_ids': ex.input_ids, 'attention_mask': ex.attention_mask, 'token_type_ids': ex.token_type_ids, 'feature_index': i, 'qas_id': ex.qas_id}, {'start_positions': ex.start_position, 'end_positions': ex.end_position, 'cls_index': ex.cls_index, 'p_mask': ex.p_mask, 'is_impossible': ex.is_impossible}))
        # Declare dtypes/shapes matching what gen() yields, with or without
        # token_type_ids depending on the tokenizer's expected inputs.
        if ('token_type_ids' in tokenizer.model_input_names):
            train_types = ({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32, 'feature_index': tf.int64, 'qas_id': tf.string}, {'start_positions': tf.int64, 'end_positions': tf.int64, 'cls_index': tf.int64, 'p_mask': tf.int32, 'is_impossible': tf.int32})
            train_shapes = ({'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None]), 'token_type_ids': tf.TensorShape([None]), 'feature_index': tf.TensorShape([]), 'qas_id': tf.TensorShape([])}, {'start_positions': tf.TensorShape([]), 'end_positions': tf.TensorShape([]), 'cls_index': tf.TensorShape([]), 'p_mask': tf.TensorShape([None]), 'is_impossible': tf.TensorShape([])})
        else:
            train_types = ({'input_ids': tf.int32, 'attention_mask': tf.int32, 'feature_index': tf.int64, 'qas_id': tf.string}, {'start_positions': tf.int64, 'end_positions': tf.int64, 'cls_index': tf.int64, 'p_mask': tf.int32, 'is_impossible': tf.int32})
            train_shapes = ({'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None]), 'feature_index': tf.TensorShape([]), 'qas_id': tf.TensorShape([])}, {'start_positions': tf.TensorShape([]), 'end_positions': tf.TensorShape([]), 'cls_index': tf.TensorShape([]), 'p_mask': tf.TensorShape([None]), 'is_impossible': tf.TensorShape([])})
        return tf.data.Dataset.from_generator(gen, train_types, train_shapes)
    else:
        return features
def expected_protocol(instrument_cls, comm_pairs, connection_attributes=None, connection_methods=None, **kwargs):
    """Yield an instrument wired to a ProtocolAdapter and verify, on exit,
    that the whole communication script was consumed.

    Fix: the original declared mutable default arguments
    (``connection_attributes={}, connection_methods={}``) — the classic
    shared-dict pitfall. They are now ``None`` sentinels converted to fresh
    dicts per call; callers that passed nothing see identical behavior.

    Args:
        instrument_cls: instrument class to instantiate over the adapter.
        comm_pairs: ordered (write, read) communication pairs the test
            expects to occur.
        connection_attributes: extra attributes for the fake connection.
        connection_methods: extra methods for the fake connection.
        **kwargs: forwarded to the instrument constructor.

    Yields:
        The instantiated instrument.

    Raises:
        AssertionError: if comm pairs remain unprocessed or a read/write
            buffer is non-empty after the test body finishes.
    """
    if connection_attributes is None:
        connection_attributes = {}
    if connection_methods is None:
        connection_methods = {}
    protocol = ProtocolAdapter(comm_pairs, connection_attributes=connection_attributes, connection_methods=connection_methods)
    instr = instrument_cls(protocol, **kwargs)
    yield instr
    # After the test body ran, every scripted exchange must have happened...
    assert (protocol._index == len(comm_pairs)), f'Unprocessed protocol definitions remain: {comm_pairs[protocol._index:]}.'
    # ...and no partial message may be left sitting in either buffer.
    assert (protocol._write_buffer is None), f"Non-empty write buffer remains: '{protocol._write_buffer}'."
    assert (protocol._read_buffer is None), f"Non-empty read buffer remains: '{protocol._read_buffer}'."
class DummyStateMachine(StateMachineWS):
    """Minimal docutils state machine for exercising directives and roles
    in isolation (test/tooling helper).

    Use: construct, call reset() with a document/parent/level, then
    run_directive() or run_role().
    """

    def __init__(self):
        # memo mimics the RSTStateMachine memo struct; inliner is created
        # lazily in reset().
        self.memo = Struct(title_styles=[], inliner=None)
        self.state = RSTState(self)
        self.input_offset = 0

    def reset(self, document, parent, level):
        """Point the machine at a document/parent node at a section level.

        Wires the language, reporter and inliner the way the real RST state
        machine would before processing.
        """
        self.language = languages.get_language(document.settings.language_code)
        self.memo.document = document
        self.memo.reporter = document.reporter
        self.memo.language = self.language
        self.memo.section_level = level
        if (self.memo.inliner is None):
            # Lazily build and customize the inline-markup parser once.
            self.memo.inliner = Inliner()
            self.memo.inliner.init_customizations(document.settings)
        inliner = self.memo.inliner
        inliner.reporter = document.reporter
        inliner.document = document
        inliner.language = self.language
        inliner.parent = parent
        self.document = document
        self.reporter = self.memo.reporter
        self.node = parent
        self.state.runtime_init()
        # NOTE(review): input_lines is set to the document's source *path*
        # attribute here, not actual line content — confirm callers expect that.
        self.input_lines = document['source']

    def run_directive(self, name, arguments=None, options=None, content=None):
        """Look up directive `name` and run it; returns the directive's nodes."""
        if (options is None):
            options = {}
        if (content is None):
            content = []
        if (arguments is None):
            arguments = []
        # Resolve the directive class, then instantiate it with dummy
        # positional context (block_text/content_offset are placeholders).
        (direc, _) = directive(name, self.language, self.document)
        direc = direc(name=name, arguments=arguments, options=options, content=content, lineno=self.node.line, content_offset=0, block_text='Dummy BlockText', state=self.state, state_machine=self)
        return direc.run()

    def run_role(self, name, options=None, content=None):
        """Look up role `name`, run it on `content`, return its single node."""
        if (options is None):
            options = {}
        if (content is None):
            content = []
        (role_fn, _) = role(name, self.language, self.node.line, self.reporter)
        (vec, _) = role_fn(name, rawtext=str(content), text=str(content), lineno=self.node.line, inliner=self.memo.inliner, options=options, content=content)
        # This harness only supports roles that produce exactly one node.
        assert (len(vec) == 1), 'only support one list in role'
        return vec[0]

    def get_source_and_line(self, lineno=None):
        """Return (source, line); falls back to the current node's line."""
        if lineno:
            return (self.document['source'], lineno)
        else:
            return (self.document['source'], self.node.line)
class _FragList(): flist: list[bytes] def __init__(self, init: (list[bytes] | None)=None) -> None: self.flist = [] if init: self.flist.extend(init) def put_raw(self, val: bytes) -> None: self.flist.append(val) def put_u32(self, val: int) -> None: self.flist.append(val.to_bytes(length=4, byteorder='big')) def put_u64(self, val: int) -> None: self.flist.append(val.to_bytes(length=8, byteorder='big')) def put_sshstr(self, val: (bytes | _FragList)) -> None: if isinstance(val, (bytes, memoryview, bytearray)): self.put_u32(len(val)) self.flist.append(val) else: self.put_u32(val.size()) self.flist.extend(val.flist) def put_mpint(self, val: int) -> None: self.put_sshstr(_to_mpint(val)) def size(self) -> int: return sum(map(len, self.flist)) def render(self, dstbuf: memoryview, pos: int=0) -> int: for frag in self.flist: flen = len(frag) (start, pos) = (pos, (pos + flen)) dstbuf[start:pos] = frag return pos def tobytes(self) -> bytes: buf = memoryview(bytearray(self.size())) self.render(buf) return buf.tobytes()
def change_name_color(caller, treestr, index, selection):
    """Menu callback that recolors (or uncolors) the caller's display name.

    ``treestr`` and ``index`` belong to the menu-callback signature and are
    unused here; only ``selection`` (the chosen option label) matters. An
    unrecognized selection is silently ignored.
    """
    # Remember the plain name once, before any color codes are prepended.
    if not caller.db.uncolored_name:
        caller.db.uncolored_name = caller.key
    # Option label -> Evennia color code.
    colordict = {
        'Red': '|511', 'Pink': '|533', 'Maroon': '|301',
        'Orange': '|531', 'Brown': '|321', 'Sienna': '|420',
        'Yellow': '|551', 'Gold': '|540', 'Dandelion': '|553',
        'Green': '|141', 'Lime': '|350', 'Forest': '|032',
        'Blue': '|115', 'Cyan': '|155', 'Navy': '|113',
        'Purple': '|415', 'Lavender': '|535', 'Fuchsia': '|503',
    }
    if selection == 'Remove name color':
        # Restore the stored plain name.
        caller.key = caller.db.uncolored_name
        caller.msg('Name color removed.')
        return
    newcolor = colordict.get(selection)
    if newcolor is not None:
        # Rebuild the key from the plain name so colors never stack.
        caller.key = newcolor + caller.db.uncolored_name + '|n'
        caller.msg(newcolor + ('Name color changed to %s!' % selection) + '|n')
def test_tree_max_product_tree():
    """Max-product BP on random spanning trees must match exact AD3 MAP.

    Fix: the original used a bare ``except:``, which also swallows
    KeyboardInterrupt/SystemExit and any unrelated error inside the import;
    only an ImportError should trigger the skip.
    """
    try:
        from scipy.sparse.csgraph import minimum_spanning_tree
    except ImportError:
        raise SkipTest('Not testing trees, scipy version >= 0.11 required')
    rnd = np.random.RandomState(0)
    for i in range(100):
        # Random dense weight matrix -> spanning tree -> edge list (n, 2).
        graph = rnd.uniform(size=(10, 10))
        tree = minimum_spanning_tree(sparse.csr_matrix(graph))
        tree_edges = np.c_[tree.nonzero()]
        unary_potentials = rnd.normal(size=(10, 3))
        pairwise_potentials = rnd.normal(size=(9, 3, 3))
        # Exact MAP via AD3 branch-and-bound must equal max-product on a tree
        # (max-product is exact on acyclic graphs).
        result_ad3 = inference_ad3(unary_potentials, pairwise_potentials, tree_edges, branch_and_bound=True)
        result_mp = inference_max_product(unary_potentials, pairwise_potentials, tree_edges)
        assert_array_equal(result_ad3, result_mp)