code
stringlengths
281
23.7M
def include_subclasses(cl: type, converter: Converter, subclasses: (tuple[(type, ...)] | None)=None, union_strategy: (Callable[([Any, BaseConverter], Any)] | None)=None, overrides: (dict[(str, AttributeOverride)] | None)=None) -> None:
    """Register `cl` and its subclasses on `converter` for (un)structuring.

    If `subclasses` is not given, the subclass tree is discovered at call
    time, so any subclasses defined later are not picked up.
    """
    # Force pending classes to be finalized before walking the hierarchy.
    collect()
    tree = (
        (cl, *subclasses)
        if subclasses is not None
        else tuple(_make_subclasses_tree(cl))
    )
    if union_strategy is not None:
        _include_subclasses_with_union_strategy(converter, tree, union_strategy, overrides)
    else:
        _include_subclasses_without_union_strategy(cl, converter, tree, overrides)
class World():
    """A story-world graph of locations, objects and characters.

    Nodes are entity names; edges are spatial relations.  New entities and
    relations are mined from `args.input_text` with a QA model.
    """

    def __init__(self, locs, objs, relations, args):
        self.graph = nx.Graph()
        # Locations are drawn filled yellow in the exported dot graph.
        self.graph.add_nodes_from(locs, type='location', fillcolor='yellow', style='filled')
        self.graph.add_nodes_from(objs, type='object')
        self.graph.add_edges_from(relations)
        self.locations = {v for v in locs}
        self.objects = {v for v in objs}
        self.edge_labels = {}
        self.args = args
        # args.input_text is a path to the story text file.
        with open(args.input_text) as f:
            self.input_text = f.read()
        self.model = QA('model/albert-large-squad')

    def is_connected(self):
        # True when the whole world graph forms a single connected component.
        return (len(list(nx.connected_components(self.graph))) == 1)

    def query(self, query, nsamples=10, cutoff=8):
        """Ask the QA model `query` against the current story text; returns top-K predictions."""
        return self.model.predictTopK(self.input_text, query, nsamples, cutoff)

    def generateNeighbors(self, nsamples=100):
        """Populate self.candidates[node][kind] with QA answers for each node.

        Template lists (loc2loc_templates etc.) are module-level; one is
        sampled at random and formatted with the node name.
        """
        self.candidates = {}
        for u in self.graph.nodes:
            self.candidates[u] = {}
            if (self.graph.nodes[u]['type'] == 'location'):
                self.candidates[u]['location'] = self.query(random.choice(loc2loc_templates).format(u), nsamples)
                self.candidates[u]['object'] = self.query(random.choice(loc2obj_templates).format(u), nsamples)
                self.candidates[u]['character'] = self.query(random.choice(loc2char_templates).format(u), nsamples)
            if (self.graph.nodes[u]['type'] == 'object'):
                self.candidates[u]['location'] = self.query(random.choice(obj2loc_templates).format(u), nsamples)
            if (self.graph.nodes[u]['type'] == 'character'):
                self.candidates[u]['location'] = self.query(random.choice(char2loc_templates).format(u), nsamples)

    def relatedness(self, u, v, type='location'):
        """Score how related u and v are, using cached QA candidates both ways.

        A candidate answer contributes its probability-weighted word overlap
        with the target, but only if that overlap ties the best overlap with
        ANY node (so weaker matches are discarded).
        """
        s = 0
        (u2v, probs) = self.candidates[u][type]
        if (u2v is not None):
            for (c, p) in zip(u2v, probs):
                a = set(c.text.split()).difference(articles)
                b = set(v.split()).difference(articles)
                best_intersect = 0
                for x in self.graph.nodes:
                    xx = set(x.split()).difference(articles)
                    best_intersect = max(best_intersect, len(a.intersection(xx)))
                if (len(a.intersection(b)) == best_intersect):
                    s += (len(a.intersection(b)) * p)
        # NOTE(review): the reverse direction always reads the 'location'
        # candidate list regardless of `type` — looks intentional (v's
        # location answers) but worth confirming against the caller.
        (v2u, probs) = self.candidates[v]['location']
        if (v2u is not None):
            for (c, p) in zip(v2u, probs):
                a = set(c.text.split()).difference(articles)
                b = set(u.split()).difference(articles)
                best_intersect = 0
                for x in self.graph.nodes:
                    xx = set(x.split()).difference(articles)
                    best_intersect = max(best_intersect, len(a.intersection(xx)))
                if (len(a.intersection(b)) == best_intersect):
                    s += (len(a.intersection(b)) * p)
        return s

    def extractEntity(self, query, threshold=0.05, cutoff=0):
        """Extract one new entity answering `query` and mask it out of the text.

        Returns (entity, prob) or (None, 0) if nothing clears `threshold`.
        Masking the mention prevents re-extracting the same entity.
        """
        (preds, probs) = self.query(query, 50, cutoff)
        if (preds is None):
            return (None, 0)
        for (pred, prob) in zip(preds, probs):
            t = pred.text
            p = prob
            print('> ', t, p)
            if (len(t) < 1):
                continue
            if ((p > threshold) and ('MASK' not in t)):
                # Prefer a shorter confident prediction contained in this one.
                for (pred, prob) in zip(preds, probs):
                    if ((t != pred.text) and (pred.text in t) and (prob > threshold) and (len(pred.text) > 2)):
                        t = pred.text
                        p = prob
                        break
                t = t.strip(string.punctuation)
                remove = t
                words = t.split()
                # Drop a leading article ("the castle" -> "castle").
                if (words[0].lower() in articles):
                    remove = ' '.join(words[1:])
                    words[0] = words[0].lower()
                    t = ' '.join(words[1:])
                print(remove)
                # NOTE(review): the middle .replace(' ', ' ') is a no-op as
                # written — presumably meant to collapse double spaces left by
                # the mask substitution; confirm against the original source.
                self.input_text = self.input_text.replace(remove, '[MASK]').replace(' ', ' ').replace(' .', '.')
                return (t, p)
        return (None, 0)

    def generate(self):
        """Mine characters, locations and objects from the text, then wire them up."""
        locs = []
        objs = []
        chars = []
        threshold = 0.05
        # NOTE(review): reads module-level `args`, not self.args — confirm.
        if (args.cutoffs == 'fairy'):
            cutoffs = [6.5, (- 7), (- 5)]
        elif (args.cutoffs == 'mystery'):
            cutoffs = [3.5, (- 7.5), (- 6)]
        else:
            cutoffs = [int(i) for i in args.cutoffs.split()]
            assert (len(cutoffs) == 3)
        # Keep a pristine copy; extraction masks entities destructively.
        tmp = self.input_text[:]
        print(((('=' * 20) + '\tcharacters\t') + ('=' * 20)))
        self.input_text = tmp
        primer = 'Who is somebody in the story?'
        cutoff = 10
        (t, p) = self.extractEntity(primer, threshold=threshold, cutoff=cutoff)
        while ((t is not None) and (len(t) > 1)):
            # After the first couple of hits, tighten to the genre cutoff.
            if (len(chars) > 1):
                cutoff = cutoffs[0]
            chars.append(t)
            (t, p) = self.extractEntity(primer, threshold=threshold, cutoff=cutoff)
        print(((('=' * 20) + '\tlocations\t') + ('=' * 20)))
        self.input_text = tmp
        primer = 'Where is the location in the story?'
        cutoff = 10
        (t, p) = self.extractEntity(primer, threshold=threshold, cutoff=cutoff)
        while ((t is not None) and (len(t) > 1)):
            locs.append(t)
            if (len(locs) > 1):
                cutoff = cutoffs[1]
            (t, p) = self.extractEntity(primer, threshold=threshold, cutoff=cutoff)
        print(((('=' * 20) + '\tobjects\t\t') + ('=' * 20)))
        self.input_text = tmp
        primer = 'What is an object in the story?'
        cutoff = 10
        (t, p) = self.extractEntity(primer, threshold=threshold, cutoff=cutoff)
        while ((t is not None) and (len(t) > 1)):
            if (len(objs) > 1):
                cutoff = cutoffs[2]
            objs.append(t)
            (t, p) = self.extractEntity(primer, threshold=threshold, cutoff=cutoff)
        # Restore the unmasked text before relation mining.
        self.input_text = tmp
        self.graph.add_nodes_from(locs, type='location', fillcolor='yellow', style='filled')
        self.graph.add_nodes_from(chars, type='character', fillcolor='orange', style='filled')
        self.graph.add_nodes_from(objs, type='object', fillcolor='white', style='filled')
        self.autocomplete()

    def autocomplete(self):
        """Add edges until the graph is connected, joining each stray component
        to its most related location in the main component."""
        self.generateNeighbors(self.args.nsamples)
        print(((('=' * 20) + '\trelations\t') + ('=' * 20)))
        while (not self.is_connected()):
            components = list(nx.connected_components(self.graph))
            # (score, u-in-main, v-elsewhere); seeded so max() has a floor.
            best = ((- 1), next(iter(components[0])), next(iter(components[1])))
            main = components[0]
            # loc_done: no location remains outside the main component.
            loc_done = True
            for c in components[1:]:
                for v in c:
                    if (self.graph.nodes[v]['type'] == 'location'):
                        loc_done = False
            for u in main:
                if (self.graph.nodes[u]['type'] != 'location'):
                    continue
                for c in components[1:]:
                    for v in c:
                        # Attach remaining locations before anything else.
                        if ((not loc_done) and (self.graph.nodes[v]['type'] != 'location')):
                            continue
                        best = max(best, (self.relatedness(u, v, self.graph.nodes[v]['type']), u, v))
            (_, u, v) = best
            # No signal (or forced random mode): pick a random main location.
            if ((_ == 0) or args.random):
                candidates = []
                for c in components[0]:
                    if (self.graph.nodes[c]['type'] == 'location'):
                        candidates.append(c)
                u = random.choice(candidates)
            if ((self.graph.nodes[u]['type'] == 'location') and (self.graph.nodes[v]['type'] == 'location')):
                type = 'connected to'
            else:
                type = 'located in'
            print('{} {} {}'.format(v, type, u))
            self.graph.add_edge(v, u, label=type)
            self.edge_labels[(v, u)] = type

    def export(self, filename='graph.dot'):
        """Write the graph as Graphviz dot (to `filename`) and GML (fixed path)."""
        nx.nx_pydot.write_dot(self.graph, filename)
        nx.write_gml(self.graph, 'graph.gml', stringizer=None)

    def draw(self, filename='./graph.svg'):
        """Render the graph: via sfdp+inkscape when args.write_sfdp, else matplotlib."""
        self.export()
        if args.write_sfdp:
            # NOTE(review): .format(filename) has no {} placeholder here —
            # the command always reads graph.dot; likely a leftover.
            cmd = 'sfdp -x -Goverlap=False -Tsvg graph.dot'.format(filename)
            returned_value = subprocess.check_output(cmd, shell=True)
            with open(filename, 'wb') as f:
                f.write(returned_value)
            cmd = 'inkscape -z -e {}.png {}.svg'.format(filename[:(- 4)], filename[:(- 4)])
            returned_value = subprocess.check_output(cmd, shell=True)
        else:
            nx.draw(self.graph, with_labels=True)
            plt.savefig((filename[:(- 4)] + '.png'))
class FashionMNIST_truncated_WO_reload(data.Dataset):
    """A view onto an already-loaded FashionMNIST dataset, restricted to
    the indices in `dataidxs` — avoids reloading the data from disk."""

    def __init__(self, root, dataidxs=None, train=True, transform=None, target_transform=None, full_dataset=None):
        self.root = root
        self.dataidxs = dataidxs
        self.train = train
        self.transform = transform
        self.target_transform = target_transform
        self.full_dataset = full_dataset
        self.data, self.targets = self.__build_truncated_dataset__()

    def __build_truncated_dataset__(self):
        """Return (data, targets) for the configured subset of the full dataset."""
        subset_data = self.full_dataset.data
        subset_targets = np.array(self.full_dataset.targets)
        if self.dataidxs is None:
            return subset_data, subset_targets
        return subset_data[self.dataidxs], subset_targets[self.dataidxs]

    def __getitem__(self, index):
        """Return (image, target) at `index`, with optional transforms applied."""
        img, targets = self.data[index], self.targets[index]
        # Wrap as a grayscale PIL image so torchvision transforms apply cleanly.
        img = Image.fromarray(img.numpy(), mode='L')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            targets = self.target_transform(targets)
        return img, targets

    def __len__(self):
        return len(self.data)
class AudioSource(Component):
    """Component that plays an AudioClip on its own dedicated mixer channel."""

    # Inspector-exposed settings (framework descriptors, not plain attrs).
    playOnStart = ShowInInspector(bool, False)
    loop = ShowInInspector(bool, False)
    clip = ShowInInspector(AudioClip, None)

    def __init__(self):
        super(AudioSource, self).__init__()
        # Each AudioSource claims the next global channel index; the mixer
        # pool is grown to match whenever audio is enabled.
        global channels
        self.channel = channels
        channels += 1
        if config.audio:
            mixer.Mix_AllocateChannels(channels)

    def SetClip(self, clip):
        """Assign the AudioClip this source will play."""
        self.clip = clip

    def Play(self):
        """Play the clip once on this source's channel; lazily loads the WAV."""
        if (self.clip is None):
            Logger.LogLine(Logger.WARN, 'AudioSource has no AudioClip')
            return
        if (not config.audio):
            return
        if (self.clip.music is None):
            self.clip.music = mixer.Mix_LoadWAV(self.clip.path.encode())
        if (mixer.Mix_PlayChannel(self.channel, self.clip.music, 0) == (- 1)):
            Logger.LogLine(Logger.WARN, ('Unable to play file: %s' % mixer.Mix_GetError().decode()))

    def Stop(self):
        """Halt playback on this channel."""
        if (not config.audio):
            return
        # NOTE(review): unlike Play(), a missing clip only logs a warning and
        # the channel is still halted — confirm this fall-through is intended.
        if (self.clip is None):
            Logger.LogLine(Logger.WARN, 'AudioSource has no AudioClip')
        mixer.Mix_HaltChannel(self.channel)

    def Pause(self):
        """Pause playback on this channel (same warn-and-continue as Stop)."""
        if (not config.audio):
            return
        if (self.clip is None):
            Logger.LogLine(Logger.WARN, 'AudioSource has no AudioClip')
        mixer.Mix_Pause(self.channel)

    def UnPause(self):
        """Resume a paused channel (same warn-and-continue as Stop)."""
        if (not config.audio):
            return
        if (self.clip is None):
            Logger.LogLine(Logger.WARN, 'AudioSource has no AudioClip')
        mixer.Mix_Resume(self.channel)

    def Playing(self):
        """Return whether this channel is currently playing (False if audio off)."""
        if (not config.audio):
            return False
        if (self.clip is None):
            Logger.LogLine(Logger.WARN, 'AudioSource has no AudioClip')
        return mixer.Mix_Playing(self.channel)
class Effect4060(BaseEffect):
    """Fitting effect: multiplies thermal damage of rocket-using charges by the
    beacon's smallWeaponDamageMultiplier (projected, passive)."""

    runTime = 'early'
    type = ('projected', 'passive')

    # NOTE: `handler` is invoked unbound by the effect framework — no `self`;
    # the first positional argument is the fit being modified.
    def handler(fit, beacon, context, projectionRange, **kwargs):
        fit.modules.filteredChargeMultiply((lambda mod: mod.charge.requiresSkill('Rockets')), 'thermalDamage', beacon.getModifiedItemAttr('smallWeaponDamageMultiplier'), stackingPenalties=True, penaltyGroup='postMul', **kwargs)
class TestMemcachedCollector(CollectorTestCase):
    """Unit tests for MemcachedCollector."""

    def setUp(self):
        config = get_collector_config('MemcachedCollector', {'interval': 10, 'hosts': ['localhost:11211']})
        self.collector = MemcachedCollector(config, None)

    def test_import(self):
        self.assertTrue(MemcachedCollector)

    # Fix: the decorator had been reduced to a bare expression
    # ('socket.socket') — restored as a mock patch of the socket module.
    @patch('socket.socket')
    def test_get_raw_stats_works_across_packet_boundaries(self, socket_mock):
        socket_instance = MagicMock()
        socket_mock.return_value = socket_instance
        # Simulate the stats reply arriving split across two recv() calls.
        stats_packets = ['stat foo 1\r\n', 'END\r\n']
        socket_instance.recv.side_effect = stats_packets
        stats = self.collector.get_raw_stats('', None)
        self.assertEqual(stats, ''.join(stats_packets))

    # Fix: likewise restored from the bare (Collector, 'publish') fragment.
    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        # Feed a canned stats fixture instead of talking to a real memcached.
        patch_raw_stats = patch.object(MemcachedCollector, 'get_raw_stats', Mock(return_value=self.getFixture('stats').getvalue()))
        patch_raw_stats.start()
        self.collector.collect()
        patch_raw_stats.stop()
        metrics = {'localhost.reclaimed': 0.0, 'localhost.expired_unfetched': 0.0,
                   'localhost.hash_is_expanding': 0.0, 'localhost.cas_hits': 0.0,
                   'localhost.uptime': 25763, 'localhost.touch_hits': 0.0,
                   'localhost.delete_misses': 0.0, 'localhost.listen_disabled_num': 0.0,
                   'localhost.cas_misses': 0.0, 'localhost.decr_hits': 0.0,
                   'localhost.cmd_touch': 0.0, 'localhost.incr_hits': 0.0,
                   'localhost.auth_cmds': 0.0, 'localhost.limit_maxbytes': 0.0,
                   'localhost.bytes_written': 0.0, 'localhost.incr_misses': 0.0,
                   'localhost.rusage_system': 0.195071, 'localhost.total_items': 0.0,
                   'localhost.cmd_get': 0.0, 'localhost.curr_connections': 10.0,
                   'localhost.touch_misses': 0.0, 'localhost.threads': 4.0,
                   'localhost.total_connections': 0, 'localhost.cmd_set': 0.0,
                   'localhost.curr_items': 0.0, 'localhost.conn_yields': 0.0,
                   'localhost.get_misses': 0.0, 'localhost.reserved_fds': 20.0,
                   'localhost.bytes_read': 0, 'localhost.hash_bytes': 524288.0,
                   'localhost.evicted_unfetched': 0.0, 'localhost.cas_badval': 0.0,
                   'localhost.cmd_flush': 0.0, 'localhost.evictions': 0.0,
                   'localhost.bytes': 0.0, 'localhost.connection_structures': 11.0,
                   'localhost.hash_power_level': 16.0, 'localhost.auth_errors': 0.0,
                   'localhost.rusage_user': 0.231516, 'localhost.delete_hits': 0.0,
                   'localhost.decr_misses': 0.0, 'localhost.get_hits': 0.0,
                   'localhost.repcached_qi_free': 0.0}
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
class StarletteOpenAPIValidRequestHandler():
    """Callable bridging openapi-core validation into a Starlette middleware:
    stores the unmarshal result on the request's ASGI scope, then forwards
    the request to the next handler in the chain."""

    def __init__(self, request: Request, call_next: RequestResponseEndpoint):
        self.request = request
        self.call_next = call_next

    async def __call__(self, request_unmarshal_result: RequestUnmarshalResult) -> Response:
        req = self.request
        # Downstream handlers read the validation outcome from scope['openapi'].
        req.scope['openapi'] = request_unmarshal_result
        response = await self.call_next(req)
        return response
# NOTE(review): `_module()` looks like the tail of a stripped registration
# decorator (e.g. `@DATASETS.register_module()` in mmseg-style code) —
# confirm against the original file; as written it is a bare call.
_module()
class UDAConcatDataset(ConcatDataset):
    """Concatenation of source/target datasets for UDA training; exposes the
    first dataset's CLASSES/PALETTE as its own."""

    def __init__(self, datasets, separate_eval=True):
        try:
            # NOTE(review): super(ConcatDataset, self) deliberately skips
            # ConcatDataset.__init__ and calls its parent's — presumably to
            # bypass a NotImplementedError raised for cityscapes-style
            # datasets (see the message below); confirm intent.
            super(ConcatDataset, self).__init__(datasets)
        except NotImplementedError as e:
            print(e)
            print('Since our program does not use the special authentication method of cityscapes, we ignore this warning')
        self.CLASSES = datasets[0].CLASSES
        self.PALETTE = datasets[0].PALETTE
        # Whether each constituent dataset is evaluated separately.
        self.separate_eval = separate_eval
        assert (separate_eval in [True, False]), f'separate_eval can only be True or False,but get {separate_eval}'
def tensors_to_np(tensors):
    """Recursively convert torch tensors to numpy arrays.

    Accepts a dict, list, or bare tensor; nested dicts are converted
    recursively, any other value is passed through unchanged.

    Raises:
        Exception: if `tensors` is not a dict, list, or torch.Tensor.
    """
    def _convert(value):
        # One element: tensor -> ndarray, dict -> recurse, else untouched.
        if isinstance(value, torch.Tensor):
            value = value.cpu().numpy()
        if type(value) is dict:
            value = tensors_to_np(value)
        return value

    if isinstance(tensors, dict):
        return {k: _convert(v) for (k, v) in tensors.items()}
    if isinstance(tensors, list):
        return [_convert(v) for v in tensors]
    if isinstance(tensors, torch.Tensor):
        return _convert(tensors)
    raise Exception(f'tensors_to_np does not support type {type(tensors)}.')
class FastAPIInstrumentor(BaseInstrumentor):
    """Instruments FastAPI apps with OpenTelemetry middleware.

    `instrument_app`/`uninstrument_app` operate on a single app instance;
    `_instrument`/`_uninstrument` monkeypatch `fastapi.FastAPI` globally.
    """

    _original_fastapi = None

    # Fix: neither method takes `self` — they were evidently @staticmethod
    # upstream and the decorators were lost; restored so instance access
    # (e.g. self.uninstrument_app below) and class access both work.
    @staticmethod
    def instrument_app(app: fastapi.FastAPI, server_request_hook: _ServerRequestHookT=None, client_request_hook: _ClientRequestHookT=None, client_response_hook: _ClientResponseHookT=None, tracer_provider=None, meter_provider=None, excluded_urls=None):
        """Attach OpenTelemetry middleware to `app` (idempotent)."""
        if (not hasattr(app, '_is_instrumented_by_opentelemetry')):
            app._is_instrumented_by_opentelemetry = False
        if (not getattr(app, '_is_instrumented_by_opentelemetry', False)):
            if (excluded_urls is None):
                excluded_urls = _excluded_urls_from_env
            else:
                excluded_urls = parse_excluded_urls(excluded_urls)
            app.add_middleware(OpenTelemetryMiddleware, excluded_urls=excluded_urls, default_span_details=_get_default_span_details, server_request_hook=server_request_hook, client_request_hook=client_request_hook, client_response_hook=client_response_hook, tracer_provider=tracer_provider, meter_provider=meter_provider)
            app._is_instrumented_by_opentelemetry = True
            # Track the app so a global _uninstrument can undo this later.
            if (app not in _InstrumentedFastAPI._instrumented_fastapi_apps):
                _InstrumentedFastAPI._instrumented_fastapi_apps.add(app)
        else:
            _logger.warning('Attempting to instrument FastAPI app while already instrumented')

    @staticmethod
    def uninstrument_app(app: fastapi.FastAPI):
        """Strip the OpenTelemetry middleware from `app` and rebuild its stack."""
        app.user_middleware = [x for x in app.user_middleware if (x.cls is not OpenTelemetryMiddleware)]
        app.middleware_stack = app.build_middleware_stack()
        app._is_instrumented_by_opentelemetry = False

    def instrumentation_dependencies(self) -> Collection[str]:
        return _instruments

    def _instrument(self, **kwargs):
        """Replace fastapi.FastAPI with an auto-instrumenting subclass."""
        self._original_fastapi = fastapi.FastAPI
        _InstrumentedFastAPI._tracer_provider = kwargs.get('tracer_provider')
        _InstrumentedFastAPI._server_request_hook = kwargs.get('server_request_hook')
        _InstrumentedFastAPI._client_request_hook = kwargs.get('client_request_hook')
        _InstrumentedFastAPI._client_response_hook = kwargs.get('client_response_hook')
        _excluded_urls = kwargs.get('excluded_urls')
        _InstrumentedFastAPI._excluded_urls = (_excluded_urls_from_env if (_excluded_urls is None) else parse_excluded_urls(_excluded_urls))
        _InstrumentedFastAPI._meter_provider = kwargs.get('meter_provider')
        fastapi.FastAPI = _InstrumentedFastAPI

    def _uninstrument(self, **kwargs):
        """Undo _instrument: uninstrument every tracked app, restore the class."""
        for instance in _InstrumentedFastAPI._instrumented_fastapi_apps:
            self.uninstrument_app(instance)
        _InstrumentedFastAPI._instrumented_fastapi_apps.clear()
        fastapi.FastAPI = self._original_fastapi
def test_emit_warning_when_event_loop_is_explicitly_requested_in_async_gen_fixture(pytester: Pytester):
    """An async generator fixture that requests `event_loop` directly should
    trigger a deprecation-style warning while the test itself still passes."""
    # Fix: the decorators inside this generated file had been garbled to
    # `_asyncio.fixture` and `.asyncio`; restored to the full forms.
    pytester.makepyfile(
        dedent(
            """\
            import pytest
            import pytest_asyncio

            @pytest_asyncio.fixture
            async def emits_warning(event_loop):
                yield

            @pytest.mark.asyncio
            async def test_uses_fixture(emits_warning):
                pass
            """
        )
    )
    result = pytester.runpytest('--asyncio-mode=strict', '-W default')
    result.assert_outcomes(passed=1, warnings=1)
    result.stdout.fnmatch_lines(['*is asynchronous and explicitly requests the "event_loop" fixture*'])
def with_qutip_qip_stub(tmp_path, monkeypatch):
    """Install a minimal fake `qutip_qip` package ahead of any real install.

    Writes a stub package (with a `circuit.QubitCircuit`) under `tmp_path`,
    prepends it to sys.path, and evicts any cached qutip modules so the stub
    is what the next import resolves.
    """
    stub_pkg = tmp_path / 'qutip_qip'
    stub_pkg.mkdir()
    (stub_pkg / '__init__.py').write_text("__version__ = 'x.y.z'")
    (stub_pkg / 'circuit.py').write_text('class QubitCircuit:\n pass')
    monkeypatch.syspath_prepend(tmp_path)
    # Drop cached imports so the stub wins over any previously-loaded module.
    for cached in ('qutip_qip', 'qutip.qip'):
        monkeypatch.delitem(sys.modules, cached, raising=False)
def parse_args():
    """Build and parse CLI arguments for knowledge-distillation segmentation
    training, create log/save dirs (distributed rank 0 only), and derive
    `args.aux` from the student backbone family."""
    parser = argparse.ArgumentParser(description='Semantic Segmentation Training With Pytorch')
    # Model / backbone selection for teacher and student.
    parser.add_argument('--teacher-model', type=str, default='deeplabv3', help='model name')
    parser.add_argument('--student-model', type=str, default='deeplabv3', help='model name')
    parser.add_argument('--student-backbone', type=str, default='resnet18', help='backbone name')
    parser.add_argument('--teacher-backbone', type=str, default='resnet101', help='backbone name')
    # Dataset settings.
    parser.add_argument('--dataset', type=str, default='citys', help='dataset name')
    parser.add_argument('--data', type=str, default='./dataset/cityscapes/', help='dataset directory')
    parser.add_argument('--crop-size', type=int, default=[512, 1024], nargs='+', help='crop image size: [height, width]')
    parser.add_argument('--workers', '-j', type=int, default=8, metavar='N', help='dataloader threads')
    parser.add_argument('--ignore-label', type=int, default=(- 1), metavar='N', help='ignore label')
    parser.add_argument('--aux', action='store_true', default=False, help='Auxiliary loss')
    # Optimization schedule.
    parser.add_argument('--batch-size', type=int, default=16, metavar='N', help='input batch size for training (default: 8)')
    parser.add_argument('--start_epoch', type=int, default=0, metavar='N', help='start epochs (default:0)')
    parser.add_argument('--max-iterations', type=int, default=40000, metavar='N', help='number of epochs to train (default: 50)')
    parser.add_argument('--lr', type=float, default=0.02, metavar='LR', help='learning rate (default: 1e-4)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=0.0001, metavar='M', help='w-decay (default: 5e-4)')
    # Distillation loss weights; a weight of 0.0 disables that loss term.
    parser.add_argument('--kd-temperature', type=float, default=1.0, help='logits KD temperature')
    parser.add_argument('--lambda-kd', type=float, default=0.0, help='lambda_kd')
    parser.add_argument('--lambda-adv', type=float, default=0.0, help='lambda adversarial loss')
    parser.add_argument('--lambda-d', type=float, default=0.0, help='lambda discriminator loss')
    parser.add_argument('--lambda-skd', type=float, default=0.0, help='lambda skd')
    parser.add_argument('--lambda-cwd-fea', type=float, default=0.0, help='lambda cwd feature')
    parser.add_argument('--lambda-cwd-logit', type=float, default=0.0, help='lambda cwd logit')
    parser.add_argument('--lambda-ifv', type=float, default=0.0, help='lambda ifvd')
    parser.add_argument('--lambda-fitnet', type=float, default=0.0, help='lambda fitnet')
    parser.add_argument('--lambda-at', type=float, default=0.0, help='lambda attention transfer')
    parser.add_argument('--lambda-psd', type=float, default=0.0, help='lambda pixel similarity KD')
    parser.add_argument('--lambda-csd', type=float, default=0.0, help='lambda category similarity KD')
    # Runtime / device.
    parser.add_argument('--gpu-id', type=str, default='0')
    parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
    parser.add_argument('--local_rank', type=int, default=0)
    # Checkpointing and logging.
    parser.add_argument('--resume', type=str, default=None, help='put the path to resuming file if needed')
    parser.add_argument('--save-dir', default='~/.torch/models', help='Directory for saving checkpoint models')
    parser.add_argument('--save-epoch', type=int, default=10, help='save model every checkpoint-epoch')
    parser.add_argument('--log-dir', default='../runs/logs/', help='Directory for saving checkpoint models')
    parser.add_argument('--log-iter', type=int, default=10, help='print log every log-iter')
    parser.add_argument('--save-per-iters', type=int, default=800, help='per iters to save')
    parser.add_argument('--val-per-iters', type=int, default=800, help='per iters to val')
    # Pretrained weights ('None' string means: no weights).
    parser.add_argument('--teacher-pretrained-base', type=str, default='None', help='pretrained backbone')
    parser.add_argument('--teacher-pretrained', type=str, default='None', help='pretrained seg model')
    parser.add_argument('--student-pretrained-base', type=str, default='None', help='pretrained backbone')
    parser.add_argument('--student-pretrained', type=str, default='None', help='pretrained seg model')
    # Validation cadence.
    parser.add_argument('--val-epoch', type=int, default=1, help='run validation every val-epoch')
    parser.add_argument('--skip-val', action='store_true', default=False, help='skip validation during training')
    args = parser.parse_args()
    # WORLD_SIZE is set by torch.distributed launchers; default to 1 GPU.
    num_gpus = (int(os.environ['WORLD_SIZE']) if ('WORLD_SIZE' in os.environ) else 1)
    # NOTE(review): dirs are only created in the distributed (>1 GPU) rank-0
    # case, and '~' is not expanded for save-dir — confirm both are intended.
    if ((num_gpus > 1) and (args.local_rank == 0)):
        if (not os.path.exists(args.log_dir)):
            os.makedirs(args.log_dir)
        if (not os.path.exists(args.save_dir)):
            os.makedirs(args.save_dir)
    # ResNet students train with the auxiliary head; MobileNet ones do not.
    if args.student_backbone.startswith('resnet'):
        args.aux = True
    elif args.student_backbone.startswith('mobile'):
        args.aux = False
    else:
        raise ValueError('no such network')
    return args
def phi_structure(subsystem: Subsystem, sia: SystemIrreducibilityAnalysis=None, distinctions: CauseEffectStructure=None, relations: Relations=None, sia_kwargs: dict=None, ces_kwargs: dict=None, relations_kwargs: dict=None) -> PhiStructure:
    """Assemble a PhiStructure for `subsystem`, computing any component
    (SIA, distinctions, relations) that was not supplied by the caller."""
    # Normalize the optional kwarg dicts (None/empty -> fresh empty dict).
    sia_kwargs = sia_kwargs if sia_kwargs else dict()
    ces_kwargs = ces_kwargs if ces_kwargs else dict()
    relations_kwargs = relations_kwargs if relations_kwargs else dict()
    if sia is None:
        sia = _sia(subsystem, **sia_kwargs)
    if distinctions is None:
        # Distinctions must be made congruent with the SIA's system state.
        distinctions = compute.ces(subsystem, **ces_kwargs).resolve_congruence(sia.system_state)
    if relations is None:
        relations = compute_relations(distinctions, **relations_kwargs)
    return PhiStructure(sia=sia, distinctions=distinctions, relations=relations)
def main():
    """Standalone entry point: poll the boat IMU against a local pypilot
    server/client pair and print attitude at most 4x per second."""
    from server import pypilotServer
    server = pypilotServer()
    client = pypilotClient(server)
    boatimu = BoatIMU(client)
    # -q suppresses the periodic attitude printout.
    quiet = ('-q' in sys.argv)
    lastprint = 0
    while True:
        t0 = time.monotonic()
        server.poll()
        client.poll()
        data = boatimu.read()
        boatimu.poll()
        if (data and (not quiet)):
            # Throttle printing to once per 250 ms.
            if ((t0 - lastprint) > 0.25):
                printline('pitch', data['pitch'], 'roll', data['roll'], 'heading', data['heading'])
                lastprint = t0
        # Sleep out the remainder of this IMU period (rate is in Hz).
        while True:
            dt = ((1 / boatimu.rate.value) - (time.monotonic() - t0))
            if (dt < 0):
                break
            if (dt > 0):
                time.sleep(dt)
def false_negative_rate(tp: torch.LongTensor, fp: torch.LongTensor, fn: torch.LongTensor, tn: torch.LongTensor, reduction: Optional[str]=None, class_weights: Optional[List[float]]=None, zero_division: Union[(str, float)]=1.0) -> torch.Tensor:
    """False negative rate from confusion-matrix counts.

    Thin wrapper: forwards the counts and reduction options to
    ``_compute_metric`` with ``_false_negative_rate`` as the metric function.
    """
    return _compute_metric(
        _false_negative_rate,
        tp,
        fp,
        fn,
        tn,
        reduction=reduction,
        class_weights=class_weights,
        zero_division=zero_division,
    )
def pauli_measurement_circuit(op: str, qubit: QuantumRegister, clbit: ClassicalRegister) -> QuantumCircuit:
    """Single-qubit circuit measuring in the basis of Pauli `op` ('X'|'Y'|'Z').

    X/Y are rotated onto the computational basis before measuring; any other
    `op` yields an empty circuit (no measurement).
    """
    circ = QuantumCircuit([qubit, clbit])
    if op == 'X':
        # H maps the X eigenbasis onto Z.
        circ.h(qubit)
        circ.measure(qubit, clbit)
    elif op == 'Y':
        # S† then H maps the Y eigenbasis onto Z.
        circ.sdg(qubit)
        circ.h(qubit)
        circ.measure(qubit, clbit)
    elif op == 'Z':
        circ.measure(qubit, clbit)
    return circ
def test_sqliteio_write_updates_existing_text_item(tmpfile, view):
    """A second write() with create_new=False must UPDATE the existing row
    (same save_id), not insert a new one, and must not create sqlar data
    for a text item."""
    item = BeeTextItem(text='foo bar')
    view.scene.addItem(item)
    item.setScale(1.3)
    item.setPos(44, 55)
    item.setZValue(0.22)
    item.setRotation(33)
    # Pretend the item already exists in the DB under id 1.
    item.save_id = 1
    io = SQLiteIO(tmpfile, view.scene, create_new=True)
    io.write()
    # Mutate every persisted property, then write again as an update.
    item.setScale(0.7)
    item.setPos(20, 30)
    item.setZValue(0.33)
    item.setRotation(100)
    item.do_flip()
    item.setPlainText('updated')
    io.create_new = False
    io.write()
    # Still exactly one row: the write was an update, not an insert.
    assert (io.fetchone('SELECT COUNT(*) from items') == (1,))
    result = io.fetchone('SELECT x, y, z, scale, rotation, flip, items.data, sqlar.data FROM items LEFT OUTER JOIN sqlar on sqlar.item_id = items.id')
    assert (result[0] == 20)
    assert (result[1] == 30)
    assert (result[2] == 0.33)
    assert (result[3] == 0.7)
    assert (result[4] == 100)
    # do_flip() stores flip as -1.
    assert (result[5] == (- 1))
    assert (json.loads(result[6]) == {'text': 'updated'})
    # Text items carry no binary payload, so the sqlar join yields NULL.
    assert (result[7] is None)
def makeUpdateMatrixGraph(qnnArch, currentUnitaries, lda, currentOutput, adjMatrix, storedStates, l, m):
    """Graph-regularized update matrix for perceptron `m` of layer `l`.

    Sums, over connected training-pair indices (upper triangle of
    `adjMatrix`), the partial trace of the commutator of the two update
    parts, weighted by the adjacency entry; then applies the i*2^(n+1)/lda
    learning-rate prefactor.
    """
    numInputQubits = qnnArch[l - 1]
    size = len(adjMatrix[0])
    # Keep the input register plus this perceptron's output qubit.
    keep = [*range(numInputQubits), numInputQubits + m]
    total = 0
    for row in range(size):
        for col in range(row, size):
            weight = adjMatrix[row][col]
            if weight == 0:
                continue
            firstPart = updateMatrixFirstPartGraph(qnnArch, currentUnitaries, storedStates[row][l - 1] - storedStates[col][l - 1], l, m)
            secondPart = updateMatrixSecondPartGraph(qnnArch, currentUnitaries, currentOutput[row] - currentOutput[col], l, m)
            traced = partialTraceKeep(qt.commutator(firstPart, secondPart), keep)
            total = total + weight * traced
    return (1j * (2 ** (numInputQubits + 1)) / lda) * total
class BatchInferenceMethod(PromptMethod):
    """Prompt method that builds one batched prompt for a list of inputs,
    runs the LM once, and post-processes the response into outputs.

    Per-call kwargs override the defaults captured in self.kwargs at
    construction time.
    """

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)

    def run(self, x: List[Union[(str, Dict)]], in_context_examples: List[Dict]=None, prompt_file_path: Optional[str]=None, **kwargs: Any) -> Union[(str, List[str])]:
        """Build prompt for `x`, query the LM, and extract the final answer(s).

        Resolution order for every option: explicit argument / kwargs entry
        first, then the constructor-time self.kwargs default.
        """
        verbose = kwargs.get('verbose', False)
        prompt = PromptBuilder.build_prompt(
            x=x,
            in_context_examples=(in_context_examples if in_context_examples else self.kwargs.get('in_context_examples', None)),
            prompt_file_path=(prompt_file_path if prompt_file_path else self.kwargs.get('prompt_file_path', None)),
            transform=(kwargs['transform'] if ('transform' in kwargs) else self.kwargs.get('transform', None)),
            extraction_words=(kwargs['extraction_words'] if ('extraction_words' in kwargs) else self.kwargs.get('extraction_words', None)))
        if verbose:
            print(f'''Prompt:: {prompt}''')
        response = self.run_lm(prompt, **kwargs)
        if verbose:
            print(f'''Response:: {response}''')
        # Post-hoc extraction/aggregation turns raw LM text into usable output.
        y = HocPoster.post_hoc(
            response,
            extract=(kwargs['extract'] if ('extract' in kwargs) else self.kwargs.get('extract', None)),
            aggregation=(kwargs['aggregation'] if ('aggregation' in kwargs) else self.kwargs.get('aggregation', None)),
            extraction_regex=(kwargs['extraction_regex'] if ('extraction_regex' in kwargs) else self.kwargs.get('extraction_regex', None)))
        if verbose:
            print(f'''Extracted y:: {y}''')
        return y
# Fix: the marker had been reduced to the bare fragment `.usefixtures('dbus')`;
# restored as the full pytest marker (the test needs a DBus session).
@pytest.mark.usefixtures('dbus')
def test_statusnotifier_defaults_vertical_bar(manager_nospawn, sni_config):
    """On a vertical (left) bar, the StatusNotifier widget's height should
    grow from 0 when an SNI window appears and shrink back when it exits."""
    screen = sni_config.screens[0]
    # Move the bar from the top edge to the left edge.
    screen.left = screen.top
    screen.top = None
    manager_nospawn.start(sni_config)
    widget = manager_nospawn.c.widget['statusnotifier']
    # No icons yet: the widget takes no space along the bar's axis.
    assert (widget.info()['height'] == 0)
    win = manager_nospawn.test_window('TestSNI', export_sni=True)
    wait_for_icon(widget, hidden=False, prop='height')
    manager_nospawn.kill_window(win)
    wait_for_icon(widget, hidden=True, prop='height')
def bert_binaryclassification_model_fn_builder(bert_config_file, init_checkpoints, args):
    """Return a TF1 Estimator model_fn for BERT binary event-type classification.

    The returned closure builds the model graph per mode (TRAIN/EVAL/PREDICT),
    optionally warm-starting from `init_checkpoints`.
    """
    def model_fn(features, labels, mode, params):
        logger.info('*** Features ***')
        # Estimator may deliver features as a dict; unpack to a tuple.
        if isinstance(features, dict):
            features = (features['words'], features['token_type_ids'], features['text_length'])
        (input_ids, token_type_ids, text_length_list) = features
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        is_testing = (mode == tf.estimator.ModeKeys.PREDICT)
        bert_config = modeling.BertConfig.from_json_file(bert_config_file)
        tag_model = bertEventType(params, bert_config)
        # PREDICT returns only predictions; TRAIN/EVAL also return losses.
        if is_testing:
            pred_ids = tag_model(input_ids, labels, text_length_list, token_type_ids, is_training, is_testing)
        else:
            (per_example_loss, loss, pred_ids) = tag_model(input_ids, labels, text_length_list, token_type_ids, is_training)
        tvars = tf.trainable_variables()
        # Warm-start matching variables from the pretrained BERT checkpoint.
        if init_checkpoints:
            (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoints)
            tf.train.init_from_checkpoint(init_checkpoints, assignment_map)
        output_spec = None
        if (mode == tf.estimator.ModeKeys.TRAIN):
            train_op = optimization.create_optimizer(loss, args.lr, params['decay_steps'], None, False)
            hook_dict = {}
            hook_dict['loss'] = loss
            hook_dict['global_steps'] = tf.train.get_or_create_global_step()
            logging_hook = tf.train.LoggingTensorHook(hook_dict, every_n_iter=args.print_log_steps)
            output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op, training_hooks=[logging_hook])
        elif (mode == tf.estimator.ModeKeys.EVAL):
            # Binarize sigmoid outputs at 0.5 before computing micro-F1.
            pred_ids = tf.where((pred_ids > 0.5), tf.ones_like(pred_ids), tf.zeros_like(pred_ids))
            print(pred_ids)
            print(labels)
            (f1_score_val_micro, f1_update_op_val_micro) = f1(labels=labels, predictions=pred_ids, num_classes=2)
            eval_metrics = {'f1_score_micro': (f1_score_val_micro, f1_update_op_val_micro)}
            eval_metrics['eval_loss'] = tf.metrics.mean(values=per_example_loss)
            output_spec = tf.estimator.EstimatorSpec(eval_metric_ops=eval_metrics, mode=mode, loss=loss)
        else:
            output_spec = tf.estimator.EstimatorSpec(mode=mode, predictions=pred_ids)
        return output_spec
    return model_fn
def ribbon():
    """Return the PyXLL ribbon entries for the Jupyter add-in.

    Yields an empty list when JUPYTER.disable_ribbon is set to a truthy
    integer in the config; otherwise the bundled ribbon XML.
    """
    cfg = get_config()
    disabled = False
    if cfg.has_option('JUPYTER', 'disable_ribbon'):
        try:
            disabled = bool(int(cfg.get('JUPYTER', 'disable_ribbon')))
        except (ValueError, TypeError):
            # Non-numeric setting: log it and fall back to showing the ribbon.
            _log.error('Unexpected value for JUPYTER.disable_ribbon.')
    if disabled:
        return []
    xml = _resource_bytes('pyxll_jupyter', 'resources/ribbon.xml').decode('utf-8')
    return [(None, xml)]
class IndexLoader():
    """Loads a ColBERT-style residual-compressed index from disk:
    codec, IVF, per-document lengths, and embedding chunks."""

    def __init__(self, index_path, use_gpu=True):
        self.index_path = index_path
        self.use_gpu = use_gpu
        self._load_codec()
        self._load_ivf()
        self._load_doclens()
        self._load_embeddings()

    def _load_codec(self):
        print_message(f'#> Loading codec...')
        self.codec = ResidualCodec.load(self.index_path)

    def _load_ivf(self):
        """Load the inverted file; prefer the optimized pid-mapped variant."""
        print_message(f'#> Loading IVF...')
        if os.path.exists(os.path.join(self.index_path, 'ivf.pid.pt')):
            (ivf, ivf_lengths) = torch.load(os.path.join(self.index_path, 'ivf.pid.pt'), map_location='cpu')
        else:
            # Fall back to the raw IVF and optimize it on the fly.
            assert os.path.exists(os.path.join(self.index_path, 'ivf.pt'))
            (ivf, ivf_lengths) = torch.load(os.path.join(self.index_path, 'ivf.pt'), map_location='cpu')
            (ivf, ivf_lengths) = optimize_ivf(ivf, ivf_lengths, self.index_path)
        if False:  # kept for reference: plain nested-list representation
            ivf = ivf.tolist()
            ivf = [ivf[offset:endpos] for (offset, endpos) in lengths2offsets(ivf_lengths)]
        else:
            ivf = StridedTensor(ivf, ivf_lengths, use_gpu=self.use_gpu)
        self.ivf = ivf

    def _load_doclens(self):
        doclens = []
        print_message('#> Loading doclens...')
        for chunk_idx in tqdm.tqdm(range(self.num_chunks)):
            with open(os.path.join(self.index_path, f'doclens.{chunk_idx}.json')) as f:
                chunk_doclens = ujson.load(f)
                doclens.extend(chunk_doclens)
        self.doclens = torch.tensor(doclens)

    def _load_embeddings(self):
        self.embeddings = ResidualCodec.Embeddings.load_chunks(self.index_path, range(self.num_chunks), self.num_embeddings)

    # Fix: metadata/num_chunks/num_embeddings are consumed as attributes
    # (`range(self.num_chunks)`, `self.metadata['num_chunks']`) but were
    # defined as plain methods — the @property decorators were evidently
    # stripped; restored.  The bare `except` is narrowed to AttributeError,
    # which is the only exception the lazy-cache check can raise.
    @property
    def metadata(self):
        """Lazily read and cache metadata.json."""
        try:
            self._metadata
        except AttributeError:
            with open(os.path.join(self.index_path, 'metadata.json')) as f:
                self._metadata = ujson.load(f)
        return self._metadata

    @property
    def config(self):
        raise NotImplementedError()

    @property
    def num_chunks(self):
        return self.metadata['num_chunks']

    @property
    def num_embeddings(self):
        return self.metadata['num_embeddings']
class TrainerControl():
    """Mutable flags through which callbacks steer the training loop.

    The training/epoch flags persist until explicitly reset; the step-level
    flags (save/evaluate/log) are one-shot and cleared every step.
    """

    should_training_stop: bool = False
    should_epoch_stop: bool = False
    should_save: bool = False
    should_evaluate: bool = False
    should_log: bool = False

    def _new_training(self):
        # A fresh training run clears the stop-training request.
        self.should_training_stop = False

    def _new_epoch(self):
        # A fresh epoch clears the stop-epoch request.
        self.should_epoch_stop = False

    def _new_step(self):
        # Step-level flags are one-shot: clear them all at step start.
        for flag in ('should_save', 'should_evaluate', 'should_log'):
            setattr(self, flag, False)
def polygon_for_parent(polygon, parent):
    """Clip a child polygon to its parent's outline.

    Returns the original point list when the child lies fully within the
    parent, a repaired/clipped coordinate list otherwise, or None when the
    intersection is empty.
    """
    childp = Polygon(polygon)
    if isinstance(parent, PageType):
        # Page-level parent: clip to the Border when annotated, otherwise
        # to the full image frame.
        if parent.get_Border():
            parentp = Polygon(polygon_from_points(parent.get_Border().get_Coords().points))
        else:
            parentp = Polygon([[0, 0], [0, parent.get_imageHeight()], [parent.get_imageWidth(), parent.get_imageHeight()], [parent.get_imageWidth(), 0]])
    else:
        parentp = Polygon(polygon_from_points(parent.get_Coords().points))
    if childp.within(parentp):
        # Fully contained: keep the caller's point list untouched.
        return polygon
    # Repair self-intersections etc. before computing the intersection.
    childp = make_valid(childp)
    parentp = make_valid(parentp)
    interp = childp.intersection(parentp)
    if (interp.is_empty or (interp.area == 0.0)):
        return None
    if (interp.type == 'GeometryCollection'):
        # Drop zero-area artifacts (points/lines) the intersection may produce.
        interp = unary_union([geom for geom in interp.geoms if (geom.area > 0)])
    if (interp.type == 'MultiPolygon'):
        # NOTE(review): convex hull merges disjoint pieces into one polygon and
        # may overshoot the true clipped area — confirm this is intended.
        interp = interp.convex_hull
    if (interp.minimum_clearance < 1.0):
        # Nearly-degenerate vertices: snap to integer pixels and re-validate.
        interp = asPolygon(np.round(interp.exterior.coords))
        interp = make_valid(interp)
    # Drop the closing coordinate (identical to the first).
    return interp.exterior.coords[:(- 1)]
class SystemAccount(Base):
    """Login account attached to a Party, with an activate/enable lifecycle."""
    __tablename__ = 'systemaccount'
    id = Column(Integer, primary_key=True)
    # Single-table-inheritance discriminator column.
    discriminator = Column('row_type', String(40))
    __mapper_args__ = {'polymorphic_identity': 'systemaccount', 'polymorphic_on': discriminator}
    owner_party_id = Column(Integer, ForeignKey(Party.id))
    owner = relationship(Party)
    # Set on activation; None while the registration is still pending.
    registration_date = Column(DateTime)
    account_enabled = Column(Boolean, nullable=False, default=False)
    failed_logins = Column(Integer, nullable=False, default=0)

    def registration_activated(self):
        # NOTE(review): referenced without () in status() below — presumably
        # decorated @property in the original; confirm.
        return (self.registration_date is not None)

    def status(self):
        """Map the account flags onto a status object.

        NOTE(review): also referenced without () below — presumably @property.
        """
        if (not self.registration_activated):
            return AccountNotActivated()
        if (not self.account_enabled):
            return AccountDisabled()
        return AccountActive()

    def assert_account_live(self):
        """Raise AccountNotActiveException unless the account is active."""
        if (not self.status.is_active()):
            raise AccountNotActiveException(self.status)

    def activate(self):
        """Mark registration complete and enable the account."""
        self.registration_date = datetime.now()
        self.enable()

    def cancel_reservation(self):
        """Delete a still-reserved account; refuses if it was already enabled."""
        if self.account_enabled:
            raise ProgrammerError('attempted to cancel a reserved account which is already active')
        Session.delete(self)

    def enable(self):
        self.account_enabled = True

    def disable(self):
        self.account_enabled = False
def xls2ld(fn, header=None, sheetname=True, keymap=None):
    """Load an Excel workbook into a list of row dicts.

    fn        -- path to the .xls/.xlsx file.
    header    -- optional explicit column names; when omitted, the first row
                 of each sheet is used as the header.
    sheetname -- when truthy, add a 'sheetname' key to every row dict.
    keymap    -- accepted for backward compatibility; currently unused.

    Raises Exception when the xlrd module is not installed.
    """
    try:
        import xlrd
    except ImportError:
        raise Exception('\n\t\t\tIn order to load Excel files, you need to install the xlrd python module. Run:\n\t\t\tpip install xlrd\n\t\t\t')
    # Mutable default arguments ([] / {}) would leak state between calls;
    # use None sentinels instead.
    header = ([] if (header is None) else header)
    headerset = (True if len(header) else False)
    f = xlrd.open_workbook(fn)
    ld = []

    def _boot_xls_sheet(sheet, header=None):
        # Convert one worksheet into a list of row dicts.
        header = ([] if (header is None) else header)
        ld2 = []
        for y in range(sheet.nrows):
            if (not header):
                # No header yet: treat this (first) row as the column names.
                for xi in range(sheet.ncols):
                    header.append(sheet.cell_value(rowx=y, colx=xi))
                continue
            d = {}
            for key in header:
                try:
                    d[key] = sheet.cell_value(rowx=y, colx=header.index(key))
                except (IndexError, ValueError):
                    # Row shorter than the header (or key absent): skip the cell.
                    pass
            if len(d):
                if sheetname:
                    d['sheetname'] = sheet.name
                ld2.append(d)
        return ld2

    if (f.nsheets > 1):
        # Iterate with a distinct name: the original reused `sheetname` as the
        # loop variable, clobbering the boolean flag and forcing the
        # 'sheetname' key on even when the caller passed sheetname=False.
        for name in sorted(f.sheet_names()):
            sheet = f.sheet_by_name(name)
            ld.extend(_boot_xls_sheet(sheet, header=(header if headerset else [])))
    else:
        sheet = f.sheet_by_index(0)
        ld.extend(_boot_xls_sheet(sheet, header=(header if headerset else [])))
    return ld
def format_with_duration(timestamp: (Timestamp | None), other_timestamp: (Timestamp | None)=None, max_units: int=2) -> (str | None):
    """Render `timestamp` as a Discord timestamp followed by the humanized
    delta to `other_timestamp` (defaulting to now); None passes through."""
    if (timestamp is None):
        return None
    reference = (arrow.utcnow() if (other_timestamp is None) else other_timestamp)
    delta = humanize_delta(timestamp, reference, max_units=max_units)
    return f'{discord_timestamp(timestamp)} ({delta})'
def train_and_evaluate(dataset_name, batch_size=100, n_epochs=5, learning_rate=0.0001, z_dim=2, pixel=64, load_model=False, w=1, scale=False):
    """Build the siamese model for `dataset_name`, optionally resume from the
    best checkpoint, then train it and run the final evaluation."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    (net, opt) = get_instance_model_optimizer(device, learning_rate, z_dim, pixel)
    (train_loader, test_loader, mu, std) = data_loader(dataset_name, pixel, batch_size)
    if not load_model:
        # Fresh run: start from epoch 0 with empty loss histories.
        start_epoch = 0
        valid_loss_min = np.inf
        epoch_train_loss = []
        epoch_valid_loss = []
    else:
        # Resume from the best checkpoint for this dataset / z_dim / w combo.
        ckpt_name = f'best_model_Harmony_fc{dataset_name}_z_dim_{z_dim}_w_{w}.pt'
        (net, opt, start_epoch, epoch_train_loss, epoch_valid_loss, valid_loss_min) = load_ckp(net, opt, ckpt_name)
    train_model(dataset_name, net, opt, train_loader, test_loader, device, start_epoch, n_epochs, epoch_train_loss, epoch_valid_loss, valid_loss_min, z_dim, pixel, batch_size, w, scale)
    evaluate_model(dataset_name, net, z_dim, pixel, batch_size, device, scale)
def parallel_map(task, values, task_args=None, task_kwargs=None, reduce_func=None, map_kw=None, progress_bar=None, progress_bar_kwargs={}):
    """Run ``task(value, *task_args, **task_kwargs)`` for each entry of
    ``values`` on a process pool.

    reduce_func -- when given, results are folded through it as they arrive
                   (and ``results`` stays None); it may return a remaining
                   count <= 0 to stop early.
    map_kw      -- dict with 'timeout', 'num_cpus', 'fail_fast' (normalized
                   by _read_map_kw).
    Returns the list of results (or None when reduce_func is used).

    NOTE(review): ``progress_bar_kwargs={}`` is a mutable default argument —
    harmless while never mutated, but fragile.
    """
    if (task_args is None):
        task_args = ()
    if (task_kwargs is None):
        task_kwargs = {}
    map_kw = _read_map_kw(map_kw)
    # Absolute wall-clock deadline for the whole map.
    end_time = (map_kw['timeout'] + time.time())
    progress_bar = progress_bars[progress_bar](len(values), **progress_bar_kwargs)
    errors = {}      # index -> exception raised by that task
    finished = []    # non-empty acts as an early-stop flag (set by callback)
    if (reduce_func is not None):
        results = None

        def result_func(_, value):
            return reduce_func(value)
    else:
        results = ([None] * len(values))
        result_func = results.__setitem__

    def _done_callback(future):
        # Runs in the main process when a future completes.
        if (not future.cancelled()):
            ex = future.exception()
            if isinstance(ex, KeyboardInterrupt):
                return
            if isinstance(ex, Exception):
                errors[future._i] = ex
            else:
                result = future.result()
                remaining_ntraj = result_func(future._i, result)
                # reduce_func may signal "enough collected" with a count <= 0.
                if ((remaining_ntraj is not None) and (remaining_ntraj <= 0)):
                    finished.append(True)
        progress_bar.update()

    if (sys.version_info >= (3, 7)):
        # mp_context only supported from Python 3.7 on.
        ctx_kw = {'mp_context': mp_context}
    else:
        ctx_kw = {}
    # Flag visible to worker code so it can avoid nested parallelism.
    os.environ['QUTIP_IN_PARALLEL'] = 'TRUE'
    try:
        with concurrent.futures.ProcessPoolExecutor(max_workers=map_kw['num_cpus'], **ctx_kw) as executor:
            waiting = set()
            i = 0
            while (i < len(values)):
                # Throttle submissions: wait for a slot when the pool is full.
                if (len(waiting) >= map_kw['num_cpus']):
                    timeout = max(0, (end_time - time.time()))
                    (_done, waiting) = concurrent.futures.wait(waiting, timeout=timeout, return_when=concurrent.futures.FIRST_COMPLETED)
                # Stop on deadline, fail-fast error, or early finish.
                if ((time.time() >= end_time) or (errors and map_kw['fail_fast']) or finished):
                    break
                while ((len(waiting) < map_kw['num_cpus']) and (i < len(values))):
                    value = values[i]
                    future = executor.submit(task, *((value,) + task_args), **task_kwargs)
                    # Remember which input this future belongs to.
                    future._i = i
                    future.add_done_callback(_done_callback)
                    waiting.add(future)
                    i += 1
            # Drain whatever is still in flight, bounded by the deadline.
            timeout = max(0, (end_time - time.time()))
            concurrent.futures.wait(waiting, timeout=timeout)
    finally:
        os.environ['QUTIP_IN_PARALLEL'] = 'FALSE'
    progress_bar.finished()
    if (errors and map_kw['fail_fast']):
        # Fail-fast: re-raise the first recorded error directly.
        raise list(errors.values())[0]
    elif errors:
        raise MapExceptions(f'{len(errors)} iterations failed in parallel_map', errors, results)
    return results
class YicesInstaller(SolverInstaller):
    """Installer for the Yices SMT solver plus its yicespy Python bindings."""

    SOLVER = 'yices'

    def __init__(self, install_dir, bindings_dir, solver_version, mirror_link=None, yicespy_version='HEAD'):
        archive_name = ('Yices-%s.tar.gz' % solver_version)
        # Download link restored: the literal was truncated in this copy.
        # GitHub's tag archive for Yices-X extracts to yices2-Yices-X, matching
        # extract_path below. TODO(review): confirm against upstream.
        native_link = 'https://github.com/SRI-CSL/yices2/archive/{archive_name}'
        SolverInstaller.__init__(self, install_dir=install_dir, bindings_dir=bindings_dir, solver_version=solver_version, archive_name=archive_name, native_link=native_link, mirror_link=mirror_link)
        self.extract_path = os.path.join(self.base_dir, ('yices2-Yices-%s' % self.solver_version))
        # Where the compiled solver gets installed (used by yicespy's setup.py).
        self.yices_path = os.path.join(self.bindings_dir, 'yices_bin')
        self.yicespy_git_version = yicespy_version

    def install_yicespy(self):
        """Download, build and install the yicespy bindings as a wheel."""
        yicespy_git_version = self.yicespy_git_version
        yicespy_base_name = 'yicespy'
        yicespy_archive_name = ('%s.tar.gz' % yicespy_base_name)
        yicespy_archive = os.path.join(self.base_dir, yicespy_archive_name)
        yicespy_dir_path = os.path.join(self.base_dir, ((yicespy_base_name + '-') + yicespy_git_version))
        # Download link restored: the literal was truncated in this copy.
        # TODO(review): confirm against upstream.
        yicespy_download_link = ('https://codeload.github.com/pysmt/yicespy/tar.gz/%s' % yicespy_git_version)
        SolverInstaller.do_download(yicespy_download_link, yicespy_archive)
        SolverInstaller.clean_dir(yicespy_dir_path)
        SolverInstaller.untar(yicespy_archive, self.base_dir)
        SolverInstaller.run_python(('setup.py --yices-dir=%s -- build_ext bdist_wheel --dist-dir=%s ' % (self.yices_path, self.base_dir)), directory=yicespy_dir_path)
        wheel_file = glob.glob((os.path.join(self.base_dir, 'yicespy') + '*.whl'))[0]
        SolverInstaller.unzip(wheel_file, self.bindings_dir)

    def compile(self):
        """Configure, build and install Yices, then build the bindings."""
        SolverInstaller.clean_dir(self.yices_path)
        SolverInstaller.run('autoconf', directory=self.extract_path)
        SolverInstaller.run(('bash configure --prefix %s' % self.yices_path), directory=self.extract_path)
        SolverInstaller.run('make', directory=self.extract_path)
        SolverInstaller.run('make install', directory=self.extract_path)
        self.install_yicespy()

    def get_installed_version(self):
        return self.get_installed_version_script(self.bindings_dir, 'yices')
@pytest.mark.end_to_end()
def test_ini_markers_whitespace(runner, tmp_path):
    """A marker declared with trailing whitespace in pyproject.toml must still
    be registered and usable on a task.

    The pytest/pytask decorators were garbled (stripped '@') in this copy and
    are restored here; the embedded task source is re-indented for dedent.
    """
    tmp_path.joinpath('pyproject.toml').write_text("[tool.pytask.ini_options]\nmarkers = {'a1 ' = 'this is a whitespace marker'}")
    source = '\n    import pytask\n\n    @pytask.mark.a1\n    def task_markers():\n        assert True\n    '
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    result = runner.invoke(cli, [tmp_path.as_posix()])
    assert (result.exit_code == ExitCode.OK)
    assert ('1  Succeeded' in result.output)
class MP4Cover(bytes):
    """Cover-art payload of an MP4 'covr' atom: raw image bytes plus an
    image-format tag (JPEG or PNG)."""

    FORMAT_JPEG = AtomDataType.JPEG
    FORMAT_PNG = AtomDataType.PNG

    def __new__(cls, data, *args, **kwargs):
        # Extra positional/keyword args (imageformat) are handled by __init__.
        return bytes.__new__(cls, data)

    def __init__(self, data, imageformat=FORMAT_JPEG):
        self.imageformat = imageformat

    # Hashing ignores the format tag, matching bytes semantics.
    __hash__ = bytes.__hash__

    def __eq__(self, other):
        if not isinstance(other, MP4Cover):
            # Against plain bytes (or anything else): compare payload only.
            return bytes(self) == other
        same_payload = (bytes(self) == bytes(other))
        same_format = (self.imageformat == other.imageformat)
        return same_payload and same_format

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return f'{type(self).__name__}({bytes(self)!r}, {AtomDataType(self.imageformat)!r})'
def find_role_replicas(app: specs.AppDef, role_name: Optional[str]) -> List[Tuple[(str, int)]]:
    """Return (role_name, replica_index) pairs for every role in ``app``
    whose name matches ``role_name`` (all roles when it is None)."""
    matching = (role for role in app.roles if ((role_name is None) or (role_name == role.name)))
    return [(role.name, idx) for role in matching for idx in range(role.num_replicas)]
def get_image_metadata(file_path):
    """Sniff an image file's type and dimensions from its header bytes.

    Supports GIF, PNG (current and pre-IHDR layout), JPEG, BMP, TIFF and ICO.
    Raises UnknownImageFormat when the header cannot be decoded.
    """
    size = os.path.getsize(file_path)
    # NOTE(review): `input` shadows the builtin of the same name.
    with open(file_path, 'rb') as input:
        height = (- 1)
        width = (- 1)
        # 26 bytes are enough to classify every supported format.
        data = input.read(26)
        msg = ' raised while trying to decode as JPEG.'
        if ((size >= 10) and (data[:6] in (b'GIF87a', b'GIF89a'))):
            # GIF: little-endian 16-bit width/height at offset 6.
            imgtype = GIF
            (w, h) = struct.unpack('<HH', data[6:10])
            width = int(w)
            height = int(h)
        elif ((size >= 24) and data.startswith(b'\x89PNG\r\n\x1a\n') and (data[12:16] == b'IHDR')):
            # Standard PNG: big-endian 32-bit width/height inside the IHDR chunk.
            imgtype = PNG
            (w, h) = struct.unpack('>LL', data[16:24])
            width = int(w)
            height = int(h)
        elif ((size >= 16) and data.startswith(b'\x89PNG\r\n\x1a\n')):
            # Older PNG layout: dimensions directly after the signature.
            imgtype = PNG
            (w, h) = struct.unpack('>LL', data[8:16])
            width = int(w)
            height = int(h)
        elif ((size >= 2) and data.startswith(b'\xff\xd8')):
            # JPEG: walk the marker segments until a SOF0..SOF3 frame header.
            imgtype = JPEG
            input.seek(0)
            input.read(2)
            b = input.read(1)
            try:
                # 0xDA = Start Of Scan: stop scanning there.
                while (b and (ord(b) != 218)):
                    # Skip to the next 0xFF marker prefix, then past fill bytes.
                    while (ord(b) != 255):
                        b = input.read(1)
                    while (ord(b) == 255):
                        b = input.read(1)
                    if ((ord(b) >= 192) and (ord(b) <= 195)):
                        # SOF marker: skip length+precision (3 bytes), read h, w.
                        input.read(3)
                        (h, w) = struct.unpack('>HH', input.read(4))
                        break
                    else:
                        # Skip this segment using its declared length.
                        input.read((int(struct.unpack('>H', input.read(2))[0]) - 2))
                    b = input.read(1)
                width = int(w)
                height = int(h)
            except struct.error:
                raise UnknownImageFormat(('StructError' + msg))
            except ValueError:
                raise UnknownImageFormat(('ValueError' + msg))
            except Exception as e:
                raise UnknownImageFormat((e.__class__.__name__ + msg))
        elif ((size >= 26) and data.startswith(b'BM')):
            # BMP: header size at offset 14 selects the DIB layout.
            imgtype = 'BMP'
            headersize = struct.unpack('<I', data[14:18])[0]
            if (headersize == 12):
                # BITMAPCOREHEADER: 16-bit unsigned dimensions.
                (w, h) = struct.unpack('<HH', data[18:22])
                width = int(w)
                height = int(h)
            elif (headersize >= 40):
                # BITMAPINFOHEADER+: 32-bit signed; negative height = top-down.
                (w, h) = struct.unpack('<ii', data[18:26])
                width = int(w)
                height = abs(int(h))
            else:
                raise UnknownImageFormat(('Unkown DIB header size:' + str(headersize)))
        elif ((size >= 8) and (data[:4] in (b'II*\x00', b'MM\x00*'))):
            # TIFF: scan the first IFD for ImageWidth (256) / ImageLength (257).
            imgtype = TIFF
            byteOrder = data[:2]
            # NOTE(review): byteOrder is bytes but compared to the str 'MM', so
            # this always picks little-endian '<' — confirm intended.
            boChar = ('>' if (byteOrder == 'MM') else '<')
            # field type id -> (byte size, struct format char)
            tiffTypes = {1: (1, (boChar + 'B')), 2: (1, (boChar + 'c')), 3: (2, (boChar + 'H')), 4: (4, (boChar + 'L')), 5: (8, (boChar + 'LL')), 6: (1, (boChar + 'b')), 7: (1, (boChar + 'c')), 8: (2, (boChar + 'h')), 9: (4, (boChar + 'l')), 10: (8, (boChar + 'll')), 11: (4, (boChar + 'f')), 12: (8, (boChar + 'd'))}
            ifdOffset = struct.unpack((boChar + 'L'), data[4:8])[0]
            try:
                countSize = 2
                input.seek(ifdOffset)
                ec = input.read(countSize)
                ifdEntryCount = struct.unpack((boChar + 'H'), ec)[0]
                # Each IFD entry is 12 bytes: tag(2) type(2) count(4) value(4).
                ifdEntrySize = 12
                for i in range(ifdEntryCount):
                    entryOffset = ((ifdOffset + countSize) + (i * ifdEntrySize))
                    input.seek(entryOffset)
                    tag = input.read(2)
                    tag = struct.unpack((boChar + 'H'), tag)[0]
                    if ((tag == 256) or (tag == 257)):
                        # NOTE(review): `type` shadows the builtin.
                        type = input.read(2)
                        type = struct.unpack((boChar + 'H'), type)[0]
                        if (type not in tiffTypes):
                            raise UnknownImageFormat(('Unkown TIFF field type:' + str(type)))
                        typeSize = tiffTypes[type][0]
                        typeChar = tiffTypes[type][1]
                        # Value field starts 8 bytes into the entry.
                        input.seek((entryOffset + 8))
                        value = input.read(typeSize)
                        value = int(struct.unpack(typeChar, value)[0])
                        if (tag == 256):
                            width = value
                        else:
                            height = value
                    if ((width > (- 1)) and (height > (- 1))):
                        break
            except Exception as e:
                raise UnknownImageFormat(str(e))
        elif (size >= 2):
            # Fallback: try ICO (reserved=0, type=1, then directory entries).
            imgtype = 'ICO'
            input.seek(0)
            reserved = input.read(2)
            if (0 != struct.unpack('<H', reserved)[0]):
                raise UnknownImageFormat(FILE_UNKNOWN)
            format = input.read(2)
            assert (1 == struct.unpack('<H', format)[0])
            num = input.read(2)
            num = struct.unpack('<H', num)[0]
            if (num > 1):
                import warnings
                warnings.warn('ICO File contains more than one image')
            # Single-byte dimensions of the first image (0 means 256 in ICO,
            # not handled here).
            w = input.read(1)
            h = input.read(1)
            width = ord(w)
            height = ord(h)
        else:
            raise UnknownImageFormat(FILE_UNKNOWN)
    return Image(path=file_path, type=imgtype, file_size=size, width=width, height=height)
@resource('/v1/organization/<orgname>/robots/<robot_shortname>')
@path_param('orgname', 'The name of the organization')
@path_param('robot_shortname', 'The short name for the robot, without any user or organization prefix')
@related_user_resource(UserRobot)
class OrgRobot(ApiResource):
    """API resource for managing a single robot account inside an organization.

    NOTE(review): every decorator on this class and its methods was garbled
    (stripped '@name') in this copy; they are restored here following the
    standard Quay endpoint conventions — confirm against the original.
    """
    schemas = {'CreateRobot': CREATE_ROBOT_SCHEMA}

    @require_scope(scopes.ORG_ADMIN)
    @nickname('getOrgRobot')
    def get(self, orgname, robot_shortname):
        """Return the robot (including its token) for org admins."""
        permission = AdministerOrganizationPermission(orgname)
        if (permission.can() or allow_if_superuser()):
            robot = model.get_org_robot(robot_shortname, orgname)
            return robot.to_dict(include_metadata=True, include_token=True)
        raise Unauthorized()

    @require_scope(scopes.ORG_ADMIN)
    @nickname('createOrgRobot')
    @max_json_size(ROBOT_MAX_SIZE)
    @validate_json_request('CreateRobot', optional=True)
    def put(self, orgname, robot_shortname):
        """Create the named robot in the organization."""
        permission = AdministerOrganizationPermission(orgname)
        if (permission.can() or allow_if_superuser()):
            create_data = (request.get_json(silent=True) or {})
            try:
                robot = model.create_org_robot(robot_shortname, orgname, create_data.get('description'), create_data.get('unstructured_metadata'))
            except InvalidRobotException as e:
                raise request_error(message=str(e))
            log_action('create_robot', orgname, {'robot': robot_shortname, 'description': create_data.get('description'), 'unstructured_metadata': create_data.get('unstructured_metadata')})
            return (robot.to_dict(include_metadata=True, include_token=True), 201)
        raise Unauthorized()

    @require_scope(scopes.ORG_ADMIN)
    @nickname('deleteOrgRobot')
    def delete(self, orgname, robot_shortname):
        """Delete the robot unless it is referenced by a repository mirror."""
        permission = AdministerOrganizationPermission(orgname)
        if (permission.can() or allow_if_superuser()):
            robot_username = format_robot_username(orgname, robot_shortname)
            if (not model.robot_has_mirror(robot_username)):
                model.delete_robot(robot_username)
                log_action('delete_robot', orgname, {'robot': robot_shortname})
                return ('', 204)
            else:
                raise request_error(message='Robot is being used by a mirror')
        raise Unauthorized()
def test_expired_membership_with_overlapping_payments():
    """A canceled membership whose payment windows cover 'now' must be
    flipped back to ACTIVE by the status check."""
    payments = [
        ('XXYYZZ', datetime.datetime(2019, 10, 10, 1, 4, 43, tzinfo=timezone.utc), datetime.datetime(2020, 10, 10, 1, 4, 43, tzinfo=timezone.utc)),
        ('ABCABCABC', datetime.datetime(2020, 1, 1, 1, 4, 43, tzinfo=timezone.utc), datetime.datetime(2021, 1, 1, 1, 4, 43, tzinfo=timezone.utc)),
    ]
    with time_machine.travel('2020-10-10 10:00:00', tick=False):
        membership = MembershipFactory(status=MembershipStatus.CANCELED)
        for (order_code, start, end) in payments:
            # payment_date coincides with the period start in both fixtures.
            membership.add_pretix_payment(organizer='python-italia', event='pycon-demo', order_code=order_code, total=1000, status=PaymentStatus.PAID, payment_date=start, period_start=start, period_end=end)
        membership.save()
        membership_check_status({})
        refreshed = Membership.objects.get(id=membership.id)
        assert (refreshed.status == MembershipStatus.ACTIVE)
def test_admin_player_kick_last(solo_two_world_session, flask_app, mocker, mock_audit):
    """Kicking the last player must delete the whole session (and its worlds,
    memberships and actions) while leaving the user record intact."""
    mock_emit = mocker.patch('flask_socketio.emit')
    user = database.User.get_by_id(1234)
    sa = MagicMock()
    sa.get_current_user.return_value = user
    session = database.MultiplayerSession.get_by_id(1)
    # Perform the kick as the user themselves (self-kick of the last member).
    with flask_app.test_request_context():
        session_admin.admin_player(sa, 1, 1234, SessionAdminUserAction.KICK.value)
    # Session-scoped tables must now be empty ...
    for table in [database.MultiplayerSession, database.World, database.MultiplayerMembership, database.WorldAction]:
        assert (list(table.select()) == [])
    # ... but the user itself survives.
    assert (database.User.get_by_id(1234) == user)
    # Clients are notified once with the emptied session metadata.
    mock_emit.assert_called_once_with('multiplayer_session_meta_update', {'id': 1, 'name': 'Debug', 'visibility': 'visible', 'users_list': [], 'worlds': [], 'game_details': {'seed_hash': '55SQZAV4', 'spoiler': True, 'word_hash': 'Screw Omega Mines'}, 'generation_in_progress': None, 'allowed_games': ANY, 'allow_coop': False, 'allow_everyone_claim_world': False}, room='multiplayer-session-1', namespace='/')
    mock_audit.assert_called_once_with(sa, session, 'Left session')
class Wing_Loss(torch.nn.Module):
    """Wing loss (Feng et al., 2018) for landmark regression.

    For error x = |prediction - target|:
        x <  w : w * ln(1 + x / eps)   (logarithmic region, strong gradients)
        x >= w : x - C                 (L1 region)
    with C = w - w * ln(1 + w / eps) making the two pieces continuous at w.
    """

    def __init__(self, w=10, eps=2):
        super(Wing_Loss, self).__init__()
        self.w = w
        self.eps = eps
        # Offset so both pieces meet at |x| == w.
        self.C = (w - (w * np.log((1 + (w / eps)))))

    def forward(self, prediction, target):
        """Element-wise wing loss, same shape as the inputs.

        The original built the output with ``new_zeros(prediction.shape[0])``
        and nonzero()-index assignment, which only works for 1-D tensors;
        ``torch.where`` gives identical values for 1-D and correct behavior
        for any shape.
        """
        differ = (prediction - target).abs()
        return torch.where(differ < self.w,
                           self.w * torch.log((differ / self.eps) + 1),
                           differ - self.C)
def extract_archive(archive, solver, put_inside=False):
    """Extract a .tar.gz or .zip solver archive under ./solvers and rename the
    archive's top-level directory to ``solver``.

    put_inside -- when True, extract directly into solvers/<solver> and skip
                  the rename step.
    """
    print('extracting {0}'.format(archive))
    root = os.path.join('solvers', (solver if put_inside else ''))
    # Top-level directory found inside the archive (None when undetermined —
    # the original left this unbound and crashed with NameError in that case).
    directory = None
    if archive.endswith('.tar.gz'):
        if os.path.exists(archive[:(- 7)]):
            shutil.rmtree(archive[:(- 7)])
        # Context manager closes the handle the original leaked.
        with tarfile.open(archive, 'r:gz') as tfile:
            tfile.extractall(root)
            for name in tfile.getnames():
                # Skip hidden helper entries like './.github'.
                if (not name.startswith('./.')):
                    directory = name
                    break
    elif archive.endswith('.zip'):
        if os.path.exists(archive[:(- 4)]):
            shutil.rmtree(archive[:(- 4)])
        with zipfile.ZipFile(archive, 'r') as myzip:
            myzip.extractall(root)
            directory = myzip.namelist()[0]
            directory = directory.rstrip('/').split('/')[0]
    if ((not put_inside) and (directory is not None) and (directory != solver)):
        # Normalize solvers/<archive-top-dir> -> solvers/<solver>.
        if os.path.exists(os.path.join('solvers', solver)):
            shutil.rmtree(os.path.join('solvers', solver))
        shutil.move(os.path.join('solvers', directory), os.path.join('solvers', solver))
class TransformValuesMapping(Feature):
    """FunctionGraph feature carrying a value-variable -> transform mapping."""

    def __init__(self, values_to_transforms):
        # Defensive copy so later caller-side mutation cannot leak in.
        self.values_to_transforms = values_to_transforms.copy()

    def on_attach(self, fgraph):
        """Publish the mapping on the graph; refuse a second attachment."""
        already_attached = hasattr(fgraph, 'values_to_transforms')
        if already_attached:
            raise AlreadyThere()
        fgraph.values_to_transforms = self.values_to_transforms
class DataConfig(object):
    """Plain configuration holder for the input pipeline."""

    def __init__(self, image_height, shuffle_buffer_size, read_buffer_size_bytes, num_prefetch):
        # Target image height in pixels.
        self.image_height = image_height
        # Number of examples held in the shuffle buffer.
        self.shuffle_buffer_size = shuffle_buffer_size
        # Read-ahead buffer size for file I/O, in bytes.
        self.read_buffer_size_bytes = read_buffer_size_bytes
        # Number of batches to prefetch ahead of consumption.
        self.num_prefetch = num_prefetch
def test_equal_diag():
    """All diagonal quadpotential representations (vector, inverse vector,
    dense matrix, inverse dense, sparse Cholesky) must agree on velocity and
    energy for the same diagonal mass matrix."""
    # Seeded RNG: the draw order below must not change or the fixtures differ.
    np.random.seed(42)
    for _ in range(3):
        diag = np.random.rand(5)
        x = floatX(np.random.randn(5))
        # Same metric, four equivalent parameterizations (is_cov flag flips
        # between the matrix and its inverse).
        pots = [quadpotential.quad_potential(diag, False), quadpotential.quad_potential((1.0 / diag), True), quadpotential.quad_potential(np.diag(diag), False), quadpotential.quad_potential(np.diag((1.0 / diag)), True)]
        if quadpotential.chol_available:
            # Sparse variant only when a sparse Cholesky backend is installed.
            diag_ = scipy.sparse.csc_matrix(np.diag((1.0 / diag)))
            pots.append(quadpotential.quad_potential(diag_, True))
        # Reference values computed directly from the inverse mass matrix.
        v = np.diag((1.0 / diag)).dot(x)
        e = (x.dot(np.diag((1.0 / diag)).dot(x)) / 2)
        for pot in pots:
            v_ = pot.velocity(x)
            e_ = pot.energy(x)
            npt.assert_allclose(v_, v, rtol=1e-06)
            npt.assert_allclose(e_, e, rtol=1e-06)
def entry_point_ensemble_folders():
    """CLI entry point: parse the ensembling arguments and dispatch to
    ensemble_folders."""
    cli = argparse.ArgumentParser()
    cli.add_argument('-i', nargs='+', type=str, required=True, help='list of input folders')
    cli.add_argument('-o', type=str, required=True, help='output folder')
    cli.add_argument('-np', type=int, required=False, default=default_num_processes, help=f'Numbers of processes used for ensembling. Default: {default_num_processes}')
    cli.add_argument('--save_npz', action='store_true', required=False, help='Set this flag to store output probabilities in separate .npz files')
    parsed = cli.parse_args()
    ensemble_folders(parsed.i, parsed.o, parsed.save_npz, parsed.np)
class PasswordField(Field):
    """Write-only form field for passwords: never readable, with default
    length bounds of 6..20 characters."""

    def __init__(self, default=None, required=False, required_message=None, label=None, writable=None, min_length=6, max_length=20):
        # A missing label becomes the empty string; readability is always off.
        super().__init__(default, required, required_message, (label or ''), readable=Allowed(False), writable=writable, min_length=min_length, max_length=max_length)
class GACOSGrid(object):
    """A GACOS atmospheric-delay grid loaded from a binary raster + .rsc header.

    Grid geometry is anchored at the upper-left corner (ulLat, ulLon) with
    signed per-pixel steps dLat (negative going south) and dLon.
    """

    def __init__(self, filename, time, data, ulLat, ulLon, dLat, dLon):
        self.filename = filename
        self.time = time
        # data: 2-D array (memmap) of delay values, row 0 = northernmost.
        self.data = data
        (self.rows, self.cols) = data.shape
        self.dLat = dLat
        self.dLon = dLon
        self.ulLat = ulLat
        self.ulLon = ulLon
        # Derived lower-left / upper-right corners.
        self.llLat = (self.ulLat + (self.rows * self.dLat))
        self.llLon = ulLon
        self.urLon = (ulLon + (self.cols * self.dLon))

    def contains(self, llLat, llLon, dLat, dLon, rows, cols):
        """Assert that the scene footprint lies entirely inside this grid."""
        ulLat = (llLat + (dLat * rows))
        urLon = (llLon + (dLon * cols))
        boundary_exception = AssertionError(('GACOS Grid does not contain scene!\n llLat: %.4f urLat: %.4f\n llLon: %.4f urLon: %.4f\nScene:\n llLat: %.4f urLat: %.4f\n llLon: %.4f urLon: %.4f' % (self.llLat, self.ulLat, self.llLon, self.urLon, llLat, ulLat, llLon, urLon)))
        assert (llLat >= self.llLat), boundary_exception
        assert (llLon >= self.llLon), boundary_exception
        assert (urLon <= self.urLon), boundary_exception
        assert (ulLat <= self.ulLat), boundary_exception

    def get_corrections(self, llLat, llLon, dLat, dLon, rows, cols):
        """Sample the grid at the scene's pixel centers (nearest neighbor).

        Returns a (rows, cols) array flipped so row 0 is the scene's
        southernmost row (matching the llLat anchoring of the request).
        """
        self.contains(llLat, llLon, dLat, dLon, rows, cols)
        ulLat = (llLat + (dLat * rows))
        ulLon = llLon
        urLon = (llLon + (dLon * cols))
        # Offsets of the scene's upper-left corner within this grid.
        row_offset = ((self.ulLat - ulLat) // (- self.dLat))
        col_offset = ((ulLon - self.llLon) // self.dLon)
        # Per-pixel index arrays (integer division = nearest-lower sample).
        idx_rows = (row_offset + ((np.arange(rows) * dLat) // (- self.dLat)))
        idx_cols = (col_offset + ((np.arange(cols) * dLon) // self.dLon))
        # Expand to a full (rows*cols) fancy index, then reshape.
        idx_rows = np.repeat(idx_rows, cols).astype(np.intp)
        idx_cols = np.tile(idx_cols, rows).astype(np.intp)
        return np.flipud(self.data[(idx_rows, idx_cols)].reshape(rows, cols))

    def load(cls, filename):
        """Construct a GACOSGrid from ``filename`` and its .rsc sidecar.

        NOTE(review): takes ``cls`` and ends with ``return cls(...)`` —
        presumably decorated @classmethod originally; confirm.
        """
        rsc_file = (filename + '.rsc')
        if ((not op.exists(filename)) and (not op.exists(rsc_file))):
            raise FileNotFoundError(('Could not find %s or .rsc file %s' % (filename, rsc_file)))
        params = load_params(rsc_file)
        # Acquisition timestamp: date from the filename, hour from the header.
        time = datetime.strptime(op.basename(filename)[:8], '%Y%m%d')
        hour = timedelta(hours=float(params['TIME_OF_DAY'].rstrip('UTC')))
        time += hour
        rows = int(params['FILE_LENGTH'])
        cols = int(params['WIDTH'])
        ulLat = float(params['Y_FIRST'])
        ulLon = float(params['X_FIRST'])
        dLat = float(params['Y_STEP'])
        dLon = float(params['X_STEP'])
        # Memory-map the raster instead of reading it all into RAM.
        data = np.memmap(filename, dtype=np.float32, mode='r', shape=(rows, cols))
        return cls(filename, time, data, ulLat, ulLon, dLat, dLon)
class TestUCCSDHartreeFock(QiskitChemistryTestCase):
    """VQE with a UCCSD variational form and Hartree-Fock initial state,
    checked against a reference ground-state energy on several backends."""

    def setUp(self):
        super().setUp()
        # NOTE(review): the reference energy appears truncated to -1. in this
        # copy — confirm the full value against the original test.
        self.reference_energy = (- 1.)
        self.seed = 700
        aqua_globals.random_seed = self.seed
        self.driver = HDF5Driver(self.get_resource_path('test_driver_hdf5.hdf5'))
        # Parity mapping without the two-qubit reduction.
        fermionic_transformation = FermionicTransformation(qubit_mapping=QubitMappingType.PARITY, two_qubit_reduction=False)
        (self.qubit_op, _) = fermionic_transformation.transform(self.driver)
        self.fermionic_transformation = fermionic_transformation
        self.optimizer = SLSQP(maxiter=100)
        # Hartree-Fock reference state feeding the UCCSD ansatz.
        initial_state = HartreeFock(fermionic_transformation.molecule_info['num_orbitals'], fermionic_transformation.molecule_info['num_particles'], qubit_mapping=fermionic_transformation._qubit_mapping, two_qubit_reduction=fermionic_transformation._two_qubit_reduction)
        self.var_form = UCCSD(num_orbitals=fermionic_transformation.molecule_info['num_orbitals'], num_particles=fermionic_transformation.molecule_info['num_particles'], initial_state=initial_state, qubit_mapping=fermionic_transformation._qubit_mapping, two_qubit_reduction=fermionic_transformation._two_qubit_reduction)

    def test_uccsd_hf(self):
        """Exact statevector backend must hit the reference energy tightly."""
        backend = BasicAer.get_backend('statevector_simulator')
        solver = VQE(var_form=self.var_form, optimizer=self.optimizer, quantum_instance=QuantumInstance(backend=backend))
        gsc = GroundStateEigensolver(self.fermionic_transformation, solver)
        result = gsc.solve(self.driver)
        self.assertAlmostEqual(result.total_energies[0], self.reference_energy, places=6)

    def test_uccsd_hf_qasm(self):
        """Shot-based qasm backend: SPSA optimizer, looser tolerance."""
        backend = BasicAer.get_backend('qasm_simulator')
        optimizer = SPSA(maxiter=200, last_avg=5)
        solver = VQE(var_form=self.var_form, optimizer=optimizer, expectation=PauliExpectation(), quantum_instance=QuantumInstance(backend=backend, seed_simulator=aqua_globals.random_seed, seed_transpiler=aqua_globals.random_seed))
        gsc = GroundStateEigensolver(self.fermionic_transformation, solver)
        result = gsc.solve(self.driver)
        self.assertAlmostEqual(result.total_energies[0], (- 1.138), places=2)

    def test_uccsd_hf_aer_statevector(self):
        """Same as test_uccsd_hf but on the optional Aer statevector backend."""
        try:
            from qiskit import Aer
            backend = Aer.get_backend('statevector_simulator')
        except ImportError as ex:
            # Aer is optional; skip rather than fail when it is missing.
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return
        solver = VQE(var_form=self.var_form, optimizer=self.optimizer, quantum_instance=QuantumInstance(backend=backend))
        gsc = GroundStateEigensolver(self.fermionic_transformation, solver)
        result = gsc.solve(self.driver)
        self.assertAlmostEqual(result.total_energies[0], self.reference_energy, places=6)

    def test_uccsd_hf_aer_qasm(self):
        """Shot-based run on the optional Aer qasm backend."""
        try:
            from qiskit import Aer
            backend = Aer.get_backend('qasm_simulator')
        except ImportError as ex:
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return
        optimizer = SPSA(maxiter=200, last_avg=5)
        solver = VQE(var_form=self.var_form, optimizer=optimizer, expectation=PauliExpectation(), quantum_instance=QuantumInstance(backend=backend, seed_simulator=aqua_globals.random_seed, seed_transpiler=aqua_globals.random_seed))
        gsc = GroundStateEigensolver(self.fermionic_transformation, solver)
        result = gsc.solve(self.driver)
        self.assertAlmostEqual(result.total_energies[0], (- 1.138), places=2)

    def test_uccsd_hf_aer_qasm_snapshot(self):
        """Aer qasm backend with snapshot-based (noise-free) expectation."""
        try:
            from qiskit import Aer
            backend = Aer.get_backend('qasm_simulator')
        except ImportError as ex:
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return
        optimizer = SPSA(maxiter=200, last_avg=5)
        solver = VQE(var_form=self.var_form, optimizer=optimizer, expectation=AerPauliExpectation(), quantum_instance=QuantumInstance(backend=backend))
        gsc = GroundStateEigensolver(self.fermionic_transformation, solver)
        result = gsc.solve(self.driver)
        self.assertAlmostEqual(result.total_energies[0], self.reference_energy, places=3)

    # Expected [singles, doubles] excitation lists, indexed by the first entry
    # of each parameter tuple below.
    EXCITATION_RESULTS = [[[[0, 1], [0, 2], [3, 4], [3, 5]], [[0, 1, 3, 4], [0, 1, 3, 5], [0, 2, 3, 4], [0, 2, 3, 5]]], [[[0, 2], [3, 5]], [[0, 2, 3, 5]]], [[[0, 1], [0, 2], [3, 4], [3, 5]], []], [[], [[0, 1, 3, 4], [0, 1, 3, 5], [0, 2, 3, 4], [0, 2, 3, 5]]], [[[0, 1], [3, 4]], []], [[[0, 2], [1, 2], [3, 5], [4, 5]], [[0, 2, 3, 5], [0, 2, 4, 5], [1, 2, 3, 5], [1, 2, 4, 5]]], [[[1, 2], [4, 5]], [[1, 2, 4, 5]]], [[[0, 2], [0, 3], [1, 2], [1, 3], [4, 6], [4, 7], [5, 6], [5, 7]], [[0, 2, 4, 6], [0, 2, 4, 7], [0, 2, 5, 6], [0, 2, 5, 7], [0, 3, 4, 6], [0, 3, 4, 7], [0, 3, 5, 6], [0, 3, 5, 7], [1, 2, 4, 6], [1, 2, 4, 7], [1, 2, 5, 6], [1, 2, 5, 7], [1, 3, 4, 6], [1, 3, 4, 7], [1, 3, 5, 6], [1, 3, 5, 7], [0, 2, 1, 3], [4, 6, 5, 7]]], [[[0, 2], [0, 3], [1, 2], [1, 3], [4, 6], [4, 7], [5, 6], [5, 7]], [[0, 2, 4, 6], [0, 2, 4, 7], [0, 2, 5, 6], [0, 2, 5, 7], [0, 3, 4, 6], [0, 3, 4, 7], [0, 3, 5, 6], [0, 3, 5, 7], [1, 2, 4, 6], [1, 2, 4, 7], [1, 2, 5, 6], [1, 2, 5, 7], [1, 3, 4, 6], [1, 3, 4, 7], [1, 3, 5, 6], [1, 3, 5, 7]]]]

    # NOTE(review): this bare tuple-of-lists was presumably the argument of a
    # stripped ddt decorator pair (@idata(...) / @unpack) parameterizing
    # test_uccsd_excitations below — confirm and restore.
    ([[0, 6, 2], [0, 6, 2, [0], [0, 1]], [1, 6, 2, [0], [1]], [0, 6, 2, [0], [0, 1], False], [2, 6, 2, None, None, True, 'both', 'ucc', 's'], [3, 6, 2, None, [0, 1], True, 'both', 'ucc', 'd'], [4, 6, 2, [0], [0], False, 'both', 'ucc', 's'], [5, 6, 4], [5, 6, 4, [0, 1], [0]], [6, 6, 4, [1], [0]], [7, 8, 4], [8, 8, 4, None, None, False]])
    def test_uccsd_excitations(self, expected_result_idx, num_orbitals, num_particles, active_occupied=None, active_unoccupied=None, same_spin_doubles=True, method_singles='both', method_doubles='ucc', excitation_type='sd'):
        """compute_excitation_lists must reproduce the tabulated excitations
        for each (orbitals, particles, active-space, method) combination."""
        excitations = UCCSD.compute_excitation_lists(num_orbitals=num_orbitals, num_particles=num_particles, active_occ_list=active_occupied, active_unocc_list=active_unoccupied, same_spin_doubles=same_spin_doubles, method_singles=method_singles, method_doubles=method_doubles, excitation_type=excitation_type)
        self.assertListEqual(list(excitations), self.EXCITATION_RESULTS[expected_result_idx])
class HTTPBackend(BaseStorageBackend):
    """Storage backend that fetches objects over HTTP(S) with urlopen."""

    def get(self, filepath):
        """Return the raw bytes served at ``filepath``."""
        value_buf = urlopen(filepath).read()
        return value_buf

    def get_text(self, filepath, encoding='utf-8'):
        """Return the response body decoded with ``encoding``."""
        value_buf = urlopen(filepath).read()
        return value_buf.decode(encoding)

    def get_local_path(self, filepath: str) -> Iterable[str]:
        """Download ``filepath`` into a temporary file and yield its path;
        the file is deleted when the generator context ends.

        The temp file is created *before* the try-block: in the original,
        a failure inside NamedTemporaryFile() made the finally clause raise
        NameError on the unbound ``f``.
        """
        f = tempfile.NamedTemporaryFile(delete=False)
        try:
            f.write(self.get(filepath))
            f.close()
            (yield f.name)
        finally:
            # Close the handle if get()/write() raised, then remove the file.
            if not f.closed:
                f.close()
            os.remove(f.name)
# NOTE(review): the leading `_config` is the residue of a stripped decorator
# (the scrubber removed '@<word>' tokens elsewhere in this file) — presumably
# a qtile test-config decorator; confirm and restore.
_config
def test_display_kb(manager):
    """`qtile cmd-obj -f display_kb` must print the keybinding table with the
    expected header, rows and mode transitions."""
    from pprint import pprint
    cmd = '-s {} -o cmd -f display_kb'.format(manager.sockfile)
    table = run_qtile_cmd(cmd)
    print(table)
    pprint(table)
    # Header plus at least one binding row.
    assert (table.count('\n') >= 2)
    assert re.match('(?m)^Mode\\s{3,}KeySym\\s{3,}Mod\\s{3,}Command\\s{3,}Desc\\s*$', table)
    # Plain bindings, with and without a description.
    assert re.search("(?m)^<root>\\s{3,}Return\\s{3,}mod4\\s{3,}spawn\\('xterm'\\)\\s*$", table)
    assert re.search("(?m)^<root>\\s{3,}t\\s{3,}mod4\\s{3,}spawn\\('xterm'\\)\\s{3,}dummy description\\s*$", table)
    # Mode-entering chords and bindings inside named/unnamed modes.
    assert re.search('(?m)^<root>\\s{3,}q\\s{3,}mod4\\s{13,}Enter named mode\\s*$', table)
    assert re.search('(?m)^named\\s{3,}q\\s{13,}Enter <unnamed> mode\\s*$', table)
    assert re.search("(?m)^named\\s{3,}b\\s{9,}togroup\\('b'\\)\\s*$", table)
    assert re.search("(?m)^named>_\\s{3,}a\\s{9,}togroup\\('a'\\)\\s*$", table)
    # A binding with no command must not be listed.
    assert (re.search('(?m)^<root>\\s{3,}y\\s{9,}\\s*$', table) is None)
def _is_character_face_to(state: EnvironmentState, node: Node, char_index: int):
    """True when the character faces ``node`` directly, or faces something
    that itself faces ``node`` (one level of indirection)."""
    faces_directly = state.evaluate(ExistsRelation(CharacterNode(char_index), Relation.FACING, NodeInstanceFilter(node)))
    if faces_directly:
        return True
    char_node = _get_character_node(state, char_index)
    return any(
        state.evaluate(ExistsRelation(NodeInstance(intermediate), Relation.FACING, NodeInstanceFilter(node)))
        for intermediate in state.get_nodes_from(char_node, Relation.FACING)
    )
# The '@pytest.mark' prefix of this decorator was stripped in this copy;
# restored so the parameterization actually runs.
@pytest.mark.parametrize('date_str', ['01-Jan-2000', '29-Feb-2016', '31-Dec-2000', '01-Apr-2003', '01-Apr-2007', '01-Apr-2009', '01-Jan-1990'])
def test_date_checker_valid(date_str: str):
    """check_peps._date must emit no warnings for well-formed valid dates."""
    warnings = [warning for (_, warning) in check_peps._date(1, date_str, '<Prefix>')]
    assert (warnings == []), warnings
def validate_3d(config, model, loader, output_dir, writer_dict=None, epoch=None):
    """Run 3D pose validation over `loader` and return the per-sample eval list.

    Collects per-sample predictions, backbone outputs, proposal centers and
    frame-valid flags, periodically dumps debug renders, then calls the
    dataset's `evaluate`. MPJPE-style scalars are logged to TensorBoard when
    `writer_dict` is given.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    # NOTE(review): these loss meters are created but never updated here —
    # presumably vestigial; confirm before removing.
    losses_depth = AverageMeter()
    losses_pitch = AverageMeter()
    losses_depth_mean = AverageMeter()
    losses_depth_max = AverageMeter()
    model.eval()
    (preds, backbone_outputs, proposal_grid_centers, frame_valids) = ([], [], [], [])
    with torch.no_grad():
        end = time.time()
        for (i, (inputs, targets_2d, weights_2d, targets_3d, meta, input_AGR)) in enumerate(tqdm(loader, ncols=100)):
            data_time.update((time.time() - end))
            (pred, heatmaps, output, grid_centers, loss_dict) = model(views=inputs, meta=meta, targets_2d=targets_2d, weights_2d=weights_2d, targets_3d=targets_3d, input_AGR=input_AGR)
            pred = pred.detach().cpu().numpy()
            # Flatten the batch: one entry per sample for the evaluator.
            for b in range(pred.shape[0]):
                preds.append(pred[b])
                backbone_outputs.append({key: output[key][b].detach().cpu().numpy() for key in output.keys()})
                proposal_grid_centers.append(grid_centers[b].cpu().numpy())
                frame_valids.append(meta['frame_valid'][b])
            batch_time.update((time.time() - end))
            end = time.time()
            # Periodic progress logging + qualitative debug dumps.
            if ((config.PRINT_FREQ > 0) and (((i % config.PRINT_FREQ) == 0) or (i == (len(loader) - 1)))):
                gpu_memory_usage = torch.cuda.memory_allocated(0)
                if config.PRINT_LOG:
                    msg = 'Test: [{0}/{1}]\tTime: {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\tSpeed: {speed:.1f} samples/s\tData: {data_time.val:.3f}s ({data_time.avg:.3f}s)\tMemory {memory:.1f}'.format(i, len(loader), batch_time=batch_time, speed=((len(inputs) * inputs[0].size(0)) / batch_time.val), data_time=data_time, memory=gpu_memory_usage)
                    logger.info(msg)
                prefix = '{}_{:08}'.format(os.path.join(output_dir, 'val'), i)
                save_debug_images_multi(config, inputs, meta, targets_2d, heatmaps, pred, prefix)
                prefix2 = '{}_{:08}'.format(os.path.join(output_dir, 'val'), i)
                save_debug_3d_cubes(config, meta, grid_centers, prefix2)
                save_debug_3d_images(config, meta, pred, prefix2)
                save_debug_detection(meta, inputs, output, heatmaps, targets_2d, prefix2)
    # Dataset-level evaluation over everything collected above.
    (mpjpe_metric, eval_list) = loader.dataset.evaluate(preds, backbone_outputs, proposal_grid_centers, frame_valids)
    if (writer_dict is not None):
        writer = writer_dict['writer']
        # Scalars at the 500mm threshold: joint error, root-aligned joint
        # error, and root position errors (total and depth-only).
        writer.add_scalar('val/mpjpe_500mm', mpjpe_metric['mpjpe'], epoch)
        writer.add_scalar('val/mpjpe_aligned_500mm', mpjpe_metric['mpjpe_aligned'], epoch)
        writer.add_scalar('val/mrpe_500mm', mpjpe_metric['mrpe']['root'], epoch)
        writer.add_scalar('val/mrpe_z_500mm', mpjpe_metric['mrpe']['z'], epoch)
    return eval_list
class Parameters(ConditionDict):
    """Computation parameters of a spectrum calculation.

    A ConditionDict whose attributes describe *how* the spectrum is computed
    (databases and their formats, line-shape algorithm settings, wavenumber
    grid), as opposed to the physical conditions themselves.
    """
    __slots__ = ['broadening_method', 'truncation', 'neighbour_lines', 'chunksize', 'cutoff', 'db_use_cached', 'dbformat', 'dbpath', 'dxL', 'dxG', 'export_lines', 'export_populations', 'folding_thresh', 'include_neighbouring_lines', 'levelsfmt', 'lvl_use_cached', 'optimization', 'parfuncfmt', 'parfuncpath', 'parsum_mode', 'pseudo_continuum_threshold', 'sparse_ldm', 'warning_broadening_threshold', 'warning_linestrength_cutoff', 'wavenum_max_calc', 'wavenum_min_calc', 'waveunit', 'wstep', 'diluent']

    def __init__(self):
        super(Parameters, self).__init__()
        # Line-shape / line-selection settings.
        self.truncation = None
        self.neighbour_lines = None
        self.cutoff = None
        self.broadening_method = ''
        self.optimization = None
        # Line database source and caching.
        self.db_use_cached = None
        self.dbformat = None
        self.dbpath = None
        # Energy-level database settings.
        self.levelsfmt = None
        self.lvl_use_cached = None
        # Partition-function source.
        self.parfuncfmt = None
        self.parfuncpath = None
        self.pseudo_continuum_threshold = 0
        # Computed spectral range and grid.
        self.wavenum_max_calc = None
        self.wavenum_min_calc = None
        self.waveunit = 'cm-1'
        self.wstep = None
        self.diluent = {}
        # Default Lorentzian/Gaussian grid steps (0.01 spacing parameter).
        self.dxL = _lorentzian_step(0.01)
        self.dxG = _gaussian_step(0.01)
        self.include_neighbouring_lines = True
        self.parsum_mode = 'full summation'
        self.sparse_ldm = 'auto'
class GraphiteSparse(Layer):
    """Graphite layer for sparse inputs.

    Projects the sparse feature matrix through a Glorot-initialized weight,
    then propagates it through two low-rank reconstructions
    (recon @ recon^T @ x) and sums the results before the activation.
    """

    def __init__(self, input_dim, output_dim, dropout=0.0, act=tf.nn.relu, **kwargs):
        super(GraphiteSparse, self).__init__(**kwargs)
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name='weights')
        self.dropout = dropout
        self.act = act

    def _call(self, inputs):
        features, recon_a, recon_b = inputs[0], inputs[1], inputs[2]
        # Sparse-dense projection of the input features.
        projected = tf.sparse_tensor_dense_matmul(features, self.vars['weights'])
        # Propagate through both reconstructions and combine.
        prop_a = tf.matmul(recon_a, tf.matmul(tf.transpose(recon_a), projected))
        prop_b = tf.matmul(recon_b, tf.matmul(tf.transpose(recon_b), projected))
        return self.act(prop_a + prop_b)
def test_write_colormap(tmpdir):
    """Round-trip a colormap: write it on a new PNG, read it back twice."""
    expected_red = (255, 0, 0, 255)
    expected_clear = (0, 0, 0, 0)
    with rasterio.open('tests/data/shade.tif') as src:
        band = src.read(1)
        profile = src.meta
    pngname = str(tmpdir.join('foo.png'))
    profile['driver'] = 'PNG'
    with rasterio.open(pngname, 'w', **profile) as dst:
        dst.write(band, indexes=1)
        dst.write_colormap(1, {0: (255, 0, 0, 255), 255: (0, 0, 0, 0)})
        # The colormap must be readable on the still-open writer handle.
        written = dst.colormap(1)
        assert written[0] == expected_red
        assert written[255] == expected_clear
    # ...and it must persist after the file is closed and reopened.
    with rasterio.open(pngname) as reread:
        persisted = reread.colormap(1)
        assert persisted[0] == expected_red
        assert persisted[255] == expected_clear
class EarthquakeCatalog(object):
    """Abstract interface for earthquake catalogs.

    Subclasses must implement `get_event` and `iter_event_names`; the
    list-returning and event-resolving helpers are derived from those two.
    """

    def get_event(self, name):
        """Return the event identified by `name` (abstract)."""
        raise NotImplementedError

    def iter_event_names(self, time_range, **kwargs):
        """Yield names of events within `time_range` (abstract)."""
        raise NotImplementedError

    def get_event_names(self, time_range, **kwargs):
        """Materialize `iter_event_names` into a list."""
        return list(self.iter_event_names(time_range, **kwargs))

    def get_events(self, time_range, **kwargs):
        """Materialize `iter_events` into a list."""
        return list(self.iter_events(time_range, **kwargs))

    def iter_events(self, time_range, **kwargs):
        """Yield full event objects by resolving each name via `get_event`."""
        for event_name in self.iter_event_names(time_range, **kwargs):
            yield self.get_event(event_name)
def validate_unique(self, exclude=None):
    """Run uniqueness and unique-for-date checks on this model instance.

    Merges errors from both check families and raises ValidationError with
    the combined per-field error dict when anything failed.
    """
    unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
    errors = self._perform_unique_checks(unique_checks)
    # Fold date-based errors into the same per-field mapping.
    for field_name, messages in self._perform_date_checks(date_checks).items():
        errors.setdefault(field_name, []).extend(messages)
    if errors:
        raise ValidationError(errors)
def test_create_venv_finds_no_python_executable(manager: EnvManager, poetry: Poetry, config: Config, mocker: MockerFixture, config_virtualenvs_path: Path, venv_name: str) -> None:
    """When no interpreter satisfies the constraint, venv creation must fail
    with a message pointing the user at `env use`."""
    os.environ.pop('VIRTUAL_ENV', None)
    poetry.package.python_versions = '^3.6'
    # Simulate running under Python 2.7 with no alternative interpreters on PATH.
    mocker.patch('sys.version_info', (2, 7, 16))
    mocker.patch('shutil.which', return_value=None)
    with pytest.raises(NoCompatiblePythonVersionFound) as exc_info:
        manager.create_venv()
    assert str(exc_info.value) == (
        'Poetry was unable to find a compatible version. '
        'If you have one, you can explicitly use it via the "env use" command.'
    )
def test_normalize_currency():
    """normalize_currency uppercases valid ISO codes and rejects everything else."""
    # Case-insensitive recognition of a known code.
    for raw in ('EUR', 'eUr'):
        assert normalize_currency(raw) == 'EUR'
    # Unknown codes, empty/blank/padded strings, None and non-strings all
    # normalize to None (no trimming or coercion is attempted).
    for bad in ('FUU', '', None, ' EUR ', ' ', [], set()):
        assert normalize_currency(bad) is None
class TrainerModel(torch.nn.Module):
    """Pipeline of four balanced remote experts: head -> body1 -> body2 -> tail."""

    def __init__(self, grid_size, dht):
        super().__init__()
        # Local trainable parameter so the optimizer always has local state.
        self.dummy = torch.nn.Parameter(torch.randn(1, 1), requires_grad=True)

        def remote_expert(prefix):
            # All stages share the same grid/DHT/timeouts; only the uid prefix differs.
            return BalancedRemoteExpert(grid_size=grid_size, dht=dht, forward_timeout=TIMEOUT, backward_timeout=BACKWARD_TIMEOUT, uid_prefix=prefix)

        self.head = remote_expert('head.')
        self.body1 = remote_expert('body1.')
        self.body2 = remote_expert('body2.')
        self.tail = remote_expert('tail.')

    def forward(self, input_ids, **kwargs):
        """Run the expert pipeline; the tail computes a scalar mean loss."""
        hidden = self.body2(self.body1(self.head(input_ids)))
        loss = self.tail(hidden, input_ids).mean()
        return (loss, None)
def downgrade(op, tables, tester):
    """Revert the repository-mirror migration.

    Drops the added repository.state column, the two mirror tables, and
    purges every mirror-related log entry kind.
    """
    op.drop_column('repository', 'state')
    op.drop_table('repomirrorconfig')
    op.drop_table('repomirrorrule')
    mirror_log_kinds = (
        'repo_mirror_enabled',
        'repo_mirror_disabled',
        'repo_mirror_config_changed',
        'repo_mirror_sync_started',
        'repo_mirror_sync_failed',
        'repo_mirror_sync_success',
        'repo_mirror_sync_now_requested',
        'repo_mirror_sync_tag_success',
        'repo_mirror_sync_tag_failed',
        'repo_mirror_sync_test_success',
        'repo_mirror_sync_test_failed',
        'repo_mirror_sync_test_started',
        'change_repo_state',
    )
    for kind_name in mirror_log_kinds:
        op.execute(tables.logentrykind.delete().where(tables.logentrykind.c.name == op.inline_literal(kind_name)))
class Effect6351(BaseEffect):
    """pyfa dogma effect 6351.

    Applies a charge boost: raises `kineticDamage` on charges loaded into
    modules requiring Missile Launcher Operation, by the ship's
    `shipBonusCC3` attribute, scaled by the Caldari Cruiser skill
    (via the `skill=` argument).
    """
    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # pyfa effect handlers are plain functions (no `self`).
        fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Missile Launcher Operation')), 'kineticDamage', src.getModifiedItemAttr('shipBonusCC3'), skill='Caldari Cruiser', **kwargs)
# NOTE(review): the leading ".parametrize" is residue of a truncated
# "@pytest.mark.parametrize" decorator — confirm against upstream.
.parametrize('n_bytes, expected_size', [(None, '0 bytes'), (5, '5 bytes'), ((3 * 1024), '3.00 KB'), ((1024 * 3000), '2.93 MB'), (((1024 * 1024) * 5000), '4.88 GB'), ((((1024 * 1024) * 5000) * 1000), '4882.81 GB')])
def test_format_file_size(n_bytes, expected_size):
    """None counts as zero; units scale through KB/MB and cap at GB."""
    assert (format_file_size(n_bytes) == expected_size)
def iter_paths(root: fsnative, exclude: (Iterable[fsnative] | None)=None, skip_hidden: bool=True) -> Generator[(fsnative, None, None)]:
    """Yield canonical file paths under `root`.

    Hidden files/directories are skipped when `skip_hidden` is true, and any
    path starting with one of the `exclude` prefixes is dropped — both before
    and after resolving symlinks via realpath.
    """
    assert isinstance(root, fsnative)
    excluded = exclude or []
    assert all(isinstance(p, fsnative) for p in excluded)
    # NOTE(review): this only checks the abspath string is non-empty —
    # presumably meant to assert `root` is absolute; kept as-is.
    assert os.path.abspath(root)

    def _skip(candidate):
        # Hidden entries and anything under an excluded prefix are rejected.
        if skip_hidden and is_hidden(candidate):
            return True
        return any(candidate.startswith(prefix) for prefix in excluded)

    if skip_hidden and is_hidden(root):
        return
    for dirpath, dirnames, filenames in os.walk(root):
        if skip_hidden:
            # Prune hidden directories in place so os.walk never descends into them.
            dirnames[:] = [d for d in dirnames if not is_hidden(path2fsn(os.path.join(dirpath, d)))]
        for fname in filenames:
            candidate = path2fsn(os.path.join(dirpath, fname))
            if _skip(candidate):
                continue
            # Re-check after symlink resolution: the real location may be
            # hidden or excluded even if the link itself is not.
            candidate = path2fsn(os.path.realpath(candidate))
            if _skip(candidate):
                continue
            yield candidate
def run_segmentation(img, settings=NNUNET_SETTINGS_DEFAULTS):
    """Segment a SimpleITK image with an nnU-Net model, downloading it if needed.

    Stages `img` into a temp folder in nnU-Net's expected naming, runs
    `predict_from_folder`, and returns a dict of binary masks keyed
    'Struct_0'..'Struct_{n-1}' (one per label value in the output).
    """
    setup_nnunet_environment()
    try:
        from nnunet.inference.predict import predict_from_folder
    except ImportError:
        # NOTE(review): execution continues after this log call and will fail
        # later at `predict_from_folder` — consider re-raising here.
        logger.error('nnUNet is not installed. Please pip install nnunet to use this functionality')
    nnunet_model_path = Path(os.environ['RESULTS_FOLDER'])
    task = settings['task']
    model = settings['model']
    task_path = nnunet_model_path.joinpath('nnUNet', model, task)
    print(task_path)
    if (not task_path.exists()):
        # Weights missing locally: fetch and install if the task is known.
        available_models = available_nnunet_models()
        if (not (task in available_models)):
            raise ValueError(f'{task} not available')
        task_url = available_models[task]['url']
        download_and_install_nnunet_task(task, task_url)
    # Stage the input image as "<task>_0000.nii.gz", nnU-Net's input layout.
    input_path = Path(tempfile.mkdtemp())
    io_path = input_path.joinpath(f"{settings['task']}_0000.nii.gz")
    sitk.WriteImage(img, str(io_path))
    output_path = Path(tempfile.mkdtemp())
    # Pull prediction options out of the settings dict.
    model = settings['model']
    folds = settings['folds']
    num_threads_preprocessing = settings['num_threads_preprocessing']
    num_threads_nifti_save = settings['num_threads_nifti_save']
    lowres_segmentations = settings['lowres_segmentations']
    all_in_gpu = settings['all_in_gpu']
    disable_mixed_precision = settings['disable_mixed_precision']
    disable_tta = settings['disable_tta']
    trainer_class_name = settings['trainer_class_name']
    cascade_trainer_class_name = settings['cascade_trainer_class_name']
    mode = 'normal'
    default_plans_identifier = 'nnUNetPlansv2.1'
    chk = settings['chk']
    # Cascaded full-res models use the cascade trainer class.
    if (model == '3d_cascade_fullres'):
        trainer = cascade_trainer_class_name
    else:
        trainer = trainer_class_name
    model_folder_name = task_path.joinpath((trainer + f'__{default_plans_identifier}'))
    predict_from_folder(str(model_folder_name), str(input_path), str(output_path), folds, False, num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations, 0, 1, (not disable_tta), overwrite_existing=True, mode=mode, overwrite_all_in_gpu=all_in_gpu, mixed_precision=(not disable_mixed_precision), step_size=0.5, checkpoint_name=chk)
    results = {}
    # Split each multi-label output volume into one binary mask per label.
    for op in output_path.glob('*.nii.gz'):
        label_map = sitk.ReadImage(str(op))
        num_labels = sitk.GetArrayViewFromImage(label_map).max()
        for l in range(num_labels):
            results[f'Struct_{l}'] = (label_map == (l + 1))
    os.remove(io_path)
    return results
def compute_obj_class_precision(metrics, gt_dict, classes_out):
    """Append object-classification accuracy at valid interaction steps.

    Selects class scores from `classes_out` at positions where
    `gt_dict['action_valid_interact']` is nonzero, takes the argmax per step,
    compares with the concatenated ground-truth object ids, and records the
    fraction correct under metrics['action/object'].
    """
    valid_idx = torch.nonzero(gt_dict['action_valid_interact'])
    # Advanced indexing: pick the score rows of the valid steps only.
    valid_scores = classes_out[tuple(valid_idx.T)]
    predicted = valid_scores.max(1)[1]
    target = torch.cat(gt_dict['object'], dim=0)
    accuracy = torch.sum(predicted == target) / len(target)
    metrics['action/object'].append(accuracy.item())
class TestReparentNotify(EndianTest):
    """Round-trip pack/unpack test for the X11 ReparentNotify event."""

    def setUp(self):
        # NOTE(review): several fixture values were stripped in this copy
        # ('event', 'parent' and 'window' have no values), leaving invalid
        # syntax — restore them from the upstream python-xlib tests.
        self.evt_args_0 = {'event': , 'override': 0, 'parent': , 'sequence_number': 43356, 'type': 128, 'window': , 'x': (- 19227), 'y': (- 30992)}
        # Expected wire encoding of the event arguments above.
        self.evt_bin_0 = b'\x80\x00\\\xa9\x05oHo\xf6\xb4tE~\n#;\xe5\xb4\xf0\x86\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'

    def testPack0(self):
        # Packing the args must reproduce the recorded binary exactly.
        bin = event.ReparentNotify._fields.to_binary(*(), **self.evt_args_0)
        self.assertBinaryEqual(bin, self.evt_bin_0)

    def testUnpack0(self):
        # Parsing the binary must consume it fully and yield the same args.
        (args, remain) = event.ReparentNotify._fields.parse_binary(self.evt_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.evt_args_0)
def get_syscall_mapper(archtype: QL_ARCH):
    """Return a callable that maps a raw syscall number to its handler name.

    Selects the per-architecture syscall table and a fixup that rebases
    architecture-specific syscall number ranges before the table lookup.
    """
    # NOTE(review): the numeric constants in the fixup lambdas were stripped
    # from this copy ("(n - )", "( <= n <= )"), leaving invalid syntax —
    # restore the original offsets/bounds from upstream qiling before use.
    syscall_table = {QL_ARCH.X8664: x8664_syscall_table, QL_ARCH.ARM64: arm64_syscall_table}[archtype]
    syscall_fixup = {QL_ARCH.X8664: (lambda n: ((n - ) if ( <= n <= ) else n)), QL_ARCH.ARM64: (lambda n: ((n - ) if (n >= ) else n))}[archtype]

    def __mapper(syscall_num: int) -> str:
        # Prefix + resolved syscall name from the per-arch table.
        return f'{SYSCALL_PREF}{syscall_table[syscall_fixup(syscall_num)]}'
    return __mapper
def createMailModel(parent):
    """Build the 3-column (Subject/Sender/Date) demo mailbox model."""
    model = QStandardItemModel(0, 3, parent)
    for column, title in ((SUBJECT, 'Subject'), (SENDER, 'Sender'), (DATE, 'Date')):
        model.setHeaderData(column, Qt.Horizontal, title)
    # NOTE(review): sender addresses look truncated/scrubbed in this source —
    # confirm against the upstream Qt example.
    messages = (
        ('Happy New Year!', 'Grace K. <-inc.com>', 2006, 12, 31, 17, 3),
        ('Radically new concept', 'Grace K. <-inc.com>', 2006, 12, 22, 9, 44),
        ('Accounts', '', 2006, 12, 31, 12, 50),
        ('Expenses', 'Joe Bloggs <>', 2006, 12, 25, 11, 39),
        ('Re: Expenses', 'Andy <>', 2007, 1, 2, 16, 5),
        ('Re: Accounts', 'Joe Bloggs <>', 2007, 1, 3, 14, 18),
        ('Re: Accounts', 'Andy <>', 2007, 1, 3, 14, 26),
        ('Sports', 'Linda Smith <linda.>', 2007, 1, 5, 11, 33),
        ('AW: Sports', 'Rolf Newschweinstein <>', 2007, 1, 5, 12, 0),
        ('RE: Sports', 'Petra Schmidt <>', 2007, 1, 5, 12, 1),
    )
    for subject, sender, year, month, day, hour, minute in messages:
        addMail(model, subject, sender, QDateTime(QDate(year, month, day), QTime(hour, minute)))
    return model
def test_rotated_latitude_longitude_operation():
    """The rotated lat/lon conversion exposes the PROJ ob_tran method and
    coerces its three pole parameters to floats."""
    conversion = RotatedLatitudeLongitudeConversion(o_lat_p=1, o_lon_p=2, lon_0=3)
    assert conversion.name == 'unknown'
    assert conversion.method_name == 'PROJ ob_tran o_proj=longlat'
    expected_params = {'o_lat_p': 1.0, 'o_lon_p': 2.0, 'lon_0': 3.0}
    assert _to_dict(conversion) == expected_params
class LinkStats(ctypes.Structure):
    """ctypes mirror of a link-statistics struct (appears to match SWMM's
    TLinkStats — confirm field layout against the linked C library version).

    `_py_alias_ids` maps the C field names to the snake_case names exposed
    on the Python side.
    """
    # Field order and types must match the C struct exactly.
    _fields_ = [('maxFlow', ctypes.c_double), ('maxFlowDate', ctypes.c_double), ('maxVeloc', ctypes.c_double), ('maxDepth', ctypes.c_double), ('timeNormalFlow', ctypes.c_double), ('timeInletControl', ctypes.c_double), ('timeSurcharged', ctypes.c_double), ('timeFullUpstream', ctypes.c_double), ('timeFullDnstream', ctypes.c_double), ('timeFullFlow', ctypes.c_double), ('timeCapacityLimited', ctypes.c_double), ('timeInFlowClass', FlowClassArray), ('timeCourantCritical', ctypes.c_double), ('flowTurns', ctypes.c_long), ('flowTurnSign', ctypes.c_int)]
    # C name -> Python-facing alias.
    _py_alias_ids = {'maxFlow': 'peak_flow', 'maxFlowDate': 'peak_flow_date', 'maxVeloc': 'peak_velocity', 'maxDepth': 'peak_depth', 'timeNormalFlow': 'time_normal_flow', 'timeInletControl': 'time_inlet_control', 'timeSurcharged': 'time_surcharged', 'timeFullUpstream': 'time_full_upstream', 'timeFullDnstream': 'time_full_downstream', 'timeFullFlow': 'time_full_flow', 'timeCapacityLimited': 'time_capacity_limited', 'timeInFlowClass': 'time_in_flow_class', 'timeCourantCritical': 'time_courant_crit', 'flowTurns': 'flow_turns', 'flowTurnSign': 'flow_turn_sign'}
class TestClassyModel(unittest.TestCase):
    """Tests for ClassyModel checkpointing, wrapper-class behavior and
    strict/non-strict state loading."""

    def setUp(self) -> None:
        self.base_dir = tempfile.mkdtemp()
        # Remember wrapper classes so tests that mutate them can be undone.
        self.orig_wrapper_cls_1 = MyTestModel.wrapper_cls
        self.orig_wrapper_cls_2 = MyTestModel2.wrapper_cls

    def tearDown(self) -> None:
        shutil.rmtree(self.base_dir)
        MyTestModel.wrapper_cls = self.orig_wrapper_cls_1
        MyTestModel2.wrapper_cls = self.orig_wrapper_cls_2

    def get_model_config(self, use_head):
        """Minimal model config, optionally with one fully-connected head."""
        config = {'name': 'my_test_model'}
        if use_head:
            config['heads'] = [{'name': 'fully_connected', 'unique_id': 'default_head', 'num_classes': 3, 'fork_block': 'linear', 'in_plane': 5}]
        return config

    def test_from_checkpoint(self):
        # Zero the weights, checkpoint, and verify from_checkpoint restores
        # the same (zeroed) parameters and the right model type.
        config = get_test_task_config()
        for use_head in [True, False]:
            config['model'] = self.get_model_config(use_head)
            task = build_task(config)
            task.prepare()
            checkpoint_folder = f'{self.base_dir}/{use_head}/'
            input_args = {'config': config}
            for param in task.model.parameters():
                param.data.zero_()
            checkpoint_hook = CheckpointHook(checkpoint_folder, input_args, phase_types=['train'])
            os.mkdir(checkpoint_folder)
            checkpoint_hook.on_start(task)
            task.train = True
            checkpoint_hook.on_phase_end(task)
            checkpoint = load_checkpoint(checkpoint_folder)
            model = ClassyModel.from_checkpoint(checkpoint)
            self.assertTrue(isinstance(model, MyTestModel))
            for param in model.parameters():
                self.assertTrue(torch.all((param.data == 0)))

    def test_classy_model_wrapper_instance(self):
        # wrapper_cls = None: instantiation yields the raw model class.
        MyTestModel.wrapper_cls = None
        model = MyTestModel()
        self.assertEqual(type(model), MyTestModel)
        self.assertIsInstance(model, MyTestModel)
        self.assertIsInstance(model, ClassyModel)
        self.assertIsInstance(model, nn.Module)
        # With a wrapper class set, construction returns the wrapper type but
        # isinstance checks for the wrapped model still pass.
        MyTestModel.wrapper_cls = ClassyModelWrapper
        model = MyTestModel()
        self.assertEqual(type(model), ClassyModelWrapper)
        self.assertIsInstance(model, MyTestModel)
        self.assertIsInstance(model, ClassyModel)
        self.assertIsInstance(model, nn.Module)

    def test_classy_model_wrapper_torch_scriptable(self):
        # Unwrapped models must survive torch.jit.script unchanged.
        input = torch.ones((2, 2))
        for (wrapper_cls, expected_output) in [(None, (input + 1))]:
            MyTestModel2.wrapper_cls = wrapper_cls
            model = MyTestModel2()
            scripted_model = torch.jit.script(model)
            self.assertTrue(torch.allclose(expected_output, model(input)))
            self.assertTrue(torch.allclose(expected_output, scripted_model(input)))

    def test_classy_model_wrapper_torch_jittable(self):
        # Tracing must work both with and without a wrapper class.
        input = torch.ones((2, 2))
        for (wrapper_cls, expected_output) in [(None, (input + 1)), (TestSimpleClassyModelWrapper, ((input + 1) * 2))]:
            MyTestModel2.wrapper_cls = wrapper_cls
            model = MyTestModel2()
            jitted_model = torch.jit.trace(model, input)
            self.assertTrue(torch.allclose(expected_output, model(input)))
            self.assertTrue(torch.allclose(expected_output, jitted_model(input)))

    def test_classy_model_set_state_strict(self):
        model_1 = build_model(self.get_model_config(use_head=True))
        model_state_1 = model_1.get_classy_state(deep_copy=True)
        model_2 = build_model(self.get_model_config(use_head=False))
        model_2.set_heads({'linear': [IdentityHead('default_head')]})
        # Strict loading must reject the mismatched head state...
        with self.assertRaises(RuntimeError):
            model_2.set_classy_state(model_state_1)
        # ...while strict=False loads whatever matches.
        model_2.set_classy_state(model_state_1, strict=False)
class ClientTests(unittest.TestCase):
    """End-to-end tests of the websockets client against a local test server."""

    def test_connection(self):
        # Happy path: handshake completes and the connection is OPEN.
        with run_server() as server:
            with run_client(server) as client:
                self.assertEqual(client.protocol.state.name, 'OPEN')

    def test_connection_fails(self):
        # A response missing Sec-WebSocket-Accept must fail the handshake.
        def remove_accept_header(self, request, response):
            del response.headers['Sec-WebSocket-Accept']
        with run_server(do_nothing, process_response=remove_accept_header) as server:
            with self.assertRaisesRegex(InvalidHandshake, 'missing Sec-WebSocket-Accept header'):
                with run_client(server, close_timeout=MS):
                    self.fail('did not raise')

    def test_tcp_connection_fails(self):
        # Nothing listening on the port: the OSError must surface.
        with self.assertRaises(OSError):
            with run_client('ws://localhost:54321'):
                self.fail('did not raise')

    def test_existing_socket(self):
        # A pre-connected socket can be supplied via sock=; the URL host is
        # then not used for connecting.
        with run_server() as server:
            with socket.create_connection(server.socket.getsockname()) as sock:
                with run_client('ws://invalid/', sock=sock) as client:
                    self.assertEqual(client.protocol.state.name, 'OPEN')

    def test_additional_headers(self):
        # Extra request headers are passed through verbatim.
        with run_server() as server:
            with run_client(server, additional_headers={'Authorization': 'Bearer ...'}) as client:
                self.assertEqual(client.request.headers['Authorization'], 'Bearer ...')

    def test_override_user_agent(self):
        with run_server() as server:
            with run_client(server, user_agent_header='Smith') as client:
                self.assertEqual(client.request.headers['User-Agent'], 'Smith')

    def test_remove_user_agent(self):
        # user_agent_header=None suppresses the header entirely.
        with run_server() as server:
            with run_client(server, user_agent_header=None) as client:
                self.assertNotIn('User-Agent', client.request.headers)

    def test_compression_is_enabled(self):
        # permessage-deflate is negotiated by default.
        with run_server() as server:
            with run_client(server) as client:
                self.assertEqual([type(ext) for ext in client.protocol.extensions], [PerMessageDeflate])

    def test_disable_compression(self):
        with run_server() as server:
            with run_client(server, compression=None) as client:
                self.assertEqual(client.protocol.extensions, [])

    def test_custom_connection_factory(self):
        # create_connection= lets callers substitute their own connection class.
        def create_connection(*args, **kwargs):
            client = ClientConnection(*args, **kwargs)
            client.create_connection_ran = True
            return client
        with run_server() as server:
            with run_client(server, create_connection=create_connection) as client:
                self.assertTrue(client.create_connection_ran)

    def test_timeout_during_handshake(self):
        # The server stalls until `gate` is set; the client must time out first.
        gate = threading.Event()

        def stall_connection(self, request):
            gate.wait()
        with run_server(do_nothing, process_request=stall_connection) as server:
            try:
                with self.assertRaisesRegex(TimeoutError, 'timed out during handshake'):
                    with run_client(server, open_timeout=(5 * MS)):
                        self.fail('did not raise')
            finally:
                # Release the stalled server thread so teardown doesn't hang.
                gate.set()

    def test_connection_closed_during_handshake(self):
        # Server drops the TCP connection mid-handshake.
        def close_connection(self, request):
            self.close_socket()
        with run_server(process_request=close_connection) as server:
            with self.assertRaisesRegex(ConnectionError, 'connection closed during handshake'):
                with run_client(server):
                    self.fail('did not raise')
def _wasserstein_update_input_check(x: torch.Tensor, y: torch.Tensor, x_weights: Optional[torch.Tensor]=None, y_weights: Optional[torch.Tensor]=None) -> None: if ((x.nelement() == 0) or (y.nelement() == 0)): raise ValueError('Distribution cannot be empty.') if ((x.dim() > 1) or (y.dim() > 1)): raise ValueError('Distribution has to be one dimensional.') if (not (x.device == y.device)): raise ValueError('Expected all the tensors to be on the same device.') if (x_weights is not None): if (x_weights.nelement() == 0): raise ValueError('Weights cannot be empty.') if (not torch.all((x_weights > 0))): raise ValueError('All weights must be non-negative.') if (not (0 < torch.sum(x_weights) < torch.inf)): raise ValueError('Weight tensor sum must be positive-finite.') if (not (x_weights.device == x.device)): raise ValueError('Expected values and weights to be on the same device.') if (x_weights.shape != x.shape): raise ValueError(f'Distribution values and weight tensors must be of the same shape, got shapes {x.shape} and {x_weights.shape}.') if (y_weights is not None): if (y_weights.nelement() == 0): raise ValueError('Weights cannot be empty.') if (not torch.all((y_weights > 0))): raise ValueError('All weights must be non-negative.') if (not (0 < torch.sum(y_weights) < torch.inf)): raise ValueError('Weight tensor sum must be positive-finite.') if (not (y_weights.device == y.device)): raise ValueError('Expected values and weights to be on the same device.') if (y_weights.shape != y.shape): raise ValueError(f'Distribution values and weight tensors must be of the same shape, got shapes {y.shape} and {y_weights.shape}.')
class CornerCornerAssociate(nn.Module):
    """Binary corner-association classifier on a DRN-22 backbone.

    The backbone's stem conv is swapped so the network accepts
    `configs.input_channels` input planes; a single linear layer maps the
    flattened encoder output to one logit, with a sigmoid probability.
    """

    def __init__(self, im_size, configs):
        super(CornerCornerAssociate, self).__init__()
        backbone = drn.drn_d_22(pretrained=False)
        backbone_modules = list(backbone.children())
        self.im_size = im_size
        self.configs = configs
        # Replace the first conv so the channel count matches our inputs.
        backbone_modules[0][0] = nn.Conv2d(configs.input_channels, 16, kernel_size=(7, 7), padding=(3, 3), bias=False)
        self.drn_encoder = nn.Sequential(*backbone_modules)
        self.final_linear = nn.Linear(1000, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return (logits, probabilities) for a batch of inputs."""
        encoded = self.drn_encoder(x)
        flat = encoded.view(x.size(0), -1)
        logits = self.final_linear(flat)
        return (logits, self.sigmoid(logits))
def test_append_with_strings_unpack():
    """append with unpack=True splats an iterable payload item-by-item.

    A string payload unpacks into its characters; a later single-character
    payload appends just that character.
    """
    context = Context({'arblist': [1, 2], 'append': {'list': PyString('arblist'), 'addMe': 'xy', 'unpack': True}})
    append.run_step(context)
    # Re-run with a one-character payload; still unpacked, appends 'z'.
    context['append']['addMe'] = 'z'
    append.run_step(context)
    assert context['arblist'] == [1, 2, 'x', 'y', 'z']
    assert len(context) == 2
def kernel2d_conv(feat_in, kernel, ksize):
    """Apply a spatially-varying (per-pixel) convolution kernel.

    `feat_in` is (N, C, H, W); `kernel` is (N, C*ksize*ksize, H, W), one
    flattened kernel per output pixel. Input is replicate-padded so output
    keeps the spatial size. The kernel's kh/kw axes are swapped before the
    inner product, matching the original implementation.
    """
    n_ch = feat_in.size(1)
    batch, _, height, width = kernel.size()
    half = (ksize - 1) // 2
    padded = F.pad(feat_in, (half,) * 4, mode='replicate')
    # Extract ksize x ksize patches around every pixel:
    # -> (N, H, W, C, ksize*ksize) with the kw/kh order flipped.
    patches = padded.unfold(2, ksize, 1).unfold(3, ksize, 1)
    patches = patches.permute(0, 2, 3, 1, 5, 4).contiguous()
    patches = patches.reshape(batch, height, width, n_ch, -1)
    # Reshape kernels to (N, H, W, C, ksize*ksize), also with kh/kw swapped.
    per_pixel = kernel.permute(0, 2, 3, 1).reshape(batch, height, width, n_ch, ksize, ksize)
    per_pixel = per_pixel.permute(0, 1, 2, 3, 5, 4).reshape(batch, height, width, n_ch, -1)
    # Per-pixel inner product over the kernel taps, then back to NCHW.
    out = (patches * per_pixel).sum(-1)
    return out.permute(0, 3, 1, 2).contiguous()
class MultipleAccountsTest(AssociateActionTest):
    """Logging in twice with different provider identities must create two
    social-auth entries for the same user."""
    # NOTE(review): URL values in this fixture were stripped in this copy
    # (e.g. 'avatar_url': ' ...), leaving invalid string literals — restore
    # the full JSON payload from the upstream social-auth tests.
    alternative_user_data_body = json.dumps({'login': 'foobar2', 'id': 2, 'avatar_url': ' 'gravatar_id': 'somehexcode', 'url': ' 'name': 'monalisa foobar2', 'company': 'GitHub', 'blog': ' 'location': 'San Francisco', 'email': '', 'hireable': False, 'bio': 'There once was...', 'public_repos': 2, 'public_gists': 1, 'followers': 20, 'following': 0, 'html_url': ' 'created_at': '2008-01-14T04:33:35Z', 'type': 'User', 'total_private_repos': 100, 'owned_private_repos': 100, 'private_gists': 81, 'disk_usage': 10000, 'collaborators': 8, 'plan': {'name': 'Medium', 'space': 400, 'collaborators': 10, 'private_repos': 20}})

    def test_multiple_social_accounts(self):
        # First login with the default identity, second with the alternative.
        self.do_login()
        self.do_login(user_data_body=self.alternative_user_data_body)
        self.assertEqual(len(self.user.social), 2)
        self.assertEqual(self.user.social[0].provider, 'github')
        self.assertEqual(self.user.social[1].provider, 'github')
def compute_acc(gold, pred, slot_temp):
    """Compute slot accuracy for one turn (TRADE-style DST evaluation).

    A slot counts against accuracy if its gold value was missed, or if a
    prediction is wrong for a slot that was not already counted as missed
    (no double penalty per slot name).

    Args:
        gold: gold "slot-value" strings.
        pred: predicted "slot-value" strings.
        slot_temp: full slot inventory; its length is the denominator.

    Returns:
        float accuracy in [0, 1].
    """
    # Sets give O(1) membership tests; the original used O(n) list scans
    # inside loops (quadratic overall).
    gold_set = set(gold)
    pred_set = set(pred)
    missed = [g for g in gold if g not in pred_set]
    miss_gold = len(missed)
    # Slot names (value stripped) of the missed slots.
    miss_slot = {g.rsplit('-', 1)[0] for g in missed}
    # Wrong predictions on slots not already penalized as missed.
    wrong_pred = sum(1 for p in pred if (p not in gold_set) and (p.rsplit('-', 1)[0] not in miss_slot))
    total = len(slot_temp)
    return (total - miss_gold - wrong_pred) / float(total)
class ConvBlockWOutput(nn.Module):
    """Conv -> (BN) -> ReLU -> (MaxPool) block with an optional internal classifier.

    When `add_output` is set, forward returns (features, 1, head_output);
    otherwise forward is rebound to `only_forward`, returning (features, 0, None).
    """

    def __init__(self, conv_params, output_params):
        super(ConvBlockWOutput, self).__init__()
        # conv_params: (in_channels, out_channels, max_pool_size, batch_norm)
        in_ch = conv_params[0]
        out_ch = conv_params[1]
        pool_size = conv_params[2]
        use_bn = conv_params[3]
        # output_params: (add_output, num_classes, input_size, output_id)
        add_output = output_params[0]
        num_classes = output_params[1]
        input_size = output_params[2]
        self.output_id = output_params[3]
        self.depth = 1

        stages = [nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=1, stride=1)]
        if use_bn:
            stages.append(nn.BatchNorm2d(out_ch))
        stages.append(nn.ReLU())
        if pool_size > 1:
            stages.append(nn.MaxPool2d(kernel_size=pool_size))
        self.layers = nn.Sequential(*stages)

        if add_output:
            self.output = af.InternalClassifier(input_size, out_ch, num_classes)
            self.no_output = False
        else:
            # No internal head: degrade forward to the features-only variant.
            self.output = nn.Sequential()
            self.forward = self.only_forward
            self.no_output = True

    def forward(self, x):
        features = self.layers(x)
        return (features, 1, self.output(features))

    def only_output(self, x):
        return self.output(self.layers(x))

    def only_forward(self, x):
        return (self.layers(x), 0, None)
# NOTE(review): the leading "_fixture(autouse=True)" is residue of a truncated
# "@pytest.fixture(autouse=True)" decorator — confirm against upstream.
_fixture(autouse=True)
def mocked_browser(browser_pool, request):
    """Replace splinter's Browser with a MagicMock-based stand-in for tests.

    Quits and clears any already-pooled browsers, then patches
    pytest_splinter.plugin.splinter.Browser for the duration of the test.
    """
    for browser in browser_pool.values():
        browser.quit()
    browser_pool.clear()

    def mocked_browser(driver_name, *args, **kwargs):
        # Factory mimicking splinter.Browser(driver_name, ...).
        mocked_browser = mock.MagicMock()
        mocked_browser.driver = mock.MagicMock()
        mocked_browser.driver.profile = mock.MagicMock()
        mocked_browser.driver_name = driver_name
        mocked_browser.html = u'<html></html>'

        def save_screenshot(path):
            # Create an empty file so screenshot paths actually exist on disk.
            with open(path, 'w'):
                pass
        mocked_browser.driver.save_screenshot = save_screenshot
        return mocked_browser
    patcher = mock.patch('pytest_splinter.plugin.splinter.Browser', mocked_browser)
    # Yield the started patch; stop it during fixture teardown.
    (yield patcher.start())
    patcher.stop()
class RPS(commands.Cog):
    """Rock-paper-scissors game cog for Sir Lancebot."""
    # NOTE(review): the bare "(case_insensitive=True)" below is residue of a
    # truncated decorator (likely "@commands.command(case_insensitive=True)")
    # — restore it from upstream before use.
    (case_insensitive=True)
    async def rps(self, ctx: commands.Context, move: str) -> None:
        """Play a round against the bot; `move` may be a full or short name."""
        move = move.lower()
        player_mention = ctx.author.mention
        if ((move not in CHOICES) and (move not in SHORT_CHOICES)):
            raise commands.BadArgument(f"Invalid move. Please make move from options: {', '.join(CHOICES).upper()}.")
        bot_move = choice(CHOICES)
        # WINNER_DICT is indexed by the first letters of (player, bot) moves;
        # presumably 0 = tie, 1 = player win, else loss — confirm upstream.
        player_result = WINNER_DICT[move[0]][bot_move[0]]
        if (player_result == 0):
            message_string = f"{player_mention} You and Sir Lancebot played {bot_move}, it's a tie."
            (await ctx.send(message_string))
        elif (player_result == 1):
            (await ctx.send(f'Sir Lancebot played {bot_move}! {player_mention} won!'))
        else:
            (await ctx.send(f'Sir Lancebot played {bot_move}! {player_mention} lost!'))
def test_git_clone_default_branch_head(source_url: str, remote_refs: FetchPackResult, remote_default_ref: bytes, mocker: MockerFixture) -> None:
    """Cloning with no explicit revision must check out the remote default
    branch, via the modern (non-legacy) clone path."""
    clone_spy = mocker.spy(Git, '_clone')
    legacy_spy = mocker.spy(Git, '_clone_legacy')
    with Git.clone(url=source_url) as repo:
        assert repo.head() == remote_refs.refs[remote_default_ref]
    # Only the modern clone implementation should have been used.
    legacy_spy.assert_not_called()
    clone_spy.assert_called()
def test_sanity_check():
    """Every standard token type must map to a unique short name."""
    mapping = token.STANDARD_TYPES.copy()
    mapping[token.Token] = '---'
    # Invert the mapping: short name -> all token types claiming it.
    by_short_name = {}
    for token_type, short_name in mapping.items():
        by_short_name.setdefault(short_name, []).append(token_type)
    if len(by_short_name) == len(mapping):
        return
    # Report each short name claimed by more than one token type.
    for short_name, claimants in by_short_name.items():
        if len(claimants) > 1:
            pytest.fail(('%r has more than one key: %r' % (short_name, claimants)))
class BpsTradeValueCommissionModel(CommissionModel):
    """Commission charged as basis points of the traded notional value."""

    def __init__(self, commission: float):
        # Commission rate expressed in basis points (1 bps = 0.01%).
        self.commission = commission

    def calculate_commission(self, fill_quantity: float, fill_price: float) -> float:
        """Return |quantity| * price * bps / 10000 (direction-insensitive)."""
        notional = fill_price * abs(fill_quantity)
        return notional * self.commission / 10000
def get_generation_parser(interactive=False, default_task='translation'):
    """Build the argument parser for fairseq generation.

    Args:
        interactive: also register interactive-generation options when True.
        default_task: task name used when none is given on the command line.

    Returns:
        the configured argument parser.
    """
    parser = get_parser('Generation', default_task)
    # Generation needs dataset args (in gen mode), single-world distributed
    # defaults, and the generation options themselves.
    add_dataset_args(parser, gen=True)
    add_distributed_training_args(parser, default_world_size=1)
    add_generation_args(parser)
    if interactive:
        add_interactive_args(parser)
    return parser
def _key_identifier_from_public_key(public_key: CertificatePublicKeyTypes) -> bytes:
    """Return the key identifier: SHA-1 digest over the key's encoded bits.

    RSA keys hash their PKCS#1 DER encoding; EC keys hash the uncompressed
    X9.62 point; everything else hashes the subjectPublicKey bit-string
    extracted from the SPKI structure. (SHA-1 here is the RFC 5280
    key-identifier construction, not a security primitive.)
    """
    if isinstance(public_key, RSAPublicKey):
        digest_input = public_key.public_bytes(serialization.Encoding.DER, serialization.PublicFormat.PKCS1)
    elif isinstance(public_key, EllipticCurvePublicKey):
        digest_input = public_key.public_bytes(serialization.Encoding.X962, serialization.PublicFormat.UncompressedPoint)
    else:
        spki = public_key.public_bytes(serialization.Encoding.DER, serialization.PublicFormat.SubjectPublicKeyInfo)
        digest_input = asn1.parse_spki_for_data(spki)
    return hashlib.sha1(digest_input).digest()
class EnhancedInvBlock(nn.Module):
    """Invertible coupling block splitting channels into (x1, x2).

    Subnets E/F transform the second split into the first's space; G/H do the
    reverse. The log-scale `self.s` and intermediate `self.LR` are stored on
    the instance (jacobian() relies on the most recent `self.s`).
    """

    def __init__(self, subnet_constructor, channel_num, channel_split_num, clamp=1.0):
        super(EnhancedInvBlock, self).__init__()
        self.split_len1 = channel_split_num
        self.split_len2 = channel_num - channel_split_num
        self.clamp = clamp
        self.E = subnet_constructor(self.split_len2, self.split_len1)
        self.F = subnet_constructor(self.split_len2, self.split_len1)
        self.G = subnet_constructor(self.split_len1, self.split_len2)
        self.H = subnet_constructor(self.split_len1, self.split_len2)

    def _log_scale(self, t):
        # Sigmoid-clamped log-scale in (-clamp, clamp).
        return self.clamp * (torch.sigmoid(self.H(t)) * 2 - 1)

    def forward(self, x, rev=False):
        x1 = x.narrow(1, 0, self.split_len1)
        x2 = x.narrow(1, self.split_len1, self.split_len2)
        if not rev:
            # Forward coupling: additive on x1, affine (exp-scale) on x2.
            self.LR = x1 + self.E(x2)
            y1 = self.LR - self.F(x2)
            self.s = self._log_scale(y1)
            y2 = x2.mul(torch.exp(self.s)) + self.G(y1)
        else:
            # Inverse coupling: undo the affine step, then the additive one.
            self.s = self._log_scale(x1)
            y2 = (x2 - self.G(x1)).div(torch.exp(self.s))
            self.LR = x1 + self.F(y2)
            y1 = self.LR - self.E(y2)
        return torch.cat((y1, y2), 1)

    def jacobian(self, x, rev=False):
        """Mean (per-batch-item) log-det Jacobian of the last forward call."""
        total = torch.sum(self.s)
        return (total if not rev else -total) / x.shape[0]
class ResNeXt3DBase(ClassyModel):
    """Base trunk for 3D ResNeXt video models: validates the config, builds the
    video stem, and provides weight init / (2D->3D inflating) state loading.

    NOTE(review): `self.stages` used in forward() is not created here —
    presumably built by a subclass; confirm.
    """

    def __init__(self, input_key, input_planes, clip_crop_size, frames_per_clip, num_blocks, stem_name, stem_planes, stem_temporal_kernel, stem_spatial_kernel, stem_maxpool):
        super(ResNeXt3DBase, self).__init__()
        # When input batches are dicts, _input_key selects the clip tensor.
        self._input_key = input_key
        self.input_planes = input_planes
        self.clip_crop_size = clip_crop_size
        self.frames_per_clip = frames_per_clip
        self.num_blocks = num_blocks
        assert (stem_name in model_stems), ('unknown stem: %s' % stem_name)
        self.stem = model_stems[stem_name](stem_temporal_kernel, stem_spatial_kernel, input_planes, stem_planes, stem_maxpool)

    def _parse_config(config):
        # NOTE(review): no `self`/`cls` parameter — this was presumably decorated
        # @staticmethod in the original file; the decorator appears lost. Confirm.
        """Validate *config* and return a dict of constructor kwargs with defaults filled in."""
        ret_config = {}
        # These keys have no defaults and must be present in the user config.
        required_args = ['input_planes', 'clip_crop_size', 'skip_transformation_type', 'residual_transformation_type', 'frames_per_clip', 'num_blocks']
        for arg in required_args:
            assert (arg in config), ('resnext3d model requires argument %s' % arg)
            ret_config[arg] = config[arg]
        # Stem defaults.
        ret_config.update({'input_key': config.get('input_key', None), 'stem_name': config.get('stem_name', 'resnext3d_stem'), 'stem_planes': config.get('stem_planes', 64), 'stem_temporal_kernel': config.get('stem_temporal_kernel', 3), 'stem_spatial_kernel': config.get('stem_spatial_kernel', 7), 'stem_maxpool': config.get('stem_maxpool', False)})
        # Residual-stage defaults (one entry per stage for the list-valued keys).
        ret_config.update({'stage_planes': config.get('stage_planes', 256), 'stage_temporal_kernel_basis': config.get('stage_temporal_kernel_basis', [[3], [3], [3], [3]]), 'temporal_conv_1x1': config.get('temporal_conv_1x1', [False, False, False, False]), 'stage_temporal_stride': config.get('stage_temporal_stride', [1, 2, 2, 2]), 'stage_spatial_stride': config.get('stage_spatial_stride', [1, 2, 2, 2]), 'num_groups': config.get('num_groups', 1), 'width_per_group': config.get('width_per_group', 64)})
        ret_config.update({'zero_init_residual_transform': config.get('zero_init_residual_transform', False)})
        # Sanity checks on types/positivity of the parsed values.
        assert is_pos_int_list(ret_config['num_blocks'])
        assert is_pos_int(ret_config['stem_planes'])
        assert is_pos_int(ret_config['stem_temporal_kernel'])
        assert is_pos_int(ret_config['stem_spatial_kernel'])
        assert (type(ret_config['stem_maxpool']) == bool)
        assert is_pos_int(ret_config['stage_planes'])
        assert (type(ret_config['stage_temporal_kernel_basis']) == list)
        assert all((is_pos_int_list(l) for l in ret_config['stage_temporal_kernel_basis']))
        assert (type(ret_config['temporal_conv_1x1']) == list)
        assert is_pos_int_list(ret_config['stage_temporal_stride'])
        assert is_pos_int_list(ret_config['stage_spatial_stride'])
        assert is_pos_int(ret_config['num_groups'])
        assert is_pos_int(ret_config['width_per_group'])
        return ret_config

    def _init_parameter(self, zero_init_residual_transform):
        """Initialize conv / batchnorm / linear weights.

        When *zero_init_residual_transform* is set, layers flagged as the final
        transform of a residual branch are zero-initialized so each residual
        block starts as (near) identity.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                if (hasattr(m, 'final_transform_op') and m.final_transform_op and zero_init_residual_transform):
                    nn.init.constant_(m.weight, 0)
                else:
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)
            elif (isinstance(m, nn.BatchNorm3d) and m.affine):
                if (hasattr(m, 'final_transform_op') and m.final_transform_op and zero_init_residual_transform):
                    batchnorm_weight = 0.0
                else:
                    batchnorm_weight = 1.0
                nn.init.constant_(m.weight, batchnorm_weight)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, mean=0.0, std=0.01)
                nn.init.constant_(m.bias, 0)

    def set_classy_state(self, state, strict=True):
        """Load trunk (and head) weights from a Classy *state* dict.

        Conv weights whose source temporal extent is 1 but whose target extent
        is larger are "inflated": repeated along the temporal axis and divided
        by the repeat count, preserving the layer's response on temporally
        constant input. Unknown source weights are skipped with a warning.
        """
        self.load_head_states(state)
        # Heads are detached while loading so the trunk state dict keys line up,
        # then re-attached at the end.
        attached_heads = self.get_heads()
        self.clear_heads()
        current_state = self.state_dict()
        for (name, weight_src) in state['model']['trunk'].items():
            if (name not in current_state):
                logging.warning(f'weight {name} is not found in current ResNeXt3D state')
                continue
            weight_tgt = current_state[name]
            assert (weight_src.dim() == weight_tgt.dim()), 'weight of source- and target 3D convolution should have same dimension'
            if ((weight_src.dim() == 5) and (weight_src.shape[2] == 1) and (weight_tgt.shape[2] > 1)):
                # 5D conv weight is (out, in, T, H, W); inflate T from 1 to target.
                assert ((weight_src.shape[(- 2):] == weight_tgt.shape[(- 2):]) and (weight_src.shape[:2] == weight_tgt.shape[:2])), 'weight shapes of source- and target 3D convolution mismatch'
                weight_src_inflated = (weight_src.repeat(1, 1, weight_tgt.shape[2], 1, 1) / weight_tgt.shape[2])
                weight_src = weight_src_inflated
            else:
                assert all(((weight_src.size(d) == weight_tgt.size(d)) for d in range(weight_src.dim()))), ('the shapes of source and target weight mismatch: %s Vs %s' % (str(weight_src.size()), str(weight_tgt.size())))
            current_state[name] = weight_src.clone()
        self.load_state_dict(current_state, strict=strict)
        self.set_heads(attached_heads)

    def forward(self, x):
        """Run the stem and residual stages on a clip tensor (or a dict holding one).

        Accepts 4D (C, T, H, W) or 5D (N, C, T, H, W) input; 4D input gets a
        singleton temporal axis inserted at dim 2.
        """
        assert (isinstance(x, dict) or isinstance(x, torch.Tensor)), 'x must be either a dictionary or a torch.Tensor'
        if isinstance(x, dict):
            assert ((self._input_key is not None) and (self._input_key in x)), ('input key (%s) not in the input' % self._input_key)
            x = x[self._input_key]
        else:
            assert (self._input_key is None), 'when input of forward pass is a tensor, input key should not be set'
        assert ((x.dim() == 4) or (x.dim() == 5)), 'tensor x must be 4D/5D tensor'
        if (x.dim() == 4):
            x = torch.unsqueeze(x, 2)
        # Stem and stages operate on a list of pathway tensors.
        out = self.stem([x])
        out = self.stages(out)
        return out

    def input_shape(self):
        # NOTE(review): likely decorated @property in the original; confirm.
        """Expected input shape as (channels, frames, crop, crop)."""
        return (self.input_planes, self.frames_per_clip, self.clip_crop_size, self.clip_crop_size)

    def input_key(self):
        # NOTE(review): likely decorated @property in the original; confirm.
        return self._input_key
def _range_indices(df: pd.DataFrame, right: pd.DataFrame, first: tuple, second: tuple):
    """Compute joined row positions for a range join — a pair of opposing
    inequality conditions (e.g. ``left.start <= right.x`` and ``right.x <= left.end``).

    Each of *first*/*second* is a ``(left_column, right_column, op)`` triple.
    Returns ``(left_index, right_index)`` arrays of matching row positions, or
    ``None`` when nothing matches.

    NOTE(review): relies on helpers (`_generic_func_cond_join`, `operator_map`)
    defined elsewhere in this module; their semantics are inferred from usage.
    """
    (left_on, right_on, op) = first
    left_c = df[left_on]
    right_c = right[right_on]
    # Both conditions must hold, so rows null in the *second* condition's
    # columns can never match — drop them up front.
    (left_on, right_on, _) = second
    any_nulls = df[left_on].isna()
    if any_nulls.any():
        left_c = left_c[(~ any_nulls)]
    any_nulls = right[right_on].isna()
    if any_nulls.any():
        right_c = right_c[(~ any_nulls)]
    # First condition: for each surviving left row, find the span of right
    # rows satisfying it (search_indices marks the span ends).
    outcome = _generic_func_cond_join(left=left_c, right=right_c, op=op, multiple_conditions=True, keep='all')
    if (outcome is None):
        return None
    (left_index, right_index, search_indices) = outcome
    # Second condition is evaluated only on the rows surviving the first.
    (left_on, right_on, op) = second
    right_c = right.loc[(right_index, right_on)]
    left_c = df.loc[(left_index, left_on)]
    # A monotonic right column permits a direct keep='first' search.
    fastpath = right_c.is_monotonic_increasing
    if fastpath:
        outcome = _generic_func_cond_join(left=left_c, right=right_c, op=op, multiple_conditions=False, keep='first')
        if (outcome is None):
            return None
        (left_c, pos) = outcome
    else:
        # cummax makes the column monotonic, yielding a superset of matches;
        # exact filtering happens after expansion below.
        outcome = _generic_func_cond_join(left=left_c, right=right_c.cummax(), op=op, multiple_conditions=True, keep='all')
        if (outcome is None):
            return None
        (left_c, right_index, pos) = outcome
    if (left_c.size < left_index.size):
        # Some left rows dropped out under the second condition; realign the
        # first-condition bookkeeping to the survivors.
        keep_rows = np.isin(left_index, left_c, assume_unique=True)
        search_indices = search_indices[keep_rows]
        left_index = left_c
    # A pair can only match where the second-condition start position precedes
    # the first-condition span end.
    keep_rows = (pos < search_indices)
    if (not keep_rows.any()):
        return None
    if (not keep_rows.all()):
        left_index = left_index[keep_rows]
        pos = pos[keep_rows]
        search_indices = search_indices[keep_rows]
    repeater = (search_indices - pos)
    if (repeater == 1).all():
        # Exactly one right match per left row: no expansion needed.
        return (left_index, right_index[pos])
    # Expand each left row to its full span of matching right rows.
    right_index = [right_index[start:end] for (start, end) in zip(pos, search_indices)]
    right_index = np.concatenate(right_index)
    left_index = np.repeat(left_index, repeater)
    if fastpath:
        return (left_index, right_index)
    # Non-monotonic path used cummax (a superset), so re-check the second
    # condition exactly on the expanded pairs.
    left_c = df[left_on]._values[left_index]
    right_c = right[right_on]._values[right_index]
    ext_arr = is_extension_array_dtype(left_c)
    op = operator_map[op]
    mask = op(left_c, right_c)
    if ext_arr:
        # Extension-array comparison yields a masked boolean; treat NA as no-match.
        mask = mask.to_numpy(dtype=bool, na_value=False)
    if (not mask.all()):
        left_index = left_index[mask]
        right_index = right_index[mask]
    return (left_index, right_index)
def remove_already_run_experiments(table, experiments):
    """Return only the experiments with no 'test_loglik' result recorded in *table*.

    Opens a single database connection, checks each experiment for an existing
    result row, and prints a summary of how many runs will be skipped.
    """
    with Database() as db:
        pending = [exp for exp in experiments
                   if len(db.read(table, ['test_loglik'], exp)) == 0]
    s = 'originally {} experiments, but {} have already been run, so running {} experiments'
    print(s.format(len(experiments), (len(experiments) - len(pending)), len(pending)))
    return pending
def main(opt):
    """Translate ``opt.src`` (optionally scoring against ``opt.tgt``) into ``opt.output``.

    Builds a translator from the parsed command-line options, streams the
    source (and optional target) text files, and runs batched translation,
    writing results to the UTF-8 output file.
    """
    translator = build_translator(opt)
    # Context manager guarantees the output handle is closed even when
    # translate() raises — the original leaked the file on error.
    with codecs.open(opt.output, 'w+', 'utf-8') as out_file:
        src_iter = make_text_iterator_from_file(opt.src)
        # Target iterator is only needed when gold references are supplied.
        tgt_iter = make_text_iterator_from_file(opt.tgt) if opt.tgt is not None else None
        translator.translate(src_data_iter=src_iter, tgt_data_iter=tgt_iter, batch_size=opt.batch_size, out_file=out_file)
class FrameCapture(QObject):
    """Load a URL off-screen in a QWebPage and save every frame as an image.

    Emits ``finished`` after all frames (main frame plus child frames,
    recursively) have been rendered, or immediately when the load fails.
    """
    finished = pyqtSignal()

    def __init__(self):
        super(FrameCapture, self).__init__()
        self._percent = 0  # last load-progress value echoed to the console
        self._page = QWebPage()
        # Scrollbars would otherwise be rendered into the captured image.
        self._page.mainFrame().setScrollBarPolicy(Qt.Vertical, Qt.ScrollBarAlwaysOff)
        self._page.mainFrame().setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff)
        self._page.loadProgress.connect(self.printProgress)
        self._page.loadFinished.connect(self.saveResult)

    def load(self, url, outputFileName):
        """Begin loading *url*; output filenames derive from *outputFileName* ('.png' default)."""
        cout(('Loading %s\n' % url.toString()))
        self._percent = 0
        index = outputFileName.rfind('.')
        # and/or idiom: append '.png' only when the name has no extension.
        self._fileName = (((index == (- 1)) and (outputFileName + '.png')) or outputFileName)
        self._page.mainFrame().load(url)
        self._page.setViewportSize(QSize(1024, 768))

    def printProgress(self, percent):
        # Echo roughly one '#' per percent point of load progress.
        if (self._percent >= percent):
            return
        # NOTE(review): this extra increment before the loop makes the number
        # of '#' printed lag the reported percent by one — confirm this matches
        # the original's intent.
        self._percent += 1
        while (self._percent < percent):
            self._percent += 1
            cout('#')

    def saveResult(self, ok):
        """loadFinished handler: render all frames on success, then signal completion."""
        cout('\n')
        if (not ok):
            cerr(('Failed loading %s\n' % self._page.mainFrame().url().toString()))
            self.finished.emit()
            return
        self._frameCounter = 0
        self.saveFrame(self._page.mainFrame())
        self.finished.emit()

    def saveFrame(self, frame):
        """Render *frame* to a PNG, then recurse into its child frames."""
        fileName = self._fileName
        if self._frameCounter:
            # Child frames get '_frame<N>' inserted before the extension.
            index = fileName.rfind('.')
            fileName = ('%s_frame%s%s' % (fileName[:index], self._frameCounter, fileName[index:]))
        image = QImage(frame.contentsSize(), QImage.Format_ARGB32_Premultiplied)
        image.fill(Qt.transparent)
        painter = QPainter(image)
        painter.setRenderHint(QPainter.Antialiasing, True)
        painter.setRenderHint(QPainter.TextAntialiasing, True)
        painter.setRenderHint(QPainter.SmoothPixmapTransform, True)
        frame.documentElement().render(painter)
        painter.end()
        image.save(fileName)
        self._frameCounter += 1
        for childFrame in frame.childFrames():
            self.saveFrame(childFrame)
# NOTE(review): the `.supported(...)` line below looks like a truncated
# `@pytest.mark.supported(...)` decorator — the `@pytest.mark` prefix appears
# to have been lost in extraction; confirm against the original file.
.supported(only_if=(lambda backend: backend.hash_supported(hashes.SHAKE128(digest_size=16))), skip_message='Does not support SHAKE128')
class TestSHAKE128():
    """NIST CAVP vector tests for SHAKE128 (fixed and variable output lengths)."""
    # Long/short message vectors at the fixed 16-byte digest size.
    test_shake128 = generate_hash_test(load_hash_vectors, os.path.join('hashes', 'SHAKE'), ['SHAKE128LongMsg.rsp', 'SHAKE128ShortMsg.rsp'], hashes.SHAKE128(digest_size=16))

    def test_shake128_variable(self, backend, subtests):
        """Check SHAKE128 against the variable-output-length CAVP vectors."""
        vectors = _load_all_params(os.path.join('hashes', 'SHAKE'), ['SHAKE128VariableOut.rsp'], load_nist_vectors)
        for vector in vectors:
            with subtests.test():
                # 'outputlen' is given in bits in the vector file; convert to bytes.
                output_length = (int(vector['outputlen']) // 8)
                msg = binascii.unhexlify(vector['msg'])
                shake = hashes.SHAKE128(digest_size=output_length)
                m = hashes.Hash(shake, backend=backend)
                m.update(msg)
                assert (m.finalize() == binascii.unhexlify(vector['output']))
class AggregationLayer(Block):
    """Residual block whose skip and body paths are blended by learned gates.

    alpha1/beta1 weight the identity (skip) path and alpha2/beta2 weight the
    residual body path; at inference the body path is skipped entirely when
    alpha1 >= alpha2, accumulating a speed credit otherwise.

    NOTE(review): ``self.body`` is never assigned in this class — presumably
    provided by ``Block`` or a subclass; confirm. ``ConditionFunction`` is an
    external autograd function whose exact gating rule is defined elsewhere.
    """

    def __init__(self, **kwargs):
        super(AggregationLayer, self).__init__(**kwargs)
        # Skip-path gate: alpha1 small and positive, beta1 starts at zero.
        self.alpha1 = nn.Parameter(data=torch.empty(1), requires_grad=True)
        self.beta1 = nn.Parameter(data=torch.zeros(1), requires_grad=True)
        init.uniform_(self.alpha1, 0, 0.2)
        # Body-path gate: alpha2 near one, beta2 starts at one.
        self.alpha2 = nn.Parameter(data=torch.empty(1), requires_grad=True)
        self.beta2 = nn.Parameter(data=torch.ones(1), requires_grad=True)
        init.uniform_(self.alpha2, 0.8, 1)

    def forward(self, x, speed_curr, speed_accu):
        """Blend skip/body outputs (training) or conditionally skip the body (eval).

        Returns the transformed tensor and the updated speed accumulator.
        """
        if self.training:
            sr1 = x
            sr2 = (self.body(x) + x)
            # ConditionFunction derives the effective betas from the alphas;
            # the results are written back into the beta parameters in-place.
            (beta1, beta2) = ConditionFunction.apply(self.alpha1, self.alpha2, self.beta1, self.beta2)
            (self.beta1.data, self.beta2.data) = (beta1, beta2)
            x = ((beta1 * sr1) + (beta2 * sr2))
            speed_accu = ((beta2 * speed_curr) + speed_accu)
            return (x, speed_accu)
        else:
            if (self.alpha1 >= self.alpha2):
                # Skip path dominates: body is not executed at all (the speedup).
                pass
            else:
                x = (self.body(x) + x)
                speed_accu = (speed_accu + (self.beta2 * speed_curr))
            return (x, speed_accu)

    def get_num_channels(self):
        """Collect the in-channel counts of the body's conv layers (plus the first again)."""
        channels = []
        for m in self.body.children():
            if isinstance(m, nn.Conv2d):
                channels.append(m.in_channels)
        # NOTE(review): the first entry is appended again at the end — looks
        # deliberate (closing the channel cycle); confirm.
        channels.append(channels[0])
        return channels
def test_script(path, executable='python'):
    """Run one example script in a subprocess and report pass/fail.

    Returns True when the script exits with status 0; on failure the child's
    stdout and stderr are echoed and False is returned. Ctrl-C terminates the
    child (if any) and aborts the whole run via sys.exit(1).
    """
    import pybamm
    b = pybamm.Timer()
    print((('Test ' + path) + ' ... '), end='')
    sys.stdout.flush()
    env = dict(os.environ)
    # Non-interactive matplotlib backend so scripts never open plot windows.
    env['MPLBACKEND'] = 'Template'
    cmd = [executable, path]
    # Fix: the original referenced `p` in the KeyboardInterrupt handler even
    # when the interrupt fired before Popen assigned it, raising NameError
    # instead of aborting cleanly.
    p = None
    try:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        (stdout, stderr) = p.communicate()
        if (p.returncode != 0):
            print('ERROR')
            print(('-- stdout ' + ('-' * (79 - 10))))
            print(str(stdout, 'utf-8'))
            print(('-- stderr ' + ('-' * (79 - 10))))
            print(str(stderr, 'utf-8'))
            print(('-' * 79))
            return False
    except KeyboardInterrupt:
        if p is not None:
            p.terminate()
        print('ABORTED')
        sys.exit(1)
    print(f'ok ({b.time()})')
    return True
class TestMESolveDecay():
    """mesolve regression tests on a decaying cavity mode, comparing the
    photon-number expectation value against closed-form analytic answers.

    NOTE(review): the bare ``(params=[...])`` expressions before the fixture
    methods and the bare ``.parametrize(...)`` lines before some tests look
    like truncated ``@pytest.fixture(params=...)`` and
    ``@pytest.mark.parametrize(...)`` decorators whose ``@pytest...`` prefix
    was lost in extraction; confirm against the original file.
    """
    N = 10  # Hilbert-space dimension
    a = qutip.destroy(N)
    kappa = 0.2  # decay rate
    tlist = np.linspace(0, 10, 201)
    ada = (a.dag() * a)  # number operator

    # Hamiltonian supplied in every accepted format (list+func, list+str,
    # list+array, QobjEvo, bare function).
    (params=[pytest.param([ada, (lambda t, args: 1)], id='Hlist_func'), pytest.param([ada, '1'], id='Hlist_str'), pytest.param([ada, np.ones_like(tlist)], id='Hlist_array'), pytest.param(qutip.QobjEvo([ada, '1']), id='HQobjEvo'), pytest.param((lambda t, args: (qutip.create(10) * qutip.destroy(10))), id='func')])
    def H(self, request):
        return request.param

    # Constant-rate collapse operator in every accepted format.
    (params=[pytest.param((np.sqrt(kappa) * a), id='const'), pytest.param((lambda t, args: (np.sqrt(args['kappa']) * qutip.destroy(10))), id='func'), pytest.param([a, (lambda t, args: np.sqrt(args['kappa']))], id='list_func'), pytest.param([a, 'sqrt(kappa)'], id='list_str'), pytest.param([a, (np.sqrt(kappa) * np.ones_like(tlist))], id='list_array'), pytest.param(qutip.QobjEvo([a, 'sqrt(kappa)'], args={'kappa': kappa}), id='QobjEvo')])
    def cte_c_ops(self, request):
        return request.param

    # Time-dependent collapse operator (rate kappa * exp(-t)) in every format.
    (params=[pytest.param([a, (lambda t, args: np.sqrt((args['kappa'] * np.exp((- t)))))], id='list_func'), pytest.param([a, 'sqrt(kappa * exp(-t))'], id='list_str'), pytest.param([a, np.sqrt((kappa * np.exp((- tlist))))], id='list_array'), pytest.param(qutip.QobjEvo([a, 'sqrt(kappa * exp(-t))'], args={'kappa': kappa}), id='QobjEvo'), pytest.param((lambda t, args: (np.sqrt((args['kappa'] * np.exp((- t)))) * qutip.destroy(10))), id='func')])
    def c_ops(self, request):
        return request.param

    # Second, independently-parametrized instance of the same fixture.
    c_ops_1 = c_ops

    def testME_CteDecay(self, cte_c_ops):
        """Constant decay: <n>(t) = 9 * exp(-kappa * t)."""
        me_error = 1e-06
        H = (self.a.dag() * self.a)
        psi0 = qutip.basis(self.N, 9)
        c_op_list = [cte_c_ops]
        options = {'progress_bar': None}
        medata = mesolve(H, psi0, self.tlist, c_op_list, [H], args={'kappa': self.kappa}, options=options)
        expt = medata.expect[0]
        actual_answer = (9.0 * np.exp(((- self.kappa) * self.tlist)))
        np.testing.assert_allclose(actual_answer, expt, atol=me_error)

    # NOTE(review): likely a truncated @pytest.mark.parametrize — see class docstring.
    .parametrize('method', all_ode_method, ids=all_ode_method)
    def testME_TDDecay(self, c_ops, method):
        """Rate kappa*exp(-t): <n>(t) = 9 * exp(-kappa * (1 - exp(-t)))."""
        me_error = 1e-05
        H = (self.a.dag() * self.a)
        psi0 = qutip.basis(self.N, 9)
        c_op_list = [c_ops]
        options = {'method': method, 'progress_bar': None}
        medata = mesolve(H, psi0, self.tlist, c_op_list, [H], args={'kappa': self.kappa}, options=options)
        expt = medata.expect[0]
        actual_answer = (9.0 * np.exp(((- self.kappa) * (1.0 - np.exp((- self.tlist))))))
        np.testing.assert_allclose(actual_answer, expt, rtol=me_error)

    def testME_2TDDecay(self, c_ops, c_ops_1):
        """Two identical TD collapse operators double the effective rate (factor -2*kappa)."""
        me_error = 1e-05
        H = (self.a.dag() * self.a)
        psi0 = qutip.basis(self.N, 9)
        c_op_list = [c_ops, c_ops_1]
        options = {'progress_bar': None}
        medata = mesolve(H, psi0, self.tlist, c_op_list, [H], args={'kappa': self.kappa}, options=options)
        expt = medata.expect[0]
        actual_answer = (9.0 * np.exp((((- 2) * self.kappa) * (1.0 - np.exp((- self.tlist))))))
        np.testing.assert_allclose(actual_answer, expt, atol=me_error)

    def testME_TDH_TDDecay(self, H, c_ops):
        """Time-dependent H with a TD collapse operator still matches the analytic decay."""
        me_error = 5e-06
        psi0 = qutip.basis(self.N, 9)
        c_op_list = [c_ops]
        options = {'progress_bar': None}
        medata = mesolve(H, psi0, self.tlist, c_op_list, [self.ada], args={'kappa': self.kappa}, options=options)
        expt = medata.expect[0]
        actual_answer = (9.0 * np.exp(((- self.kappa) * (1.0 - np.exp((- self.tlist))))))
        np.testing.assert_allclose(actual_answer, expt, atol=me_error)

    def testME_TDH_longTDDecay(self, H, c_ops):
        """Doubling the TD coefficient quadruples the rate (factor -4*kappa)."""
        me_error = 2e-05
        psi0 = qutip.basis(self.N, 9)
        # A bare-function c_op cannot be doubled via the list format; skip it.
        if isinstance(c_ops, FunctionType):
            return
        if isinstance(c_ops, qutip.QobjEvo):
            c_op_list = [(c_ops + c_ops)]
        else:
            c_op_list = [[c_ops, c_ops]]
        options = {'progress_bar': None}
        medata = mesolve(H, psi0, self.tlist, c_op_list, [self.ada], args={'kappa': self.kappa}, options=options)
        expt = medata.expect[0]
        actual_answer = (9.0 * np.exp((((- 4) * self.kappa) * (1.0 - np.exp((- self.tlist))))))
        np.testing.assert_allclose(actual_answer, expt, atol=me_error)

    def testME_TDDecayUnitary(self, c_ops):
        """Evolving a state directly vs. through the propagator gives the same result."""
        me_error = 5e-06
        H = self.ada
        psi0 = qutip.basis(self.N, 9)
        rho0vec = qutip.operator_to_vector((psi0 * psi0.dag()))
        # Identity superoperator: evolving it yields the propagator.
        E0 = qutip.sprepost(qutip.qeye(self.N), qutip.qeye(self.N))
        options = {'progress_bar': None}
        c_op_list = [c_ops]
        out1 = mesolve(H, psi0, self.tlist, c_op_list, [], args={'kappa': self.kappa}, options=options)
        out2 = mesolve(H, E0, self.tlist, c_op_list, [], args={'kappa': self.kappa}, options=options)
        fid = fidelitycheck(out1, out2, rho0vec)
        assert (fid == pytest.approx(1.0, abs=me_error))

    def testME_TDDecayliouvillian(self, c_ops):
        """Same as the unitary check, but with H pre-wrapped as a Liouvillian."""
        me_error = 5e-06
        H = self.ada
        L = qutip.liouvillian(H)
        psi0 = qutip.basis(self.N, 9)
        rho0vec = qutip.operator_to_vector((psi0 * psi0.dag()))
        E0 = qutip.sprepost(qutip.qeye(self.N), qutip.qeye(self.N))
        options = {'progress_bar': None}
        c_op_list = [c_ops]
        out1 = mesolve(L, psi0, self.tlist, c_op_list, [], args={'kappa': self.kappa}, options=options)
        out2 = mesolve(L, E0, self.tlist, c_op_list, [], args={'kappa': self.kappa}, options=options)
        fid = fidelitycheck(out1, out2, rho0vec)
        assert (fid == pytest.approx(1.0, abs=me_error))

    # NOTE(review): likely a truncated @pytest.mark.parametrize — see class docstring.
    .parametrize(['state_type'], [pytest.param('ket', id='ket'), pytest.param('dm', id='dm'), pytest.param('unitary', id='unitary')])
    def test_mesolve_normalization(self, state_type):
        """normalize_output keeps ket/dm states at unit norm but not superoperators."""
        H = qutip.Qobj([[1, (- 0.1j)], [(- 0.1j), 1]])
        H = qutip.sprepost(H, H)
        psi0 = qutip.basis(2, 0)
        options = {'normalize_output': True, 'progress_bar': None}
        if (state_type in {'ket', 'dm'}):
            if (state_type == 'dm'):
                psi0 = qutip.ket2dm(psi0)
            output = mesolve(H, psi0, self.tlist, e_ops=[], options=options)
            norms = [state.norm() for state in output.states]
            np.testing.assert_allclose(norms, [1.0 for _ in self.tlist], atol=1e-15)
        else:
            # Superoperator evolution must NOT be normalized; its norm grows.
            U = qutip.sprepost(qutip.qeye(2), qutip.qeye(2))
            output = mesolve(H, U, self.tlist, e_ops=[], options=options)
            norms = [state.norm() for state in output.states]
            assert all(((norm > 4) for norm in norms[1:]))

    def test_mesolver_pickling(self):
        """A pickled-and-restored MESolver reproduces the original's expectation values."""
        options = {'progress_bar': None}
        solver_obj = MESolver(self.ada, c_ops=[self.a], options=options)
        solver_copy = pickle.loads(pickle.dumps(solver_obj))
        e1 = solver_obj.run(qutip.basis(self.N, 9), [0, 1, 2, 3], e_ops=[self.ada]).expect[0]
        e2 = solver_copy.run(qutip.basis(self.N, 9), [0, 1, 2, 3], e_ops=[self.ada]).expect[0]
        np.testing.assert_allclose(e1, e2)

    # NOTE(review): likely a truncated @pytest.mark.parametrize — see class docstring.
    .parametrize('method', all_ode_method, ids=all_ode_method)
    def test_mesolver_stepping(self, method):
        """The step() interface evolves the state and honors mid-run args updates."""
        options = {'method': method, 'progress_bar': None}
        solver_obj = MESolver(self.ada, c_ops=qutip.QobjEvo([self.a, (lambda t, kappa: np.sqrt((kappa * np.exp((- t)))))], args={'kappa': 1.0}), options=options)
        solver_obj.start(qutip.basis(self.N, 9), 0)
        state1 = solver_obj.step(1)
        assert (qutip.expect(self.ada, state1) != qutip.expect(self.ada, qutip.basis(self.N, 9)))
        # With kappa set to 0 the decay stops, so <n> stays (nearly) constant.
        state2 = solver_obj.step(2, args={'kappa': 0.0})
        np.testing.assert_allclose(qutip.expect(self.ada, state1), qutip.expect(self.ada, state2), 1e-06)
def main():
    """Convert every Cityscapes *_polygons.json annotation into a *_labelTrainIds.png image."""
    # Dataset root: $CITYSCAPES_DATASET if set, otherwise two levels above this script.
    if ('CITYSCAPES_DATASET' in os.environ):
        cityscapesPath = os.environ['CITYSCAPES_DATASET']
    else:
        cityscapesPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')
    searchFine = os.path.join(cityscapesPath, 'gtFine', '*', '*', '*_gt*_polygons.json')
    searchCoarse = os.path.join(cityscapesPath, 'gtCoarse', '*', '*', '*_gt*_polygons.json')
    filesFine = sorted(glob.glob(searchFine))
    filesCoarse = sorted(glob.glob(searchCoarse))
    files = (filesFine + filesCoarse)
    if (not files):
        # NOTE(review): printError presumably aborts; otherwise the progress
        # division below would hit ZeroDivisionError — confirm.
        printError('Did not find any files. Please consult the README.')
    print('Processing {} annotation files'.format(len(files)))
    progress = 0
    # Fix: the original used true division, so '{:>3} %' printed a long float
    # (e.g. '14.285714285714286 %') instead of a 3-character integer percentage.
    print('Progress: {:>3} %'.format(((progress * 100) // len(files))), end=' ')
    for f in files:
        dst = f.replace('_polygons.json', '_labelTrainIds.png')
        try:
            json2labelImg(f, dst, 'trainIds')
        except:
            # Report which file failed, then re-raise so the run still aborts.
            print('Failed to convert: {}'.format(f))
            raise
        progress += 1
        print('\rProgress: {:>3} %'.format(((progress * 100) // len(files))), end=' ')
        sys.stdout.flush()
def create_dummy_data(size):
    """Generate a DataFrame of *size* rows of random mixed-type demo data.

    Columns cover dates, categorical ids, int/float/string values (floats
    include NaN) and several random distributions — useful for exercising
    aggregation and grouping code. Values are random; only the schema and the
    row count are stable across calls.
    """
    user_ids = np.random.randint(1, 1000000, 10)
    product_ids = np.random.randint(1, 1000000, 100)

    def choice(*values):
        # Sample `size` items uniformly from the given candidate values.
        return np.random.choice(values, size)

    random_dates = [(datetime.date(2016, 1, 1) + datetime.timedelta(days=int(delta))) for delta in np.random.randint(1, 50, size)]
    # Fix: pd.DataFrame.from_items was removed in pandas 1.0. A plain dict
    # preserves column order (Python 3.7+ insertion order) and is the
    # supported replacement.
    return pd.DataFrame({
        'Date': random_dates,
        'UserID': choice(*user_ids),
        'ProductID': choice(*product_ids),
        'IntColumn': choice(1, 2, 3),
        'FloatColumn': choice(np.nan, 1.0, 2.0, 3.0),
        'StringColumn': choice('A', 'B', 'C'),
        'Gaussian 1': np.random.normal(0, 1, size),
        'Gaussian 2': np.random.normal(0, 1, size),
        'Uniform': np.random.uniform(0, 1, size),
        'Binomial': np.random.binomial(20, 0.1, size),
        'Poisson': np.random.poisson(1.0, size),
    })