text
string
size
int64
token_count
int64
# coding: utf-8
"""Small demo of Python list operations.

Shows length, indexing, item assignment, membership testing and range().
Ported from Python 2 (``print`` statement) to Python 3 (``print()`` function);
``range()`` results are wrapped in ``list()`` so the full sequence is printed,
matching the Python 2 output of the original script.
"""

sales = [255, 100, 353, 400]

# Length and item access.
print(len(sales))
print(sales[2])

# Lists are mutable: replace the third element in place.
sales[2] = 100
print(sales[2])

# Membership tests (does the list contain the value?).
print(100 in sales)
print(500 in sales)

# range(): stop only, start+stop, start+stop+step.
print(list(range(10)))
print(list(range(3, 10)))
print(list(range(3, 10, 2)))
219
123
#!/usr/bin/env python # -*- coding: utf-8 -*- import argparse import coloredlogs import logging import json import itertools import shlex import time import queue import sys import os import collections import tempfile from jsonpath_ng import jsonpath, parse from .runner import AsyncRunner from .common import merge_pvals, booltest_pval from . import common logger = logging.getLogger(__name__) coloredlogs.install(level=logging.INFO) """ Config can look like this: { "default-cli": "--no-summary --json-out --log-prints --top 128 --no-comb-and --only-top-comb --only-top-deg --no-term-map --topterm-heap --topterm-heap-k 256 --best-x-combs 512", "strategies": [ { "name": "v1", "cli": "", "variations": [ { "bl": [128, 256, 384, 512], "deg": [1], "cdeg": [1], "exclusions": [] } ] }, { "name": "halving", "cli": "--halving", "variations": [ { "bl": [128, 256, 384, 512], "deg": [1, 2, 3], "cdeg": [1, 2, 3], "exclusions": [] } ] } ] } """ def jsonpath(path, obj, allow_none=False): r = [m.value for m in parse(path).find(obj)] return r[0] if not allow_none else (r[0] if r else None) def listize(obj): return obj if (obj is None or isinstance(obj, list)) else [obj] def get_runner(cli, cwd=None, rtt_env=None): async_runner = AsyncRunner(cli, cwd=cwd, shell=False, env=rtt_env) async_runner.log_out_after = False async_runner.preexec_setgrp = True return async_runner class BoolParamGen: def __init__(self, cli, vals): self.cli = cli self.vals = vals if isinstance(vals, list) else [vals] class BoolJob: def __init__(self, cli, name, vinfo='', idx=None): self.cli = cli self.name = name self.vinfo = vinfo self.idx = idx def is_halving(self): return '--halving' in self.cli class BoolRes: def __init__(self, job, ret_code, js_res, is_halving, rejects=False, pval=None, alpha=None, stderr=None): self.job = job # type: BoolJob self.ret_code = ret_code self.js_res = js_res self.is_halving = is_halving self.rejects = rejects self.alpha = alpha self.pval = pval self.stderr = stderr class 
BoolRunner: def __init__(self): self.args = None self.bool_config = None self.parallel_tasks = None self.bool_wrapper = None self.job_queue = queue.Queue(maxsize=0) self.runners = [] # type: List[Optional[AsyncRunner]] self.comp_jobs = [] # type: List[Optional[BoolJob]] self.results = [] def init_config(self): self.parallel_tasks = self.args.threads or 1 self.bool_wrapper = self.args.booltest_bin try: if self.args.config: with open(self.args.config) as fh: self.bool_config = json.load(fh) if not self.bool_wrapper: self.bool_wrapper = jsonpath("$.wrapper", self.bool_config, True) if not self.args.threads: self.parallel_tasks = jsonpath("$.threads", self.bool_config, True) or self.args.threads or 1 except Exception as e: logger.error("Could not load the config %s" % (e,), exc_info=e) if not self.bool_wrapper: self.bool_wrapper = "\"%s\" -m booltest.booltest_main" % sys.executable def norm_methods(self, methods): res = set() for m in methods: if m == 'v1': res.add('1') elif m == '1': res.add(m) elif m == 'halving': res.add('2') elif m == 'v2': res.add('2') elif m == '2': res.add(m) else: raise ValueError("Unknown method %s" % m) return sorted(list(res)) def norm_params(self, params, default): if params is None or len(params) == 0: return default return [int(x) for x in params] def generate_jobs(self): dcli = self.args.cli if dcli is None: dcli = jsonpath('$.default-cli', self.bool_config, True) if dcli is None: dcli = '--no-summary --json-out --log-prints --top 128 --no-comb-and --only-top-comb --only-top-deg ' \ '--no-term-map --topterm-heap --topterm-heap-k 256 --best-x-combs 512' if '--no-summary' not in dcli: dcli += ' --no-summary' if '--json-out' not in dcli: dcli += ' --json-out' if '--log-prints' not in dcli: dcli += ' --log-prints' strategies = jsonpath('$.strategies', self.bool_config, True) if strategies is None: strategies = [] methods = self.norm_methods(self.args.methods or ["1", "2"]) for mt in methods: strat = collections.OrderedDict() strat['name'] = 
"v%s" % mt strat['cli'] = "--halving" if mt == '2' else '' strat['variations'] = [collections.OrderedDict([ ('bl', self.norm_params(self.args.block, [128, 256, 384, 512])), ('deg', self.norm_params(self.args.deg, [1, 2])), ('cdeg', self.norm_params(self.args.comb_deg, [1, 2])), ('exclusions', []), ])] strategies.append(strat) for st in strategies: name = st['name'] st_cli = jsonpath('$.cli', st, True) or '' st_vars = jsonpath('$.variations', st, True) or [] ccli = ('%s %s' % (dcli, st_cli)).strip() if not st_vars: yield BoolJob(ccli, name) continue for cvar in st_vars: blocks = listize(jsonpath('$.bl', cvar, True)) or [None] degs = listize(jsonpath('$.deg', cvar, True)) or [None] cdegs = listize(jsonpath('$.cdeg', cvar, True)) or [None] pcli = ['--block', '--degree', '--combine-deg'] vinfo = ['', '', ''] iterator = itertools.product(blocks, degs, cdegs) for el in iterator: c = ' '.join([(('%s %s') % (pcli[ix], dt)) for (ix, dt) in enumerate(el) if dt is not None]) vi = '-'.join([(('%s%s') % (vinfo[ix], dt)).strip() for (ix, dt) in enumerate(el) if dt is not None]) ccli0 = ('%s %s' % (ccli, c)).strip() yield BoolJob(ccli0, name, vi) def run_job(self, cli): async_runner = get_runner(shlex.split(cli)) logger.info("Starting async command %s" % cli) async_runner.start() while async_runner.is_running: time.sleep(1) logger.info("Async command finished") def on_finished(self, job, runner, idx): if runner.ret_code != 0: logger.warning("Return code of job %s is %s" % (idx, runner.ret_code)) stderr = ("\n".join(runner.err_acc)).strip() br = BoolRes(job, runner.ret_code, None, job.is_halving, stderr=stderr) self.results.append(br) return results = runner.out_acc buff = (''.join(results)).strip() try: js = json.loads(buff) is_halving = js['halving'] br = BoolRes(job, 0, js, is_halving) if not is_halving: br.rejects = [m.value for m in parse('$.inputs[0].res[0].rejects').find(js)][0] br.alpha = [m.value for m in parse('$.inputs[0].res[0].ref_alpha').find(js)][0] 
logger.info('rejects: %s, at alpha %.5e' % (br.rejects, br.alpha)) else: br.pval = [m.value for m in parse('$.inputs[0].res[1].halvings[0].pval').find(js)][0] logger.info('halving pval: %5e' % br.pval) self.results.append(br) except Exception as e: logger.error("Exception processing results: %s" % (e,), exc_info=e) logger.warning("[[[%s]]]" % buff) def on_results_ready(self): try: logger.info("="*80) logger.info("Results") ok_results = [r for r in self.results if r.ret_code == 0] nok_results = [r for r in self.results if r.ret_code != 0] bat_errors = ['Job %d (%s-%s), ret_code %d' % (r.job.idx, r.job.name, r.job.vinfo, r.ret_code) for r in self.results if r.ret_code != 0] if nok_results: logger.warning("Some jobs failed with error: \n%s" % ("\n".join(bat_errors))) for r in nok_results: logger.info("Job %s, (%s-%s)" % (r.job.idx, r.job.name, r.job.vinfo)) logger.info("Stderr: %s" % r.stderr) v1_jobs = [r for r in ok_results if not r.is_halving] v2_jobs = [r for r in ok_results if r.is_halving] v1_sum = collections.OrderedDict() v2_sum = collections.OrderedDict() if v1_jobs: rejects = [r for r in v1_jobs if r.rejects] v1_sum['alpha'] = max([x.alpha for x in v1_jobs]) v1_sum['pvalue'] = booltest_pval(nfails=len(rejects), ntests=len(v1_jobs), alpha=v1_sum['alpha']) v1_sum['npassed'] = sum([1 for r in v1_jobs if not r.rejects]) if v2_jobs: pvals = [r.pval for r in v2_jobs] v2_sum['npassed'] = sum([1 for r in v2_jobs if r.pval >= self.args.alpha]) v2_sum['pvalue'] = merge_pvals(pvals)[0] if len(pvals) > 1 else -1 if v1_jobs: logger.info("V1 results:") self.print_test_res(v1_jobs) if v2_jobs: logger.info("V2 results:") self.print_test_res(v2_jobs) logger.info("=" * 80) logger.info("Summary: ") if v1_jobs: logger.info("v1 tests: %s, #passed: %s, pvalue: %s" % (len(v1_jobs), v1_sum['npassed'], v1_sum['pvalue'])) if v2_jobs: logger.info("v2 tests: %s, #passed: %s, pvalue: %s" % (len(v2_jobs), v2_sum['npassed'], v2_sum['pvalue'])) if not self.args.json_out and not 
self.args.json_out_file: return jsout = collections.OrderedDict() jsout["nfailed_jobs"] = len(nok_results) jsout["failed_jobs_stderr"] = [r.stderr for r in nok_results] jsout["results"] = common.noindent_poly([r.js_res for r in ok_results]) kwargs = {'indent': 2} if self.args.json_nice else {} if self.args.json_out: print(common.json_dumps(jsout, **kwargs)) if self.args.json_out_file: with open(self.args.json_out_file, 'w+') as fh: common.json_dump(jsout, fh, **kwargs) jsout = common.jsunwrap(jsout) return jsout except Exception as e: logger.warning("Exception in results processing: %s" % (e,), exc_info=e) def print_test_res(self, res): for rs in res: # type: BoolRes passed = (rs.pval >= self.args.alpha if rs.is_halving else not rs.rejects) if rs.ret_code == 0 else None desc_str = "" if rs.is_halving: desc_str = "pvalue: %5e" % (rs.pval,) else: desc_str = "alpha: %5e" % (rs.alpha,) res = rs.js_res["inputs"][0]["res"] dist_poly = jsonpath('$[0].dists[0].poly', res, True) time_elapsed = jsonpath('$.time_elapsed', rs.js_res, True) best_dist_zscore = jsonpath('$[0].dists[0].zscore', res, True) or -1 ref_zscore_min = jsonpath('$[0].ref_minmax[0]', res, True) or -1 ref_zscore_max = jsonpath('$[0].ref_minmax[1]', res, True) or -1 aux_str = "" if rs.is_halving: best_dist_zscore_halving = jsonpath('$[1].dists[0].zscore', res, True) aux_str = "Learn: (z-score: %.5f, acc. zscores: [%.5f, %.5f]), Eval: (z-score: %.5f)" \ % (best_dist_zscore, ref_zscore_min, ref_zscore_max, best_dist_zscore_halving) else: aux_str = "z-score: %.5f, acc. 
zscores: [%.5f, %.5f]" \ % (best_dist_zscore, ref_zscore_min, ref_zscore_max) logger.info(" - %s %s: passed: %s, %s, dist: %s\n elapsed time: %6.2f s, %s" % (rs.job.name, rs.job.vinfo, passed, desc_str, dist_poly, time_elapsed, aux_str)) def work(self): if len(self.args.files) != 1: raise ValueError("Provide exactly one file to test") ifile = self.args.files[0] if ifile != '-' and not os.path.exists(ifile): raise ValueError("Provided input file not found") tmp_file = None if ifile == '-': tmp_file = tempfile.NamedTemporaryFile(prefix="booltest-bat-inp", delete=True) while True: data = sys.stdin.read(4096) if sys.version_info < (3,) else sys.stdin.buffer.read(4096) if data is None or len(data) == 0: break tmp_file.write(data) ifile = tmp_file.name jobs = [x for x in self.generate_jobs()] for i, j in enumerate(jobs): j.idx = i self.runners = [None] * self.parallel_tasks self.comp_jobs = [None] * self.parallel_tasks for j in jobs: self.job_queue.put_nowait(j) while not self.job_queue.empty() or sum([1 for x in self.runners if x is not None]) > 0: time.sleep(0.1) # Realloc work for i in range(len(self.runners)): if self.runners[i] is not None and self.runners[i].is_running: continue was_empty = self.runners[i] is None if not was_empty: self.job_queue.task_done() logger.info("Task %d done, job queue size: %d, running: %s" % (i, self.job_queue.qsize(), sum([1 for x in self.runners if x]))) self.on_finished(self.comp_jobs[i], self.runners[i], i) # Start a new task, if any try: job = self.job_queue.get_nowait() # type: BoolJob except queue.Empty: self.runners[i] = None continue cli = '%s %s "%s"' % (self.bool_wrapper, job.cli, ifile) self.comp_jobs[i] = job self.runners[i] = get_runner(shlex.split(cli)) logger.info("Starting async command %s %s, %s" % (job.name, job.vinfo, cli)) self.runners[i].start() return self.on_results_ready() def main(self): parser = self.argparser() self.args = parser.parse_args() self.init_config() return self.work() def argparser(self): parser = 
argparse.ArgumentParser(description='BoolTest Battery Runner') parser.add_argument('--debug', dest='debug', action='store_const', const=True, help='enables debug mode') parser.add_argument('-c', '--config', default=None, help='Test config') parser.add_argument('--alpha', dest='alpha', type=float, default=1e-4, help='Alpha value for pass/fail') parser.add_argument('-t', dest='threads', type=int, default=1, help='Maximum parallel threads') parser.add_argument('--block', dest='block', nargs=argparse.ZERO_OR_MORE, default=None, type=int, help='List of block sizes to test') parser.add_argument('--deg', dest='deg', nargs=argparse.ZERO_OR_MORE, default=None, type=int, help='List of degree to test') parser.add_argument('--comb-deg', dest='comb_deg', nargs=argparse.ZERO_OR_MORE, default=None, type=int, help='List of degree of combinations to test') parser.add_argument('--methods', dest='methods', nargs=argparse.ZERO_OR_MORE, default=None, help='List of methods to test, supported: 1, 2, halving') parser.add_argument('files', nargs=argparse.ONE_OR_MORE, default=[], help='files to process') parser.add_argument('--stdin', dest='stdin', action='store_const', const=True, default=False, help='Read from the stdin') parser.add_argument('--booltest-bin', dest='booltest_bin', help='Specify BoolTest binary launcher. If not specified, autodetected.') parser.add_argument('--cli', dest='cli', help='Specify common BoolTest CLI options') parser.add_argument('--json-out', dest='json_out', action='store_const', const=True, default=False, help='Produce json result') parser.add_argument('--json-out-file', dest='json_out_file', default=None, help='Produce json result to a file') parser.add_argument('--json-nice', dest='json_nice', action='store_const', const=True, default=False, help='Nicely formatted json output') return parser def main(): br = BoolRunner() return br.main() if __name__ == '__main__': main()
18,034
5,823
''' FuSeConv: Fully Separable Convolutions for Fast Inference on Systolic Arrays Authors: Surya Selvam, Vinod Ganesan, Pratyush Kumar Email ID: selvams@purdue.edu, vinodg@cse.iitm.ac.in, pratyush@cse.iitm.ac.in ''' import os import torch import wandb import random import argparse import torchvision import torch.nn as nn import torchvision.datasets as datasets import torchvision.transforms as transforms from utils import * from models import * def dumpData(flag, string): if flag == 'train': meta = open(args.name+'/metadataTrain.txt', "a") meta.write(string) meta.close() else: meta = open(args.name+'/metadataTest.txt', "a") meta.write(string) meta.close() def train(net, trainloader, criterion, optimizer, epoch): print('\nEpoch: %d' % epoch) net.train() train_loss = 0 correct = 0 total = 0 for batch_idx, (inputs, targets) in enumerate(trainloader): inputs = inputs.cuda() targets = targets.cuda() optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, targets) loss.backward() optimizer.step() train_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) string = str(epoch) + ',' + str(train_loss) + ',' + str(correct*1.0/total) + '\n' dumpData('train', string) wandb.log({ "Train Loss": train_loss, "Train Accuracy": 100*correct/total}, step=epoch) def test(net, testloader, criterion, epoch): net.eval() test_loss = 0 correct = 0 total = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs, targets = inputs.cuda(), targets.cuda() outputs = net(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 
100.*correct/total, correct, total)) string = str(epoch) + ',' + str(test_loss) + ',' + str(correct*1.0/total) + '\n' dumpData('test', string) wandb.log({ "Test Loss": test_loss, "Test Accuracy": 100*correct/total}, step=epoch) return correct*1.0/total def main(): wandb.init(name=args.name, project="cifar-224-full-variant") transform_train = transforms.Compose([ transforms.Resize(224), transforms.RandomCrop(224, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) transform_test = transforms.Compose([ transforms.Resize(224), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) if args.Dataset == 'CIFAR10': trainset = torchvision.datasets.CIFAR10(root='data', train=True, download=True, transform=transform_train) testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test) numClasses = 10 elif args.Dataset == 'CIFAR100': trainset = torchvision.datasets.CIFAR100(root='data', train=True, download=True, transform=transform_train) testset = torchvision.datasets.CIFAR100(root='data', train=False, download=True, transform=transform_test) numClasses = 100 trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=4) testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=4) if args.variant == 'baseline': if args.Network == 'ResNet': net = ResNet50(numClasses) elif args.Network == 'MobileNetV1': net = MobileNetV1(numClasses) elif args.Network == 'MobileNetV2': net = MobileNetV2(numClasses) elif args.Network == 'MobileNetV3S': net = MobileNetV3('small', numClasses) elif args.Network == 'MobileNetV3L': net = MobileNetV3('large', numClasses) elif args.Network == 'MnasNet': net = MnasNet(numClasses) elif args.variant == 'half': if args.Network == 'ResNet': net = ResNet50FuSeHalf(numClasses) elif args.Network == 'MobileNetV1': 
net = MobileNetV1FuSeHalf(numClasses) elif args.Network == 'MobileNetV2': net = MobileNetV2FuSeHalf(numClasses) elif args.Network == 'MobileNetV3S': net = MobileNetV3FuSeHalf('small', numClasses) elif args.Network == 'MobileNetV3L': net = MobileNetV3FuSeHalf('large', numClasses) elif args.Network == 'MnasNet': net = MnasNetFuSeHalf(numClasses) elif args.variant == 'full': if args.Network == 'ResNet': net = ResNet50FuSeFull(numClasses) elif args.Network == 'MobileNetV1': net = MobileNetV1FuSeFull(numClasses) elif args.Network == 'MobileNetV2': net = MobileNetV2FuSeFull(numClasses) elif args.Network == 'MobileNetV3S': net = MobileNetV3FuSeFull('small', numClasses) elif args.Network == 'MobileNetV3L': net = MobileNetV3FuSeFull('large', numClasses) elif args.Network == 'MnasNet': net = MnasNetFuSeFull(numClasses) else: print("Provide a valid variant") exit(0) criterion = nn.CrossEntropyLoss().cuda() optimizer = torch.optim.SGD(net.parameters(), 0.1, momentum=0.9, weight_decay=5e-4) net.cuda() wandb.watch(net, log="all") bestAcc = 0 startEpoch = 0 if args.resume == True: assert os.path.isdir(args.name), 'Error: no checkpoint directory found!' 
checkpoint = torch.load(args.name+'/BestModel.t7') net.load_state_dict(checkpoint['net']) bestAcc = checkpoint['acc'] startEpoch = checkpoint['epoch'] optimizer.load_state_dict(checkpoint['optimizer']) lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 40, 60, 70, 80, 90], gamma=0.1, last_epoch=startEpoch-1) for epoch in range(startEpoch, 60): train(net, trainloader, criterion, optimizer, epoch) lr_scheduler.step() acc = test(net, testloader, criterion, epoch) state = {'net': net.state_dict(), 'acc': acc, 'epoch': epoch+1, 'optimizer' : optimizer.state_dict() } if acc > bestAcc: torch.save(state, args.name+'/BestModel.t7') bestAcc = acc wandb.save('BestModel.h5') else: torch.save(state, args.name+'/LastEpoch.t7') meta = open(args.name+'/stats.txt', "a") s = args.variant meta.write(args.Dataset + ' , ' + args.Network + ' , ' + s + ' , ' + str(bestAcc) + '\n') meta.close() if __name__ == '__main__': random.seed(42) torch.manual_seed(42) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False parser = argparse.ArgumentParser(description = "Train CIFAR Models") parser.add_argument("--Dataset", "-D", type = str, help = 'CIFAR10, CIFAR100', required=True) parser.add_argument("--Network", "-N", type = str, help = 'ResNet, MobileNetV1, MobileNetV2, MobileNetV3S, MobileNetV3L, MnasNet', required=True) parser.add_argument("--name", "-n", type=str, help = 'Name of the run', required=True) parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') parser.add_argument('--variant', '-v', type=str, help='baseline or half or full', required=True) args = parser.parse_args() if not os.path.isdir(args.name): os.mkdir(args.name) main()
8,346
2,845
"""Fabric deploy tasks: build a relocatable virtualenv tarball and ship it to S3.

NOTE(review): formatting reconstructed from flattened source — statement
nesting inferred; verify against the original file before relying on it.
"""
import os.path

from fabric.api import local, env
from fabric.utils import fastprint
from prezi.fabric.s3 import CommonTasks, S3Deploy, NoopServiceManager

# Global fabric environment configuration for all tasks in this module.
env.forward_agent = True
env.user = 'publisher'
env.roledefs = {'production': [], 'stage': [], 'local': []}


class SingleVirtualenvS3Deploy(S3Deploy):
    """S3 deployer that packages the app as a single virtualenv tarball.

    Uses a no-op service manager (nothing to restart) and delegates the
    build/compress work to :class:`SingleVirtualenvService`.
    """

    def __init__(self, app_name, buckets, revno):
        super(SingleVirtualenvS3Deploy, self).__init__(app_name, buckets, revno)
        self.service = NoopServiceManager(self)
        self.virtualenv = SingleVirtualenvService(self)


class SingleVirtualenvService(object):
    """Builds a relocatable python2.7 virtualenv and compresses it to .tar.bz2."""

    def __init__(self, deployer):
        # deployer supplies build_dir; derive the tarball artifact paths from it.
        self.deployer = deployer
        self.tarball_path = self.deployer.build_dir + '.tar'
        self.tarbz_path = self.tarball_path + '.bz2'
        self.tarbz_name = os.path.basename(self.tarbz_path)

    def build_tarbz(self):
        """Build the virtualenv, then compress it into the upload artifact."""
        self.build_venv()
        self.compress_venv()

    def cleanup(self):
        """Remove the compressed artifact and the build directory."""
        local('rm -rf %s %s' % (self.tarbz_path, self.deployer.build_dir))

    def build_venv(self):
        """Create the virtualenv, install the app, and make it relocatable."""
        fastprint('Building single virtualenv service in %s\n' % self.deployer.build_dir)
        # init + update pip submodule
        local('git submodule init; git submodule update')
        # builds venv
        self.run_virtualenv_cmd("--distribute --no-site-packages -p python2.7 %s" % self.deployer.build_dir)
        # installs app + dependencies (activate venv first so pip targets it)
        local(' && '.join(
            ['. %s/bin/activate' % self.deployer.build_dir,
             'pip install --exists-action=s -e `pwd`/pip#egg=pip -e `pwd`@master#egg=snakebasket -r requirements-development.txt']
        ))
        # makes venv relocatable
        self.run_virtualenv_cmd("--relocatable -p python2.7 %s" % self.deployer.build_dir)

    def compress_venv(self):
        """Tar+bzip2 the built virtualenv, archiving it by its directory name."""
        fastprint('Compressing virtualenv')
        # -C moves into the build dir's parent so the archive contains only
        # the top-level directory name, not the absolute path.
        local('tar -C %(build_dir)s/.. -cjf %(tarbz_path)s %(dirname)s' % {
            'build_dir': self.deployer.build_dir,
            'tarbz_path': self.tarbz_path,
            'dirname': os.path.basename(self.deployer.build_dir)
        })

    def run_virtualenv_cmd(self, args):
        """Run the ``virtualenv`` binary with *args* (string or list of tokens).

        VERSIONER_PYTHON_VERSION is cleared to stop the macOS python shim from
        overriding the interpreter selection — presumably; verify on target hosts.
        """
        if not isinstance(args, list):
            args = args.split()
        fastprint('Running virtualenv with args %s\n' % args)
        local("env VERSIONER_PYTHON_VERSION='' virtualenv %s" % ' '.join(args))

    @property
    def upload_source(self):
        # Local path of the artifact handed to the S3 uploader.
        return self.tarbz_path

    @property
    def upload_target(self):
        # Object key (basename only) used on the S3 side.
        return self.tarbz_name


# Module-level fabric task entry points.
tasks = CommonTasks(SingleVirtualenvS3Deploy, 'snakebasket', None)
snakebasket_build = tasks.build
cleanup = tasks.cleanup
2,590
870
from __future__ import unicode_literals import warnings from django import get_version as get_django_version __title__ = "dj-stripe" __summary__ = "Django + Stripe Made Easy" __uri__ = "https://github.com/pydanny/dj-stripe/" __version__ = "0.5.0" __author__ = "Daniel Greenfeld" __email__ = "pydanny@gmail.com" __license__ = "BSD" __license__ = "License :: OSI Approved :: BSD License" __copyright__ = "Copyright 2015 Daniel Greenfeld" if get_django_version() <= '1.6.x': msg = "dj-stripe deprecation notice: Django 1.6 and lower are deprecated\n" \ "and will be removed in dj-stripe 0.6.0.\n" \ "Reference: https://github.com/pydanny/dj-stripe/issues/173" warnings.warn(msg)
706
270
import logging import os import time import numpy as np from autolamella.acquire import ( grab_images, save_reference_images, save_final_images, ) from autolamella.align import realign from autolamella.autoscript import reset_state def milling( microscope, settings, stage_settings, my_lamella, pattern, # "upper", "lower", "both" filename_prefix="", demo_mode=False, ): from autoscript_core.common import ApplicationServerException from autoscript_sdb_microscope_client.structures import StagePosition # Sanity-check pattern parameter if pattern not in ("upper", "lower", "both"): raise ValueError(f"Invalid pattern type:\n" f"Should be \"upper\", \"lower\" or \"both\", not \"{pattern}\"") # Setup and realign to fiducial marker setup_milling(microscope, settings, stage_settings, my_lamella) tilt_in_radians = np.deg2rad(stage_settings["overtilt_degrees"]) if pattern == "upper": microscope.specimen.stage.relative_move(StagePosition(t=-tilt_in_radians)) elif pattern == "lower": microscope.specimen.stage.relative_move(StagePosition(t=+tilt_in_radians)) # Realign three times for abc in "abc": image_unaligned = grab_images( microscope, settings, my_lamella, prefix="IB_" + filename_prefix, suffix=f"_0{abc}-unaligned", ) realign(microscope, image_unaligned, my_lamella.fiducial_image) # Save the refined position to prevent gradual stage-drift my_lamella.fibsem_position.ion_beam.update_beam_shift() # Save the newly aligned image for the next alignment stage my_lamella.fiducial_image = grab_images( microscope, settings, my_lamella, # can remove prefix="IB_" + filename_prefix, suffix="_1-aligned", ) # Create and mill patterns if pattern == "upper" or pattern == "both": _milling_coords(microscope, stage_settings, my_lamella, "upper") if pattern == "lower" or pattern == "both": _milling_coords(microscope, stage_settings, my_lamella, "lower") # Create microexpansion joints (if applicable) _microexpansion_coords(microscope, stage_settings, my_lamella) if 'patterning_mode' in 
stage_settings: microscope.patterning.mode = stage_settings['patterning_mode'] if not demo_mode: microscope.imaging.set_active_view(2) # the ion beam view print("Milling pattern...") try: microscope.patterning.run() except ApplicationServerException: logging.error("ApplicationServerException: could not mill!") microscope.patterning.clear_patterns() grab_images( microscope, settings, my_lamella, # can remove prefix="IB_" + filename_prefix, suffix=f"_2-after-{pattern}-milling", ) return microscope def _milling_coords(microscope, stage_settings, my_lamella, pattern): """Create milling pattern for lamella position.""" # Sanity-check pattern parameter if pattern not in ("upper", "lower"): raise ValueError(f"Invalid pattern type for milling coords generation:\n" f"Should be \"upper\" or \"lower\", not \"{pattern}\"") microscope.imaging.set_active_view(2) # the ion beam view lamella_center_x, lamella_center_y = my_lamella.center_coord_realspace if my_lamella.custom_milling_depth is not None: milling_depth = my_lamella.custom_milling_depth else: milling_depth = stage_settings["milling_depth"] height = float( stage_settings["total_cut_height"] * stage_settings.get(f"percentage_roi_height_{pattern}", stage_settings["percentage_roi_height"]) ) center_offset = ( (0.5 * stage_settings["lamella_height"]) + (stage_settings["total_cut_height"] * stage_settings["percentage_from_lamella_surface"]) + (0.5 * height) ) center_y = lamella_center_y + center_offset \ if pattern == "upper" \ else lamella_center_y - center_offset # milling_roi = microscope.patterning.create_cleaning_cross_section( milling_roi = microscope.patterning.create_rectangle( lamella_center_x, center_y, stage_settings.get(f'lamella_width_{pattern}', stage_settings["lamella_width"]), height, milling_depth, ) if pattern == "upper": milling_roi.scan_direction = "TopToBottom" elif pattern == "lower": milling_roi.scan_direction = "BottomToTop" return milling_roi def _microexpansion_coords(microscope, stage_settings, 
my_lamella): """Mill microexpansion joints (TODO: add reference)""" if not ("microexpansion_width" in stage_settings and "microexpansion_distance_from_lamella" in stage_settings and "microexpansion_percentage_height" in stage_settings): return None microscope.imaging.set_active_view(2) # the ion beam view lamella_center_x, lamella_center_y = my_lamella.center_coord_realspace if my_lamella.custom_milling_depth is not None: milling_depth = my_lamella.custom_milling_depth else: milling_depth = stage_settings["milling_depth"] height = float( ( 2 * stage_settings["total_cut_height"] * (stage_settings["percentage_roi_height"] + stage_settings["percentage_from_lamella_surface"]) + stage_settings["lamella_height"] ) * stage_settings["microexpansion_percentage_height"] ) offset_x = (stage_settings["lamella_width"] + stage_settings["microexpansion_width"]) / 2 \ + stage_settings["microexpansion_distance_from_lamella"] milling_rois = [] for scan_direction, offset_x in (("LeftToRight", -offset_x), ("RightToLeft", offset_x)): milling_roi = microscope.patterning.create_rectangle( lamella_center_x + offset_x, lamella_center_y, stage_settings["microexpansion_width"], height, milling_depth, ) milling_roi.scan_direction = scan_direction milling_rois.append(milling_roi) return milling_rois def setup_milling(microscope, settings, stage_settings, my_lamella): """Setup the ion beam system ready for milling.""" system_settings = settings["system"] ccs_file = system_settings["application_file_cleaning_cross_section"] microscope = reset_state(microscope, settings, application_file=ccs_file) my_lamella.fibsem_position.restore_state(microscope) microscope.beams.ion_beam.beam_current.value = stage_settings["milling_current"] return microscope def run_drift_corrected_milling(microscope, correction_interval, reduced_area=None): """ Parameters ---------- microscope : Autoscript microscope object correction_interval : Time in seconds between drift correction realignment reduced_area : Autoscript 
Rectangle() object Describes the reduced area view in relative coordinates, with the origin in the top left corner. Default value is None, which will create a Rectangle(0, 0, 1, 1), which means the imaging will use the whole field of view. """ from autoscript_core.common import ApplicationServerException from autoscript_sdb_microscope_client.structures import (GrabFrameSettings, Rectangle) if reduced_area is None: reduced_area = Rectangle(0, 0, 1, 1) s = GrabFrameSettings(reduced_area=reduced_area) reference_image = microscope.imaging.grab_frame(s) # start drift corrected patterning (is a blocking function, not asynchronous) microscope.patterning.start() while microscope.patterning.state == "Running": time.sleep(correction_interval) try: microscope.patterning.pause() except ApplicationServerException: continue else: new_image = microscope.imaging.grab_frame(s) realign(microscope, new_image, reference_image) microscope.patterning.resume() def mill_single_stage( microscope, settings, stage_settings, stage_number, my_lamella, lamella_number ): """Run ion beam milling for a single milling stage in the protocol.""" filename_prefix = f"lamella{lamella_number + 1}_stage{stage_number + 1}" demo_mode = settings["demo_mode"] milling( microscope, settings, stage_settings, my_lamella, pattern="both", filename_prefix=filename_prefix, demo_mode=demo_mode, ) def mill_all_stages( microscope, protocol_stages, lamella_list, settings, output_dir="output_images" ): """Run all milling stages in the protocol.""" if lamella_list == []: logging.info("Lamella sample list is empty, nothing to mill here.") return if not os.path.isdir(output_dir): os.mkdir(output_dir) for stage_number, stage_settings in enumerate(protocol_stages): logging.info( f"Protocol stage {stage_number + 1} of {len(protocol_stages)}" ) for lamella_number, my_lamella in enumerate(lamella_list): logging.info( f"Lamella number {lamella_number + 1} of {len(lamella_list)}" ) # save all the reference images you took creating 
the fiducial if stage_number == 0: save_reference_images(settings, my_lamella, lamella_number) mill_single_stage( microscope, settings, stage_settings, stage_number, my_lamella, lamella_number, ) # If this is the very last stage, take an image if stage_number + 1 == len(protocol_stages): save_final_images(microscope, settings, lamella_number) reset_state(microscope, settings) # Return ion beam current to imaging current (20 pico-Amps) microscope.beams.ion_beam.beam_current.value = 20e-12
10,283
3,076
import pygame
from Robot import Robot


class SlowRobot(Robot):
    # Class-level (shared) phase counters: every SlowRobot instance advances
    # the same movement/shooting cadence.  moveState starts negative so the
    # robot moves on every frame for its first 15 updates, then only on
    # every 25th update -- which is what makes this robot "slow".
    moveState = -15
    shootState = 0

    def __init__(self, image, name):
        """Create a SlowRobot that initially travels down-and-right."""
        super().__init__(image, name)
        # Direction flags: movement is diagonal, so exactly one horizontal
        # and one vertical flag is meant to be active at a time.
        self.movingLeft = False
        self.movingRight = True
        self.movingUp = False
        self.movingDown = True

    def update(self):
        """Advance one frame.

        On movement frames, try to step in the current diagonal direction,
        flipping a direction flag whenever the corresponding move fails
        (bounce behavior), then rotate to face the travel direction.
        Every 10th update the robot also fires.
        """
        super().update()
        SlowRobot.moveState = SlowRobot.moveState + 1
        # Move every frame while moveState < 0 (warm-up), then only every
        # 25th frame.
        if((SlowRobot.moveState)% 25 == 0 or SlowRobot.moveState < 0):
            # NOTE(review): preX/preY are captured but never read afterwards;
            # they look like leftovers from an earlier collision rollback.
            preX = self.getRect().centerx
            preY = self.getRect().centery
            if self.movingLeft:
                # moveLeft() appears to return False when the step is blocked
                # -- TODO confirm against Robot's implementation.
                self.movingLeft = self.moveLeft()
                if not self.movingLeft:
                    self.movingRight = True
                if self.movingUp:
                    self.movingUp = self.moveUp()
                    if not self.movingUp:
                        self.movingDown = True
                else:
                    self.movingDown = self.moveDown()
                    if not self.movingDown:
                        self.movingUp = True
            else:
                self.movingRight = self.moveRight()
                if not self.movingRight:
                    self.movingLeft = True
                if self.movingDown:
                    self.movingDown = self.moveDown()
                    if not self.movingDown:
                        self.movingUp = True
                else:
                    self.movingUp = self.moveUp()
                    if not self.movingUp:
                        self.movingDown = True
            # Face the current diagonal travel direction (degrees).
            if self.movingLeft and self.movingUp:
                self.turnTowardsAngle(135)
            elif self.movingLeft and self.movingDown:
                self.turnTowardsAngle(-135)
            elif self.movingRight and self.movingUp:
                self.turnTowardsAngle(45)
            else:
                self.turnTowardsAngle(-45)
        SlowRobot.shootState = SlowRobot.shootState + 1
        if((SlowRobot.shootState)% 10 == 0):
            self.shoot()
2,210
610
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <skyoflw@gmail.com>
# ----------
#
# ----------

from typing import Iterable
from itertools import zip_longest

from .internal import TypeMatcher
from .g import isinstanceof, issubclassof


def match(args: (list, tuple), types: Iterable[type]) -> bool:
    '''
    Check whether each element of *args* matches the corresponding
    entry of *types*.

    Returns False when the two sequences have different lengths, or when
    any element fails its type check.

    example:

    ``` py
    match(('', 1), (str, int)) # True
    ```
    '''
    # Cheap length check when both sides support len(); a TypeError means
    # `types` is an un-sized iterable, so fall through to the paired walk.
    try:
        if len(args) != len(types):
            return False
    except TypeError:
        pass
    # Sentinel marks a length mismatch during the lock-step iteration.
    sentinel = object()
    paired = zip_longest(args, types, fillvalue=sentinel)
    return all(
        value is not sentinel
        and expected is not sentinel
        and isinstanceof(value, expected)
        for value, expected in paired
    )


__all__ = [
    'TypeMatcher',
    'isinstanceof', 'issubclassof',
    'match'
]
934
312
from aetherling.space_time.space_time_types import *
from aetherling.space_time.nested_counters import *
from aetherling.modules.ram_any_type import *
from aetherling.modules.term_any_type import TermAnyType
from aetherling.modules.mux_any_type import DefineMuxAnyType
from aetherling.modules.map_fully_parallel_sequential import DefineNativeMapParallel
from aetherling.helpers.nameCleanup import cleanName
from mantle.coreir.memory import getRAMAddrWidth
from mantle.common.countermod import Decode
from aetherling.modules.ram_any_type import *
from magma import *
from magma.circuit import DefineCircuitKind, Circuit

__all__ = ['DefineRAM_ST', 'RAM_ST']


@cache_definition
def DefineRAM_ST(t: ST_Type, n: int, has_reset = False, read_latency = 0) -> DefineCircuitKind:
    """
    Generate a RAM that stores n objects, each of space-time type t.

    WE, RE and RESET affect where in a t is being written/read.
    This is different from normal magma RAMs that don't have values
    that take multiple clocks.

    RADDR : In(Array[log_2(n), Bit)],
    RDATA : Out(t.magma_repr()),
    WADDR : In(Array(log_2(n), Bit)),
    WDATA : In(t.magma_repr()),
    WE: In(Bit)
    RE: In(Bit)
    if has_reset:
    RESET : In(Bit)
    """
    class _RAM_ST(Circuit):
        name = 'RAM_ST_{}_hasReset{}'.format(cleanName(str(t)), str(has_reset))
        addr_width = getRAMAddrWidth(n)
        IO = ['RADDR', In(Bits[addr_width]),
              'RDATA', Out(t.magma_repr()),
              'WADDR', In(Bits[addr_width]),
              'WDATA', In(t.magma_repr()),
              'WE', In(Bit),
              'RE', In(Bit)
              ] + ClockInterface(has_ce=False, has_reset=has_reset)

        @classmethod
        def definition(cls):
            # One inner RAM per stored t; each valid clock reads/writes one
            # magma_repr slot of the addressed t.
            rams = [DefineRAMAnyType(t.magma_repr(), t.valid_clocks(),
                                     read_latency=read_latency)()
                    for _ in range(n)]
            # Nested counters track the position inside a t for the current
            # read and write streams.
            read_time_position_counter = DefineNestedCounters(t,
                                                              has_cur_valid=True,
                                                              has_ce=True,
                                                              has_reset=has_reset)()
            read_valid_term = TermAnyType(Bit)
            read_last_term = TermAnyType(Bit)
            write_time_position_counter = DefineNestedCounters(t,
                                                               has_cur_valid=True,
                                                               has_ce=True,
                                                               has_reset=has_reset)()
            write_valid_term = TermAnyType(Bit)
            write_last_term = TermAnyType(Bit)
            # Mux selects which inner RAM's output drives RDATA.
            read_selector = DefineMuxAnyType(t.magma_repr(), n)()
            for i in range(n):
                wire(cls.WDATA, rams[i].WDATA)
                wire(write_time_position_counter.cur_valid, rams[i].WADDR)
                wire(read_selector.data[i], rams[i].RDATA)
                wire(read_time_position_counter.cur_valid, rams[i].RADDR)
                # Only the RAM addressed by WADDR gets write-enabled, and
                # only while the write counter says the slot is valid.
                write_cur_ram = Decode(i, cls.WADDR.N)(cls.WADDR)
                wire(write_cur_ram & write_time_position_counter.valid, rams[i].WE)
            wire(cls.RADDR, read_selector.sel)
            wire(cls.RDATA, read_selector.out)
            wire(cls.WE, write_time_position_counter.CE)
            wire(cls.RE, read_time_position_counter.CE)
            # valid/last outputs of the counters are unused here; terminate
            # them so the backend doesn't complain about dangling ports.
            wire(read_time_position_counter.valid, read_valid_term.I)
            wire(read_time_position_counter.last, read_last_term.I)
            wire(write_time_position_counter.valid, write_valid_term.I)
            wire(write_time_position_counter.last, write_last_term.I)
            if has_reset:
                wire(cls.RESET, write_time_position_counter.RESET)
                wire(cls.RESET, read_time_position_counter.RESET)
    return _RAM_ST


def RAM_ST(t: ST_Type, n: int, has_reset: bool = False, read_latency: int = 0) -> Circuit:
    """Instantiate a RAM_ST circuit storing n objects of type t.

    BUG FIX: the previous version built the definition but never returned
    anything (implicitly returning None despite the -> Circuit annotation).
    Also forwards read_latency (new, defaults to the old behavior).
    """
    return DefineRAM_ST(t, n, has_reset, read_latency)()
3,669
1,245
# Process two rose images by summing them together
# (FIN: Laske kaksi ruusukuvaa yhteen)
#
# Samuli Siltanen April 2021
# Python translation Ville Tilvis 2021

import numpy as np
import matplotlib.pyplot as plt

# Read in the images from disk.
im1 = plt.imread('../_kuvat/ruusu1.png')
im2 = plt.imread('../_kuvat/ruusu2.png')
print('Images read')

# Normalize both images jointly into [0, 1] using the global maximum,
# so their relative brightness is preserved.
MAX = np.max([np.max(im1), np.max(im2)])
im1 = im1 / MAX
im2 = im2 / MAX
print('Images normalized')

# Parameters: gamma correction for brightening, plus black/white
# threshold levels used for contrast clipping below.
gammacorrB = .6
blackthr = .03
whitethr = .95

# Average the two images.
im3 = (im1 + im2) / 2

# Enhance the image: rescale to [0, 1], clip the darkest values to the
# black threshold, stretch by the white threshold, clamp to 1, and
# finally apply the gamma correction.
im3 = im3 - np.min(im3)
im3 = im3 / np.max(im3)
blackthrarray = blackthr * np.ones(im3.shape)
im3 = np.maximum(im3, blackthrarray) - blackthrarray
im3 = im3 / (whitethr * np.max(im3))
im3 = np.minimum(im3, np.ones(im3.shape))
im3 = np.power(im3, gammacorrB)
print('New image ready')

# Save the summed image to file.
plt.imsave('../_kuvat/ruusu12.png', im3)
print('Wrote new image to file')

# Display the result.
plt.figure(1)
plt.clf()  # BUG FIX: was `plt.clf` (attribute access, never called)
plt.axis('off')
plt.gcf().set_dpi(600)
plt.imshow(im3)
1,227
566
############################## # Copyright (C) 2009-2011 by # Dent Earl (dearl@soe.ucsc.edu, dent.earl@gmail.com) # Benedict Paten (benedict@soe.ucsc.edu, benedict.paten@gmail.com) # Mark Diekhans (markd@soe.ucsc.edu) # ... and other members of the Reconstruction Team of David Haussler's # lab (BME Dept. UCSC). # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. ############################## import unittest import os import sys myBinDir = os.path.normpath( os.path.dirname( sys.argv[0] ) ) #sys.path.append(myBinDir + "/../../..") #os.environ["PATH"] = myBinDir + "/../../../../bin:" + os.environ["PATH"] class RoundTripCheck( unittest.TestCase ): import os knownValues = (('''>name1 ACGTnnnACGT >name2 ACGttttttttt ttttttttt ''','''>contig000001 ACGTnnnACGT >contig000002 ACGttttttttt ttttttttt '''), ('''>apple ACTGT >apple2 ACTGTACTGT >Horrible W0rds and a tab 4@!#@!!!$&*){} ACGTACGT >emptyContig >Some other stuff, odd extra space. 
ACGT >Last one TGCATGCAacgt bad characters ''', '''>contig000001 ACTGT >contig000002 ACTGTACTGT >contig000003 ACGTACGT >contig000004 >contig000005 ACGT >contig000006 TGCATGCAacgt bad characters ''')) if not os.path.exists( 'tempTestFiles' ): os.mkdir( 'tempTestFiles' ) def test_oneWay( self ): """fastaHeaderMapper should produce known results.""" import subprocess for pre, post in self.knownValues: # generate map cmd = [os.path.join( myBinDir, 'fastaHeaderMapper.py'), '--createMap=%s' % os.path.join('tempTestFiles','testMap.map'), '--label=%s' % 'contig' ] p = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) ( sout ) = p.communicate( pre )[0] # go forward cmd = [os.path.join( myBinDir, 'fastaHeaderMapper.py'), '--map=%s' % os.path.join('tempTestFiles','testMap.map'), '--goForward', '--label=%s' % 'contig' ] p = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) ( outFor ) = p.communicate( pre )[0] self.assertEqual( post, outFor ) def test_roundTrip( self ): """fastaHeaderMapper should be invertible.""" import subprocess for pre, post in self.knownValues: # generate map cmd = [ os.path.join( myBinDir, 'fastaHeaderMapper.py'), '--createMap=%s' % os.path.join('tempTestFiles','testMap.map'), '--label=%s' % 'contig' ] p = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) ( sout ) = p.communicate( pre )[0] # go forward cmd = [ os.path.join( myBinDir, 'fastaHeaderMapper.py'), '--map=%s' % os.path.join('tempTestFiles','testMap.map'), '--goForward', '--label=%s' % 'contig' ] p = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) ( outFor ) = p.communicate( pre )[0] self.assertEqual( post, outFor ) # go backward cmd = [ os.path.join( myBinDir, 'fastaHeaderMapper.py'), '--map=%s' % os.path.join('tempTestFiles','testMap.map'), '--goBackward', '--label=%s' % 'contig' ] p = subprocess.Popen( 
cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) ( outBack ) = p.communicate( outFor )[0] self.assertEqual( pre, outBack ) def test_roundTripPrefix( self ): """fastaHeaderMapper should be invertible with prefixes.""" import random import string import subprocess print ' ' chars = string.letters + string.digits + ' ' + '\t' + string.punctuation for i in xrange(50): prefix = ''.join( random.choice( chars ) for x in xrange(30)) for pre, post in self.knownValues: #add prefix to post post2 = '' j = 0 for p in post.split('\n'): j += 1 p = p.strip() if p == '': if j != len( post.split('\n') ): post2 += '\n' continue if p.startswith('>'): post2+= '>%s.%s\n' % ( prefix, p[1:] ) else: post2+= '%s\n' % p post = post2 # generate map cmd = [ os.path.join( myBinDir, 'fastaHeaderMapper.py'), '--createMap=%s' % os.path.join('tempTestFiles','testMap.map'), '--prefix=%s' % prefix] p = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) ( sout ) = p.communicate( pre )[0] # go forward cmd = [ os.path.join( myBinDir, 'fastaHeaderMapper.py'), '--map=%s' % os.path.join('tempTestFiles','testMap.map'), '--goForward'] p = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) ( outFor ) = p.communicate( pre )[0] self.assertEqual( post, outFor ) # go backward cmd = [ os.path.join( myBinDir, 'fastaHeaderMapper.py'), '--map=%s' % os.path.join('tempTestFiles','testMap.map'), '--goBackward'] p = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) ( outBack ) = p.communicate( outFor )[0] self.assertEqual( pre, outBack ) if __name__ == '__main__': unittest.main()
6,931
2,323
from django.contrib import admin
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.utils.html import format_html

from datetime import date, datetime

from django_summernote.admin import SummernoteModelAdmin
from mptt.admin import MPTTModelAdmin, DraggableMPTTAdmin

from .models import Priority, Status, Sprint, Project, Task, TaskType
from ..accounts.models import Profile


def make_done(modeladmin, request, queryset):
    """Admin action: mark the selected tasks as done.

    NOTE(review): assumes some Status row has done=True; if none exists,
    `status` is None and the update clears the tasks' status -- confirm.
    """
    status = Status.objects.filter(done=True).first()
    queryset.update(status=status, done=True, done_on=datetime.now())


make_done.short_description = '''
Marcar tarefas selecionadas como concluído'''


def make_archive(modeladmin, request, queryset):
    """Admin action: archive the selected tasks (superusers only)."""
    if request.user.is_superuser:
        queryset.update(archived=True, archived_on=datetime.now())


make_archive.short_description = '''
Marcar tarefas selecionadas como arquivado'''


@admin.register(Task)
class TaskAdmin(SummernoteModelAdmin, DraggableMPTTAdmin):
    """Tree-style admin for tasks with colored priority/status/type badges."""
    # change_list_template = 'admin/task_change_list.html'
    mptt_indent_field = 'title'
    list_per_page = 100
    list_display = [
        'tree_actions',
        'indented_title',
        'owner_thumb',
        'colored_priority',
        'colored_status',
        'colored_task_type',
        'formatted_finish',
        'project',
        'sprint'
    ]
    list_display_links = [
        'indented_title',
    ]
    list_filter = [
        ('sprint', admin.RelatedFieldListFilter),
        ('owner', admin.RelatedFieldListFilter),
        ('project', admin.RelatedFieldListFilter),
        ('status', admin.RelatedFieldListFilter),
        'archived',
    ]
    search_fields = ['title', 'description']
    summernote_fields = ['description']
    actions = [make_done, make_archive]

    def get_exclude(self, request, obj=None):
        # Only superusers may edit the done/archive bookkeeping fields.
        excluded = super().get_exclude(request, obj) or []
        if not request.user.is_superuser:
            return excluded + ['done', 'done_on', 'archived', 'archived_on']
        return excluded

    def get_queryset(self, request):
        # Non-superusers never see archived tasks.
        qs = super().get_queryset(request)
        if request.user.is_superuser:
            return qs
        # return qs.filter(Q(status=None) | Q(status__archive=False))
        return qs.filter(archived=False)

    def formatted_finish(self, obj):
        """Deadline column; shown in red when overdue and not done."""
        if not obj.finish_on:
            return ''
        color = '#373A3C'
        status_done = None
        status = None
        if obj.status:
            status_done = obj.status.done
            status = obj.status
        if (obj.finish_on.date() < date.today()) and (
                not status_done or not status):
            color = '#E0465E'
        return format_html(
            '<span style="color: {}; font-weight: bold;">{}</span>'.format(
                color, obj.finish_on.strftime('%b %-d')))
    formatted_finish.allow_tags = True
    formatted_finish.admin_order_field = 'finish_on'
    formatted_finish.short_description = _('Data')

    def _colored_cell(self, related):
        """Render a colored badge for a Priority/Status/TaskType value.

        Shared by the three colored_* columns below, which previously
        duplicated this rendering verbatim.
        """
        if related:
            name = related.name
            color = related.color
            color_text = related.color_text
        else:
            name = '-'
            color = '#C4C4C4'
            color_text = '#FFFFFF'
        return format_html(
            '<div style="background:{}; color:{}; '
            'text-align:center; padding: 4px;">{}</div>'.format(
                color, color_text, name))

    def colored_priority(self, obj):
        return self._colored_cell(obj.priority)
    colored_priority.allow_tags = True
    colored_priority.admin_order_field = 'priority'
    colored_priority.short_description = _('Prioridade')

    def colored_status(self, obj):
        return self._colored_cell(obj.status)
    colored_status.allow_tags = True
    colored_status.admin_order_field = 'status'
    colored_status.short_description = _('Status')

    def colored_task_type(self, obj):
        return self._colored_cell(obj.task_type)
    colored_task_type.allow_tags = True
    colored_task_type.admin_order_field = 'task_type'
    colored_task_type.short_description = _('Tipo')

    def owner_thumb(self, obj):
        """Thumbnail of the owner's profile photo, or the owner's name."""
        if not obj.owner:
            return ''
        # BUG FIX: `img` was only assigned inside the loop, so an owner
        # with no Profile row raised NameError; initialize it first.
        img = None
        for item in Profile.objects.filter(user=obj.owner):
            if item.photo:
                img = item.photo_thumbnail.url
            else:
                img = None
        if img:
            return format_html(
                '<img src="{0}" width="35" />'.format(img)
            )
        return '{}'.format(obj.owner)
    owner_thumb.allow_tags = True
    owner_thumb.admin_order_field = 'owner'
    owner_thumb.short_description = _('Resp.')

    class Media:
        css = {
            'all': ('css/likebee.css',)
        }


admin.site.register(Priority)
admin.site.register(Status)
admin.site.register(Sprint)
admin.site.register(Project)
admin.site.register(TaskType)
5,712
1,739
# connectorDetails # Returns a connector object if a valid identifier was provided. # Reference: https://fivetran.com/docs/rest-api/connectors#retrieveconnectordetails import fivetran_api # Fivetran API URL (Replace {connector_id} with a valid connector id). url = "https://api.fivetran.com/v1/connectors/{connector_id}" fivetran_api.dump(fivetran_api.get_url(url))
369
132
from django.contrib import admin

from .models import Address, Coupon, Item, Order, OrderItem, Payment, Session


def make_refund_accepted(modeladmin, request, queryset):
    """Admin action: grant refunds for the selected orders."""
    queryset.update(refund_requested=False, refund_granted=True)


# BUG FIX: user-facing label typo "refound" -> "refund".
make_refund_accepted.short_description = "Update orders to refund granted"


class OrderAdmin(admin.ModelAdmin):
    """Admin for orders: delivery/refund flags and related billing objects."""
    list_display = [
        "session",
        "user",
        "ordered",
        "being_delivered",
        "received",
        "refund_requested",
        "refund_granted",
        "billing_address",
        "shipping_address",
        "payment",
        "coupon",
    ]
    list_filter = [
        "ordered",
        "being_delivered",
        "received",
        "refund_requested",
        "refund_granted",
    ]
    list_display_links = [
        "session",
        "billing_address",
        "payment",
        "coupon",
        "shipping_address",
    ]
    search_fields = ["user__username", "reference", "session__session_number"]
    actions = [make_refund_accepted]


class AddressAdmin(admin.ModelAdmin):
    """Admin for customer addresses."""
    list_display = [
        "user",
        "street_address",
        "apartment_address",
        "country",
        "zip",
        "address_type",
        "default",
    ]
    list_filter = ["address_type", "default", "country"]
    # BUG FIX: searching on the bare FK "user" raises
    # "Related Field got invalid lookup: icontains"; search the username
    # instead, matching OrderAdmin above.
    search_fields = [
        "user__username",
        "street_address",
        "apartment_address",
        "zip",
    ]


class SessionAdmin(admin.ModelAdmin):
    """Admin for shopping sessions; creation time is read-only."""
    readonly_fields = ("start_date",)


admin.site.register(Item)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderItem)
admin.site.register(Payment)
admin.site.register(Address, AddressAdmin)
admin.site.register(Coupon)
admin.site.register(Session, SessionAdmin)
1,703
534
"""Metadata for cleaning, re-encoding, and documenting coded data columns. These dictionaries are used to create Encoder instances. They contain the following keys: 'df': A dataframe associating short codes with long descriptions and other information. 'code_fixes': A dictionary mapping non-standard codes to canonical, standardized codes. 'ignored_codes': A list of non-standard codes which appear in the data, and will be set to NA. """ from typing import Any, Dict import numpy as np import pandas as pd CODE_METADATA: Dict[str, Dict[str, Any]] = { "coalmine_types_eia": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ ("P", "preparation_plant", "A coal preparation plant."), ("S", "surface", "A surface mine."), ("U", "underground", "An underground mine."), ( "US", "underground_and_surface", "Both an underground and surface mine with most coal extracted from underground", ), ( "SU", "surface_and_underground", "Both an underground and surface mine with most coal extracted from surface", ), ], ).convert_dtypes(), "code_fixes": { "p": "P", "U/S": "US", "S/U": "SU", "Su": "S", }, "ignored_codes": [], }, "power_purchase_types_ferc1": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ ( "AD", "adjustment", 'Out-of-period adjustment. Use this code for any accounting adjustments or "true-ups" for service provided in prior reporting years. Provide an explanation in a footnote for each adjustment.', ), ( "EX", "electricity_exchange", "Exchanges of electricity. Use this category for transactions involving a balancing of debits and credits for energy, capacity, etc. and any settlements for imbalanced exchanges.", ), ( "IF", "intermediate_firm", 'Intermediate-term firm service. The same as LF service expect that "intermediate-term" means longer than one year but less than five years.', ), ( "IU", "intermediate_unit", 'Intermediate-term service from a designated generating unit. 
The same as LU service expect that "intermediate-term" means longer than one year but less than five years.', ), ( "LF", "long_firm", 'Long-term firm service. "Long-term" means five years or longer and "firm" means that service cannot be interrupted for economic reasons and is intended to remain reliable even under adverse conditions (e.g., the supplier must attempt to buy emergency energy from third parties to maintain deliveries of LF service). This category should not be used for long-term firm service firm service which meets the definition of RQ service. For all transaction identified as LF, provide in a footnote the termination date of the contract defined as the earliest date that either buyer or seller can unilaterally get out of the contract.', ), ( "LU", "long_unit", 'Long-term service from a designated generating unit. "Long-term" means five years or longer. The availability and reliability of service, aside from transmission constraints, must match the availability and reliability of the designated unit.', ), ( "OS", "other_service", "Other service. Use this category only for those services which cannot be placed in the above-defined categories, such as all non-firm service regardless of the Length of the contract and service from designated units of Less than one year. Describe the nature of the service in a footnote for each adjustment.", ), ( "RQ", "requirement", "Requirements service. Requirements service is service which the supplier plans to provide on an ongoing basis (i.e., the supplier includes projects load for this service in its system resource planning). In addition, the reliability of requirement service must be the same as, or second only to, the supplier’s service to its own ultimate consumers.", ), ( "SF", "short_firm", "Short-term service. 
Use this category for all firm services, where the duration of each period of commitment for service is one year or less.", ), ], ).convert_dtypes(), "code_fixes": {}, "ignored_codes": [ "", "To", 'A"', 'B"', 'C"', "ÿ\x16", "NA", " -", "-", "OC", "N/", "Pa", "0", ], }, "momentary_interruptions_eia": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ ( "L", "less_than_1_minute", "Respondent defines a momentary interruption as less than 1 minute.", ), ( "F", "less_than_5_minutes", "Respondent defines a momentary interruption as less than 5 minutes.", ), ( "O", "other", "Respondent defines a momentary interruption using some other criteria.", ), ], ).convert_dtypes(), "code_fixes": {}, "ignored_codes": [], }, "entity_types_eia": { "df": pd.DataFrame( columns=[ "code", "label", "description", ], data=[ ( "A", "municipal_marketing_authority", "Municipal Marketing Authority. Voted into existence by the residents of a municipality and given authority for creation by the state government. They are nonprofit organizations", ), ( "B", "behind_the_meter", "Behind the Meter. Entities that install, own, and/or operate a system (usually photovoltaic), and sell, under a long term power purchase agreement (PPA) or lease, all the production from the system to the homeowner or business with which there is a net metering agreement. Third Party Owners (TPOs) of PV solar installations use this ownership code.", ), ("C", "cooperative", "Cooperative. Member-owned organizations."), ("COM", "commercial", "Commercial facility."), ( "D", "nonutility_dsm_administrator", "Non-utility DSM Administrator. Only involved with Demand-Side Management activities.", ), ( "F", "federal", "Federal. Government agencies with the authority to deliver energy to end-use customers.", ), ("G", "community_choice_aggregator", "Community Choice Aggregator."), ( "I", "investor_owned", "Investor-owned Utilities. 
Entities that are privately owned and provide a public service.", ), ("IND", "industrial", "Industrial facility."), ( "M", "municipal", "Municipal: Entities that are organized under authority of state statute to provide a public service to residents of that area.", ), ("O", "other", "Other entity type."), ( "P", "political_subdivision", 'Political Subdivision. (also called "public utility district"): Independent of city or county government and voted into existence by a majority of the residents of any given area for the specific purpose of providing utility service to the voters. State laws provide for the formation of such districts.', ), ("PO", "power_marketer", "Power marketer."), ("PR", "private", "Private entity."), ( "Q", "independent_power_producer", "Independent Power Producer or Qualifying Facility. Entities that own power plants and sell their power into the wholesale market.", ), ( "R", "retail_power_marketer", "Retail Power Marketer or Energy Service Provider: Entities that market power to customers in restructured markets.", ), ( "S", "state", "State entities that own or operate facilities or provide a public service.", ), ( "T", "transmission", "Transmission: Entities that operate or own high voltage transmission wires that provide bulk power services.", ), ("U", "unknown", "Unknown entity type."), ( "W", "wholesale_power_marketer", "Wholesale Power Marketer: Entities that buy and sell power in the wholesale market.", ), ], ).convert_dtypes(), "code_fixes": { "Behind the Meter": "B", "Community Choice Aggregator": "G", "Cooperative": "C", "Facility": "Q", "Federal": "F", "Investor Owned": "I", "Municipal": "M", "Political Subdivision": "P", "Power Marketer": "PO", "Retail Power Marketer": "R", "State": "S", "Unregulated": "Q", "Wholesale Power Marketer": "W", }, "ignored_codes": [], }, "energy_sources_eia": { "df": pd.DataFrame( columns=[ "code", "label", "fuel_units", "min_fuel_mmbtu_per_unit", "max_fuel_mmbtu_per_unit", "fuel_group_eia", 
"fuel_derived_from", "fuel_phase", "fuel_type_code_pudl", "description", ], data=[ ( "AB", "agricultural_byproducts", "short_tons", 7.0, 18.0, "renewable", "biomass", "solid", "waste", "Agricultural by-products", ), ( "ANT", "anthracite", "short_tons", 22.0, 28.0, "fossil", "coal", "solid", "coal", "Anthracite coal", ), ( "BFG", "blast_furnace_gas", "mcf", 0.07, 0.12, "fossil", "gas", "gas", "gas", "Blast furnace gas", ), ( "BIT", "bituminous_coal", "short_tons", 20.0, 29.0, "fossil", "coal", "solid", "coal", "Bituminous coal", ), ( "BLQ", "black_liquor", "short_tons", 10.0, 14.0, "renewable", "biomass", "liquid", "waste", "Black liquor", ), ( "DFO", "distillate_fuel_oil", "barrels", 5.5, 6.2, "fossil", "petroleum", "liquid", "oil", "Distillate fuel oil, including diesel, No. 1, No. 2, and No. 4 fuel oils", ), ( "GEO", "geothermal", pd.NA, np.nan, np.nan, "renewable", "other", pd.NA, "other", "Geothermal", ), ( "JF", "jet_fuel", "barrels", 5.0, 6.0, "fossil", "petroleum", "liquid", "oil", "Jet fuel", ), ( "KER", "kerosene", "barrels", 5.6, 6.1, "fossil", "petroleum", "liquid", "oil", "Kerosene", ), ( "LFG", "landfill_gas", "mcf", 0.3, 0.6, "renewable", "biomass", "gas", "waste", "Landfill gas", ), ( "LIG", "lignite", "short_tons", 10.0, 14.5, "fossil", "coal", "solid", "coal", "Lignite coal", ), ( "MSB", "municipal_solid_waste_biogenic", "short_tons", 9.0, 12.0, "renewable", "biomass", "solid", "waste", "Municipal solid waste (biogenic)", ), ( "MSN", "municipal_solid_nonbiogenic", "short_tons", 9.0, 12.0, "fossil", "petroleum", "solid", "waste", "Municipal solid waste (non-biogenic)", ), ( "MSW", "municipal_solid_waste", "short_tons", 9.0, 12.0, "renewable", "biomass", "solid", "waste", "Municipal solid waste (all types)", ), ( "MWH", "electricity_storage", "mwh", np.nan, np.nan, "other", "other", pd.NA, "other", "Electricity used for electricity storage", ), ( "NG", "natural_gas", "mcf", 0.8, 1.1, "fossil", "gas", "gas", "gas", "Natural gas", ), ( "NUC", 
"nuclear", pd.NA, np.nan, np.nan, "other", "other", pd.NA, "nuclear", "Nuclear, including uranium, plutonium, and thorium", ), ( "OBG", "other_biomass_gas", "mcf", 0.36, 1.6, "renewable", "biomass", "gas", "waste", "Other biomass gas, including digester gas, methane, and other biomass gasses", ), ( "OBL", "other_biomass_liquid", "barrels", 3.5, 4.0, "renewable", "biomass", "liquid", "waste", "Other biomass liquids", ), ( "OBS", "other_biomass_solid", "short_tons", 8.0, 25.0, "renewable", "biomass", "solid", "waste", "Other biomass solids", ), ( "OG", "other_gas", "mcf", 0.32, 3.3, "fossil", "other", "gas", "gas", "Other gas", ), ( "OTH", "other", pd.NA, np.nan, np.nan, "other", "other", pd.NA, "other", "Other", ), ( "PC", "petroleum_coke", "short_tons", 24.0, 30.0, "fossil", "petroleum", "solid", "coal", "Petroleum coke", ), ( "PG", "propane_gas", "mcf", 2.5, 2.75, "fossil", "petroleum", "gas", "gas", "Gaseous propane", ), ( "PUR", "purchased_steam", pd.NA, np.nan, np.nan, "other", "other", pd.NA, "other", "Purchased steam", ), ( "RC", "refined_coal", "short_tons", 20.0, 29.0, "fossil", "coal", "solid", "coal", "Refined coal", ), ( "RFO", "residual_fuel_oil", "barrels", 5.7, 6.9, "fossil", "petroleum", "liquid", "oil", "Residual fuel oil, including Nos. 5 & 6 fuel oils and bunker C fuel oil", ), ( "SC", "coal_synfuel", "short_tons", np.nan, np.nan, "fossil", "coal", "solid", "coal", "Coal synfuel. 
Coal-based solid fuel that has been processed by a coal synfuel plant, and coal-based fuels such as briquettes, pellets, or extrusions, which are formed from fresh or recycled coal and binding materials.", ), ( "SG", "syngas_other", "mcf", np.nan, np.nan, "fossil", "other", "gas", "gas", "Synthetic gas, other than coal-derived", ), ( "SGC", "syngas_coal", "mcf", 0.2, 0.3, "fossil", "coal", "gas", "gas", "Coal-derived synthesis gas", ), ( "SGP", "syngas_petroleum_coke", "mcf", 0.2, 1.1, "fossil", "petroleum", "gas", "gas", "Synthesis gas from petroleum coke", ), ( "SLW", "sludge_waste", "short_tons", 10.0, 16.0, "renewable", "biomass", "liquid", "waste", "Sludge waste", ), ( "SUB", "subbituminous_coal", "short_tons", 15.0, 20.0, "fossil", "coal", "solid", "coal", "Sub-bituminous coal", ), ( "SUN", "solar", pd.NA, np.nan, np.nan, "renewable", "other", pd.NA, "solar", "Solar", ), ( "TDF", "tire_derived_fuels", "short_tons", 16.0, 32.0, "other", "other", "solid", "waste", "Tire-derived fuels", ), ( "WAT", "water", pd.NA, np.nan, np.nan, "renewable", "other", pd.NA, "hydro", "Water at a conventional hydroelectric turbine, and water used in wave buoy hydrokinetic technology, current hydrokinetic technology, and tidal hydrokinetic technology, or pumping energy for reversible (pumped storage) hydroelectric turbine", ), ( "WC", "waste_coal", "short_tons", 6.5, 16.0, "fossil", "coal", "solid", "coal", "Waste/Other coal, including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.", ), ( "WDL", "wood_liquids", "barrels", 8.0, 14.0, "renewable", "biomass", "liquid", "waste", "Wood waste liquids excluding black liquor, including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids", ), ( "WDS", "wood_solids", "short_tons", 7.0, 18.0, "renewable", "biomass", "solid", "waste", "Wood/Wood waste solids, including paper pellets, railroad ties, utility poles, wood chips, park, and wood waste solids", ), ( "WH", "waste_heat", pd.NA, np.nan, 
np.nan, "other", "other", pd.NA, "other", "Waste heat not directly attributed to a fuel source. WH should only be reported when the fuel source is undetermined, and for combined cycle steam turbines that do not have supplemental firing.", ), ( "WND", "wind", pd.NA, np.nan, np.nan, "renewable", "other", pd.NA, "wind", "Wind", ), ( "WO", "waste_oil", "barrels", 3.0, 5.8, "fossil", "petroleum", "liquid", "oil", "Waste/Other oil, including crude oil, liquid butane, liquid propane, naptha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes", ), ], ).convert_dtypes(), "code_fixes": { "BL": "BLQ", "HPS": "WAT", "ng": "NG", "WOC": "WC", "OW": "WO", "WT": "WND", "H2": "OG", "OOG": "OG", }, "ignored_codes": [ 0, "0", "OO", "BM", "CBL", "COL", "N", "no", "PL", "ST", ], }, "fuel_transportation_modes_eia": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ ( "GL", "great_lakes", "Shipments of coal moved to consumers via the Great Lakes. These shipments are moved via the Great Lakes coal loading docks.", ), ( "OP", "onsite_production", "Fuel is produced on-site, making fuel shipment unnecessary.", ), ( "RR", "rail", "Shipments of fuel moved to consumers by rail (private or public/commercial). Included is coal hauled to or away from a railroad siding by truck if the truck did not use public roads.", ), ( "RV", "river", "Shipments of fuel moved to consumers via river by barge. 
Not included are shipments to Great Lakes coal loading docks, tidewater piers, or coastal ports.", ), ("PL", "pipeline", "Shipments of fuel moved to consumers by pipeline"), ( "SP", "slurry_pipeline", "Shipments of coal moved to consumers by slurry pipeline.", ), ( "TC", "tramway_conveyor", "Shipments of fuel moved to consumers by tramway or conveyor.", ), ( "TP", "tidewater_port", "Shipments of coal moved to Tidewater Piers and Coastal Ports for further shipments to consumers via coastal water or ocean.", ), ( "TR", "truck", "Shipments of fuel moved to consumers by truck. Not included is fuel hauled to or away from a railroad siding by truck on non-public roads.", ), ( "WT", "other_waterway", "Shipments of fuel moved to consumers by other waterways.", ), ], ).convert_dtypes(), "code_fixes": { "TK": "TR", "tk": "TR", "tr": "TR", "WA": "WT", "wa": "WT", "CV": "TC", "cv": "TC", "rr": "RR", "pl": "PL", "rv": "RV", }, "ignored_codes": ["UN"], }, "fuel_types_aer_eia": { "df": pd.DataFrame( columns=["code", "description"], data=[ ("SUN", "Solar PV and thermal"), ("COL", "Coal"), ("DFO", "Distillate Petroleum"), ("GEO", "Geothermal"), ("HPS", "Hydroelectric Pumped Storage"), ("HYC", "Hydroelectric Conventional"), ("MLG", "Biogenic Municipal Solid Waste and Landfill Gas"), ("NG", "Natural Gas"), ("NUC", "Nuclear"), ("OOG", "Other Gases"), ("ORW", "Other Renewables"), ("OTH", "Other (including Nonbiogenic Municipal Solid Waste)"), ("PC", "Petroleum Coke"), ("RFO", "Residual Petroleum"), ("WND", "Wind"), ("WOC", "Waste Coal"), ("WOO", "Waste Oil"), ("WWW", "Wood and Wood Waste"), ], ).convert_dtypes(), "code_fixes": {}, "ignored_codes": [], }, "contract_types_eia": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ ( "C", "contract", "Fuel received under a purchase order or contract with a term of one year or longer. 
Contracts with a shorter term are considered spot purchases ", ), ( "NC", "new_contract", "Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month", ), ("S", "spot_purchase", "Fuel obtained through a spot market purchase"), ( "T", "tolling_agreement", "Fuel received under a tolling agreement (bartering arrangement of fuel for generation)", ), ], ).convert_dtypes(), "code_fixes": {"N": "NC"}, "ignored_codes": [], }, "prime_movers_eia": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ ("BA", "battery_storage", "Energy Storage, Battery"), ( "BT", "binary_cycle_turbine", "Turbines Used in a Binary Cycle. Including those used for geothermal applications", ), ( "CA", "combined_cycle_steam_turbine", "Combined-Cycle, Steam Turbine Part", ), ("CC", "combined_cycle_total", "Combined-Cycle, Total Unit"), ("CE", "compressed_air_storage", "Energy Storage, Compressed Air"), ( "CP", "concentrated_solar_storage", "Energy Storage, Concentrated Solar Power", ), ( "CS", "combined_cycle_single_shaft", "Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single", ), ( "CT", "combined_cycle_combustion_turbine", "Combined-Cycle Combustion Turbine Part", ), ( "ES", "other_storage", "Energy Storage, Other (Specify on Schedule 9, Comments)", ), ("FC", "fuel_cell", "Fuel Cell"), ("FW", "flywheel_storage", "Energy Storage, Flywheel"), ( "GT", "gas_combustion_turbine", "Combustion (Gas) Turbine. Including Jet Engine design", ), ("HA", "hydrokinetic_axial_flow", "Hydrokinetic, Axial Flow Turbine"), ("HB", "hydrokinetic_wave_buoy", "Hydrokinetic, Wave Buoy"), ("HK", "hydrokinetic_other", "Hydrokinetic, Other"), ( "HY", "hydraulic_turbine", "Hydraulic Turbine. 
Including turbines associated with delivery of water by pipeline.", ), ( "IC", "internal_combustion", "Internal Combustion (diesel, piston, reciprocating) Engine", ), ("OT", "other", "Other"), ( "PS", "pumped_storage", "Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)", ), ("PV", "solar_pv", "Solar Photovoltaic"), ( "ST", "steam_turbine", "Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).", ), ("UNK", "unknown", "Unknown prime mover."), ("WS", "wind_offshore", "Wind Turbine, Offshore"), ("WT", "wind_onshore", "Wind Turbine, Onshore"), ], ).convert_dtypes(), "code_fixes": {}, "ignored_codes": [], }, "sector_consolidated_eia": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ (1, "electric_utility", "Traditional regulated electric utilities."), ( 2, "ipp_non_cogen", "Independent power producers which are not cogenerators.", ), ( 3, "ipp_cogen", "Independent power producers which are cogenerators, but whose primary business purpose is the same of electricity to the public.", ), ( 4, "commercial_non_cogen", "Commercial non-cogeneration facilities that produce electric power, are connected to the grid, and can sell power to the public.", ), ( 5, "commercial_cogen", "Commercial cogeneration facilities that produce electric power, are connected to the grid, and can sell power to the public.", ), ( 6, "industrial_non_cogen", "Industrial non-cogeneration facilities that produce electric power, are connected to the grid, and can sell power to the public.", ), ( 7, "industrial_cogen", "Industrial cogeneration facilities that produce electric power, are connected to the grid, and can sell power to the public", ), ], ).convert_dtypes(), "code_fixes": {}, "ignored_codes": [], }, }
37,194
9,223
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.

import unittest

from cdm.utilities import StorageUtils


class StorageUtilsTest(unittest.TestCase):
    """Test to validate StorageUtils functions"""

    def test_split_namespace_path(self):
        """Test split_namespace_path function on different paths"""
        # A None corpus path yields no (namespace, path) tuple at all.
        self.assertIsNone(StorageUtils.split_namespace_path(None))

        # (input path, expected namespace, expected object path)
        cases = [
            ('local:/some/path', 'local', '/some/path'),
            ('/some/path', '', '/some/path'),
            # Only the first colon separates the namespace; later colons
            # belong to the object path.
            ('adls:/some/path:with:colons', 'adls', '/some/path:with:colons'),
        ]
        for corpus_path, expected_namespace, expected_object_path in cases:
            result = StorageUtils.split_namespace_path(corpus_path)
            self.assertIsNotNone(result)
            self.assertEqual(expected_namespace, result[0])
            self.assertEqual(expected_object_path, result[1])
1,179
360
import raguel.fptp import raguel.irv
36
15
# -*- coding: utf-8 -*-
"""
Part of slugdetection package

@author: Deirdree A Polak
github: dapolak
"""

import numpy as np
import pandas as pd
from datetime import datetime

import matplotlib.pyplot as plt

from pyspark.sql import functions as F
from pyspark.sql.window import Window


class Data_Engineering:
    """
    Tools to crop and select the raw well data. Converts data from a Spark dataframe to Pandas.

    Parameters
    ----------
    well : Spark data frame
        data frame containing the pressure, temperature and choke data from a well.

    Attributes
    ----------
    well_df : Spark data frame
        data frame containing all of the pressure, temperature and choke data from a well.
        None values have been dropped
    well_og : Spark data frame
        original data frame copy, with None values
    features : list of strings
        List of the features of the well, default "WH_P", "DH_P", "WH_T", "DH_T" and "WH_choke"
    thresholds : dictionary
        Dictionary with important features as keys, and their lower and upper thresholds as
        values. This is used for cropping out of range values. The set_thresholds method allows
        user to change or add values.
    """

    def __init__(self, well):
        self.well_df = well.na.drop()  # working copy, rows containing None dropped
        self.well_og = well            # pristine copy kept so reset_well_df can restore state
        self.features = ["WH_P", "DH_P", "WH_T", "DH_T", "WH_choke"]
        self.thresholds = {"WH_P": [0, 100],
                           "DH_P": [90, 150],
                           "WH_T": [0, 100],
                           "DH_T": [75, 95],
                           "WH_choke": [-1000, 1000]}

    def stats(self):
        """
        Describes the data in terms of the most common statistics, such as mean, std, max, min
        and count

        Returns
        -------
        stats : Spark DataFrame
            Stats of data frame attribute well_df
        """
        return self.well_df.describe()

    def shape(self):
        """
        Describes the shape of the Spark data frame well_df, with number of rows and number of
        columns

        Returns
        -------
        shape : int, int
            number of rows, number of columns
        """
        return self.well_df.count(), len(self.well_df.columns)

    def reset_well_df(self):
        """
        Resets Spark data frame attribute well_df to original state by overriding the well_df
        attribute
        """
        self.well_df = self.well_og.na.drop()

    def timeframe(self, start="01-JAN-01 00:01", end="01-JUL-19 00:01", date_format="dd-MMM-yy HH:mm",
                  datetime_format='%d-%b-%y %H:%M'):
        """
        For Spark DataFrame well_df attribute, crops the data to the inputted start and end date

        Parameters
        ----------
        start : str (optional)
            Wanted start date of cropped data frame (default is "01-JAN-01 00:01")
        end : str (optional)
            Wanted end date of cropped data frame (default is "01-JUL-19 00:01")
        date_format : str (optional)
            String format of inputted dates (default is "dd-MMM-yy HH:mm")
        datetime_format : str (optional)
            C standard data format for datetime (default is '%d-%b-%y %H:%M')
        """
        d1 = datetime.strptime(start, datetime_format)
        d2 = datetime.strptime(end, datetime_format)
        assert max((d1, d2)) == d2, "Assert end date is later than start date"

        # Crop to start date
        self.well_df = self.well_df.filter(
            F.col("ts") > F.to_timestamp(F.lit(start), format=date_format).cast('timestamp'))
        # Crop to end date
        self.well_df = self.well_df.filter(
            F.col("ts") < F.to_timestamp(F.lit(end), format=date_format).cast('timestamp'))
        return

    def set_thresholds(self, variable, max_, min_):
        """
        Sets the thresholds value of a variable

        Parameters
        ----------
        variable : str
            Name of variable, for example "WH_P"
        max_ : int or float
            Upper threshold of variable
        min_ : int or float
            Lower threshold of variable
        """
        # Accept ints as well as floats: the original float-only isinstance check
        # rejected perfectly valid integer bounds such as set_thresholds("WH_P", 100, 0).
        assert isinstance(min_, (int, float)), "Minimum threshold must be a number"
        assert isinstance(max_, (int, float)), "Maximum threshold must be a number"
        assert max_ >= min_, "Maximum value must be larger than min"

        self.thresholds[variable] = [min_, max_]

    def data_range(self, verbose=True):
        """
        Ensures variables within the dataframe well_df are within range, as set by the attribute
        thresholds. The out of range values are replaced by the previous in range value

        Parameters
        ----------
        verbose : bool (optional)
            whether to allow for verbose (default is True)
        """
        window = Window.orderBy("ts")  # Spark Window ordering data frames by time
        lag_names = []                 # Empty list to store lag column names for later cleanup

        for well_columns in self.well_df.schema.names:  # loop through all components (columns) of data
            if well_columns != "ts":  # no thresholding for timestamp
                if well_columns in self.thresholds.keys():
                    thresh = self.thresholds[well_columns]  # thresholds for this feature
                else:
                    thresh = [-1000, 1000]  # feature not in thresholds attribute: wide-open bounds

                if verbose:
                    print(well_columns, "threshold is", thresh)

                # Naive approach, creating a large amount of lagged feature columns
                for i in range(1, 10):
                    lag_col = well_columns + "_lag_" + str(i)
                    lag_names.append(lag_col)
                    self.well_df = self.well_df.withColumn(lag_col,
                                                           F.lag(well_columns, i, 0).over(window))

                # Cascade the last in-range value forward through the lag columns
                for i in range(8, 0, -1):
                    lag_col = well_columns + "_lag_" + str(i)
                    prev_lag = well_columns + "_lag_" + str(i + 1)
                    # apply minimum and maximum threshold to column, and replace out of range
                    # values with previous value
                    self.well_df = self.well_df.withColumn(lag_col,
                                                           F.when(F.col(lag_col) < thresh[0], F.col(prev_lag))
                                                           .otherwise(F.col(lag_col)))
                    self.well_df = self.well_df.withColumn(lag_col,
                                                           F.when(F.col(lag_col) > thresh[1], F.col(prev_lag))
                                                           .otherwise(F.col(lag_col)))

                # apply minimum and maximum threshold to the feature column itself, replacing
                # out of range values with the previous (already-thresholded) value
                lag_col = well_columns + "_lag_1"
                self.well_df = self.well_df.withColumn(well_columns,
                                                       F.when(F.col(well_columns) < thresh[0], F.col(lag_col))
                                                       .otherwise(F.col(well_columns)))
                self.well_df = self.well_df.withColumn(well_columns,
                                                       F.when(F.col(well_columns) > thresh[1], F.col(lag_col))
                                                       .otherwise(F.col(well_columns)))

        self.well_df = self.well_df.drop(*lag_names)  # drop all temporary lag columns
        return

    def clean_choke(self, method="99"):
        """
        Method to clean WH_choke variables values from the well_df Spark data frame attribute

        Parameters
        ----------
        method : str (optional)
            Method to clean out WH_choke values. "99" entails suppressing all the data rows
            where the choke is lower than 99%. "no_choke" entails setting to None all the rows
            where the WH_choke value is 0 or where it is non constant i.e. differential is
            larger than 1 or second differential is larger than 3 (default is '99').
        """
        assert ("WH_choke" in self.well_df.schema.names), 'In order to clean out WH choke data, WH choke column ' \
                                                          'in well_df must exist'

        if method == "99":
            self.well_df = self.well_df.where("WH_choke > 99")  # keep rows where WH choke is above 99%

        elif method == "no_choke":
            # Select well_df only where WH choke is constant
            window = Window.orderBy("ts")  # Window ordering by time

            # Create differential and second differential columns for WH choke
            self.well_df = self.well_df.withColumn("WH_choke_lag", F.lag("WH_choke", 1, 0).over(window))
            self.well_df = self.well_df.withColumn("WH_choke_diff",
                                                   F.abs(F.col("WH_choke") - F.col("WH_choke_lag")))
            self.well_df = self.well_df.withColumn("WH_choke_lag2", F.lag("WH_choke_lag", 1, 0).over(window))
            self.well_df = self.well_df.withColumn("WH_choke_diff2",
                                                   F.abs(F.col("WH_choke") - F.col("WH_choke_lag2")))

            for col in self.well_df.schema.names:
                # Set all rows with WH choke less than 10 to None
                self.well_df = self.well_df.withColumn(col, F.when(F.col("WH_choke") < 10, None).
                                                       otherwise(F.col(col)))
                # Set rows with WH choke gradient larger than 1 to None
                self.well_df = self.well_df.withColumn(col, F.when(F.col("WH_choke_diff") > 1, None).
                                                       otherwise(F.col(col)))
                # Set rows with WH choke curvature larger than 3 to None
                self.well_df = self.well_df.withColumn(col, F.when(F.col("WH_choke_diff2") > 3, None).
                                                       otherwise(F.col(col)))
        else:
            print("Clean choke method inputted is not known. Try 99 or no_choke")
        return

    def df_toPandas(self, stats=True, **kwargs):
        """
        Creates a copy of Spark data frame attribute well_df in Pandas format. Also calculates
        and stores the mean and standard deviations of each column in the Pandas data frame in
        the class attributes means and stds.

        Parameters
        ----------
        stats : bool (optional)
            Bool asserting whether or not to calculate means and standard deviations of each
            columns/variable (default is True)
        kwargs :
            features : list of str
                feature names/ column headers to include in pandas data frame pd_df attribute

        Returns
        -------
        pd_df : Pandas data frame
            Pandas data frame of original well_df Spark data frame
        """
        if "features" in kwargs.keys():  # if features specified in kwargs, update feature attribute
            self.features = kwargs["features"]

        cols = self.features.copy()
        cols.append("ts")

        print("Converting Spark data frame to Pandas")
        self.pd_df = self.well_df.select(cols).toPandas()  # convert selected columns to Pandas
        print("Converted")

        if stats:
            # Calculate and store mean and std of each feature as attributes
            self.means = pd.DataFrame([[0 for _ in range(len(self.features))]], columns=self.features)
            self.stds = pd.DataFrame([[0 for _ in range(len(self.features))]], columns=self.features)
            for f in self.features:
                self.means[f] = self.pd_df[f].mean()
                self.stds[f] = self.pd_df[f].std()

        return self.pd_df

    def standardise(self, df):
        """
        Standardises the data based on the attributes means and stds as calculated when the
        original dataframe was converted to Pandas.

        Parameters
        ----------
        df : Pandas data frame
            Input data frame to be standardised

        Returns
        -------
        df : Pandas data frame
            Input data frame standardised
        """
        for feature_ in self.means.columns:  # For all features
            if (feature_ != 'ts') and (feature_ in df.columns):
                avg = self.means[feature_][0]  # get mean for feature from means attribute
                std = self.stds[feature_][0]   # get std for feature from stds attribute
                df[feature_] -= avg            # Standardise column in place
                df[feature_] /= std
        return df

    def plot(self, start=0, end=None, datetime_format="%d-%b-%y %H:%M",
             title="Well Pressure and Temperature over time",
             ax2_label="Temperature in C // Choke %", **kwargs):
        """
        Simple plot function to plot the pd_df pandas data frame class attribute.

        Parameters
        ----------
        start : int or str (optional)
            Index or date at which to start plotting the data (default is 0)
        end : int or str (optional)
            Index or date at which to stop plotting the data (default is None)
        datetime_format : str (optional)
            C standard data format for datetime (default is '%d-%b-%y %H:%M')
        title : str (optional)
            Plot title (default is "Well Pressure and Temperature over time")
        ax2_label : str (optional)
            Label for second axis, for non pressure features (default is
            "Temperature in C // Choke %")
        kwargs :
            features: list of str
                List of features to include in the plot

        Returns
        -------
        : Figure
            data plot figure
        """
        assert hasattr(self, "pd_df"), "Pandas data frame pd_df attribute must exist"
        assert not self.pd_df.empty, "Pandas data frame cannot be empty"

        if "features" in kwargs.keys():  # if only selected features are wanted
            self.features = kwargs["features"]
            for f in self.features:  # Check features exist
                assert (f in self.pd_df.columns), f + "must be contained in pd_df"

        if isinstance(start, int):  # If start date inputted as an index
            assert start >= 0, "Start index must be positive"
            assert start <= len(self.pd_df), "Start index must be less than the last index of pd_df attribute"
        if isinstance(end, int):  # If end date inputted as an index
            assert end >= 0, "End index must be positive"

        if isinstance(start, str):  # If a string has been passed for the start date
            date = datetime.strptime(start, datetime_format)
            assert np.any(self.pd_df.isin([date])), "Start time must exist in pandas data frame"
            start = self.pd_df['ts'][self.pd_df['ts'].isin([date])].index.tolist()[0]  # start date as index

        if isinstance(end, str):  # If a string has been passed for the end date
            date = datetime.strptime(end, datetime_format)
            assert np.any(self.pd_df.isin([date])), "End time must exist in pandas data frame"
            end = self.pd_df['ts'][self.pd_df['ts'].isin([date])].index.tolist()[0]  # end date as index

        if end is not None:  # If end index/date has been specified
            assert max((start, end)) == end, "Assert end date is later than start date"

        fig, ax = plt.subplots(1, 1, figsize=(30, 12))  # Create subplot
        ax2 = ax.twinx()  # Instantiate secondary axis that shares the same x-axis
        lines = []        # Store lines so one legend can cover both axes
        colours = ['C' + str(i) for i in range(len(self.features))]  # One colour per feature

        for col, c in zip(self.features, colours):
            if col[-1] == "P":  # If pressure, plot on main axis
                a, = ax.plot(self.pd_df["ts"][start:end], self.pd_df[col][start:end], c + ".", label=col)
                ax.set_ylabel("Pressure in BarG")
                lines.append(a)
            else:  # For other features, like Temperature and Choke, plot on secondary axis
                a, = ax2.plot(self.pd_df["ts"][start:end], self.pd_df[col][start:end], c + '.', label=col)
                ax2.set_ylabel(ax2_label)
                lines.append(a)

        ax.legend(lines, [l.get_label() for l in lines])
        ax.set_xlabel("Time")
        ax.grid(True, which='both')
        ax.set_title(title)

        return fig


def confusion_mat(cm, labels, title='Confusion Matrix', cmap='RdYlGn', **kwargs):
    """
    Simple confusion matrix plotting method. Inspired by Scikit Learn Confusion Matrix plot
    example.

    Parameters
    ----------
    cm : numpy array or list
        Confusion matrix as outputted by Scikit Learn Confusion Matrix method.
    labels : list of str
        Labels to use on the plot of the Confusion Matrix. Must match number of rows in the
        confusion matrix.
    title : str (optional)
        Title that will be printed above confusion matrix plot
    cmap : str (optional)
        Colour Map of confusion matrix
    kwargs :
        figsize : tuple of int or int
            Matplotlib key word to set size of plot

    Returns
    -------
    : Figure
        confusion matrix figure
    """
    # The docstring promises list support, but .shape/.max below need an ndarray.
    cm = np.asarray(cm)

    assert (len(labels) == len(cm[0])), "There must be the same number of columns in the confusion matrix as there " \
                                        "is labels available"

    # Create the figure exactly once; the original always created a default figure
    # and then leaked it when figsize was also passed.
    if "figsize" in kwargs.keys():
        fig, ax = plt.subplots(figsize=kwargs["figsize"])
    else:
        fig, ax = plt.subplots()

    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=labels, yticklabels=labels,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')

    # Loop over data dimensions and create text annotations.
    fmt = '.2f'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()

    return fig
18,624
5,247
import os
import time
import yaml
import argparse
from PIL import Image
import matplotlib.pyplot as plt

from vietocr.tool.predictor import Predictor
from vietocr.tool.config import Cfg


def main():
    """CLI entry point: run VietOCR prediction over every image in a folder.

    Writes one line per image ("<name>\\t<prediction>") to ./test_seq.txt and
    prints overall throughput (FPS) at the end.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--img", required=True, help="path to the folder of images to recognise")
    parser.add_argument("--config", required=True, help="path to the VietOCR config file")
    args = parser.parse_args()

    config = Cfg.load_config_from_file(args.config)
    # Vietnamese vocabulary override (printable ASCII + Vietnamese diacritics).
    config[
        "vocab"
    ] = " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\xB0\
\xB2\xC0\xC1\xC2\xC3\xC8\xC9\xCA\xCC\xCD\xD0\xD2\xD3\xD4\xD5\xD6\xD9\xDA\xDC\xDD\
\xE0\xE1\xE2\xE3\xE8\xE9\xEA\xEC\xED\xF0\xF2\xF3\xF4\xF5\xF6\xF9\xFA\xFC\xFD\u0100\
\u0101\u0102\u0103\u0110\u0111\u0128\u0129\u014C\u014D\u0168\u0169\u016A\u016B\u01A0\
\u01A1\u01AF\u01B0\u1EA0\u1EA1\u1EA2\u1EA3\u1EA4\u1EA5\u1EA6\u1EA7\u1EA8\u1EA9\u1EAA\
\u1EAB\u1EAC\u1EAD\u1EAE\u1EAF\u1EB0\u1EB1\u1EB2\u1EB3\u1EB4\u1EB5\u1EB6\u1EB7\u1EB8\
\u1EB9\u1EBA\u1EBB\u1EBC\u1EBD\u1EBE\u1EBF\u1EC0\u1EC1\u1EC2\u1EC3\u1EC4\u1EC5\u1EC6\
\u1EC7\u1EC8\u1EC9\u1ECA\u1ECB\u1ECC\u1ECD\u1ECE\u1ECF\u1ED0\u1ED1\u1ED2\u1ED3\u1ED4\
\u1ED5\u1ED6\u1ED7\u1ED8\u1ED9\u1EDA\u1EDB\u1EDC\u1EDD\u1EDE\u1EDF\u1EE0\u1EE1\u1EE2\
\u1EE3\u1EE4\u1EE5\u1EE6\u1EE7\u1EE8\u1EE9\u1EEA\u1EEB\u1EEC\u1EED\u1EEE\u1EEF\u1EF0\
\u1EF1\u1EF2\u1EF3\u1EF4\u1EF5\u1EF6\u1EF7\u1EF8\u1EF9\u2013\u2014\u2019\u201C\u201D\
\u2026\u20AC\u2122\u2212"
    print(config)
    detector = Predictor(config)

    # Predict every image in the folder, in deterministic (sorted) order.
    img_list = sorted(os.listdir(args.img))

    start_time = time.time()
    # Context manager guarantees the results file is flushed and closed even if
    # prediction raises part-way through (the original left the handle open).
    with open("./test_seq.txt", "w+") as f_pre:  # output: <name>\t<predict>
        for img in img_list:
            # os.path.join works whether or not --img ends with a separator;
            # plain string concatenation required a trailing slash.
            img_path = os.path.join(args.img, img)
            image = Image.open(img_path)
            s = detector.predict(image)
            print(img_path, "-----", s)
            f_pre.write(img + "\t" + s + "\n")
    runtime = time.time() - start_time

    # Guard against ZeroDivisionError on an empty folder or an instant run.
    if img_list and runtime > 0:
        print("FPS:", len(img_list) / runtime)


if __name__ == "__main__":
    main()
2,632
1,454
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.

# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.esx.settings.depot_content.components.
#---------------------------------------------------------------------------

"""
The ``com.vmware.esx.settings.depot_content.components_client`` module provides
classes to retrieve component versions from the depot.

"""

__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'

import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
    ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata


class Versions(VapiInterface):
    """
    The ``Versions`` class provides methods to get component versions from the
    sync'ed and imported depots.
    """

    _VAPI_SERVICE_ID = 'com.vmware.esx.settings.depot_content.components.versions'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _VersionsStub)
        self._VAPI_OPERATION_IDS = {}

    class CategoryType(Enum):
        """
        The ``Versions.CategoryType`` class defines possible values of categories
        for a component.

        .. note::
            This class represents an enumerated type in the interface language
            definition. The class contains class attributes which represent the
            values in the current version of the enumerated type. Newer versions of
            the enumerated type may contain new values. To use new values of the
            enumerated type in communication with a server that supports the newer
            version of the API, you instantiate this class. See :ref:`enumerated
            type description page <enumeration_description>`.
        """
        SECURITY = None
        """
        Security

        """
        ENHANCEMENT = None
        """
        Enhancement

        """
        BUGFIX = None
        """
        Bugfix

        """
        RECALL = None
        """
        Recall

        """
        RECALL_FIX = None
        """
        Recall-fix

        """
        INFO = None
        """
        Info

        """
        MISC = None
        """
        Misc

        """
        GENERAL = None
        """
        General

        """

        def __init__(self, string):
            """
            :type  string: :class:`str`
            :param string: String value for the :class:`CategoryType` instance.
            """
            # NOTE(review): the generated bindings intentionally pass the raw
            # string as the (unbound) first argument to Enum.__init__ -- this is
            # the vapi bindings convention; do not "fix" to Enum.__init__(self, string).
            Enum.__init__(string)

    CategoryType._set_values([
        CategoryType('SECURITY'),
        CategoryType('ENHANCEMENT'),
        CategoryType('BUGFIX'),
        CategoryType('RECALL'),
        CategoryType('RECALL_FIX'),
        CategoryType('INFO'),
        CategoryType('MISC'),
        CategoryType('GENERAL'),
    ])
    CategoryType._set_binding_type(type.EnumType(
        'com.vmware.esx.settings.depot_content.components.versions.category_type',
        CategoryType))

    class UrgencyType(Enum):
        """
        The ``Versions.UrgencyType`` class defines possible values of urgencies for
        a component.

        .. note::
            This class represents an enumerated type in the interface language
            definition. The class contains class attributes which represent the
            values in the current version of the enumerated type. Newer versions of
            the enumerated type may contain new values. To use new values of the
            enumerated type in communication with a server that supports the newer
            version of the API, you instantiate this class. See :ref:`enumerated
            type description page <enumeration_description>`.
        """
        CRITICAL = None
        """
        Critical

        """
        IMPORTANT = None
        """
        Important

        """
        MODERATE = None
        """
        Moderate

        """
        LOW = None
        """
        Low

        """
        GENERAL = None
        """
        General

        """

        def __init__(self, string):
            """
            :type  string: :class:`str`
            :param string: String value for the :class:`UrgencyType` instance.
            """
            # NOTE(review): same vapi bindings convention as CategoryType above.
            Enum.__init__(string)

    UrgencyType._set_values([
        UrgencyType('CRITICAL'),
        UrgencyType('IMPORTANT'),
        UrgencyType('MODERATE'),
        UrgencyType('LOW'),
        UrgencyType('GENERAL'),
    ])
    UrgencyType._set_binding_type(type.EnumType(
        'com.vmware.esx.settings.depot_content.components.versions.urgency_type',
        UrgencyType))

    class Info(VapiStruct):
        """
        The ``Versions.Info`` class defines the information regarding a component
        version.

        .. tip::
            The arguments are used to initialize data attributes with the same
            names.
        """

        def __init__(self,
                     display_name=None,
                     vendor=None,
                     display_version=None,
                     summary=None,
                     description=None,
                     category=None,
                     urgency=None,
                     kb=None,
                     contact=None,
                     release_date=None,
                    ):
            """
            :type  display_name: :class:`str`
            :param display_name: Display name of the component.
            :type  vendor: :class:`str`
            :param vendor: Vendor of the component.
            :type  display_version: :class:`str`
            :param display_version: Human readable version of the component.
            :type  summary: :class:`str`
            :param summary: Summary of the component version.
            :type  description: :class:`str`
            :param description: Description of the component version.
            :type  category: :class:`Versions.CategoryType`
            :param category: Category of the component version.
            :type  urgency: :class:`Versions.UrgencyType`
            :param urgency: Urgency of the component version.
            :type  kb: :class:`str`
            :param kb: Link to kb article related to this the component version.
            :type  contact: :class:`str`
            :param contact: Contact email for the component version.
            :type  release_date: :class:`datetime.datetime`
            :param release_date: Release date of the component version.
            """
            self.display_name = display_name
            self.vendor = vendor
            self.display_version = display_version
            self.summary = summary
            self.description = description
            self.category = category
            self.urgency = urgency
            self.kb = kb
            self.contact = contact
            self.release_date = release_date
            VapiStruct.__init__(self)

    Info._set_binding_type(type.StructType(
        'com.vmware.esx.settings.depot_content.components.versions.info', {
            'display_name': type.StringType(),
            'vendor': type.StringType(),
            'display_version': type.StringType(),
            'summary': type.StringType(),
            'description': type.StringType(),
            'category': type.ReferenceType(__name__, 'Versions.CategoryType'),
            'urgency': type.ReferenceType(__name__, 'Versions.UrgencyType'),
            'kb': type.URIType(),
            'contact': type.StringType(),
            'release_date': type.DateTimeType(),
        },
        Info,
        False,
        None))

    def get(self,
            name,
            version,
            ):
        """
        Returns information about a given component version in the depot.

        :type  name: :class:`str`
        :param name: Name of the component
            The parameter must be an identifier for the resource type:
            ``com.vmware.esx.settings.component``.
        :type  version: :class:`str`
        :param version: Version of the component
        :rtype: :class:`Versions.Info`
        :return: Information about the given component
        :raise: :class:`com.vmware.vapi.std.errors_client.Error`
            If there is unknown internal error. The accompanying error message
            will give more details about the failure.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if component with given version is not found.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
            if the caller is not authenticated.
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            If the service is not available.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have all of the privileges described as follows:

            * Method execution requires ``VcIntegrity.lifecycleSettings.Read``.
        """
        return self._invoke('get',
                            {
                            'name': name,
                            'version': version,
                            })


class _VersionsStub(ApiInterfaceStub):
    def __init__(self, config):
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'name': type.IdType(resource_types='com.vmware.esx.settings.component'),
            'version': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.unauthenticated':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),

        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        # REST binding: GET /esx/settings/depot-content/components/{name}/versions/{version}
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/esx/settings/depot-content/components/{name}/versions/{version}',
            path_variables={
                'name': 'name',
                'version': 'version',
            },
            query_parameters={
            },
            dispatch_parameters={
            },
            header_parameters={
            },
            dispatch_header_parameters={
            }
        )

        operations = {
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType(__name__, 'Versions.Info'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'get': get_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.esx.settings.depot_content.components.versions',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=True)


class StubFactory(StubFactoryBase):
    _attrs = {
        'Versions': Versions,
    }
12,231
3,228
#! python3 # Multi-atlas segmentation scheme trying to give a platform to do tests before translating them to the plugin. from __future__ import print_function from GetMetricFromElastixRegistration import GetFinalMetricFromElastixLogFile from MultiAtlasSegmentation import MultiAtlasSegmentation from ApplyBiasCorrection import ApplyBiasCorrection import SimpleITK as sitk from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets import SitkImageManipulation as sitkIm import winshell import numpy as np import matplotlib.pyplot as plt import sys import os # DATA FOLDERS: case = "107" basePath = "D:\Martin\ImplantMigrationStudy\\" + case + "\\" postopImageNames = basePath + case + '_Migration_ContralateralPostopHemiPelvis.mhd' followupImageNames = basePath + case + '_Migration_ContralateralFollowupHemiPelvis.mhd' #postopImageNames = basePath + case + '_Migration_PostopPelvis.mhd' #followupImageNames = basePath + case + '_Migration_FollowupPelvis.mhd' #postopImageNames = basePath + case + '_Migration_PostopBone.mhd' #followupImageNames = basePath + case + '_Migration_FollowupBone.mhd' # READ DATA postopImage = sitk.ReadImage(postopImageNames) # This will be the reference followupImage = sitk.ReadImage(followupImageNames) # This will be the segmented # BINARIZE THE IMAGES: postopImage = sitk.Greater(postopImage, 0) followupImage = sitk.Greater(followupImage, 0) # HOW OVERLAP IMAGES slice_number = round(postopImage.GetSize()[1]/2) #DisplayWithOverlay(image, segmented, slice_number, window_min, window_max) sitkIm.DisplayWithOverlay(postopImage[:,slice_number,:], followupImage[:,slice_number,:], 0, 1) #interact(sitkIm.DisplayWithOverlay, slice_number = (5), image = fixed(postopImage), segmented = fixed(followupImage), # window_min = fixed(0), window_max=fixed(1)); # Get the image constrained by both bounding boxes: #labelStatisticFilter = sitk.LabelShapeStatisticsImageFilter() #labelStatisticFilter.Execute(postopImage) 
#postopBoundingBox = np.array(labelStatisticFilter.GetBoundingBox(1)) #labelStatisticFilter.Execute(followupImage) #followupBoundingBox = np.array(labelStatisticFilter.GetBoundingBox(1)) #minimumStart = np.minimum(postopBoundingBox[0:3], followupBoundingBox[0:3]+ 20) # 50 is to give an extra margin #minimumStop = np.minimum(postopBoundingBox[0:3]+postopBoundingBox[3:6], followupBoundingBox[0:3]+followupBoundingBox[3:6]- 20) #minimumBoxSize = minimumStop - minimumStart #postopImage = postopImage[minimumStart[0]:minimumStop[0], minimumStart[1]:minimumStop[1], minimumStart[2]:minimumStop[2]] #followupImage = followupImage[minimumStart[0]:minimumStop[0], minimumStart[1]:minimumStop[1], minimumStart[2]:minimumStop[2]] # Another approach is to get the bounding box of the intersection: postopAndFollowupImage = sitk.And(postopImage, followupImage) labelStatisticFilter = sitk.LabelShapeStatisticsImageFilter() labelStatisticFilter.Execute(postopAndFollowupImage) bothBoundingBox = np.array(labelStatisticFilter.GetBoundingBox(1)) postopImage = postopImage[bothBoundingBox[0]:bothBoundingBox[0]+bothBoundingBox[3], bothBoundingBox[1]:bothBoundingBox[1]+bothBoundingBox[4], bothBoundingBox[2]+20:bothBoundingBox[2]++bothBoundingBox[5]-20] followupImage = followupImage[bothBoundingBox[0]:bothBoundingBox[0]+bothBoundingBox[3], bothBoundingBox[1]:bothBoundingBox[1]+bothBoundingBox[4], bothBoundingBox[2]+20:bothBoundingBox[2]+bothBoundingBox[5]-20] #Display reduced image: slice_number = round(postopImage.GetSize()[1]*1/3) sitkIm.DisplayWithOverlay(postopImage[:,slice_number,:], followupImage[:,slice_number,:], 0, 1) #sitk.Get #postopZ = permute(sum(sum(postopImage))>0, [3 1 2]); #followupZ = permute(sum(sum(followupImage))>0, [3 1 2]); #bothZ = find(postopZ&followupZ > 0); #% Remove 10 slices each side: #bothZ(1:10) = []; bothZ(end-10:end) = []; # GET SEGMENTATION PERFORMANCE BASED ON SURFACES: # init signed mauerer distance as reference metrics reference_distance_map = 
sitk.Abs(sitk.SignedMaurerDistanceMap(postopImage, squaredDistance=False, useImageSpacing=True)) # Get the reference surface: reference_surface = sitk.LabelContour(postopImage) statistics_image_filter = sitk.StatisticsImageFilter() # Get the number of pixels in the reference surface by counting all pixels that are 1. statistics_image_filter.Execute(reference_surface) num_reference_surface_pixels = int(statistics_image_filter.GetSum()) # Get the surface (contour) of the segmented image: segmented_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(followupImage, squaredDistance=False, useImageSpacing=True)) segmented_surface = sitk.LabelContour(followupImage) # Get the number of pixels in the reference surface by counting all pixels that are 1. statistics_image_filter.Execute(segmented_surface) num_segmented_surface_pixels = int(statistics_image_filter.GetSum()) label_intensity_statistics_filter = sitk.LabelIntensityStatisticsImageFilter() label_intensity_statistics_filter.Execute(segmented_surface, reference_distance_map) # Hausdorff distance: hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter() hausdorff_distance_filter.Execute(postopImage, followupImage) #All the other metrics: # Multiply the binary surface segmentations with the distance maps. The resulting distance # maps contain non-zero values only on the surface (they can also contain zero on the surface) seg2ref_distance_map = reference_distance_map * sitk.Cast(segmented_surface, sitk.sitkFloat32) ref2seg_distance_map = segmented_distance_map * sitk.Cast(reference_surface, sitk.sitkFloat32) # Get all non-zero distances and then add zero distances if required. 
seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map) seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr != 0]) seg2ref_distances = seg2ref_distances + \ list(np.zeros(num_segmented_surface_pixels - len(seg2ref_distances))) ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map) ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr != 0]) ref2seg_distances = ref2seg_distances + \ list(np.zeros(num_reference_surface_pixels - len(ref2seg_distances))) all_surface_distances = seg2ref_distances + ref2seg_distances # The maximum of the symmetric surface distances is the Hausdorff distance between the surfaces. In # general, it is not equal to the Hausdorff distance between all voxel/pixel points of the two # segmentations, though in our case it is. More on this below. #hausdorff_distance = hausdorff_distance_filter.GetHausdorffDistance() #max_surface_distance = label_intensity_statistics_filter.GetMaximum(1) #avg_surface_distance = label_intensity_statistics_filter.GetMean(1) #median_surface_distance = label_intensity_statistics_filter.GetMedian(1) #std_surface_distance = label_intensity_statistics_filter.GetStandardDeviation(1) hausdorff_distance = hausdorff_distance_filter.GetHausdorffDistance() avg_surface_distance = np.mean(all_surface_distances) max_surface_distance = np.max(all_surface_distances) median_surface_distance = np.median(all_surface_distances) std_surface_distance = np.std(all_surface_distances) # Now in mm: hausdorff_distance_mm = hausdorff_distance * postopImage.GetSpacing()[0] avg_surface_distance_mm = avg_surface_distance * postopImage.GetSpacing()[0] max_surface_distance_mm = max_surface_distance * postopImage.GetSpacing()[0] median_surface_distance_mm = median_surface_distance * postopImage.GetSpacing()[0] std_surface_distance_mm = std_surface_distance * postopImage.GetSpacing()[0] print("Surface based metrics [voxels]: MEAN_SD={0}, STDSD={1}, MEDIAN_SD={2}, 
HD={3}, MAX_SD={4}\n".format(avg_surface_distance, std_surface_distance, median_surface_distance, hausdorff_distance, max_surface_distance)) print("Surface based metrics [mm]: MEAN_SD={0}, STDSD={1}, MEDIAN_SD={2}, HD={3}, MAX_SD={4}\n".format(avg_surface_distance_mm, std_surface_distance_mm, median_surface_distance_mm, hausdorff_distance_mm, max_surface_distance_mm)) # GET SEGMENTATION PERFORMANCE BASED ON OVERLAP METRICS: overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter() overlap_measures_filter.Execute(postopImage, followupImage) jaccard_value = overlap_measures_filter.GetJaccardCoefficient() dice_value = overlap_measures_filter.GetDiceCoefficient() volume_similarity_value = overlap_measures_filter.GetVolumeSimilarity() false_negative_value = overlap_measures_filter.GetFalseNegativeError() false_positive_value = overlap_measures_filter.GetFalsePositiveError() print("Overlap based metrics: Jaccard={0}, Dice={1}, VolumeSimilarity={2}, FN={3}, FP={4}\n".format(jaccard_value, dice_value, volume_similarity_value, false_negative_value, false_positive_value)) # Create a log file: logFilename = basePath + 'RegistrationPerformance_python.txt' log = open(logFilename, 'w') log.write("Mean Surface Distance, STD Surface Distance, Median Surface Distance, Hausdorff Distance, Max Surface Distance\n") log.write("{0}, {1}, {2}, {3}, {4}\n".format(avg_surface_distance, std_surface_distance, median_surface_distance, hausdorff_distance, max_surface_distance)) log.write("Mean Surface Distance, STD Surface Distance [mm], Median Surface Distance [mm], Hausdorff Distance [mm], Max Surface Distance [mm]\n") log.write("{0}, {1}, {2}, {3}, {4}\n".format(avg_surface_distance_mm, std_surface_distance_mm, median_surface_distance_mm, hausdorff_distance_mm, max_surface_distance_mm)) log.write("Jaccard, Dice, Volume Similarity, False Negative, False Positive\n") log.write("{0}, {1}, {2}, {3}, {4}\n".format(jaccard_value, dice_value, volume_similarity_value, false_negative_value, 
false_positive_value)) log.close() plt.show()
9,790
3,369
# -*- coding: utf-8 -*- """ jes.gui.dialogs.intro ===================== The "intro" dialog, which displays the JESIntroduction.txt file. :copyright: (C) 2014 Matthew Frazier and Mark Guzdial :license: GNU GPL v2 or later, see jes/help/JESCopyright.txt for details """ from __future__ import with_statement import JESResources import JESVersion from java.awt import BorderLayout from javax.swing import JTextPane, JScrollPane, JButton from jes.gui.components.actions import methodAction from .controller import BasicDialog, DialogController class IntroDialog(BasicDialog): INFO_FILE = JESResources.getPathTo("help/JESIntroduction.txt") WINDOW_TITLE = "Welcome to %s!" % JESVersion.TITLE WINDOW_SIZE = (400, 300) def __init__(self): super(IntroDialog, self).__init__() # Open the text file and make a text pane textPane = JTextPane() textPane.editable = False scrollPane = JScrollPane(textPane) scrollPane.preferredSize = (32767, 32767) # just a large number with open(self.INFO_FILE, 'r') as fd: infoText = fd.read().decode('utf8').replace( "@version@", JESVersion.VERSION ) textPane.text = infoText # Load the scroll pane into the layout self.add(scrollPane, BorderLayout.CENTER) # Make an OK button self.okButton = JButton(self.ok) self.buttonPanel.add(self.okButton) @methodAction(name="OK") def ok(self): self.visible = False introController = DialogController("Introduction", IntroDialog)
1,591
507
"""Assist """
15
9
import tornado.web
import tornado.websocket
import tornado.httpserver
import tornado.ioloop

from worker_gateway.server import WebSocketChannelHandler
from heartbeat.handler import HeartbeatHandler
from orm import Worker


class Application(tornado.web.Application):
    """Tornado application wiring the worker run channel and heartbeat endpoints."""

    def __init__(self):
        # URL-spec -> handler routing table.
        routes = [
            (r'/api/run', WebSocketChannelHandler),
            (r'/api/heartbeat', HeartbeatHandler),
        ]
        tornado.web.Application.__init__(self, routes)


if __name__ == '__main__':
    # Drop stale workers before accepting new connections.
    Worker.cull_worker()
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(8080)
    tornado.ioloop.IOLoop.instance().start()
678
214
import time
from websocket_server import WebsocketServer


def _read_text(path):
    """Return the full contents of *path* decoded as UTF-8.

    The original read with the locale-default codec, which fails on the
    Chinese sample files on non-UTF-8 systems (e.g. Windows).
    """
    with open(path, encoding="utf-8") as f:
        return f.read()


# Called for every client connecting (after handshake)
def new_client(client, server):
    """Greet a new client by streaming three sample payloads of increasing size."""
    print("New client connected and was given id %d" % client['id'])
    #server.send_message_to_all("Hey all, a new client has joined us")
    short_message = _read_text("hamlet.txt")
    middle_message = _read_text("xiangcunjiaoshi_liucixin.txt")
    long_message = _read_text("theLongestDayInChangAn.txt")
    for message in (short_message, middle_message, long_message):
        send_message(client, server, message)


def send_message(client, server, message):
    """Send *message* to *client* every 5 seconds for roughly 10 seconds."""
    deadline = time.time() + 10
    while time.time() < deadline:
        server.send_message(client, message)
        time.sleep(5)


# Called for every client disconnecting
def client_left(client, server):
    """Log a client disconnect."""
    print("Client(%d) disconnected" % client['id'])


# Called when a client sends a message
def message_received(client, server, message):
    """Log an incoming message, truncating payloads over 200 chars for readability."""
    if len(message) > 200:
        message = message[:200] + '..'
    print("Client(%d) said: %s" % (client['id'], message))


PORT = 80
HOST = '0.0.0.0'
server = WebsocketServer(PORT, host=HOST)
server.set_fn_new_client(new_client)
server.set_fn_client_left(client_left)
server.set_fn_message_received(message_received)
server.run_forever()
1,536
501
import collections

class Solution:
    def isValidSudoku(self, board: List[List[str]]) -> bool:
        """Return True when no placed digit repeats within its row, column,
        or 3x3 sub-box.

        Empty cells are marked "."; only the filled cells are validated, so a
        valid-but-unsolvable board still returns True.
        """
        seen_in_row = collections.defaultdict(set)
        seen_in_col = collections.defaultdict(set)
        seen_in_box = collections.defaultdict(set)
        size = len(board)
        for r in range(size):
            for c in range(size):
                digit = board[r][c]
                if digit == ".":
                    # Empty cell: nothing to validate.
                    continue
                box = (r // 3, c // 3)
                duplicate = (digit in seen_in_row[r]
                             or digit in seen_in_col[c]
                             or digit in seen_in_box[box])
                if duplicate:
                    return False
                seen_in_row[r].add(digit)
                seen_in_col[c].add(digit)
                seen_in_box[box].add(digit)
        return True
899
247
#!/usr/bin/env python # coding: utf-8 # In[1]: import numpy as np import pandas as pd import seaborn as sns get_ipython().run_line_magic('matplotlib', 'inline') import matplotlib.pyplot as plt # In[2]: from sklearn.preprocessing import LabelEncoder from sklearn.metrics import classification_report from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, Lasso from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor from sklearn.svm import LinearSVC, SVC from sklearn.neural_network import MLPClassifier from sklearn.metrics import accuracy_score, r2_score, classification_report from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.preprocessing import OneHotEncoder # In[32]: data=pd.read_csv("Crop_prediction.csv") # In[33]: data.head() # * (Data set taken from Indian chamber of food and agriculture) # **Data fields** # * N - ratio of Nitrogen content in soil # * P - ratio of Phosphorous content in soil # * K - ratio of Potassium content in soil # * temperature - temperature in degree Celsius # * humidity - relative humidity in % # * ph - ph value of the soil # * rainfall - rainfall in mm # In[34]: data.tail() # In[35]: data.info() # In[36]: data.describe() # In[37]: data.isnull().sum() # In[38]: data.nunique() # In[39]: data.columns # In[40]: #Visualization plt.figure(figsize=(8,8)) plt.title("Correlation between features") corr=data.corr() sns.heatmap(corr,annot=True) # In[41]: data['label'].unique() # In[42]: plt.figure(figsize=(6,8)) plt.title("Temperature relation with crops") sns.barplot(y="label", x="temperature", data=data,palette="hot") plt.ylabel("crops") #Temperature has very effect with blackgram # In[43]: 
plt.figure(figsize=(6,8)) plt.title("Humidity relation with crops") sns.barplot(y="label", x="humidity", data=data,palette='brg') plt.ylabel("crops") #humidity has very high relation with rice # In[44]: plt.figure(figsize=(6,8)) plt.title("pH relation with crops") sns.barplot(y="label", x="ph", data=data,palette='hot') plt.ylabel("crops") #ph has a very high relationship with crops # In[45]: plt.figure(figsize=(6,8)) plt.title("Rainfall relation with crops") sns.barplot(y="label", x="rainfall", data=data,palette='brg') plt.ylabel("crops") #Rice needs a lots of rainfall #lentil needs a very less rainfall # In[46]: plt.figure(figsize=(8,6)) plt.title("Temperature and pH effect values for crops") sns.scatterplot(data=data, x="temperature", y="label", hue="ph",palette='brg') plt.ylabel("Crops") # In[47]: plt.figure(figsize=(8,6)) plt.title("Temperature and humidity effect values for crops") sns.scatterplot(data=data, x="temperature", y="label", hue="humidity",palette='brg') plt.ylabel("Crops") # In[48]: plt.figure(figsize=(8,6)) plt.title("Temperature and Rainfall effect values for crops") sns.scatterplot(data=data, x="temperature", y="label", hue="rainfall",palette='brg') plt.ylabel("Crops") # In[49]: #from pandas_profiling import ProfileReport # In[50]: #Predictions encoder=LabelEncoder() data.label=encoder.fit_transform(data.label) # In[51]: features=data.drop("label",axis=1) target=data.label # In[52]: features # In[53]: X_train, X_test, y_train, y_test = train_test_split(features, target, random_state=42) # In[54]: #Linear Regression lr = LinearRegression().fit(X_train, y_train) lr_pred= lr.score(X_test, y_test) print("Training score: {:.3f}".format(lr.score(X_train, y_train))) print("Test score: {:.3f}".format(lr.score(X_test, y_test))) # In[55]: #Decision Tree Classifier tree = DecisionTreeClassifier(max_depth=15,random_state=0).fit(X_train, y_train) tree_pred= tree.score(X_test, y_test) print("Training score: {:.3f}".format(tree.score(X_train, y_train))) 
print("Test score: {:.3f}".format(tree.score(X_test, y_test))) # In[56]: #Random Forests rf = RandomForestClassifier(n_estimators=10, max_features=3, random_state=0).fit(X_train, y_train) rf_pred= rf.score(X_test, y_test) print("Training score: {:.3f}".format(rf.score(X_train, y_train))) print("Test score: {:.3f}".format(rf.score(X_test, y_test))) # In[57]: #GradientBoostingClassifier gbr = GradientBoostingClassifier(n_estimators=20, max_depth=4, max_features=2, random_state=0).fit(X_train, y_train) gbr_pred= gbr.score(X_test, y_test) print("Training score: {:.3f}".format(gbr.score(X_train, y_train))) print("Test score: {:.3f}".format(gbr.score(X_test, y_test))) # In[58]: #Support Vector Classifier svm = SVC(C=100, gamma=0.001).fit(X_train, y_train) svm_pred= svm.score(X_test, y_test) print("Training score: {:.3f}".format(svm.score(X_train, y_train))) print("Test score: {:.3f}".format(svm.score(X_test, y_test))) # In[59]: #Logistic regression log_reg = LogisticRegression(C=0.1, max_iter=100000).fit(X_train, y_train) log_reg_pred= log_reg.score(X_test, y_test) print("Training score: {:.3f}".format(log_reg.score(X_train, y_train))) print("Test score: {:.3f}".format(log_reg.score(X_test, y_test))) # In[60]: predictions_acc = { "Model": ['Decision Tree', 'Random Forest', 'Gradient Boosting', 'SVC', 'Logistic Regression'], "Accuracy": [tree_pred, rf_pred, gbr_pred, svm_pred, log_reg_pred]} # In[61]: model_acc = pd.DataFrame(predictions_acc, columns=["Model", "Accuracy"]) # In[62]: model_acc # In[3]: import tkinter as tk from tkinter.font import BOLD from tkinter import messagebox from tkinter import scrolledtext from tkinter.constants import RIGHT, Y from tkinter import filedialog from tkinter import * # In[8]: def mainscreen(): global window window = tk.Tk() window.geometry("1530x795+0+0") window.configure(bg="#FFE4B5") window.title("Prediction model") head = tk.Label(window, text="\nEnter Details\n", font=("rockwell extra bold",45),fg="dark blue",bg="#FFE4B5").pack() 
def back3() : window.destroy() def values(): n=n_tk.get() p=p_tk.get() k=k_tk.get() temp=temp_tk.get() humidity=humidity_tk.get() ph=ph_tk.get() rainfall=rainfall_tk.get() def predictfunc(n,p,k,temp,humidity,ph,rainfall): #Predicting Model data=pd.read_csv("Crop_prediction.csv") x=data.loc[:,"N":"rainfall"] y=data.loc[:,'label'] Knn=KNeighborsClassifier() Knn.fit(x,y) test_data=[[n,p,k,temp,humidity,ph,rainfall]] predict=Knn.predict(test_data) #print(predict[0]) output1 = tk.Label(window, text="The prediction is: ",font=("Arial", 20),bg="#FFE4B5").place(x=600, y=570) output2 = tk.Label(window, text=predict, font=("Arial", 20),bg="#FFE4B5").place(x=820, y=570) predictfunc(n,p,k,temp,humidity,ph,rainfall) n1 = tk.Label(window, text="Ratio of Nitrogen content in soil: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=200) n_tk = tk.Entry(window, fg='blue', bg='white',borderwidth=5,font=("Arial", 18), width=30) n_tk.place(x=800, y=200) p2 = tk.Label(window, text="Ratio of Phosphorous content in soil: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=250) p_tk = tk.Entry(window, fg='blue', bg='white',borderwidth=5,font=("Arial", 18), width=30) p_tk.place(x=800, y=250) k3 = tk.Label(window, text="Ratio of Potassium content in soil: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=300) k_tk = tk.Entry(window, fg='blue', bg='white',borderwidth=5,font=("Arial", 18), width=30) k_tk.place(x=800, y=300) temp4= tk.Label(window, text="Temperature in degree Celsius: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=350) temp_tk = tk.Entry(window, fg='blue', bg='white',borderwidth=5,font=("Arial", 18), width=30) temp_tk.place(x=800, y=350) humidity5= tk.Label(window, text="Relative humidity in %: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=400) humidity_tk = tk.Entry(window, fg='blue', bg='white',borderwidth=5,font=("Arial", 18), width=30) humidity_tk.place(x=800, y=400) ph6= tk.Label(window, text="pH value of the soil: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=450) 
ph_tk = tk.Entry(window, fg='blue', bg='white',borderwidth=5,font=("Arial", 18), width=30) ph_tk.place(x=800, y=450) rainfall7= tk.Label(window, text="Rainfall in mm: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=500) rainfall_tk = tk.Entry(window, fg='blue', bg='white',borderwidth=5,font=("Arial", 18), width=30) rainfall_tk.place(x=800, y=500) back3_button = tk.Button(text="Exit", bg="blue", fg="white", height=1, width=10, borderwidth=8, cursor="hand2",font=("Arial", 12), command=back3) back3_button.place(x=530,y=680) submit_button = tk.Button(text="Submit", bg="green", fg="white", height=1, width=10, borderwidth=8, cursor="hand2",font=("Arial", 12), command=values) submit_button.place(x=830,y=680) # start the GUI window.mainloop() mainscreen() # In[ ]:
9,375
3,882
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *

# Field names handled by to_alipay_dict / from_alipay_dict, in wire order.
_FIELD_NAMES = (
    'action_type',
    'category',
    'order_info',
    'out_order_no',
    'repay_amount',
    'repay_proof',
    'user_id',
)


class ZhimaCreditOrderRepaymentApplyModel(object):
    """Request model for the Zhima credit order repayment apply API."""

    def __init__(self):
        # Every field starts unset; callers assign via the properties below.
        self._action_type = None
        self._category = None
        self._order_info = None
        self._out_order_no = None
        self._repay_amount = None
        self._repay_proof = None
        self._user_id = None

    @property
    def action_type(self):
        return self._action_type

    @action_type.setter
    def action_type(self, value):
        self._action_type = value

    @property
    def category(self):
        return self._category

    @category.setter
    def category(self, value):
        self._category = value

    @property
    def order_info(self):
        return self._order_info

    @order_info.setter
    def order_info(self, value):
        self._order_info = value

    @property
    def out_order_no(self):
        return self._out_order_no

    @out_order_no.setter
    def out_order_no(self, value):
        self._out_order_no = value

    @property
    def repay_amount(self):
        return self._repay_amount

    @repay_amount.setter
    def repay_amount(self, value):
        self._repay_amount = value

    @property
    def repay_proof(self):
        return self._repay_proof

    @repay_proof.setter
    def repay_proof(self, value):
        self._repay_proof = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        """Serialize the model to a plain dict for the Alipay gateway.

        Matches the generated original exactly: falsy fields (None, 0, "")
        are omitted, and values that expose to_alipay_dict() are serialized
        recursively.
        """
        params = dict()
        for name in _FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; a falsy input yields None."""
        if not d:
            return None
        o = ZhimaCreditOrderRepaymentApplyModel()
        for name in _FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
3,950
1,272
#!/usr/bin/env python """ Newton Generate Boot Images Usage: generateBootImage.py <target> <file_name> [--sim][--frontdoor][--seed=<seed_value>][--count=<word_count>][--hsp_fw_0p97] Options: -h --help Shows this help message. Target is one of the following: useq_seq_ram : Microsequencer Sequence RAM useq_map_ram : Microsequencer MAP RAM useq_wave_ram : Microsequencer Wave RAM datapath_ram : Gain Correction RAM de_ram : Dump Engine RAM lps1_ram : LPS1 lps2_ram : LPS2 grouped : Grouped data packet """ from __future__ import print_function from __future__ import absolute_import from __future__ import unicode_literals from docopt import docopt import sys import io import os import time import struct import subprocess import re import random import ctypes import newton_control as newton def writeFile( fileName, totalByteCount ): ofile = open( fileName, "w" ) index = 0 while index < len( commandData ): destAddress = commandData[index] index += 1 command = commandData[index] index += 1 attributes = commandData[index] index += 1 byteCount = commandData[index] index += 1 wordCount = int( byteCount / 2 ) if command == newton.CMD_GROUPED_DATA: # Modify the byteCount with totalByteCount byteCount = totalByteCount wordCount = int( byteCount / 2 ) ofile.write( '{0:0{1}X}'.format( destAddress, 4 ) + "\n" ) ofile.write( '{0:0{1}X}'.format( command, 4 ) + "\n" ) ofile.write( '{0:0{1}X}'.format( attributes, 4 ) + "\n" ) ofile.write( '{0:0{1}X}'.format( byteCount, 4 ) + "\n" ) for i in range(0, wordCount): cmdWord = commandData[index] index += 1 ofile.write( '{0:0{1}X}'.format( cmdWord, 4 ) + "\n" ) ofile.close( ) def generateCommandHeader( cmd, attr, destAddr, byteCount ): data16 = destAddr # Destination Address commandData.append( data16 ) data16 = cmd # Mail Box Command commandData.append( data16 ) data16 = attr # Attribute commandData.append( data16 ) data16 = byteCount # Byte Count commandData.append( data16 ) def generateRegisterWriteCommand( writeAddr, writeData, attributes ): 
attr = attributes | newton.WRITE_ATTR cmd = newton.CMD_REGISTER_CFG byteCount = 4 totalByteCount = byteCount + 8 generateCommandHeader( cmd, attr, 0, byteCount ) # Generate register list. data16 = writeData commandData.append( data16 ) data16 = writeAddr commandData.append( data16 ) return totalByteCount def generateRegisterWriteListCommand( writeList, attributes ): attr = attributes | newton.WRITE_ATTR cmd = newton.CMD_REGISTER_CFG wordCount = len( writeList ) byteCount = int( wordCount * 2 ) totalByteCount = byteCount + 8 generateCommandHeader( cmd, attr, 0, byteCount ) for writeData in writeList: # Generate register list. commandData.append( writeData ) return totalByteCount def generateRamWriteCommand( target, wordCount, attributes ): attr = attributes | newton.WRITE_ATTR totalByteCount = 0 if target == "useq_seq_ram": cmd = newton.CMD_SEQ_RAM depth = newton.USEQ_SEQ_RAM_DEPTH bitWidth = newton.USEQ_SEQ_RAM_WIDTH byteWidth = newton.USEQ_SEQ_RAM_WIDTH_BYTES addr = newton.USEQ_REGS_USEQRAMLOADDATA if wordCount == 0: wordCount = random.randrange(32,depth) print( "INFO: Adding CMD_SEQ_RAM sub-command with wordCount = " + str( wordCount ) ) r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() r.LD_RAM_SEL = 0 r.LD_ADDR = 0 byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr ) totalByteCount += byteCount elif target == "useq_wave_ram": cmd = newton.CMD_WAVE_RAM depth = newton.USEQ_WAVE_RAM_DEPTH bitWidth = newton.USEQ_WAVE_RAM_WIDTH byteWidth = newton.USEQ_WAVE_RAM_WIDTH_BYTES addr = newton.USEQ_REGS_USEQRAMLOADDATA if wordCount == 0: wordCount = random.randrange(32,depth) print( "INFO: Adding CMD_WAVE_RAM sub-command with wordCount = " + str( wordCount ) ) r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() r.LD_RAM_SEL = 1 r.LD_ADDR = 0 byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr ) totalByteCount += byteCount elif target == "useq_map_ram": cmd = newton.CMD_MAP_RAM depth = 
newton.USEQ_MAP_RAM_DEPTH bitWidth = newton.USEQ_MAP_RAM_WIDTH byteWidth = newton.USEQ_MAP_RAM_WIDTH_BYTES addr = newton.USEQ_REGS_USEQRAMLOADDATA if wordCount == 0: wordCount = random.randrange(32,depth) print( "INFO: Adding CMD_MAP_RAM sub-command with wordCount = " + str( wordCount ) ) r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() r.LD_RAM_SEL = 2 r.LD_ADDR = 0 byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr ) totalByteCount += byteCount elif target == "datapath_ram": cmd = newton.CMD_DATAPATH_RAM depth = newton.DATAPATH_RAM_DEPTH bitWidth = newton.DATAPATH_RAM_WIDTH byteWidth = newton.DATAPATH_RAM_WIDTH_BYTES addr = newton.DATAPATH_REGS_IA_WRDATA_REG if wordCount == 0: wordCount = random.randrange(32,depth) print( "INFO: Adding CMD_DATAPATH_RAM sub-command with wordCount = " + str( wordCount ) ) writeList = [] r1 = newton.ADI_DATAPATH_IA_SELECT_s() r1.IA_ENA = 1 writeList.append( r1.VALUE16 ) writeList.append( newton.DATAPATH_REGS_IA_SELECT ) r2 = newton.ADI_DATAPATH_IA_ADDR_REG_s() r2.IA_START_ADDR = 0 writeList.append( r2.VALUE16 ) writeList.append( newton.DATAPATH_REGS_IA_ADDR_REG ) byteCount = generateRegisterWriteListCommand( writeList, attr ) totalByteCount += byteCount elif target == "de_ram": cmd = newton.CMD_DUMP_ENGINE_RAM depth = newton.DE_RAM_DEPTH bitWidth = newton.DE_RAM_WIDTH byteWidth = newton.DE_RAM_WIDTH_BYTES addr = newton.DE_REGS_DE_IA_WRDATA_REG if wordCount == 0: wordCount = random.randrange(32,depth) print( "INFO: Adding CMD_DUMP_ENGINE_RAM sub-command with wordCount = " + str( wordCount ) ) writeList = [] r1 = newton.ADI_DE_REGS_YODA_DE_IA_SELECT_s() r1.RAM = 1 writeList.append( r1.VALUE16 ) writeList.append( newton.DE_REGS_DE_IA_SELECT ) r2 = newton.ADI_DE_REGS_YODA_DE_IA_ADDR_REG_s() r2.RAM_ADDR = 0 writeList.append( r2.VALUE16 ) writeList.append( newton.DE_REGS_DE_IA_ADDR_REG ) byteCount = generateRegisterWriteListCommand( writeList, attr ) totalByteCount += byteCount elif target == 
"lps1_ram": cmd = newton.CMD_LPS1_RAM depth = newton.LPS1_RAM_DEPTH bitWidth = newton.LPS1_RAM_WIDTH byteWidth = newton.LPS1_RAM_WIDTH_BYTES addr = newton.LPS1_REGS_LPSRAMDATA if wordCount == 0: wordCount = random.randrange(32,depth) print( "INFO: Adding CMD_LPS1_RAM sub-command with wordCount = " + str( wordCount ) ) writeList = [] r1 = newton.ADI_LPS_REGS_YODA_LPSRAMRDCMD_s() r1.LPS_RAM_READ_EN = 0 r1.LPS_RAM_READ_RDY = 0 writeList.append( r1.VALUE16 ) writeList.append( newton.LPS1_REGS_LPSRAMRDCMD ) r2 = newton.ADI_LPS_REGS_YODA_LPSRAMADDR_s() r2.LPS_RAM_ADDR = 0 writeList.append( r2.VALUE16 ) writeList.append( newton.LPS1_REGS_LPSRAMADDR ) byteCount = generateRegisterWriteListCommand( writeList, attr ) totalByteCount += byteCount elif target == "lps2_ram": cmd = newton.CMD_LPS2_RAM depth = newton.LPS2_RAM_DEPTH bitWidth = newton.LPS2_RAM_WIDTH byteWidth = newton.LPS2_RAM_WIDTH_BYTES addr = newton.LPS2_REGS_LPSRAMDATA if wordCount == 0: wordCount = random.randrange(32,depth) print( "INFO: Adding CMD_LPS2_RAM sub-command with wordCount = " + str( wordCount ) ) writeList = [] r1 = newton.ADI_LPS_REGS_YODA_LPSRAMRDCMD_s() r1.LPS_RAM_READ_EN = 0 r1.LPS_RAM_READ_RDY = 0 writeList.append( r1.VALUE16 ) writeList.append( newton.LPS2_REGS_LPSRAMRDCMD ) r2 = newton.ADI_LPS_REGS_YODA_LPSRAMADDR_s() r2.LPS_RAM_ADDR = 0 writeList.append( r2.VALUE16 ) writeList.append( newton.LPS2_REGS_LPSRAMADDR ) byteCount = generateRegisterWriteListCommand( writeList, attr ) totalByteCount += byteCount wordCount = wordCount & 0xfffe byteCount = wordCount * byteWidth totalByteCount += byteCount + 8 generateCommandHeader( cmd, attr, addr, byteCount ) for i in range(0, wordCount): ramWord = random.getrandbits( bitWidth ) if bitWidth <= 16: data16 = ramWord commandData.append( data16 ) elif bitWidth <= 32: data16 = ramWord & 0xffff commandData.append( data16 ) data16 = (ramWord >> 16) & 0xffff commandData.append( data16 ) elif bitWidth <= 64: data16 = ramWord & 0xffff commandData.append( 
data16 ) data16 = (ramWord >> 16) & 0xffff commandData.append( data16 ) data16 = (ramWord >> 32) & 0xffff commandData.append( data16 ) data16 = (ramWord >> 48) & 0xffff commandData.append( data16 ) return totalByteCount def generateGroupedCommand( target, count ): attr = newton.GROUPED_ATTR | newton.WRITE_ATTR cmd = newton.CMD_GROUPED_DATA totalByteCount = 0 generateCommandHeader( cmd, attr, 0, totalByteCount ) # Actual type count filled in later by the writeFile routine. print( "INFO: Generating grouped command ..." ) if count == 0 or count > newton.USEQ_SEQ_RAM_DEPTH: wordCount = newton.USEQ_SEQ_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "useq_seq_ram", wordCount, attr ) totalByteCount += byteCount if count == 0 or count > newton.USEQ_WAVE_RAM_DEPTH: wordCount = newton.USEQ_WAVE_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "useq_wave_ram", wordCount, attr ) totalByteCount += byteCount if count == 0 or count > newton.USEQ_MAP_RAM_DEPTH: wordCount = newton.USEQ_MAP_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "useq_map_ram", wordCount, attr ) totalByteCount += byteCount if count == 0 or count > newton.DATAPATH_RAM_DEPTH: wordCount = newton.DATAPATH_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "datapath_ram", wordCount, attr ) totalByteCount += byteCount if count == 0 or count > newton.DE_RAM_DEPTH: wordCount = newton.DE_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "de_ram", wordCount, attr ) totalByteCount += byteCount if count == 0 or count > newton.LPS1_RAM_DEPTH: wordCount = newton.LPS1_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "lps1_ram", wordCount, attr ) totalByteCount += byteCount if count == 0 or count > newton.LPS2_RAM_DEPTH: wordCount = newton.LPS2_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "lps2_ram", wordCount, attr ) totalByteCount += byteCount return totalByteCount 
def processRegisterFileList( file_name, attributes ): cmd = newton.CMD_REGISTER_CFG attr = attributes | newton.WRITE_ATTR totalByteCount = 0 print( "INFO:: Reading file " + file_name + " ..." ) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: if re.search( r'^\w+,\w+', line ): items = line.split( "," ) elif re.search( r'^\w+\s+\w+', line ): items = line.split( " " ) address = items[0].upper() data = items[1].upper() address = re.sub( r"0X", r"", address ) data = re.sub( r"0X", r"", data ) addr_int = int( address, 16 ) data_int = int( data, 16 ) if addr_int == newton.DE_REGS_DE_IA_ADDR_REG: deRamAddress = data_int elif addr_int == newton.DE_REGS_DE_IA_WRDATA_REG: deRamAddress += 1 elif addr_int == newton.USEQ_REGS_USEQRAMLOADADDR: seqRamAddress = data_int elif addr_int == newton.USEQ_REGS_USEQRAMLOADDATA: seqRamAddress += 1 else: registerWrite = {} registerWrite["address"] = int( address, 16 ) registerWrite["data"] = int( data, 16 ) if hsp_fw_0p97 == True: if registerWrite["address"] == 0x000c: print( "INFO: Skipping useqControlRegister write, data = " + hex(registerWrite["data"]) ); elif registerWrite["address"] == 0x0014: print( "INFO: Modifying write to the digPwrDown to make sure the LPS1 and DE blocks are enabled, data = " + hex(registerWrite["data"]) ); registerWrite["data"] = registerWrite["data"] & 0xbffe registerWriteList.append( registerWrite ) else: registerWriteList.append( registerWrite ) else: registerWriteList.append( registerWrite ) line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) def addRegisterWriteList( attributes ): cmd = newton.CMD_REGISTER_CFG attr = attributes | newton.WRITE_ATTR totalByteCount = 0 listSize = len( registerWriteList ) commandCount = newton.MAX_REG_LIST if listSize <= newton.MAX_REG_LIST: commandCount = 1 elif (listSize % newton.MAX_REG_LIST) == 0: commandCount = listSize // newton.MAX_REG_LIST else: commandCount = listSize // newton.MAX_REG_LIST + 1 
if listSize > 0: totalByteCount += commandCount * 8 totalByteCount += listSize * 4 print( "INFO:: Register list size = " + str( listSize ) ) index = 0 for i in range(0, int( commandCount )): if i < (commandCount - 1): regCount = newton.MAX_REG_LIST else: regCount = listSize - index if listSize > 0: generateCommandHeader( cmd, attr, 0, regCount * 4 ) for j in range(0, regCount): registerWrite = registerWriteList[index] index += 1 # Generate register list. data16 = registerWrite["data"] commandData.append( data16 ) data16 = registerWrite["address"] commandData.append( data16 ) return totalByteCount def process_wave_reg_txt( file_name, attributes ): cmd = newton.CMD_WAVE_RAM attr = attributes | newton.WRITE_ATTR totalByteCount = 0 wave_ram = {} for i in range(0, newton.USEQ_WAVE_RAM_DEPTH): wave_ram[i] = 0 r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() memoryAddress = 0 print( "INFO:: Reading file " + file_name + " ..." ) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: m = re.search( r'(\w+)\s+(\w+)', line ) address = m.group(1).upper() data = m.group(2).upper() address = re.sub( r"0X", r"", address ) data = re.sub( r"0X", r"", data ) address = int( address, 16 ) data = int( data, 16 ) if address == newton.USEQ_REGS_USEQRAMLOADADDR: r.VALUE16 = data memoryAddress = r.LD_ADDR elif address == newton.USEQ_REGS_USEQRAMLOADDATA: wave_ram[memoryAddress] = data memoryAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) cmd = newton.CMD_WAVE_RAM depth = newton.USEQ_WAVE_RAM_DEPTH bitWidth = newton.USEQ_WAVE_RAM_WIDTH byteWidth = newton.USEQ_WAVE_RAM_WIDTH_BYTES addr = newton.USEQ_REGS_USEQRAMLOADDATA r.LD_RAM_SEL = 1 r.LD_ADDR = 0 byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr ) totalByteCount += byteCount byteCount = depth * byteWidth totalByteCount += byteCount + 8 generateCommandHeader( cmd, attr, addr, byteCount ) for i in range(0, 
newton.USEQ_WAVE_RAM_DEPTH): commandData.append( wave_ram[i] ) return totalByteCount def extractRamAccesses( file_name, attributes ): global de_ram_temp global seq_ram_temp global wave_ram_temp global map_ram_temp totalByteCount = 0 de_ram_temp = {} seq_ram_temp = {} wave_ram_temp = {} map_ram_temp = {} r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() seqRamAddress = 0 r1 = newton.ADI_DE_REGS_YODA_DE_IA_SELECT_s() r1.RAM = 1 r2 = newton.ADI_DE_REGS_YODA_DE_IA_ADDR_REG_s() r2.RAM_ADDR = 0 deRamAddress = 0 hwordCount = 0 temp = 0 print( "INFO: Reading file " + file_name + " ..." ) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: m = re.search( r'(\w+)\s+(\w+)', line ) address = m.group(1).upper() data = m.group(2).upper() address = re.sub( r"0X", r"", address ) data = re.sub( r"0X", r"", data ) address = int( address, 16 ) data = int( data, 16 ) if address == newton.DE_REGS_DE_IA_ADDR_REG: r2.VALUE16 = data deRamAddress = r2.RAM_ADDR elif address == newton.DE_REGS_DE_IA_WRDATA_REG: temp += (data << (16*hwordCount)) de_ram_temp[deRamAddress] = temp if hwordCount == 3: hwordCount = 0 deRamAddress += 1 temp = 0 else: hwordCount += 1 elif address == newton.USEQ_REGS_USEQRAMLOADADDR: r.VALUE16 = data seqRamAddress = r.LD_ADDR seqRamSel = r.LD_RAM_SEL elif address == newton.USEQ_REGS_USEQRAMLOADDATA: if seqRamSel == 0: seq_ram_temp[seqRamAddress] = data elif seqRamSel == 1: wave_ram_temp[seqRamAddress] = data elif seqRamSel == 2: map_ram_temp[seqRamAddress] = data seqRamAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) return totalByteCount def processSeqRamFile( attributes ): cmd = newton.CMD_SEQ_RAM attr = attributes | newton.WRITE_ATTR totalByteCount = 0 seq_ram = {} for i in range(0, newton.USEQ_SEQ_RAM_DEPTH): seq_ram[i] = 0 memoryAddress = 0 file_name = "seq_ram.txt" print( "INFO: Reading file " + file_name + " ..." 
) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: data = line.upper() data = int( data, 16 ) & newton.USEQ_SEQ_RAM_MASK # Parity is the MSB seq_ram[memoryAddress] = data memoryAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) for memoryAddress in seq_ram_temp.keys(): seq_ram[memoryAddress] = seq_ram_temp[memoryAddress] cmd = newton.CMD_SEQ_RAM depth = newton.USEQ_SEQ_RAM_DEPTH bitWidth = newton.USEQ_SEQ_RAM_WIDTH byteWidth = newton.USEQ_SEQ_RAM_WIDTH_BYTES addr = newton.USEQ_REGS_USEQRAMLOADDATA r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() r.LD_RAM_SEL = 0 r.LD_ADDR = 0 byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr ) totalByteCount += byteCount byteCount = depth * byteWidth totalByteCount += byteCount + 8 generateCommandHeader( cmd, attr, addr, byteCount ) for i in range(0, newton.USEQ_SEQ_RAM_DEPTH): commandData.append( seq_ram[i] ) return totalByteCount # Read Wave RM contents from wave_ram.txt and wave_reg.txt files def processWaveRamFile( attributes ): cmd = newton.CMD_WAVE_RAM attr = attributes | newton.WRITE_ATTR totalByteCount = 0 wave_ram = {} for i in range(0, newton.USEQ_WAVE_RAM_DEPTH): wave_ram[i] = 0 memoryAddress = 0 file_name = "wave_ram.txt" print( "INFO: Reading file " + file_name + " ..." ) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: data = line.upper() data = int( data, 16 ) & newton.USEQ_WAVE_RAM_MASK # Parity is the MSB wave_ram[memoryAddress] = data memoryAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) for memoryAddress in wave_ram_temp.keys(): wave_ram[memoryAddress] = wave_ram_temp[memoryAddress] r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() file_name = "wave_reg.txt" print( "INFO: Reading file " + file_name + " ..." 
) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: m = re.search( r'(\w+)\s+(\w+)', line ) address = m.group(1).upper() data = m.group(2).upper() address = re.sub( r"0X", r"", address ) data = re.sub( r"0X", r"", data ) address = int( address, 16 ) data = int( data, 16 ) if address == newton.USEQ_REGS_USEQRAMLOADADDR: r.VALUE16 = data memoryAddress = r.LD_ADDR elif address == newton.USEQ_REGS_USEQRAMLOADDATA: wave_ram[memoryAddress] = data memoryAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) cmd = newton.CMD_WAVE_RAM depth = newton.USEQ_WAVE_RAM_DEPTH bitWidth = newton.USEQ_WAVE_RAM_WIDTH byteWidth = newton.USEQ_WAVE_RAM_WIDTH_BYTES addr = newton.USEQ_REGS_USEQRAMLOADDATA r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() r.LD_RAM_SEL = 1 r.LD_ADDR = 0 byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr ) totalByteCount += byteCount byteCount = depth * byteWidth totalByteCount += byteCount + 8 generateCommandHeader( cmd, attr, addr, byteCount ) for i in range(0, newton.USEQ_WAVE_RAM_DEPTH): commandData.append( wave_ram[i] ) return totalByteCount def processMapRamFile( attributes ): cmd = newton.CMD_MAP_RAM attr = attributes | newton.WRITE_ATTR totalByteCount = 0 map_ram = {} for i in range(0, newton.USEQ_MAP_RAM_DEPTH): map_ram[i] = 0 memoryAddress = 0 file_name = "map_ram.txt" print( "INFO: Reading file " + file_name + " ..." 
) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: data = line.upper() data = int( data, 16 ) & newton.USEQ_SEQ_RAM_MASK # Parity is the MSB map_ram[memoryAddress] = data memoryAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) for memoryAddress in map_ram_temp.keys(): map_ram[memoryAddress] = map_ram_temp[memoryAddress] cmd = newton.CMD_MAP_RAM depth = newton.USEQ_MAP_RAM_DEPTH bitWidth = newton.USEQ_MAP_RAM_WIDTH byteWidth = newton.USEQ_MAP_RAM_WIDTH_BYTES addr = newton.USEQ_REGS_USEQRAMLOADDATA r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() r.LD_RAM_SEL = 2 r.LD_ADDR = 0 byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr ) totalByteCount += byteCount byteCount = depth * byteWidth totalByteCount += byteCount + 8 generateCommandHeader( cmd, attr, addr, byteCount ) for i in range(0, newton.USEQ_MAP_RAM_DEPTH): commandData.append( map_ram[i] ) return totalByteCount def processDatapathMemoryFiles( attributes ): cmd = newton.CMD_DATAPATH_RAM attr = attributes | newton.WRITE_ATTR totalByteCount = 0 datapath_ram = {} for i in range(0, newton.DATAPATH_RAM_DEPTH): datapath_ram[i] = 0 r1 = newton.ADI_DATAPATH_IA_SELECT_s() r2 = newton.ADI_DATAPATH_IA_ADDR_REG_s() r2.IA_START_ADDR = 0 memoryAddress = 0 for i in range(0, 16): file_name = "PCM_Correction_val_" + str( i ) + ".txt" print( "INFO: Reading file " + file_name + " ..." 
) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: data = line.upper() data = int( data, 16 ) & newton.DATAPATH_RAM_MASK # Parity is the MSB datapath_ram[memoryAddress] = data memoryAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) cmd = newton.CMD_DATAPATH_RAM depth = newton.DATAPATH_RAM_DEPTH bitWidth = newton.DATAPATH_RAM_WIDTH byteWidth = newton.DATAPATH_RAM_WIDTH_BYTES addr = newton.DATAPATH_REGS_IA_WRDATA_REG writeList = [] r1.IA_ENA = 1 writeList.append( r1.VALUE16 ) writeList.append( newton.DATAPATH_REGS_IA_SELECT ) r2.IA_START_ADDR = 0 writeList.append( r2.VALUE16 ) writeList.append( newton.DATAPATH_REGS_IA_ADDR_REG ) byteCount = generateRegisterWriteListCommand( writeList, attr ) totalByteCount += byteCount byteCount = depth * byteWidth totalByteCount += byteCount + 8 generateCommandHeader( cmd, attr, addr, byteCount ) for i in range(0, newton.DATAPATH_RAM_DEPTH): commandData.append( datapath_ram[i] ) r1.IA_ENA = 0 byteCount = generateRegisterWriteCommand( newton.DATAPATH_REGS_IA_SELECT, r1.VALUE16, attr ) totalByteCount += byteCount return totalByteCount def processDumpEngineMemoryFile( attributes ): cmd = newton.CMD_DUMP_ENGINE_RAM attr = attributes | newton.WRITE_ATTR totalByteCount = 0 de_ram = {} for i in range(0, newton.DE_RAM_DEPTH): de_ram[i] = 0 r1 = newton.ADI_DE_REGS_YODA_DE_IA_SELECT_s() r1.RAM = 1 r2 = newton.ADI_DE_REGS_YODA_DE_IA_ADDR_REG_s() r2.RAM_ADDR = 0 memoryAddress = 0 file_name = "De_config_all_bkdoor.hex" print( "INFO: Reading file " + file_name + " ..." 
) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: data = line.upper() data = int( data, 16 ) & newton.DE_RAM_MASK # Parity is the MSB de_ram[memoryAddress] = data memoryAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) for memoryAddress in de_ram_temp.keys(): de_ram[memoryAddress] = de_ram_temp[memoryAddress] cmd = newton.CMD_DUMP_ENGINE_RAM depth = newton.DE_RAM_DEPTH bitWidth = newton.DE_RAM_WIDTH byteWidth = newton.DE_RAM_WIDTH_BYTES addr = newton.DE_REGS_DE_IA_WRDATA_REG writeList = [] r1.RAM = 1 writeList.append( r1.VALUE16 ) writeList.append( newton.DE_REGS_DE_IA_SELECT ) r2.RAM_ADDR = 0 writeList.append( r2.VALUE16 ) writeList.append( newton.DE_REGS_DE_IA_ADDR_REG ) byteCount = generateRegisterWriteListCommand( writeList, attr ) totalByteCount += byteCount byteCount = depth * byteWidth totalByteCount += byteCount + 8 generateCommandHeader( cmd, attr, addr, byteCount ) for i in range(0, newton.DE_RAM_DEPTH): commandData.append( de_ram[i] & 0xFFFF ) commandData.append( (de_ram[i] >> 16) & 0xFFFF ) commandData.append( (de_ram[i] >> 32) & 0xFFFF ) commandData.append( (de_ram[i] >> 48) & 0xFFFF ) return totalByteCount def processLps1RamFile( attributes ): cmd = newton.CMD_LPS1_RAM attr = attributes | newton.WRITE_ATTR totalByteCount = 0 lps1_ram = {} for i in range(0, newton.LPS1_RAM_DEPTH): lps1_ram[i] = 0 memoryAddress = 0 file_name = "lps1_ram.hex" print( "INFO: Reading file " + file_name + " ..." 
) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: data = line.upper() data = int( data, 16 ) & newton.LPS1_RAM_MASK # Parity is the MSB lps1_ram[memoryAddress] = data memoryAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) cmd = newton.CMD_LPS1_RAM depth = newton.LPS1_RAM_DEPTH bitWidth = newton.LPS1_RAM_WIDTH byteWidth = newton.LPS1_RAM_WIDTH_BYTES addr = newton.LPS1_REGS_LPSRAMDATA writeList = [] r1 = newton.ADI_LPS_REGS_YODA_LPSRAMRDCMD_s() r1.LPS_RAM_READ_EN = 0 r1.LPS_RAM_READ_RDY = 0 writeList.append( r1.VALUE16 ) writeList.append( newton.LPS1_REGS_LPSRAMRDCMD ) r2 = newton.ADI_LPS_REGS_YODA_LPSRAMADDR_s() r2.LPS_RAM_ADDR = 0 writeList.append( r2.VALUE16 ) writeList.append( newton.LPS1_REGS_LPSRAMADDR ) byteCount = generateRegisterWriteListCommand( writeList, attr ) totalByteCount += byteCount byteCount = depth * byteWidth totalByteCount += byteCount + 8 generateCommandHeader( cmd, attr, addr, byteCount ) for i in range(0, newton.LPS1_RAM_DEPTH): commandData.append( lps1_ram[i] & 0xFFFF ) commandData.append( (lps1_ram[i] >> 16) & 0x00FF ) return totalByteCount def processLps2RamFile( attributes ): cmd = newton.CMD_LPS2_RAM attr = attributes | newton.WRITE_ATTR totalByteCount = 0 lps2_ram = {} for i in range(0, newton.LPS2_RAM_DEPTH): lps2_ram[i] = 0 memoryAddress = 0 file_name = "lps2_ram.hex" print( "INFO: Reading file " + file_name + " ..." 
) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: data = line.upper() data = int( data, 16 ) & newton.LPS2_RAM_MASK # Parity is the MSB lps2_ram[memoryAddress] = data memoryAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) cmd = newton.CMD_LPS2_RAM depth = newton.LPS2_RAM_DEPTH bitWidth = newton.LPS2_RAM_WIDTH byteWidth = newton.LPS2_RAM_WIDTH_BYTES addr = newton.LPS2_REGS_LPSRAMDATA writeList = [] r1 = newton.ADI_LPS_REGS_YODA_LPSRAMRDCMD_s() r1.LPS_RAM_READ_EN = 0 r1.LPS_RAM_READ_RDY = 0 writeList.append( r1.VALUE16 ) writeList.append( newton.LPS2_REGS_LPSRAMRDCMD ) r2 = newton.ADI_LPS_REGS_YODA_LPSRAMADDR_s() r2.LPS_RAM_ADDR = 0 writeList.append( r2.VALUE16 ) writeList.append( newton.LPS2_REGS_LPSRAMADDR ) byteCount = generateRegisterWriteListCommand( writeList, attr ) totalByteCount += byteCount byteCount = depth * byteWidth totalByteCount += byteCount + 8 generateCommandHeader( cmd, attr, addr, byteCount ) for i in range(0, newton.LPS2_RAM_DEPTH): commandData.append( lps2_ram[i] & 0xFFFF ) commandData.append( (lps2_ram[i] >> 16) & 0x00FF ) return totalByteCount def generateGroupedCommandSimulation( frontdoor ): global registerWriteList attr = newton.GROUPED_ATTR | newton.WRITE_ATTR totalByteCount = 0 registerWriteList = [] if frontdoor == True: byteCount = processDatapathMemoryFiles( newton.WRITE_ATTR ) byteCount = processLps2RamFile( newton.WRITE_ATTR ) generateCommandHeader( newton.CMD_OPERATING_MODE, newton.MBX_UNSIGNED_SEQ_WFI, 0, 0 ) generateCommandHeader( newton.CMD_GROUPED_DATA, attr, 0, totalByteCount ) # Actual type count filled in later by the writeFile routine. 
byteCount = extractRamAccesses( "test_csv.txt", attr ) totalByteCount += byteCount processRegisterFileList( "De_config_all_bkdoor.csv", attr ) processRegisterFileList( "test_csv.txt", attr ) processRegisterFileList( "config_reg.txt", attr ) byteCount = addRegisterWriteList( attr ) totalByteCount += byteCount byteCount = processSeqRamFile( attr ) totalByteCount += byteCount byteCount = processMapRamFile( attr ) totalByteCount += byteCount byteCount = processWaveRamFile( attr ) totalByteCount += byteCount byteCount = processDumpEngineMemoryFile( attr ) totalByteCount += byteCount byteCount = processLps1RamFile( attr ) totalByteCount += byteCount else: generateCommandHeader( newton.CMD_OPERATING_MODE, newton.WRITE_ATTR, 0, 0 ) generateCommandHeader( newton.CMD_GROUPED_DATA, attr, 0, totalByteCount ) # Actual type count filled in later by the writeFile routine. processRegisterFileList( "De_config_all_bkdoor.csv", attr ) processRegisterFileList( "test_csv.txt", attr ) byteCount = process_wave_reg_txt( "wave_reg.txt", attr ) totalByteCount += byteCount byteCount = addRegisterWriteList( attr ) totalByteCount += byteCount return totalByteCount if __name__ == "__main__": global commandData global simFilesFrontDoor global isGroupedCommand global hsp_fw_0p97 maxSpiBytes = 256 wordCount = 0 seed = 1 frontdoor = False isGroupedCommand = False commandData = [] hsp_fw_0p97 = False simFilesFrontDoor = {} simFiles = {} args = docopt(__doc__, version='0.1') if args['--count']: wordCount = int( args['--count'] ) if args['--seed']: seed = int( args['--seed'] ) if args['--frontdoor']: frontdoor = True if args['--hsp_fw_0p97']: hsp_fw_0p97 = True random.seed( seed ) simFilesFrontDoor["De_config_all_bkdoor.csv"] = "De_config_all_bkdoor.csv" simFilesFrontDoor["wave_reg.txt"] = "wave_reg.txt" simFilesFrontDoor["test_csv.txt"] = "test_csv.txt" if args['<target>'] == "grouped": isGroupedCommand = True if args['--sim']: totalByteCount = generateGroupedCommandSimulation( frontdoor ) else: 
totalByteCount = generateGroupedCommand( args['<target>'], wordCount ) else: totalByteCount = generateRamWriteCommand( args['<target>'], wordCount, 0 ) writeFile( args['<file_name>'], totalByteCount ) sys.exit( 0 )
36,170
12,886
import os, json __author__ = 'Manfred Minimair <manfred@minimair.org>' class JSONStorage: """ File storage for a dictionary. """ file = '' # file name of storage file data = None # data dict indent = ' ' # indent prefix for pretty printing json files def __init__(self, path, name): """ Initizlize. :param path: path to the storage file; empty means the current direcory. :param name: file name, json file """ if path: os.makedirs(path, exist_ok=True) self.file = os.path.normpath(os.path.join(path, name)) try: with open(self.file) as data_file: self.data = json.load(data_file) except FileNotFoundError: self.data = dict() self.dump() def dump(self): """ Dump data into storage file. """ with open(self.file, 'w') as out_file: json.dump(self.data, out_file, indent=self.indent) def get(self, item): """ Get stored item. :param item: name, string, of item to get. :return: stored item; raises a KeyError if item does not exist. """ return self.data[item] def set(self, item, value): """ Set item's value; causes the data to be dumped into the storage file. :param item: name, string of item to set. :param value: value to set. """ self.data[item] = value self.dump()
1,503
443
from setuptools import setup setup( name="scienz", version="0.0.1", packages=["scienz"], zip_safe=False, include_package_data=True, package_data={"scienz": ["scienz/*"],}, long_description=""" Common dataset definitions for aorist package. """, long_description_content_type="text/x-rst" )
332
113
#!-*- conding: utf8 -*- #coding: utf-8 """ Aluno: Gabriel Ribeiro Camelo Matricula: 401091 """ import matplotlib.pyplot as pplt # gráficos import math # Matemática import re # expressões regulares import numpy as np # matrizes from statistics import pstdev # Desvio padrão from scipy import stats # Contem o zscore #Funções para o calculo do R2 subxy = lambda x,y: x-y multxy = lambda x,y: x*y def somaYy(y): #cria o somatorio de yy acumulador = 0 y_media = np.sum(y)/len(y) for k in range(len(y)): acumulador += (y[k] - y_media)**2 return acumulador # Coleta de dados arq = open("aerogerador.dat", "r") # abre o arquivo que contem os dados x = [] # Dados y = [] # Resultados for line in arq: # separa x de y line = line.strip() # quebra no \n line = re.sub('\s+',',',line) # trocando espaços vazios por virgula X,Y = line.split(",") # quebra nas virgulas e retorna 2 valores x.append(float(X)) y.append(float(Y)) arq.close() # fecha o arquivo que contem os dados # Normalização Zscore xn = stats.zscore(x) #adicionando o peso que pondera o bias xb = [] for i in range(2250): xb.append(-1) X = np.matrix([xb, xn]) # Matriz de dados com o bias # Matriz de pesos aleatórios def matPesos (qtdNeuronios, qtdAtributos): # retorna uma matriz de numeros aleatórios de uma distribuição narmal w = np.random.randn(qtdNeuronios, qtdAtributos+1) return w Neuronios = int(input("Quantidade de Neuronios: ")) W = matPesos(Neuronios, 1) # Função de Ativação phi = lambda u: (1 - math.exp(u))/(1 + math.exp(u)) #Logistica # Ativação dos Neuronios U = np.array(W@X) Z = list(map(phi, [valor for linha in U for valor in linha])) Z = np.array(Z) Z = Z.reshape(Neuronios, 2250) # Matriz de pesos dos neuronios da camada de saida M = (y@Z.T) @ np.linalg.inv(Z@Z.T) # Ativação dos neuronios de saida D = M@Z # Calculo do R2 somaQe = sum(map(multxy, list(map(subxy, y, D)), list(map(subxy, y, D)))) R2 = 1 - (somaQe/somaYy(y)) #Resultados print("R2: ", R2) #gráfico pplt.plot(x, D, color ='red') pplt.scatter(x, y, marker 
= "*") pplt.show()
2,219
918
#!/usr/bin/env python # encoding: utf-8 """ @author: zhanghe @software: PyCharm @file: weixin.py @time: 2018-02-10 17:55 """ import re import time import hashlib # from urlparse import urljoin # PY2 # from urllib.parse import urljoin # PY3 from future.moves.urllib.parse import urljoin import execjs from tools.char import un_escape from config import current_config from models.news import FetchResult from news.items import FetchResultItem from apps.client_db import db_session_mysql from maps.platform import WEIXIN, WEIBO BASE_DIR = current_config.BASE_DIR def get_finger(content_str): """ :param content_str: :return: """ m = hashlib.md5() m.update(content_str.encode('utf-8') if isinstance(content_str, unicode) else content_str) finger = m.hexdigest() return finger def parse_weixin_js_body(html_body, url=''): """ 解析js :param html_body: :param url: :return: """ rule = r'<script type="text/javascript">.*?(var msgList.*?)seajs.use\("sougou/profile.js"\);.*?</script>' js_list = re.compile(rule, re.S).findall(html_body) if not js_list: print('parse error url: %s' % url) return ''.join(js_list) def parse_weixin_article_id(html_body): rule = r'<script nonce="(\d+)" type="text\/javascript">' article_id_list = re.compile(rule, re.I).findall(html_body) return article_id_list[0] def add_img_src(html_body): rule = r'data-src="(.*?)"' img_data_src_list = re.compile(rule, re.I).findall(html_body) print(img_data_src_list) for img_src in img_data_src_list: print(img_src) html_body = html_body.replace(img_src, '%(img_src)s" src="%(img_src)s' % {'img_src': img_src}) return html_body def get_img_src_list(html_body, host_name='/', limit=None): rule = r'src="(%s.*?)"' % host_name img_data_src_list = re.compile(rule, re.I).findall(html_body) if limit: return img_data_src_list[:limit] return img_data_src_list def check_article_title_duplicate(article_title): """ 检查标题重复 :param article_title: :return: """ session = db_session_mysql() article_id_count = session.query(FetchResult) \ 
.filter(FetchResult.platform_id == WEIXIN, FetchResult.article_id == get_finger(article_title)) \ .count() return article_id_count class ParseJsWc(object): """ 解析微信动态数据 """ def __init__(self, js_body): self.js_body = js_body self._add_js_msg_list_fn() self.ctx = execjs.compile(self.js_body) # print(self.ctx) def _add_js_msg_list_fn(self): js_msg_list_fn = """ function r_msg_list() { return msgList.list; }; """ self.js_body += js_msg_list_fn def parse_js_msg_list(self): msg_list = self.ctx.call('r_msg_list') app_msg_ext_info_list = [i['app_msg_ext_info'] for i in msg_list] comm_msg_info_date_time_list = [time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(i['comm_msg_info']['datetime'])) for i in msg_list] # msg_id_list = [i['comm_msg_info']['id'] for i in msg_list] msg_data_list = [ { # 'article_id': '%s_000' % msg_id_list[index], 'article_id': get_finger(i['title']), 'article_url': urljoin('https://mp.weixin.qq.com', un_escape(i['content_url'])), 'article_title': i['title'], 'article_abstract': i['digest'], 'article_pub_time': comm_msg_info_date_time_list[index], } for index, i in enumerate(app_msg_ext_info_list) ] msg_ext_list = [i['multi_app_msg_item_list'] for i in app_msg_ext_info_list] for index_j, j in enumerate(msg_ext_list): for index_i, i in enumerate(j): msg_data_list.append( { # 'article_id': '%s_%03d' % (msg_id_list[index_j], index_i + 1), 'article_id': get_finger(i['title']), 'article_url': urljoin('https://mp.weixin.qq.com', un_escape(i['content_url'])), 'article_title': i['title'], 'article_abstract': i['digest'], 'article_pub_time': comm_msg_info_date_time_list[index_j], } ) return msg_data_list
4,424
1,571
import numpy arr = map(int, input().strip().split(' ')) d2_arr = numpy.array(list(arr)) d2_arr.shape = (3, 3) print(d2_arr)
126
57
import os.path import json class Temprature_scaling: def __init__(self, label): fname = './parameters/Models/Txm/' + label + '_parameters.json' if os.path.isfile(fname) == False: print("Error: %s does not exists it uses Tx scaling default parameters."%s) exit(1) # fname = './parameters/Models/Txm/default_parameters.xml' with open(fname) as fp: _param = json.load(fp) # Parameters self.Norm = _param['a'] self.M_slope = _param['M_slope'] self.E_slope = _param['E_slope'] self.M_p = _param['M_p'] self.z_p = _param['z_p'] self.sig = _param['sig'] class Luminocity_scaling: def __init__(self, label): fname = './parameters/Models/Lxm/' + label + '_parameters.json' if os.path.isfile(fname) == False: print("ERROR: %s does not exists it uses Lx scaling default parameters." % s) exit(1) # fname = './parameters/Models/Lxm/default_parameters.xml' with open(fname) as fp: _param = json.load(fp) # Parameters self.Norm = _param['a'] self.M_slope = _param['M_slope'] self.E_slope = _param['E_slope'] self.M_p = _param['M_p'] self.z_p = _param['z_p'] self.sig = _param['sig']
1,359
485
import multiprocessing import os from settings.default import QUANDL_TICKERS, CPD_QUANDL_OUTPUT_FOLDER_DEFAULT N_WORKERS = len(QUANDL_TICKERS) if not os.path.exists(CPD_QUANDL_OUTPUT_FOLDER_DEFAULT): os.mkdir(CPD_QUANDL_OUTPUT_FOLDER_DEFAULT) all_processes = [ f'python script_cpd_example.py "{ticker}" "{os.path.join(CPD_QUANDL_OUTPUT_FOLDER_DEFAULT, ticker + ".csv")}" "1990-01-01" "2019-12-31"' for ticker in QUANDL_TICKERS ] process_pool = multiprocessing.Pool(processes=N_WORKERS) process_pool.map(os.system, all_processes)
545
244
# Generated by Django 3.1.7 on 2021-03-08 14:49 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('events', '0048_auto_20210307_1644'), ] operations = [ migrations.AlterField( model_name='event', name='email_conf', field=models.JSONField(blank=True, default=dict), ), migrations.AlterField( model_name='event', name='fields_info', field=models.JSONField(blank=True, default=dict), ), migrations.AlterField( model_name='event', name='limits', field=models.JSONField(blank=True, default=dict), ), migrations.AlterField( model_name='event', name='logins_paths', field=models.JSONField(blank=True, default=dict), ), migrations.AlterField( model_name='event', name='standings_urls', field=models.JSONField(blank=True, default=dict), ), migrations.AlterField( model_name='participant', name='addition_fields', field=models.JSONField(blank=True, default=dict), ), ]
1,248
373
# -*- coding: utf-8 -*- from odoo import models, fields, api class Cowin_custom_model_data(models.Model): _name = 'cowin_settings.custome_model_data' # name = fields.Char(string=u'ID') model_name = fields.Char(string=u'model ID') _sql_constraints = [ ('model_name_key', 'UNIQUE (model_name)', u'model_name标识名不能相同!!!') ]
369
159
"""iex_parser""" from .parser import Parser from .messages import DEEP_1_0, TOPS_1_6, TOPS_1_5 __all__ = [ 'Parser', 'DEEP_1_0', 'TOPS_1_5', 'TOPS_1_6' ]
172
90
from pathlib import Path
import json
import re
import numpy as np
import os
from collections import OrderedDict

from .TxtMpiFile import TxtMpiFile
from .BaseSource import BaseSource
from tweezers.meta import MetaDict, UnitDict


class TxtMpiSource(BaseSource):
    """
    Data source for \*.txt files from the MPI with the old style header or the new JSON format.
    """

    # TxtMpiFile handles for the experiment data, the PSD of the thermal
    # calibration and the calibration time series (all optional).
    data = None
    psd = None
    ts = None

    def __init__(self, data=None, psd=None, ts=None):
        """
        Args:
            data (:class:`pathlib.Path`): path to the experiment data file (optional)
            psd (:class:`pathlib.Path`): path to the PSD file (optional)
            ts (:class:`pathlib.Path`): path to the time series file (optional)
        """
        super().__init__()

        # go through input; each given path gets its own file reader
        if data:
            self.data = TxtMpiFile(data)
        if psd:
            self.psd = TxtMpiFile(psd)
        if ts:
            self.ts = TxtMpiFile(ts)

    @staticmethod
    def isDataFile(path):
        """
        Checks if a given file is a valid data file and returns its ID and type.

        Args:
            path (:class:`pathlib.Path`): file to check

        Returns:
            :class:`dict` with `id`, `trial`, `type` and `path`, or ``False``
            if the file name does not match the expected pattern
        """
        pPath = Path(path)
        # raw string: `\.` is meant literally (was a deprecated escape before)
        m = re.match(r'^((?P<type>[A-Z]+)_)?(?P<id>(?P<trial>[0-9]{1,3})_Date_[0-9_]{19})\.txt$',
                     pPath.name)
        if m:
            # files without an explicit type prefix are experiment data
            tipe = 'data'
            if m.group('type'):
                tipe = m.group('type').lower()
            res = {'id': m.group('id'),
                   'trial': m.group('trial'),
                   'type': tipe,
                   'path': pPath}
            return res
        else:
            return False

    @classmethod
    def getAllSources(cls, path):
        """
        Get a list of all IDs and their files that are at the given path and its subfolders.

        Args:
            path (:class:`pathlib.Path`): root path for searching

        Returns:
            :class:`collections.OrderedDict` mapping id -> :class:`TxtMpiSource`
        """
        _path = Path(path)
        # get a list of all files and their properties
        files = cls.getAllFiles(_path)
        sources = OrderedDict()
        # sort files that belong to the same id
        for el in files:
            if el['id'] not in sources.keys():
                sources[el['id']] = cls()
            setattr(sources[el['id']], el['type'], TxtMpiFile(el['path']))
        return sources

    def getMetadata(self):
        """
        Return the metadata of the experiment.

        Returns:
            :class:`tweezers.MetaDict` and :class:`tweezers.UnitDict`
        """
        # keep variables local so they are not stored in memory
        meta, units = self.getDefaultMeta()

        # check each available file for header information
        # sequence is important since later calls overwrite earlier ones so if a header is present in "psd" and
        # "data", the value from "data" will be returned
        if self.ts:
            # get header data from file
            metaTmp, unitsTmp = self.ts.getMetadata()
            # make sure we don't override important stuff that by accident has the same name
            self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)
            self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)
            # set time series unit
            unitsTmp['timeseries'] = 'V'
            # update the dictionaries with newly found values
            meta.update(metaTmp)
            units.update(unitsTmp)
        if self.psd:
            metaTmp, unitsTmp = self.psd.getMetadata()
            # make sure we don't override important stuff that by accident has the same name
            # also, 'nSamples' and 'samplingRate' in reality refer to the underlying timeseries data
            self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)
            self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)
            # set psd unit
            unitsTmp['psd'] = 'V^2 / Hz'
            meta.update(metaTmp)
            units.update(unitsTmp)
        if self.data:
            metaTmp, unitsTmp = self.data.getMetadata()
            # rename variables for the sake of consistency and compatibility with Matlab and because the naming is
            # confusing: samplingRate is actually the acquisition rate since the DAQ card averages the data already
            # the sampling rate should describe the actual time step between data points not something else
            if 'recordingRate' in metaTmp:
                self.renameKey('samplingRate', 'acquisitionRate', meta=metaTmp, units=unitsTmp)
                self.renameKey('recordingRate', 'samplingRate', meta=metaTmp, units=unitsTmp)
                self.renameKey('nSamples', 'nAcquisitionsPerSample', meta=metaTmp)
            # add trial number
            metaTmp['trial'] = self.data.getTrialNumber()
            # update dictionaries
            meta.update(metaTmp)
            units.update(unitsTmp)

        # add title string to metadata, used for plots
        self.setTitle(meta)

        # make sure all axes have the beadDiameter
        meta['pmY']['beadDiameter'] = meta['pmX']['beadDiameter']
        units['pmY']['beadDiameter'] = units['pmX']['beadDiameter']
        meta['aodY']['beadDiameter'] = meta['aodX']['beadDiameter']
        units['aodY']['beadDiameter'] = units['aodX']['beadDiameter']

        # add trap names
        meta['traps'] = meta.subDictKeys()

        return meta, units

    def getData(self):
        """
        Return the experiment data.

        Returns:
            :class:`pandas.DataFrame`

        Raises:
            ValueError: if no data file was given
        """
        if not self.data:
            raise ValueError('No data file given.')
        return self.data.getData()

    def getDataSegment(self, tmin, tmax, chunkN=10000):
        """
        Returns the data between ``tmin`` and ``tmax``.

        Args:
            tmin (float): minimum data timestamp
            tmax (float): maximum data timestamp
            chunkN (int): number of rows to read per chunk
                (kept for interface compatibility; currently unused)

        Returns:
            :class:`pandas.DataFrame`
        """
        meta, units = self.getMetadata()
        nstart = int(meta.samplingRate * tmin)
        nrows = int(meta.samplingRate * (tmax - tmin))
        return self.data.getDataSegment(nstart, nrows)

    def getPsd(self):
        """
        Return the PSD of the thermal calibration of the experiment as computed by LabView.

        Returns:
            :class:`pandas.DataFrame`

        Raises:
            ValueError: if no PSD file was given
        """
        if not self.psd:
            raise ValueError('No PSD file given.')
        # read psd file which also contains the fitting
        data = self.psd.getData()
        # ignore the fit columns; DataFrame.items() replaces the removed
        # pandas `iteritems()` API
        titles = [title for title, column in data.items() if not title.endswith('Fit')]
        return data[titles]

    def getPsdFit(self):
        """
        Return the LabView fit of the Lorentzian to the PSD.

        Returns:
            :class:`pandas.DataFrame`

        Raises:
            ValueError: if no PSD file was given
        """
        if not self.psd:
            raise ValueError('No PSD file given.')
        # the fit is in the psd file
        data = self.psd.getData()
        # only choose frequency and fit columns
        titles = [title for title, column in data.items() if title.endswith('Fit') or title == 'f']
        return data[titles]

    def getTs(self):
        """
        Return the time series recorded for thermal calibration.

        Returns:
            :class:`pandas.DataFrame`

        Raises:
            ValueError: if no time series file was given
        """
        if not self.ts:
            raise ValueError('No time series file given.')
        data = self.ts.getData()
        # remove "Diff" from column headers
        columnHeader = [title.split('Diff')[0] for title in data.columns]
        data.columns = columnHeader
        return data

    @staticmethod
    def calculateForce(meta, units, data):
        """
        Calculate forces from Diff signal and calibration values.

        Args:
            meta (:class:`.MetaDict`): metadata
            units (:class:`.UnitDict`): unit metadata
            data (:class:`pandas.DataFrame`): data

        Returns:
            Updated versions of the input parameters

            * meta (:class:`.MetaDict`)
            * units (:class:`.UnitDict`)
            * data (:class:`pandas.DataFrame`)
        """
        # calculate force per trap and axis
        for trap in meta['traps']:
            m = meta[trap]
            data[trap + 'Force'] = (data[trap + 'Diff'] - m['zeroOffset']) \
                                   / m['displacementSensitivity'] \
                                   * m['stiffness']
            units[trap + 'Force'] = 'pN'

        # invert PM force, is not as expected in the raw data
        # data.pmYForce = -data.pmYForce

        # calculate mean force per axis, only meaningful for two traps
        data['xForce'] = (data.pmXForce + data.aodXForce) / 2
        data['yForce'] = (data.pmYForce - data.aodYForce) / 2
        units['xForce'] = 'pN'
        units['yForce'] = 'pN'
        return meta, units, data

    @classmethod
    def postprocessData(cls, meta, units, data):
        """
        Create time array, calculate forces etc.

        Was a ``@staticmethod`` that referenced ``self.calculateForce`` and
        therefore raised ``NameError`` when called; as a classmethod it is
        still callable as ``TxtMpiSource.postprocessData(meta, units, data)``.

        Args:
            meta (:class:`tweezers.MetaDict`): meta dictionary
            units (:class:`tweezers.UnitDict`): units dictionary
            data (:class:`pandas.DataFrame`): data

        Returns:
            Updated versions of the input parameters

            * meta (:class:`.MetaDict`)
            * units (:class:`.UnitDict`)
            * data (:class:`pandas.DataFrame`)
        """
        data['time'] = np.arange(0, meta['dt'] * len(data), meta['dt'])
        units['time'] = 's'
        meta, units, data = cls.calculateForce(meta, units, data)
        data['distance'] = np.sqrt(data.xDist**2 + data.yDist**2)
        units['distance'] = 'nm'
        return meta, units, data

    def setTitle(self, meta):
        """
        Set the 'title' key in the metadata dictionary based on date and trial number if they
        are available. This string is e.g. used for plots.

        Args:
            meta (:class:`tweezers.MetaDict`): metadata to update in place
        """
        title = ''
        # each component is optional; missing keys are simply skipped
        try:
            title += meta['date'] + ' '
        except KeyError:
            pass
        try:
            title += meta['time'] + ' '
        except KeyError:
            pass
        try:
            title += meta['trial']
        except KeyError:
            pass
        meta['title'] = title.strip()

    def save(self, container, path=None):
        """
        Writes the data of a :class:`tweezers.TweezersData` to disk. This preservers the `data`
        and `thermalCalibration` folder structure. `path` should be the folder that holds these
        subfolders. If it is empty, the original files will be overwritten.

        Args:
            container (:class:`tweezers.TweezersData`): data to write
            path (:class:`pathlib.Path`): path to a folder for the dataset, if not set, the
                original data will be overwritten
        """
        # only convert when a path was actually given; the original converted
        # unconditionally, so the documented default `path=None` crashed on
        # `Path(None)` before the fallback below could run
        if path is not None and not isinstance(path, Path):
            path = Path(path)

        data = ['ts', 'psd', 'data']
        # list of input files and their data from the container, these are the ones we're writing back
        # this is also important for the laziness of the TweezerData object
        files = [[getattr(self, file), getattr(container, file)] for file in data if getattr(self, file)]
        if not files:
            return
        # get root path if not given
        if not path:
            path = files[0][0].path.parents[1]

        meta = container.meta
        meta['units'] = container.units

        # now write all of it
        for file in files:
            filePath = path / file[0].path.parent.name / file[0].path.name
            self.writeData(meta, file[1], filePath)

    def writeData(self, meta, data, path):
        """
        Write experiment data back to a target file. Note that this writes the data in an
        `UTF-8` encoding. Implementing this is not required for a data source but used here to
        convert the header to JSON.

        Args:
            meta (:class:`tweezers.MetaDict`): meta data to store
            data (:class:`pandas.DataFrame`): data to write back
            path (:class:`pathlib.Path`): path where to write the file
        """
        # ensure directory exists; exist_ok replaces the try/except pattern
        os.makedirs(str(path.parent), exist_ok=True)

        # write the JSON header followed by the tab-separated data
        with path.open(mode='w', encoding='utf-8') as f:
            f.write(json.dumps(meta, indent=4, ensure_ascii=False, sort_keys=True))
            f.write("\n\n#### DATA ####\n\n")
        data.to_csv(path_or_buf=str(path), sep='\t', mode='a', index=False)

    def getDefaultMeta(self):
        """
        Set default values for metadata and units. This will be overwritten by values in the
        data files if they exist.

        Returns:
            :class:`tweezers.MetaDict` and :class:`tweezers.UnitDict`
        """
        meta = MetaDict()
        units = UnitDict()

        # meta[self.getStandardIdentifier('tsSamplingRate')] = 80000
        # # units[self.getStandardIdentifier('tsSamplingRate')] = 'Hz'

        return meta, units

    def renameKey(self, oldKey, newKey, meta=None, units=None):
        """
        Rename a key in the meta- and units-dictionaries. Does not work for nested dictionaries.

        Args:
            meta (:class:`tweezers.MetaDict`): meta dictionary
            units (:class:`tweezers.UnitDict`): units dictionary (can be an empty one if not required)
            oldKey (str): key to be renamed
            newKey (str): new key name
        """
        if meta:
            if oldKey not in meta:
                return
            meta.replaceKey(oldKey, newKey)
        if units:
            if oldKey not in units:
                return
            units.replaceKey(oldKey, newKey)
14,129
4,025
# Dictionary keys used when exchanging localization data between scripts.
IMPLEMENTATION_FILE_PATHS_KEY = 'implementation_file_paths'
LPROJ_DIR_PATHS_KEY = 'lproj_file_paths'
KEY_KEY = 'key'
TRANSLATION_KEY = 'translation'

# Standard iOS strings-table file name looked up inside each *.lproj folder.
LOCALIZABLE_STRINGS_FILE_NAME = 'Localizable.strings'
232
97
#!/usr/bin/env python3

from threading import Thread
from time import sleep
import logging

import shexter.requester
import shexter.platform as platform
import shexter.config

"""
This file is for the shexter daemon, which runs persistantly.
Every 5 seconds, it polls the phone to see if there are unread messages. If there are,
it displays a notification to the user.
This file is meant to be run directly; not to be imported by any other file.
"""


def notify(msg: str, title=shexter.config.APP_NAME):
    # Print and, when a platform notifier is available, show a desktop
    # notification. NOTE: the default `title` is evaluated once at import time.
    # `notifier`/`notify_function` are module globals assigned in the bootstrap
    # code at the bottom of this file.
    print(title + ': ' + msg)
    if notifier:
        # Note swap of msg, title order
        notify_function(title, msg)


def _parse_contact_name(line: str):
    # Extract the contact name from one unread-message line.
    # Returns None (implicitly) if parsing fails — callers should be aware.
    # print('parsing contact name from "{}"'.format(line))
    # The contact name is the first word after the first ']'
    try:
        return line.split(']')[1].strip().split()[0].rstrip(':')
    except Exception as e:
        print(e)
        print('Error parsing contact name from "{}"'.format(line))


def notify_unread(unread: str) -> None:
    # Build a notification title/body from the phone's unread-messages reply
    # and dispatch it via notify().
    unread_lines = unread.splitlines()
    # Remove the first line, which is just "Unread Messages:"
    unread_lines = unread_lines[1:]
    if len(unread_lines) > 1:
        notify_title = str(len(unread_lines)) + ' New Messages'
        notify_msg = 'Messages from '
        contact_names = []
        for line in unread_lines:
            contact_name = _parse_contact_name(line)
            # Don't repeat contacts
            if contact_name not in contact_names:
                notify_msg += contact_name + ', '
                contact_names.append(contact_name)
        # Remove last ', '
        notify_msg = notify_msg[:-2]
    elif len(unread_lines) == 0:
        # At this time, if the unread response was originally exactly one line,
        # it was because the phone rejected the request.
        notify_title = 'Approval Required'
        notify_msg = 'Approve this computer on your phone'
    else:
        contact_name = _parse_contact_name(unread_lines[0])
        notify_title = 'New Message'
        notify_msg = 'Message from ' + contact_name
    # A cool title would be the phone's hostname.
    notify(notify_msg, title=notify_title)


def init_notifier_win():
    # Windows notifier setup; returns a ToastNotifier or None on ImportError.
    try:
        import win10toast
        toaster = win10toast.ToastNotifier()
        toaster.show_toast(shexter.config.APP_NAME, 'Notifications enabled', duration=3, threaded=True)
        return toaster
    except ImportError as e:
        print(e)
        print('***** To use the ' + shexter.config.APP_NAME + ' daemon on Windows you must install win10toast'
              ' with "[pip | pip3] install win10toast"')


# How long a desktop notification stays visible, in seconds.
NOTIFY_LEN_S = 10


def notify_win(title: str, msg: str) -> None:
    # Notifier is a win10toast.ToastNotifier
    notifier.show_toast(title, msg, duration=NOTIFY_LEN_S, threaded=True)


"""
def build_notifier_macos():
    # Fuck this for now
    try:
        import gntp.notifier
    except ImportError:
        print('To use the ' + shexter.config.APP_NAME + ' daemon on OSX you must install Growl (see http://growl.info) and its python library with "pip3 install gntp"')
        quit()
"""

import subprocess

NOTIFY_SEND = 'notify-send'


def init_notifier_nix():
    # Linux notifier setup; probes notify-send and returns True on success.
    try:
        subprocess.check_call([NOTIFY_SEND, shexter.config.APP_NAME, 'Notifications enabled', '-t', '3000'])
        return True
    except Exception as e:
        print(e)
        print('***** To use the ' + shexter.config.APP_NAME +
              ' daemon on Linux you must install notify-send, eg "sudo apt-get install notify-send"')


def notify_nix(title: str, msg: str):
    # NOTE(review): the title/msg come from phone messages (untrusted input)
    # and are interpolated into a shell command — potential shell injection;
    # consider subprocess.run([...]) with a list instead.
    # print('notify_nix {} {}'.format(title, msg))
    result = subprocess.getstatusoutput('notify-send "{}" "{}" -t {}'
                                        .format(title, msg, NOTIFY_LEN_S * 1000))
    if result[0] != 0:
        print('Error running notify-send:')
        print(result[1])


def init_notifier():
    """
    Initializes the 'notifier' and 'notify_function' globals, which are later called by notify
    The notifier is an object for the notify_platform functions to use
    """
    platf = platform.get_platform()
    global notifier, notify_function
    if platf == platform.Platform.WIN:
        notifier = init_notifier_win()
        notify_function = notify_win
    elif platf == platform.Platform.LINUX:
        notifier = init_notifier_nix()
        notify_function = notify_nix
    else:
        # NOTE(review): `platf` presumably is an enum member; concatenating it
        # to a str would raise TypeError — confirm Platform's __str__/type.
        print('Sorry, notifications are not supported on your platform, which appears to be ' + platf)
        return None


# Must match response from phone in the case of no msgs.
NO_UNREAD_RESPONSE = 'No unread messages.'


def main(connectinfo: tuple):
    # Daemon loop: poll the phone every ~5 s and notify on unread messages.
    running = True
    logging.basicConfig(filename=shexter.config.APP_NAME.lower() + 'd.log', level=logging.DEBUG,
                        format='%(asctime)s - %(levelname)s - %(message)s')
    logger = logging.getLogger(shexter.config.APP_NAME)
    launched_msg = shexter.config.APP_NAME + ' daemon launched'
    logger.info(launched_msg)
    logger.info('ConnectInfo: ' + str(connectinfo))
    print(launched_msg + ' - CTRL + C to quit')
    try:
        while running:
            unread_result = shexter.requester.unread_command(connectinfo, silent=True)
            # print('result: ' + str(type(unread_result)) + ' ' + unread_result)
            if not unread_result:
                logger.info('Failed to connect to phone')
            elif unread_result != NO_UNREAD_RESPONSE:
                # new messages — notify on a worker thread so polling continues
                Thread(target=notify_unread, args=(unread_result,)).start()
                logger.info('Got at least 1 msg')
            else:
                logger.debug('No unread')
                # print('no unread')
            for i in range(5):
                # Shorter sleep to afford interrupting...
                # https://stackoverflow.com/questions/5114292/break-interrupt-a-time-sleep-in-python
                sleep(1)
    except (KeyboardInterrupt, EOFError):
        print('Exiting')
        quit(0)


# ---- module-level bootstrap: runs on import/execution ----
_connectinfo = shexter.config.configure(False)
if not _connectinfo:
    print('Please run ' + shexter.config.APP_NAME + ' config first, so the daemon knows how to find your phone.')
    quit()

# Initialize globals
notifier = None
notify_function = None
init_notifier()
if not notifier:
    # fall back to console-only notifications
    notify_function = print

# Call the main loop
main(_connectinfo)
6,482
1,966
import sys
import os
import logging
import time
import datetime

from PIL import (
    Image,
    ImageDraw,
    ImageFont
)

from config import *
from util import *
from info_epd import praytimes

DEBUG_PRAYTIMES = False


class SalahMixin:
    """Uses praytimes library for calculating prayer times. Make sure settings are correct
    for your location, Madhab, etc.
    Especially check:
      * Caluclation methos
      * Settings for Maghrib & midnight
    """
    # Calculation method and clock format passed to the praytimes library.
    calc_method = 'ISNA'
    time_fmt = '12h'

    def __init__(self):
        # Register this mixin's hooks with the host display class.
        # NOTE(review): assumes the host defines `self._funcs` with
        # 'setup'/'update'/'redraw' lists before this __init__ runs — confirm.
        self._funcs['setup'].append(self.setup_praytimes)
        self._funcs['update'].append(self.update_praytimes)
        self._funcs['redraw'].append(self.redraw_praytimes)

    def setup_praytimes(self):
        # One-time initialisation of the PrayTimes calculator and the
        # per-update info slots read by the redraw methods.
        logging.info("Setup praytimes...")
        self.pt = praytimes.PrayTimes()
        self.pt.setMethod(self.calc_method)
        params = dict(
            fajr=15,
            maghrib='0 min',
            isha=15,
            midnight='Jafari'  # doublecheck: seems to be more correct than non-jafari
        )
        self.pt.adjust(params)
        self.update_info['praytimes'] = {}
        self.update_info['praytimes']['pt'] = None
        self.update_info['praytimes']['curr'] = None
        self.update_info['praytimes']['curr_end'] = None
        self.update_info['praytimes']['next_time'] = None

    def update_praytimes(self):
        # Recompute today's prayer times, classify the current moment into a
        # prayer window, and store what's next (and when) in update_info.
        logging.info("Update praytimes...")
        # get_today/get_tomorrow/get_now, COORDS and TIMEZONES come from the
        # star imports of `util`/`config` above.
        today = get_today()
        tomorrow = get_tomorrow()
        now = get_now()
        coords = COORDS['Culver City']
        timezone = TIMEZONES['Los Angeles']
        dst = time.localtime().tm_isdst
        pt = self.pt.getTimes(today, coords, timezone, dst, self.time_fmt)
        fmt = '%I:%M%p'

        def to_time_obj(p1):
            # Parse the formatted clock string for prayer `p1` and return a
            # factory producing a full datetime anchored on today's date.
            p2 = datetime.datetime.strptime(pt[p1], fmt)

            def to_date_obj():
                return datetime.datetime(year=now.year, month=now.month, day=now.day,
                                         hour=p2.hour, minute=p2.minute)
            return to_date_obj

        fajr = to_time_obj('fajr')()
        sunrise = to_time_obj('sunrise')()
        dhuhr = to_time_obj('dhuhr')()
        asr = to_time_obj('asr')()
        maghrib = to_time_obj('maghrib')()
        isha = to_time_obj('isha')()
        midnight = to_time_obj('midnight')()

        # Assume maghrib lasts for 45 mins
        maghrib_end = maghrib + datetime.timedelta(minutes=45)

        # Figure out what applies to current time
        curr = {}
        curr['fajr'] = fajr <= now < sunrise
        after_fajr = sunrise <= now < dhuhr
        curr['dhuhr'] = dhuhr <= now < asr
        curr['asr'] = asr <= now < maghrib
        curr['maghrib'] = maghrib <= now < maghrib_end
        after_maghrib = maghrib_end <= now < isha

        # Check isha time (could be past 00:00)
        is_isha = False
        if not any((curr['fajr'], curr['dhuhr'], curr['asr'], curr['maghrib'],
                    after_fajr, after_maghrib)):
            # Either we are before fajr, or after isha
            after_isha = now >= isha
            if after_isha:
                # Compare hour/minute against (possibly next-day) midnight.
                m_hr = midnight.hour
                if m_hr < fajr.hour:
                    m_hr += 24
                m_min = midnight.minute
                n_hr = now.hour
                n_min = now.minute
                if n_hr < m_hr:
                    is_isha = True
                elif n_hr == m_hr:
                    if n_min < m_min:
                        is_isha = True
        curr['isha'] = is_isha

        # Figure out what comes next; default is tomorrow's fajr at midnight.
        next_secs, next_time = secs_til_midnight(), 'midnight'
        next_prayer, pt['next_fajr'] = 'next_fajr', None
        curr_end = None
        if curr['fajr']:
            next_secs, next_time = (sunrise - now).seconds, pt['sunrise']
            curr_end = pt['sunrise']
            next_prayer = 'dhuhr'
        elif after_fajr:
            next_secs, next_time = (dhuhr - now).seconds, pt['dhuhr']
            next_prayer = 'dhuhr'
        elif curr['dhuhr']:
            next_secs, next_time = (asr - now).seconds, pt['asr']
            curr_end = pt['asr']
            next_prayer = 'asr'
        elif curr['asr']:
            next_secs, next_time = (maghrib - now).seconds, pt['maghrib']
            curr_end = pt['maghrib']
            next_prayer = 'maghrib'
        elif curr['maghrib']:
            maghrib_end_t = maghrib_end.strftime('%I:%M%p')
            next_secs, next_time = (maghrib_end - now).seconds, maghrib_end_t
            curr_end = maghrib_end_t
            next_prayer = 'isha'
        elif after_maghrib:
            next_secs, next_time = (isha - now).seconds, pt['isha']
            next_prayer = 'isha'
        elif curr['isha']:
            curr_end = pt['midnight']
        elif now < fajr:
            next_secs, next_time = (fajr - now).seconds, pt['fajr']
            next_prayer = 'fajr'

        # Need to get next day's times
        if next_prayer == 'next_fajr':
            next_pt = self.pt.getTimes(tomorrow, coords, timezone, dst, self.time_fmt)
            pt['next_fajr'] = next_pt['fajr']

        # Save info
        self.update_info['next_secs'] = next_secs
        self.update_info['praytimes']['pt'] = pt
        self.update_info['praytimes']['curr'] = curr
        self.update_info['praytimes']['curr_end'] = curr_end
        self.update_info['praytimes']['next_time'] = next_time
        self.update_info['praytimes']['next_prayer'] = next_prayer

    def redraw_praytimes(self):
        # Dispatch to the partial-refresh layout on the 2.13" panel, otherwise
        # the (unimplemented) full layout.
        logging.info("Redraw praytimes...")
        if EPD_USED == EPD2in13:
            self.redraw_praytimes_partial()
        else:
            self.redraw_praytimes_full()

    def redraw_praytimes_partial(self):
        # Draw current/next prayer info on the small e-paper display.
        # `font24`, `font18` and `imgdir` come from the star imports above.
        pinfo = self.update_info['praytimes']
        pt, curr, curr_end = pinfo['pt'], pinfo['curr'], pinfo['curr_end']
        next_upd, next_prayer = pinfo['next_time'], pinfo['next_prayer']
        h, w = self.epd.height, self.epd.width
        bmp = Image.open(os.path.join(imgdir, 'masjid.bmp'))
        self.image.paste(bmp, (2, w//2+25))
        # If have current end time then we have a current prayer time as well
        font = font24
        if curr_end:
            for p in curr:
                if curr[p]:
                    # Highlight the active prayer with an inverted banner.
                    txt = f"{p.capitalize()}: {pt[p]}"
                    x, y = font.getsize(txt)
                    self.draw.rectangle([(5, 5), (x+15, y+15)], fill='black')
                    self.draw.text((10, 10), txt, font=font, fill='white')
                    self.draw.text((10, y+15), f'Ends {curr_end}', font=font18, fill=0)
        else:
            txt = f'Next update: {next_upd}'
            self.draw.text((10, 10), txt, font=font, fill=0)
        # Next prayer
        self.draw.text((55, w//2+10), 'Upcoming:', font=font18, fill=0)
        p = next_prayer
        n = 'fajr' if p == 'next_fajr' else p
        txt = f"{n.capitalize()}: {pt[p]}"
        self.draw.text((55, w//2+30), txt, font=font, fill=0)

    def redraw_praytimes_full(self):
        """To be implemented"""
7,218
2,459
import pandas as pd
import pytest

from denstatbank.denstatbank import StatBankClient
from denstatbank.utils import data_dict_to_df, add_list_to_dict
from .mock_responses import (
    mock_sub_resp_default, mock_sub_resp_2401, mock_tables_resp, mock_tableinfo_resp,
    mock_tableinfo_variable_resp, mock_data_resp, mock_data_resp_to_df,
    mock_data_resp_with_vars, mock_codes
)

# NOTE(review): most tests below monkeypatch the very StatBankClient method
# they then call, so they primarily validate the mock-response shapes rather
# than the client's request logic.


@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
    """Remove requests.sessions.Session.request for all tests."""
    monkeypatch.delattr("requests.sessions.Session.request")


@pytest.fixture
def client():
    # Fresh client instance per test; no network access (see no_requests).
    client = StatBankClient()
    return client


def test_base_request(client, monkeypatch):
    # _base_request returns the raw API payload for the given endpoint.
    def mock_base_request(self, *args, **kwargs):
        return mock_tableinfo_resp
    monkeypatch.setattr(StatBankClient, "_base_request", mock_base_request)
    r = client._base_request('data', lang='en')
    assert r == mock_tableinfo_resp


def test_subjects(client, monkeypatch):
    # Default call (subjects=None) yields a list of {'id', 'description', ...}.
    def mock_subjects(self, subjects=None, include_tables=False, recursive=False):
        if subjects is None:
            return mock_sub_resp_default
    monkeypatch.setattr(StatBankClient, "subjects", mock_subjects)
    r = client.subjects()
    assert isinstance(r, list)
    d = r[0]
    assert isinstance(d, dict)
    assert 'id' in d.keys()
    assert 'description' in d.keys()


def test_subjects_returns_specified_subject(client, monkeypatch):
    # Requesting subject '2401' yields the matching subject record.
    def mock_subjects(self, subjects=None, include_tables=False, recursive=False):
        if subjects[0] == '2401':
            return mock_sub_resp_2401
    monkeypatch.setattr(StatBankClient, "subjects", mock_subjects)
    r = client.subjects(subjects=['2401'])
    assert isinstance(r, list)
    d = r[0]
    assert isinstance(d, dict)
    assert d['id'] == '2401'


def test_tables_returns_dict(client, monkeypatch):
    # With as_df=False, tables() yields the raw list-of-dicts payload.
    def mock_tables(self, subjects=None, past_days=None, include_inactive=False, as_df=True):
        return mock_tables_resp
    monkeypatch.setattr(StatBankClient, "tables", mock_tables)
    r = client.tables(as_df=False)
    assert isinstance(r, list)
    d = r[0]
    assert isinstance(d, dict)
    assert 'id' in d.keys()
    assert 'text' in d.keys()
    assert 'unit' in d.keys()
    assert 'updated' in d.keys()
    assert 'firstPeriod' in d.keys()
    assert 'latestPeriod' in d.keys()
    assert 'active' in d.keys()
    assert 'variables' in d.keys()


def test_tables_returns_df(client, monkeypatch):
    # Default as_df=True yields a DataFrame with the same columns.
    def mock_tables(self, subjects=None, past_days=None, include_inactive=False, as_df=True):
        return pd.DataFrame(mock_tables_resp)
    monkeypatch.setattr(StatBankClient, "tables", mock_tables)
    df = client.tables()
    assert isinstance(df, pd.DataFrame)
    assert 'id' in df.columns
    assert 'text' in df.columns
    assert 'unit' in df.columns
    assert 'updated' in df.columns
    assert 'firstPeriod' in df.columns
    assert 'latestPeriod' in df.columns
    assert 'active' in df.columns
    assert 'variables' in df.columns


def test_tableinfo_returns_dict(client, monkeypatch):
    # tableinfo() yields the table metadata dict with its variables list.
    def mock_tableinfo(self, table_id, variables_df=False):
        return mock_tableinfo_resp
    monkeypatch.setattr(StatBankClient, "tableinfo", mock_tableinfo)
    d = client.tableinfo('FOLK1A')
    assert isinstance(d, dict)
    assert d['id'] == 'FOLK1A'
    assert 'id' in d['variables'][0].keys()
    assert 'text' in d['variables'][0].keys()


def test_tableinfo_returns_variables_df(client, monkeypatch):
    # variables_df=True yields the 3-column variables DataFrame instead.
    def mock_tableinfo(self, table_id, variables_df):
        if variables_df:
            return pd.DataFrame(mock_tableinfo_variable_resp)
    monkeypatch.setattr(StatBankClient, "tableinfo", mock_tableinfo)
    df = client.tableinfo('FOLK1A', variables_df=True)
    assert isinstance(df, pd.DataFrame)
    print(df)
    assert 'id' in df.columns
    assert 'text' in df.columns
    assert 'variable' in df.columns
    assert len(df.columns.tolist()) == 3


def test_data_returns_dict(client, monkeypatch):
    # With as_df=False, data() yields the raw JSON-stat-style dict.
    def mock_data(self, table_id, as_df, variables=None, **kwargs):
        return mock_data_resp
    monkeypatch.setattr(StatBankClient, "data", mock_data)
    d = client.data(table_id='folk1a', as_df=False)
    assert isinstance(d, dict)
    assert 'dataset' in d.keys()
    dd = d['dataset']
    assert 'value' in dd.keys()
    assert isinstance(dd['value'], list)


def test_data_returns_df(client, monkeypatch):
    # Default as_df=True yields a DataFrame.
    def mock_data(self, table_id, as_df=True, variables=None, **kwargs):
        return pd.DataFrame(mock_data_resp_to_df)
    monkeypatch.setattr(StatBankClient, "data", mock_data)
    d = client.data(table_id='folk1a')
    assert isinstance(d, pd.DataFrame)


def test_variables_dict(client):
    # variable_dict builds {'code', 'values'} dicts; scalar values are wrapped
    # into a single-element list. (Not mocked — exercises real client code.)
    kon = client.variable_dict(code='køn', values=['M', 'K'])
    assert isinstance(kon, dict)
    assert 'code' in kon.keys()
    assert 'values' in kon.keys()
    assert kon['code'] == 'køn'
    assert isinstance(kon['values'], list)
    assert kon['values'] == ['M', 'K']
    tid = client.variable_dict(code='tid', values='2018')
    assert isinstance(tid, dict)
    assert 'code' in tid.keys()
    assert 'values' in tid.keys()
    assert tid['code'] == 'tid'
    assert isinstance(tid['values'], list)
    assert tid['values'] == ['2018']


def test_data_dict_to_df():
    # Converting a response with variables yields a MultiIndexed DataFrame.
    df = data_dict_to_df(mock_data_resp_with_vars, mock_codes)
    assert isinstance(df, pd.DataFrame)
    assert isinstance(df.index, pd.MultiIndex)
    assert df.shape == (8, 1)


def test_add_list_to_dict():
    # add_list_to_dict merges list-valued kwargs into params and rejects
    # non-list values with a descriptive exception.
    params = {'lang': 'en'}
    add_list_to_dict(params, subjects=['02'])
    assert 'subjects' in params.keys()
    assert isinstance(params['subjects'], list)
    assert params['subjects'] == ['02']

    with pytest.raises(Exception) as e:
        assert add_list_to_dict(params, subjects='03')
    assert str(e.value) == 'subjects must be a list.'
5,871
2,013
class Solution(object):
    def addDigits(self, num):
        """Repeatedly sum the decimal digits of ``num`` until one digit remains.

        :type num: int (non-negative)
        :rtype: int

        Replaces the original string/recursion approach with the O(1)
        digital-root identity: for num > 0 the result is 1 + (num - 1) % 9.
        """
        if num == 0:
            return 0
        return 1 + (num - 1) % 9


def main():
    num = 38
    solution = Solution()
    # print() call form runs on both Python 2 and 3; the original used the
    # Python 2 print statement, which is a syntax error on Python 3.
    print(solution.addDigits(num))


if __name__ == '__main__':
    main()
473
157
import sys
from os.path import join, dirname

# PyInstaller runtime hook: copy the license file shipped next to the frozen
# executable into the bundle's temporary extraction directory (sys._MEIPASS)
# so the bundled application code can find it at the expected path.
# NOTE: sys._MEIPASS only exists inside a PyInstaller-frozen process.
with open(join(dirname(sys.executable), 'license.lic'), 'rb') as fs:
    with open(join(sys._MEIPASS, 'license.lic'), 'wb') as fd:
        fd.write(fs.read())
205
78
from django.template import Library

register = Library()

# Russian number words, indexed by digit value. Entries in `first_decade`
# that depend on grammatical gender are (feminine, masculine) tuples.
hundreds = [
    '', 'сто', 'двести', 'триста', 'четыреста', 'пятьсот',
    'шестьсот', 'семьсот', 'восемьсот', 'девятьсот'
]
first_decade = [
    '', ('одна', 'один'), ('две', 'два'), 'три', 'четыре',
    'пять', 'шесть', 'семь', 'восемь', 'девять'
]
second_decade = [
    'десять', 'одиннадцать', 'двенадцать', 'тринадцать', 'четырнадцать',
    'пятнадцать', 'шестнадцать', 'семнадцать', 'восемнадцать', 'девятнадцать'
]
decades = [
    '', 'десять', 'двадцать', 'тридцать', 'сорок', 'пятьдесят',
    'шестьдесят', 'семьдесят', 'восемьдесят', 'девяносто'
]


def pluralize(number, one, two, five):
    """Pick the Russian plural form of a word for ``number``.

    Args:
        number (int): the quantity.
        one/two/five (str): word forms used with 1, 2-4 and 5+ respectively
            (e.g. год / года / лет).
    """
    last_digit = number % 10
    prelast_digit = (number // 10) % 10
    if last_digit == 1 and prelast_digit != 1:
        return one
    if 2 <= last_digit <= 4 and prelast_digit != 1:
        return two
    return five


@register.filter(is_safe=False)
def russian_pluralize(value, arg='s'):
    """Django filter: pick a suffix from 'one,two,five' in ``arg`` for ``value``.

    A single-form ``arg`` is treated as the 2-4/5+ suffix with an empty
    singular suffix, mirroring Django's builtin ``pluralize``.
    """
    if ',' not in arg:
        arg = ',' + arg
    bits = arg.split(',')
    if len(bits) > 3:
        return ''
    one, two, five = bits[:3]
    return pluralize(value, one, two, five)


@register.filter
def number_to_text(number, gender='male', return_text_for_zero=True):
    """Spell out ``number`` in Russian words.

    Supports numbers less than 1 000 000 000.

    Args:
        number (int | None): value to spell out; None is treated like 0.
        gender (str): 'male' or 'female'; selects the gendered form of 1 and 2.
        return_text_for_zero (bool): if False, 0/None yields '' instead of 'ноль'.
    """
    if number is None or number == 0:
        return 'ноль' if return_text_for_zero else ''
    text = []
    if number >= 1000000:
        millions = number // 1000000  # was misnamed `billions` in the original
        text.extend([number_to_text(millions, gender='male', return_text_for_zero=False),
                     'миллион' + pluralize(millions, '', 'а', 'ов')])
        # BUG FIX: the original reduced with `number %= 100000` (one zero
        # short), silently dropping the hundred-thousands of any number >= 1e6.
        number %= 1000000
    if number >= 1000:
        thousands = number // 1000
        text.extend([number_to_text(thousands, gender='female', return_text_for_zero=False),
                     'тысяч' + pluralize(thousands, 'а', 'и', '')])
        number %= 1000
    if number >= 100:
        text.append(hundreds[number // 100])
        number %= 100
    if number == 0:
        pass
    elif number < 10:
        number_text = first_decade[number]
        if isinstance(number_text, (tuple, list)):
            number_text = number_text[1 if gender == 'male' else 0]
        text.append(number_text)
    elif number < 20:
        text.append(second_decade[number - 10])
    else:
        number_text = first_decade[number % 10]
        if isinstance(number_text, (tuple, list)):
            number_text = number_text[1 if gender == 'male' else 0]
        text.extend([decades[number // 10], number_text])
    return ' '.join(text)
2,689
1,067
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Demonstration of sympy's equation solvers: solveset, linsolve,
nonlinsolve, solve, roots and dsolve. Output order is significant."""
from sympy import *

x, y, z = symbols('x y z')
init_printing(use_unicode=True)

# --- solveset on single equations/expressions ---
print(Eq(x, y))
print(solveset(Eq(x**2, 1), x))
print(solveset(Eq(x**2 - 1, 0), x))
print(solveset(x**2 - 1, x))
print(solveset(x**2 - x, x))
print(solveset(x - x, x, domain=S.Reals))
print(solveset(sin(x) - 1, x, domain=S.Reals))
print(solveset(exp(x), x))  # No solution exists
print(solveset(cos(x) - x, x))  # Not able to find solution

# --- linear systems ---
print(linsolve([x + y + z - 1, x + y + 2*z - 3], (x, y, z)))
print(linsolve(Matrix(([1, 1, 1, 1], [1, 1, 2, 3])), (x, y, z)))
aug = Matrix(((1, 1, 1, 1), (1, 1, 2, 3)))
coeffs, rhs = aug[:, :-1], aug[:, -1]
print(linsolve((coeffs, rhs), x, y, z))

# --- nonlinear systems ---
a, b, c, d = symbols('a, b, c, d', real=True)
print(nonlinsolve([a**2 + a, a - b], [a, b]))
print(nonlinsolve([x*y - 1, x - 2], x, y))
print(nonlinsolve([x**2 + 1, y**2 + 1], [x, y]))
eqs = [x**2 - 2*y**2 - 2, x*y - 2]
unknowns = [x, y]  # renamed from `vars`, which shadowed the builtin
print(nonlinsolve(eqs, unknowns))
eqs = [exp(x) - sin(y), 1/y - 3]
print(nonlinsolve(eqs, unknowns))
print(nonlinsolve([x*y, x*y - x], [x, y]))
eqs = [a**2 + a*c, a - b]
print(nonlinsolve(eqs, [a, b]))

# --- solve / roots ---
print(solve([x**2 - y**2/exp(x)], [x, y], dict=True))
print(solve([sin(x + y), cos(x - y)], [x, y]))
print(solveset(x**3 - 6*x**2 + 9*x, x))
print(roots(x**3 - 6*x**2 + 9*x, x))
print(solve(x*exp(x) - 1, x))

# --- ordinary differential equations ---
f, g = symbols('f g', cls=Function)
print(f(x).diff(x))
ode = Eq(f(x).diff(x, x) - 2*f(x).diff(x) + f(x), sin(x))
print(ode)
print(dsolve(ode, f(x)))
print(dsolve(f(x).diff(x)*(1 - sin(f(x))) - 1, f(x)))
1,577
840
#!/usr/bin/env python from setuptools import setup, find_packages setup( name='ptwitter', version='0.0.1', description="Tiny python library for Twitter's REST API.", author='Mitchel Kelonye', author_email='kelonyemitchel@gmail.com', url='https://github.com/kelonye/python-twitter', packages=['ptwitter',], package_dir = {'ptwitter': 'lib'}, license='MIT License', zip_safe=True)
421
142
import sys import a print(datetime, sys, a)
44
16
""" Unit tests for resdk/resources/utils.py file. """ # pylint: disable=missing-docstring, protected-access import unittest import six from mock import MagicMock, call, patch from resdk.resources import Collection, Data, Process, Relation, Sample from resdk.resources.utils import ( _print_input_line, endswith_colon, fill_spaces, find_field, get_collection_id, get_data_id, get_process_id, get_relation_id, get_resolwe, get_resource_collection, get_sample_id, get_samples, iterate_fields, iterate_schema, ) PROCESS_OUTPUT_SCHEMA = [ {'name': "fastq", 'type': "basic:file:", 'label': "Reads file"}, {'name': "bases", 'type': "basic:string:", 'label': "Number of bases"}, {'name': "options", 'label': "Options", 'group': [ {'name': "id", 'type': "basic:string:", 'label': "ID"}, {'name': "k", 'type': "basic:integer:", 'label': "k-mer size"} ]} ] OUTPUT = { 'fastq': {'file': "example.fastq.gz"}, 'bases': "75", 'options': { 'id': 'abc', 'k': 123} } class TestUtils(unittest.TestCase): def test_iterate_fields(self): result = list(iterate_fields(OUTPUT, PROCESS_OUTPUT_SCHEMA)) # result object is iterator - we use lists to pull all elements expected = [ ({ 'type': 'basic:string:', 'name': 'id', 'label': 'ID' }, { 'k': 123, 'id': 'abc' }), ({ 'type': 'basic:string:', 'name': 'bases', 'label': 'Number of bases' }, { 'options': { 'k': 123, 'id': 'abc' }, 'bases': '75', 'fastq': { 'file': 'example.fastq.gz' } }), ({ 'type': 'basic:file:', 'name': 'fastq', 'label': 'Reads file' }, { 'options': { 'k': 123, 'id': 'abc' }, 'bases': '75', 'fastq': { 'file': 'example.fastq.gz' } }), ({ 'type': 'basic:integer:', 'name': 'k', 'label': 'k-mer size' }, { 'k': 123, 'id': 'abc' }) ] six.assertCountEqual(self, result, expected) def test_iterate_fields_modif(self): """ Ensure that changing ``values`` inside iteration loop also changes ``OUTPUT`` values. 
""" for schema, values in iterate_fields(OUTPUT, PROCESS_OUTPUT_SCHEMA): field_name = schema['name'] if field_name == "bases": values[field_name] = str(int(values[field_name]) + 1) self.assertEqual(OUTPUT['bases'], "76") # Fix the OUTPUT to previous state: OUTPUT['bases'] = "75" def test_find_field(self): result = find_field(PROCESS_OUTPUT_SCHEMA, 'fastq') expected = {'type': 'basic:file:', 'name': 'fastq', 'label': 'Reads file'} self.assertEqual(result, expected) def test_iterate_schema(self): result1 = list(iterate_schema(OUTPUT, PROCESS_OUTPUT_SCHEMA, 'my_path')) result2 = list(iterate_schema(OUTPUT, PROCESS_OUTPUT_SCHEMA)) expected1 = [ ({'name': 'fastq', 'label': 'Reads file', 'type': 'basic:file:'}, {'fastq': {'file': 'example.fastq.gz'}, 'options': {'k': 123, 'id': 'abc'}, 'bases': '75'}, 'my_path.fastq'), ({'name': 'bases', 'label': 'Number of bases', 'type': 'basic:string:'}, {'fastq': {'file': 'example.fastq.gz'}, 'options': {'k': 123, 'id': 'abc'}, 'bases': '75'}, 'my_path.bases'), ({'name': 'id', 'label': 'ID', 'type': 'basic:string:'}, {'k': 123, 'id': 'abc'}, 'my_path.options.id'), ({'name': 'k', 'label': 'k-mer size', 'type': 'basic:integer:'}, {'k': 123, 'id': 'abc'}, 'my_path.options.k')] expected2 = [ ({'type': 'basic:file:', 'name': 'fastq', 'label': 'Reads file'}, {'fastq': {'file': 'example.fastq.gz'}, 'bases': '75', 'options': {'k': 123, 'id': 'abc'}}), ({'type': 'basic:string:', 'name': 'bases', 'label': 'Number of bases'}, {'fastq': {'file': 'example.fastq.gz'}, 'bases': '75', 'options': {'k': 123, 'id': 'abc'}}), ({'type': 'basic:string:', 'name': 'id', 'label': 'ID'}, {'k': 123, 'id': 'abc'}), ({'type': 'basic:integer:', 'name': 'k', 'label': 'k-mer size'}, {'k': 123, 'id': 'abc'})] self.assertEqual(result1, expected1) self.assertEqual(result2, expected2) def test_fill_spaces(self): result = fill_spaces("one_word", 12) self.assertEqual(result, "one_word ") @patch('resdk.resources.utils.print') def test_print_input_line(self, print_mock): 
_print_input_line(PROCESS_OUTPUT_SCHEMA, 0) calls = [ call(u'- fastq [basic:file:] - Reads file'), call(u'- bases [basic:string:] - Number of bases'), call(u'- options - Options'), call(u' - id [basic:string:] - ID'), call(u' - k [basic:integer:] - k-mer size')] self.assertEqual(print_mock.mock_calls, calls) def test_endswith_colon(self): schema = {'process_type': 'data:reads:fastq:single'} endswith_colon(schema, 'process_type') self.assertEqual(schema, {'process_type': u'data:reads:fastq:single:'}) def test_get_collection_id(self): collection = Collection(id=1, resolwe=MagicMock()) collection.id = 1 # this is overriden when initialized self.assertEqual(get_collection_id(collection), 1) self.assertEqual(get_collection_id(2), 2) def test_get_sample_id(self): sample = Sample(id=1, resolwe=MagicMock()) sample.id = 1 # this is overriden when initialized self.assertEqual(get_sample_id(sample), 1) self.assertEqual(get_sample_id(2), 2) def test_get_data_id(self): data = Data(id=1, resolwe=MagicMock()) data.id = 1 # this is overriden when initialized self.assertEqual(get_data_id(data), 1) self.assertEqual(get_data_id(2), 2) def test_get_process_id(self): process = Process(id=1, resolwe=MagicMock()) process.id = 1 # this is overriden when initialized self.assertEqual(get_process_id(process), 1) self.assertEqual(get_process_id(2), 2) def test_get_relation_id(self): relation = Relation(id=1, resolwe=MagicMock()) relation.id = 1 # this is overriden when initialized self.assertEqual(get_relation_id(relation), 1) self.assertEqual(get_relation_id(2), 2) def test_get_samples(self): collection = Collection(id=1, resolwe=MagicMock()) collection._samples = ['sample_1', 'sample_2'] self.assertEqual(get_samples(collection), ['sample_1', 'sample_2']) collection_1 = Collection(id=1, resolwe=MagicMock()) collection_1._samples = ['sample_1'] collection_2 = Collection(id=2, resolwe=MagicMock()) collection_2._samples = ['sample_2'] self.assertEqual(get_samples([collection_1, collection_2]), 
['sample_1', 'sample_2']) data = Data(id=1, resolwe=MagicMock()) data._sample = 'sample_1' self.assertEqual(get_samples(data), ['sample_1']) data1 = Data(id=1, resolwe=MagicMock()) data1._sample = 'sample1' data2 = Data(id=2, resolwe=MagicMock()) data2._sample = 'sample2' self.assertEqual(get_samples([data1, data2]), ['sample1', 'sample2']) data = Data(id=1, resolwe=MagicMock(**{'sample.filter.return_value': None})) data._sample = None with self.assertRaises(TypeError): get_samples(data) sample = Sample(id=1, resolwe=MagicMock()) self.assertEqual(get_samples(sample), [sample]) sample_1 = Sample(id=1, resolwe=MagicMock()) sample_2 = Sample(id=3, resolwe=MagicMock()) self.assertEqual(get_samples([sample_1, sample_2]), [sample_1, sample_2]) def test_get_resource_collection(self): collection = Collection(id=1, resolwe=MagicMock()) collection.id = 1 # this is overriden when initialized self.assertEqual(get_resource_collection(collection), 1) relation = Relation(id=1, resolwe=MagicMock()) relation._hydrated_collection = Collection(id=2, resolwe=MagicMock()) relation._hydrated_collection.id = 2 # this is overriden when initialized self.assertEqual(get_resource_collection(relation), 2) data = Data(id=1, resolwe=MagicMock()) data._collections = [Collection(id=3, resolwe=MagicMock())] data._collections[0].id = 3 # this is overriden when initialized self.assertEqual(get_resource_collection(data), 3) sample = Sample(id=1, resolwe=MagicMock()) sample._collections = [Collection(id=4, resolwe=MagicMock())] sample._collections[0].id = 4 # this is overriden when initialized self.assertEqual(get_resource_collection(sample), 4) sample = Sample(id=1, resolwe=MagicMock()) sample._collections = [ Collection(id=5, resolwe=MagicMock()), Collection(id=6, resolwe=MagicMock()) ] sample._collections[0].id = 5 # this is overriden when initialized sample._collections[1].id = 6 # this is overriden when initialized self.assertEqual(get_resource_collection(sample), None) with 
self.assertRaises(LookupError): get_resource_collection(sample, fail_silently=False) def test_get_resolwe(self): # same resolwe object resolwe_mock = MagicMock() relation = Relation(id=1, resolwe=resolwe_mock) sample = Sample(id=1, resolwe=resolwe_mock) self.assertEqual(get_resolwe(relation, sample), resolwe_mock) relation = Relation(id=1, resolwe=MagicMock()) sample = Sample(id=1, resolwe=MagicMock()) with self.assertRaises(TypeError): get_resolwe(relation, sample) if __name__ == '__main__': unittest.main()
10,444
3,375
class BSBIIndex(BSBIIndex): def parse_block(self, block_dir_relative): """Parses a tokenized text file into termID-docID pairs Parameters ---------- block_dir_relative : str Relative Path to the directory that contains the files for the block Returns ------- List[Tuple[Int, Int]] Returns all the td_pairs extracted from the block Should use self.term_id_map and self.doc_id_map to get termIDs and docIDs. These persist across calls to parse_block """ ### Begin your code td_pairs = [] for filename in os.listdir(self.data_dir +'/'+ block_dir_relative): with open(self.data_dir +'/'+ block_dir_relative +'/'+ filename, 'r',encoding="utf8", errors='ignore') as f: doc_id = self.doc_id_map.__getitem__(filename) for s in f.read().split(): term_id = self.term_id_map.__getitem__(s) td_pairs.append((term_id, doc_id)) return td_pairs ### End your code
1,136
321
"""test unit for core/initializer.py""" import runtime_path # isort:skip from core.initializer import * TEST_SHAPE = (100000, 1) TOR = 1e-2 def test_get_fans(): fan_in, fan_out = get_fans(shape=(100, 10)) assert fan_in == 100 and fan_out == 10 fan_in, fan_out = get_fans(shape=(64, 5, 5, 128)) assert fan_in == 5 * 5 * 128 assert fan_out == 64 def test_normal_init(): val = NormalInit(mean=0.0, std=1.0).init(TEST_SHAPE) assert -TOR <= val.mean() <= TOR assert 1.0 - TOR <= val.std() <= 1.0 + TOR def test_truncated_normal_init(): val = TruncatedNormalInit(mean=0.0, std=1.0).init(TEST_SHAPE) assert -TOR <= val.mean() <= TOR assert all(val >= -2.0) and all(val <= 2.0) def test_uniform_init(): val = UniformInit(-1.0, 1.0).init(TEST_SHAPE) assert all(val >= -1.0) and all(val <= 1.0) def test_constant_init(): val = ConstantInit(3.1).init(TEST_SHAPE) assert all(val == 3.1) def test_xavier_uniform_init(): val = XavierUniformInit().init(TEST_SHAPE) bound = np.sqrt(6.0 / np.sum(get_fans(TEST_SHAPE))) assert np.all(val >= -bound) and np.all(val <= bound) def test_xavier_normal_init(): val = XavierNormalInit().init(TEST_SHAPE) std = np.sqrt(2.0 / np.sum(get_fans(TEST_SHAPE))) assert std - TOR <= val.std() <= std + TOR def test_he_uniform_init(): val = HeUniformInit().init(TEST_SHAPE) bound = np.sqrt(6.0 / get_fans(TEST_SHAPE)[0]) assert np.all(val >= -bound) and np.all(val <= bound) def test_he_normal_init(): val = HeNormalInit().init(TEST_SHAPE) std = np.sqrt(2.0 / get_fans(TEST_SHAPE)[0]) assert std - TOR <= val.std() <= std + TOR
1,674
727
class VCFEntry(object): def __init__(self, vkey, ssid, pid, ac, passFilter=1, qual=-1, gq=-1, dp=-1, ad=-1): self.vkey = vkey if not ssid: self.ssid = "UNKNOWN" else: self.ssid = ssid self.pid = pid self.ac = ac self.passFilter = passFilter self.qual = qual self.gq = gq self.dp = dp self.ad = ad def __repr__(self): return "VCFEntry: (" + ', '.join([str(x) for x in [self.vkey, self.ssid, self.pid, self.ac, self.passFilter, self.qual, self.gq, self.dp, self.ad]]) + ")" def __str__(self): return '\t'.join([str(x) for x in [self.vkey, self.ssid, self.pid, self.ac, self.passFilter, self.qual, self.gq, self.dp, self.ad]]) def __eq__(self, other): return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__) def __ne__(self, other): return not self.__eq__(other) def sameEntry(self, other): return (isinstance(other, self.__class__) and self.vkey == other.vkey and self.ssid == other.ssid and self.pid == other.pid)
1,117
403
#!/usr/bin/env python # (c) 2012, Marco Vito Moscaritolo <marco@agavee.com> # modified by Tomas Karasek <tomas.karasek@digile.fi> # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. import sys import re import os import argparse import subprocess import yaml import time import md5 import itertools import novaclient.client import ansible.module_utils.openstack try: import json except ImportError: import simplejson as json # This is a script getting dynamic inventory from Nova. Features: # - you can refer to instances by their nova name in ansible{-playbook} calls # - you can refer to single tenants, regions and openstack environments in # ansible{-playbook} calls # - you can refer to a hostgroup when you pass the arbitrary --meta group= # in "nova boot" # - it caches the state of the cloud # - it tries to guess ansible_ssh_user based on name of image # ('\cubuntu' -> 'ubuntu', '\ccentos' -> 'cloud-user', ...) # - allows to access machines by their private ip * # - it will work with no additional configuration, just handling single tenant # from set OS_* environment variables (just like python-novaclient). # - you can choose to heavy-configure it for multiple environments # - it's configured from simple YAML (I dislike ConfigParser). See nova.yml # - Nodes can be listed in inventory either by DNS name or IP address based # on setting. 
# # * I took few ideas and some code from other pull requests # - https://github.com/ansible/ansible/pull/8657 by Monty Taylor # - https://github.com/ansible/ansible/pull/7444 by Carson Gee # # If Ansible fails to parse JSON, please run this with --list and observe. # # HOW CACHING WORKS: # Cache of list of servers is kept per combination of (auth_url, region_name, # project_id). Default max age is 300 seconds. You can set the age per section # (openstack envrionment) in config. # # If you want to build the cache from cron, consider: # */5 * * * * . /home/tomk/os/openrc.sh && \ # ANSIBLE_NOVA_CONFIG=/home/tomk/.nova.yml \ # /home/tomk/ansible/plugins/inventory/nova.py --refresh-cache # # HOW IS NOVA INVENTORY CONFIGURED: # (Note: if you have env vars set from openrc.sh, you can run this without # writing the config file. Defaults are sane. The values in the config file # will rewrite the defaults.) # # To load configuration from a file, you must have the config file path in # environment variable ANSIBLE_NOVA_CONFIG. # # IN THE CONFIG FILE: # The keys in the top level dict are names for different OS environments. # The keys in a dict for OS environment can be: # - auth_url # - region_name (can be a list) # - project_id (can be a list) # - username # - api_key # - service_type # - auth_system # - prefer_private (connect using private IPs) # - cache_max_age (how long to consider cached data. In seconds) # - resolve_ips (translate IP addresses to domain names) # # If you have a list in region and/or project, all the combinations of # will be listed. # # If you don't have configfile, there will be one cloud section created called # 'openstack'. # # WHAT IS AVAILABLE AS A GROUP FOR ANSIBLE CALLS (how are nodes grouped): # tenants, regions, clouds (top config section), groups by metadata key (nova # boot --meta group=<name>). 
CONFIG_ENV_VAR_NAME = 'ANSIBLE_NOVA_CONFIG' NOVA_DEFAULTS = { 'auth_system': os.environ.get('OS_AUTH_SYSTEM'), 'service_type': 'compute', 'username': os.environ.get('OS_USERNAME'), 'api_key': os.environ.get('OS_PASSWORD'), 'auth_url': os.environ.get('OS_AUTH_URL'), 'project_id': os.environ.get('OS_TENANT_NAME'), 'region_name': os.environ.get('OS_REGION_NAME'), 'prefer_private': False, 'version': '2', 'cache_max_age': 300, 'resolve_ips': True, } DEFAULT_CONFIG_KEY = 'openstack' CACHE_DIR = '~/.ansible/tmp' CONFIG = {} def load_config(): global CONFIG _config_file = os.environ.get(CONFIG_ENV_VAR_NAME) if _config_file: with open(_config_file) as f: CONFIG = yaml.load(f.read()) if not CONFIG: CONFIG = {DEFAULT_CONFIG_KEY: {}} for section in CONFIG.values(): for key in NOVA_DEFAULTS: if (key not in section): section[key] = NOVA_DEFAULTS[key] def push(data, key, element): ''' Assist in items to a dictionary of lists ''' if (not element) or (not key): return if key in data: data[key].append(element) else: data[key] = [element] def to_safe(word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' return re.sub(r"[^A-Za-z0-9\-]", "_", word) def get_access_ip(server, prefer_private): ''' Find an IP for Ansible SSH for a host. 
''' private = ansible.module_utils.openstack.openstack_find_nova_addresses( getattr(server, 'addresses'), 'fixed', 'private') public = ansible.module_utils.openstack.openstack_find_nova_addresses( getattr(server, 'addresses'), 'floating', 'public') if prefer_private: return private[0] if server.accessIPv4: return server.accessIPv4 if public: return public[0] else: return private[0] def get_metadata(server): ''' Returns dictionary of all host metadata ''' results = {} for key in vars(server): # Extract value value = getattr(server, key) # Generate sanitized key key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower() # Att value to instance result (exclude manager class) #TODO: maybe use value.__class__ or similar inside of key_name if key != 'os_manager': results[key] = value return results def get_ssh_user(server, nova_client): ''' Try to guess ansible_ssh_user based on image name. ''' try: image_name = nova_client.images.get(server.image['id']).name if 'ubuntu' in image_name.lower(): return 'ubuntu' if 'centos' in image_name.lower(): return 'cloud-user' if 'debian' in image_name.lower(): return 'debian' if 'coreos' in image_name.lower(): return 'coreos' except: pass def get_nova_client(combination): ''' There is a bit more info in the combination than we need for nova client, so we need to create a copy and delete keys that are not relevant. ''' kwargs = dict(combination) del kwargs['name'] del kwargs['prefer_private'] del kwargs['cache_max_age'] del kwargs['resolve_ips'] return novaclient.client.Client(**kwargs) def merge_update_to_result(result, update): ''' This will merge data from a nova servers.list call (in update) into aggregating dict (in result) ''' for host, specs in update['_meta']['hostvars'].items(): # Can same host be in two differnt listings? I hope not. 
result['_meta']['hostvars'][host] = dict(specs) # groups must be copied if not present, otherwise merged for group in update: if group == '_meta': continue if group not in result: # copy the list over result[group] = update[group][:] else: result[group] = list(set(update[group]) | set(result[group])) def get_name(ip): ''' Gets the shortest domain name for IP address''' # I first did this with gethostbyaddr but that did not return all the names # Also, this won't work on Windows. But it can be turned of by setting # resolve_ips to false command = "host %s" % ip p = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, _ = p.communicate() if p.returncode != 0: return None names = [] for l in stdout.split('\n'): if 'domain name pointer' not in l: continue names.append(l.split()[-1]) return min(names, key=len) def get_update(call_params): ''' Fetch host dicts and groups from single nova_client.servers.list call. This is called for each element in "cartesian product" of openstack e environments, tenants and regions. ''' update = {'_meta': {'hostvars': {}}} # Cycle on servers nova_client = get_nova_client(call_params) for server in nova_client.servers.list(): access_ip = get_access_ip(server, call_params['prefer_private']) access_identifier = access_ip if call_params['resolve_ips']: dns_name = get_name(access_ip) if dns_name: access_identifier = dns_name # Push to a group for its name. 
This way we can use the nova name as # a target for ansible{-playbook} push(update, server.name, access_identifier) # Run through each metadata item and add instance to it for key, value in server.metadata.iteritems(): composed_key = to_safe('tag_{0}_{1}'.format(key, value)) push(update, composed_key, access_identifier) # Do special handling of group for backwards compat # inventory update group = 'undefined' if 'group' in server.metadata: group = server.metadata['group'] push(update, group, access_identifier) # Add vars to _meta key for performance optimization in # Ansible 1.3+ update['_meta']['hostvars'][access_identifier] = get_metadata(server) # guess username based on image name ssh_user = get_ssh_user(server, nova_client) if ssh_user: host_record = update['_meta']['hostvars'][access_identifier] host_record['ansible_ssh_user'] = ssh_user push(update, call_params['name'], access_identifier) push(update, call_params['project_id'], access_identifier) if call_params['region_name']: push(update, call_params['region_name'], access_identifier) return update def expand_to_product(d): ''' this will transform {1: [2, 3, 4], 5: [6, 7]} to [{1: 2, 5: 6}, {1: 2, 5: 7}, {1: 3, 5: 6}, {1: 3, 5: 7}, {1: 4, 5: 6}, {1: 4, 5: 7}] ''' return (dict(itertools.izip(d, x)) for x in itertools.product(*d.itervalues())) def get_list_of_kwarg_combinations(): ''' This will transfrom CONFIG = {'openstack':{version:'2', project_id:['tenant1', tenant2'],...}, 'openstack_dev':{version:'2', project_id:'tenant3',...}, into [{'name':'openstack', version:'2', project_id: 'tenant1', ...}, {'name':'openstack', version:'2', project_id: 'tenant2', ...}, {'name':'openstack_dev', version:'2', project_id: 'tenant3', ...}] The elements in the returned list can be (with little customization) used as **kwargs for nova client. 
''' l = [] for section in CONFIG: d = dict(CONFIG[section]) d['name'] = section for key in d: # all single elements must become list for the product to work if type(d[key]) is not list: d[key] = [d[key]] for one_call_kwargs in expand_to_product(d): l.append(one_call_kwargs) return l def get_cache_filename(call_params): ''' cache filename is ~/.ansible/tmp/<md5(auth_url,project_id,region_name)>.nova.json ''' id_to_hash = ("region_name: %(region_name)s, auth_url:%(auth_url)s," "project_id: %(project_id)s, resolve_ips: %(resolve_ips)s" % call_params) return os.path.join(os.path.expanduser(CACHE_DIR), md5.new(id_to_hash).hexdigest() + ".nova.json") def cache_valid(call_params): ''' cache file is specific for (auth_url, project_id, region_name) ''' cache_path = get_cache_filename(call_params) if os.path.isfile(cache_path): mod_time = os.path.getmtime(cache_path) current_time = time.time() if (mod_time + call_params['cache_max_age']) > current_time: return True return False def update_cache(call_params): fn = get_cache_filename(call_params) content = get_update(call_params) with open(fn, 'w') as f: f.write(json.dumps(content, sort_keys=True, indent=2)) def load_from_cache(call_params): fn = get_cache_filename(call_params) with open(fn) as f: return json.loads(f.read()) def get_args(args_list): parser = argparse.ArgumentParser( description='Nova dynamic inventory for Ansible') g = parser.add_mutually_exclusive_group() g.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') g.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help=('Force refresh of cache by making API requests to' 'Nova (default: False - use cache files)')) return parser.parse_args(args_list) def main(args_list): load_config() args = get_args(args_list) if args.host: print(json.dumps({})) return 0 if args.list: output = {'_meta': {'hostvars': 
{}}} # we have to deal with every combination of # (cloud, region, project). for c in get_list_of_kwarg_combinations(): if args.refresh_cache or (not cache_valid(c)): update_cache(c) update = load_from_cache(c) merge_update_to_result(output, update) print(json.dumps(output, sort_keys=True, indent=2)) return 0 if __name__ == "__main__": sys.exit(main(sys.argv[1:]))
14,321
4,504
from datetime import date import numpy as np from sklearn.metrics import ( roc_curve, auc, ) import torch from torch.utils.data import DataLoader from .metrics import accuracy_thresh, fbeta, pairwise_confusion_matrix import pandas as pd from tqdm import tqdm class ModelEvaluator: """Class for evaluating and testing the text classification models. Evaluation is done with labeled data whilst testing/prediction is done with unlabeled data. """ def __init__(self, args, processor, model, logger): self.args = args self.processor = processor self.model = model self.logger = logger self.device = "cpu" self.eval_dataloader: DataLoader def prepare_eval_data(self, file_name, parent_labels=None): """Creates a PyTorch Dataloader from a CSV file, which is used as input to the classifiers. """ eval_examples = self.processor.get_examples(file_name, "eval", parent_labels) eval_features = self.processor.convert_examples_to_features( eval_examples, self.args["max_seq_length"] ) self.eval_dataloader = self.processor.pack_features_in_dataloader( eval_features, self.args["eval_batch_size"], "eval" ) def evaluate(self): """Evaluates a classifier using labeled data. Calculates and returns accuracy, precision, recall F1 score and ROC AUC. 
""" all_logits = None all_labels = None self.model.eval() eval_loss, eval_accuracy, eval_f1, eval_prec, eval_rec = 0, 0, 0, 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for batch in self.eval_dataloader: batch = tuple(t.to(self.device) for t in batch) input_ids, input_mask, segment_ids, label_ids, parent_labels = batch with torch.no_grad(): # parent_labels is of boolean type if there are no parent labels if parent_labels.dtype != torch.bool: outputs = self.model( input_ids, segment_ids, input_mask, label_ids, parent_labels=parent_labels, ) else: outputs = self.model(input_ids, segment_ids, input_mask, label_ids) tmp_eval_loss, logits = outputs[:2] tmp_eval_accuracy = accuracy_thresh(logits, label_ids) eval_loss += tmp_eval_loss.mean().item() eval_accuracy += tmp_eval_accuracy f1, prec, rec = fbeta(logits, label_ids) eval_f1 += f1 eval_prec += prec eval_rec += rec if all_logits is None: all_logits = logits.detach().cpu().numpy() else: all_logits = np.concatenate( (all_logits, logits.detach().cpu().numpy()), axis=0 ) if all_labels is None: all_labels = label_ids.detach().cpu().numpy() else: all_labels = np.concatenate( (all_labels, label_ids.detach().cpu().numpy()), axis=0 ) nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples eval_f1 = eval_f1 / nb_eval_steps eval_prec = eval_prec / nb_eval_steps eval_rec = eval_rec / nb_eval_steps # ROC-AUC calcualation # Compute ROC curve and ROC area for each class fpr = dict() tpr = dict() roc_auc = dict() confusion_matrices = [] for i in range(len(self.processor.labels)): fpr[i], tpr[i], _ = roc_curve(all_labels[:, i], all_logits[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) confusion_matrices += [ pairwise_confusion_matrix( all_logits[:, [13, i]], all_labels[:, [13, i]] ) ] # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve( all_labels.ravel(), all_logits.ravel() ) roc_auc["micro"] = auc(fpr["micro"], 
tpr["micro"]) result = { "eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "roc_auc": roc_auc, "eval_f1": eval_f1, "eval_prec": eval_prec, "eval_rec": eval_rec, # "confusion_matrices": confusion_matrices, } self.save_result(result) return result def save_result(self, result): """Saves the evaluation results as a text file.""" d = date.today().strftime("%Y-%m-%d") output_eval_file = f"mltc/data/results/eval_results_{d}.txt" with open(output_eval_file, "w") as writer: self.logger.info("***** Eval results *****") for key in sorted(result.keys()): self.logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) def predict(self, file_name): """Makes class predicitons for unlabeled data. Returns the estimated probabilities for each of the labels. """ test_examples = self.processor.get_examples(file_name, "test") test_features = self.processor.convert_examples_to_features( test_examples, self.args["max_seq_length"] ) test_dataloader = self.processor.pack_features_in_dataloader( test_features, self.args["eval_batch_size"], "test" ) # Hold input data for returning it input_data = [ {"id": input_example.guid, "text": input_example.text_a} for input_example in test_examples ] self.logger.info("***** Running prediction *****") self.logger.info(" Num examples = %d", len(test_examples)) self.logger.info(" Batch size = %d", self.args["eval_batch_size"]) all_logits = None self.model.eval() for step, batch in enumerate( tqdm(test_dataloader, desc="Prediction Iteration") ): batch = tuple(t.to(self.device) for t in batch) input_ids, input_mask, segment_ids = batch with torch.no_grad(): outputs = self.model(input_ids, segment_ids, input_mask) logits = outputs[0] logits = logits.sigmoid() if all_logits is None: all_logits = logits.detach().cpu().numpy() else: all_logits = np.concatenate( (all_logits, logits.detach().cpu().numpy()), axis=0 ) return pd.merge( pd.DataFrame(input_data), pd.DataFrame(all_logits), left_index=True, 
right_index=True, )
7,003
2,112
import collections import xml.etree.ElementTree as x_etree import synapse.common as s_common import synapse.lib.syntax as s_syntax class DataElem: def __init__(self, item, name=None, parent=None): self._d_name = name self._d_item = item self._d_parent = parent self._d_special = {'..': parent, '.': self} def _elem_valu(self): return self._d_item def _elem_step(self, step): try: item = self._d_item[step] except Exception as e: return None return initelem(item, name=step, parent=self) def name(self): return self._d_name def _elem_kids(self, step): # Most primitives only have 1 child at a given step... # However, we must handle the case of nested children # during this form of iteration to account for constructs # like XML/HTML ( See XmlDataElem ) try: item = self._d_item[step] except Exception as e: return yield initelem(item, name=step, parent=self) def step(self, path): ''' Step to the given DataElem within the tree. ''' base = self for step in self._parse_path(path): spec = base._d_special.get(step) if spec is not None: base = spec continue base = base._elem_step(step) if base is None: return None return base def valu(self, path): ''' Return the value of the element at the given path. ''' if not path: return self._elem_valu() elem = self.step(path) if elem is None: return None return elem._elem_valu() def vals(self, path): ''' Iterate the given path elements and yield values. Example: data = { 'foo':[ {'bar':'lol'}, {'bar':'heh'} ] } root = s_datapath.initelem(data) for elem in root.iter('foo/*/bar'): dostuff(elem) # elem is at value "lol" and "heh" ''' for elem in self.iter(path): yield elem._elem_valu() def _elem_iter(self): # special case for dictionaries # to iterate children and keep track # of their names... 
if type(self._d_item) == dict: for name, item in self._d_item.items(): yield initelem((name, item), name=self.name(), parent=self) return if isinstance(self._d_item, int): return if isinstance(self._d_item, str): return for i, item in enumerate(self._d_item): yield initelem(item, name=str(i), parent=self) def _elem_search(self, step): subs = self._elem_iter() todo = collections.deque(subs) while todo: elem = todo.popleft() #print('SEARCH: %r' % (elem.name(),)) if elem.name() == step: yield elem for sube in elem._elem_iter(): todo.append(sube) def iter(self, path): ''' Iterate sub elements using the given path. Example: data = { 'foo':[ {'bar':'lol'}, {'bar':'heh'} ] } root = s_datapath.initelem(data) for elem in root.iter('foo/*/bar'): dostuff(elem) # elem is at value "lol" and "heh" ''' steps = self._parse_path(path) if not steps: return omax = len(steps) - 1 todo = collections.deque([(self, 0)]) while todo: base, off = todo.popleft() step = steps[off] # some special syntax for "all kids" / iterables if step == '*': for elem in base._elem_iter(): if off == omax: yield elem else: todo.append((elem, off + 1)) continue # special "all kids with name" syntax ~foo # (including recursive kids within kids) # this syntax is mostly useful XML like # hierarchical data structures. 
if step[0] == '~': for elem in base._elem_search(step[1:]): if off == omax: yield elem else: todo.append((elem, off + 1)) continue for elem in base._elem_kids(step): if off == omax: yield elem else: todo.append((elem, off + 1)) def _parse_path(self, path): off = 0 steps = [] plen = len(path) while off < plen: # eat the next (or possibly a first) slash _, off = s_syntax.nom(path, off, ('/',)) if off >= plen: break if s_syntax.is_literal(path, off): elem, off = s_syntax.parse_literal(path, off) steps.append(elem) continue # eat until the next / elem, off = s_syntax.meh(path, off, ('/',)) if not elem: continue steps.append(elem) return steps class XmlDataElem(DataElem): def __init__(self, item, name=None, parent=None): DataElem.__init__(self, item, name=name, parent=parent) def _elem_kids(self, step): #TODO possibly make step fnmatch compat? # special case for iterating <tag> which recurses # to find all instances of that element. #if step[0] == '<' and step[-1] == '>': #allstep = step[1:-1] #todo = collections.deque(self._d_item) #while todo: #elem = todo.popleft() for xmli in self._d_item: if xmli.tag == step: yield XmlDataElem(xmli, name=step, parent=self) def _elem_tree(self): todo = collections.deque([self._d_item]) while todo: elem = todo.popleft() yield elem for sube in elem: todo.append(sube) def _elem_step(self, step): # optional explicit syntax for dealing with colliding # attributes and sub elements. 
if step.startswith('$'): item = self._d_item.attrib.get(step[1:]) if item is None: return None return initelem(item, name=step, parent=self) for xmli in self._d_item: if xmli.tag == step: return XmlDataElem(xmli, name=step, parent=self) item = self._d_item.attrib.get(step) if item is not None: return initelem(item, name=step, parent=self) def _elem_valu(self): return self._d_item.text def _elem_iter(self): for item in self._d_item: yield initelem(item, name=item.tag, parent=self) # Special Element Handler Classes elemcls = { x_etree.Element: XmlDataElem, } def initelem(item, name=None, parent=None): ''' Construct a new DataElem from the given item using which ever DataElem class is most correct for the type. Example: elem = initelem( ''' ecls = elemcls.get(type(item), DataElem) return ecls(item, name=name, parent=parent)
7,381
2,159
bl_info = { "name": "plantFEM (Seed)", "author": "Haruka Tomobe", "version": (1, 0), "blender": (2, 80, 0), "location": "View3D > Add > Mesh > plantFEM Object", "description": "Adds a new plantFEM Object", "warning": "", "wiki_url": "", "category": "Add Mesh", } import bpy from bpy.types import Operator from bpy.props import FloatVectorProperty from bpy_extras.object_utils import AddObjectHelper, object_data_add from mathutils import Vector class SAMPLE21_OT_CreateICOSphere(bpy.types.Operator): bl_idname = "object.sample21_create_icosphere" bl_label = "ICO Sphere" bl_description = "Add ICO Sphere." bl_options = {'REGISTER' , 'UNDO'} def execute(self, context): bpy.ops.mesh.primitive_ico_sphere_add() print("Sample : Add ICO Sphere.") return {'FINISHED'} class SAMPLE21_OT_CreateCube(bpy.types.Operator): bl_idname = "object.sample21_create_cube" bl_label = "Cube" bl_description = "Add Cube." bl_options = {'REGISTER' , 'UNDO'} def execute(self, context): bpy.ops.mesh.primitive_cube_add() print("Sample : Add Cube") return{'FINISHED'} def menu_fn(self, context): self.layout.separator() self.layout.operator(SAMPLE21_OT_CreateICOSphere.bl_idname) self.layout.operator(SAMPLE21_OT_CreateCube.bl_idname) classes = [ SAMPLE21_OT_CreateICOSphere, SAMPLE21_OT_CreateCube, ] def register(): for c in classes: bpy.utils.register_class(c) bpy.types.VIEW3D_MT_mesh_add.append(menu_fn) print("クラスを二つ使用するサンプルアドオンが有効化されました。") def unregister(): bpy.types.VIEW3D_MT_mesh_add.remove(menu_fn) for c in classes: bpy.utils.unregister_class(c) print("クラスを二つ使用するサンプルアドオンが無効化されました。") if __name__ == "__main__": register()
1,851
743
from assignment_1.envs.gaussianBandit import gaussianBandit from assignment_1.envs.bernoulliBandit import bernoulliBandit from assignment_1.envs.RWE import RWE
159
57
# -*- coding: utf-8 -*- import unittest from datetime import datetime, timedelta from context import aged_out_report, find_todays_file class Test_FTP_worker(unittest.TestCase): """ Test FTP_worker module functionality""" def test_find_today_file(self): self.assertIsNone( find_todays_file(None)) todays_fh = 'BookOpsQC.{}'.format( datetime.strftime(datetime.now(), '%Y%m%d%H%M%S')) fh_list = [] for i in range(5): fh_list.append( 'BookOpsQC.{}'.format( datetime.strftime( datetime.now() - timedelta(days=1), '%Y%m%d%H%M%S'))) fh_list.append(todays_fh) self.assertEqual( find_todays_file(fh_list), todays_fh) def test_aged_out_report(self): fh1 = 'BookOpsQC.{}'.format( datetime.strftime(datetime.now() - timedelta(days=31), '%Y%m%d%H%M%S')) self.assertTrue( aged_out_report(fh1)) fh2 = 'BookOpsQC.{}'.format( datetime.strftime(datetime.now() - timedelta(days=13), '%Y%m%d%H%M%S')) self.assertFalse( aged_out_report(fh2)) fh3 = 'BookOpsQC.{}'.format( datetime.strftime(datetime.now() - timedelta(days=13), '%Y%m%d')) self.assertFalse( aged_out_report(fh3)) if __name__ == '__main__': unittest.main()
1,401
514
"Usage: python -m get_version ./setup.py" import setuptools import sys setuptools.setup = lambda *args, version=None, **kwargs: print(version) exec(open(sys.argv[1]).read())
177
62
#!/usr/bin/python3 from dialectUtil import * from java.javaProperty import JAVAProperty from java.javaSnippets import * from java.javaLink import JAVALink import constants as CONST JAVA_PROPERTIES = {} JAVA_PROPERTIES['JAVA_AUTO_IMPORTABLE'] = ['created_by','last_modified_by','created_date', 'last_modified_date'] class JAVAClass: def __init__(self, dbTable, project): self.project = project self.name = underScoreToCamelCase(dbTable.name).strip() self.properties = {} self.imports = set() self.foreignElements = {} self.dbTable = dbTable self.metaData = ' ' for field in dbTable.fields: field = dbTable.fields[field] if field.fk is None: javaProperty = JAVAProperty(field, self) self.metaData += javaProperty.metaData + ' ' isImportable = False for importable in JAVA_PROPERTIES['JAVA_AUTO_IMPORTABLE']: if importable in javaProperty.metaData: isImportable = True if not isImportable: self.properties[javaProperty.name] = javaProperty def setForeign(self): for field in self.dbTable.fields: field = self.dbTable.fields[field] if field.fk is not None: link = JAVALink(field.fk, self) if link is not None: self.foreignElements[link.localProperty] = link def save(self): extension = '' if 'created_by' in self.metaData: extension = ' extends Auditable<Long>' self.imports.add('javax.persistence.Entity') self.imports.add('com.fasterxml.jackson.annotation.JsonIgnoreProperties') self.imports.add('javax.persistence.PrePersist') for javaProperty in self.properties: javaProperty = self.properties[javaProperty] for importfile in javaProperty.imports: self.imports.add(importfile) code = JavaPackage(self.project.package + '.' 
+ CONST.MODEL) code += self.getImports() body = '\n'.join(sorted(list(map(lambda token: self.properties[token].declare(), self.properties)),key = len)) body += '\n'.join(list(map(lambda token: self.properties[token].setter(), self.properties))) body += '\n'.join(list(map(lambda token: self.properties[token].getter(), self.properties))) prePersistCode = '' if 'uuid' in self.metaData: prePersistCode += '\nuuid = UUID.randomUUID();\n' prePersist = '\n@PrePersist\npublic void prePersist(){{{0}}}\n' body += prePersist.format(prePersistCode) code += '\n'.join(classAnnotations(self)) code += '@Entity\n@JsonIgnoreProperties({"hibernateLazyInitializer", "handler"})\n'+JavaScope('public', JavaClass(self.name + extension, body)) filename = CONST.MODEL + '/' + self.name + '.java' with open( filename,'w') as the_file: the_file.write(code) def saveRepo(self): code = JavaPackage(self.project.package + '.' + CONST.REPO) code += JavaImport('org.springframework.data.jpa.repository.JpaRepository') code += JavaImport(self.project.package + '.' + CONST.MODEL + '.' + self.name) code += 'public interface {0}Repository extends JpaRepository<{0}, Long> {{\n\n}}'.format(self.name) filename = CONST.REPO + '/' + self.name + 'Repository.java' with open( filename,'w') as the_file: the_file.write(code) def saveDAO(self): code = JavaPackage(self.project.package + '.' + CONST.DAO) code += JavaImport('javax.persistence.EntityManager') code += JavaImport('org.hibernate.Session') code += JavaImport('org.springframework.stereotype.Repository') code += JavaImport('java.util.List') code += JavaImport('org.springframework.beans.factory.annotation.Autowired') code += JavaImport(self.project.package + '.' + CONST.REPO + '.' + self.name + 'Repository') code += JavaImport(self.project.package + '.' + CONST.MODEL + '.' 
+ self.name) safeUpdateTemplate = 'if ({0}.get{1}() != null) {0}Persisted.set{1}({0}.get{1}());' safeUpdate = '\n'.join(list(map(lambda token: safeUpdateTemplate.format(firstSmall(self.name), camel(self.properties[token].name)), self.properties))) daoTemplate = open('./java/templates/dao.template.java').read() code += daoTemplate.format(self.name, firstSmall(self.name), safeUpdate) filename = CONST.DAO + '/' + self.name + 'Dao.java' with open(filename,'w') as the_file: the_file.write(code) def getImports(self): return '\n'.join(list(map(lambda token: JavaImport(token), self.imports)))
4,815
1,417
from tkinter import * import os main = Tk() main.geometry('{}x{}'.format(550, 550)) main.wm_title("Welcome to Face Recognition Based Attendence System ") svalue3= StringVar() # defines the widget state as string svalue2 = StringVar() #imagePath = PhotoImage(file="facerec.png") #widgetf = Label(main, image=imagePath).pack(side="bottom") #imagePath1 = PhotoImage(file="efylogo.png") #widgetf = Label(main, image=imagePath1).pack(side="top") comments = """ Developed and Design by Aseem Kanungo""" widgets = Label(main, justify=CENTER, padx = 10, text=comments).pack(side="bottom") w = Entry(main,textvariable=svalue3) # adds a textarea widget w.pack() w.place(x=200,y=75) def fisher_dataset_button_fn(): scholarid= svalue3.get() os.system('python 01_face_dataset.py {0}'.format(scholarid)) def camera(*args): camerano= svalue2.get() os.system('python 01_face_dataset.py {0}'.format(camerano)) train_database_button = Button(main,text="Scholar ID", command=fisher_dataset_button_fn, justify=CENTER, padx = 10) train_database_button.pack() train_database_button.place(x=200, y=110) a=[0,1] popupMenu = OptionMenu(main, svalue2, *a) Label(main, text="Choose a Camera").place(x=250, y=150) popupMenu.place(x=250,y=160) main.mainloop()
1,295
506
"""Extensions module - Set up for additional libraries can go in here.""" from flask_sqlalchemy import SQLAlchemy db = SQLAlchemy()
133
39
#!/usr/bin/env python3 # # Copyright 2017-2020 GridGain Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from threading import Thread # from random import choices from ...util import log_print, util_sleep from .zookeeper import Zookeeper class ZkNodesRestart(Thread): def __init__(self, zk, nodes_amount): super().__init__() self.setDaemon(True) # self.zk: Zookeeper = zk self.zk = zk self.nodes_amount = nodes_amount self.running = True self.order = 'seq' self.restart_timeout = 5 def stop(self): log_print('Interrupting ZK nodes restarting thread', color='red') self.running = False def run(self): log_print('Starting ZK nodes restarts', color='green') while self.running: for node_id in self.__get_nodes_to_restart(): log_print('Killing ZK node {}'.format(node_id), color='debug') self.zk.kill_node(node_id) util_sleep(self.restart_timeout) log_print('Starting ZK node {}'.format(node_id), color='debug') self.zk.start_node(node_id) def set_params(self, **kwargs): self.order = kwargs.get('order', self.order) self.restart_timeout = kwargs.get('restart_timeout', self.restart_timeout) self.nodes_amount = kwargs.get('nodes_amount', self.nodes_amount) log_print('Params set to:\norder={}\nrestart_timeout={}\nnodes_amount={}' .format(self.order, self.restart_timeout, self.nodes_amount)) def __get_nodes_to_restart(self): zk_nodes = list(self.zk.nodes.keys()) zk_nodes = zk_nodes[:self.nodes_amount] # uncomment this when Python 3.7 will be used. 
# if self.order == 'rand': # zk_nodes = choices(zk_nodes[:self.nodes_amount]) return zk_nodes def __enter__(self): self.start() def __exit__(self, exc_type, exc_val, exc_tb): self.stop() self.join() if exc_type and exc_val and exc_tb: raise Exception(exc_tb)
2,586
810
from __future__ import division import cv2 import track import detect def main(video_path): cap = cv2.VideoCapture(video_path) ticks = 0 lt = track.LaneTracker(2, 0.1, 500) ld = detect.LaneDetector(180) while cap.isOpened(): precTick = ticks ticks = cv2.getTickCount() dt = (ticks - precTick) / cv2.getTickFrequency() ret, frame = cap.read() predicted = lt.predict(dt) lanes = ld.detect(frame) if predicted is not None: cv2.line(frame, (predicted[0][0], predicted[0][1]), (predicted[0][2], predicted[0][3]), (0, 0, 255), 5) cv2.line(frame, (predicted[1][0], predicted[1][1]), (predicted[1][2], predicted[1][3]), (0, 0, 255), 5) lt.update(lanes) cv2.imshow('', frame) if cv2.waitKey(1) & 0xFF == ord('q'): break
859
342
SEECRET = 'Extremely secretive seecret. You could not guess this one if your life depended on it.' DATABASE = 'db/data.db' DATABASE_PRICES = 'db/prices.db' SESSION_TTL = 240 WEBSOCKETS_PORT= 7334 WEBSOCKETS_URI = 'ws://localhost:' + str(WEBSOCKETS_PORT) DEFAULT_LEDGER = { 'value': 10000, 'asset_id': 1 } DEFAULT_COIN_PRICE = 500 RECORDS_FOR_TIMEFRAME = 260
368
172
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'telas/telaEditUser.ui' # # Created by: PyQt5 UI code generator 5.13.0 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtWidgets import QMessageBox import PyrebaseConnector as PC import sys class Ui_Form(object): def setupUi(self, Form): Form.setObjectName("Form") Form.resize(577, 502) Form.setFixedSize(577, 502) self.label = QtWidgets.QLabel(Form) self.label.setGeometry(QtCore.QRect(80, 25, 401, 61)) self.label.setObjectName("label") self.layoutWidget = QtWidgets.QWidget(Form) self.layoutWidget.setGeometry(QtCore.QRect(170, 120, 231, 261)) self.layoutWidget.setObjectName("layoutWidget") self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget) self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.setObjectName("verticalLayout") self.label_6 = QtWidgets.QLabel(self.layoutWidget) font = QtGui.QFont() font.setFamily("KacstOne") font.setBold(True) font.setWeight(75) self.label_6.setFont(font) self.label_6.setObjectName("label_6") self.verticalLayout.addWidget(self.label_6) self.lineEdit_4 = QtWidgets.QLineEdit(self.layoutWidget) self.lineEdit_4.setObjectName("lineEdit_4") self.lineEdit_4.setDisabled(True) self.verticalLayout.addWidget(self.lineEdit_4) self.label_7 = QtWidgets.QLabel(self.layoutWidget) font = QtGui.QFont() font.setFamily("KacstOne") font.setBold(True) font.setWeight(75) self.label_7.setFont(font) self.label_7.setObjectName("label_7") self.verticalLayout.addWidget(self.label_7) self.lineEdit_5 = QtWidgets.QLineEdit(self.layoutWidget) self.lineEdit_5.setObjectName("lineEdit_5") self.verticalLayout.addWidget(self.lineEdit_5) self.label_5 = QtWidgets.QLabel(self.layoutWidget) font = QtGui.QFont() font.setFamily("KacstOne") font.setBold(True) font.setWeight(75) self.label_5.setFont(font) self.label_5.setObjectName("label_5") self.verticalLayout.addWidget(self.label_5) self.dateEdit = 
QtWidgets.QDateEdit(self.layoutWidget) self.dateEdit.setObjectName("dateEdit") self.verticalLayout.addWidget(self.dateEdit) self.label_4 = QtWidgets.QLabel(self.layoutWidget) font = QtGui.QFont() font.setFamily("KacstOne") font.setBold(True) font.setWeight(75) self.label_4.setFont(font) self.label_4.setObjectName("label_4") self.verticalLayout.addWidget(self.label_4) self.comboBox = QtWidgets.QComboBox(self.layoutWidget) self.comboBox.setObjectName("comboBox") self.comboBox.addItem('Feminino') self.comboBox.addItem('Masculino') self.verticalLayout.addWidget(self.comboBox) self.buttonResetPass = QtWidgets.QPushButton(Form) self.buttonResetPass.setObjectName('buttonResetPass') self.buttonResetPass.setGeometry(QtCore.QRect(250, 410, 71, 31)) self.buttonResetPass.setStyleSheet('background-color:#1f4c73') self.buttonResetPass.setFont(font) self.button_cadastrar = QtWidgets.QPushButton(Form) self.button_cadastrar.setGeometry(QtCore.QRect(330, 410, 71, 31)) self.button_cadastrar.setStyleSheet('background-color:#1f4c73') font = QtGui.QFont() font.setFamily("KacstOne") font.setBold(True) font.setWeight(75) self.button_cadastrar.setFont(font) self.button_cadastrar.setObjectName("button_cadastrar") self.button_back = QtWidgets.QPushButton(Form) self.button_back.setGeometry(QtCore.QRect(170, 410, 71, 31)) self.button_back.setStyleSheet('background-color:#1f4c73') font = QtGui.QFont() font.setFamily("KacstOne") font.setBold(True) font.setWeight(75) self.button_back.setFont(font) self.button_back.setObjectName("button_back") self.retranslateUi(Form) QtCore.QMetaObject.connectSlotsByName(Form) def retranslateUi(self, Form): _translate = QtCore.QCoreApplication.translate Form.setWindowTitle(_translate("Form", "Form")) self.label.setText(_translate("Form", "TextLabel")) pixmap = QtGui.QPixmap("icons/iconEditUser.png") pixmap3 = pixmap.scaled(400, 80, QtCore.Qt.KeepAspectRatio) self.label.setPixmap(pixmap3) self.label.setAlignment(QtCore.Qt.AlignCenter) 
self.label_6.setText(_translate("Form", "Email:")) self.label_7.setText(_translate("Form", "Nome de usuário:")) self.label_5.setText(_translate("Form", "Data de nascimento:")) self.label_4.setText(_translate("Form", "Sexo:")) self.button_cadastrar.setText(_translate("Form", "Salvar")) self.button_cadastrar.clicked.connect(self.UpdateUser) self.buttonResetPass.setText(_translate('Form', 'Mudar\nsenha')) self.buttonResetPass.clicked.connect(self.changePass) self.button_back.setText(_translate("Form", "Voltar")) def changePass(self): PC.pc.changePassword(PC.pc.auth.current_user['email']) self.messageBox('Enviamos um email para você com as instruções para cadastrar uma nova senha!', 'Alerta') def messageBox(self, textMessage, nameWin): infoBox = QMessageBox() infoBox.setIcon(QMessageBox.Information) infoBox.setText(textMessage) infoBox.setWindowTitle(nameWin) infoBox.setStandardButtons(QMessageBox.Ok) infoBox.exec_() def UpdateUser(self): erroVazio = 0 if self.lineEdit_5.text() == '': erroVazio = 1 self.messageBox('Campos obrigatórios!', 'Erro') if erroVazio == 0: PC.pc.updateUser(self.lineEdit_5.text(), self.dateEdit.text(), self.comboBox.currentText()) self.messageBox('Dados atualizados!', 'Mensagem') """ if __name__ == '__main__': app = QtWidgets.QApplication(sys.argv) Other = QtWidgets.QMainWindow() ui = Ui_Form() ui.setupUi(Other) Other.show() sys.exit(app.exec_()) """
6,349
2,177
import pytest from edera import Condition from edera import Task from edera.exceptions import TargetVerificationError from edera.testing import DefaultScenario from edera.testing import ScenarioWithProvidedStubs def test_default_scenario_works_correctly(): class A(Task): def execute(self): raise RuntimeError class B(Task): class T(Condition): def check(self): raise RuntimeError target = T() class Z(Task): class T(Condition): def check(self): return False target = T() scenario = DefaultScenario() assert scenario.stub(Z(), {A(), B()}) == { A(): DefaultScenario(), B(): DefaultScenario(), } with pytest.raises(RuntimeError): scenario.run(A()) with pytest.raises(RuntimeError): scenario.run(B()) with pytest.raises(TargetVerificationError): scenario.run(Z()) def test_scenario_with_provided_stubs_works_correctly(): class A(Task): pass class B(Task): pass class Z(Task): pass class S(ScenarioWithProvidedStubs): def run(self, subject): pass stubs = {A(): DefaultScenario()} assert S(stubs=stubs).stub(Z(), {A(), B()}) == stubs
1,303
393
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from nova.conductor.tasks import base from nova import test class FakeTask(base.TaskBase): def __init__(self, context, instance, fail=False): super(FakeTask, self).__init__(context, instance) self.fail = fail def _execute(self): if self.fail: raise Exception else: pass class TaskBaseTestCase(test.NoDBTestCase): def setUp(self): super(TaskBaseTestCase, self).setUp() self.task = FakeTask(mock.MagicMock(), mock.MagicMock()) @mock.patch.object(FakeTask, 'rollback') def test_wrapper_exception(self, fake_rollback): self.task.fail = True try: self.task.execute() except Exception: pass fake_rollback.assert_called_once_with() @mock.patch.object(FakeTask, 'rollback') def test_wrapper_no_exception(self, fake_rollback): try: self.task.execute() except Exception: pass self.assertFalse(fake_rollback.called)
1,604
474
#!/usr/bin/python3 import MySQLdb from ognddbfuncs import getognchk unkglider = [] def getflarmid(conn, registration): # get the FLARMID from the GLIDERS table on the database cursG = conn.cursor() # set the cursor for searching the devices try: cursG.execute("select idglider, flarmtype from GLIDERS where registration = '"+registration+"' ;") except MySQLdb.Error as e: try: print(">>>MySQL Error [%d]: %s" % (e.args[0], e.args[1])) except IndexError: print(">>>MySQL Error: %s" % str(e)) print(">>>MySQL error:", "select idglider, flarmtype from GLIDERS where registration = '"+registration+"' ;") print(">>>MySQL data :", registration) return("NOREG") rowg = cursG.fetchone() # look for that registration on the OGN database if rowg is None: return("NOREG") idglider = rowg[0] # flarmid to report flarmtype = rowg[1] # flarmtype flarm/ica/ogn if not getognchk(idglider): # check that the registration is on the table - sanity check if idglider not in unkglider: print("Warning: FLARM ID=", idglider, "not on OGN DDB") unkglider.append(idglider) if flarmtype == 'F': flarmid = "FLR"+idglider # flarm elif flarmtype == 'I': flarmid = "ICA"+idglider # ICA elif flarmtype == 'O': flarmid = "OGN"+idglider # ogn tracker else: flarmid = "RND"+idglider # undefined #print "GGG:", registration, rowg, flarmid return (flarmid) # ----------------------------------------------------------- def chkflarmid(idglider): # check if the FLARM ID exist, if not add it to the unkglider table glider = idglider[3:9] # only the last 6 chars of the ID if not getognchk(glider): # check that the registration is on the table - sanity check if idglider not in unkglider: print("Warning: FLARM ID=", idglider, "not on OGN DDB") unkglider.append(idglider) return (False) return (True) # -----------------------------------------------------------
2,160
687
"""Edinburgh Genomics Online SF2 web application. examples: To start the tornado server: $ start_sf2_webapp More information is available at: - http://gitlab.genepool.private/hdunnda/sf2-webapp """ __version__="0.0.1" import os import tornado.options from tornado.options import define, options import sf2_webapp.controller import sf2_webapp.config import sf2_webapp.database define("dbconfig", default=None, help="Path to the database configuration file", type=str) define("webconfig", default=None, help="Path to the web configuration file", type=str) define("emailconfig", default=None, help="Path to the email configuration file", type=str) define("loggingconfig", default=None, help="Path to the logging configuration file", type=str) define("enable_cors", default=False, help="Flag to indicate that CORS should be enabled", type=bool) def main(): # type: () -> None """Command line entry point for the web application""" tornado.options.parse_command_line() assert (options.dbconfig is None or os.path.exists(options.dbconfig)), 'Error: database configuration file ' + str(options.dbconfig) + ' not found.' assert (options.webconfig is None or os.path.exists(options.webconfig)), 'Error: web configuration file ' + str(options.webconfig) + ' not found.' assert (options.emailconfig is None or os.path.exists(options.emailconfig)), 'Error: email configuration file ' + str(options.emailconfig) + ' not found.' assert (options.loggingconfig is None or os.path.exists(options.loggingconfig)), 'Error: logging configuration file ' + str(options.loggingconfig) + ' not found.' sf2_webapp.controller.run( enable_cors=options.enable_cors, db_config_fp=options.dbconfig, web_config_fp=options.webconfig, email_config_fp=options.emailconfig, logging_config_fp=options.loggingconfig ) if __name__ == "__main__": main()
1,916
565
#!/usr/bin/env python # coding:utf-8 import cv2 from PIL import Image from PyPDF2 import PdfFileReader import logging Image.MAX_IMAGE_PIXELS = None log = logging.getLogger(__name__) def variance_of_laplacian(image): # compute the Laplacian of the image and then return the focus # measure, which is simply the variance of the Laplacian return cv2.Laplacian(image, cv2.CV_64F).var() def blur_factor(image): # load the image, convert it to grayscale, and compute the # focus measure of the image using the Variance of Laplacian # method gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) return variance_of_laplacian(gray) def count_pages(pdf_path): with open(pdf_path, 'rb') as f: pdf = PdfFileReader(f) information = pdf.getDocumentInfo() return pdf.getNumPages()
828
276
#Copy a image def copyImage(imagePathToCopy, imageNameToPaste): with open(imagePathToCopy, 'rb') as rf: with open(imageNameToPaste, 'wb') as wf: for line in rf: wf.write(line)
216
75
# import paddle def check_logits_losses(logits_list, losses): # 自动权重和衰减 if 'ceof' not in losses.keys(): losses['ceof'] = [1] * len(losses['type']) if 'decay' not in losses.keys(): losses['decay'] = [1] * len(losses['type']) if len(losses['type']) == len(losses['ceof']) and \ len(losses['type']) == len(losses['decay']): len_logits = len(logits_list) len_losses = len(losses['type']) if len_logits != len_losses: raise RuntimeError( 'The length of logits_list should equal to the types of loss config: {} != {}.' .format(len_logits, len_losses)) else: raise RuntimeError('The logits_list type/coef/decay should equal.') def loss_computation(logits_list, labels, losses, epoch=None, batch=None): check_logits_losses(logits_list, losses) loss_list = [] lab_m = False if len(labels) > 1: lab_m = True if len(labels) != len(logits_list): raise RuntimeError( 'The length of logits_list should equal to labels: {} != {}.' .format(len(logits_list), len(labels))) for i in range(len(logits_list)): logits = logits_list[i] coef_i = losses['ceof'][i] loss_i = losses['type'][i] label_i = labels[i] if lab_m else labels[0] # 多标签损失 if epoch != None and (epoch != 0 and batch == 0): decay_i = losses['decay'][i] ** epoch # print(decay_i) loss_list.append(decay_i * coef_i * loss_i(logits, label_i)) else: loss_list.append(coef_i * loss_i(logits, label_i)) return loss_list
1,705
624
import numpy as np
from futile.Utils import write

# Conversion factor from Hartree (atomic units) to electron-volts.
HaeV = 27.21138386


def _occ_and_virt(log):
    """
    Extract the number of occupied and empty orbitals from a logfile
    """
    # Total orbital count; how it splits depends on the spin treatment.
    norb = log.log['Total Number of Orbitals']
    if log.log['Spin treatment'] == 'Averaged':
        # Spin-averaged: a single channel; virtuals are the remaining states.
        norbv = log.evals[0].info[0]-norb
        return (norb,), (norbv,)
    elif log.log['Spin treatment'] == 'Collinear':
        # Collinear spin: split the occupied orbitals between up and down
        # channels according to the magnetic polarization mpol.
        mpol = log.log['dft']['mpol']
        norbu = int((norb+mpol)/2)
        norbd = norb-norbu
        norbvu = log.evals[0].info[0]-norbu
        norbvd = log.evals[0].info[0]-norbd
        return (norbu, norbd), (norbvu, norbvd)
    else:
        # Other spin treatments (e.g. non-collinear) are not handled yet.
        raise ValueError('Information for the orbitals to be implemented')


def transition_indexes(np, nalpha, indexes):
    """
    Returns the list of the indices in the bigdft convention that correspond
    to the couple iorb-ialpha with given spin.

    NOTE(review): the parameter name ``np`` shadows the module-level numpy
    import inside this function. numpy is not used here so behavior is
    unaffected, but the name is unfortunate.

    Args:
       np (tuple): (norbu,norbd) occupied orbitals: when of length 1 assumed spin averaged
       nalpha (tuple): (norbu, norbd)virtual orbitals: when of length 1 assumed spin averaged
       indexes (list): list of tuples of (iorb,ialpha,ispin) desired indices in python convention (start from 0)
    """
    nspin = len(np)
    inds = []
    for iorb, ialpha, ispin in indexes:
        jspin = ispin if nspin == 2 else 0
        ind = ialpha+iorb*nalpha[jspin]  # local index of the spin subspace
        if ispin == 1:
            ind += np[0]*nalpha[0]  # spin 2 comes after spin one
        inds.append(ind)
    return inds


def _collection_indexes(np, nvirt_small):
    # Build every (iorb, ialpha, ispin) triplet for all occupied orbitals
    # and virtual orbitals up to nvirt_small, for both spin channels.
    harvest = []
    for ispin in [0, 1]:
        jspin = ispin if len(np) == 2 else 0
        for ip in range(np[jspin]):
            for ialpha in range(nvirt_small[jspin]):
                harvest.append([ip, ialpha, ispin])
    return harvest


def _collection_indexes_iocc(iocc, nvirt, spin=None):
    """
    For each iocc and a selected spin provide the list of couples that are
    concerned up to nvirt
    If spin is none provide the list for all values of the spin
    """
    harvest = []
    for ispin in [0, 1]:
        jspin = ispin if len(nvirt) == 2 else 0
        if spin is not None and ispin != spin:
            continue
        for ialpha in range(nvirt[jspin]):
            harvest.append([iocc, ialpha, ispin])
    return harvest


class 
TransitionMatrix(np.matrix): """ Matrix of Transition Quantities, might be either :class:`CouplingMatrix` or :class:`TransitionMultipoles` Args: matrix (matrix-like): data of the coupling matrix. If present also the number of orbitals should be provided. norb_occ (tuple): number of occupied orbitals per spin channnel. Compulsory if ``matrix`` is specified. norb_virt (tuple): number of empty orbitals per spin channnel. Compulsory if ``matrix`` is specified. log (Logfile): Instance of the logfile from which the coupling matrix calculation is performed. Automatically retrieves the ``norb_occ`` and `norb_virt`` parameters. When ``log`` parameter is present the parameter ``matrix`` is ignored. Raises: ValueError: if the file of the coupling matrix indicated by ``log`` does not exists """ def __new__(cls, matrix=None, norb_occ=None, norb_virt=None, log=None): """ Create the object from the arguments and return the ``self`` instance """ import os if log is not None: datadir = log.log.get('radical', '') datadir = 'data-'+datadir if len(datadir) > 0 else 'data' cmfile = os.path.join(log.srcdir, datadir, cls._filename) if not os.path.isfile(cmfile): raise ValueError('The file "'+cmfile+'" does not exist') norb, norbv = _occ_and_virt(log) write('Loading data with ', norb, ' occupied and ', norbv, ' empty states, from file "', cmfile, '"') try: import pandas as pd write('Using pandas:') mat = pd.read_csv(cmfile, delim_whitespace=True, header=None) except ImportError: write('Using numpy:') mat = np.loadtxt(cmfile) write('done') else: mat = matrix return super(TransitionMatrix, cls).__new__(cls, mat) def __init__(self, *args, **kwargs): """ Perform sanity checks on the loaded matrix """ log = kwargs.get('log') if log is not None: self.norb_occ, self.norb_virt = _occ_and_virt(log) else: self.norb_occ = kwargs.get('norb_occ') self.norb_virt = kwargs.get('norb_virt') assert(self.shape[0] == self._total_transitions()) write("Shape is conformal with the number of orbitals") 
self._sanity_check() def _total_transitions(self): ntot = 0 for no, nv in zip(self.norb_occ, self.norb_virt): ntot += no*nv if len(self.norb_occ) == 1: ntot *= 2 return ntot def _subindices(self, norb_occ, norb_virt): for i, (no, nv) in enumerate(zip(norb_occ, norb_virt)): assert(no <= self.norb_occ[i] and nv <= self.norb_virt[i]) harvest = _collection_indexes(norb_occ, norb_virt) return np.array(transition_indexes(norb_occ, self.norb_virt, harvest)) def _sanity_check(self): pass class CouplingMatrix(TransitionMatrix): """ Casida Coupling Matrix, extracted from the calculation performed by BigDFT """ _filename = 'coupling_matrix.txt' def _sanity_check(self): write('Casida Matrix is symmetric', np.allclose(self, self.T, atol=1.e-12)) def subportion(self, norb_occ, norb_virt): """Extract a subportion of the coupling matrix. Returns a Coupling Matrix which is made by only considering the first ``norb_occ`` and ``norb_virt`` orbitals Args: norb_occ (tuple): new number of occupied orbitals. Must be lower that the instance value norb_virt (tuple): new number of virtual orbitals. Must be lower that the instance value """ inds = self._subindices(norb_occ, norb_virt) mat = np.array([row[0, inds] for row in self[inds]]) return CouplingMatrix(matrix=mat, norb_occ=norb_occ, norb_virt=norb_virt) def diagonalize(self): """ Diagonalize the Coupling Matrix Returns: (np.matrix, np.matrix): tuple of the Eigenvvalues and Eigenvectors of the coupling matrix, as returned by :meth:`numpy.linalg.eigh`. 
We perform the transpose of the matrix with eigenvectors to have them sorted as row vectors """ write('Diagonalizing Coupling matrix of shape', self.shape) E2, C_E2 = np.linalg.eigh(self) write('Eigensystem solved') C_E2 = C_E2.T return E2, C_E2 class TransitionMultipoles(TransitionMatrix): """ Transition dipoles, extracted from the calculation performed by BigDFT """ _filename = 'transition_quantities.txt' def subportion(self, norb_occ, norb_virt): """Extract a subportion of the Transition Multipoles. Returns a set of transition multipoles which is made by only considering the first ``norb_occ`` and ``norb_virt`` orbitals Args: norb_occ (tuple): new number of occupied orbitals. Must be lower that the instance value norb_virt (tuple): new number of virtual orbitals. Must be lower that the instance value Returns: TransitionMultipoles: reduced transition multipoles """ inds = self._subindices(norb_occ, norb_virt) mat = np.array(self[inds]) return TransitionMultipoles(matrix=mat, norb_occ=norb_occ, norb_virt=norb_virt) def get_transitions(self): """ Get the transition quantities as the dimensional objects which should contribute to the oscillator strengths. Returns: numpy.array: Transition quantities multiplied by the square root of the unperturbed transition energy """ newdipole = [] for line in self: newdipole.append(np.ravel(line[0, 0]*line[0, 1:])) return np.array(newdipole) class TransitionDipoles(TransitionMultipoles): """ Transition dipoles as provided in the version of the code < 1.8.0. 
Deprecated, to be used in some particular cases """ _filename = 'transition_dipoles.txt' def get_transitions(self): return self class Excitations(): """LR Excited states of a system Definition of the excited states in the Casida Formalism Args: cm (CouplingMatrix): the matrix of coupling among transitions tm (TransitionMultipoles): scalar product of multipoles among transitions """ def __init__(self, cm, tm): self.cm = cm self.tm = tm self.eigenvalues, self.eigenvectors = cm.diagonalize() # : array: transition quantities coming from the multipoles self.transitions = tm.get_transitions() scpr = np.array(np.dot(self.eigenvectors, self.transitions)) #: array: oscillator strenghts components of the transitions defined # as the square of $\int w_a(\mathbf r) r_i $ self.oscillator_strenghts = np.array([t**2 for t in scpr[:, 0:3]]) # : array: average of all the components of the OS self.avg_os = np.average(self.oscillator_strenghts, axis=1) self.alpha_prime = 2.0*self.oscillator_strenghts / \ self.eigenvalues[:, np.newaxis] """ array: elements of the integrand of the statical polarizability in the space of the excitations """ self._indices_for_spin_comparison = \ self._get_indices_for_spin_comparison() self.identify_singlet_and_triplets(1.e-5) def _get_indices_for_spin_comparison(self): inds = [[], []] inds0 = [] # get the indices for comparison, take the minimum between the spins if len(self.cm.norb_occ) == 1: nocc = self.cm.norb_occ[0] nvirt = self.cm.norb_virt[0] nos = [nocc, nocc] nvs = [nvirt, nvirt] else: nocc = min(self.cm.norb_occ) nvirt = min(self.cm.norb_virt) nos = self.cm.norb_occ nvs = self.cm.norb_virt for ispin in [0, 1]: for a in range(nvirt): for p in range(nocc): inds[ispin].append([p, a, ispin]) for a in range(nvirt, nvs[ispin]): for p in range(nocc, nos[ispin]): inds0.append([p, a, ispin]) transA = transition_indexes( self.cm.norb_occ, self.cm.norb_virt, inds[0]) transB = transition_indexes( self.cm.norb_occ, self.cm.norb_virt, inds[1]) trans0 = 
transition_indexes(self.cm.norb_occ, self.cm.norb_virt, inds0) return transA, transB, trans0 def spectrum_curves(self, omega, slice=None, weights=None): """Calculate spectrum curves. Provide the set of the curves associated to the weights. The resulting curves might then be used to draw the excitation spectra. Args: omega (array): the linspace used for the plotting, of shape ``(n,)``. Must be provided in Atomic Units slice (array): the lookup array that has to be considered. if Not provided the entire range is assumed weights (array): the set of arrays used to weight the spectra. Must have shape ``(rank,m)``, where ``rank`` is equal to the number of eigenvalues. If m is absent it is assumed to be 1. When not specified, it defaults to the average oscillator strenghts. Returns: array: a set of spectrum curves, of shape equal to ``(n,m)``, where ``n`` is the shape of ``omega`` and ``m`` the size of the second dimension of ``weights``. """ if slice is None: oo = self.eigenvalues[:, np.newaxis] - omega**2 wgts = weights if weights is not None else self.avg_os else: oo = self.eigenvalues[slice, np.newaxis] - omega**2 oo = oo[0] wgts = weights if weights is not None else self.avg_os[slice] return np.dot(2.0/oo.T, wgts) def identify_singlet_and_triplets(self, tol=1.e-5): """ Find the lookup tables that select the singlets and the triplets among the excitations Args: tol (float): tolerance to be applied to recognise the spin character """ sings = [] trips = [] for exc in range(len(self.eigenvalues)): sing, trip = self.project_on_spin(exc, tol) if sing: sings.append(exc) if trip: trips.append(exc) if len(sings) > 0: self.singlets = (np.array(sings),) """array: lookup table of the singlet excitations""" if len(trips) > 0: self.triplets = (np.array(trips),) """array: lookup table of the triplet excitations""" def _project_on_occ(self, exc): """ Project a given eigenvector on the occupied orbitals. 
In the spin averaged case consider all the spin indices nonetheless """ norb_occ = self.cm.norb_occ norb_virt = self.cm.norb_virt pProj_spin = [] for ispin, norb in enumerate(norb_occ): pProj = np.zeros(norb) for iorb in range(norb): harvest = _collection_indexes_iocc( iorb, self.cm.norb_virt, spin=None if len(norb_occ) == 1 else ispin) inds = np.array(transition_indexes( norb_occ, norb_virt, harvest)) pProj[iorb] = np.sum(np.ravel(self.eigenvectors[exc, inds])**2) pProj_spin.append(pProj) return pProj_spin def project_on_spin(self, exc, tol=1.e-8): """ Control if an excitation has a Singlet or Triplet character Args: exc (int): index of the excitation to be controlled Returns: tuple (bool,bool): couple of booleans indicating if the excitation is a singlet or a triplet respectively """ A, B, zero = [np.ravel(self.eigenvectors[exc, ind]) for ind in self._indices_for_spin_comparison] issinglet = np.linalg.norm(A-B) < tol istriplet = np.linalg.norm(A+B) < tol return issinglet, istriplet # print (self.eigenvalues[exc], np.linalg.norm(A), np.linalg.norm(B), # A-B,A+B, np.linalg.norm(zero)) def _get_threshold(self, pProj_spin, th_energies, tol): """ Identify the energy which is associated to the threshold of a given excitation. The tolerance is used to discriminate the component """ ths = -1.e100 for proj, en in zip(pProj_spin, th_energies): norb = len(en) pProj = proj.tolist() pProj.reverse() imax = norb-1 for val in pProj: if val > tol: break imax -= 1 ths = max(ths, en[imax]) return ths def split_excitations(self, evals, tol, nexc='all'): """Separate the excitations in channels. This methods classify the excitations according to the channel they belong, and determine if a given excitation might be considered as a belonging to a discrete part of the spectrum or not. Args: evals (BandArray): the eigenvalues as they are provided (for instance) from a `Logfile` class instance. 
tol (float): tolerance for determining the threshold nexc (int,str): number of excitations to be analyzed. If ``'all'`` then the entire set of excitations are analyzed. """ self.determine_occ_energies(evals) self.identify_thresholds(self.occ_energies, tol, len( self.eigenvalues) if nexc == 'all' else nexc) def identify_thresholds(self, occ_energies, tol, nexc): """Identify the thresholds per excitation. For each of the first ``nexc`` excitations, identify the energy value of its corresponding threshold. This value is determined by projecting the excitation components on the occupied states and verifying that their norm for the highest energy level is below a given tolerance. Args: occ_energies (tuple of array-like): contains the list of the energies of the occupied states per spin channel tol (float): tolerance for determining the threshold nexc (int): number of excitations to be analyzed """ # : Norm of the $w_p^a$ states associated to each excitation self.wp_norms = [] threshold_energies = [] for exc in range(nexc): proj = self._project_on_occ(exc) self.wp_norms.append(proj) threshold_energies.append( self._get_threshold(proj, occ_energies, tol)) # : list: identified threshold for inspected excitations self.threshold_energies = np.array(threshold_energies) self.excitations_below_threshold = np.where( np.abs(self.threshold_energies) >= np.sqrt( self.eigenvalues[0:len(self.threshold_energies)])) """ array: Indices of the excitations which lie below their corresponding threshold """ self.excitations_above_threshold = np.where( np.abs(self.threshold_energies) < np.sqrt(self.eigenvalues[0:len(self.threshold_energies)])) """ array: Indices of the excitations which lie above their corresponding threshold """ def determine_occ_energies(self, evals): """ Extract the occupied energy levels from a Logfile BandArray structure, provided the tuple of the number of occupied states Args: evals (BandArray): the eigenvalues as they are provided (for instance) from a `Logfile` 
class instance. """ norb_occ = self.cm.norb_occ occ_energies = [] # istart=0 for ispin, norb in enumerate(norb_occ): # range(len(norb_occ)): # istart:istart+norb_occ[ispin]])) occ_energies.append(np.array(evals[0][ispin][0:norb])) # istart+=norb_tot[ispin] # : array: energies of the occupied states out of the logfile self.occ_energies = occ_energies # : float: lowest threshold of the excitations. All excitations are # discrete below this level self.first_threshold = abs( max(np.max(self.occ_energies[0]), np.max(self.occ_energies[-1]))) def plot_alpha(self, **kwargs): """Plot the imaginary part. Plot the real or imaginary part of the dynamical polarizability. Keyword Arguments: real (bool): True if real part has to be plotted. The imaginary part is plotted otherwise eta (float): Value of the complex imaginary part. Defaults to 1.e-2. group (str): see :meth:`lookup` **kwargs: other arguments that might be passed to the :meth:`plot` method of the :mod:`matplotlib.pyplot` module. Returns: :mod:`matplotlib.pyplot`: the reference to :mod:`matplotlib.pyplot` module. 
""" import matplotlib.pyplot as plt from futile.Utils import kw_pop emax = np.max(np.sqrt(self.eigenvalues))*HaeV kwargs, real = kw_pop('real', False, **kwargs) plt.xlim(xmax=emax) if real: plt.ylabel(r'$\mathrm{Re} \alpha$ (AU)', size=14) else: plt.ylabel(r'$\mathrm{Im} \alpha$', size=14) plt.yticks([]) plt.xlabel(r'$\omega$ (eV)', size=14) if hasattr(self, 'first_threshold'): eps_h = self.first_threshold*HaeV plt.axvline(x=eps_h, color='black', linestyle='--') kwargs, eta = kw_pop('eta', 1.e-2, **kwargs) omega = np.linspace(0.0, emax, 5000)+2.0*eta*1j kwargs, group = kw_pop('group', 'all', **kwargs) slice = self.lookup(group) spectrum = self.spectrum_curves(omega, slice=slice) toplt = spectrum.real if real else spectrum.imag pltkwargs = dict(c='black', linewidth=1.5) pltkwargs.update(kwargs) plt.plot(omega*HaeV, toplt, **pltkwargs) return plt def lookup(self, group): """ Identify the group of the excitations according to the argument Args: group (str): A string chosen between * ``"all"`` : provides the entire set of excitations (:py:class:`None` instead of the lookup array) * ``"bt"`` : provides only the excitations below threshold * ``"at"`` : provides only the excitations above threshold * ``"singlets"`` : provides the index of the excitations that have a singlet character * ``"triplets"`` : provides the index of the excitations that have a triplet character """ slice = None if group == 'bt': slice = self.excitations_below_threshold if group == 'at': slice = self.excitations_above_threshold if group == 'singlets': slice = self.singlets if group == 'triplets': slice = self.triplets return slice def plot_excitation_landscape(self, **kwargs): """ Represent the excitation landscape as splitted in the excitations class Args: **kwargs: keyword arguments to be passed to the `pyplot` instance. The ``xlabel``, ``ylabel`` as well as ``xlim`` are already set. Returns: :mod:`matplotlib.pyplot`: the reference to :mod:`matplotlib.pyplot` module. 
Example: >>> ex=Excitations(cm,tm) >>> ex.split_excitations(evals=...,tol=1.e-4,nexc=...) >>> ex.plot_excitation_landscape(title='Excitation landscape') """ import matplotlib.pyplot as plt Emin = 0.0 Emax = np.max(np.sqrt(self.eigenvalues))*HaeV for level in self.occ_energies[0]: eng_th = level*HaeV plt.plot((Emin, eng_th), (level, level), '--', c='red', linewidth=1) plt.plot((eng_th, Emax), (level, level), '-', c='red', linewidth=1) plt.scatter(abs(eng_th), level, marker='x', c='red') ind_bt = self.excitations_below_threshold exc_bt = np.sqrt(self.eigenvalues)[ind_bt] lev_bt = self.threshold_energies[ind_bt] plt.scatter(HaeV*exc_bt, lev_bt, s=16, marker='o', c='black') ind_at = self.excitations_above_threshold exc_at = np.sqrt(self.eigenvalues)[ind_at] lev_at = self.threshold_energies[ind_at] plt.scatter(HaeV*exc_at, lev_at, s=14, marker='s', c='blue') plt.xlabel('energy (eV)') plt.ylabel('Threshold energy (Ha)') plt.xlim(xmin=Emin-1, xmax=Emax) for attr, val in kwargs.items(): if type(val) == dict: getattr(plt, attr)(**val) else: getattr(plt, attr)(val) return plt def dos_dict(self, group='all'): """Dictionary for DoS creation. Creates the keyword arguments that have to be passed to the `meth:BigDFT.DoS.append` method of the `DoS` class Args: group (str): see :meth:`lookup` Returns: :py:class:`dict`: kewyord arguments that can be passed to the `meth:BigDFT.DoS.append` method of the :class:`DoS.DoS` class """ ev = np.sqrt(self.eigenvalues) slice = self.lookup(group) if slice is not None: ev = ev[slice] return dict(energies=np.array([np.ravel(ev)]), units='AU') def dos(self, group='all', **kwargs): """Density of States of the Excitations. Provides an instance of the :class:`~BigDFT.DoS.DoS` class, corresponding to the Excitations instance. 
Args: group (str): see :meth:`lookup` **kwargs: other arguments that might be passed to the :class:`DoS.DoS` instantiation Returns: :class:`DoS.DoS`: instance of the Density of States class """ from BigDFT.DoS import DoS kwa = self.dos_dict(group=group) kwa['energies'] = kwa['energies'][0] if hasattr(self, 'first_threshold'): kwa['fermi_level'] = self.first_threshold else: kwa['fermi_level'] = 0.0 kwa.update(kwargs) return DoS(**kwa) def plot_Sminustwo(self, coord, alpha_ref=None, group='all'): """Inspect S-2 sum rule. Provides an handle to the plotting of the $S^{-2}$ sum rule, which should provide reference values for the static polarizability tensor. Args: coord (str): the coordinate used for inspection. May be ``'x'``, ``'y'`` or ``'z'``. alpha_ref (list): diagonal of the reference static polarizability tensor (for instance calculated via finite differences). If present the repartition of the contribution of the various groups of excitations is plotted. group (str): see :meth:`lookup` Returns: reference to :mod:`matplotlib.pyplot` module. 
""" import matplotlib.pyplot as plt idir = ['x', 'y', 'z'].index(coord) fig, ax1 = plt.subplots() ax1.set_xlabel('energy (eV)', size=14) plt.ylabel(r'$\alpha_{'+coord+coord+r'}$ (AU)', size=14) if alpha_ref is not None: plt.axhline(y=alpha_ref[idir], color='r', linestyle='--') if hasattr(self, 'first_threshold'): eps_h = abs(HaeV*self.first_threshold) plt.axvline(x=eps_h, color='black', linestyle='--') e = np.sqrt(self.eigenvalues)*HaeV w_ii = self.alpha_prime[:, idir] slice = self.lookup(group) if slice is not None: e = e[slice] w_ii = w_ii[slice] ax1.plot(e, np.cumsum(w_ii)) ax2 = ax1.twinx() ax2.plot(e, w_ii, color='grey', linestyle='-') plt.ylabel(r'$w_{'+coord+coord+r'}$ (AU)', size=14) return plt def get_alpha_energy(log, norb, nalpha): return log.evals[0][0][norb+nalpha-1] def identify_contributions(numOrb, na, exc, C_E2): pProj = np.zeros(numOrb*2) for p in range(numOrb): for spin in [0, 1]: # sum over all the virtual orbital and spin for alpha in range(na): # extract the value of the index of C_E2 elements = transition_indexes( [numOrb], [na], [[p, alpha, spin]]) for el in elements: pProj[p+numOrb*spin] += C_E2[exc][el]**2 pProj = pProj[0:numOrb]+pProj[numOrb:2*numOrb] # halves the components return pProj def get_p_energy(log, norb): return log.evals[0][0][0:norb] def get_threshold(pProj, th_energies, th_levels, tol): norb = len(th_energies) pProj = pProj.tolist() pProj.reverse() imax = norb-1 for val in pProj: if val > tol: break imax -= 1 return [th_levels[imax], th_energies[imax]]
28,150
8,695
import torch
from torch.autograd import Variable
from sklearn.metrics import confusion_matrix, classification_report
import numpy as np
import time

# import our model and data
from rnn import RNN
from data import get_data

# Hyperparameters -- must match the values used when the checkpoint was
# trained, otherwise load_state_dict() will fail.
hidden_size = 10
learning_rate = 0.01
num_layers = 2
num_epochs = 1000
sequence_length = 10
batch_size = 32


def load_model(input_size):
    """Build an RNN and restore its weights from 'checkpoint.pt'.

    The checkpoint is mapped to CPU so inference also works on machines
    without a GPU. Returns the model switched to eval mode.
    """
    model = RNN(input_size, hidden_size, num_layers)
    # load on CPU only
    checkpoint = torch.load('checkpoint.pt', map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()
    print(model)
    print('model training loss', checkpoint['loss'])
    print('model training epoch', checkpoint['epoch'])
    return model


if __name__ == '__main__':
    X_train, X_test, y_train, y_test = get_data(sequence_length)
    input_size = X_train.shape[2]  # batch, seq_len, input_size
    model = load_model(input_size)

    inputs = Variable(X_test.float())
    tick = time.time()
    outputs = model(inputs)
    tock = time.time()

    # convert probabilities => 0 or 1
    # np.int was removed in NumPy 1.24; the builtin int is the documented
    # replacement and behaves identically here.
    y_pred = (outputs.detach().numpy() > 0.5).astype(int)

    print('prediction time: %.3fs' % (tock - tick))
    print(confusion_matrix(y_test.values, y_pred))
    print(classification_report(y_test.values, y_pred))
1,290
454
# Solve the quadratic equation ax**2 + bx + c = 0

# import complex math module
import cmath


def solve_quadratic(a, b, c):
    """Return both roots of ``a*x**2 + b*x + c = 0`` as complex numbers.

    Args:
        a, b, c: real coefficients; ``a`` must be non-zero.

    Returns:
        tuple: ``(sol1, sol2)`` where ``sol1`` takes the ``-sqrt(d)``
        branch and ``sol2`` the ``+sqrt(d)`` branch.

    Raises:
        ValueError: if ``a`` is zero (the equation is not quadratic;
            the original script crashed with ZeroDivisionError here).
    """
    if a == 0:
        raise ValueError('coefficient a must be non-zero for a quadratic equation')
    # calculate the discriminant; cmath.sqrt handles d < 0 gracefully.
    d = (b**2) - (4*a*c)
    sol1 = (-b - cmath.sqrt(d)) / (2*a)
    sol2 = (-b + cmath.sqrt(d)) / (2*a)
    return sol1, sol2


if __name__ == '__main__':
    # float() accepts every input the original int() accepted, plus
    # decimal coefficients.
    a = float(input("Enter a:"))
    b = float(input("Enter b:"))
    c = float(input("Enter c:"))
    sol1, sol2 = solve_quadratic(a, b, c)
    print('The solution are {0} and {1}'.format(sol1, sol2))
357
149
import numpy as np
from fos.core.scene import Scene
from fos.core.plots import Plot
from fos.core.tracks import Tracks
from fos.core.points import Points

# Alternative dense test data (one big array instead of many tracks):
#data=200*np.random.rand(1000000,3)
#colors=np.random.rand(1000000,4)

# 250k random "tracks": each one has 1-31 3D points (float32) spread over
# a 200-unit cube, with a random RGBA color per point.
data=[200*np.random.rand(int(np.round(30*np.random.rand()))+1,3).astype('float32') for i in range(250000)]
colors=[np.random.rand(len(d),4) for i,d in enumerate(data)]

#print('no of bytes',colors.nbytes + data.nbytes)

# lists=True tells Tracks the data is a list of per-track arrays rather
# than one contiguous array.
tr=Tracks(data,colors,lists=True)

#slot={0:{'actor':tr,'slot':(0, 800000)}}
#Scene(Plot(slot)).run()

#pts=Points([data],[colors],point_size=3.,lists=True)

# Register the track actor in display slot 0 and start the render loop.
slot={0:{'actor':tr,'slot':(0, 800000)}}
     #1:{'actor':pts,'slot':(0, 800000)}}

Scene(Plot(slot)).run()
712
326
"""vis widgets """ # Copyright (c) 2020 ipyradiant contributors. # Distributed under the terms of the Modified BSD License. __all__ = [ "CytoscapeVisualizer", "DatashaderVisualizer", "VisualizerBase", "LayoutSelector", "NXBase", ] from .base import NXBase, VisualizerBase from .cytoscape import CytoscapeVisualizer from .datashader_vis import DatashaderVisualizer from .tools import LayoutSelector
419
139
r""" .. _compartmental-modeling-tools: Compartmental Modeling Tools ---------------------------- These functions build theoretical distributions with which to understand how Dismod-AT works, and how disease processes work. 1. Specify a disease process by making simple Python functions that return disease rates as a function of time. You can specify a set of :math:`(\iota, \rho, \chi, \mu)`, or you can specify a set of :math:`(\iota, \rho, \chi, \omega)`. We'll call the former the total-mortality specification and the latter the other-mortality specification. There is a basic version of total mortality supplied for you in ``siler_default``. 2. Given a set of pure functions, solve the differential equations in order to determine prevalence over time. For the total mortality specification, this means running:: S, C, P = prevalence_solution(iota, rho, emr, total) The returned values are functions for susceptibles, with-condition, and prevalence of with-condition, :math:`P=C/(S+C)`. They are functions built by interpolating solutions to the core differential equation. For the other-mortality specification, this means running:: S, C = dismod_solution(iota, rho, emr, omega) It can be helpful to define the total alive as a function:: def lx(t): return S(t) + C(t) This is what we should use as a weighting function for computing integrands. 3. Create a set of demographic intervals (regions of ages) over which to compute averaged values from the continuous rates:: nx = (1/52) * np.array([1, 3, 52-3, 4*52, 5*52, 5*52], dtype=np.float) intervals = DemographicInterval(nx) observations, normalization = integrands_from_function( [incidence, emr, C], lx, intervals ) The resulting list of observations is a set of arrays that then can go to Dismod-AT. .. _differential-equations: Differential Equations ---------------------- .. 
_dismod-at-equation: DismodAT Differential Equation ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ These functions manipulate data for the compartmental model that the differential equation describes. Using :math:`S` as without this condition and alive, :math:`C` as with condition, and :math:`R` as removed, or dead, .. math:: \frac{dS}{d\tau} = -\iota S + \rho C -\omega S \frac{dC}{d\tau} = \iota S - \rho C - (\omega + \chi) C \frac{dR}{d\tau} = \omega (S+C) + \chi C. The time is cohort time, which we denote as :math:`\tau`. These functions work with continuous functions. They are designed for creating test data and analyzing interpolated data. .. _prevalence-only: Prevalence-Only Equation ^^^^^^^^^^^^^^^^^^^^^^^^ The prevalence-only form of the ODE. The full differential equation can be transformed into a space .. math:: P = \frac{C}{S+C} N = S + C. to yield two differential equations, one for the prevalence and one for the total mortality .. math:: P' = \iota (1-P) - \rho P - \chi (1-P) P N' = -\omega N - \chi C. The :math:`N` variable doesn't appear in the first equation, so it is independent and can be solved alone. Then the second equation is equivalent to total mortality .. math:: N' = -\mu N which indicates that :math:`\mu = \omega + \chi P`. """ import numpy as np from scipy.integrate import quad, solve_ivp from cascade.core.log import getLoggers CODELOG, MATHLOG = getLoggers(__name__) def build_derivative_prevalence(iota, rho, chi): r""" Given three functions for the basic rates, this creates a function that lets the ODE solver integrate the prevalence-only differential equation. Args: iota (function): Incidence rate rho (function): Remission rate chi (function): Excess mortality rate Returns: function: The arguments are time and a sequence of :math:`N` prevalence states, given as a :math:`(1, N)` array. 
""" def ode_right_hand_side(t, y): return iota(t) * (1 - y) - rho(t) * y - chi(t) * (1 - y) * y return ode_right_hand_side def build_derivative_total(mu): r""" Turns a mortality rate into an argument for the ODE solver. Args: mu (function): Total mortality rate Returns: function: The arguments are time and a sequence of :math:`N` prevalence states, given as a :math:`(1, N)` array. """ def ode_right_hand_side(t, y): return -mu(t) * y return ode_right_hand_side def build_derivative_full(iota, rho, chi, omega): r""" The Dismod-AT ODE Args: iota (function): Incidence rate rho (function): Remission rate chi (function): Excess mortality rate omega (function): Other-cause mortality Returns: function: The arguments are time and a sequence of :math:`N` prevalence states, given as a :math:`(2, N)` array. """ def ode_right_hand_side(t, y): sprime = -(iota(t) + omega(t)) * y[0, :] + rho(t) * y[1, :] cprime = iota(t) * y[0, :] - (rho(t) + omega(t) + chi(t)) * y[1, :] return np.vstack([sprime, cprime]) return ode_right_hand_side def omega_from_mu(mu, chi, P): r""" Given functions for :math:`(\mu, \chi, P)`, return a function for :math:`\omega`. Args: mu (function): Total mortality rate. chi (function): Excess mortality rate. P (function): Prevalence. Returns: function: Other-cause mortality. """ def omega(t): return mu(t) - chi(t) * P(t) return omega def mu_from_omega(omega, chi, P): r""" Given :math:`(\omega, \chi, P)`, return a function for total mortality, :math:`\mu`. Args: omega (function): Other-cause mortality chi (function): Excess mortality. P (function): Prevalence. Returns: function: Total mortality rate. """ def total_mortality(t): return omega(t) + chi(t) * P(t) return total_mortality def solve_differential_equation(f_derivatives, initial, oldest=120): r""" Solve differential equations between ages 0 and oldest. Uses ``numpy.integrate.solve_ivp`` underneath. Args: f_derivatives (function): A function that returns first derivatives of the differential equation. 
initial (np.array): A numpy array of initial values. Must be the same dimension as the returned by f_derivatives. oldest (float): Upper limit of integration. For instance, 100. Returns: Array of interpolation functions, of same length as input function's return values. """ bunch = solve_ivp(f_derivatives, t_span=(0, oldest), y0=initial, vectorized=True, dense_output=True) return bunch.sol SILER_CONSTANTS = [0, 0.2, 0.0002, 0.003, 1, 0.1, 0.015, 0.01] def siler_default(): r""" Construct a total mortality rate using the Siler distribution and default constants. """ return siler_time_dependent_hazard(SILER_CONSTANTS) def siler_time_dependent_hazard(constants): r""" This Siler distribution is a good approximation to what a real total mortality rate looks like. Both the equations and the parameters come from a paper [1] where they were fit to a Scandinavian country. We will use this as the one true mortality rate for this session. [1] V. Canudas-Romo and R. Schoen, “Age-specific contributions to changes in the period and cohort life expectancy,” Demogr. Res., vol. 13, pp. 63–82, 2005. Args: constants (np.array): List of constants. The first is time because this function can model change in a total mortality distribution over time. These are named according to the paper and are, in order, "t, a1, a2, a3, b1, b2, c1, c2". Returns: A function that returns mortality rate as a function of age. 
""" t, a1, a2, a3, b1, b2, c1, c2 = constants def siler(x): return a1 * np.exp(-b1 * x - c1 * t) + a2 * np.exp(b2 * x - c2 * t) + a3 * np.exp(-c2 * t) return siler def total_mortality_solution(mu): r"""Given a total mortality rate, as a function, return :math:`N=l(x)`.""" n_array = solve_differential_equation(build_derivative_total(mu), initial=np.array([1.0], dtype=float)) def total_pop(t): val = n_array(t)[0] if isinstance(val, np.ndarray): val[val < 0] = 0 elif val < 0: return 0.0 return val return total_pop def prevalence_solution(iota, rho, chi, mu): r"""This uses the single, prevalence-based equation.""" N = total_mortality_solution(mu) f_b = build_derivative_prevalence(iota, rho, chi) bunch = solve_differential_equation(f_b, initial=np.array([1e-6])) P = lambda t: bunch(t)[0] C = lambda t: P(t) * N(t) S = lambda t: (1 - P(t)) * N(t) return S, C, P def dismod_solution(iota, rho, chi, omega): r"""This solves the Dismod-AT equations.""" f_b = build_derivative_full(iota, rho, chi, omega) bunch = solve_differential_equation(f_b, initial=np.array([1.0 - 1e-6, 1e-6], dtype=np.float)) S = lambda t: bunch(t)[0] C = lambda t: bunch(t)[1] return S, C def average_over_interval(raw_rate, weight_function, intervals): r""" Construct demographic observations from a raw rate function. This is a one-dimensional function, presumably along the cohort time. It doesn't integrate over ages and years. Args: raw_rate (function): A function that returns a rate. weight_function (function): A function that returns a weight. This will usually be :math:`N`, the total population. intervals (DemographicInterval): Set of contiguous intervals over which to average the values. Returns: np.ndarray: List of integrand values. 
""" def averaging_function(t): return raw_rate(t) * weight_function(t) results = np.zeros(len(intervals), dtype=np.float) for interval_idx in range(len(intervals)): start = intervals.start[interval_idx] finish = intervals.finish[interval_idx] results[interval_idx] = quad(averaging_function, start, finish)[0] return results def integrand_normalization(weight_function, intervals): r""" Make the denominator for integrands. This is a one-dimensional function, presumably along the cohort time. It doesn't integrate over ages and years. Args: weight_function (function): Weights, usually population. intervals (DemographicInterval): Contiguous time periods. Returns: np.array: Integrated values of the weight function. """ def constant_rate(t): return 1.0 return average_over_interval(constant_rate, weight_function, intervals) def integrands_from_function(rates, weight_function, intervals): r""" Given a list of rate functions and a weight function, return their integrands on intervals. Args: rates (list[function]): A list of rate functions to integrate. weight_function (function): The weight function, usually population. intervals (DemographicInterval): A set of time intervals, here along the cohort time. Returns: (list[np.array], np.array): A list of integrands, followed by the integrand that is the weighting function. """ normalization = integrand_normalization(weight_function, intervals) rate_integrands = list() for rate in rates: rate_integrands.append(average_over_interval(rate, weight_function, intervals) / normalization) return rate_integrands, normalization
11,780
3,763
# -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2016-11-13 01:17 from __future__ import unicode_literals from django.db import migrations, models import randomslugfield.fields class Migration(migrations.Migration): dependencies = [ ('badge', '0009_auto_20161112_1649'), ] operations = [ migrations.AddField( model_name='badgeclass', name='slug', field=randomslugfield.fields.RandomSlugField(blank=True, editable=False, length=7, max_length=7, unique=True), ), migrations.AlterField( model_name='badgeclass', name='image', field=models.ImageField(upload_to='uploads/badges/'), ), ]
722
246
import sys
from typing import Callable, List

try:
    # Probe once: some interpreters (e.g. PyPy without a real getsizeof)
    # raise when sys.getsizeof is called.
    sys.getsizeof(0)
    getsizeof = sys.getsizeof  # no need for a lambda wrapper
except Exception:  # narrowed from a bare `except:` (which also caught KeyboardInterrupt)
    # Fallback: count each object as 1 "unit" when real sizes are unavailable.
    getsizeof = lambda _: 1


def get_size(obj, seen=None):
    """Recursively finds size of objects.

    Follows dict keys/values, instance ``__dict__``s and generic iterables
    (but not str/bytes/bytearray, whose getsizeof already covers content).
    ``seen`` tracks visited object ids so self-referential structures and
    shared sub-objects are counted only once.
    """
    size = getsizeof(obj)
    if seen is None:
        seen = set()
    obj_id = id(obj)
    if obj_id in seen:
        return 0
    # Important: mark as seen *before* entering recursion to gracefully handle
    # self-referential objects.
    seen.add(obj_id)
    if isinstance(obj, dict):
        size += sum(get_size(v, seen) for v in obj.values())
        size += sum(get_size(k, seen) for k in obj.keys())
    elif hasattr(obj, '__dict__'):
        size += get_size(obj.__dict__, seen)
    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
        size += sum(get_size(i, seen) for i in obj)
    return size


def list_contains(base_list, obj_list):
    """Return True if ``obj_list`` occurs as a contiguous slice of ``base_list``.

    Fix over the original: an empty ``obj_list`` now returns True (the empty
    sequence is a sub-sequence of anything) instead of raising IndexError.
    """
    if not obj_list:
        return True
    if len(base_list) < len(obj_list):
        return False
    obj0 = obj_list[0]
    # Cheap first-element check before paying for the slice comparison.
    for i, base in enumerate(base_list[:len(base_list) + 1 - len(obj_list)]):
        if base == obj0 and base_list[i: i + len(obj_list)] == obj_list:
            return True
    return False


def rand_seed(x: int):
    """Seed both Python's and NumPy's RNGs with ``x`` for reproducibility."""
    import random
    random.seed(x)
    import numpy as np
    np.random.seed(x)
    # If using pytorch, set its seed as well:
    #   torch.manual_seed(x); torch.cuda.manual_seed(x); torch.cuda.manual_seed_all(x)


# Find those variables whose position shares a common head with pos_search.
# e.g. pos_search=[0], variables=[1, 1, 2, 2],
# positions=[[0, 2, 0, 0], [0, 2, 1, 0], [0, 3, 0], [1, 0]] -> returns [1, 1, 2]
find_var_with_pos: Callable[[list, list, List[list]], list] = lambda pos_search, variables, positions: [var for var, pos in zip(variables, positions) if pos[:len(pos_search)] == pos_search]

# Same filter, but returns the matching positions themselves.
find_pos_with_pos: Callable[[list, List[list]], list] = lambda pos_search, positions: [pos for pos in positions if pos[:len(pos_search)] == pos_search]
2,084
744
from __future__ import division
import numpy as np
import pandas as pd
import math
from tqdm import tqdm
from matplotlib.colors import rgb2hex


def MonodExt(k1, k2, kt, l, Stmp, ks=200):
    # Tip extension rate: length-dependent ramp (l against saturation kt) times
    # Monod substrate limitation (Stmp against half-saturation ks).
    # source: Lejeune et al 1995, Morphology of Trichoderma reesei QM 9414 in Submerged Cultures
    return (k1+k2*(l/(l+kt)))*(Stmp/(Stmp+ks))


class hyphal_walk(object):
    # Stochastic 2-D random-walk model of fungal hyphal growth: tips extend at a
    # substrate-limited rate, consume substrate from a grid, and branch randomly.

    def __init__(self, minTheta=10, maxTheta=100, avgRate=28, H=1440, N=1200, M=1e10, tstep=.0005,
                 q=0.004, S0=5e5, k1=50, maxktip=None, k2=None, kt=5, init_n=20, width=100,
                 set_start_center=True, use_monod=True, normal_theta=True):
        """
        minTheta = 10 #*pi/180 - minimum angle a branch can occur
        maxTheta = 100 #*pi/180 - maximum angle a branch can occur
        H = 1440 # number of hours
        N = 1200 # max simulation rounds
        M = 1e10 # max hyphae (carrying capacity)
        tstep = .0005 # time step (hours/step)
        q = 0.004 # branching frequency (maybe need to be scaled of what the time step is)
        S0 = 5e5 # intital conc. of substrate mg in whole grid (evenly dist.)
        k1 = 50 # (µm/h) initial tip extension rate, value estimated from Spohr et al 1998 figure 5
        maxktip = 2*k1 # (µm/h) maximum tip extension rate, value estimated from Spohr et al 1998 figure 5
        k2 = maxktip - k1 # (µm/h) difference between k1 and maxktip
        kt = 5 # saturation constant
        init_n = 20 # starting spores
        width = 100 # view window (um) (this is just 1 cm)
        set_start_center = True # if you want the model to start all spores at (0,0)
        """
        # Defaults derived from k1 when not given explicitly.
        if maxktip is None:
            maxktip = k1
        if k2 is None:
            k2 = maxktip - k1
        self.minTheta = minTheta
        self.maxTheta = maxTheta
        #self.avgRate = avgRate
        self.H = H
        self.N = N
        self.M = M
        self.tstep = tstep
        self.q = q
        self.S0 = S0
        self.k1 = k1
        self.maxktip = maxktip
        self.k2 = k2
        self.kt = kt
        self.init_n = init_n
        self.width = width
        self.set_start_center = set_start_center
        self.use_monod = use_monod
        self.normal_theta = normal_theta
        self.hyphae = self.intialize_hyphae()
        self.Sgrid = self.intialize_subtrate()

    def intialize_hyphae(self):
        # Build the initial dict of hyphal tips: one entry per spore with start
        # position (x0, y0), current position (x, y), heading angle (degrees),
        # accumulated biomass, birth round t, and tip length l.
        hyphae = {}
        centers = np.array([0, 0]).reshape(1, 2)
        # for each spore make and intital random walk direction (no movement yet)
        if self.normal_theta == True:
            # Evenly spread initial headings over [0, 360] degrees.
            theta_init = {i: angle_ for i, angle_ in enumerate(np.linspace(0, 360, self.init_n))}
        for spore_i in range(0, self.init_n):
            if self.set_start_center == False:
                rxy = np.random.uniform(0, round(self.width), 2) + centers
            else:
                rxy = centers
            if self.normal_theta == True:
                iTheta = theta_init[spore_i]
            else:
                iTheta = np.around(np.random.uniform(0, 360), 1)
            hyphae[spore_i] = {'x0': rxy[:, 0], 'y0': rxy[:, 1],
                               'x': rxy[:, 0], 'y': rxy[:, 1],
                               'angle': iTheta,
                               'biomass': 0,
                               't': 0,
                               'l': 0}
        return hyphae

    def intialize_subtrate(self, block_div=2):
        # make a substrate grid: square cells spanning [-width, width] in x and y,
        # with the total substrate S0 split evenly across one row of blocks.
        Sgrid = []  # make a substrate grid
        size_of_block = round(self.width / block_div)
        grid_min = list(np.linspace(-self.width, self.width, size_of_block)[:-1])
        grid_max = list(np.linspace(-self.width, self.width, size_of_block)[1:])
        for i in range(len(grid_max)):
            Sgrid.append(pd.DataFrame([[self.S0 / len(grid_max)] * len(grid_max), grid_min, grid_max,
                                       [grid_min[i]] * len(grid_max), [grid_max[i]] * len(grid_max)],
                                      index=['S', 'X_Gmin', 'X_Gmax', 'Y_Gmin', 'Y_Gmax']).T)
        Sgrid = pd.concat(Sgrid, axis=0).reset_index()
        return Sgrid

    def run_simulation(self):
        # Advance the model N rounds (or until carrying capacity M is hit) and
        # return per-round snapshots of the hyphae dict and the substrate grid.
        # run until i exceeds limits
        time_snapshot_hy = {}
        time_snapshot_sub = {}
        for i in tqdm(range(0, self.N)):
            bio_mass = 0
            if len(self.hyphae) >= self.M:
                # hit carrying cpacity of the system
                print('broke capacity first')
                break
            # otherwise continue to model
            for j in range(0, len(self.hyphae)):
                # find tip in substrate grid
                grid_index = self.Sgrid[((self.Sgrid['Y_Gmin'] <= self.hyphae[j]['y'][0]) &
                                         (self.Sgrid['X_Gmin'] <= self.hyphae[j]['x'][0])) == True].index.max()
                if np.isnan(grid_index):
                    # left view space
                    continue
                if round(self.Sgrid.loc[grid_index, 'S']) != 0:
                    # get current extention
                    if self.use_monod == True:
                        ext = MonodExt(self.k1, self.k2, self.kt,
                                       self.hyphae[j]['l'],
                                       self.Sgrid.loc[grid_index, 'S'])
                    else:
                        ext = self.maxktip
                    # extend in x and y
                    dx = ext * self.tstep * np.cos(self.hyphae[j]['angle'] * np.pi / 180)  # new coordinate in x-axis
                    dy = ext * self.tstep * np.sin(self.hyphae[j]['angle'] * np.pi / 180)  # new coordinate in y-axis
                    # biomass created for hyphae j
                    dl_c = np.sqrt(dx**2 + dy**2)
                    # (constant to scale biomass density)
                    dl_c *= 1
                    bio_mass += dl_c
                    # subtract used substrate
                    if self.use_monod == True:
                        self.Sgrid.loc[grid_index, 'S'] = self.Sgrid.loc[grid_index, 'S'] - dl_c
                    # update location
                    self.hyphae[j]['x'] = self.hyphae[j]['x'] + dx
                    self.hyphae[j]['y'] = self.hyphae[j]['y'] + dy
                    self.hyphae[j]['l'] = np.sqrt((self.hyphae[j]['x'][0] - self.hyphae[j]['x0'][0])**2
                                                  + (self.hyphae[j]['y'][0] - self.hyphae[j]['y0'][0])**2)
                    self.hyphae[j]['biomass'] = self.hyphae[j]['biomass'] + dl_c
                    # randomly split
                    # NOTE(review): branching is placed inside the substrate-available
                    # branch here (tips only branch while growing) -- the original
                    # file's indentation was lost; confirm against upstream source.
                    if np.random.uniform(0, 1) < self.q:
                        direction = [-1, 1][round(np.random.uniform(0, 1))]
                        newangle = direction * round(np.random.uniform(self.minTheta, self.maxTheta))
                        newangle += self.hyphae[j]['angle']
                        # New tip starts at the parent's current position with zero
                        # biomass/length and records its birth round.
                        self.hyphae[len(self.hyphae)] = {'x0': self.hyphae[j]['x'],
                                                         'y0': self.hyphae[j]['y'],
                                                         'x': self.hyphae[j]['x'],
                                                         'y': self.hyphae[j]['y'],
                                                         'angle': newangle,
                                                         'biomass': 0,
                                                         't': i,
                                                         'l': 0}
            time_snapshot_hy[i] = pd.DataFrame(self.hyphae.copy()).copy()
            time_snapshot_sub[i] = pd.DataFrame(self.Sgrid.copy()).copy()
        return time_snapshot_hy, time_snapshot_sub
7,331
2,469
import sys
sys.setrecursionlimit(10**6)  # deep recursion: skewed BSTs + the recursive main loop


class Node:
    """BST node carrying a value and its heap-style position index."""

    def __init__(self, val, pos):
        self.left = None
        self.right = None
        self.pos = pos   # 1-based position: root=1, left child=2*pos, right child=2*pos+1
        self.val = val


def insert(node, val, pos):
    """Insert val into the subtree rooted at node, printing the final position.

    Duplicates go to the right subtree (the `else` branch).
    """
    if node is None:
        print(pos)
        return Node(val, pos)
    if val < node.val:
        # move to left child
        node.left = insert(node.left, val, 2*pos)
    else:
        # move to right child
        node.right = insert(node.right, val, 2*pos+1)
    return node


def minValueNode(node):
    """Return the leftmost (minimum-valued) node of the subtree."""
    current = node
    while current.left is not None:
        current = current.left
    return current


def delete(node, val, case=True):
    """Delete val from the subtree; print its position only on the outer call.

    `case` is False for the internal successor-removal recursion so the
    successor's position is not printed a second time.
    """
    if node is None:
        return node
    # search
    if val < node.val:
        # move to left child
        node.left = delete(node.left, val, case)
    elif val > node.val:
        # move to right child
        node.right = delete(node.right, val, case)
    else:
        # here found
        if case:
            print(node.pos)
        # Now delete node and replacement
        if node.left is None and node.right is None:
            # leaf: simply remove
            node = None
        elif node.left is None:
            node = node.right
        elif node.right is None:
            node = node.left
        else:
            # two children: copy in-order successor's value, then delete it
            temp = minValueNode(node.right)
            node.val = temp.val
            node.right = delete(node.right, temp.val, False)
    return node


root = None


def main(q):
    """Process q operations from stdin: 'i <v>' inserts, anything else deletes."""
    global root
    oper, elem = input().split()
    if oper == 'i':
        root = insert(root, int(elem), 1)
    else:
        root = delete(root, int(elem), True)
    if q > 1:
        # Recursive loop over the remaining operations (hence the raised
        # recursion limit above).
        main(q-1)


main(int(input()))
1,636
509
from kubernetes import client, config
import json


# Generate the manifest (YAML/JSON) for a test Pod and print it.
def main():
    pod = create_pod("dev")
    # sanitize_for_serialization converts the typed client model into plain
    # dicts/lists suitable for json.dumps.
    print(json.dumps(client.ApiClient().sanitize_for_serialization(pod)))


def create_pod(environment):
    """Build a V1Pod named 'test-pod' running nginx, with ENV=<environment>."""
    return client.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=client.V1ObjectMeta(
            name="test-pod",
        ),
        spec=client.V1PodSpec(
            containers=[
                client.V1Container(
                    name="test-container",
                    image="nginx",
                    env=[
                        client.V1EnvVar(
                            name="ENV",
                            value=environment,
                        )
                    ]
                )
            ]
        )
    )


if __name__ == '__main__':
    main()
814
221
"""Animations that update mobjects.""" __all__ = ["UpdateFromFunc", "UpdateFromAlphaFunc", "MaintainPositionRelativeTo"] import operator as op import typing from ..animation.animation import Animation if typing.TYPE_CHECKING: from ..mobject.mobject import Mobject class UpdateFromFunc(Animation): """ update_function of the form func(mobject), presumably to be used when the state of one mobject is dependent on another simultaneously animated mobject """ def __init__( self, mobject: "Mobject", update_function: typing.Callable[["Mobject"], typing.Any], suspend_mobject_updating: bool = False, **kwargs ) -> None: self.update_function = update_function super().__init__( mobject, suspend_mobject_updating=suspend_mobject_updating, **kwargs ) def interpolate_mobject(self, alpha: float) -> None: self.update_function(self.mobject) class UpdateFromAlphaFunc(UpdateFromFunc): def interpolate_mobject(self, alpha: float) -> None: self.update_function(self.mobject, alpha) class MaintainPositionRelativeTo(Animation): def __init__( self, mobject: "Mobject", tracked_mobject: "Mobject", **kwargs ) -> None: self.tracked_mobject = tracked_mobject self.diff = op.sub( mobject.get_center(), tracked_mobject.get_center(), ) super().__init__(mobject, **kwargs) def interpolate_mobject(self, alpha: float) -> None: target = self.tracked_mobject.get_center() location = self.mobject.get_center() self.mobject.shift(target - location + self.diff)
1,680
496
import torch
import torch.nn as nn


def _make_divisible(ch, divisor=8, min_ch=None):
    """Round the channel count `ch` to the nearest multiple of `divisor`.

    The result is at least `min_ch` and never more than 10% below `ch`.
    """
    if min_ch is None:
        min_ch = divisor
    new_ch = max(min_ch, int(ch + divisor / 2) // divisor * divisor)
    # Ensure the rounding down did not lose more than 10% of the channels.
    if new_ch < 0.9 * ch:
        new_ch += divisor
    return new_ch


# ----------------------
# MobileNetV1
# ----------------------
# Standard convolution + BatchNorm + ReLU6.
def conv_bn(inp, oup, stride=1):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6()
    )


# Depthwise-separable block: depthwise conv + BN + ReLU6, then pointwise conv + BN + ReLU6.
def conv_dw(inp, oup, stride=1):
    return nn.Sequential(
        # DW (groups=inp makes the 3x3 conv depthwise)
        nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
        nn.BatchNorm2d(inp),
        nn.ReLU6(),

        # PW
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(),
    )


class MobileNetV1(nn.Module):
    """MobileNetV1 for 224x224 inputs with a 1000-way linear classifier."""

    def __init__(self):
        super(MobileNetV1, self).__init__()
        self.stage1 = nn.Sequential(
            # H, W, C
            # 224, 224, 3 -> 112, 112, 32
            conv_bn(3, 32, 2),
            # 112, 112, 32 -> 112, 112, 64
            conv_dw(32, 64, 1),

            # 112, 112, 64 -> 56, 56, 128
            conv_dw(64, 128, 2),
            conv_dw(128, 128, 1),

            # 56 ,56 ,128 -> 28, 28, 256
            conv_dw(128, 256, 2),
            conv_dw(256, 256, 1),
        )
        self.stage2 = nn.Sequential(
            # 28, 28, 256 -> 14, 14, 512
            conv_dw(256, 512, 2),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
        )
        self.stage3 = nn.Sequential(
            # 14, 14, 512 -> 7, 7, 1024
            conv_dw(512, 1024, 2),
            conv_dw(1024, 1024, 1),
        )
        # 7, 7, 1024 -> 1, 1, 1024
        self.avg = nn.AdaptiveAvgPool2d((1, 1))
        # 1, 1, 1024 -> 1, 1, 1000
        self.fc = nn.Linear(1024, 1000)

    def forward(self, x):
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.avg(x)
        x = x.view(-1, 1024)
        x = self.fc(x)
        return x


# ----------------------
# MobileNet V2
# ----------------------
class ConvBNReLU(nn.Sequential):
    """Conv2d + BatchNorm + ReLU6 with 'same'-style padding for odd kernels."""

    def __init__(self, in_channel, out_channel, kernel_size=3, stride=1, groups=1):
        padding = (kernel_size - 1) // 2
        super(ConvBNReLU, self).__init__(
            nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding, groups=groups, bias=False),
            nn.BatchNorm2d(out_channel),
            nn.ReLU6(inplace=True)
        )


class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> linear project."""

    def __init__(self, in_channel, out_channel, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        hidden_channel = in_channel * expand_ratio
        # Residual shortcut only when spatial size and channel count are preserved.
        self.use_shortcut = stride == 1 and in_channel == out_channel

        layers = []
        if expand_ratio != 1:
            # 1x1 pointwise conv
            layers.append(ConvBNReLU(in_channel, hidden_channel, kernel_size=1))
        layers.extend([
            # 3x3 depthwise conv
            ConvBNReLU(hidden_channel, hidden_channel, stride=stride, groups=hidden_channel),
            # 1x1 pointwise conv(linear)
            nn.Conv2d(hidden_channel, out_channel, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channel),
        ])

        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        if self.use_shortcut:
            return x + self.conv(x)
        else:
            return self.conv(x)


class MobileNetV2(nn.Module):
    """MobileNetV2 with width multiplier `alpha`; channels rounded to `round_nearest`."""

    def __init__(self, num_classes=1000, alpha=1.0, round_nearest=8):
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = _make_divisible(32 * alpha, round_nearest)
        last_channel = _make_divisible(1280 * alpha, round_nearest)

        inverted_residual_setting = [
            # t, c, n, s  (expand ratio, output channels, repeats, first stride)
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

        features = []
        # conv1 layer
        features.append(ConvBNReLU(3, input_channel, stride=2))
        # building inverted residual residual blockes
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * alpha, round_nearest)
            for i in range(n):
                # Only the first block of each group downsamples.
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, last_channel, 1))
        # combine feature layers
        self.features = nn.Sequential(*features)

        # building classifier
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(last_channel, num_classes)
        )

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x


if __name__ == '__main__':
    model = MobileNetV2()
    # model = MobileNetV1()
    input = torch.randn(1, 3, 224, 224)
    out = model(input)
    print(out.shape)
5,583
2,377
from xbrr.base.reader.base_parser import BaseParser
from xbrr.edinet.reader.element_value import ElementValue


class Stock(BaseParser):
    """Parser exposing stock/dividend-related XBRL elements of an EDINET filing."""

    def __init__(self, reader):
        # Friendly attribute name -> XBRL element identifier.
        tags = {
            "dividend_paid": "jpcrp_cor:DividendPaidPerShareSummaryOfBusinessResults",  # dividend per share
            "dividends_surplus": "jppfs_cor:DividendsFromSurplus",  # dividends from surplus (retained earnings)
            "purchase_treasury_stock": "jppfs_cor:PurchaseOfTreasuryStock",  # share buyback
        }

        super().__init__(reader, ElementValue, tags)
538
190
"""This program first reads in the sqlite database made by ParseAuthors.py. Then, after just a little data cleaning, it undergoes PCA decomposition. After being decomposed via PCA, the author data is then clustered by way of a K-means clustering algorithm. The number of clusters can be set by changing the value of n_clusters.""" import sqlite3 import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.decomposition import PCA from sklearn.cluster import KMeans if __name__ == '__main__': # Filepath of sqlite database made by ParseAuthors.py db_path = '/media/sf_G_DRIVE/jita1407/authors.sqlite' # Load this into a dataframe conn = sqlite3.connect(db_path, detect_types=sqlite3.PARSE_DECLTYPES) dataframe = pd.read_sql_query("SELECT * FROM Authors", conn) conn.close() # Get rid of some redundant data to make analysis cleaner and more straightforward dataframe = dataframe.drop(['int_skew', 'unique_messages'], axis=1) # Separate out our list of Authors from the data about them authors = dataframe.ix[:,1].copy() data = dataframe.ix[:,2:7].copy() # Set up our PCA decomposition pca = PCA() pca.fit(data.as_matrix()) # Transform our data into features calculated by PCA transformed = pca.transform(data.as_matrix()) # Cluster our data according to K-means n_clusters = 2 # number of clusters to organize data into n_init = 20 # number of times to replicate clustering n_jobs = 1 # number of processors to use for clustering (-1 for all) kmeans = KMeans(n_clusters=n_clusters, n_init=n_init, n_jobs=n_jobs).fit(transformed) # Get the results of the clustering centers = kmeans.cluster_centers_ labels = kmeans.labels_ # Make some plots # Plot explained variance for each PCA component #plt.bar(np.arange(len(pca.explained_variance_)), pca.explained_variance_)
2,031
645
class NumMatrix:
    """O(1) rectangle-sum queries over an immutable 2-D matrix via prefix sums."""

    # Fix: the original annotated with `List[List[int]]` without importing
    # `List`, which raises NameError when the class body executes; the builtin
    # generic `list[...]` needs no import.
    def __init__(self, matrix: 'list[list[int]]'):
        # Fix: the original early return left `self.dp` unset for an empty
        # matrix, so any later sumRegion call raised AttributeError. A 1x1
        # zero table keeps sumRegion(0,0,0,0)-style calls well-defined.
        if not matrix or not matrix[0]:
            self.dp = [[0]]
            return
        m, n = len(matrix), len(matrix[0])
        # dp[r][c] == sum of matrix[0:r][0:c]; the extra zero row/column
        # removes all boundary checks from the inclusion-exclusion below.
        self.dp = [[0] * (n + 1) for _ in range(m + 1)]
        for r in range(m):
            for c in range(n):
                self.dp[r + 1][c + 1] = (self.dp[r + 1][c] + self.dp[r][c + 1]
                                         + matrix[r][c] - self.dp[r][c])

    def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
        """Return the sum of matrix[row1..row2][col1..col2] (inclusive) in O(1)."""
        return (self.dp[row2 + 1][col2 + 1] - self.dp[row1][col2 + 1]
                - self.dp[row2 + 1][col1] + self.dp[row1][col1])


# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
718
291
""" 格式: \033[0m -> 默认字体显示 \033[显示方式;前景色;背景色m -> 格式 三个参数顺序不敏感,因为值各不相同 显示方式列表: 0 - 默认值 1 - 高亮 4 - 下划线 5 - 闪烁 7 - 反显 8 - 不可见 前景色: 30 - 黑色 31 - 红色 32 - 绿色 33 - 黄色 34 - 蓝色 35 - 梅色 36 - 青色 37 - 白色 背景色: 40 - 黑色 前景色+10即可 """ from copy import copy as _copy METHOD_DEFAULT = -1 METHOD_BOLD = 1 METHOD_UNDERLINE = 4 METHOD_FLASH = 5 METHOD_REVERSE = 7 METHOD_HIDE = 8 FORE_BLACK = 30 FORE_RED = 31 FORE_GREEN = 32 FORE_YELLOW = 33 FORE_BLUE = 34 FORE_PLUM = 35 FORE_CYAN = 36 FORE_WHITE = 37 FORE_DEFAULT = -1 BACK_BLACK = 40 BACK_RED = 41 BACK_GREEN = 42 BACK_YELLOW = 43 BACK_BLUE = 44 BACK_PLUM = 45 BACK_CYAN = 46 BACK_WHITE = 47 BACK_DEFAULT = -1 def _ColorDecoratorAll(content, method, foreColor, backColor): rtn = "\033[" if method != METHOD_DEFAULT: rtn += str(method) if foreColor != FORE_DEFAULT: rtn += ";" + str(foreColor) if backColor != BACK_DEFAULT: rtn += ";" + str(backColor) rtn += "m" + content + "\033[0m" return rtn class _StrDecorator: method = METHOD_DEFAULT foreColor = FORE_DEFAULT backColor = BACK_DEFAULT def __init__( self, method=METHOD_DEFAULT, foreColor=FORE_DEFAULT, backColor=BACK_DEFAULT ): self.method = method self.foreColor = foreColor self.backColor = backColor def __add__(self, ano): rtn = _copy(self) if ano.method != METHOD_DEFAULT: rtn.method = ano.method if ano.foreColor != FORE_DEFAULT: rtn.foreColor = ano.foreColor if ano.backColor != BACK_DEFAULT: rtn.backColor = ano.backColor return rtn def __call__(self, str): return _ColorDecoratorAll(str, self.method, self.foreColor, self.backColor) # Fore color Black = _StrDecorator(foreColor=FORE_BLACK) Red = _StrDecorator(foreColor=FORE_RED) Green = _StrDecorator(foreColor=FORE_GREEN) Yellow = _StrDecorator(foreColor=FORE_YELLOW) Blue = _StrDecorator(foreColor=FORE_BLUE) Plum = _StrDecorator(foreColor=FORE_PLUM) Cyan = _StrDecorator(foreColor=FORE_CYAN) White = _StrDecorator(foreColor=FORE_WHITE) # Method Bold = _StrDecorator(method=METHOD_BOLD) Underline = _StrDecorator(method=METHOD_UNDERLINE) Flash = 
_StrDecorator(method=METHOD_FLASH) Reverse = _StrDecorator(method=METHOD_REVERSE) Hide = _StrDecorator(method=METHOD_HIDE) # Back Color BackBlack = _StrDecorator(backColor=BACK_BLACK) BackRed = _StrDecorator(backColor=BACK_RED) BackGreen = _StrDecorator(backColor=BACK_GREEN) BackYellow = _StrDecorator(backColor=BACK_YELLOW) BackBlue = _StrDecorator(backColor=BACK_BLUE) BackPlum = _StrDecorator(backColor=BACK_PLUM) BackCyan = _StrDecorator(backColor=BACK_CYAN) BackWhite = _StrDecorator(backColor=BACK_WHITE) # Some short cuts FontInfo = _StrDecorator() # All default FontStrength = _copy(Bold) FontWarining = Yellow + Bold FontError = Red + Bold
2,951
1,342
import abc
from functools import cached_property, partial

import jax
import jax.numpy as jnp


class Kernel(abc.ABC):
    """Covariance kernel interface."""

    @abc.abstractmethod
    def __call__(self, X, Y):
        raise NotImplementedError


class _PairwiseKernel(Kernel):
    # Base class for kernels defined by a scalar `pairwise(x, y)` function.
    # __call__ dispatches on the argument shapes: scalar entry, Gram diagonal,
    # or full Gram matrix (built via vmap over rows and columns).

    @partial(jax.jit, static_argnums=(0,))
    def __call__(self, X, Y):
        # Single element of the Gram matrix:
        # X.shape=(d,), Y.shape=(d,) -> K.shape = ()
        if X.ndim == Y.ndim <= 1:
            return self.pairwise(X, Y)

        # Diagonal of the Gram matrix:
        # X.shape=(N,d), Y.shape=(N,d) -> K.shape = (N,)
        if X.shape == Y.shape:
            return self._evaluate_inner(X, Y)

        # Full Gram matrix:
        # X.shape=[N,d), Y.shape=(d,K) -> K.shape = (N,K)
        # (note: Y is expected transposed, columns are points)
        return self._evaluate_outer(X, Y)

    @abc.abstractmethod
    def pairwise(self, x, y):
        raise NotImplementedError

    @cached_property
    def _evaluate_inner(self):
        # vmap over matching rows of X and Y -> elementwise diagonal.
        return jax.jit(jax.vmap(self.pairwise, (0, 0), 0))

    @cached_property
    def _evaluate_outer(self):
        # Outer vmap: rows of X against columns of Y -> full Gram matrix.
        _pairwise_row = jax.jit(jax.vmap(self.pairwise, (0, None), 0))
        return jax.jit(jax.vmap(_pairwise_row, (None, 1), 1))

    def __str__(self):
        return f"{self.__class__.__name__}()"

    def __add__(self, other):
        # Sum of two kernels is a kernel; wrap the summed pairwise function.
        @jax.jit
        def pairwise_new(x, y):
            return self.pairwise(x, y) + other.pairwise(x, y)

        return Lambda(pairwise_new)


class Lambda(_PairwiseKernel):
    """Kernel defined directly by a user-supplied pairwise function."""

    def __init__(self, fun, /):
        self._lambda_fun = jax.jit(fun)

    @partial(jax.jit, static_argnums=(0,))
    def pairwise(self, x, y):
        return self._lambda_fun(x, y)


class _RadialKernel(_PairwiseKernel):
    r"""Radial kernels.

    k(x,y) = output_scale * \varphi(\|x-y\|*input_scale)
    """

    def __init__(
        self,
        *,
        output_scale=1.0,
        input_scale=1.0,
    ):
        self._output_scale = output_scale
        self._input_scale = input_scale

    @property
    def output_scale(self):
        return self._output_scale

    @property
    def output_scale_squared(self):
        return self.output_scale ** 2

    @property
    def input_scale(self):
        return self._input_scale

    @property
    def input_scale_squared(self):
        return self.input_scale ** 2

    @abc.abstractmethod
    def pairwise(self, X, Y):
        raise NotImplementedError

    @partial(jax.jit, static_argnums=0)
    def _distance_squared_l2(self, X, Y):
        # Squared Euclidean distance, written as a dot product for jit-friendliness.
        return (X - Y).dot(X - Y)


class SquareExponential(_RadialKernel):
    """Square-exponential (RBF/Gaussian) kernel."""

    @partial(jax.jit, static_argnums=0)
    def pairwise(self, x, y):
        dist_squared = self._distance_squared_l2(x, y) * self.input_scale_squared
        return self.output_scale_squared * jnp.exp(-dist_squared / 2.0)


class Matern52(_RadialKernel):
    # Careful! Matern52 is not differentiable at x=y!
    # Therefore, it is likely unusable for PNMOL...
    @partial(jax.jit, static_argnums=(0,))
    def pairwise(self, x, y):
        dist_unscaled = self._distance_squared_l2(x, y)
        dist_scaled = jnp.sqrt(5.0 * dist_unscaled * self.input_scale_squared)
        A = 1 + dist_scaled + dist_scaled ** 2.0 / 3.0
        B = jnp.exp(-dist_scaled)
        return self.output_scale_squared * A * B


class Polynomial(_PairwiseKernel):
    """k(x,y) = (x.T @ y + c)^d"""

    def __init__(self, *, order=2, const=1.0):
        self._order = order
        self._const = const

    @property
    def order(self):
        return self._order

    @property
    def const(self):
        return self._const

    @partial(jax.jit, static_argnums=(0,))
    def pairwise(self, x, y):
        return (x.dot(y) + self.const) ** self.order


class WhiteNoise(_PairwiseKernel):
    """Kernel that is output_scale^2 when x == y elementwise, else 0."""

    def __init__(self, *, output_scale=1.0):
        self._output_scale = output_scale

    @property
    def output_scale(self):
        return self._output_scale

    @partial(jax.jit, static_argnums=(0,))
    def pairwise(self, x, y):
        return self.output_scale ** 2 * jnp.all(x == y)


class _StackedKernel(Kernel):
    """Evaluate several kernels at once; Gram matrices are block-diagonal."""

    def __init__(self, *, kernel_list):
        self.kernel_list = kernel_list

    @partial(jax.jit, static_argnums=0)
    def __call__(self, X, Y):
        gram_matrix_list = [k(X, Y) for k in self.kernel_list]

        # Diagonal of the Gram matrix:
        # Concatenate the results together
        if X.shape == Y.shape:
            return jnp.concatenate(gram_matrix_list)

        # Full Gram matrix:
        # Block diag the gram matrix
        return jax.scipy.linalg.block_diag(*gram_matrix_list)


def duplicate(kernel, num):
    """Create a stack of kernels such that the Gram matrix becomes block diagonal.

    The blocks are all identical.
    """
    return _StackedKernel(kernel_list=[kernel] * num)


def mle_input_scale(*, mesh_points, data, kernel_type, input_scale_trials):
    """Grid-search MLE: return the trial input scale with maximal log-likelihood."""
    scale_to_log_lklhd = partial(
        input_scale_to_log_likelihood,
        data=data,
        kernel_type=kernel_type,
        mesh_points=mesh_points,
    )
    # vmap over all candidate scales in one jitted call.
    scale_to_log_lklhd_optimised = jax.jit(jax.vmap(scale_to_log_lklhd))
    log_likelihood_values = scale_to_log_lklhd_optimised(input_scale=input_scale_trials)
    index_max = jnp.argmax(log_likelihood_values)
    return input_scale_trials[index_max]


@partial(jax.jit, static_argnums=3)
def input_scale_to_log_likelihood(input_scale, mesh_points, data, kernel_type):
    """Log-likelihood of `data` under a `kernel_type` GP with the given input scale."""
    kernel = kernel_type(input_scale=input_scale)
    K = kernel(mesh_points, mesh_points.T)
    return log_likelihood(gram_matrix=K, y=data, n=data.shape[0])


@jax.jit
def log_likelihood(gram_matrix, y, n):
    # Gaussian log-density: -(y^T K^{-1} y + log|K| + n log 2pi) / 2.
    a = y @ jnp.linalg.solve(gram_matrix, y)
    b = jnp.log(jnp.linalg.det(gram_matrix))
    c = n * jnp.log(2 * jnp.pi)
    return -0.5 * (a + b + c)
2,120
# super class from genie.libs.ops.msdp.iosxe.msdp import Msdp as MsdpXE class Msdp(MsdpXE): ''' Msdp Ops Object ''' pass
148
68
import aiohttp
import pytest
from aiohttp import web

from virtool_workflow.api.client import JobApiHttpSession
from tests.api.mocks.mock_api import mock_routes


@pytest.fixture
def loop(event_loop):
    # Expose pytest-asyncio's event_loop under the fixture name aiohttp expects.
    return event_loop


@pytest.fixture
async def jobs_api_url():
    # Base path under which the mocked Jobs API is served.
    return "/api"


@pytest.fixture
async def mock_jobs_api_app(loop):
    """Build an aiohttp application registering every mocked Jobs API route table."""
    app = web.Application(loop=loop)

    for route_table in mock_routes:
        app.add_routes(route_table)

    return app


@pytest.fixture
async def http(mock_jobs_api_app, aiohttp_client) -> aiohttp.ClientSession:
    """Create an http client for accessing the mocked Jobs API."""
    # auto_decompress=False: responses are passed through verbatim so tests can
    # exercise compressed payload handling themselves.
    session = await aiohttp_client(mock_jobs_api_app, auto_decompress=False)
    return JobApiHttpSession(session)
740
253
# Generated by Django 3.0.14 on 2021-08-08 10:04 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("pbn_api", "0020_przemapuj"), ] operations = [ migrations.AlterField( model_name="scientist", name="lastName", field=models.TextField(blank=True, db_index=True, null=True), ), migrations.AlterField( model_name="scientist", name="name", field=models.TextField(blank=True, db_index=True, null=True), ), migrations.AlterField( model_name="scientist", name="orcid", field=models.TextField(blank=True, db_index=True, null=True), ), migrations.AlterField( model_name="scientist", name="pbnId", field=models.TextField(blank=True, db_index=True, null=True), ), migrations.AlterField( model_name="scientist", name="polonUid", field=models.TextField(blank=True, db_index=True, null=True), ), migrations.AlterField( model_name="scientist", name="qualifications", field=models.TextField( blank=True, db_index=True, null=True, verbose_name="Tytuł" ), ), ]
1,359
414
"""Unit test for google.py""" __author__ = "Mark Pilgrim (f8dy@diveintomark.org)" __version__ = "$Revision: 1.4 $" __date__ = "$Date: 2004/02/06 21:00:53 $" __copyright__ = "Copyright (c) 2002 Mark Pilgrim" __license__ = "Python" import google import unittest import sys, os import GoogleSOAPFacade from StringIO import StringIO class BaseClass(unittest.TestCase): q = "python unit testing" url = "http://www.python.org/" phrase = "ptyhon" searchparams = {"func":"doGoogleSearch"} luckyparams = {} luckyparams.update(searchparams) luckyparams.update({"feelingLucky":1}) metaparams = {} metaparams.update(searchparams) metaparams.update({"showMeta":1}) reverseparams = {} reverseparams.update(searchparams) reverseparams.update({"reverseOrder":1}) cacheparams = {"func":"doGetCachedPage"} spellingparams = {"func":"doSpellingSuggestion"} envkey = "GOOGLE_LICENSE_KEY" badkey = "a" class Redirector(BaseClass): def setUp(self): self.savestdout = sys.stdout self.output = StringIO() sys.stdout = self.output def tearDown(self): sys.stdout = self.savestdout class CommandLineTest(Redirector): def lastOutput(self): self.output.seek(0) rc = self.output.read() self.output.seek(0) return rc def testVersion(self): """-v should print version""" google.main(["-v"]) commandLineAnswer = self.lastOutput() google._version() self.assertEqual(commandLineAnswer, self.lastOutput()) def testVersionLong(self): """--version should print version""" google.main(["--version"]) commandLineAnswer = self.lastOutput() google._version() self.assertEqual(commandLineAnswer, self.lastOutput()) def testHelp(self): """-h should print usage""" google.main(["-h"]) commandLineAnswer = self.lastOutput() google._usage() self.assertEqual(commandLineAnswer, self.lastOutput()) def testHelpLong(self): """--help should print usage""" google.main(["--help"]) commandLineAnswer = self.lastOutput() google._usage() self.assertEqual(commandLineAnswer, self.lastOutput()) def testSearch(self): """-s should search""" google.main(["-s 
        # NOTE(review): the statement below is the tail of a test method whose
        # opening lines precede this chunk; reproduced verbatim.
        %s" % self.q])
        commandLineAnswer = self.lastOutput()
        google._output(google.doGoogleSearch(self.q), self.searchparams)
        self.assertEqual(commandLineAnswer, self.lastOutput())

    def testSearchLong(self):
        """--search should search"""
        # Pattern used by every CLI test below: run the command line, capture
        # its output, then call the underlying API + formatter directly and
        # require byte-identical output.
        google.main(["--search", self.q])
        commandLineAnswer = self.lastOutput()
        google._output(google.doGoogleSearch(self.q), self.searchparams)
        self.assertEqual(commandLineAnswer, self.lastOutput())

    def testSearchDefault(self):
        """no options + search phrase should search"""
        google.main([self.q])
        commandLineAnswer = self.lastOutput()
        google._output(google.doGoogleSearch(self.q), self.searchparams)
        self.assertEqual(commandLineAnswer, self.lastOutput())

    def testNoOptions(self):
        """no options at all should print usage"""
        google.main([])
        commandLineAnswer = self.lastOutput()
        google._usage()
        self.assertEqual(commandLineAnswer, self.lastOutput())

    def testCache(self):
        """-c should retrieve cache"""
        google.main(["-c", self.url])
        commandLineAnswer = self.lastOutput()
        google._output(google.doGetCachedPage(self.url), self.cacheparams)
        self.assertEqual(commandLineAnswer, self.lastOutput())

    def testCacheLong(self):
        """--cache should retrieve cache"""
        google.main(["--cache", self.url])
        commandLineAnswer = self.lastOutput()
        google._output(google.doGetCachedPage(self.url), self.cacheparams)
        self.assertEqual(commandLineAnswer, self.lastOutput())

    def testSpelling(self):
        """-p should check spelling"""
        google.main(["-p", self.phrase])
        commandLineAnswer = self.lastOutput()
        google._output(google.doSpellingSuggestion(self.phrase), self.spellingparams)
        self.assertEqual(commandLineAnswer, self.lastOutput())

    def testSpellingLong(self):
        """--spelling should check spelling"""
        google.main(["--spelling", self.phrase])
        commandLineAnswer = self.lastOutput()
        google._output(google.doSpellingSuggestion(self.phrase), self.spellingparams)
        self.assertEqual(commandLineAnswer, self.lastOutput())

    def testLucky(self):
        """-l should return only first result"""
        google.main(["-l", "-s", self.q])
        commandLineAnswer = self.lastOutput()
        google._output(google.doGoogleSearch(self.q), self.luckyparams)
        self.assertEqual(commandLineAnswer, self.lastOutput())

    def testLucky1(self):
        """-1 should return only first result"""
        google.main(["-1", "-s", self.q])
        commandLineAnswer = self.lastOutput()
        google._output(google.doGoogleSearch(self.q), self.luckyparams)
        self.assertEqual(commandLineAnswer, self.lastOutput())

    def testLuckyLong(self):
        """--lucky should return only first result"""
        google.main(["--lucky", "-s", self.q])
        commandLineAnswer = self.lastOutput()
        google._output(google.doGoogleSearch(self.q), self.luckyparams)
        self.assertEqual(commandLineAnswer, self.lastOutput())

    def testMeta(self):
        """-m should return meta information"""
        google.main(["-m", "-s", self.q])
        commandLineAnswer = self.lastOutput()
        # searchTime differs between two live requests, so compare only the
        # output up to that field.
        commandLineAnswer = commandLineAnswer[:commandLineAnswer.index('searchTime')]
        google._output(google.doGoogleSearch(self.q), self.metaparams)
        realAnswer = self.lastOutput()
        realAnswer = realAnswer[:realAnswer.index('searchTime')]
        self.assertEqual(commandLineAnswer, realAnswer)

    def testMetaLong(self):
        """--meta should return meta information"""
        google.main(["--meta", "-s", self.q])
        commandLineAnswer = self.lastOutput()
        # Same searchTime truncation as testMeta.
        commandLineAnswer = commandLineAnswer[:commandLineAnswer.index('searchTime')]
        google._output(google.doGoogleSearch(self.q), self.metaparams)
        realAnswer = self.lastOutput()
        realAnswer = realAnswer[:realAnswer.index('searchTime')]
        self.assertEqual(commandLineAnswer, realAnswer)

    def testReverse(self):
        """-r should reverse results"""
        google.main(["-r", "-s", self.q])
        commandLineAnswer = self.lastOutput()
        google._output(google.doGoogleSearch(self.q), self.reverseparams)
        self.assertEqual(commandLineAnswer, self.lastOutput())

    def testReverseLong(self):
        """--reverse should reverse results"""
        google.main(["--reverse", "-s", self.q])
        commandLineAnswer = self.lastOutput()
        google._output(google.doGoogleSearch(self.q), self.reverseparams)
        self.assertEqual(commandLineAnswer, self.lastOutput())


class LicenseKeyTest(Redirector):
    """Exercise every license-key source (module attribute, environment
    variable, key file in home/current/script directory) with the key either
    cleared or invalid.

    NOTE(review): self.envkey, self.badkey and self.q appear to be provided
    by the Redirector base class defined earlier in this file -- confirm.
    """

    # Key file name the google module looks for, and the name used to park
    # any real key file while these tests run.
    licensefile = "googlekey.txt"
    licensebackup = "googlekey.txt.bak"

    def safeRename(self, dirname, old, new):
        # Rename old -> new inside dirname, silently ignoring a missing
        # source file (the key file may legitimately not exist).
        if dirname:
            old = os.path.join(dirname, old)
            new = os.path.join(dirname, new)
        try:
            os.rename(old, new)
        except OSError:
            pass

    def safeDelete(self, dirname, filename):
        # Delete dirname/filename, silently ignoring a missing file.
        if dirname:
            filename = os.path.join(dirname, filename)
        try:
            os.remove(filename)
        except OSError:
            pass

    def createfile(self, dirname, filename, content):
        # Write content to dirname/filename; used to plant bad key files.
        if dirname:
            filename = os.path.join(dirname, filename)
        fsock = open(filename, "w")
        fsock.write(content)
        fsock.close()

    def rememberKeys(self):
        # Stash the module-level and environment keys and park any key file
        # found in the home, current, or script directory.
        self.moduleLicenseKey = google.LICENSE_KEY
        self.envLicenseKey = os.environ.get(self.envkey, None)
        self.safeRename(os.environ["HOME"], self.licensefile, self.licensebackup)
        self.safeRename("", self.licensefile, self.licensebackup)
        self.safeRename(google._getScriptDir(), self.licensefile, self.licensebackup)

    def restoreKeys(self):
        # Undo rememberKeys: restore module/env keys, remove any key file a
        # test created, and move the parked originals back into place.
        google.LICENSE_KEY = self.moduleLicenseKey
        if self.envLicenseKey:
            os.environ[self.envkey] = self.envLicenseKey
        self.safeDelete(os.environ["HOME"], self.licensefile)
        self.safeRename(os.environ["HOME"], self.licensebackup, self.licensefile)
        self.safeDelete("", self.licensefile)
        self.safeRename("", self.licensebackup, self.licensefile)
        self.safeDelete(google._getScriptDir(), self.licensefile)
        self.safeRename(google._getScriptDir(), self.licensebackup, self.licensefile)

    def clearKeys(self):
        # Remove both the module-level and the environment license key so
        # each test starts keyless.
        google.setLicense(None)
        if os.environ.get(self.envkey):
            del os.environ[self.envkey]

    def setUp(self):
        Redirector.setUp(self)
        self.rememberKeys()
        self.clearKeys()

    def tearDown(self):
        Redirector.tearDown(self)
        self.clearKeys()
        self.restoreKeys()

    def testNoKey(self):
        """having no license key should raise google.NoLicenseKey"""
        self.assertRaises(google.NoLicenseKey, google.doGoogleSearch, q=self.q)

    def testPassInvalidKey(self):
        """passing invalid license key should fail with faultType"""
        self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch,
            q=self.q, license_key=self.badkey)

    def testSetInvalidKey(self):
        """setting invalid module-level license key should fail with faultType"""
        google.setLicense(self.badkey)
        self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q)

    def testEnvInvalidKey(self):
        """invalid environment variable license key should fail with faultType"""
        os.environ[self.envkey] = self.badkey
        self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q)

    def testHomeDirKey(self):
        """invalid license key in home directory should fail with faultType"""
        self.createfile(os.environ["HOME"], self.licensefile, self.badkey)
        self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q)

    def testCurDirKey(self):
        """invalid license key in current directory should fail with faultType"""
        self.createfile("", self.licensefile, self.badkey)
        self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q)

    def testScriptDirKey(self):
        """invalid license key in script directory should fail with faultType"""
        self.createfile(google._getScriptDir(), self.licensefile, self.badkey)
        self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q)


if __name__ == "__main__":
    unittest.main()
11,047
3,299
import sys
import asyncio
import tornado.ioloop
from classes.rabbitmq_tornado import TornadoAdapter
from tornado import gen
from services.read_sheet import read_sheet

# AMQP connection string: local RabbitMQ broker with the default guest account.
RABBIT_URI = "amqp://guest:guest@localhost:5672/"


@gen.coroutine
def handle_message(logger, message):
    """Handle one message from the incoming queue.

    Treats the payload as a sheet/file request, processes it with
    read_sheet, and returns the result.  NOTE(review): the return value is
    presumably republished by the adapter as the RPC reply -- confirm
    against TornadoAdapter.receive.
    """
    logger.info("File request {}".format(message))
    res = read_sheet(message)
    logger.info("File result {}".format(res))
    return res


if __name__ == "__main__":
    # NOTE(review): on Windows the default proactor loop does not work with
    # tornado's IO loop integration; force the selector policy -- confirm
    # against the tornado/asyncio version in use.
    if sys.platform == 'win32':
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    # Declarative broker topology: one publish target and one consume queue,
    # both bound to the same direct exchange with the same routing key.
    configuration = dict(
        publish=dict(
            outgoing_1=dict(
                exchange="processdata-rpc",
                exchange_type="direct",
                routing_key="processdata",
                queue="process-data-finished",
                durable=True,
                auto_delete=False,
                prefetch_count=1
            )
        ),
        receive=dict(
            incoming=dict(
                exchange="processdata-rpc",
                exchange_type="direct",
                routing_key="processdata",
                queue="process-data-comming",
                durable=True,
                auto_delete=False,
                prefetch_count=1
            )
        )
    )
    # Using Tornado IO Loop
    io_loop = tornado.ioloop.IOLoop.current()
    rabbit_connection = TornadoAdapter(rabbitmq_url=RABBIT_URI,
                                       configuration=configuration,
                                       io_loop=io_loop)
    rabbit_connection.receive(handler=handle_message,
                              queue=configuration["receive"]["incoming"]["queue"])
    # Blocks forever, servicing the broker connection.
    io_loop.start()
1,591
460
#!/usr/bin/env python3
"""Smart-climate daemon: polls Xiaomi Mi BT temperature/humidity sensors and
drives the bedroom ('br') and office ('cb') air conditioners through a remote
HTTP controller, persisting a small per-room state machine in JSON files."""
import json
import argparse
import re
import datetime
import paramiko
import requests

# cmd ['ssh', 'smart',
#      'mkdir -p /home/levabd/smart-home-temp-humidity-monitor;
#      cat - > /home/levabd/smart-home-temp-humidity-monitor/lr.json']
from btlewrap import available_backends, BluepyBackend
from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, \
    MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY

# Per-room AC state machines, loaded once at import time.
# Fix: use context managers -- the original opened two files into the same
# name and never closed either handle.
with open('/home/pi/smart-climat-daemon/ac_br_state.json') as f:
    br_state = json.load(f)
with open('/home/pi/smart-climat-daemon/ac_cb_state.json') as f:
    cb_state = json.load(f)

# Fallback URL used when an unknown room id is passed to the helpers below.
dummy_ac_url = 'http://smart.levabd.pp.ua:2002'


def valid_mitemp_mac(mac, pat=re.compile(r"[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}")):
    """Validate a sensor MAC address string; raise ArgumentTypeError if malformed."""
    if not pat.match(mac.upper()):
        raise argparse.ArgumentTypeError(
            'The MAC address "{}" seems to be in the wrong format'.format(mac))
    return mac


def _status_json(room):
    """Fetch and return the AC controller status JSON for *room* ('br'/'cb')."""
    status_url = dummy_ac_url
    if room == 'br':
        status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=27fbc501b51b47663e77c46816a'
    elif room == 'cb':
        status_url = 'http://smart.levabd.pp.ua:2002/status-office?key=27fbc501b51b47663e77c46816a'
    response = requests.get(status_url, timeout=(20, 30))
    return response.json()


def _persist_state(room, **flags):
    """Update the room's in-memory state flags and write them back to disk."""
    if room not in ('br', 'cb'):
        # Unknown room: nothing to persist (matches the original behaviour).
        return
    state = br_state if room == 'br' else cb_state
    state.update(flags)
    path = '/home/pi/smart-climat-daemon/ac_%s_state.json' % room
    with open(path, 'w') as file:
        json.dump(state, file)


def check_if_ac_off(room):
    """Return True/False for "AC is off", or None when the status payload
    carries no 'Pow' field (controller unreachable or odd answer)."""
    data = _status_json(room)
    if 'Pow' in data:
        print(data['Pow'])
        return data['Pow'] != "ON"
    return None


def check_if_ac_heat(room):
    """Return True when the AC is on in HEAT mode, False when not, or None
    when the status payload carries no 'Pow' field."""
    data = _status_json(room)
    print(data)
    if 'Pow' in data:
        return (data['Pow'] == "ON") and (data['Mod'] == "HEAT")
    return None


def check_if_ac_cool(room):
    """Return True when the AC is on in COOL mode, False when not, or None
    when the status payload carries no 'Pow' field."""
    data = _status_json(room)
    print(data)
    if 'Pow' in data:
        return (data['Pow'] == "ON") and (data['Mod'] == "COOL")
    return None


def set_cool_temp_ac(room, temp):
    """Set the cooling target temperature (string *temp*, appended to the
    query URL) when the state machine says cooling is confirmed on.

    Fix: the heating guard used a hard-coded 'br', so adjusting the office
    AC consulted the bedroom's heater status.
    """
    state = br_state if room == 'br' else cb_state  # 'cb'
    if (not state['wasTurnedCool'] == 1 and check_if_ac_cool(room)) or check_if_ac_heat(room):
        return
    temp_url = dummy_ac_url
    if room == 'br':
        temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-bedroom?key=27fbc501b51b47663e77c46816a&temp='
    elif room == 'cb':
        temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-office?key=27fbc501b51b47663e77c46816a&temp='
    # Fix: added a timeout for consistency with the status requests.
    response = requests.get(temp_url + temp, timeout=(20, 30))
    print(response)


def turn_on_cool_ac(room):
    """Turn the AC on for cooling (26 C) unless already confirmed on.

    Two-phase protocol: record the attempt ("tried"), and on a later call
    that sees the controller in COOL mode, mark it confirmed ("was").
    Fix: the heating guard used a hard-coded 'br' for both rooms.
    """
    state = br_state if room == 'br' else cb_state  # 'cb'
    ac_cool = check_if_ac_cool(room)
    # Skip when cooling is already confirmed, the controller answer is
    # unusable (None), or the AC is currently heating.
    if ((state['wasTurnedCool'] == 1) and not state['triedTurnedCool'] == 1) \
            or (ac_cool is None) or check_if_ac_heat(room):
        return
    if ac_cool and (state['triedTurnedCool'] == 1):
        # Previous attempt worked: record "cooling on" as the confirmed state.
        _persist_state(room,
                       triedTurnedOff=0, wasTurnedOff=0,
                       triedTurnedCool=0, wasTurnedCool=1,
                       triedTurnedHeat=0, wasTurnedHeat=0)
        return
    cool_url = dummy_ac_url
    turn_on_url = dummy_ac_url
    temp_url = dummy_ac_url
    if room == 'br':
        turn_on_url = 'http://smart.levabd.pp.ua:2002/powerOn-bedroom?key=27fbc501b51b47663e77c46816a'
        cool_url = 'http://smart.levabd.pp.ua:2002/cool-bedroom?autoFan=false&key=27fbc501b51b47663e77c46816a'
        temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-bedroom?key=27fbc501b51b47663e77c46816a&temp=26'
    elif room == 'cb':
        turn_on_url = 'http://smart.levabd.pp.ua:2002/powerOn-office?key=27fbc501b51b47663e77c46816a'
        cool_url = 'http://smart.levabd.pp.ua:2002/cool-office?autoFan=false&key=27fbc501b51b47663e77c46816a'
        temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-office?key=27fbc501b51b47663e77c46816a&temp=26'
    # Record the attempt before issuing the commands.
    _persist_state(room, triedTurnedCool=1, wasTurnedCool=0)
    # Order matters to the controller: temperature, mode, then power on.
    for url in (temp_url, cool_url, turn_on_url):
        response = requests.get(url, timeout=(20, 30))
        print(response)


def turn_on_heat_ac(room):
    """Turn the AC on for heating (25 C) unless already confirmed on.

    Same two-phase attempt/confirm protocol as turn_on_cool_ac.
    """
    state = br_state if room == 'br' else cb_state  # 'cb'
    ac_heat = check_if_ac_heat(room)
    if ((state['wasTurnedHeat'] == 1) and not state['triedTurnedHeat'] == 1) or (ac_heat is None):
        return
    if ac_heat and (state['triedTurnedHeat'] == 1):
        # Previous attempt worked: record "heating on" as the confirmed state.
        _persist_state(room,
                       triedTurnedOff=0, wasTurnedOff=0,
                       triedTurnedCool=0, wasTurnedCool=0,
                       triedTurnedHeat=0, wasTurnedHeat=1)
        return
    heat_url = dummy_ac_url
    turn_on_url = dummy_ac_url
    temp_url = dummy_ac_url
    if room == 'br':
        turn_on_url = 'http://smart.levabd.pp.ua:2002/powerOn-bedroom?key=27fbc501b51b47663e77c46816a'
        heat_url = 'http://smart.levabd.pp.ua:2002/heat-bedroom?key=27fbc501b51b47663e77c46816a'
        temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-bedroom?key=27fbc501b51b47663e77c46816a&temp=25'
    elif room == 'cb':
        turn_on_url = 'http://smart.levabd.pp.ua:2002/powerOn-office?key=27fbc501b51b47663e77c46816a'
        heat_url = 'http://smart.levabd.pp.ua:2002/heat-office?autoFan=false&key=27fbc501b51b47663e77c46816a'
        temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-office?key=27fbc501b51b47663e77c46816a&temp=25'
    # Record the attempt before issuing the commands.
    _persist_state(room, triedTurnedHeat=1, wasTurnedHeat=0)
    for url in (temp_url, heat_url, turn_on_url):
        response = requests.get(url, timeout=(20, 30))
        print(response)
def turn_off_ac(room):
    """Turn the AC off, confirming a previous attempt when the controller
    already reports it off.

    Fixes: (1) the confirmation branch checked ``triedTurnedCool`` instead
    of ``triedTurnedOff`` (copy-paste from turn_on_cool_ac), so a completed
    power-off was only recorded while a *cool* attempt was pending;
    (2) unlike its cool/heat siblings the branch did not return, so the
    confirmation was immediately overwritten and power-off re-sent.
    """
    state = br_state if room == 'br' else cb_state  # 'cb'
    ac_off = check_if_ac_off(room)
    if ((state['wasTurnedOff'] == 1) and not state['triedTurnedOff'] == 1) or (ac_off is None):
        return
    if ac_off and (state['triedTurnedOff'] == 1):
        if room == 'br':
            br_state['triedTurnedOff'] = 0
            br_state['wasTurnedOff'] = 1
            br_state['triedTurnedCool'] = 0
            br_state['wasTurnedCool'] = 0
            br_state['triedTurnedHeat'] = 0
            br_state['wasTurnedHeat'] = 0
            with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
                json.dump(br_state, file)
        elif room == 'cb':
            cb_state['triedTurnedOff'] = 0
            cb_state['wasTurnedOff'] = 1
            cb_state['triedTurnedCool'] = 0
            cb_state['wasTurnedCool'] = 0
            cb_state['triedTurnedHeat'] = 0
            cb_state['wasTurnedHeat'] = 0
            with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
                json.dump(cb_state, file)
        return
    turn_url = dummy_ac_url
    if room == 'br':
        turn_url = 'http://smart.levabd.pp.ua:2002/powerOff-bedroom?key=27fbc501b51b47663e77c46816a'
    elif room == 'cb':
        turn_url = 'http://smart.levabd.pp.ua:2002/powerOff-office?key=27fbc501b51b47663e77c46816a'
    # Record the power-off attempt before issuing the command.
    if room == 'br':
        br_state['triedTurnedOff'] = 1
        br_state['wasTurnedOff'] = 0
        with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
            json.dump(br_state, file)
    elif room == 'cb':
        cb_state['triedTurnedOff'] = 1
        cb_state['wasTurnedOff'] = 0
        with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
            json.dump(cb_state, file)
    # Fix: added a timeout for consistency with the status requests.
    response = requests.get(turn_url, timeout=(20, 30))
    print(response)


def record_temp_humid(temperature, humidity, room):
    """Record temperature and humidity data for web interface monitor.

    Uploads a small JSON document over SFTP.
    """
    dicty = {
        "temperature": temperature,
        "humidity": humidity
    }
    # SECURITY(review): host credentials are hard-coded in source; move them
    # to a protected config file or environment variables.
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect('smart.levabd.pp.ua', port=2001, username='levabd',
                password='vapipu280.')
    try:
        sftp = ssh.open_sftp()
        with sftp.open('smart-home-temp-humidity-monitor/' + room + '.json', 'w') as outfile:
            json.dump(dicty, outfile)
    finally:
        # Fix: close the SSH connection even if the upload fails.
        ssh.close()


def poll_temp_humidity(room):
    """Poll data from the sensor.

    Returns (today, temperature, humidity) for the room's Mi BT sensor.
    """
    today = datetime.datetime.today()
    backend = BluepyBackend
    mac = '58:2d:34:38:be:2e' if room == 'br' else '58:2d:34:39:27:4e'  # 'cb'
    poller = MiTempBtPoller(mac, backend)
    temperature = poller.parameter_value(MI_TEMPERATURE)
    humidity = poller.parameter_value(MI_HUMIDITY)
    print("Month: {}".format(today.month))
    print("Getting data from Mi Temperature and Humidity Sensor")
    print("FW: {}".format(poller.firmware_version()))
    print("Name: {}".format(poller.name()))
    print("Battery: {}".format(poller.parameter_value(MI_BATTERY)))
    print("Temperature: {}".format(poller.parameter_value(MI_TEMPERATURE)))
    print("Humidity: {}".format(poller.parameter_value(MI_HUMIDITY)))
    return (today, temperature, humidity)


def list_backends(_):
    """List all available backends."""
    backends = [b.__name__ for b in available_backends()]
    print('\n'.join(backends))


def main():
    """Main function: poll both rooms, publish readings, and apply the
    month/hour-dependent AC schedule."""
    # check bedroom
    (today, temperature, humidity) = poll_temp_humidity('br')
    # Record temperature and humidity for monitor
    record_temp_humid(temperature, humidity, 'br')
    # Reset both state machines once a night so a stuck attempt flag cannot
    # block the controller forever.
    if today.hour == 3:
        br_state['triedTurnedOff'] = 0
        br_state['wasTurnedOff'] = 0
        br_state['triedTurnedCool'] = 0
        br_state['wasTurnedCool'] = 0
        br_state['triedTurnedHeat'] = 0
        br_state['wasTurnedHeat'] = 0
        cb_state['triedTurnedOff'] = 0
        cb_state['wasTurnedOff'] = 0
        cb_state['triedTurnedCool'] = 0
        cb_state['wasTurnedCool'] = 0
        cb_state['triedTurnedHeat'] = 0
        cb_state['wasTurnedHeat'] = 0
        with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
            json.dump(br_state, file)
        with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
            json.dump(cb_state, file)
    # Bedroom schedule: thresholds differ by month and hour (heating only
    # relevant in October; summer cooling between specific hours).
    if (temperature > 32) and (today.hour < 24) and (today.hour > 7):
        turn_on_cool_ac('br')
    if (temperature > 25.3) and (today.month < 10) and (today.month > 4) and (today.hour < 8) and (today.hour > 4):
        turn_on_cool_ac('br')
    if (temperature < 22) and (today.month == 10) and (today.hour < 9):
        turn_on_heat_ac('br')
    if (temperature < 22) and (today.month == 10) and (today.hour > 22):
        turn_on_heat_ac('br')
    if (temperature > 25) and (today.month == 10) and (today.hour < 9):
        turn_off_ac('br')
    if (temperature > 25) and (today.month == 10) and (today.hour > 22):
        turn_off_ac('br')
    if (today.month == 10) and (today.hour == 0) and (today.minute == 0):
        turn_off_ac('br')
    if (temperature < 23.3) and (today.hour < 8) and (today.hour > 4) and (not check_if_ac_heat('br')):
        turn_off_ac('br')
    if (temperature < 19) and (today.hour < 24) and (today.hour > 8) and (not check_if_ac_heat('br')):
        turn_off_ac('br')
    # record the office room numbers
    (_, temperature, humidity) = poll_temp_humidity('cb')
    record_temp_humid(temperature, humidity, 'cb')


if __name__ == '__main__':
    main()
16,428
6,731
# Copyright (c) 2021 CNES/JPL
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""
Orbital error
-------------
"""
from typing import Dict, Tuple

# Fix: these two imports were commented out although the module uses
# ``da.from_array`` and ``random_signal.gen_psd_1d`` below, which made the
# code fail with NameError at runtime.
import dask.array as da
import numpy as np

from .. import random_signal
from .. import settings
from .. import VOLUMETRIC_MEAN_RADIUS

#: Signal amplitude of the orbital error in micro-radians
AMPLITUDE = 100

#: Delta T of the spatial sampling in seconds
DT = 60


def _orbital_error_spectrum(
        orbit_duration: np.timedelta64,
        rng: np.random.Generator) -> Tuple[np.ndarray, float]:
    """Calculate a 1D realization of the orbital error spectrum.

    Args:
        orbit_duration (np.timedelta64): Orbit duration.
        rng (np.random.Generator): Random number generator.

    Returns:
        tuple: the signal realization and the maximum resolved frequency,
        as produced by :func:`random_signal.gen_psd_1d`.
    """
    # Frequency resolution: one cycle per 1000 days, expressed in Hz.
    df = 1 / (1000 * 86400)
    spatial_frequency = np.arange(df, 1 / DT, df)
    # Orbital frequency in Hz derived from the orbit duration.
    orbital_frequency = 1 / float(
        orbit_duration.astype("timedelta64[us]").astype("float64") * 1e-6)
    sigma_peak = orbital_frequency / 1000
    # Gaussian peak centered on the orbital frequency.
    ps_orbital = np.exp(-0.5 * (spatial_frequency - orbital_frequency)**2 /
                        sigma_peak**2)
    # Zero the negligible tails, then normalize the PSD so that the signal
    # carries the target amplitude.
    ps_orbital[ps_orbital < 1 / 1000] = 0.
    ps_orbital /= np.sum(ps_orbital * df)
    ps_orbital *= AMPLITUDE**2
    return random_signal.gen_psd_1d(spatial_frequency,
                                    ps_orbital,
                                    rng,
                                    alpha=10)


class Orbital:
    """Simulate the error orbital.

    Args:
        parameters (settings.Parameters): Simulation parameters.
        orbit_duration (np.timedelta64): Orbit duration.
    """
    def __init__(self, parameters: settings.Parameters,
                 orbit_duration: np.timedelta64) -> None:
        yg, self.fmaxr = _orbital_error_spectrum(orbit_duration,
                                                 parameters.rng())
        # Keep the realization as a persisted dask array so it is computed
        # once and shared between workers.
        self.yg = da.from_array(yg, name="orbital_error").persist()
        assert parameters.height is not None
        height = parameters.height * 1e-3
        # Angular (micro-radian) to across-track distance conversion factor.
        self.conversion_factor = (1 + height / VOLUMETRIC_MEAN_RADIUS) * 1e-3

    def generate(
        self,
        time: np.ndarray,
        x_ac: np.ndarray,
    ) -> Dict[str, np.ndarray]:
        """Generate the orbital error.

        Args:
            time (np.ndarray): Time vector (datetime64).
            x_ac (np.ndarray): Across-track distance vector.

        Returns:
            dict: mapping ``simulated_error_orbital`` to the error array of
            shape (len(time), len(x_ac)).
        """
        time = time.astype("datetime64[us]").astype("float64") * 1e-6
        xg = np.linspace(0, 0.5 / self.fmaxr * self.yg.shape[0],
                         self.yg.shape[0])
        # Wrap the time axis onto the periodic realization and interpolate.
        error_orbital = np.interp(np.mod(time, xg.max()), xg,
                                  self.yg.compute())
        return {
            "simulated_error_orbital":
            x_ac * error_orbital[:, np.newaxis] * self.conversion_factor,
        }
2,880
981