Dataset schema (column: dtype):

hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
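The columns above are the schema of a code-corpus shard: one row per source file, carrying repo metadata (stars/issues/forks variants), the raw file content, a battery of qsc_* quality signals, and the trailing effective/hits fields. As a minimal inspection sketch -- assuming, purely for illustration, that the rows are stored in a Parquet file named shard.parquet; the dump itself does not say how the data is serialized:

    # Minimal inspection sketch. "shard.parquet" is a hypothetical file
    # name -- the dump does not name its storage format or location.
    import pandas as pd

    df = pd.read_parquet("shard.parquet")

    # Check that the columns and dtypes match the schema listed above.
    print(df.dtypes)

    # Example filter: small Python files that the quality signals do not
    # flag as auto-generated (qsc_code_cate_autogen_quality_signal == 0).
    small_py = df[
        (df["lang"] == "Python")
        & (df["size"] < 10_000)
        & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    ]
    print(small_py[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())

The example rows below follow the same column order as the schema.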
d68b45e4e7a07123f8215b18f29f9f415483134c
219
py
Python
list.py
TomckySan/python-training
7d5214d01e8844a314d4a5aea6a4e35afa19f729
[ "MIT" ]
null
null
null
list.py
TomckySan/python-training
7d5214d01e8844a314d4a5aea6a4e35afa19f729
[ "MIT" ]
null
null
null
list.py
TomckySan/python-training
7d5214d01e8844a314d4a5aea6a4e35afa19f729
[ "MIT" ]
null
null
null
# coding: utf-8
sales = [255, 100, 353, 400]
print len(sales)
print sales[2]

sales[2] = 100
print sales[2]

# whether the value is contained or not
print 100 in sales
print 500 in sales

# range
print range(10)
print range(3,10)
print range(3,10,2)
13.6875
27
0.69863
42
219
3.642857
0.404762
0.117647
0.143791
0.169935
0.183007
0
0
0
0
0
0
0.186813
0.16895
219
15
28
14.6
0.653846
0.127854
0
0.2
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.8
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
3
d68d945342e1ae0d7e7a7a1d0a9e54406e6ceb70
18,034
py
Python
booltest/battery.py
sobuch/polynomial-distinguishers
5a007abd222d00cbf99f1083c3b537343d2fff56
[ "MIT" ]
5
2017-03-03T13:53:51.000Z
2019-05-09T09:47:28.000Z
booltest/battery.py
sobuch/polynomial-distinguishers
5a007abd222d00cbf99f1083c3b537343d2fff56
[ "MIT" ]
5
2017-10-07T11:15:09.000Z
2021-01-25T17:03:59.000Z
booltest/battery.py
sobuch/polynomial-distinguishers
5a007abd222d00cbf99f1083c3b537343d2fff56
[ "MIT" ]
6
2017-03-26T17:06:20.000Z
2021-11-15T22:22:33.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import argparse
import coloredlogs
import logging
import json
import itertools
import shlex
import time
import queue
import sys
import os
import collections
import tempfile

from jsonpath_ng import jsonpath, parse

from .runner import AsyncRunner
from .common import merge_pvals, booltest_pval
from . import common

logger = logging.getLogger(__name__)
coloredlogs.install(level=logging.INFO)


"""
Config can look like this:
{
  "default-cli": "--no-summary --json-out --log-prints --top 128 --no-comb-and --only-top-comb --only-top-deg --no-term-map --topterm-heap --topterm-heap-k 256 --best-x-combs 512",
  "strategies": [
    {
      "name": "v1",
      "cli": "",
      "variations": [
        {
          "bl": [128, 256, 384, 512],
          "deg": [1],
          "cdeg": [1],
          "exclusions": []
        }
      ]
    },
    {
      "name": "halving",
      "cli": "--halving",
      "variations": [
        {
          "bl": [128, 256, 384, 512],
          "deg": [1, 2, 3],
          "cdeg": [1, 2, 3],
          "exclusions": []
        }
      ]
    }
  ]
}
"""


def jsonpath(path, obj, allow_none=False):
    r = [m.value for m in parse(path).find(obj)]
    return r[0] if not allow_none else (r[0] if r else None)


def listize(obj):
    return obj if (obj is None or isinstance(obj, list)) else [obj]


def get_runner(cli, cwd=None, rtt_env=None):
    async_runner = AsyncRunner(cli, cwd=cwd, shell=False, env=rtt_env)
    async_runner.log_out_after = False
    async_runner.preexec_setgrp = True
    return async_runner


class BoolParamGen:
    def __init__(self, cli, vals):
        self.cli = cli
        self.vals = vals if isinstance(vals, list) else [vals]


class BoolJob:
    def __init__(self, cli, name, vinfo='', idx=None):
        self.cli = cli
        self.name = name
        self.vinfo = vinfo
        self.idx = idx

    def is_halving(self):
        return '--halving' in self.cli


class BoolRes:
    def __init__(self, job, ret_code, js_res, is_halving, rejects=False, pval=None, alpha=None, stderr=None):
        self.job = job  # type: BoolJob
        self.ret_code = ret_code
        self.js_res = js_res
        self.is_halving = is_halving
        self.rejects = rejects
        self.alpha = alpha
        self.pval = pval
        self.stderr = stderr


class BoolRunner:
    def __init__(self):
        self.args = None
        self.bool_config = None
        self.parallel_tasks = None
        self.bool_wrapper = None
        self.job_queue = queue.Queue(maxsize=0)
        self.runners = []  # type: List[Optional[AsyncRunner]]
        self.comp_jobs = []  # type: List[Optional[BoolJob]]
        self.results = []

    def init_config(self):
        self.parallel_tasks = self.args.threads or 1
        self.bool_wrapper = self.args.booltest_bin
        try:
            if self.args.config:
                with open(self.args.config) as fh:
                    self.bool_config = json.load(fh)
                if not self.bool_wrapper:
                    self.bool_wrapper = jsonpath("$.wrapper", self.bool_config, True)
                if not self.args.threads:
                    self.parallel_tasks = jsonpath("$.threads", self.bool_config, True) or self.args.threads or 1
        except Exception as e:
            logger.error("Could not load the config %s" % (e,), exc_info=e)
        if not self.bool_wrapper:
            self.bool_wrapper = "\"%s\" -m booltest.booltest_main" % sys.executable

    def norm_methods(self, methods):
        res = set()
        for m in methods:
            if m == 'v1':
                res.add('1')
            elif m == '1':
                res.add(m)
            elif m == 'halving':
                res.add('2')
            elif m == 'v2':
                res.add('2')
            elif m == '2':
                res.add(m)
            else:
                raise ValueError("Unknown method %s" % m)
        return sorted(list(res))

    def norm_params(self, params, default):
        if params is None or len(params) == 0:
            return default
        return [int(x) for x in params]

    def generate_jobs(self):
        dcli = self.args.cli
        if dcli is None:
            dcli = jsonpath('$.default-cli', self.bool_config, True)
        if dcli is None:
            dcli = '--no-summary --json-out --log-prints --top 128 --no-comb-and --only-top-comb --only-top-deg ' \
                   '--no-term-map --topterm-heap --topterm-heap-k 256 --best-x-combs 512'
        if '--no-summary' not in dcli:
            dcli += ' --no-summary'
        if '--json-out' not in dcli:
            dcli += ' --json-out'
        if '--log-prints' not in dcli:
            dcli += ' --log-prints'

        strategies = jsonpath('$.strategies', self.bool_config, True)
        if strategies is None:
            strategies = []
            methods = self.norm_methods(self.args.methods or ["1", "2"])
            for mt in methods:
                strat = collections.OrderedDict()
                strat['name'] = "v%s" % mt
                strat['cli'] = "--halving" if mt == '2' else ''
                strat['variations'] = [collections.OrderedDict([
                    ('bl', self.norm_params(self.args.block, [128, 256, 384, 512])),
                    ('deg', self.norm_params(self.args.deg, [1, 2])),
                    ('cdeg', self.norm_params(self.args.comb_deg, [1, 2])),
                    ('exclusions', []),
                ])]
                strategies.append(strat)

        for st in strategies:
            name = st['name']
            st_cli = jsonpath('$.cli', st, True) or ''
            st_vars = jsonpath('$.variations', st, True) or []
            ccli = ('%s %s' % (dcli, st_cli)).strip()
            if not st_vars:
                yield BoolJob(ccli, name)
                continue
            for cvar in st_vars:
                blocks = listize(jsonpath('$.bl', cvar, True)) or [None]
                degs = listize(jsonpath('$.deg', cvar, True)) or [None]
                cdegs = listize(jsonpath('$.cdeg', cvar, True)) or [None]
                pcli = ['--block', '--degree', '--combine-deg']
                vinfo = ['', '', '']
                iterator = itertools.product(blocks, degs, cdegs)
                for el in iterator:
                    c = ' '.join([(('%s %s') % (pcli[ix], dt)) for (ix, dt) in enumerate(el) if dt is not None])
                    vi = '-'.join([(('%s%s') % (vinfo[ix], dt)).strip() for (ix, dt) in enumerate(el) if dt is not None])
                    ccli0 = ('%s %s' % (ccli, c)).strip()
                    yield BoolJob(ccli0, name, vi)

    def run_job(self, cli):
        async_runner = get_runner(shlex.split(cli))
        logger.info("Starting async command %s" % cli)
        async_runner.start()
        while async_runner.is_running:
            time.sleep(1)
        logger.info("Async command finished")

    def on_finished(self, job, runner, idx):
        if runner.ret_code != 0:
            logger.warning("Return code of job %s is %s" % (idx, runner.ret_code))
            stderr = ("\n".join(runner.err_acc)).strip()
            br = BoolRes(job, runner.ret_code, None, job.is_halving, stderr=stderr)
            self.results.append(br)
            return
        results = runner.out_acc
        buff = (''.join(results)).strip()
        try:
            js = json.loads(buff)
            is_halving = js['halving']
            br = BoolRes(job, 0, js, is_halving)
            if not is_halving:
                br.rejects = [m.value for m in parse('$.inputs[0].res[0].rejects').find(js)][0]
                br.alpha = [m.value for m in parse('$.inputs[0].res[0].ref_alpha').find(js)][0]
                logger.info('rejects: %s, at alpha %.5e' % (br.rejects, br.alpha))
            else:
                br.pval = [m.value for m in parse('$.inputs[0].res[1].halvings[0].pval').find(js)][0]
                logger.info('halving pval: %5e' % br.pval)
            self.results.append(br)
        except Exception as e:
            logger.error("Exception processing results: %s" % (e,), exc_info=e)
            logger.warning("[[[%s]]]" % buff)

    def on_results_ready(self):
        try:
            logger.info("="*80)
            logger.info("Results")
            ok_results = [r for r in self.results if r.ret_code == 0]
            nok_results = [r for r in self.results if r.ret_code != 0]
            bat_errors = ['Job %d (%s-%s), ret_code %d' % (r.job.idx, r.job.name, r.job.vinfo, r.ret_code)
                          for r in self.results if r.ret_code != 0]
            if nok_results:
                logger.warning("Some jobs failed with error: \n%s" % ("\n".join(bat_errors)))
                for r in nok_results:
                    logger.info("Job %s, (%s-%s)" % (r.job.idx, r.job.name, r.job.vinfo))
                    logger.info("Stderr: %s" % r.stderr)
            v1_jobs = [r for r in ok_results if not r.is_halving]
            v2_jobs = [r for r in ok_results if r.is_halving]
            v1_sum = collections.OrderedDict()
            v2_sum = collections.OrderedDict()
            if v1_jobs:
                rejects = [r for r in v1_jobs if r.rejects]
                v1_sum['alpha'] = max([x.alpha for x in v1_jobs])
                v1_sum['pvalue'] = booltest_pval(nfails=len(rejects), ntests=len(v1_jobs), alpha=v1_sum['alpha'])
                v1_sum['npassed'] = sum([1 for r in v1_jobs if not r.rejects])
            if v2_jobs:
                pvals = [r.pval for r in v2_jobs]
                v2_sum['npassed'] = sum([1 for r in v2_jobs if r.pval >= self.args.alpha])
                v2_sum['pvalue'] = merge_pvals(pvals)[0] if len(pvals) > 1 else -1
            if v1_jobs:
                logger.info("V1 results:")
                self.print_test_res(v1_jobs)
            if v2_jobs:
                logger.info("V2 results:")
                self.print_test_res(v2_jobs)
            logger.info("=" * 80)
            logger.info("Summary: ")
            if v1_jobs:
                logger.info("v1 tests: %s, #passed: %s, pvalue: %s" % (len(v1_jobs), v1_sum['npassed'], v1_sum['pvalue']))
            if v2_jobs:
                logger.info("v2 tests: %s, #passed: %s, pvalue: %s" % (len(v2_jobs), v2_sum['npassed'], v2_sum['pvalue']))
            if not self.args.json_out and not self.args.json_out_file:
                return
            jsout = collections.OrderedDict()
            jsout["nfailed_jobs"] = len(nok_results)
            jsout["failed_jobs_stderr"] = [r.stderr for r in nok_results]
            jsout["results"] = common.noindent_poly([r.js_res for r in ok_results])
            kwargs = {'indent': 2} if self.args.json_nice else {}
            if self.args.json_out:
                print(common.json_dumps(jsout, **kwargs))
            if self.args.json_out_file:
                with open(self.args.json_out_file, 'w+') as fh:
                    common.json_dump(jsout, fh, **kwargs)
            jsout = common.jsunwrap(jsout)
            return jsout
        except Exception as e:
            logger.warning("Exception in results processing: %s" % (e,), exc_info=e)

    def print_test_res(self, res):
        for rs in res:  # type: BoolRes
            passed = (rs.pval >= self.args.alpha if rs.is_halving else not rs.rejects) if rs.ret_code == 0 else None
            desc_str = ""
            if rs.is_halving:
                desc_str = "pvalue: %5e" % (rs.pval,)
            else:
                desc_str = "alpha: %5e" % (rs.alpha,)
            res = rs.js_res["inputs"][0]["res"]
            dist_poly = jsonpath('$[0].dists[0].poly', res, True)
            time_elapsed = jsonpath('$.time_elapsed', rs.js_res, True)
            best_dist_zscore = jsonpath('$[0].dists[0].zscore', res, True) or -1
            ref_zscore_min = jsonpath('$[0].ref_minmax[0]', res, True) or -1
            ref_zscore_max = jsonpath('$[0].ref_minmax[1]', res, True) or -1
            aux_str = ""
            if rs.is_halving:
                best_dist_zscore_halving = jsonpath('$[1].dists[0].zscore', res, True)
                aux_str = "Learn: (z-score: %.5f, acc. zscores: [%.5f, %.5f]), Eval: (z-score: %.5f)" \
                          % (best_dist_zscore, ref_zscore_min, ref_zscore_max, best_dist_zscore_halving)
            else:
                aux_str = "z-score: %.5f, acc. zscores: [%.5f, %.5f]" \
                          % (best_dist_zscore, ref_zscore_min, ref_zscore_max)
            logger.info(" - %s %s: passed: %s, %s, dist: %s\n elapsed time: %6.2f s, %s"
                        % (rs.job.name, rs.job.vinfo, passed, desc_str, dist_poly, time_elapsed, aux_str))

    def work(self):
        if len(self.args.files) != 1:
            raise ValueError("Provide exactly one file to test")
        ifile = self.args.files[0]
        if ifile != '-' and not os.path.exists(ifile):
            raise ValueError("Provided input file not found")
        tmp_file = None
        if ifile == '-':
            tmp_file = tempfile.NamedTemporaryFile(prefix="booltest-bat-inp", delete=True)
            while True:
                data = sys.stdin.read(4096) if sys.version_info < (3,) else sys.stdin.buffer.read(4096)
                if data is None or len(data) == 0:
                    break
                tmp_file.write(data)
            ifile = tmp_file.name
        jobs = [x for x in self.generate_jobs()]
        for i, j in enumerate(jobs):
            j.idx = i
        self.runners = [None] * self.parallel_tasks
        self.comp_jobs = [None] * self.parallel_tasks
        for j in jobs:
            self.job_queue.put_nowait(j)
        while not self.job_queue.empty() or sum([1 for x in self.runners if x is not None]) > 0:
            time.sleep(0.1)
            # Realloc work
            for i in range(len(self.runners)):
                if self.runners[i] is not None and self.runners[i].is_running:
                    continue
                was_empty = self.runners[i] is None
                if not was_empty:
                    self.job_queue.task_done()
                    logger.info("Task %d done, job queue size: %d, running: %s"
                                % (i, self.job_queue.qsize(), sum([1 for x in self.runners if x])))
                    self.on_finished(self.comp_jobs[i], self.runners[i], i)
                # Start a new task, if any
                try:
                    job = self.job_queue.get_nowait()  # type: BoolJob
                except queue.Empty:
                    self.runners[i] = None
                    continue
                cli = '%s %s "%s"' % (self.bool_wrapper, job.cli, ifile)
                self.comp_jobs[i] = job
                self.runners[i] = get_runner(shlex.split(cli))
                logger.info("Starting async command %s %s, %s" % (job.name, job.vinfo, cli))
                self.runners[i].start()
        return self.on_results_ready()

    def main(self):
        parser = self.argparser()
        self.args = parser.parse_args()
        self.init_config()
        return self.work()

    def argparser(self):
        parser = argparse.ArgumentParser(description='BoolTest Battery Runner')
        parser.add_argument('--debug', dest='debug', action='store_const', const=True,
                            help='enables debug mode')
        parser.add_argument('-c', '--config', default=None,
                            help='Test config')
        parser.add_argument('--alpha', dest='alpha', type=float, default=1e-4,
                            help='Alpha value for pass/fail')
        parser.add_argument('-t', dest='threads', type=int, default=1,
                            help='Maximum parallel threads')
        parser.add_argument('--block', dest='block', nargs=argparse.ZERO_OR_MORE, default=None, type=int,
                            help='List of block sizes to test')
        parser.add_argument('--deg', dest='deg', nargs=argparse.ZERO_OR_MORE, default=None, type=int,
                            help='List of degree to test')
        parser.add_argument('--comb-deg', dest='comb_deg', nargs=argparse.ZERO_OR_MORE, default=None, type=int,
                            help='List of degree of combinations to test')
        parser.add_argument('--methods', dest='methods', nargs=argparse.ZERO_OR_MORE, default=None,
                            help='List of methods to test, supported: 1, 2, halving')
        parser.add_argument('files', nargs=argparse.ONE_OR_MORE, default=[],
                            help='files to process')
        parser.add_argument('--stdin', dest='stdin', action='store_const', const=True, default=False,
                            help='Read from the stdin')
        parser.add_argument('--booltest-bin', dest='booltest_bin',
                            help='Specify BoolTest binary launcher. If not specified, autodetected.')
        parser.add_argument('--cli', dest='cli',
                            help='Specify common BoolTest CLI options')
        parser.add_argument('--json-out', dest='json_out', action='store_const', const=True, default=False,
                            help='Produce json result')
        parser.add_argument('--json-out-file', dest='json_out_file', default=None,
                            help='Produce json result to a file')
        parser.add_argument('--json-nice', dest='json_nice', action='store_const', const=True, default=False,
                            help='Nicely formatted json output')
        return parser


def main():
    br = BoolRunner()
    return br.main()


if __name__ == '__main__':
    main()
37.886555
182
0.523345
2,256
18,034
4.055408
0.155142
0.020111
0.027872
0.003826
0.260794
0.173789
0.15029
0.132583
0.100557
0.087113
0
0.016541
0.342963
18,034
475
183
37.966316
0.755591
0.010314
0
0.118497
0
0.011561
0.135592
0.006541
0
0
0
0
0
1
0.057803
false
0.028902
0.046243
0.00578
0.156069
0.020231
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d68e21dba61d5cbeb398194248f4e63acb8aae21
8,346
py
Python
traincifar224.py
iitm-sysdl/FuSeConv
04cdf54abfdbf359235d1b4c0848f188b1abbf2d
[ "Apache-2.0" ]
8
2021-02-08T22:12:53.000Z
2022-02-20T16:33:11.000Z
traincifar224.py
iitm-sysdl/FuSeConv
04cdf54abfdbf359235d1b4c0848f188b1abbf2d
[ "Apache-2.0" ]
null
null
null
traincifar224.py
iitm-sysdl/FuSeConv
04cdf54abfdbf359235d1b4c0848f188b1abbf2d
[ "Apache-2.0" ]
4
2021-03-04T11:21:42.000Z
2022-02-15T07:47:19.000Z
'''
FuSeConv: Fully Separable Convolutions for Fast Inference on Systolic Arrays
Authors: Surya Selvam, Vinod Ganesan, Pratyush Kumar
Email ID: selvams@purdue.edu, vinodg@cse.iitm.ac.in, pratyush@cse.iitm.ac.in
'''
import os
import torch
import wandb
import random
import argparse
import torchvision
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from utils import *
from models import *

def dumpData(flag, string):
    if flag == 'train':
        meta = open(args.name+'/metadataTrain.txt', "a")
        meta.write(string)
        meta.close()
    else:
        meta = open(args.name+'/metadataTest.txt', "a")
        meta.write(string)
        meta.close()

def train(net, trainloader, criterion, optimizer, epoch):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs = inputs.cuda()
        targets = targets.cuda()
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
        progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
    string = str(epoch) + ',' + str(train_loss) + ',' + str(correct*1.0/total) + '\n'
    dumpData('train', string)
    wandb.log({"Train Loss": train_loss, "Train Accuracy": 100*correct/total}, step=epoch)

def test(net, testloader, criterion, epoch):
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
    string = str(epoch) + ',' + str(test_loss) + ',' + str(correct*1.0/total) + '\n'
    dumpData('test', string)
    wandb.log({"Test Loss": test_loss, "Test Accuracy": 100*correct/total}, step=epoch)
    return correct*1.0/total

def main():
    wandb.init(name=args.name, project="cifar-224-full-variant")
    transform_train = transforms.Compose([
        transforms.Resize(224),
        transforms.RandomCrop(224, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    if args.Dataset == 'CIFAR10':
        trainset = torchvision.datasets.CIFAR10(root='data', train=True, download=True, transform=transform_train)
        testset = torchvision.datasets.CIFAR10(root='data', train=False, download=True, transform=transform_test)
        numClasses = 10
    elif args.Dataset == 'CIFAR100':
        trainset = torchvision.datasets.CIFAR100(root='data', train=True, download=True, transform=transform_train)
        testset = torchvision.datasets.CIFAR100(root='data', train=False, download=True, transform=transform_test)
        numClasses = 100
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=4)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=4)
    if args.variant == 'baseline':
        if args.Network == 'ResNet':
            net = ResNet50(numClasses)
        elif args.Network == 'MobileNetV1':
            net = MobileNetV1(numClasses)
        elif args.Network == 'MobileNetV2':
            net = MobileNetV2(numClasses)
        elif args.Network == 'MobileNetV3S':
            net = MobileNetV3('small', numClasses)
        elif args.Network == 'MobileNetV3L':
            net = MobileNetV3('large', numClasses)
        elif args.Network == 'MnasNet':
            net = MnasNet(numClasses)
    elif args.variant == 'half':
        if args.Network == 'ResNet':
            net = ResNet50FuSeHalf(numClasses)
        elif args.Network == 'MobileNetV1':
            net = MobileNetV1FuSeHalf(numClasses)
        elif args.Network == 'MobileNetV2':
            net = MobileNetV2FuSeHalf(numClasses)
        elif args.Network == 'MobileNetV3S':
            net = MobileNetV3FuSeHalf('small', numClasses)
        elif args.Network == 'MobileNetV3L':
            net = MobileNetV3FuSeHalf('large', numClasses)
        elif args.Network == 'MnasNet':
            net = MnasNetFuSeHalf(numClasses)
    elif args.variant == 'full':
        if args.Network == 'ResNet':
            net = ResNet50FuSeFull(numClasses)
        elif args.Network == 'MobileNetV1':
            net = MobileNetV1FuSeFull(numClasses)
        elif args.Network == 'MobileNetV2':
            net = MobileNetV2FuSeFull(numClasses)
        elif args.Network == 'MobileNetV3S':
            net = MobileNetV3FuSeFull('small', numClasses)
        elif args.Network == 'MobileNetV3L':
            net = MobileNetV3FuSeFull('large', numClasses)
        elif args.Network == 'MnasNet':
            net = MnasNetFuSeFull(numClasses)
    else:
        print("Provide a valid variant")
        exit(0)
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(net.parameters(), 0.1, momentum=0.9, weight_decay=5e-4)
    net.cuda()
    wandb.watch(net, log="all")
    bestAcc = 0
    startEpoch = 0
    if args.resume == True:
        assert os.path.isdir(args.name), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(args.name+'/BestModel.t7')
        net.load_state_dict(checkpoint['net'])
        bestAcc = checkpoint['acc']
        startEpoch = checkpoint['epoch']
        optimizer.load_state_dict(checkpoint['optimizer'])
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 40, 60, 70, 80, 90],
                                                        gamma=0.1, last_epoch=startEpoch-1)
    for epoch in range(startEpoch, 60):
        train(net, trainloader, criterion, optimizer, epoch)
        lr_scheduler.step()
        acc = test(net, testloader, criterion, epoch)
        state = {'net': net.state_dict(),
                 'acc': acc,
                 'epoch': epoch+1,
                 'optimizer': optimizer.state_dict()}
        if acc > bestAcc:
            torch.save(state, args.name+'/BestModel.t7')
            bestAcc = acc
            wandb.save('BestModel.h5')
        else:
            torch.save(state, args.name+'/LastEpoch.t7')
    meta = open(args.name+'/stats.txt', "a")
    s = args.variant
    meta.write(args.Dataset + ' , ' + args.Network + ' , ' + s + ' , ' + str(bestAcc) + '\n')
    meta.close()

if __name__ == '__main__':
    random.seed(42)
    torch.manual_seed(42)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    parser = argparse.ArgumentParser(description = "Train CIFAR Models")
    parser.add_argument("--Dataset", "-D", type = str, help = 'CIFAR10, CIFAR100', required=True)
    parser.add_argument("--Network", "-N", type = str,
                        help = 'ResNet, MobileNetV1, MobileNetV2, MobileNetV3S, MobileNetV3L, MnasNet', required=True)
    parser.add_argument("--name", "-n", type=str, help = 'Name of the run', required=True)
    parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
    parser.add_argument('--variant', '-v', type=str, help='baseline or half or full', required=True)
    args = parser.parse_args()
    if not os.path.isdir(args.name):
        os.mkdir(args.name)
    main()
39.367925
149
0.606278
920
8,346
5.43587
0.26087
0.041792
0.061188
0.074985
0.454709
0.395321
0.25215
0.158768
0.146771
0.146771
0
0.033742
0.257848
8,346
211
150
39.554502
0.773652
0.024682
0
0.274725
0
0
0.106493
0.002705
0
0
0
0
0.005495
1
0.021978
false
0
0.06044
0
0.087912
0.010989
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d69171373efa977663e506a9e0cd4ffbf706ae5a
2,590
py
Python
fabfile.py
prezi/snakebasket
8e2e91ef2c7d034fa45c8005e5217fec333808ee
[ "MIT" ]
24
2015-02-03T00:04:06.000Z
2021-09-14T06:50:01.000Z
fabfile.py
prezi/snakebasket
8e2e91ef2c7d034fa45c8005e5217fec333808ee
[ "MIT" ]
1
2021-03-23T10:44:18.000Z
2021-03-23T15:38:38.000Z
fabfile.py
prezi/snakebasket
8e2e91ef2c7d034fa45c8005e5217fec333808ee
[ "MIT" ]
5
2015-08-16T11:31:09.000Z
2021-12-27T13:31:33.000Z
import os.path

from fabric.api import local, env
from fabric.utils import fastprint
from prezi.fabric.s3 import CommonTasks, S3Deploy, NoopServiceManager

env.forward_agent = True
env.user = 'publisher'
env.roledefs = {'production': [], 'stage': [], 'local': []}


class SingleVirtualenvS3Deploy(S3Deploy):
    def __init__(self, app_name, buckets, revno):
        super(SingleVirtualenvS3Deploy, self).__init__(app_name, buckets, revno)
        self.service = NoopServiceManager(self)
        self.virtualenv = SingleVirtualenvService(self)


class SingleVirtualenvService(object):
    def __init__(self, deployer):
        self.deployer = deployer
        self.tarball_path = self.deployer.build_dir + '.tar'
        self.tarbz_path = self.tarball_path + '.bz2'
        self.tarbz_name = os.path.basename(self.tarbz_path)

    def build_tarbz(self):
        self.build_venv()
        self.compress_venv()

    def cleanup(self):
        local('rm -rf %s %s' % (self.tarbz_path, self.deployer.build_dir))

    def build_venv(self):
        fastprint('Building single virtualenv service in %s\n' % self.deployer.build_dir)
        # init + update pip submodule
        local('git submodule init; git submodule update')
        # builds venv
        self.run_virtualenv_cmd("--distribute --no-site-packages -p python2.7 %s" % self.deployer.build_dir)
        # installs app + dependencies
        local(' && '.join(
            ['. %s/bin/activate' % self.deployer.build_dir,
             'pip install --exists-action=s -e `pwd`/pip#egg=pip -e `pwd`@master#egg=snakebasket -r requirements-development.txt']
        ))
        # makes venv relocatable
        self.run_virtualenv_cmd("--relocatable -p python2.7 %s" % self.deployer.build_dir)

    def compress_venv(self):
        fastprint('Compressing virtualenv')
        local('tar -C %(build_dir)s/.. -cjf %(tarbz_path)s %(dirname)s' % {
            'build_dir': self.deployer.build_dir,
            'tarbz_path': self.tarbz_path,
            'dirname': os.path.basename(self.deployer.build_dir)
        })

    def run_virtualenv_cmd(self, args):
        if not isinstance(args, list):
            args = args.split()
        fastprint('Running virtualenv with args %s\n' % args)
        local("env VERSIONER_PYTHON_VERSION='' virtualenv %s" % ' '.join(args))

    @property
    def upload_source(self):
        return self.tarbz_path

    @property
    def upload_target(self):
        return self.tarbz_name


tasks = CommonTasks(SingleVirtualenvS3Deploy, 'snakebasket', None)
snakebasket_build = tasks.build
cleanup = tasks.cleanup
35.479452
130
0.660232
310
2,590
5.348387
0.354839
0.072376
0.082027
0.096502
0.08263
0.036188
0.036188
0.036188
0
0
0
0.005435
0.218533
2,590
72
131
35.972222
0.813735
0.034749
0
0.037736
0
0.018868
0.214429
0.033267
0
0
0
0
0
1
0.169811
false
0
0.075472
0.037736
0.320755
0.075472
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d69196f51793e1153bef57beef63e6af53ecc91a
706
py
Python
djstripe/__init__.py
TigerDX/dj-stripe
2fd4897abaedf2d9faa3dd5af86402dae3ab86a3
[ "BSD-3-Clause" ]
null
null
null
djstripe/__init__.py
TigerDX/dj-stripe
2fd4897abaedf2d9faa3dd5af86402dae3ab86a3
[ "BSD-3-Clause" ]
null
null
null
djstripe/__init__.py
TigerDX/dj-stripe
2fd4897abaedf2d9faa3dd5af86402dae3ab86a3
[ "BSD-3-Clause" ]
null
null
null
from __future__ import unicode_literals

import warnings

from django import get_version as get_django_version

__title__ = "dj-stripe"
__summary__ = "Django + Stripe Made Easy"
__uri__ = "https://github.com/pydanny/dj-stripe/"

__version__ = "0.5.0"

__author__ = "Daniel Greenfeld"
__email__ = "pydanny@gmail.com"

__license__ = "BSD"
__license__ = "License :: OSI Approved :: BSD License"
__copyright__ = "Copyright 2015 Daniel Greenfeld"

if get_django_version() <= '1.6.x':
    msg = "dj-stripe deprecation notice: Django 1.6 and lower are deprecated\n" \
          "and will be removed in dj-stripe 0.6.0.\n" \
          "Reference: https://github.com/pydanny/dj-stripe/issues/173"
    warnings.warn(msg)
29.416667
81
0.723796
98
706
4.744898
0.55102
0.086022
0.068817
0.090323
0.124731
0.124731
0
0
0
0
0
0.028523
0.155807
706
23
82
30.695652
0.751678
0
0
0
0
0
0.498584
0
0
0
0
0
0
1
0
false
0
0.176471
0
0.176471
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6946eb298801b23fec7b4b5e6de31aae00f1e3a
10,283
py
Python
autolamella/milling.py
Chlanda-Lab/autolamella
ab135eefd56770f326f90747ef4dafebff4e8f71
[ "MIT" ]
null
null
null
autolamella/milling.py
Chlanda-Lab/autolamella
ab135eefd56770f326f90747ef4dafebff4e8f71
[ "MIT" ]
null
null
null
autolamella/milling.py
Chlanda-Lab/autolamella
ab135eefd56770f326f90747ef4dafebff4e8f71
[ "MIT" ]
null
null
null
import logging
import os
import time

import numpy as np

from autolamella.acquire import (
    grab_images,
    save_reference_images,
    save_final_images,
)
from autolamella.align import realign
from autolamella.autoscript import reset_state


def milling(
    microscope,
    settings,
    stage_settings,
    my_lamella,
    pattern,  # "upper", "lower", "both"
    filename_prefix="",
    demo_mode=False,
):
    from autoscript_core.common import ApplicationServerException
    from autoscript_sdb_microscope_client.structures import StagePosition

    # Sanity-check pattern parameter
    if pattern not in ("upper", "lower", "both"):
        raise ValueError(f"Invalid pattern type:\n"
                         f"Should be \"upper\", \"lower\" or \"both\", not \"{pattern}\"")
    # Setup and realign to fiducial marker
    setup_milling(microscope, settings, stage_settings, my_lamella)
    tilt_in_radians = np.deg2rad(stage_settings["overtilt_degrees"])
    if pattern == "upper":
        microscope.specimen.stage.relative_move(StagePosition(t=-tilt_in_radians))
    elif pattern == "lower":
        microscope.specimen.stage.relative_move(StagePosition(t=+tilt_in_radians))
    # Realign three times
    for abc in "abc":
        image_unaligned = grab_images(
            microscope,
            settings,
            my_lamella,
            prefix="IB_" + filename_prefix,
            suffix=f"_0{abc}-unaligned",
        )
        realign(microscope, image_unaligned, my_lamella.fiducial_image)
    # Save the refined position to prevent gradual stage-drift
    my_lamella.fibsem_position.ion_beam.update_beam_shift()
    # Save the newly aligned image for the next alignment stage
    my_lamella.fiducial_image = grab_images(
        microscope,
        settings,
        my_lamella,  # can remove
        prefix="IB_" + filename_prefix,
        suffix="_1-aligned",
    )
    # Create and mill patterns
    if pattern == "upper" or pattern == "both":
        _milling_coords(microscope, stage_settings, my_lamella, "upper")
    if pattern == "lower" or pattern == "both":
        _milling_coords(microscope, stage_settings, my_lamella, "lower")
    # Create microexpansion joints (if applicable)
    _microexpansion_coords(microscope, stage_settings, my_lamella)
    if 'patterning_mode' in stage_settings:
        microscope.patterning.mode = stage_settings['patterning_mode']
    if not demo_mode:
        microscope.imaging.set_active_view(2)  # the ion beam view
        print("Milling pattern...")
        try:
            microscope.patterning.run()
        except ApplicationServerException:
            logging.error("ApplicationServerException: could not mill!")
    microscope.patterning.clear_patterns()
    grab_images(
        microscope,
        settings,
        my_lamella,  # can remove
        prefix="IB_" + filename_prefix,
        suffix=f"_2-after-{pattern}-milling",
    )
    return microscope


def _milling_coords(microscope, stage_settings, my_lamella, pattern):
    """Create milling pattern for lamella position."""
    # Sanity-check pattern parameter
    if pattern not in ("upper", "lower"):
        raise ValueError(f"Invalid pattern type for milling coords generation:\n"
                         f"Should be \"upper\" or \"lower\", not \"{pattern}\"")
    microscope.imaging.set_active_view(2)  # the ion beam view
    lamella_center_x, lamella_center_y = my_lamella.center_coord_realspace
    if my_lamella.custom_milling_depth is not None:
        milling_depth = my_lamella.custom_milling_depth
    else:
        milling_depth = stage_settings["milling_depth"]
    height = float(
        stage_settings["total_cut_height"] *
        stage_settings.get(f"percentage_roi_height_{pattern}",
                           stage_settings["percentage_roi_height"])
    )
    center_offset = (
        (0.5 * stage_settings["lamella_height"])
        + (stage_settings["total_cut_height"]
           * stage_settings["percentage_from_lamella_surface"])
        + (0.5 * height)
    )
    center_y = lamella_center_y + center_offset \
        if pattern == "upper" \
        else lamella_center_y - center_offset
    # milling_roi = microscope.patterning.create_cleaning_cross_section(
    milling_roi = microscope.patterning.create_rectangle(
        lamella_center_x,
        center_y,
        stage_settings.get(f'lamella_width_{pattern}', stage_settings["lamella_width"]),
        height,
        milling_depth,
    )
    if pattern == "upper":
        milling_roi.scan_direction = "TopToBottom"
    elif pattern == "lower":
        milling_roi.scan_direction = "BottomToTop"
    return milling_roi


def _microexpansion_coords(microscope, stage_settings, my_lamella):
    """Mill microexpansion joints (TODO: add reference)"""
    if not ("microexpansion_width" in stage_settings and
            "microexpansion_distance_from_lamella" in stage_settings and
            "microexpansion_percentage_height" in stage_settings):
        return None
    microscope.imaging.set_active_view(2)  # the ion beam view
    lamella_center_x, lamella_center_y = my_lamella.center_coord_realspace
    if my_lamella.custom_milling_depth is not None:
        milling_depth = my_lamella.custom_milling_depth
    else:
        milling_depth = stage_settings["milling_depth"]
    height = float(
        (
            2 * stage_settings["total_cut_height"] *
            (stage_settings["percentage_roi_height"]
             + stage_settings["percentage_from_lamella_surface"])
            + stage_settings["lamella_height"]
        ) * stage_settings["microexpansion_percentage_height"]
    )
    offset_x = (stage_settings["lamella_width"] + stage_settings["microexpansion_width"]) / 2 \
        + stage_settings["microexpansion_distance_from_lamella"]
    milling_rois = []
    for scan_direction, offset_x in (("LeftToRight", -offset_x), ("RightToLeft", offset_x)):
        milling_roi = microscope.patterning.create_rectangle(
            lamella_center_x + offset_x,
            lamella_center_y,
            stage_settings["microexpansion_width"],
            height,
            milling_depth,
        )
        milling_roi.scan_direction = scan_direction
        milling_rois.append(milling_roi)
    return milling_rois


def setup_milling(microscope, settings, stage_settings, my_lamella):
    """Setup the ion beam system ready for milling."""
    system_settings = settings["system"]
    ccs_file = system_settings["application_file_cleaning_cross_section"]
    microscope = reset_state(microscope, settings, application_file=ccs_file)
    my_lamella.fibsem_position.restore_state(microscope)
    microscope.beams.ion_beam.beam_current.value = stage_settings["milling_current"]
    return microscope


def run_drift_corrected_milling(microscope, correction_interval, reduced_area=None):
    """
    Parameters
    ----------
    microscope : Autoscript microscope object
    correction_interval : Time in seconds between drift correction realignment
    reduced_area : Autoscript Rectangle() object
        Describes the reduced area view in relative coordinates,
        with the origin in the top left corner.
        Default value is None, which will create a Rectangle(0, 0, 1, 1),
        which means the imaging will use the whole field of view.
    """
    from autoscript_core.common import ApplicationServerException
    from autoscript_sdb_microscope_client.structures import (GrabFrameSettings,
                                                             Rectangle)

    if reduced_area is None:
        reduced_area = Rectangle(0, 0, 1, 1)
    s = GrabFrameSettings(reduced_area=reduced_area)
    reference_image = microscope.imaging.grab_frame(s)
    # start drift corrected patterning (is a blocking function, not asynchronous)
    microscope.patterning.start()
    while microscope.patterning.state == "Running":
        time.sleep(correction_interval)
        try:
            microscope.patterning.pause()
        except ApplicationServerException:
            continue
        else:
            new_image = microscope.imaging.grab_frame(s)
            realign(microscope, new_image, reference_image)
            microscope.patterning.resume()


def mill_single_stage(
    microscope, settings, stage_settings, stage_number, my_lamella, lamella_number
):
    """Run ion beam milling for a single milling stage in the protocol."""
    filename_prefix = f"lamella{lamella_number + 1}_stage{stage_number + 1}"
    demo_mode = settings["demo_mode"]
    milling(
        microscope,
        settings,
        stage_settings,
        my_lamella,
        pattern="both",
        filename_prefix=filename_prefix,
        demo_mode=demo_mode,
    )


def mill_all_stages(
    microscope, protocol_stages, lamella_list, settings, output_dir="output_images"
):
    """Run all milling stages in the protocol."""
    if lamella_list == []:
        logging.info("Lamella sample list is empty, nothing to mill here.")
        return
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for stage_number, stage_settings in enumerate(protocol_stages):
        logging.info(
            f"Protocol stage {stage_number + 1} of {len(protocol_stages)}"
        )
        for lamella_number, my_lamella in enumerate(lamella_list):
            logging.info(
                f"Lamella number {lamella_number + 1} of {len(lamella_list)}"
            )
            # save all the reference images you took creating the fiducial
            if stage_number == 0:
                save_reference_images(settings, my_lamella, lamella_number)
            mill_single_stage(
                microscope,
                settings,
                stage_settings,
                stage_number,
                my_lamella,
                lamella_number,
            )
            # If this is the very last stage, take an image
            if stage_number + 1 == len(protocol_stages):
                save_final_images(microscope, settings, lamella_number)
    reset_state(microscope, settings)
    # Return ion beam current to imaging current (20 pico-Amps)
    microscope.beams.ion_beam.beam_current.value = 20e-12
38.950758
111
0.665565
1,148
10,283
5.684669
0.208188
0.075697
0.033865
0.03034
0.404382
0.366074
0.325621
0.275054
0.227245
0.209163
0
0.004285
0.251094
10,283
263
112
39.098859
0.843137
0.134008
0
0.302885
0
0
0.137509
0.051169
0
0
0
0.003802
0
1
0.033654
false
0
0.052885
0
0.115385
0.004808
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6979012b22ac73cacf5e578b3aa216da2c78397
2,210
py
Python
robots/SlowRobot.py
theGitRory/RoboWars
6121f13e3569c4699a93900a8a6f45301f01a98c
[ "MIT" ]
null
null
null
robots/SlowRobot.py
theGitRory/RoboWars
6121f13e3569c4699a93900a8a6f45301f01a98c
[ "MIT" ]
null
null
null
robots/SlowRobot.py
theGitRory/RoboWars
6121f13e3569c4699a93900a8a6f45301f01a98c
[ "MIT" ]
1
2021-12-16T22:49:29.000Z
2021-12-16T22:49:29.000Z
import pygame
from Robot import Robot


class SlowRobot(Robot):
    moveState = -15
    shootState = 0

    def __init__(self, image, name):
        super().__init__(image, name)
        self.movingLeft = False
        self.movingRight = True
        self.movingUp = False
        self.movingDown = True

    def update(self):
        super().update()
        SlowRobot.moveState = SlowRobot.moveState + 1
        if((SlowRobot.moveState) % 25 == 0 or SlowRobot.moveState < 0):
            preX = self.getRect().centerx
            preY = self.getRect().centery
            if self.movingLeft:
                self.movingLeft = self.moveLeft()
                if not self.movingLeft:
                    self.movingRight = True
                if self.movingUp:
                    self.movingUp = self.moveUp()
                    if not self.movingUp:
                        self.movingDown = True
                else:
                    self.movingDown = self.moveDown()
                    if not self.movingDown:
                        self.movingUp = True
            else:
                self.movingRight = self.moveRight()
                if not self.movingRight:
                    self.movingLeft = True
                if self.movingDown:
                    self.movingDown = self.moveDown()
                    if not self.movingDown:
                        self.movingUp = True
                else:
                    self.movingUp = self.moveUp()
                    if not self.movingUp:
                        self.movingDown = True
            if self.movingLeft and self.movingUp:
                self.turnTowardsAngle(135)
            elif self.movingLeft and self.movingDown:
                self.turnTowardsAngle(-135)
            elif self.movingRight and self.movingUp:
                self.turnTowardsAngle(45)
            else:
                self.turnTowardsAngle(-45)
        SlowRobot.shootState = SlowRobot.shootState + 1
        if((SlowRobot.shootState) % 10 == 0):
            self.shoot()
32.5
70
0.466516
182
2,210
5.620879
0.247253
0.117302
0.109482
0.043011
0.351906
0.242424
0.242424
0.242424
0.242424
0.242424
0
0.018456
0.460633
2,210
68
71
32.5
0.839765
0
0
0.365385
0
0
0
0
0
0
0
0
0
1
0.038462
false
0
0.038462
0
0.134615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6997f85637504050677de593bfdc5dfb24a288e
934
py
Python
istype/__init__.py
Cologler/typing-instancecheck-python
b4dcea88468b1ee43ebb36413b099e3e8508b3ce
[ "MIT" ]
6
2018-07-08T09:38:35.000Z
2020-06-25T13:15:02.000Z
istype/__init__.py
Cologler/typing-instancecheck-python
b4dcea88468b1ee43ebb36413b099e3e8508b3ce
[ "MIT" ]
1
2018-07-08T10:12:49.000Z
2018-07-08T11:31:18.000Z
istype/__init__.py
Cologler/istype-python
b4dcea88468b1ee43ebb36413b099e3e8508b3ce
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <skyoflw@gmail.com>
# ----------
#
# ----------

from typing import Iterable
from itertools import zip_longest

from .internal import TypeMatcher
from .g import isinstanceof, issubclassof

def match(args: (list, tuple), types: Iterable[type]) -> bool:
    '''
    check whether args match types.

    example:

    ``` py
    `match(('', 1), (str, int)) # True
    ```
    '''
    try:
        if len(args) != len(types):
            return False
    except TypeError: # object of type 'types' has no len()
        pass
    empty = object()
    for item, typ in zip_longest(args, types, fillvalue=empty):
        if item is empty or typ is empty:
            return False
        if not isinstanceof(item, typ):
            return False
    return True

__all__ = [
    'TypeMatcher',
    'isinstanceof',
    'issubclassof',
    'match'
]
19.87234
63
0.574946
108
934
4.916667
0.601852
0.062147
0
0
0
0
0
0
0
0
0
0.014925
0.282655
934
46
64
20.304348
0.777612
0.260171
0
0.130435
0
0
0.06135
0
0
0
0
0
0
1
0.043478
false
0.043478
0.173913
0
0.391304
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d69b51ad69fd05add04bf2431b9d7c7e537e89f6
3,669
py
Python
aetherling/space_time/ram_st.py
David-Durst/aetherling
91bcf0579608ccbf7d42a7bddf90ccd4257d6571
[ "MIT" ]
10
2018-04-03T01:51:16.000Z
2022-02-07T04:27:26.000Z
aetherling/space_time/ram_st.py
David-Durst/aetherling
91bcf0579608ccbf7d42a7bddf90ccd4257d6571
[ "MIT" ]
19
2018-05-20T00:43:31.000Z
2021-03-18T20:36:52.000Z
aetherling/space_time/ram_st.py
David-Durst/aetherling
91bcf0579608ccbf7d42a7bddf90ccd4257d6571
[ "MIT" ]
1
2018-07-11T23:36:43.000Z
2018-07-11T23:36:43.000Z
from aetherling.space_time.space_time_types import *
from aetherling.space_time.nested_counters import *
from aetherling.modules.ram_any_type import *
from aetherling.modules.term_any_type import TermAnyType
from aetherling.modules.mux_any_type import DefineMuxAnyType
from aetherling.modules.map_fully_parallel_sequential import DefineNativeMapParallel
from aetherling.helpers.nameCleanup import cleanName
from mantle.coreir.memory import getRAMAddrWidth
from mantle.common.countermod import Decode
from aetherling.modules.ram_any_type import *
from magma import *
from magma.circuit import DefineCircuitKind, Circuit

__all__ = ['DefineRAM_ST', 'RAM_ST']

@cache_definition
def DefineRAM_ST(t: ST_Type, n: int, has_reset = False, read_latency = 0) -> DefineCircuitKind:
    """
    Generate a RAM where t store n objects each of type t.
    WE, RE and RESET affect where in a t is being written/read.
    This is different from normal magma RAMs that don't have
    values that take multiple clocks.

    RADDR : In(Array[log_2(n), Bit)],
    RDATA : Out(t.magma_repr()),
    WADDR : In(Array(log_2(n), Bit)),
    WDATA : In(t.magma_repr()),
    WE: In(Bit)
    RE: In(Bit)
    if has_reset:
    RESET : In(Bit)
    """
    class _RAM_ST(Circuit):
        name = 'RAM_ST_{}_hasReset{}'.format(cleanName(str(t)), str(has_reset))
        addr_width = getRAMAddrWidth(n)
        IO = ['RADDR', In(Bits[addr_width]),
              'RDATA', Out(t.magma_repr()),
              'WADDR', In(Bits[addr_width]),
              'WDATA', In(t.magma_repr()),
              'WE', In(Bit),
              'RE', In(Bit)
              ] + ClockInterface(has_ce=False, has_reset=has_reset)

        @classmethod
        def definition(cls):
            # each valid clock, going to get a magma_repr in
            # read or write each one of those to a location
            rams = [DefineRAMAnyType(t.magma_repr(), t.valid_clocks(), read_latency=read_latency)()
                    for _ in range(n)]
            read_time_position_counter = DefineNestedCounters(t, has_cur_valid=True,
                                                              has_ce=True, has_reset=has_reset)()
            read_valid_term = TermAnyType(Bit)
            read_last_term = TermAnyType(Bit)
            write_time_position_counter = DefineNestedCounters(t, has_cur_valid=True,
                                                               has_ce=True, has_reset=has_reset)()
            write_valid_term = TermAnyType(Bit)
            write_last_term = TermAnyType(Bit)
            read_selector = DefineMuxAnyType(t.magma_repr(), n)()
            for i in range(n):
                wire(cls.WDATA, rams[i].WDATA)
                wire(write_time_position_counter.cur_valid, rams[i].WADDR)
                wire(read_selector.data[i], rams[i].RDATA)
                wire(read_time_position_counter.cur_valid, rams[i].RADDR)
                write_cur_ram = Decode(i, cls.WADDR.N)(cls.WADDR)
                wire(write_cur_ram & write_time_position_counter.valid, rams[i].WE)
            wire(cls.RADDR, read_selector.sel)
            wire(cls.RDATA, read_selector.out)
            wire(cls.WE, write_time_position_counter.CE)
            wire(cls.RE, read_time_position_counter.CE)
            wire(read_time_position_counter.valid, read_valid_term.I)
            wire(read_time_position_counter.last, read_last_term.I)
            wire(write_time_position_counter.valid, write_valid_term.I)
            wire(write_time_position_counter.last, write_last_term.I)
            if has_reset:
                wire(cls.RESET, write_time_position_counter.RESET)
                wire(cls.RESET, read_time_position_counter.RESET)

    return _RAM_ST

def RAM_ST(t: ST_Type, n: int, has_reset: bool = False) -> Circuit:
    DefineRAM_ST(t, n, has_reset)
42.662791
121
0.67021
505
3,669
4.6
0.247525
0.067155
0.106328
0.07232
0.303487
0.238485
0.226431
0.148945
0.095566
0.095566
0
0.001063
0.230853
3,669
85
122
43.164706
0.822112
0.131098
0
0.035714
0
0
0.019802
0
0
0
0
0
0
1
0.053571
false
0
0.214286
0
0.357143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d69d1551c5ade2888af4e328fc5206b21287212d
1,227
py
Python
yhteenlasku/Python/rose_images_plus.py
samuntiede/valokuvamatikka
adab47a93534bf0f83f39603a8744bf6e5923da4
[ "Apache-2.0" ]
null
null
null
yhteenlasku/Python/rose_images_plus.py
samuntiede/valokuvamatikka
adab47a93534bf0f83f39603a8744bf6e5923da4
[ "Apache-2.0" ]
null
null
null
yhteenlasku/Python/rose_images_plus.py
samuntiede/valokuvamatikka
adab47a93534bf0f83f39603a8744bf6e5923da4
[ "Apache-2.0" ]
null
null
null
# Process two rose images by summing them together
# FIN Sum two rose images together
#
# Samuli Siltanen April 2021
# Python translation by Ville Tilvis 2021

import numpy as np
import matplotlib.pyplot as plt

# Read in the images
# FIN Read the images from disk
im1 = plt.imread('../_kuvat/ruusu1.png')
im2 = plt.imread('../_kuvat/ruusu2.png')
print('Images read')

# Normalize images
# FIN Normalize the pixel values to between zero and one
MAX = np.max([np.max(im1),np.max(im2)])
im1 = im1/MAX
im2 = im2/MAX
print('Images normalized')

# Gamma correction for brightening images
# FIN Gamma correction and thresholding
gammacorrB = .6
blackthr = .03
whitethr = .95

# Save the summed image to file
# FIN Compute the sum image
im3 = (im1+im2)/2

# FIN Enhance the image
im3 = im3-np.min(im3);
im3 = im3/np.max(im3);
blackthrarray = blackthr*np.ones(im3.shape)
im3 = np.maximum(im3,blackthrarray)-blackthrarray
im3 = im3/(whitethr*np.max(im3));
im3 = np.minimum(im3, np.ones(im3.shape))
im3 = np.power(im3,gammacorrB)
print('New image ready')

# FIN Save to disk
plt.imsave('../_kuvat/ruusu12.png', im3);
print('Wrote new image to file')

# FIN See what the image looks like
plt.figure(1)
plt.clf()
plt.axis('off')
plt.gcf().set_dpi(600)
plt.imshow(im3)
22.309091
55
0.727791
194
1,227
4.582474
0.515464
0.033746
0.026997
0.031496
0.042745
0.042745
0
0
0
0
0
0.049242
0.139364
1,227
54
56
22.722222
0.792614
0.365118
0
0
0
0
0.170604
0.027559
0
0
0
0
0
1
0
false
0
0.071429
0
0.071429
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d69d44bd20dc95ec5fff8e1985bdab256e11040d
6,931
py
Python
src/test.fastaHeaderMapper.py
dentearl/assemblAnalysis
c8524456dff720d37356c55d7640687415bc1df6
[ "MIT" ]
1
2020-11-12T06:32:26.000Z
2020-11-12T06:32:26.000Z
src/test.fastaHeaderMapper.py
dentearl/assemblAnalysis
c8524456dff720d37356c55d7640687415bc1df6
[ "MIT" ]
null
null
null
src/test.fastaHeaderMapper.py
dentearl/assemblAnalysis
c8524456dff720d37356c55d7640687415bc1df6
[ "MIT" ]
null
null
null
##############################
# Copyright (C) 2009-2011 by
# Dent Earl (dearl@soe.ucsc.edu, dent.earl@gmail.com)
# Benedict Paten (benedict@soe.ucsc.edu, benedict.paten@gmail.com)
# Mark Diekhans (markd@soe.ucsc.edu)
# ... and other members of the Reconstruction Team of David Haussler's
# lab (BME Dept. UCSC).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##############################
import unittest
import os
import sys
myBinDir = os.path.normpath( os.path.dirname( sys.argv[0] ) )
#sys.path.append(myBinDir + "/../../..")
#os.environ["PATH"] = myBinDir + "/../../../../bin:" + os.environ["PATH"]

class RoundTripCheck( unittest.TestCase ):
    import os
    knownValues = (('''>name1
ACGTnnnACGT
>name2
ACGttttttttt
ttttttttt
''','''>contig000001
ACGTnnnACGT
>contig000002
ACGttttttttt
ttttttttt
'''),
                   ('''>apple
ACTGT
>apple2
ACTGTACTGT
>Horrible W0rds and a tab	4@!#@!!!$&*){}
ACGTACGT
>emptyContig
>Some other stuff, odd extra space. 
ACGT
>Last one
TGCATGCAacgt bad characters
''', '''>contig000001
ACTGT
>contig000002
ACTGTACTGT
>contig000003
ACGTACGT
>contig000004
>contig000005
ACGT
>contig000006
TGCATGCAacgt bad characters
'''))
    if not os.path.exists( 'tempTestFiles' ):
        os.mkdir( 'tempTestFiles' )

    def test_oneWay( self ):
        """fastaHeaderMapper should produce known results."""
        import subprocess
        for pre, post in self.knownValues:
            # generate map
            cmd = [os.path.join( myBinDir, 'fastaHeaderMapper.py'),
                   '--createMap=%s' % os.path.join('tempTestFiles','testMap.map'),
                   '--label=%s' % 'contig' ]
            p = subprocess.Popen( cmd, stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
            ( sout ) = p.communicate( pre )[0]
            # go forward
            cmd = [os.path.join( myBinDir, 'fastaHeaderMapper.py'),
                   '--map=%s' % os.path.join('tempTestFiles','testMap.map'),
                   '--goForward', '--label=%s' % 'contig' ]
            p = subprocess.Popen( cmd, stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
            ( outFor ) = p.communicate( pre )[0]
            self.assertEqual( post, outFor )

    def test_roundTrip( self ):
        """fastaHeaderMapper should be invertible."""
        import subprocess
        for pre, post in self.knownValues:
            # generate map
            cmd = [ os.path.join( myBinDir, 'fastaHeaderMapper.py'),
                    '--createMap=%s' % os.path.join('tempTestFiles','testMap.map'),
                    '--label=%s' % 'contig' ]
            p = subprocess.Popen( cmd, stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
            ( sout ) = p.communicate( pre )[0]
            # go forward
            cmd = [ os.path.join( myBinDir, 'fastaHeaderMapper.py'),
                    '--map=%s' % os.path.join('tempTestFiles','testMap.map'),
                    '--goForward', '--label=%s' % 'contig' ]
            p = subprocess.Popen( cmd, stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
            ( outFor ) = p.communicate( pre )[0]
            self.assertEqual( post, outFor )
            # go backward
            cmd = [ os.path.join( myBinDir, 'fastaHeaderMapper.py'),
                    '--map=%s' % os.path.join('tempTestFiles','testMap.map'),
                    '--goBackward', '--label=%s' % 'contig' ]
            p = subprocess.Popen( cmd, stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
            ( outBack ) = p.communicate( outFor )[0]
            self.assertEqual( pre, outBack )

    def test_roundTripPrefix( self ):
        """fastaHeaderMapper should be invertible with prefixes."""
        import random
        import string
        import subprocess
        print ' '
        chars = string.letters + string.digits + ' ' + '\t' + string.punctuation
        for i in xrange(50):
            prefix = ''.join( random.choice( chars ) for x in xrange(30))
            for pre, post in self.knownValues:
                #add prefix to post
                post2 = ''
                j = 0
                for p in post.split('\n'):
                    j += 1
                    p = p.strip()
                    if p == '':
                        if j != len( post.split('\n') ):
                            post2 += '\n'
                        continue
                    if p.startswith('>'):
                        post2 += '>%s.%s\n' % ( prefix, p[1:] )
                    else:
                        post2 += '%s\n' % p
                post = post2
                # generate map
                cmd = [ os.path.join( myBinDir, 'fastaHeaderMapper.py'),
                        '--createMap=%s' % os.path.join('tempTestFiles','testMap.map'),
                        '--prefix=%s' % prefix]
                p = subprocess.Popen( cmd, stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
                ( sout ) = p.communicate( pre )[0]
                # go forward
                cmd = [ os.path.join( myBinDir, 'fastaHeaderMapper.py'),
                        '--map=%s' % os.path.join('tempTestFiles','testMap.map'),
                        '--goForward']
                p = subprocess.Popen( cmd, stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
                ( outFor ) = p.communicate( pre )[0]
                self.assertEqual( post, outFor )
                # go backward
                cmd = [ os.path.join( myBinDir, 'fastaHeaderMapper.py'),
                        '--map=%s' % os.path.join('tempTestFiles','testMap.map'),
                        '--goBackward']
                p = subprocess.Popen( cmd, stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
                ( outBack ) = p.communicate( outFor )[0]
                self.assertEqual( pre, outBack )

if __name__ == '__main__':
    unittest.main()
40.063584
87
0.578127
753
6931
5.306773
0.317397
0.028529
0.04004
0.026026
0.489239
0.46972
0.462963
0.462963
0.462963
0.462963
0
0.016546
0.284952
6931
172
88
40.296512
0.78975
0.219305
0
0.609756
0
0
0.208026
0
0
0
0
0
0.04065
0
null
null
0
0.073171
null
null
0.00813
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
d69d82f810cb6e55ebc41352a4d82679e4b15e3c
5712
py
Python
likebee/core/admin.py
klebercode/likebee
0a0dd6368ef43b53fb8315eb5eb14663067ef07c
[ "MIT" ]
1
2019-11-05T15:00:51.000Z
2019-11-05T15:00:51.000Z
likebee/core/admin.py
klebercode/likebee
0a0dd6368ef43b53fb8315eb5eb14663067ef07c
[ "MIT" ]
null
null
null
likebee/core/admin.py
klebercode/likebee
0a0dd6368ef43b53fb8315eb5eb14663067ef07c
[ "MIT" ]
null
null
null
from django.contrib import admin
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.utils.html import format_html

from datetime import date, datetime

from django_summernote.admin import SummernoteModelAdmin
from mptt.admin import MPTTModelAdmin, DraggableMPTTAdmin

from .models import Priority, Status, Sprint, Project, Task, TaskType
from ..accounts.models import Profile


def make_done(modeladmin, request, queryset):
    status = Status.objects.filter(done=True).first()
    queryset.update(status=status, done=True, done_on=datetime.now())
make_done.short_description = '''
    Marcar tarefas selecionadas como concluído'''


def make_archive(modeladmin, request, queryset):
    if request.user.is_superuser:
        queryset.update(archived=True, archived_on=datetime.now())
make_archive.short_description = '''
    Marcar tarefas selecionadas como arquivado'''


@admin.register(Task)
class TaskAdmin(SummernoteModelAdmin, DraggableMPTTAdmin):
    # change_list_template = 'admin/task_change_list.html'
    mptt_indent_field = 'title'
    list_per_page = 100
    list_display = [
        'tree_actions', 'indented_title', 'owner_thumb', 'colored_priority',
        'colored_status', 'colored_task_type', 'formatted_finish', 'project',
        'sprint'
    ]
    list_display_links = [
        'indented_title',
    ]
    list_filter = [
        ('sprint', admin.RelatedFieldListFilter),
        ('owner', admin.RelatedFieldListFilter),
        ('project', admin.RelatedFieldListFilter),
        ('status', admin.RelatedFieldListFilter),
        'archived',
    ]
    search_fields = ['title', 'description']
    summernote_fields = ['description']
    actions = [make_done, make_archive]

    def get_exclude(self, request, obj=None):
        excluded = super().get_exclude(request, obj) or []
        if not request.user.is_superuser:
            return excluded + ['done', 'done_on', 'archived', 'archived_on']
        return excluded

    def get_queryset(self, request):
        qs = super().get_queryset(request)
        if request.user.is_superuser:
            return qs
        # return qs.filter(Q(status=None) | Q(status__archive=False))
        return qs.filter(archived=False)

    def formatted_finish(self, obj):
        if not obj.finish_on:
            return ''
        color = '#373A3C'
        status_done = None
        status = None
        if obj.status:
            status_done = obj.status.done
            status = obj.status
        if (obj.finish_on.date() < date.today()) and (
                not status_done or not status):
            color = '#E0465E'
        return format_html(
            '<span style="color: {}; font-weight: bold;">{}</span>'.format(
                color, obj.finish_on.strftime('%b %-d')))
    formatted_finish.allow_tags = True
    formatted_finish.admin_order_field = 'finish_on'
    formatted_finish.short_description = _('Data')

    def colored_priority(self, obj):
        if obj.priority:
            name = obj.priority.name
            color = obj.priority.color
            color_text = obj.priority.color_text
        else:
            name = '-'
            color = '#C4C4C4'
            color_text = '#FFFFFF'
        return format_html(
            '<div style="background:{}; color:{}; '
            'text-align:center; padding: 4px;">{}</div>'.format(
                color, color_text, name))
    colored_priority.allow_tags = True
    colored_priority.admin_order_field = 'priority'
    colored_priority.short_description = _('Prioridade')

    def colored_status(self, obj):
        if obj.status:
            name = obj.status.name
            color = obj.status.color
            color_text = obj.status.color_text
        else:
            name = '-'
            color = '#C4C4C4'
            color_text = '#FFFFFF'
        return format_html(
            '<div style="background:{}; color:{}; '
            'text-align:center; padding: 4px;">{}</div>'.format(
                color, color_text, name))
    colored_status.allow_tags = True
    colored_status.admin_order_field = 'status'
    colored_status.short_description = _('Status')

    def colored_task_type(self, obj):
        if obj.task_type:
            name = obj.task_type.name
            color = obj.task_type.color
            color_text = obj.task_type.color_text
        else:
            name = '-'
            color = '#C4C4C4'
            color_text = '#FFFFFF'
        return format_html(
            '<div style="background:{}; color:{}; '
            'text-align:center; padding: 4px;">{}</div>'.format(
                color, color_text, name))
    colored_task_type.allow_tags = True
    colored_task_type.admin_order_field = 'task_type'
    colored_task_type.short_description = _('Tipo')

    def owner_thumb(self, obj):
        if obj.owner:
            profile = Profile.objects.filter(user=obj.owner)
            for item in profile:
                if item.photo:
                    img = item.photo_thumbnail.url
                else:
                    img = None
                if img:
                    return format_html(
                        '<img src="{0}" width="35" />'.format(img)
                    )
            owner = obj.owner
        else:
            owner = ''
        return '{}'.format(owner)
    owner_thumb.allow_tags = True
    owner_thumb.admin_order_field = 'owner'
    owner_thumb.short_description = _('Resp.')

    class Media:
        css = {
            'all': ('css/likebee.css',)
        }


admin.site.register(Priority)
admin.site.register(Status)
admin.site.register(Sprint)
admin.site.register(Project)
admin.site.register(TaskType)
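One caveat about the admin code above: each colored_* method interpolates values with str.format before handing the finished string to format_html, which bypasses the escaping that format_html exists to provide. A safer variant (a sketch reusing the same field names) passes the values as format_html arguments:

# Sketch: format_html escapes its arguments, so pass values instead of
# pre-rendering the string with str.format.
from django.utils.html import format_html

def render_badge(color, color_text, name):
    return format_html(
        '<div style="background:{}; color:{}; '
        'text-align:center; padding: 4px;">{}</div>',
        color, color_text, name)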
31.558011
76
0.608193
624
5712
5.363782
0.227564
0.040335
0.025097
0.014341
0.177771
0.153272
0.126382
0.126382
0.126382
0.126382
0
0.006308
0.278361
5712
180
77
31.733333
0.805677
0.019608
0
0.22069
0
0
0.140075
0.011256
0
0
0
0
0
1
0.062069
false
0
0.062069
0
0.268966
0.027586
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d69dfab5d31344b304dc626b4e7eed938aa556de
369
py
Python
fivetran_api/Fivetran-REST-API-examples/connectorDetails.py
leootrusa/fivetran_example
50931b9fa9638a743531aae2cb10137284d6df17
[ "MIT" ]
35
2018-06-27T20:46:26.000Z
2022-01-03T01:46:12.000Z
fivetran_api/Fivetran-REST-API-examples/connectorDetails.py
dsdorazio/functions
d694ab4d1d0e68d5eaab8fd34df8decc66931cb9
[ "MIT" ]
6
2018-06-27T18:59:23.000Z
2021-06-09T04:47:51.000Z
fivetran_api/Fivetran-REST-API-examples/connectorDetails.py
dsdorazio/functions
d694ab4d1d0e68d5eaab8fd34df8decc66931cb9
[ "MIT" ]
18
2019-03-08T00:00:00.000Z
2022-02-21T22:29:14.000Z
# connectorDetails
# Returns a connector object if a valid identifier was provided.
# Reference: https://fivetran.com/docs/rest-api/connectors#retrieveconnectordetails

import fivetran_api

# Fivetran API URL (Replace {connector_id} with a valid connector id).
url = "https://api.fivetran.com/v1/connectors/{connector_id}"

fivetran_api.dump(fivetran_api.get_url(url))
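The script relies on a local fivetran_api helper module that is not shown in this record. A minimal stand-in, assuming the helper wraps HTTP basic auth with an API key and secret (the names below are hypothetical; only requests and json are real libraries):

# Hypothetical stand-in for the fivetran_api helper used above.
import json
import requests

API_KEY = "your_api_key"        # placeholder credentials
API_SECRET = "your_api_secret"

def get_url(url):
    # Fivetran's REST API authenticates with HTTP basic auth (key/secret).
    return requests.get(url, auth=(API_KEY, API_SECRET)).json()

def dump(obj):
    print(json.dumps(obj, indent=2, sort_keys=True))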
33.545455
83
0.796748
51
369
5.647059
0.529412
0.152778
0
0
0
0
0
0
0
0
0
0.003003
0.097561
369
11
84
33.545455
0.861862
0.620596
0
0
0
0
0.392593
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
d69f54878d575fc34023843ff11b4582ac0a48da
1703
py
Python
shop/coreapp/admin.py
bsperezb/Django-Ecomerce
f061798fd6528997ec7c1874ab0a5bdec03137c6
[ "MIT" ]
1
2021-09-02T03:48:44.000Z
2021-09-02T03:48:44.000Z
shop/coreapp/admin.py
bsperezb/Django-Ecomerce
f061798fd6528997ec7c1874ab0a5bdec03137c6
[ "MIT" ]
null
null
null
shop/coreapp/admin.py
bsperezb/Django-Ecomerce
f061798fd6528997ec7c1874ab0a5bdec03137c6
[ "MIT" ]
null
null
null
from django.contrib import admin

from .models import Address, Coupon, Item, Order, OrderItem, Payment, Session


def make_refund_accepted(modeladmin, request, queryset):
    queryset.update(refund_requested=False, refund_granted=True)


make_refund_accepted.short_description = "Update orders to refund granted"


class OrderAdmin(admin.ModelAdmin):
    list_display = [
        "session",
        "user",
        "ordered",
        "being_delivered",
        "received",
        "refund_requested",
        "refund_granted",
        "billing_address",
        "shipping_address",
        "payment",
        "coupon",
    ]
    list_filter = [
        "ordered",
        "being_delivered",
        "received",
        "refund_requested",
        "refund_granted",
    ]
    list_display_links = [
        "session",
        "billing_address",
        "payment",
        "coupon",
        "shipping_address",
    ]
    search_fields = ["user__username", "reference", "session__session_number"]
    actions = [make_refund_accepted]


class AddressAdmin(admin.ModelAdmin):
    list_display = [
        "user",
        "street_address",
        "apartment_address",
        "country",
        "zip",
        "address_type",
        "default",
    ]
    list_filter = ["address_type", "default", "country"]
    search_fields = ["user", "street_address", "apartment_address", "zip"]


class SessionAdmin(admin.ModelAdmin):
    readonly_fields = ("start_date",)


admin.site.register(Item)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderItem)
admin.site.register(Payment)
admin.site.register(Address, AddressAdmin)
admin.site.register(Coupon)
admin.site.register(Session, SessionAdmin)
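This module registers its ModelAdmin classes through admin.site.register at the bottom; Django's decorator form is equivalent and keeps the registration next to the class definition (a sketch reusing the Order model from above):

from django.contrib import admin
from .models import Order

@admin.register(Order)  # equivalent to admin.site.register(Order, OrderAdmin)
class OrderAdmin(admin.ModelAdmin):
    list_display = ["session", "user", "ordered"]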
22.706667
78
0.63946
163
1703
6.435583
0.380368
0.060057
0.113441
0.049571
0.171592
0.108675
0.108675
0.108675
0
0
0
0
0.23899
1703
74
79
23.013514
0.809414
0
0
0.428571
0
0
0.259542
0.013506
0
0
0
0
0
1
0.017857
false
0
0.035714
0
0.267857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d69f649c3051f59f0608bb308dcfe5aa1166fc96
37194
py
Python
src/pudl/metadata/codes.py
rohithdesikan/pudl
0009afc2b31031f6cd50e60aec096421f7a9d3c7
[ "MIT" ]
null
null
null
src/pudl/metadata/codes.py
rohithdesikan/pudl
0009afc2b31031f6cd50e60aec096421f7a9d3c7
[ "MIT" ]
null
null
null
src/pudl/metadata/codes.py
rohithdesikan/pudl
0009afc2b31031f6cd50e60aec096421f7a9d3c7
[ "MIT" ]
null
null
null
"""Metadata for cleaning, re-encoding, and documenting coded data columns. These dictionaries are used to create Encoder instances. They contain the following keys: 'df': A dataframe associating short codes with long descriptions and other information. 'code_fixes': A dictionary mapping non-standard codes to canonical, standardized codes. 'ignored_codes': A list of non-standard codes which appear in the data, and will be set to NA. """ from typing import Any, Dict import numpy as np import pandas as pd CODE_METADATA: Dict[str, Dict[str, Any]] = { "coalmine_types_eia": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ ("P", "preparation_plant", "A coal preparation plant."), ("S", "surface", "A surface mine."), ("U", "underground", "An underground mine."), ( "US", "underground_and_surface", "Both an underground and surface mine with most coal extracted from underground", ), ( "SU", "surface_and_underground", "Both an underground and surface mine with most coal extracted from surface", ), ], ).convert_dtypes(), "code_fixes": { "p": "P", "U/S": "US", "S/U": "SU", "Su": "S", }, "ignored_codes": [], }, "power_purchase_types_ferc1": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ ( "AD", "adjustment", 'Out-of-period adjustment. Use this code for any accounting adjustments or "true-ups" for service provided in prior reporting years. Provide an explanation in a footnote for each adjustment.', ), ( "EX", "electricity_exchange", "Exchanges of electricity. Use this category for transactions involving a balancing of debits and credits for energy, capacity, etc. and any settlements for imbalanced exchanges.", ), ( "IF", "intermediate_firm", 'Intermediate-term firm service. The same as LF service expect that "intermediate-term" means longer than one year but less than five years.', ), ( "IU", "intermediate_unit", 'Intermediate-term service from a designated generating unit. The same as LU service expect that "intermediate-term" means longer than one year but less than five years.', ), ( "LF", "long_firm", 'Long-term firm service. "Long-term" means five years or longer and "firm" means that service cannot be interrupted for economic reasons and is intended to remain reliable even under adverse conditions (e.g., the supplier must attempt to buy emergency energy from third parties to maintain deliveries of LF service). This category should not be used for long-term firm service firm service which meets the definition of RQ service. For all transaction identified as LF, provide in a footnote the termination date of the contract defined as the earliest date that either buyer or seller can unilaterally get out of the contract.', ), ( "LU", "long_unit", 'Long-term service from a designated generating unit. "Long-term" means five years or longer. The availability and reliability of service, aside from transmission constraints, must match the availability and reliability of the designated unit.', ), ( "OS", "other_service", "Other service. Use this category only for those services which cannot be placed in the above-defined categories, such as all non-firm service regardless of the Length of the contract and service from designated units of Less than one year. Describe the nature of the service in a footnote for each adjustment.", ), ( "RQ", "requirement", "Requirements service. Requirements service is service which the supplier plans to provide on an ongoing basis (i.e., the supplier includes projects load for this service in its system resource planning). 
In addition, the reliability of requirement service must be the same as, or second only to, the supplier’s service to its own ultimate consumers.", ), ( "SF", "short_firm", "Short-term service. Use this category for all firm services, where the duration of each period of commitment for service is one year or less.", ), ], ).convert_dtypes(), "code_fixes": {}, "ignored_codes": [ "", "To", 'A"', 'B"', 'C"', "ÿ\x16", "NA", " -", "-", "OC", "N/", "Pa", "0", ], }, "momentary_interruptions_eia": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ ( "L", "less_than_1_minute", "Respondent defines a momentary interruption as less than 1 minute.", ), ( "F", "less_than_5_minutes", "Respondent defines a momentary interruption as less than 5 minutes.", ), ( "O", "other", "Respondent defines a momentary interruption using some other criteria.", ), ], ).convert_dtypes(), "code_fixes": {}, "ignored_codes": [], }, "entity_types_eia": { "df": pd.DataFrame( columns=[ "code", "label", "description", ], data=[ ( "A", "municipal_marketing_authority", "Municipal Marketing Authority. Voted into existence by the residents of a municipality and given authority for creation by the state government. They are nonprofit organizations", ), ( "B", "behind_the_meter", "Behind the Meter. Entities that install, own, and/or operate a system (usually photovoltaic), and sell, under a long term power purchase agreement (PPA) or lease, all the production from the system to the homeowner or business with which there is a net metering agreement. Third Party Owners (TPOs) of PV solar installations use this ownership code.", ), ("C", "cooperative", "Cooperative. Member-owned organizations."), ("COM", "commercial", "Commercial facility."), ( "D", "nonutility_dsm_administrator", "Non-utility DSM Administrator. Only involved with Demand-Side Management activities.", ), ( "F", "federal", "Federal. Government agencies with the authority to deliver energy to end-use customers.", ), ("G", "community_choice_aggregator", "Community Choice Aggregator."), ( "I", "investor_owned", "Investor-owned Utilities. Entities that are privately owned and provide a public service.", ), ("IND", "industrial", "Industrial facility."), ( "M", "municipal", "Municipal: Entities that are organized under authority of state statute to provide a public service to residents of that area.", ), ("O", "other", "Other entity type."), ( "P", "political_subdivision", 'Political Subdivision. (also called "public utility district"): Independent of city or county government and voted into existence by a majority of the residents of any given area for the specific purpose of providing utility service to the voters. State laws provide for the formation of such districts.', ), ("PO", "power_marketer", "Power marketer."), ("PR", "private", "Private entity."), ( "Q", "independent_power_producer", "Independent Power Producer or Qualifying Facility. 
Entities that own power plants and sell their power into the wholesale market.", ), ( "R", "retail_power_marketer", "Retail Power Marketer or Energy Service Provider: Entities that market power to customers in restructured markets.", ), ( "S", "state", "State entities that own or operate facilities or provide a public service.", ), ( "T", "transmission", "Transmission: Entities that operate or own high voltage transmission wires that provide bulk power services.", ), ("U", "unknown", "Unknown entity type."), ( "W", "wholesale_power_marketer", "Wholesale Power Marketer: Entities that buy and sell power in the wholesale market.", ), ], ).convert_dtypes(), "code_fixes": { "Behind the Meter": "B", "Community Choice Aggregator": "G", "Cooperative": "C", "Facility": "Q", "Federal": "F", "Investor Owned": "I", "Municipal": "M", "Political Subdivision": "P", "Power Marketer": "PO", "Retail Power Marketer": "R", "State": "S", "Unregulated": "Q", "Wholesale Power Marketer": "W", }, "ignored_codes": [], }, "energy_sources_eia": { "df": pd.DataFrame( columns=[ "code", "label", "fuel_units", "min_fuel_mmbtu_per_unit", "max_fuel_mmbtu_per_unit", "fuel_group_eia", "fuel_derived_from", "fuel_phase", "fuel_type_code_pudl", "description", ], data=[ ( "AB", "agricultural_byproducts", "short_tons", 7.0, 18.0, "renewable", "biomass", "solid", "waste", "Agricultural by-products", ), ( "ANT", "anthracite", "short_tons", 22.0, 28.0, "fossil", "coal", "solid", "coal", "Anthracite coal", ), ( "BFG", "blast_furnace_gas", "mcf", 0.07, 0.12, "fossil", "gas", "gas", "gas", "Blast furnace gas", ), ( "BIT", "bituminous_coal", "short_tons", 20.0, 29.0, "fossil", "coal", "solid", "coal", "Bituminous coal", ), ( "BLQ", "black_liquor", "short_tons", 10.0, 14.0, "renewable", "biomass", "liquid", "waste", "Black liquor", ), ( "DFO", "distillate_fuel_oil", "barrels", 5.5, 6.2, "fossil", "petroleum", "liquid", "oil", "Distillate fuel oil, including diesel, No. 1, No. 2, and No. 
4 fuel oils", ), ( "GEO", "geothermal", pd.NA, np.nan, np.nan, "renewable", "other", pd.NA, "other", "Geothermal", ), ( "JF", "jet_fuel", "barrels", 5.0, 6.0, "fossil", "petroleum", "liquid", "oil", "Jet fuel", ), ( "KER", "kerosene", "barrels", 5.6, 6.1, "fossil", "petroleum", "liquid", "oil", "Kerosene", ), ( "LFG", "landfill_gas", "mcf", 0.3, 0.6, "renewable", "biomass", "gas", "waste", "Landfill gas", ), ( "LIG", "lignite", "short_tons", 10.0, 14.5, "fossil", "coal", "solid", "coal", "Lignite coal", ), ( "MSB", "municipal_solid_waste_biogenic", "short_tons", 9.0, 12.0, "renewable", "biomass", "solid", "waste", "Municipal solid waste (biogenic)", ), ( "MSN", "municipal_solid_nonbiogenic", "short_tons", 9.0, 12.0, "fossil", "petroleum", "solid", "waste", "Municipal solid waste (non-biogenic)", ), ( "MSW", "municipal_solid_waste", "short_tons", 9.0, 12.0, "renewable", "biomass", "solid", "waste", "Municipal solid waste (all types)", ), ( "MWH", "electricity_storage", "mwh", np.nan, np.nan, "other", "other", pd.NA, "other", "Electricity used for electricity storage", ), ( "NG", "natural_gas", "mcf", 0.8, 1.1, "fossil", "gas", "gas", "gas", "Natural gas", ), ( "NUC", "nuclear", pd.NA, np.nan, np.nan, "other", "other", pd.NA, "nuclear", "Nuclear, including uranium, plutonium, and thorium", ), ( "OBG", "other_biomass_gas", "mcf", 0.36, 1.6, "renewable", "biomass", "gas", "waste", "Other biomass gas, including digester gas, methane, and other biomass gasses", ), ( "OBL", "other_biomass_liquid", "barrels", 3.5, 4.0, "renewable", "biomass", "liquid", "waste", "Other biomass liquids", ), ( "OBS", "other_biomass_solid", "short_tons", 8.0, 25.0, "renewable", "biomass", "solid", "waste", "Other biomass solids", ), ( "OG", "other_gas", "mcf", 0.32, 3.3, "fossil", "other", "gas", "gas", "Other gas", ), ( "OTH", "other", pd.NA, np.nan, np.nan, "other", "other", pd.NA, "other", "Other", ), ( "PC", "petroleum_coke", "short_tons", 24.0, 30.0, "fossil", "petroleum", "solid", "coal", "Petroleum coke", ), ( "PG", "propane_gas", "mcf", 2.5, 2.75, "fossil", "petroleum", "gas", "gas", "Gaseous propane", ), ( "PUR", "purchased_steam", pd.NA, np.nan, np.nan, "other", "other", pd.NA, "other", "Purchased steam", ), ( "RC", "refined_coal", "short_tons", 20.0, 29.0, "fossil", "coal", "solid", "coal", "Refined coal", ), ( "RFO", "residual_fuel_oil", "barrels", 5.7, 6.9, "fossil", "petroleum", "liquid", "oil", "Residual fuel oil, including Nos. 5 & 6 fuel oils and bunker C fuel oil", ), ( "SC", "coal_synfuel", "short_tons", np.nan, np.nan, "fossil", "coal", "solid", "coal", "Coal synfuel. 
Coal-based solid fuel that has been processed by a coal synfuel plant, and coal-based fuels such as briquettes, pellets, or extrusions, which are formed from fresh or recycled coal and binding materials.", ), ( "SG", "syngas_other", "mcf", np.nan, np.nan, "fossil", "other", "gas", "gas", "Synthetic gas, other than coal-derived", ), ( "SGC", "syngas_coal", "mcf", 0.2, 0.3, "fossil", "coal", "gas", "gas", "Coal-derived synthesis gas", ), ( "SGP", "syngas_petroleum_coke", "mcf", 0.2, 1.1, "fossil", "petroleum", "gas", "gas", "Synthesis gas from petroleum coke", ), ( "SLW", "sludge_waste", "short_tons", 10.0, 16.0, "renewable", "biomass", "liquid", "waste", "Sludge waste", ), ( "SUB", "subbituminous_coal", "short_tons", 15.0, 20.0, "fossil", "coal", "solid", "coal", "Sub-bituminous coal", ), ( "SUN", "solar", pd.NA, np.nan, np.nan, "renewable", "other", pd.NA, "solar", "Solar", ), ( "TDF", "tire_derived_fuels", "short_tons", 16.0, 32.0, "other", "other", "solid", "waste", "Tire-derived fuels", ), ( "WAT", "water", pd.NA, np.nan, np.nan, "renewable", "other", pd.NA, "hydro", "Water at a conventional hydroelectric turbine, and water used in wave buoy hydrokinetic technology, current hydrokinetic technology, and tidal hydrokinetic technology, or pumping energy for reversible (pumped storage) hydroelectric turbine", ), ( "WC", "waste_coal", "short_tons", 6.5, 16.0, "fossil", "coal", "solid", "coal", "Waste/Other coal, including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.", ), ( "WDL", "wood_liquids", "barrels", 8.0, 14.0, "renewable", "biomass", "liquid", "waste", "Wood waste liquids excluding black liquor, including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids", ), ( "WDS", "wood_solids", "short_tons", 7.0, 18.0, "renewable", "biomass", "solid", "waste", "Wood/Wood waste solids, including paper pellets, railroad ties, utility poles, wood chips, park, and wood waste solids", ), ( "WH", "waste_heat", pd.NA, np.nan, np.nan, "other", "other", pd.NA, "other", "Waste heat not directly attributed to a fuel source. WH should only be reported when the fuel source is undetermined, and for combined cycle steam turbines that do not have supplemental firing.", ), ( "WND", "wind", pd.NA, np.nan, np.nan, "renewable", "other", pd.NA, "wind", "Wind", ), ( "WO", "waste_oil", "barrels", 3.0, 5.8, "fossil", "petroleum", "liquid", "oil", "Waste/Other oil, including crude oil, liquid butane, liquid propane, naptha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes", ), ], ).convert_dtypes(), "code_fixes": { "BL": "BLQ", "HPS": "WAT", "ng": "NG", "WOC": "WC", "OW": "WO", "WT": "WND", "H2": "OG", "OOG": "OG", }, "ignored_codes": [ 0, "0", "OO", "BM", "CBL", "COL", "N", "no", "PL", "ST", ], }, "fuel_transportation_modes_eia": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ ( "GL", "great_lakes", "Shipments of coal moved to consumers via the Great Lakes. These shipments are moved via the Great Lakes coal loading docks.", ), ( "OP", "onsite_production", "Fuel is produced on-site, making fuel shipment unnecessary.", ), ( "RR", "rail", "Shipments of fuel moved to consumers by rail (private or public/commercial). Included is coal hauled to or away from a railroad siding by truck if the truck did not use public roads.", ), ( "RV", "river", "Shipments of fuel moved to consumers via river by barge. 
Not included are shipments to Great Lakes coal loading docks, tidewater piers, or coastal ports.", ), ("PL", "pipeline", "Shipments of fuel moved to consumers by pipeline"), ( "SP", "slurry_pipeline", "Shipments of coal moved to consumers by slurry pipeline.", ), ( "TC", "tramway_conveyor", "Shipments of fuel moved to consumers by tramway or conveyor.", ), ( "TP", "tidewater_port", "Shipments of coal moved to Tidewater Piers and Coastal Ports for further shipments to consumers via coastal water or ocean.", ), ( "TR", "truck", "Shipments of fuel moved to consumers by truck. Not included is fuel hauled to or away from a railroad siding by truck on non-public roads.", ), ( "WT", "other_waterway", "Shipments of fuel moved to consumers by other waterways.", ), ], ).convert_dtypes(), "code_fixes": { "TK": "TR", "tk": "TR", "tr": "TR", "WA": "WT", "wa": "WT", "CV": "TC", "cv": "TC", "rr": "RR", "pl": "PL", "rv": "RV", }, "ignored_codes": ["UN"], }, "fuel_types_aer_eia": { "df": pd.DataFrame( columns=["code", "description"], data=[ ("SUN", "Solar PV and thermal"), ("COL", "Coal"), ("DFO", "Distillate Petroleum"), ("GEO", "Geothermal"), ("HPS", "Hydroelectric Pumped Storage"), ("HYC", "Hydroelectric Conventional"), ("MLG", "Biogenic Municipal Solid Waste and Landfill Gas"), ("NG", "Natural Gas"), ("NUC", "Nuclear"), ("OOG", "Other Gases"), ("ORW", "Other Renewables"), ("OTH", "Other (including Nonbiogenic Municipal Solid Waste)"), ("PC", "Petroleum Coke"), ("RFO", "Residual Petroleum"), ("WND", "Wind"), ("WOC", "Waste Coal"), ("WOO", "Waste Oil"), ("WWW", "Wood and Wood Waste"), ], ).convert_dtypes(), "code_fixes": {}, "ignored_codes": [], }, "contract_types_eia": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ ( "C", "contract", "Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ", ), ( "NC", "new_contract", "Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month", ), ("S", "spot_purchase", "Fuel obtained through a spot market purchase"), ( "T", "tolling_agreement", "Fuel received under a tolling agreement (bartering arrangement of fuel for generation)", ), ], ).convert_dtypes(), "code_fixes": {"N": "NC"}, "ignored_codes": [], }, "prime_movers_eia": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ ("BA", "battery_storage", "Energy Storage, Battery"), ( "BT", "binary_cycle_turbine", "Turbines Used in a Binary Cycle. Including those used for geothermal applications", ), ( "CA", "combined_cycle_steam_turbine", "Combined-Cycle, Steam Turbine Part", ), ("CC", "combined_cycle_total", "Combined-Cycle, Total Unit"), ("CE", "compressed_air_storage", "Energy Storage, Compressed Air"), ( "CP", "concentrated_solar_storage", "Energy Storage, Concentrated Solar Power", ), ( "CS", "combined_cycle_single_shaft", "Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single", ), ( "CT", "combined_cycle_combustion_turbine", "Combined-Cycle Combustion Turbine Part", ), ( "ES", "other_storage", "Energy Storage, Other (Specify on Schedule 9, Comments)", ), ("FC", "fuel_cell", "Fuel Cell"), ("FW", "flywheel_storage", "Energy Storage, Flywheel"), ( "GT", "gas_combustion_turbine", "Combustion (Gas) Turbine. 
Including Jet Engine design", ), ("HA", "hydrokinetic_axial_flow", "Hydrokinetic, Axial Flow Turbine"), ("HB", "hydrokinetic_wave_buoy", "Hydrokinetic, Wave Buoy"), ("HK", "hydrokinetic_other", "Hydrokinetic, Other"), ( "HY", "hydraulic_turbine", "Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.", ), ( "IC", "internal_combustion", "Internal Combustion (diesel, piston, reciprocating) Engine", ), ("OT", "other", "Other"), ( "PS", "pumped_storage", "Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)", ), ("PV", "solar_pv", "Solar Photovoltaic"), ( "ST", "steam_turbine", "Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).", ), ("UNK", "unknown", "Unknown prime mover."), ("WS", "wind_offshore", "Wind Turbine, Offshore"), ("WT", "wind_onshore", "Wind Turbine, Onshore"), ], ).convert_dtypes(), "code_fixes": {}, "ignored_codes": [], }, "sector_consolidated_eia": { "df": pd.DataFrame( columns=["code", "label", "description"], data=[ (1, "electric_utility", "Traditional regulated electric utilities."), ( 2, "ipp_non_cogen", "Independent power producers which are not cogenerators.", ), ( 3, "ipp_cogen", "Independent power producers which are cogenerators, but whose primary business purpose is the same of electricity to the public.", ), ( 4, "commercial_non_cogen", "Commercial non-cogeneration facilities that produce electric power, are connected to the grid, and can sell power to the public.", ), ( 5, "commercial_cogen", "Commercial cogeneration facilities that produce electric power, are connected to the grid, and can sell power to the public.", ), ( 6, "industrial_non_cogen", "Industrial non-cogeneration facilities that produce electric power, are connected to the grid, and can sell power to the public.", ), ( 7, "industrial_cogen", "Industrial cogeneration facilities that produce electric power, are connected to the grid, and can sell power to the public", ), ], ).convert_dtypes(), "code_fixes": {}, "ignored_codes": [], }, }
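The module docstring describes each CODE_METADATA entry as a code table ("df") plus "code_fixes" and "ignored_codes". A minimal sketch of how one entry could be applied to recode a raw pandas column (assumed usage written against the dictionaries above; PUDL's actual Encoder class is not shown in this file):

import pandas as pd

# Hypothetical application of one CODE_METADATA entry to a raw column.
entry = CODE_METADATA["fuel_transportation_modes_eia"]
raw = pd.Series(["TK", "RR", "UN", "wa"])

fixed = raw.replace(entry["code_fixes"])            # map variants to canonical codes
fixed[fixed.isin(entry["ignored_codes"])] = pd.NA   # codes flagged as ignorable -> NA
good = entry["df"]["code"]
fixed[fixed.notna() & ~fixed.isin(good)] = pd.NA    # anything still unknown -> NA
print(fixed.tolist())                               # ['TR', 'RR', <NA>, 'WT']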
36.89881
649
0.37256
2749
37194
4.959258
0.265187
0.008069
0.005648
0.008069
0.234211
0.198416
0.166434
0.137167
0.126751
0.110834
0
0.010337
0.534414
37194
1007
650
36.935452
0.776924
0.011642
0
0.450902
0
0.034068
0.391876
0.018991
0
0
0
0
0
1
0
true
0
0.003006
0
0.003006
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
d6a2fa9382a12764e6a860268ebbd0f92fa8f439
1179
py
Python
objectModel/Python/tests/utilities/test_storage_utils.py
rt112000/CDM
34bd34f9260140a8f8aa02bd87c23033f3daad4c
[ "CC-BY-4.0", "MIT" ]
884
2019-05-10T02:09:10.000Z
2022-03-31T14:02:00.000Z
objectModel/Python/tests/utilities/test_storage_utils.py
spbast/CDM
bf97a3720c97ee4c9df3625084cf8b3bc65ff9c7
[ "CC-BY-4.0", "MIT" ]
171
2019-06-10T11:34:37.000Z
2022-03-31T22:50:12.000Z
objectModel/Python/tests/utilities/test_storage_utils.py
spbast/CDM
bf97a3720c97ee4c9df3625084cf8b3bc65ff9c7
[ "CC-BY-4.0", "MIT" ]
340
2019-05-07T18:00:16.000Z
2022-03-31T12:00:15.000Z
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.

import unittest

from cdm.utilities import StorageUtils


class StorageUtilsTest(unittest.TestCase):
    """Test to validate StorageUtils functions"""

    def test_split_namespace_path(self):
        """Test split_namespace_path function on different paths"""
        self.assertIsNone(StorageUtils.split_namespace_path(None))

        path_tuple_1 = StorageUtils.split_namespace_path('local:/some/path')
        self.assertIsNotNone(path_tuple_1)
        self.assertEqual('local', path_tuple_1[0])
        self.assertEqual('/some/path', path_tuple_1[1])

        path_tuple_2 = StorageUtils.split_namespace_path('/some/path')
        self.assertIsNotNone(path_tuple_2)
        self.assertEqual('', path_tuple_2[0])
        self.assertEqual('/some/path', path_tuple_2[1])

        path_tuple_3 = StorageUtils.split_namespace_path('adls:/some/path:with:colons')
        self.assertIsNotNone(path_tuple_3)
        self.assertEqual('adls', path_tuple_3[0])
        self.assertEqual('/some/path:with:colons', path_tuple_3[1])
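The assertions pin down the contract of split_namespace_path: None in, None out; otherwise split on the first colon only, with an empty namespace when the path has no colon prefix. A minimal implementation consistent with those tests (a sketch, not the CDM library's actual code):

# Sketch of a split_namespace_path that satisfies the tests above.
def split_namespace_path(object_path):
    """Split 'ns:/some/path' into ('ns', '/some/path'); no namespace yields ''."""
    if object_path is None:
        return None
    namespace, sep, path = object_path.partition(':')  # first colon only
    if not sep:
        return '', object_path
    return namespace, path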
39.3
94
0.724343
151
1179
5.410596
0.364238
0.132191
0.132191
0.146879
0.198286
0.168911
0.080783
0
0
0
0
0.01833
0.167091
1179
29
95
40.655172
0.813646
0.207803
0
0
0
0
0.112798
0.053145
0
0
0
0
0.588235
1
0.058824
false
0
0.117647
0
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
1
d6a31066ae56fbab5790e025522ea654a924256a
36
py
Python
raguel/__init__.py
WireShoutLLC/piraguel
edbca6338d23735d7e1d6a63273f55851ef76518
[ "MIT" ]
null
null
null
raguel/__init__.py
WireShoutLLC/piraguel
edbca6338d23735d7e1d6a63273f55851ef76518
[ "MIT" ]
null
null
null
raguel/__init__.py
WireShoutLLC/piraguel
edbca6338d23735d7e1d6a63273f55851ef76518
[ "MIT" ]
null
null
null
import raguel.fptp
import raguel.irv
18
18
0.861111
6
36
5.166667
0.666667
0.774194
0
0
0
0
0
0
0
0
0
0
0.083333
36
2
19
18
0.939394
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
d6a34d6160e9405d0d8eb8de229e4504ff3e1406
18624
py
Python
slugdetection/Data_Engineering.py
dapolak/acse-9-independent-research-project-dapolak
5ae2cfa7f63c739d419b1362c4aede451ae83eb1
[ "MIT" ]
null
null
null
slugdetection/Data_Engineering.py
dapolak/acse-9-independent-research-project-dapolak
5ae2cfa7f63c739d419b1362c4aede451ae83eb1
[ "MIT" ]
null
null
null
slugdetection/Data_Engineering.py
dapolak/acse-9-independent-research-project-dapolak
5ae2cfa7f63c739d419b1362c4aede451ae83eb1
[ "MIT" ]
2
2019-08-29T16:14:37.000Z
2019-08-30T08:52:03.000Z
# -*- coding: utf-8 -*- """ Part of slugdetection package @author: Deirdree A Polak github: dapolak """ import numpy as np import pandas as pd from datetime import datetime import matplotlib.pyplot as plt from pyspark.sql import functions as F from pyspark.sql.window import Window class Data_Engineering: """ Tools to crop and select the raw well data. Converts data from a Spark dataframe to Pandas. Parameters ---------- well : Spark data frame data frame containing the pressure, temperature and choke data from a well. Attributes ---------- well_df : Spark data frame data frame containing all of the pressure, temperature and choke data from a well. None values have been dropped well_og : Spark data frame original data frame copy, with None values features : list of strings List of the features of the well, default "WH_P", "DH_P", "WH_T", "DH_T" and "WH_choke" thresholds : dictionary Dictionary with important features as keys, and their lower and upper thresholds as values. This is used for cropping out of range values. The set_thresholds method allows user to change or add values. """ def __init__(self, well): self.well_df = well.na.drop() self.well_og = well self.features = ["WH_P", "DH_P", "WH_T", "DH_T", "WH_choke"] self.thresholds = {"WH_P": [0, 100], "DH_P": [90, 150], "WH_T": [0, 100], "DH_T": [75, 95], "WH_choke": [-1000, 1000]} def stats(self): """ Describes the data in terms of the most common statistics, such as mean, std, max, min and count Returns ------- stats : Spark DataFrame Stats of data frame attribute well_df """ return self.well_df.describe() def shape(self): """ Describes the shape of the Spark data frame well_df, with number of rows and number of columns Returns ------- shape : int, int number of rows, number of columns """ return self.well_df.count(), len(self.well_df.columns) def reset_well_df(self): """ Resets Spark data frame attribute well_df to original state by overriding the well_df attribute """ self.well_df = self.well_og.na.drop() def timeframe(self, start="01-JAN-01 00:01", end="01-JUL-19 00:01", date_format="dd-MMM-yy HH:mm", datetime_format='%d-%b-%y %H:%M'): """ For Spark DataFrame well_df attribute, crops the data to the inputted start and end date Parameters ---------- start : str (optional) Wanted start date of cropped data frame (default is "01-JAN-01 00:01") end : str (optional) Wanted end date of cropped data frame (default is "01-JAN-19 00:01") date_format : str (optional) String format of inputted dates (default is "dd-MMM-yy HH:mm") datetime_format : str (optional) C standard data format for datetime (default is '%d-%b-%y %H:%M') """ d1 = datetime.strptime(start, datetime_format) d2 = datetime.strptime(end, datetime_format) assert max((d1, d2)) == d2, "Assert end date is later than start date" # Crop to start date self.well_df = self.well_df.filter( F.col("ts") > F.to_timestamp(F.lit(start), format=date_format).cast('timestamp')) # Crop to end date self.well_df = self.well_df.filter( F.col("ts") < F.to_timestamp(F.lit(end), format=date_format).cast('timestamp')) return def set_thresholds(self, variable, max_, min_): """ Sets the thresholds value of a variable Parameters ---------- variable : str Name of variable, for example "WH_P" max_ : float Upper threshold of variable min_ : float Lower threshold of variable """ assert isinstance(min_, float), "Minimum threshold must be a number" assert isinstance(max_, float), "Maximum threshold must be a number" assert max(min_, max_) == max_, "Maximum value must be larger than min" 
self.thresholds[variable] = [min_, max_] def data_range(self, verbose=True): """ Ensures variables within the dataframe well_df are within range, as set by the attribute thresholds. The out of range values are replaced by the previous in range value Parameters ---------- verbose : bool (optional) whether to allow for verbose (default is True) """ window = Window.orderBy("ts") # Spark Window ordering data frames by time lag_names = [] # Empty list to store column names for well_columns in self.well_df.schema.names: # loop through all components (columns) of data if well_columns != "ts": # no tresholding for timestamp if well_columns in self.thresholds.keys(): tresh = self.thresholds[well_columns] # set thresholds values for parameter from dictionary else: tresh = [-1000, 1000] # if feature not in thresholds attribute, set large thresholds if verbose: print(well_columns, "treshold is", tresh) for i in range(1, 10): # Naive approach, creating large amount of lagged features columns lag_col = well_columns + "_lag_" + str(i) lag_names.append(lag_col) self.well_df = self.well_df.withColumn(lag_col, F.lag(well_columns, i, 0).over(window)) for i in range(8, 0, -1): lag_col = well_columns + "_lag_" + str(i) prev_lag = well_columns + "_lag_" + str(i + 1) # apply minimum and maximum threshold to column, and replace out of range values with previous value self.well_df = self.well_df.withColumn(lag_col, F.when(F.col(lag_col) < tresh[0], F.col(prev_lag)) .otherwise(F.col(lag_col))) self.well_df = self.well_df.withColumn(lag_col, F.when(F.col(lag_col) > tresh[1], F.col(prev_lag)).otherwise(F.col(lag_col))) # apply minimum and maximum threshold to column, and replace out of range values with previous value lag_col = well_columns + "_lag_1" self.well_df = self.well_df.withColumn(well_columns, F.when(F.col(well_columns) < tresh[0], F.col(lag_col)) .otherwise(F.col(well_columns))) self.well_df = self.well_df.withColumn(well_columns, F.when(F.col(well_columns) > tresh[1], F.col(lag_col)) .otherwise(F.col(well_columns))) self.well_df = self.well_df.drop(*lag_names) return def clean_choke(self, method="99"): """ Method to clean WH_choke variables values from the well_df Spark data frame attribute Parameters ---------- method : str (optional) Method to clean out WH_choke values. "99" entails suppressing all the data rows where the choke is lower than 99%. "no_choke" entails setting to None all the rows where the WH_choke value is 0 or where it is non constant i.e. differential is larger than 1 or second differential is larger than 3 (default is '99'). 
""" assert ("WH_choke" in self.well_df.schema.names), 'In order to clean out WH choke data, WH choke column' \ 'in well_df must exist' if method == "99": self.well_df = self.well_df.where("WH_choke > 99") # Select well_df only where WH is larger than 99% elif method == "no_choke": # Select well_df only where WH choke is constant window = Window.orderBy("ts") # Window ordering by time # Create differential and second differential columns for WH choke self.well_df = self.well_df.withColumn("WH_choke_lag", F.lag("WH_choke", 1, 0).over(window)) self.well_df = self.well_df.withColumn("WH_choke_diff", F.abs(F.col("WH_choke") - F.col("WH_choke_lag"))) self.well_df = self.well_df.withColumn("WH_choke_lag2", F.lag("WH_choke_lag", 1, 0).over(window)) self.well_df = self.well_df.withColumn("WH_choke_diff2", F.abs(F.col("WH_choke") - F.col("WH_choke_lag2"))) for col in self.well_df.schema.names: # Set all rows with WH choke less than 10 to 0 self.well_df = self.well_df.withColumn(col, F.when(F.col("WH_choke") < 10, None). otherwise(F.col(col))) # Select well_df where WH choke gradient is less than 1, set rows with high gradient to None self.well_df = self.well_df.withColumn(col, F.when(F.col("WH_choke_diff") > 1, None). otherwise(F.col(col))) # Select well_df where WH choke curvature is less than 3, set rows with higher values to None self.well_df = self.well_df.withColumn(col, F.when(F.col("WH_choke_diff2") > 3, None). otherwise(F.col(col))) else: print("Clean choke method inputted is not know. Try 99 or no_choke") return def df_toPandas(self, stats=True, **kwargs): """ Creates a copy of Spark data frame attribute well_df in Pandas format. Also calculates and stores the mean and standard deviations of each column in the Pandas data frame in the class attributes means and stds. Parameters ---------- stats : bool (optional) Bool asserting whether or not to calculate means and standard deviations of each columns/variable (default is True) kwargs : features : list of str feature names/ column headers to include in pandas data frame pd_df attribute Returns ------- pd_df : Pandas data frame Pandas data frame of original well_df Spark data frame """ if "features" in kwargs.keys(): # if features specified in kwargs, update feature attribute self.features = kwargs["features"] cols = self.features.copy() cols.append("ts") print("Converting Spark data frame to Pandas") self.pd_df = self.well_df.select(cols).toPandas() # convert selected columns of data frame to Pandas print("Converted") if stats: # If stats is true, calculate and store mean and std as attributes self.means = pd.DataFrame([[0 for i in range(len(self.features))]], columns=self.features) self.stds = pd.DataFrame([[0 for i in range(len(self.features))]], columns=self.features) for f in self.features: self.means[f] = self.pd_df[f].mean() # Compute and store mean of column in means attribute self.stds[f] = self.pd_df[f].std() # Compute and store std of column in stds attribute return self.pd_df def standardise(self, df): """ Standardises the data based on the attributes means and stds as calculated when the original dataframe was converted to Pandas. 
Parameters ---------- df : Pandas data frame Input data frame to be standardised Returns ------- df : Pandas data frame Input data frame standardised """ for feature_ in self.means.columns: # For all features if (feature_ != 'ts') & (feature_ in df.columns): avg = self.means[feature_][0] # get mean for feature from means attribute std = self.stds[feature_][0] # ger std for feature from stds attribute df[feature_] -= avg # Standardise column df[feature_] /= std return df def plot(self, start=0, end=None, datetime_format="%d-%b-%y %H:%M", title="Well Pressure and Temperature over time", ax2_label="Temperature in C // Choke %", **kwargs): """ Simple plot function to plot the pd_df pandas data frame class attribute. Parameters ---------- start : int or str (optional) Index or date at which to start plotting the data (default is 0) end : int or str (optional) Index or date at which to stop plotting the data (default is None) datetime_format : str (optional) C standard data format for datetime (default is '%d-%b-%y %H:%M') title : str (optional) Plot title (default is "Well Pressure and Temperature over time") ax2_label : str (optional) Label for second axis, for non pressure features (default is "Temperature in C // Choke %") kwargs : features: list of str List of features to include in the plot Returns ------- : Figure data plot figure """ assert hasattr(self, "pd_df"), "Pandas data frame pd_df attribute must exist" assert not self.pd_df.empty, "Pandas data frame cannot be empty" # If features has been specified in kwargs passed if "features" in kwargs.keys(): # if only selected features self.features = kwargs["features"] for f in self.features: # Check features exist assert (f in self.pd_df.columns), f + "must be contained in pd_df" if isinstance(start, int): # If start date inputted as an index assert start >= 0, "Start index must be positive" assert start <= len(self.pd_df), "Start index must be less than the last index of pd_df attribute" if isinstance(end, int): # If start date inputted as an index assert end >= 0, "End index must be positive" if isinstance(start, str): # If a string has been passed for the start date date = datetime.strptime(start, datetime_format) assert np.any(self.pd_df.isin([date])), "Start time must exist in pandas data frame" start = self.pd_df['ts'][self.pd_df['ts'].isin([date])].index.tolist()[0] # Get start date as in index if isinstance(end, str): # If a string has been passed for the end date date = datetime.strptime(end, datetime_format) assert np.any(self.pd_df.isin([date])), "End time must exist in pandas data frame" end = self.pd_df['ts'][self.pd_df['ts'].isin([date])].index.tolist()[0] # Get end date as in index if end is not None: # If end index/date has been specified assert max((start, end)) == end, "Assert end date is later than start date" fig, ax = plt.subplots(1, 1, figsize=(30, 12)) # Create subplot ax2 = ax.twinx() # Instantiate secondary axis that shares the same x-axis lines = [] # Create empty list to store lines and corresponding labels colours = ['C' + str(i) for i in range(len(self.features))] # Create list of colour for plots lines for col, c in zip(self.features, colours): if col[-1] == "P": # If pressure, plot on main axis a, = ax.plot(self.pd_df["ts"][start:end], self.pd_df[col][start:end], str(c) + ".", label=col) ax.set_ylabel("Pressure in BarG") lines.append(a) else: # For other features, like Temperature and Choke, plot on secondary axis a, = ax2.plot(self.pd_df["ts"][start:end], self.pd_df[col][start:end], c + '.', label=col) 
ax2.set_ylabel(ax2_label) lines.append(a) ax.legend(lines, [l.get_label() for l in lines]) ax.set_xlabel("Time") ax.grid(True, which='both') ax.set_title(title) return fig def confusion_mat(cm, labels, title='Confusion Matrix', cmap='RdYlGn', **kwargs): """ Simple confusion matrix plotting method. Inspired by Scikit Learn Confusionp Matrix plot example. Parameters ---------- cm : numpy array or list Confusion matrix as outputted by Scikit Learn Confusion Matrix method. labels : list of str Labels to use on the plot of the Confusion Matrix. Must match number of rows in the confusion matrix. title : str (optional) Title that will be printed above confusion matrix plot cmap : str (optional) Colour Map of confusion matrix kwargs : figsize : tuple of int or int Matplotlib key word to set size of plot Returns ------- : Figure confusion matrix figure """ assert (len(labels) == len(cm[0])), "There must be the same number of columns in the confusion matrix as there" \ "is labels available" fig, ax = plt.subplots() if "figsize" in kwargs.keys(): # Plot confusion matrix fig, ax = plt.subplots(figsize=kwargs["figsize"]) im = ax.imshow(cm, interpolation='nearest', cmap=cmap) ax.figure.colorbar(im, ax=ax) ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), xticklabels=labels, yticklabels=labels, title=title, ylabel='True label', xlabel='Predicted label') # Loop over data dimensions and create text annotations. fmt = '.2f' thresh = cm.max() / 2. for i in range(cm.shape[0]): for j in range(cm.shape[1]): ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center", color="white" if cm[i, j] > thresh else "black") fig.tight_layout() return fig
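The Data_Engineering class above wraps a fixed pipeline: drop nulls, crop by time, clamp out-of-range values, clean the choke signal, then convert to pandas. A typical call sequence, assuming a Spark data frame `well` with a 'ts' timestamp column and the five default feature columns (a usage sketch, not taken from the source):

# Hypothetical usage of Data_Engineering; `well` is a Spark data frame with
# 'ts' plus WH_P, DH_P, WH_T, DH_T and WH_choke columns.
de = Data_Engineering(well)
de.timeframe(start="01-JAN-18 00:01", end="01-JAN-19 00:01")
de.set_thresholds("WH_P", max_=90.0, min_=5.0)  # tighten one threshold
de.data_range(verbose=False)                    # replace out-of-range values
de.clean_choke(method="99")                     # keep rows with choke > 99%
pdf = de.df_toPandas()                          # pandas copy plus means/stds
fig = de.plot(title="Well data")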
44.342857
120
0.574152
2452
18624
4.26876
0.165171
0.032674
0.039171
0.022738
0.348524
0.289099
0.236171
0.211713
0.195089
0.154772
0
0.012026
0.330273
18624
419
121
44.448687
0.827147
0.381873
0
0.195531
0
0
0.130135
0
0
0
0
0
0.083799
1
0.067039
false
0
0.03352
0
0.156425
0.022346
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6a35d2bfc2590a217cfcb8cbdd8bde2a6488383
2632
py
Python
vietocr/predict.py
anhtv26062000/vietocr
b14a7d14cc37a969f73b27b2946b8680672c0fe5
[ "Apache-2.0" ]
null
null
null
vietocr/predict.py
anhtv26062000/vietocr
b14a7d14cc37a969f73b27b2946b8680672c0fe5
[ "Apache-2.0" ]
null
null
null
vietocr/predict.py
anhtv26062000/vietocr
b14a7d14cc37a969f73b27b2946b8680672c0fe5
[ "Apache-2.0" ]
null
null
null
import os
import time
import yaml
import argparse

from PIL import Image
import matplotlib.pyplot as plt

from vietocr.tool.predictor import Predictor
from vietocr.tool.config import Cfg


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--img", required=True, help="foo help")
    parser.add_argument("--config", required=True, help="foo help")

    args = parser.parse_args()
    config = Cfg.load_config_from_file(args.config)
    config[
        "vocab"
    ] = " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\xB0\
\xB2\xC0\xC1\xC2\xC3\xC8\xC9\xCA\xCC\xCD\xD0\xD2\xD3\xD4\xD5\xD6\xD9\xDA\xDC\xDD\
\xE0\xE1\xE2\xE3\xE8\xE9\xEA\xEC\xED\xF0\xF2\xF3\xF4\xF5\xF6\xF9\xFA\xFC\xFD\u0100\
\u0101\u0102\u0103\u0110\u0111\u0128\u0129\u014C\u014D\u0168\u0169\u016A\u016B\u01A0\
\u01A1\u01AF\u01B0\u1EA0\u1EA1\u1EA2\u1EA3\u1EA4\u1EA5\u1EA6\u1EA7\u1EA8\u1EA9\u1EAA\
\u1EAB\u1EAC\u1EAD\u1EAE\u1EAF\u1EB0\u1EB1\u1EB2\u1EB3\u1EB4\u1EB5\u1EB6\u1EB7\u1EB8\
\u1EB9\u1EBA\u1EBB\u1EBC\u1EBD\u1EBE\u1EBF\u1EC0\u1EC1\u1EC2\u1EC3\u1EC4\u1EC5\u1EC6\
\u1EC7\u1EC8\u1EC9\u1ECA\u1ECB\u1ECC\u1ECD\u1ECE\u1ECF\u1ED0\u1ED1\u1ED2\u1ED3\u1ED4\
\u1ED5\u1ED6\u1ED7\u1ED8\u1ED9\u1EDA\u1EDB\u1EDC\u1EDD\u1EDE\u1EDF\u1EE0\u1EE1\u1EE2\
\u1EE3\u1EE4\u1EE5\u1EE6\u1EE7\u1EE8\u1EE9\u1EEA\u1EEB\u1EEC\u1EED\u1EEE\u1EEF\u1EF0\
\u1EF1\u1EF2\u1EF3\u1EF4\u1EF5\u1EF6\u1EF7\u1EF8\u1EF9\u2013\u2014\u2019\u201C\u201D\
\u2026\u20AC\u2122\u2212"
    print(config)
    detector = Predictor(config)

    # Option for predicting folder images
    img_list = os.listdir(args.img)
    img_list = sorted(img_list)
    f_pre = open("./test_seq.txt", "w+")

    # new output <name>\t<gtruth>\t<predict>
    # f_gt = open("./gt_word.txt", "r")
    # lines = [line.strip("\n") for line in f_gt if line != "\n"]
    # start_time = time.time()
    # for img in lines:
    #     name, gt = img.split("\t")
    #     img_path = args.img + name
    #     image = Image.open(img_path)
    #     s, prob = detector.predict(image, return_prob=True)
    #     res = name + "\t" + gt + "\t" + s + "\t" + str(prob) + "\n"
    #     f_pre.write(res)
    # runtime = time.time() - start_time
    # print("FPS:", len(img_list) / runtime)

    start_time = time.time()
    for img in img_list:
        img_path = args.img + img
        image = Image.open(img_path)
        s = detector.predict(image)
        print(img_path, "-----", s)
        res = img + "\t" + s + "\n"
        f_pre.write(res)
    runtime = time.time() - start_time
    print("FPS:", len(img_list) / runtime)


if __name__ == "__main__":
    main()
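predict.py is driven from the command line; the two required flags map directly onto the argparse definitions above. A usage sketch (the folder and config paths are placeholders):

# Hypothetical invocation of the script above (paths are placeholders).
import subprocess

subprocess.run(
    ["python", "predict.py",
     "--img", "./demo_images/",               # the script concatenates args.img + filename,
     "--config", "./config/vgg-seq2seq.yml"], # so the folder path needs a trailing slash
    check=True)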
34.631579
111
0.665274
387
2632
4.426357
0.609819
0.024518
0.014011
0.022183
0.154116
0.127262
0.101576
0.072388
0.072388
0.072388
0
0.128345
0.162234
2632
75
112
35.093333
0.648526
0.197948
0
0
0
0
0.035305
0
0
0
0
0
0
1
0.022222
false
0
0.177778
0
0.2
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6a502bc2ef1837bff69e8101cd965f9cb6f8ad1
12231
py
Python
com/vmware/esx/settings/depot_content/components_client.py
adammillerio/vsphere-automation-sdk-python
c07e1be98615201139b26c28db3aa584c4254b66
[ "MIT" ]
null
null
null
com/vmware/esx/settings/depot_content/components_client.py
adammillerio/vsphere-automation-sdk-python
c07e1be98615201139b26c28db3aa584c4254b66
[ "MIT" ]
null
null
null
com/vmware/esx/settings/depot_content/components_client.py
adammillerio/vsphere-automation-sdk-python
c07e1be98615201139b26c28db3aa584c4254b66
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- #--------------------------------------------------------------------------- # Copyright 2020 VMware, Inc. All rights reserved. # AUTO GENERATED FILE -- DO NOT MODIFY! # # vAPI stub file for package com.vmware.esx.settings.depot_content.components. #--------------------------------------------------------------------------- """ The ``com.vmware.esx.settings.depot_content.components_client`` module provides classes to retrieve component versions from the depot. """ __author__ = 'VMware, Inc.' __docformat__ = 'restructuredtext en' import sys from vmware.vapi.bindings import type from vmware.vapi.bindings.converter import TypeConverter from vmware.vapi.bindings.enum import Enum from vmware.vapi.bindings.error import VapiError from vmware.vapi.bindings.struct import VapiStruct from vmware.vapi.bindings.stub import ( ApiInterfaceStub, StubFactoryBase, VapiInterface) from vmware.vapi.bindings.common import raise_core_exception from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator) from vmware.vapi.exception import CoreException from vmware.vapi.lib.constants import TaskType from vmware.vapi.lib.rest import OperationRestMetadata class Versions(VapiInterface): """ The ``Versions`` class provides methods to get component versions from the sync'ed and imported depots. """ _VAPI_SERVICE_ID = 'com.vmware.esx.settings.depot_content.components.versions' """ Identifier of the service in canonical form. """ def __init__(self, config): """ :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration` :param config: Configuration to be used for creating the stub. """ VapiInterface.__init__(self, config, _VersionsStub) self._VAPI_OPERATION_IDS = {} class CategoryType(Enum): """ The ``Versions.CategoryType`` class defines possible values of categories for a component. .. note:: This class represents an enumerated type in the interface language definition. The class contains class attributes which represent the values in the current version of the enumerated type. Newer versions of the enumerated type may contain new values. To use new values of the enumerated type in communication with a server that supports the newer version of the API, you instantiate this class. See :ref:`enumerated type description page <enumeration_description>`. """ SECURITY = None """ Security """ ENHANCEMENT = None """ Enhancement """ BUGFIX = None """ Bugfix """ RECALL = None """ Recall """ RECALL_FIX = None """ Recall-fix """ INFO = None """ Info """ MISC = None """ Misc """ GENERAL = None """ General """ def __init__(self, string): """ :type string: :class:`str` :param string: String value for the :class:`CategoryType` instance. """ Enum.__init__(string) CategoryType._set_values([ CategoryType('SECURITY'), CategoryType('ENHANCEMENT'), CategoryType('BUGFIX'), CategoryType('RECALL'), CategoryType('RECALL_FIX'), CategoryType('INFO'), CategoryType('MISC'), CategoryType('GENERAL'), ]) CategoryType._set_binding_type(type.EnumType( 'com.vmware.esx.settings.depot_content.components.versions.category_type', CategoryType)) class UrgencyType(Enum): """ The ``Versions.UrgencyType`` class defines possible values of urgencies for a component. .. note:: This class represents an enumerated type in the interface language definition. The class contains class attributes which represent the values in the current version of the enumerated type. Newer versions of the enumerated type may contain new values. 
To use new values of the enumerated type in communication with a server that supports the newer version of the API, you instantiate this class. See :ref:`enumerated type description page <enumeration_description>`. """ CRITICAL = None """ Critical """ IMPORTANT = None """ Important """ MODERATE = None """ Moderate """ LOW = None """ Low """ GENERAL = None """ General """ def __init__(self, string): """ :type string: :class:`str` :param string: String value for the :class:`UrgencyType` instance. """ Enum.__init__(string) UrgencyType._set_values([ UrgencyType('CRITICAL'), UrgencyType('IMPORTANT'), UrgencyType('MODERATE'), UrgencyType('LOW'), UrgencyType('GENERAL'), ]) UrgencyType._set_binding_type(type.EnumType( 'com.vmware.esx.settings.depot_content.components.versions.urgency_type', UrgencyType)) class Info(VapiStruct): """ The ``Versions.Info`` class defines the information regarding a component version. .. tip:: The arguments are used to initialize data attributes with the same names. """ def __init__(self, display_name=None, vendor=None, display_version=None, summary=None, description=None, category=None, urgency=None, kb=None, contact=None, release_date=None, ): """ :type display_name: :class:`str` :param display_name: Display name of the component. :type vendor: :class:`str` :param vendor: Vendor of the component. :type display_version: :class:`str` :param display_version: Human readable version of the component. :type summary: :class:`str` :param summary: Summary of the component version. :type description: :class:`str` :param description: Discription of the component version. :type category: :class:`Versions.CategoryType` :param category: Category of the component version. :type urgency: :class:`Versions.UrgencyType` :param urgency: Urgency of the component version. :type kb: :class:`str` :param kb: Link to kb article related to this the component version. :type contact: :class:`str` :param contact: Contact email for the component version. :type release_date: :class:`datetime.datetime` :param release_date: Release date of the component version. """ self.display_name = display_name self.vendor = vendor self.display_version = display_version self.summary = summary self.description = description self.category = category self.urgency = urgency self.kb = kb self.contact = contact self.release_date = release_date VapiStruct.__init__(self) Info._set_binding_type(type.StructType( 'com.vmware.esx.settings.depot_content.components.versions.info', { 'display_name': type.StringType(), 'vendor': type.StringType(), 'display_version': type.StringType(), 'summary': type.StringType(), 'description': type.StringType(), 'category': type.ReferenceType(__name__, 'Versions.CategoryType'), 'urgency': type.ReferenceType(__name__, 'Versions.UrgencyType'), 'kb': type.URIType(), 'contact': type.StringType(), 'release_date': type.DateTimeType(), }, Info, False, None)) def get(self, name, version, ): """ Returns information about a given component version in the depot. :type name: :class:`str` :param name: Name of the component The parameter must be an identifier for the resource type: ``com.vmware.esx.settings.component``. :type version: :class:`str` :param version: Version of the component :rtype: :class:`Versions.Info` :return: Information about the given component :raise: :class:`com.vmware.vapi.std.errors_client.Error` If there is unknown internal error. The accompanying error message will give more details about the failure. 
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound` if component with given version is not found. :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated` if the caller is not authenticated. :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable` If the service is not available. :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized` if you do not have all of the privileges described as follows: * Method execution requires ``VcIntegrity.lifecycleSettings.Read``. """ return self._invoke('get', { 'name': name, 'version': version, }) class _VersionsStub(ApiInterfaceStub): def __init__(self, config): # properties for get operation get_input_type = type.StructType('operation-input', { 'name': type.IdType(resource_types='com.vmware.esx.settings.component'), 'version': type.StringType(), }) get_error_dict = { 'com.vmware.vapi.std.errors.error': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'), 'com.vmware.vapi.std.errors.not_found': type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'), 'com.vmware.vapi.std.errors.unauthenticated': type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'), 'com.vmware.vapi.std.errors.service_unavailable': type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'), } get_input_value_validator_list = [ ] get_output_validator_list = [ ] get_rest_metadata = OperationRestMetadata( http_method='GET', url_template='/esx/settings/depot-content/components/{name}/versions/{version}', path_variables={ 'name': 'name', 'version': 'version', }, query_parameters={ }, dispatch_parameters={ }, header_parameters={ }, dispatch_header_parameters={ } ) operations = { 'get': { 'input_type': get_input_type, 'output_type': type.ReferenceType(__name__, 'Versions.Info'), 'errors': get_error_dict, 'input_value_validator_list': get_input_value_validator_list, 'output_validator_list': get_output_validator_list, 'task_type': TaskType.NONE, }, } rest_metadata = { 'get': get_rest_metadata, } ApiInterfaceStub.__init__( self, iface_name='com.vmware.esx.settings.depot_content.components.versions', config=config, operations=operations, rest_metadata=rest_metadata, is_vapi_rest=True) class StubFactory(StubFactoryBase): _attrs = { 'Versions': Versions, }
33.509589
94
0.575178
1,180
12,231
5.811864
0.215254
0.036454
0.024643
0.03033
0.333771
0.275882
0.265675
0.240012
0.164188
0.164188
0
0.000601
0.320334
12,231
364
95
33.601648
0.824371
0.351075
0
0.083333
1
0
0.177378
0.116437
0
0
0
0
0
1
0.035714
false
0
0.083333
0
0.172619
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
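The generated Versions stub above is normally driven through the vSphere Automation SDK rather than instantiated by hand. A minimal sketch under that assumption (vsphere-automation-sdk-python installed; session authentication and TLS setup omitted; the hostname, component name, and version below are placeholders):

# Minimal sketch: wiring the generated Versions stub to a vCenter endpoint.
# A real client must first authenticate the requests session (e.g. via the
# CIS Session service) before invoking the stub.
import requests

from vmware.vapi.lib.connect import get_requests_connector
from vmware.vapi.stdlib.client.factories import StubConfigurationFactory
from com.vmware.esx.settings.depot_content.components_client import Versions

session = requests.session()
connector = get_requests_connector(session=session,
                                   url='https://vcenter.example.com/api')
stub_config = StubConfigurationFactory.new_std_configuration(connector)

versions = Versions(stub_config)
info = versions.get(name='VMware-VM-Tools', version='11.1.0.16036546-16160855')
print(info.display_name, info.urgency)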
d6a7a396afccca6f90d97a49c348647708bf30b9
9,790
py
Python
MultiAtlasSegmenter/MultiAtlasSegmentation/EvaluateSegmentation.py
mabelzunce/MuscleSegmentation
390737ca1853e3c142a4fb7e186bc8b33bc4ade4
[ "MIT" ]
null
null
null
MultiAtlasSegmenter/MultiAtlasSegmentation/EvaluateSegmentation.py
mabelzunce/MuscleSegmentation
390737ca1853e3c142a4fb7e186bc8b33bc4ade4
[ "MIT" ]
null
null
null
MultiAtlasSegmenter/MultiAtlasSegmentation/EvaluateSegmentation.py
mabelzunce/MuscleSegmentation
390737ca1853e3c142a4fb7e186bc8b33bc4ade4
[ "MIT" ]
null
null
null
#! python3 # Multi-atlas segmentation scheme trying to give a platform to do tests before translating them to the plugin. from __future__ import print_function from GetMetricFromElastixRegistration import GetFinalMetricFromElastixLogFile from MultiAtlasSegmentation import MultiAtlasSegmentation from ApplyBiasCorrection import ApplyBiasCorrection import SimpleITK as sitk from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets import SitkImageManipulation as sitkIm import winshell import numpy as np import matplotlib.pyplot as plt import sys import os # DATA FOLDERS: case = "107" basePath = "D:\Martin\ImplantMigrationStudy\\" + case + "\\" postopImageNames = basePath + case + '_Migration_ContralateralPostopHemiPelvis.mhd' followupImageNames = basePath + case + '_Migration_ContralateralFollowupHemiPelvis.mhd' #postopImageNames = basePath + case + '_Migration_PostopPelvis.mhd' #followupImageNames = basePath + case + '_Migration_FollowupPelvis.mhd' #postopImageNames = basePath + case + '_Migration_PostopBone.mhd' #followupImageNames = basePath + case + '_Migration_FollowupBone.mhd' # READ DATA postopImage = sitk.ReadImage(postopImageNames) # This will be the reference followupImage = sitk.ReadImage(followupImageNames) # This will be the segmented # BINARIZE THE IMAGES: postopImage = sitk.Greater(postopImage, 0) followupImage = sitk.Greater(followupImage, 0) # HOW OVERLAP IMAGES slice_number = round(postopImage.GetSize()[1]/2) #DisplayWithOverlay(image, segmented, slice_number, window_min, window_max) sitkIm.DisplayWithOverlay(postopImage[:,slice_number,:], followupImage[:,slice_number,:], 0, 1) #interact(sitkIm.DisplayWithOverlay, slice_number = (5), image = fixed(postopImage), segmented = fixed(followupImage), # window_min = fixed(0), window_max=fixed(1)); # Get the image constrained by both bounding boxes: #labelStatisticFilter = sitk.LabelShapeStatisticsImageFilter() #labelStatisticFilter.Execute(postopImage) #postopBoundingBox = np.array(labelStatisticFilter.GetBoundingBox(1)) #labelStatisticFilter.Execute(followupImage) #followupBoundingBox = np.array(labelStatisticFilter.GetBoundingBox(1)) #minimumStart = np.minimum(postopBoundingBox[0:3], followupBoundingBox[0:3]+ 20) # 50 is to give an extra margin #minimumStop = np.minimum(postopBoundingBox[0:3]+postopBoundingBox[3:6], followupBoundingBox[0:3]+followupBoundingBox[3:6]- 20) #minimumBoxSize = minimumStop - minimumStart #postopImage = postopImage[minimumStart[0]:minimumStop[0], minimumStart[1]:minimumStop[1], minimumStart[2]:minimumStop[2]] #followupImage = followupImage[minimumStart[0]:minimumStop[0], minimumStart[1]:minimumStop[1], minimumStart[2]:minimumStop[2]] # Another approach is to get the bounding box of the intersection: postopAndFollowupImage = sitk.And(postopImage, followupImage) labelStatisticFilter = sitk.LabelShapeStatisticsImageFilter() labelStatisticFilter.Execute(postopAndFollowupImage) bothBoundingBox = np.array(labelStatisticFilter.GetBoundingBox(1)) postopImage = postopImage[bothBoundingBox[0]:bothBoundingBox[0]+bothBoundingBox[3], bothBoundingBox[1]:bothBoundingBox[1]+bothBoundingBox[4], bothBoundingBox[2]+20:bothBoundingBox[2]++bothBoundingBox[5]-20] followupImage = followupImage[bothBoundingBox[0]:bothBoundingBox[0]+bothBoundingBox[3], bothBoundingBox[1]:bothBoundingBox[1]+bothBoundingBox[4], bothBoundingBox[2]+20:bothBoundingBox[2]+bothBoundingBox[5]-20] #Display reduced image: slice_number = round(postopImage.GetSize()[1]*1/3) 
sitkIm.DisplayWithOverlay(postopImage[:,slice_number,:], followupImage[:,slice_number,:], 0, 1) #sitk.Get #postopZ = permute(sum(sum(postopImage))>0, [3 1 2]); #followupZ = permute(sum(sum(followupImage))>0, [3 1 2]); #bothZ = find(postopZ&followupZ > 0); #% Remove 10 slices each side: #bothZ(1:10) = []; bothZ(end-10:end) = []; # GET SEGMENTATION PERFORMANCE BASED ON SURFACES: # init signed mauerer distance as reference metrics reference_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(postopImage, squaredDistance=False, useImageSpacing=True)) # Get the reference surface: reference_surface = sitk.LabelContour(postopImage) statistics_image_filter = sitk.StatisticsImageFilter() # Get the number of pixels in the reference surface by counting all pixels that are 1. statistics_image_filter.Execute(reference_surface) num_reference_surface_pixels = int(statistics_image_filter.GetSum()) # Get the surface (contour) of the segmented image: segmented_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(followupImage, squaredDistance=False, useImageSpacing=True)) segmented_surface = sitk.LabelContour(followupImage) # Get the number of pixels in the reference surface by counting all pixels that are 1. statistics_image_filter.Execute(segmented_surface) num_segmented_surface_pixels = int(statistics_image_filter.GetSum()) label_intensity_statistics_filter = sitk.LabelIntensityStatisticsImageFilter() label_intensity_statistics_filter.Execute(segmented_surface, reference_distance_map) # Hausdorff distance: hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter() hausdorff_distance_filter.Execute(postopImage, followupImage) #All the other metrics: # Multiply the binary surface segmentations with the distance maps. The resulting distance # maps contain non-zero values only on the surface (they can also contain zero on the surface) seg2ref_distance_map = reference_distance_map * sitk.Cast(segmented_surface, sitk.sitkFloat32) ref2seg_distance_map = segmented_distance_map * sitk.Cast(reference_surface, sitk.sitkFloat32) # Get all non-zero distances and then add zero distances if required. seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map) seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr != 0]) seg2ref_distances = seg2ref_distances + \ list(np.zeros(num_segmented_surface_pixels - len(seg2ref_distances))) ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map) ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr != 0]) ref2seg_distances = ref2seg_distances + \ list(np.zeros(num_reference_surface_pixels - len(ref2seg_distances))) all_surface_distances = seg2ref_distances + ref2seg_distances # The maximum of the symmetric surface distances is the Hausdorff distance between the surfaces. In # general, it is not equal to the Hausdorff distance between all voxel/pixel points of the two # segmentations, though in our case it is. More on this below. 
#hausdorff_distance = hausdorff_distance_filter.GetHausdorffDistance() #max_surface_distance = label_intensity_statistics_filter.GetMaximum(1) #avg_surface_distance = label_intensity_statistics_filter.GetMean(1) #median_surface_distance = label_intensity_statistics_filter.GetMedian(1) #std_surface_distance = label_intensity_statistics_filter.GetStandardDeviation(1) hausdorff_distance = hausdorff_distance_filter.GetHausdorffDistance() avg_surface_distance = np.mean(all_surface_distances) max_surface_distance = np.max(all_surface_distances) median_surface_distance = np.median(all_surface_distances) std_surface_distance = np.std(all_surface_distances) # Now in mm: hausdorff_distance_mm = hausdorff_distance * postopImage.GetSpacing()[0] avg_surface_distance_mm = avg_surface_distance * postopImage.GetSpacing()[0] max_surface_distance_mm = max_surface_distance * postopImage.GetSpacing()[0] median_surface_distance_mm = median_surface_distance * postopImage.GetSpacing()[0] std_surface_distance_mm = std_surface_distance * postopImage.GetSpacing()[0] print("Surface based metrics [voxels]: MEAN_SD={0}, STDSD={1}, MEDIAN_SD={2}, HD={3}, MAX_SD={4}\n".format(avg_surface_distance, std_surface_distance, median_surface_distance, hausdorff_distance, max_surface_distance)) print("Surface based metrics [mm]: MEAN_SD={0}, STDSD={1}, MEDIAN_SD={2}, HD={3}, MAX_SD={4}\n".format(avg_surface_distance_mm, std_surface_distance_mm, median_surface_distance_mm, hausdorff_distance_mm, max_surface_distance_mm)) # GET SEGMENTATION PERFORMANCE BASED ON OVERLAP METRICS: overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter() overlap_measures_filter.Execute(postopImage, followupImage) jaccard_value = overlap_measures_filter.GetJaccardCoefficient() dice_value = overlap_measures_filter.GetDiceCoefficient() volume_similarity_value = overlap_measures_filter.GetVolumeSimilarity() false_negative_value = overlap_measures_filter.GetFalseNegativeError() false_positive_value = overlap_measures_filter.GetFalsePositiveError() print("Overlap based metrics: Jaccard={0}, Dice={1}, VolumeSimilarity={2}, FN={3}, FP={4}\n".format(jaccard_value, dice_value, volume_similarity_value, false_negative_value, false_positive_value)) # Create a log file: logFilename = basePath + 'RegistrationPerformance_python.txt' log = open(logFilename, 'w') log.write("Mean Surface Distance, STD Surface Distance, Median Surface Distance, Hausdorff Distance, Max Surface Distance\n") log.write("{0}, {1}, {2}, {3}, {4}\n".format(avg_surface_distance, std_surface_distance, median_surface_distance, hausdorff_distance, max_surface_distance)) log.write("Mean Surface Distance, STD Surface Distance [mm], Median Surface Distance [mm], Hausdorff Distance [mm], Max Surface Distance [mm]\n") log.write("{0}, {1}, {2}, {3}, {4}\n".format(avg_surface_distance_mm, std_surface_distance_mm, median_surface_distance_mm, hausdorff_distance_mm, max_surface_distance_mm)) log.write("Jaccard, Dice, Volume Similarity, False Negative, False Positive\n") log.write("{0}, {1}, {2}, {3}, {4}\n".format(jaccard_value, dice_value, volume_similarity_value, false_negative_value, false_positive_value)) log.close() plt.show()
60.432099
229
0.81236
1,188
9,790
6.464646
0.223064
0.078125
0.033203
0.023438
0.41862
0.29974
0.230339
0.219141
0.214063
0.214063
0
0.017984
0.091216
9,790
162
230
60.432099
0.845229
0.331869
0
0.044944
0
0.044944
0.124904
0.027448
0
0
0
0
0
1
0
false
0
0.146067
0
0.146067
0.044944
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
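The surface-distance logic in EvaluateSegmentation.py above can be exercised without the local .mhd scans. A self-contained sketch on two synthetic spheres follows; it samples each distance map at every surface voxel directly, which sidesteps the nonzero-then-pad bookkeeping the script performs but yields the same symmetric distance multiset:

# Self-contained demo of the symmetric surface-distance computation.
import numpy as np
import SimpleITK as sitk

def make_sphere(shape, center, radius):
    # Binary sphere as a SimpleITK label image.
    grid = np.indices(shape)
    dist = np.sqrt(sum((g - c) ** 2 for g, c in zip(grid, center)))
    return sitk.GetImageFromArray((dist <= radius).astype(np.uint8))

reference = make_sphere((64, 64, 64), (32, 32, 32), 20)
segmented = make_sphere((64, 64, 64), (34, 32, 32), 19)

# Unsigned distance-to-surface maps and label contours, as in the script.
ref_dmap = sitk.Abs(sitk.SignedMaurerDistanceMap(reference, squaredDistance=False, useImageSpacing=True))
seg_dmap = sitk.Abs(sitk.SignedMaurerDistanceMap(segmented, squaredDistance=False, useImageSpacing=True))
ref_surf = sitk.LabelContour(reference)
seg_surf = sitk.LabelContour(segmented)

# Sample each distance map on the other segmentation's surface voxels.
seg2ref = sitk.GetArrayViewFromImage(ref_dmap)[sitk.GetArrayViewFromImage(seg_surf) == 1]
ref2seg = sitk.GetArrayViewFromImage(seg_dmap)[sitk.GetArrayViewFromImage(ref_surf) == 1]
all_distances = np.concatenate([seg2ref, ref2seg])

overlap = sitk.LabelOverlapMeasuresImageFilter()
overlap.Execute(reference, segmented)
print("mean SD = %.3f, max SD = %.3f, Dice = %.3f"
      % (all_distances.mean(), all_distances.max(), overlap.GetDiceCoefficient()))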
d6a8326296ab6af0cd67cc06c386c30e1cff6c27
1,591
py
Python
jes/jes-v5.020-linux/jes/python/jes/gui/dialogs/intro.py
utv-teaching/foundations-computer-science
568e19fd83a3355dab2814229f335abf31bfd7e9
[ "MIT" ]
null
null
null
jes/jes-v5.020-linux/jes/python/jes/gui/dialogs/intro.py
utv-teaching/foundations-computer-science
568e19fd83a3355dab2814229f335abf31bfd7e9
[ "MIT" ]
null
null
null
jes/jes-v5.020-linux/jes/python/jes/gui/dialogs/intro.py
utv-teaching/foundations-computer-science
568e19fd83a3355dab2814229f335abf31bfd7e9
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
jes.gui.dialogs.intro
=====================

The "intro" dialog, which displays the JESIntroduction.txt file.

:copyright: (C) 2014 Matthew Frazier and Mark Guzdial
:license:   GNU GPL v2 or later, see jes/help/JESCopyright.txt for details
"""
from __future__ import with_statement

import JESResources
import JESVersion

from java.awt import BorderLayout
from javax.swing import JTextPane, JScrollPane, JButton

from jes.gui.components.actions import methodAction
from .controller import BasicDialog, DialogController


class IntroDialog(BasicDialog):
    INFO_FILE = JESResources.getPathTo("help/JESIntroduction.txt")
    WINDOW_TITLE = "Welcome to %s!" % JESVersion.TITLE
    WINDOW_SIZE = (400, 300)

    def __init__(self):
        super(IntroDialog, self).__init__()

        # Open the text file and make a text pane
        textPane = JTextPane()
        textPane.editable = False

        scrollPane = JScrollPane(textPane)
        scrollPane.preferredSize = (32767, 32767)  # just a large number

        with open(self.INFO_FILE, 'r') as fd:
            infoText = fd.read().decode('utf8').replace(
                "@version@", JESVersion.VERSION
            )
        textPane.text = infoText

        # Load the scroll pane into the layout
        self.add(scrollPane, BorderLayout.CENTER)

        # Make an OK button
        self.okButton = JButton(self.ok)
        self.buttonPanel.add(self.okButton)

    @methodAction(name="OK")
    def ok(self):
        self.visible = False


introController = DialogController("Introduction", IntroDialog)
28.927273
74
0.672533
182
1,591
5.785714
0.593407
0.011396
0
0
0
0
0
0
0
0
0
0.018548
0.220616
1,591
54
75
29.462963
0.830645
0.236329
0
0
0
0
0.054908
0.019967
0
0
0
0
0
1
0.068966
false
0
0.241379
0
0.448276
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
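Only one step of intro.py above is portable outside JES's Jython/Swing runtime: reading the packaged text file and substituting the @version@ token. A minimal CPython sketch of that step (the path and version string are placeholders):

# Sketch of the @version@ substitution performed by IntroDialog.__init__.
# The original decodes after reading because Jython 2 str is byte-oriented;
# here the file is decoded as UTF-8 on read instead.
import io

def load_intro_text(path, version):
    with io.open(path, 'r', encoding='utf-8') as fd:
        return fd.read().replace('@version@', version)

# Hypothetical usage:
# print(load_intro_text('help/JESIntroduction.txt', '5.020'))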
d6a8edcb5b2dcf22573395513ba3203eee17a175
15
py
Python
assist/__init__.py
jmlopez-rod/assist
6fb1c76dc3b0dc441faa7347285343e2e529651a
[ "BSD-2-Clause" ]
1
2015-10-30T23:33:11.000Z
2015-10-30T23:33:11.000Z
assist/__init__.py
jmlopez-rod/assist
6fb1c76dc3b0dc441faa7347285343e2e529651a
[ "BSD-2-Clause" ]
null
null
null
assist/__init__.py
jmlopez-rod/assist
6fb1c76dc3b0dc441faa7347285343e2e529651a
[ "BSD-2-Clause" ]
null
null
null
"""Assist """
3.75
9
0.4
1
15
6
1
0
0
0
0
0
0
0
0
0
0
0
0.2
15
3
10
5
0.5
0.4
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
d6aa6ef6511d2d3e5cb122c0409356c3cc693f88
678
py
Python
clioude-server/app.py
ccw630/CliOuDE
743540c7c14d7f95455219efe8be01817e96f96b
[ "MIT" ]
3
2020-07-22T02:17:23.000Z
2021-03-10T12:48:46.000Z
app.py
ccw630/CLIOUDE-Server
4dc129e5bc57caead2df0107f02671d74c7808f1
[ "MIT" ]
5
2020-07-24T07:39:43.000Z
2022-02-27T08:32:49.000Z
app.py
ccw630/CLIOUDE-Server
4dc129e5bc57caead2df0107f02671d74c7808f1
[ "MIT" ]
null
null
null
import tornado.web
import tornado.websocket
import tornado.httpserver
import tornado.ioloop

from worker_gateway.server import WebSocketChannelHandler
from heartbeat.handler import HeartbeatHandler
from orm import Worker


class Application(tornado.web.Application):
    def __init__(self):
        handlers = [
            (r'/api/run', WebSocketChannelHandler),
            (r'/api/heartbeat', HeartbeatHandler)
        ]
        tornado.web.Application.__init__(self, handlers)


if __name__ == '__main__':
    Worker.cull_worker()
    app = Application()
    server = tornado.httpserver.HTTPServer(app)
    server.listen(8080)
    tornado.ioloop.IOLoop.instance().start()
25.111111
57
0.718289
71
678
6.605634
0.464789
0.110874
0.089552
0
0
0
0
0
0
0
0
0.007246
0.185841
678
26
58
26.076923
0.842391
0
0
0
0
0
0.044248
0
0
0
0
0
0
1
0.05
false
0
0.35
0
0.45
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
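WebSocketChannelHandler and HeartbeatHandler in app.py above come from modules not shown here. A runnable sketch of the same tornado wiring, with a hypothetical echo handler standing in so the routing pattern can be tried end to end:

# Hypothetical stand-in for WebSocketChannelHandler: echoes frames back.
import tornado.ioloop
import tornado.web
import tornado.websocket


class EchoHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        print("client connected")

    def on_message(self, message):
        # Send every received frame straight back to the sender.
        self.write_message(message)

    def on_close(self):
        print("client disconnected")


if __name__ == '__main__':
    app = tornado.web.Application([(r'/api/run', EchoHandler)])
    app.listen(8080)
    tornado.ioloop.IOLoop.current().start()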
d6ac10b06dc3d01a44335f8890a997f31547f906
1,536
py
Python
server.py
guoyunlong16/python-websocket-server
81ec842f750726f5e1044305fcabbba831eca098
[ "MIT" ]
null
null
null
server.py
guoyunlong16/python-websocket-server
81ec842f750726f5e1044305fcabbba831eca098
[ "MIT" ]
null
null
null
server.py
guoyunlong16/python-websocket-server
81ec842f750726f5e1044305fcabbba831eca098
[ "MIT" ]
null
null
null
import time

from websocket_server import WebsocketServer


# Called for every client connecting (after handshake)
def new_client(client, server):
    print("New client connected and was given id %d" % client['id'])
    #server.send_message_to_all("Hey all, a new client has joined us")
    short_message = ""
    middle_message = ""
    long_message = ""
    with open("hamlet.txt") as f:
        short_message = f.read()
    with open("xiangcunjiaoshi_liucixin.txt") as f:
        middle_message = f.read()
    with open("theLongestDayInChangAn.txt") as f:
        long_message = f.read()
    send_message(client, server, short_message)
    send_message(client, server, middle_message)
    send_message(client, server, long_message)


def send_message(client, server, message):
    # Re-send the message every 5 seconds for roughly 10 seconds.
    t_end = time.time() + 10
    count = 1
    while time.time() < t_end:
        server.send_message(client, message)
        count += 1
        time.sleep(5)


# Called for every client disconnecting
def client_left(client, server):
    print("Client(%d) disconnected" % client['id'])


# Called when a client sends a message
def message_received(client, server, message):
    if len(message) > 200:
        message = message[:200] + '..'
    print("Client(%d) said: %s" % (client['id'], message))


PORT = 80
HOST = '0.0.0.0'
server = WebsocketServer(PORT, host=HOST)
server.set_fn_new_client(new_client)
server.set_fn_client_left(client_left)
server.set_fn_message_received(message_received)
server.run_forever()
32
67
0.673828
206
1,536
4.84466
0.34466
0.096192
0.08517
0.092184
0.1002
0
0
0
0
0
0
0.014026
0.210938
1,536
47
68
32.680851
0.809406
0.125
0
0
0
0
0.120239
0.040329
0
0
0
0
0
0
null
null
0
0.055556
null
null
0.083333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
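A hypothetical client for the websocket server above, using the separate websocket-client package (pip install websocket-client); the host and port mirror the script, which needs elevated privileges to bind port 80:

# Connect, send one frame, read one of the server's bulk replies.
import websocket

ws = websocket.create_connection("ws://127.0.0.1:80/")
ws.send("hello from the client")
reply = ws.recv()
print("received %d bytes" % len(reply))
ws.close()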
d6ac72939accd121f9983c21fee472c5729238d1
899
py
Python
36-valid-sudoku/36-valid-sudoku.py
MayaScarlet/leetcode-python
8ef0c5cadf2e975957085c0ef84a8c3d90a64b6a
[ "MIT" ]
null
null
null
36-valid-sudoku/36-valid-sudoku.py
MayaScarlet/leetcode-python
8ef0c5cadf2e975957085c0ef84a8c3d90a64b6a
[ "MIT" ]
null
null
null
36-valid-sudoku/36-valid-sudoku.py
MayaScarlet/leetcode-python
8ef0c5cadf2e975957085c0ef84a8c3d90a64b6a
[ "MIT" ]
null
null
null
import collections
from typing import List  # injected by the LeetCode judge; needed when run standalone


class Solution:
    def isValidSudoku(self, board: List[List[str]]) -> bool:
        cols = collections.defaultdict(set)
        rows = collections.defaultdict(set)
        grid = collections.defaultdict(set)

        for r in range(len(board)):
            for c in range(len(board)):
                # Ignore empty cells
                if board[r][c] == ".":
                    continue
                # If the element exists in any of the three sets, return False
                if (board[r][c] in rows[r]
                        or board[r][c] in cols[c]
                        or board[r][c] in grid[(r // 3, c // 3)]):
                    return False
                # Record the element in its row, column and 3x3 sub-grid
                rows[r].add(board[r][c])
                cols[c].add(board[r][c])
                grid[(r // 3, c // 3)].add(board[r][c])
        return True
34.576923
103
0.463849
111
899
3.756757
0.387387
0.100719
0.117506
0.064748
0.091127
0
0
0
0
0
0
0.007634
0.41713
899
26
104
34.576923
0.788168
0.115684
0
0
0
0
0.001261
0
0
0
0
0
0
1
0.0625
false
0
0.0625
0
0.3125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
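A quick smoke test for the Solution class above, assuming it is defined in the same module. The board is LeetCode's valid example; overwriting one cell introduces a column duplicate and flips the verdict:

board = [
    ["5","3",".",".","7",".",".",".","."],
    ["6",".",".","1","9","5",".",".","."],
    [".","9","8",".",".",".",".","6","."],
    ["8",".",".",".","6",".",".",".","3"],
    ["4",".",".","8",".","3",".",".","1"],
    ["7",".",".",".","2",".",".",".","6"],
    [".","6",".",".",".",".","2","8","."],
    [".",".",".","4","1","9",".",".","5"],
    [".",".",".",".","8",".",".","7","9"],
]
print(Solution().isValidSudoku(board))   # True
board[0][0] = "8"                        # duplicates the "8" already in column 0
print(Solution().isValidSudoku(board))   # False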
d6b10c402152179bd5b2001946c3c77a518e7627
9,375
py
Python
Crop Predictions With GUI.py
KRITGYA2001/Crop-Prediction-Model
81c2f0701c89ae6ed1f3b8ae48252c94670ee413
[ "Apache-2.0" ]
null
null
null
Crop Predictions With GUI.py
KRITGYA2001/Crop-Prediction-Model
81c2f0701c89ae6ed1f3b8ae48252c94670ee413
[ "Apache-2.0" ]
null
null
null
Crop Predictions With GUI.py
KRITGYA2001/Crop-Prediction-Model
81c2f0701c89ae6ed1f3b8ae48252c94670ee413
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # coding: utf-8 # In[1]: import numpy as np import pandas as pd import seaborn as sns get_ipython().run_line_magic('matplotlib', 'inline') import matplotlib.pyplot as plt # In[2]: from sklearn.preprocessing import LabelEncoder from sklearn.metrics import classification_report from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, Lasso from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor from sklearn.svm import LinearSVC, SVC from sklearn.neural_network import MLPClassifier from sklearn.metrics import accuracy_score, r2_score, classification_report from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.preprocessing import OneHotEncoder # In[32]: data=pd.read_csv("Crop_prediction.csv") # In[33]: data.head() # * (Data set taken from Indian chamber of food and agriculture) # **Data fields** # * N - ratio of Nitrogen content in soil # * P - ratio of Phosphorous content in soil # * K - ratio of Potassium content in soil # * temperature - temperature in degree Celsius # * humidity - relative humidity in % # * ph - ph value of the soil # * rainfall - rainfall in mm # In[34]: data.tail() # In[35]: data.info() # In[36]: data.describe() # In[37]: data.isnull().sum() # In[38]: data.nunique() # In[39]: data.columns # In[40]: #Visualization plt.figure(figsize=(8,8)) plt.title("Correlation between features") corr=data.corr() sns.heatmap(corr,annot=True) # In[41]: data['label'].unique() # In[42]: plt.figure(figsize=(6,8)) plt.title("Temperature relation with crops") sns.barplot(y="label", x="temperature", data=data,palette="hot") plt.ylabel("crops") #Temperature has very effect with blackgram # In[43]: plt.figure(figsize=(6,8)) plt.title("Humidity relation with crops") sns.barplot(y="label", x="humidity", data=data,palette='brg') plt.ylabel("crops") #humidity has very high relation with rice # In[44]: plt.figure(figsize=(6,8)) plt.title("pH relation with crops") sns.barplot(y="label", x="ph", data=data,palette='hot') plt.ylabel("crops") #ph has a very high relationship with crops # In[45]: plt.figure(figsize=(6,8)) plt.title("Rainfall relation with crops") sns.barplot(y="label", x="rainfall", data=data,palette='brg') plt.ylabel("crops") #Rice needs a lots of rainfall #lentil needs a very less rainfall # In[46]: plt.figure(figsize=(8,6)) plt.title("Temperature and pH effect values for crops") sns.scatterplot(data=data, x="temperature", y="label", hue="ph",palette='brg') plt.ylabel("Crops") # In[47]: plt.figure(figsize=(8,6)) plt.title("Temperature and humidity effect values for crops") sns.scatterplot(data=data, x="temperature", y="label", hue="humidity",palette='brg') plt.ylabel("Crops") # In[48]: plt.figure(figsize=(8,6)) plt.title("Temperature and Rainfall effect values for crops") sns.scatterplot(data=data, x="temperature", y="label", hue="rainfall",palette='brg') plt.ylabel("Crops") # In[49]: #from pandas_profiling import ProfileReport # In[50]: #Predictions encoder=LabelEncoder() data.label=encoder.fit_transform(data.label) # In[51]: features=data.drop("label",axis=1) target=data.label # In[52]: features # In[53]: X_train, X_test, y_train, y_test = train_test_split(features, target, random_state=42) # In[54]: #Linear Regression 
lr = LinearRegression().fit(X_train, y_train) lr_pred= lr.score(X_test, y_test) print("Training score: {:.3f}".format(lr.score(X_train, y_train))) print("Test score: {:.3f}".format(lr.score(X_test, y_test))) # In[55]: #Decision Tree Classifier tree = DecisionTreeClassifier(max_depth=15,random_state=0).fit(X_train, y_train) tree_pred= tree.score(X_test, y_test) print("Training score: {:.3f}".format(tree.score(X_train, y_train))) print("Test score: {:.3f}".format(tree.score(X_test, y_test))) # In[56]: #Random Forests rf = RandomForestClassifier(n_estimators=10, max_features=3, random_state=0).fit(X_train, y_train) rf_pred= rf.score(X_test, y_test) print("Training score: {:.3f}".format(rf.score(X_train, y_train))) print("Test score: {:.3f}".format(rf.score(X_test, y_test))) # In[57]: #GradientBoostingClassifier gbr = GradientBoostingClassifier(n_estimators=20, max_depth=4, max_features=2, random_state=0).fit(X_train, y_train) gbr_pred= gbr.score(X_test, y_test) print("Training score: {:.3f}".format(gbr.score(X_train, y_train))) print("Test score: {:.3f}".format(gbr.score(X_test, y_test))) # In[58]: #Support Vector Classifier svm = SVC(C=100, gamma=0.001).fit(X_train, y_train) svm_pred= svm.score(X_test, y_test) print("Training score: {:.3f}".format(svm.score(X_train, y_train))) print("Test score: {:.3f}".format(svm.score(X_test, y_test))) # In[59]: #Logistic regression log_reg = LogisticRegression(C=0.1, max_iter=100000).fit(X_train, y_train) log_reg_pred= log_reg.score(X_test, y_test) print("Training score: {:.3f}".format(log_reg.score(X_train, y_train))) print("Test score: {:.3f}".format(log_reg.score(X_test, y_test))) # In[60]: predictions_acc = { "Model": ['Decision Tree', 'Random Forest', 'Gradient Boosting', 'SVC', 'Logistic Regression'], "Accuracy": [tree_pred, rf_pred, gbr_pred, svm_pred, log_reg_pred]} # In[61]: model_acc = pd.DataFrame(predictions_acc, columns=["Model", "Accuracy"]) # In[62]: model_acc # In[3]: import tkinter as tk from tkinter.font import BOLD from tkinter import messagebox from tkinter import scrolledtext from tkinter.constants import RIGHT, Y from tkinter import filedialog from tkinter import * # In[8]: def mainscreen(): global window window = tk.Tk() window.geometry("1530x795+0+0") window.configure(bg="#FFE4B5") window.title("Prediction model") head = tk.Label(window, text="\nEnter Details\n", font=("rockwell extra bold",45),fg="dark blue",bg="#FFE4B5").pack() def back3() : window.destroy() def values(): n=n_tk.get() p=p_tk.get() k=k_tk.get() temp=temp_tk.get() humidity=humidity_tk.get() ph=ph_tk.get() rainfall=rainfall_tk.get() def predictfunc(n,p,k,temp,humidity,ph,rainfall): #Predicting Model data=pd.read_csv("Crop_prediction.csv") x=data.loc[:,"N":"rainfall"] y=data.loc[:,'label'] Knn=KNeighborsClassifier() Knn.fit(x,y) test_data=[[n,p,k,temp,humidity,ph,rainfall]] predict=Knn.predict(test_data) #print(predict[0]) output1 = tk.Label(window, text="The prediction is: ",font=("Arial", 20),bg="#FFE4B5").place(x=600, y=570) output2 = tk.Label(window, text=predict, font=("Arial", 20),bg="#FFE4B5").place(x=820, y=570) predictfunc(n,p,k,temp,humidity,ph,rainfall) n1 = tk.Label(window, text="Ratio of Nitrogen content in soil: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=200) n_tk = tk.Entry(window, fg='blue', bg='white',borderwidth=5,font=("Arial", 18), width=30) n_tk.place(x=800, y=200) p2 = tk.Label(window, text="Ratio of Phosphorous content in soil: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=250) p_tk = tk.Entry(window, fg='blue', 
bg='white',borderwidth=5,font=("Arial", 18), width=30) p_tk.place(x=800, y=250) k3 = tk.Label(window, text="Ratio of Potassium content in soil: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=300) k_tk = tk.Entry(window, fg='blue', bg='white',borderwidth=5,font=("Arial", 18), width=30) k_tk.place(x=800, y=300) temp4= tk.Label(window, text="Temperature in degree Celsius: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=350) temp_tk = tk.Entry(window, fg='blue', bg='white',borderwidth=5,font=("Arial", 18), width=30) temp_tk.place(x=800, y=350) humidity5= tk.Label(window, text="Relative humidity in %: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=400) humidity_tk = tk.Entry(window, fg='blue', bg='white',borderwidth=5,font=("Arial", 18), width=30) humidity_tk.place(x=800, y=400) ph6= tk.Label(window, text="pH value of the soil: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=450) ph_tk = tk.Entry(window, fg='blue', bg='white',borderwidth=5,font=("Arial", 18), width=30) ph_tk.place(x=800, y=450) rainfall7= tk.Label(window, text="Rainfall in mm: ",font=("Arial", 20),bg="#FFE4B5").place(x=320, y=500) rainfall_tk = tk.Entry(window, fg='blue', bg='white',borderwidth=5,font=("Arial", 18), width=30) rainfall_tk.place(x=800, y=500) back3_button = tk.Button(text="Exit", bg="blue", fg="white", height=1, width=10, borderwidth=8, cursor="hand2",font=("Arial", 12), command=back3) back3_button.place(x=530,y=680) submit_button = tk.Button(text="Submit", bg="green", fg="white", height=1, width=10, borderwidth=8, cursor="hand2",font=("Arial", 12), command=values) submit_button.place(x=830,y=680) # start the GUI window.mainloop() mainscreen() # In[ ]:
22.865854
154
0.68192
1,414
9,375
4.428571
0.219236
0.017247
0.012456
0.022996
0.4481
0.423028
0.359949
0.296072
0.24992
0.216864
0
0.043593
0.14848
9,375
409
155
22.92176
0.740824
0.11552
0
0.103226
0
0
0.172422
0
0
0
0
0
0
1
0.025806
false
0
0.154839
0
0.180645
0.077419
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
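The GUI's predictfunc above refits a KNeighborsClassifier on every click and reports no accuracy. A standalone sketch of the same prediction step, assuming a Crop_prediction.csv with feature columns N through rainfall and a string label column, as used in the script; the sample readings are hypothetical:

# Fit once, score on a held-out split, then predict a single sample.
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

data = pd.read_csv("Crop_prediction.csv")
X = data.loc[:, "N":"rainfall"]
y = data["label"]

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
knn = KNeighborsClassifier().fit(X_train, y_train)
print("held-out accuracy: {:.3f}".format(knn.score(X_test, y_test)))

# Hypothetical (N, P, K, temperature, humidity, ph, rainfall) readings:
sample = pd.DataFrame([[90, 42, 43, 20.9, 82.0, 6.5, 202.9]], columns=X.columns)
print("suggested crop:", knn.predict(sample)[0])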
d6b123a5d8064301b36365941fb80c50a959742e
3,950
py
Python
alipay/aop/api/domain/ZhimaCreditOrderRepaymentApplyModel.py
articuly/alipay-sdk-python-all
0259cd28eca0f219b97dac7f41c2458441d5e7a6
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/domain/ZhimaCreditOrderRepaymentApplyModel.py
articuly/alipay-sdk-python-all
0259cd28eca0f219b97dac7f41c2458441d5e7a6
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/domain/ZhimaCreditOrderRepaymentApplyModel.py
articuly/alipay-sdk-python-all
0259cd28eca0f219b97dac7f41c2458441d5e7a6
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *


class ZhimaCreditOrderRepaymentApplyModel(object):

    def __init__(self):
        self._action_type = None
        self._category = None
        self._order_info = None
        self._out_order_no = None
        self._repay_amount = None
        self._repay_proof = None
        self._user_id = None

    @property
    def action_type(self):
        return self._action_type

    @action_type.setter
    def action_type(self, value):
        self._action_type = value

    @property
    def category(self):
        return self._category

    @category.setter
    def category(self, value):
        self._category = value

    @property
    def order_info(self):
        return self._order_info

    @order_info.setter
    def order_info(self, value):
        self._order_info = value

    @property
    def out_order_no(self):
        return self._out_order_no

    @out_order_no.setter
    def out_order_no(self, value):
        self._out_order_no = value

    @property
    def repay_amount(self):
        return self._repay_amount

    @repay_amount.setter
    def repay_amount(self, value):
        self._repay_amount = value

    @property
    def repay_proof(self):
        return self._repay_proof

    @repay_proof.setter
    def repay_proof(self, value):
        self._repay_proof = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        params = dict()
        if self.action_type:
            if hasattr(self.action_type, 'to_alipay_dict'):
                params['action_type'] = self.action_type.to_alipay_dict()
            else:
                params['action_type'] = self.action_type
        if self.category:
            if hasattr(self.category, 'to_alipay_dict'):
                params['category'] = self.category.to_alipay_dict()
            else:
                params['category'] = self.category
        if self.order_info:
            if hasattr(self.order_info, 'to_alipay_dict'):
                params['order_info'] = self.order_info.to_alipay_dict()
            else:
                params['order_info'] = self.order_info
        if self.out_order_no:
            if hasattr(self.out_order_no, 'to_alipay_dict'):
                params['out_order_no'] = self.out_order_no.to_alipay_dict()
            else:
                params['out_order_no'] = self.out_order_no
        if self.repay_amount:
            if hasattr(self.repay_amount, 'to_alipay_dict'):
                params['repay_amount'] = self.repay_amount.to_alipay_dict()
            else:
                params['repay_amount'] = self.repay_amount
        if self.repay_proof:
            if hasattr(self.repay_proof, 'to_alipay_dict'):
                params['repay_proof'] = self.repay_proof.to_alipay_dict()
            else:
                params['repay_proof'] = self.repay_proof
        if self.user_id:
            if hasattr(self.user_id, 'to_alipay_dict'):
                params['user_id'] = self.user_id.to_alipay_dict()
            else:
                params['user_id'] = self.user_id
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = ZhimaCreditOrderRepaymentApplyModel()
        if 'action_type' in d:
            o.action_type = d['action_type']
        if 'category' in d:
            o.category = d['category']
        if 'order_info' in d:
            o.order_info = d['order_info']
        if 'out_order_no' in d:
            o.out_order_no = d['out_order_no']
        if 'repay_amount' in d:
            o.repay_amount = d['repay_amount']
        if 'repay_proof' in d:
            o.repay_proof = d['repay_proof']
        if 'user_id' in d:
            o.user_id = d['user_id']
        return o
30.152672
75
0.591899
496
3,950
4.387097
0.102823
0.073529
0.068934
0.045037
0.32261
0.264706
0.045037
0.027574
0
0
0
0.000368
0.311392
3,950
130
76
30.384615
0.799632
0.010633
0
0.126126
0
0
0.097848
0
0
0
0
0
0
1
0.153153
false
0
0.018018
0.063063
0.27027
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
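A round-trip sketch for the model above, assuming the alipay-sdk-python package is installed; the field values are hypothetical:

# Serialize via to_alipay_dict, then rebuild via from_alipay_dict.
import simplejson as json
from alipay.aop.api.domain.ZhimaCreditOrderRepaymentApplyModel import \
    ZhimaCreditOrderRepaymentApplyModel

model = ZhimaCreditOrderRepaymentApplyModel()
model.out_order_no = 'ORDER-0001'        # hypothetical values
model.repay_amount = '99.00'
model.user_id = '2088000000000000'

payload = json.dumps(model.to_alipay_dict())
restored = ZhimaCreditOrderRepaymentApplyModel.from_alipay_dict(json.loads(payload))
assert restored.repay_amount == model.repay_amount
print(payload)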
d6b14cabbd3bcb3a92d3115531acfa5bd337cbb3
36,170
py
Python
examples/tof-viewer/external/newton_host_driver/src/host_api/examples/python/generateBootImage.py
rick-yhchen1013/aditof-sdk-rework
911465dd1e05dd0b1c5107197b3b4dc3a10f77f9
[ "MIT" ]
5
2021-09-22T10:04:47.000Z
2022-02-08T17:55:09.000Z
examples/tof-viewer/external/newton_host_driver/src/host_api/examples/python/generateBootImage.py
rick-yhchen1013/aditof-sdk-rework
911465dd1e05dd0b1c5107197b3b4dc3a10f77f9
[ "MIT" ]
99
2021-02-01T12:45:09.000Z
2022-03-08T09:54:13.000Z
examples/tof-viewer/external/newton_host_driver/src/host_api/examples/python/generateBootImage.py
rick-yhchen1013/aditof-sdk-rework
911465dd1e05dd0b1c5107197b3b4dc3a10f77f9
[ "MIT" ]
4
2021-08-09T12:32:55.000Z
2021-12-13T05:38:55.000Z
#!/usr/bin/env python """ Newton Generate Boot Images Usage: generateBootImage.py <target> <file_name> [--sim][--frontdoor][--seed=<seed_value>][--count=<word_count>][--hsp_fw_0p97] Options: -h --help Shows this help message. Target is one of the following: useq_seq_ram : Microsequencer Sequence RAM useq_map_ram : Microsequencer MAP RAM useq_wave_ram : Microsequencer Wave RAM datapath_ram : Gain Correction RAM de_ram : Dump Engine RAM lps1_ram : LPS1 lps2_ram : LPS2 grouped : Grouped data packet """ from __future__ import print_function from __future__ import absolute_import from __future__ import unicode_literals from docopt import docopt import sys import io import os import time import struct import subprocess import re import random import ctypes import newton_control as newton def writeFile( fileName, totalByteCount ): ofile = open( fileName, "w" ) index = 0 while index < len( commandData ): destAddress = commandData[index] index += 1 command = commandData[index] index += 1 attributes = commandData[index] index += 1 byteCount = commandData[index] index += 1 wordCount = int( byteCount / 2 ) if command == newton.CMD_GROUPED_DATA: # Modify the byteCount with totalByteCount byteCount = totalByteCount wordCount = int( byteCount / 2 ) ofile.write( '{0:0{1}X}'.format( destAddress, 4 ) + "\n" ) ofile.write( '{0:0{1}X}'.format( command, 4 ) + "\n" ) ofile.write( '{0:0{1}X}'.format( attributes, 4 ) + "\n" ) ofile.write( '{0:0{1}X}'.format( byteCount, 4 ) + "\n" ) for i in range(0, wordCount): cmdWord = commandData[index] index += 1 ofile.write( '{0:0{1}X}'.format( cmdWord, 4 ) + "\n" ) ofile.close( ) def generateCommandHeader( cmd, attr, destAddr, byteCount ): data16 = destAddr # Destination Address commandData.append( data16 ) data16 = cmd # Mail Box Command commandData.append( data16 ) data16 = attr # Attribute commandData.append( data16 ) data16 = byteCount # Byte Count commandData.append( data16 ) def generateRegisterWriteCommand( writeAddr, writeData, attributes ): attr = attributes | newton.WRITE_ATTR cmd = newton.CMD_REGISTER_CFG byteCount = 4 totalByteCount = byteCount + 8 generateCommandHeader( cmd, attr, 0, byteCount ) # Generate register list. data16 = writeData commandData.append( data16 ) data16 = writeAddr commandData.append( data16 ) return totalByteCount def generateRegisterWriteListCommand( writeList, attributes ): attr = attributes | newton.WRITE_ATTR cmd = newton.CMD_REGISTER_CFG wordCount = len( writeList ) byteCount = int( wordCount * 2 ) totalByteCount = byteCount + 8 generateCommandHeader( cmd, attr, 0, byteCount ) for writeData in writeList: # Generate register list. 
commandData.append( writeData ) return totalByteCount def generateRamWriteCommand( target, wordCount, attributes ): attr = attributes | newton.WRITE_ATTR totalByteCount = 0 if target == "useq_seq_ram": cmd = newton.CMD_SEQ_RAM depth = newton.USEQ_SEQ_RAM_DEPTH bitWidth = newton.USEQ_SEQ_RAM_WIDTH byteWidth = newton.USEQ_SEQ_RAM_WIDTH_BYTES addr = newton.USEQ_REGS_USEQRAMLOADDATA if wordCount == 0: wordCount = random.randrange(32,depth) print( "INFO: Adding CMD_SEQ_RAM sub-command with wordCount = " + str( wordCount ) ) r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() r.LD_RAM_SEL = 0 r.LD_ADDR = 0 byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr ) totalByteCount += byteCount elif target == "useq_wave_ram": cmd = newton.CMD_WAVE_RAM depth = newton.USEQ_WAVE_RAM_DEPTH bitWidth = newton.USEQ_WAVE_RAM_WIDTH byteWidth = newton.USEQ_WAVE_RAM_WIDTH_BYTES addr = newton.USEQ_REGS_USEQRAMLOADDATA if wordCount == 0: wordCount = random.randrange(32,depth) print( "INFO: Adding CMD_WAVE_RAM sub-command with wordCount = " + str( wordCount ) ) r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() r.LD_RAM_SEL = 1 r.LD_ADDR = 0 byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr ) totalByteCount += byteCount elif target == "useq_map_ram": cmd = newton.CMD_MAP_RAM depth = newton.USEQ_MAP_RAM_DEPTH bitWidth = newton.USEQ_MAP_RAM_WIDTH byteWidth = newton.USEQ_MAP_RAM_WIDTH_BYTES addr = newton.USEQ_REGS_USEQRAMLOADDATA if wordCount == 0: wordCount = random.randrange(32,depth) print( "INFO: Adding CMD_MAP_RAM sub-command with wordCount = " + str( wordCount ) ) r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() r.LD_RAM_SEL = 2 r.LD_ADDR = 0 byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr ) totalByteCount += byteCount elif target == "datapath_ram": cmd = newton.CMD_DATAPATH_RAM depth = newton.DATAPATH_RAM_DEPTH bitWidth = newton.DATAPATH_RAM_WIDTH byteWidth = newton.DATAPATH_RAM_WIDTH_BYTES addr = newton.DATAPATH_REGS_IA_WRDATA_REG if wordCount == 0: wordCount = random.randrange(32,depth) print( "INFO: Adding CMD_DATAPATH_RAM sub-command with wordCount = " + str( wordCount ) ) writeList = [] r1 = newton.ADI_DATAPATH_IA_SELECT_s() r1.IA_ENA = 1 writeList.append( r1.VALUE16 ) writeList.append( newton.DATAPATH_REGS_IA_SELECT ) r2 = newton.ADI_DATAPATH_IA_ADDR_REG_s() r2.IA_START_ADDR = 0 writeList.append( r2.VALUE16 ) writeList.append( newton.DATAPATH_REGS_IA_ADDR_REG ) byteCount = generateRegisterWriteListCommand( writeList, attr ) totalByteCount += byteCount elif target == "de_ram": cmd = newton.CMD_DUMP_ENGINE_RAM depth = newton.DE_RAM_DEPTH bitWidth = newton.DE_RAM_WIDTH byteWidth = newton.DE_RAM_WIDTH_BYTES addr = newton.DE_REGS_DE_IA_WRDATA_REG if wordCount == 0: wordCount = random.randrange(32,depth) print( "INFO: Adding CMD_DUMP_ENGINE_RAM sub-command with wordCount = " + str( wordCount ) ) writeList = [] r1 = newton.ADI_DE_REGS_YODA_DE_IA_SELECT_s() r1.RAM = 1 writeList.append( r1.VALUE16 ) writeList.append( newton.DE_REGS_DE_IA_SELECT ) r2 = newton.ADI_DE_REGS_YODA_DE_IA_ADDR_REG_s() r2.RAM_ADDR = 0 writeList.append( r2.VALUE16 ) writeList.append( newton.DE_REGS_DE_IA_ADDR_REG ) byteCount = generateRegisterWriteListCommand( writeList, attr ) totalByteCount += byteCount elif target == "lps1_ram": cmd = newton.CMD_LPS1_RAM depth = newton.LPS1_RAM_DEPTH bitWidth = newton.LPS1_RAM_WIDTH byteWidth = newton.LPS1_RAM_WIDTH_BYTES addr = newton.LPS1_REGS_LPSRAMDATA if wordCount == 0: wordCount 
= random.randrange(32,depth) print( "INFO: Adding CMD_LPS1_RAM sub-command with wordCount = " + str( wordCount ) ) writeList = [] r1 = newton.ADI_LPS_REGS_YODA_LPSRAMRDCMD_s() r1.LPS_RAM_READ_EN = 0 r1.LPS_RAM_READ_RDY = 0 writeList.append( r1.VALUE16 ) writeList.append( newton.LPS1_REGS_LPSRAMRDCMD ) r2 = newton.ADI_LPS_REGS_YODA_LPSRAMADDR_s() r2.LPS_RAM_ADDR = 0 writeList.append( r2.VALUE16 ) writeList.append( newton.LPS1_REGS_LPSRAMADDR ) byteCount = generateRegisterWriteListCommand( writeList, attr ) totalByteCount += byteCount elif target == "lps2_ram": cmd = newton.CMD_LPS2_RAM depth = newton.LPS2_RAM_DEPTH bitWidth = newton.LPS2_RAM_WIDTH byteWidth = newton.LPS2_RAM_WIDTH_BYTES addr = newton.LPS2_REGS_LPSRAMDATA if wordCount == 0: wordCount = random.randrange(32,depth) print( "INFO: Adding CMD_LPS2_RAM sub-command with wordCount = " + str( wordCount ) ) writeList = [] r1 = newton.ADI_LPS_REGS_YODA_LPSRAMRDCMD_s() r1.LPS_RAM_READ_EN = 0 r1.LPS_RAM_READ_RDY = 0 writeList.append( r1.VALUE16 ) writeList.append( newton.LPS2_REGS_LPSRAMRDCMD ) r2 = newton.ADI_LPS_REGS_YODA_LPSRAMADDR_s() r2.LPS_RAM_ADDR = 0 writeList.append( r2.VALUE16 ) writeList.append( newton.LPS2_REGS_LPSRAMADDR ) byteCount = generateRegisterWriteListCommand( writeList, attr ) totalByteCount += byteCount wordCount = wordCount & 0xfffe byteCount = wordCount * byteWidth totalByteCount += byteCount + 8 generateCommandHeader( cmd, attr, addr, byteCount ) for i in range(0, wordCount): ramWord = random.getrandbits( bitWidth ) if bitWidth <= 16: data16 = ramWord commandData.append( data16 ) elif bitWidth <= 32: data16 = ramWord & 0xffff commandData.append( data16 ) data16 = (ramWord >> 16) & 0xffff commandData.append( data16 ) elif bitWidth <= 64: data16 = ramWord & 0xffff commandData.append( data16 ) data16 = (ramWord >> 16) & 0xffff commandData.append( data16 ) data16 = (ramWord >> 32) & 0xffff commandData.append( data16 ) data16 = (ramWord >> 48) & 0xffff commandData.append( data16 ) return totalByteCount def generateGroupedCommand( target, count ): attr = newton.GROUPED_ATTR | newton.WRITE_ATTR cmd = newton.CMD_GROUPED_DATA totalByteCount = 0 generateCommandHeader( cmd, attr, 0, totalByteCount ) # Actual type count filled in later by the writeFile routine. print( "INFO: Generating grouped command ..." 
) if count == 0 or count > newton.USEQ_SEQ_RAM_DEPTH: wordCount = newton.USEQ_SEQ_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "useq_seq_ram", wordCount, attr ) totalByteCount += byteCount if count == 0 or count > newton.USEQ_WAVE_RAM_DEPTH: wordCount = newton.USEQ_WAVE_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "useq_wave_ram", wordCount, attr ) totalByteCount += byteCount if count == 0 or count > newton.USEQ_MAP_RAM_DEPTH: wordCount = newton.USEQ_MAP_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "useq_map_ram", wordCount, attr ) totalByteCount += byteCount if count == 0 or count > newton.DATAPATH_RAM_DEPTH: wordCount = newton.DATAPATH_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "datapath_ram", wordCount, attr ) totalByteCount += byteCount if count == 0 or count > newton.DE_RAM_DEPTH: wordCount = newton.DE_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "de_ram", wordCount, attr ) totalByteCount += byteCount if count == 0 or count > newton.LPS1_RAM_DEPTH: wordCount = newton.LPS1_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "lps1_ram", wordCount, attr ) totalByteCount += byteCount if count == 0 or count > newton.LPS2_RAM_DEPTH: wordCount = newton.LPS2_RAM_DEPTH else: wordCount = count byteCount = generateRamWriteCommand( "lps2_ram", wordCount, attr ) totalByteCount += byteCount return totalByteCount def processRegisterFileList( file_name, attributes ): cmd = newton.CMD_REGISTER_CFG attr = attributes | newton.WRITE_ATTR totalByteCount = 0 print( "INFO:: Reading file " + file_name + " ..." ) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: if re.search( r'^\w+,\w+', line ): items = line.split( "," ) elif re.search( r'^\w+\s+\w+', line ): items = line.split( " " ) address = items[0].upper() data = items[1].upper() address = re.sub( r"0X", r"", address ) data = re.sub( r"0X", r"", data ) addr_int = int( address, 16 ) data_int = int( data, 16 ) if addr_int == newton.DE_REGS_DE_IA_ADDR_REG: deRamAddress = data_int elif addr_int == newton.DE_REGS_DE_IA_WRDATA_REG: deRamAddress += 1 elif addr_int == newton.USEQ_REGS_USEQRAMLOADADDR: seqRamAddress = data_int elif addr_int == newton.USEQ_REGS_USEQRAMLOADDATA: seqRamAddress += 1 else: registerWrite = {} registerWrite["address"] = int( address, 16 ) registerWrite["data"] = int( data, 16 ) if hsp_fw_0p97 == True: if registerWrite["address"] == 0x000c: print( "INFO: Skipping useqControlRegister write, data = " + hex(registerWrite["data"]) ); elif registerWrite["address"] == 0x0014: print( "INFO: Modifying write to the digPwrDown to make sure the LPS1 and DE blocks are enabled, data = " + hex(registerWrite["data"]) ); registerWrite["data"] = registerWrite["data"] & 0xbffe registerWriteList.append( registerWrite ) else: registerWriteList.append( registerWrite ) else: registerWriteList.append( registerWrite ) line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) def addRegisterWriteList( attributes ): cmd = newton.CMD_REGISTER_CFG attr = attributes | newton.WRITE_ATTR totalByteCount = 0 listSize = len( registerWriteList ) commandCount = newton.MAX_REG_LIST if listSize <= newton.MAX_REG_LIST: commandCount = 1 elif (listSize % newton.MAX_REG_LIST) == 0: commandCount = listSize // newton.MAX_REG_LIST else: commandCount = listSize // newton.MAX_REG_LIST + 1 if listSize > 0: totalByteCount += commandCount * 8 totalByteCount += listSize * 
4 print( "INFO:: Register list size = " + str( listSize ) ) index = 0 for i in range(0, int( commandCount )): if i < (commandCount - 1): regCount = newton.MAX_REG_LIST else: regCount = listSize - index if listSize > 0: generateCommandHeader( cmd, attr, 0, regCount * 4 ) for j in range(0, regCount): registerWrite = registerWriteList[index] index += 1 # Generate register list. data16 = registerWrite["data"] commandData.append( data16 ) data16 = registerWrite["address"] commandData.append( data16 ) return totalByteCount def process_wave_reg_txt( file_name, attributes ): cmd = newton.CMD_WAVE_RAM attr = attributes | newton.WRITE_ATTR totalByteCount = 0 wave_ram = {} for i in range(0, newton.USEQ_WAVE_RAM_DEPTH): wave_ram[i] = 0 r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() memoryAddress = 0 print( "INFO:: Reading file " + file_name + " ..." ) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: m = re.search( r'(\w+)\s+(\w+)', line ) address = m.group(1).upper() data = m.group(2).upper() address = re.sub( r"0X", r"", address ) data = re.sub( r"0X", r"", data ) address = int( address, 16 ) data = int( data, 16 ) if address == newton.USEQ_REGS_USEQRAMLOADADDR: r.VALUE16 = data memoryAddress = r.LD_ADDR elif address == newton.USEQ_REGS_USEQRAMLOADDATA: wave_ram[memoryAddress] = data memoryAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) cmd = newton.CMD_WAVE_RAM depth = newton.USEQ_WAVE_RAM_DEPTH bitWidth = newton.USEQ_WAVE_RAM_WIDTH byteWidth = newton.USEQ_WAVE_RAM_WIDTH_BYTES addr = newton.USEQ_REGS_USEQRAMLOADDATA r.LD_RAM_SEL = 1 r.LD_ADDR = 0 byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr ) totalByteCount += byteCount byteCount = depth * byteWidth totalByteCount += byteCount + 8 generateCommandHeader( cmd, attr, addr, byteCount ) for i in range(0, newton.USEQ_WAVE_RAM_DEPTH): commandData.append( wave_ram[i] ) return totalByteCount def extractRamAccesses( file_name, attributes ): global de_ram_temp global seq_ram_temp global wave_ram_temp global map_ram_temp totalByteCount = 0 de_ram_temp = {} seq_ram_temp = {} wave_ram_temp = {} map_ram_temp = {} r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() seqRamAddress = 0 r1 = newton.ADI_DE_REGS_YODA_DE_IA_SELECT_s() r1.RAM = 1 r2 = newton.ADI_DE_REGS_YODA_DE_IA_ADDR_REG_s() r2.RAM_ADDR = 0 deRamAddress = 0 hwordCount = 0 temp = 0 print( "INFO: Reading file " + file_name + " ..." 
) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: m = re.search( r'(\w+)\s+(\w+)', line ) address = m.group(1).upper() data = m.group(2).upper() address = re.sub( r"0X", r"", address ) data = re.sub( r"0X", r"", data ) address = int( address, 16 ) data = int( data, 16 ) if address == newton.DE_REGS_DE_IA_ADDR_REG: r2.VALUE16 = data deRamAddress = r2.RAM_ADDR elif address == newton.DE_REGS_DE_IA_WRDATA_REG: temp += (data << (16*hwordCount)) de_ram_temp[deRamAddress] = temp if hwordCount == 3: hwordCount = 0 deRamAddress += 1 temp = 0 else: hwordCount += 1 elif address == newton.USEQ_REGS_USEQRAMLOADADDR: r.VALUE16 = data seqRamAddress = r.LD_ADDR seqRamSel = r.LD_RAM_SEL elif address == newton.USEQ_REGS_USEQRAMLOADDATA: if seqRamSel == 0: seq_ram_temp[seqRamAddress] = data elif seqRamSel == 1: wave_ram_temp[seqRamAddress] = data elif seqRamSel == 2: map_ram_temp[seqRamAddress] = data seqRamAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) return totalByteCount def processSeqRamFile( attributes ): cmd = newton.CMD_SEQ_RAM attr = attributes | newton.WRITE_ATTR totalByteCount = 0 seq_ram = {} for i in range(0, newton.USEQ_SEQ_RAM_DEPTH): seq_ram[i] = 0 memoryAddress = 0 file_name = "seq_ram.txt" print( "INFO: Reading file " + file_name + " ..." ) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: data = line.upper() data = int( data, 16 ) & newton.USEQ_SEQ_RAM_MASK # Parity is the MSB seq_ram[memoryAddress] = data memoryAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) for memoryAddress in seq_ram_temp.keys(): seq_ram[memoryAddress] = seq_ram_temp[memoryAddress] cmd = newton.CMD_SEQ_RAM depth = newton.USEQ_SEQ_RAM_DEPTH bitWidth = newton.USEQ_SEQ_RAM_WIDTH byteWidth = newton.USEQ_SEQ_RAM_WIDTH_BYTES addr = newton.USEQ_REGS_USEQRAMLOADDATA r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() r.LD_RAM_SEL = 0 r.LD_ADDR = 0 byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr ) totalByteCount += byteCount byteCount = depth * byteWidth totalByteCount += byteCount + 8 generateCommandHeader( cmd, attr, addr, byteCount ) for i in range(0, newton.USEQ_SEQ_RAM_DEPTH): commandData.append( seq_ram[i] ) return totalByteCount # Read Wave RM contents from wave_ram.txt and wave_reg.txt files def processWaveRamFile( attributes ): cmd = newton.CMD_WAVE_RAM attr = attributes | newton.WRITE_ATTR totalByteCount = 0 wave_ram = {} for i in range(0, newton.USEQ_WAVE_RAM_DEPTH): wave_ram[i] = 0 memoryAddress = 0 file_name = "wave_ram.txt" print( "INFO: Reading file " + file_name + " ..." ) with open( file_name ) as ifile: line = ifile.readline() line = re.sub( r"\n", r"", line ) while line: data = line.upper() data = int( data, 16 ) & newton.USEQ_WAVE_RAM_MASK # Parity is the MSB wave_ram[memoryAddress] = data memoryAddress += 1 line = ifile.readline() line = re.sub( r"\n", r"", line ) ifile.close( ) for memoryAddress in wave_ram_temp.keys(): wave_ram[memoryAddress] = wave_ram_temp[memoryAddress] r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s() file_name = "wave_reg.txt" print( "INFO: Reading file " + file_name + " ..." 
)
    with open( file_name ) as ifile:
        line = ifile.readline()
        line = re.sub( r"\n", r"", line )
        while line:
            m = re.search( r'(\w+)\s+(\w+)', line )
            address = m.group(1).upper()
            data = m.group(2).upper()
            address = re.sub( r"0X", r"", address )
            data = re.sub( r"0X", r"", data )
            address = int( address, 16 )
            data = int( data, 16 )
            if address == newton.USEQ_REGS_USEQRAMLOADADDR:
                r.VALUE16 = data
                memoryAddress = r.LD_ADDR
            elif address == newton.USEQ_REGS_USEQRAMLOADDATA:
                wave_ram[memoryAddress] = data
                memoryAddress += 1
            line = ifile.readline()
            line = re.sub( r"\n", r"", line )
    ifile.close( )

    cmd = newton.CMD_WAVE_RAM
    depth = newton.USEQ_WAVE_RAM_DEPTH
    bitWidth = newton.USEQ_WAVE_RAM_WIDTH
    byteWidth = newton.USEQ_WAVE_RAM_WIDTH_BYTES
    addr = newton.USEQ_REGS_USEQRAMLOADDATA

    r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s()
    r.LD_RAM_SEL = 1
    r.LD_ADDR = 0
    byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr )
    totalByteCount += byteCount

    byteCount = depth * byteWidth
    totalByteCount += byteCount + 8
    generateCommandHeader( cmd, attr, addr, byteCount )
    for i in range(0, newton.USEQ_WAVE_RAM_DEPTH):
        commandData.append( wave_ram[i] )

    return totalByteCount

def processMapRamFile( attributes ):
    cmd = newton.CMD_MAP_RAM
    attr = attributes | newton.WRITE_ATTR
    totalByteCount = 0

    map_ram = {}
    for i in range(0, newton.USEQ_MAP_RAM_DEPTH):
        map_ram[i] = 0

    memoryAddress = 0
    file_name = "map_ram.txt"
    print( "INFO: Reading file " + file_name + " ..." )
    with open( file_name ) as ifile:
        line = ifile.readline()
        line = re.sub( r"\n", r"", line )
        while line:
            data = line.upper()
            data = int( data, 16 ) & newton.USEQ_SEQ_RAM_MASK  # Parity is the MSB
            map_ram[memoryAddress] = data
            memoryAddress += 1
            line = ifile.readline()
            line = re.sub( r"\n", r"", line )
    ifile.close( )

    for memoryAddress in map_ram_temp.keys():
        map_ram[memoryAddress] = map_ram_temp[memoryAddress]

    cmd = newton.CMD_MAP_RAM
    depth = newton.USEQ_MAP_RAM_DEPTH
    bitWidth = newton.USEQ_MAP_RAM_WIDTH
    byteWidth = newton.USEQ_MAP_RAM_WIDTH_BYTES
    addr = newton.USEQ_REGS_USEQRAMLOADDATA

    r = newton.ADI_USEQ_REGS_MAP1_USEQRAMLOADADDR_s()
    r.LD_RAM_SEL = 2
    r.LD_ADDR = 0
    byteCount = generateRegisterWriteCommand( newton.USEQ_REGS_USEQRAMLOADADDR, r.VALUE16, attr )
    totalByteCount += byteCount

    byteCount = depth * byteWidth
    totalByteCount += byteCount + 8
    generateCommandHeader( cmd, attr, addr, byteCount )
    for i in range(0, newton.USEQ_MAP_RAM_DEPTH):
        commandData.append( map_ram[i] )

    return totalByteCount

def processDatapathMemoryFiles( attributes ):
    cmd = newton.CMD_DATAPATH_RAM
    attr = attributes | newton.WRITE_ATTR
    totalByteCount = 0

    datapath_ram = {}
    for i in range(0, newton.DATAPATH_RAM_DEPTH):
        datapath_ram[i] = 0

    r1 = newton.ADI_DATAPATH_IA_SELECT_s()
    r2 = newton.ADI_DATAPATH_IA_ADDR_REG_s()
    r2.IA_START_ADDR = 0

    memoryAddress = 0
    for i in range(0, 16):
        file_name = "PCM_Correction_val_" + str( i ) + ".txt"
        print( "INFO: Reading file " + file_name + " ..." )
        with open( file_name ) as ifile:
            line = ifile.readline()
            line = re.sub( r"\n", r"", line )
            while line:
                data = line.upper()
                data = int( data, 16 ) & newton.DATAPATH_RAM_MASK  # Parity is the MSB
                datapath_ram[memoryAddress] = data
                memoryAddress += 1
                line = ifile.readline()
                line = re.sub( r"\n", r"", line )
        ifile.close( )

    cmd = newton.CMD_DATAPATH_RAM
    depth = newton.DATAPATH_RAM_DEPTH
    bitWidth = newton.DATAPATH_RAM_WIDTH
    byteWidth = newton.DATAPATH_RAM_WIDTH_BYTES
    addr = newton.DATAPATH_REGS_IA_WRDATA_REG

    writeList = []
    r1.IA_ENA = 1
    writeList.append( r1.VALUE16 )
    writeList.append( newton.DATAPATH_REGS_IA_SELECT )
    r2.IA_START_ADDR = 0
    writeList.append( r2.VALUE16 )
    writeList.append( newton.DATAPATH_REGS_IA_ADDR_REG )
    byteCount = generateRegisterWriteListCommand( writeList, attr )
    totalByteCount += byteCount

    byteCount = depth * byteWidth
    totalByteCount += byteCount + 8
    generateCommandHeader( cmd, attr, addr, byteCount )
    for i in range(0, newton.DATAPATH_RAM_DEPTH):
        commandData.append( datapath_ram[i] )

    r1.IA_ENA = 0
    byteCount = generateRegisterWriteCommand( newton.DATAPATH_REGS_IA_SELECT, r1.VALUE16, attr )
    totalByteCount += byteCount

    return totalByteCount

def processDumpEngineMemoryFile( attributes ):
    cmd = newton.CMD_DUMP_ENGINE_RAM
    attr = attributes | newton.WRITE_ATTR
    totalByteCount = 0

    de_ram = {}
    for i in range(0, newton.DE_RAM_DEPTH):
        de_ram[i] = 0

    r1 = newton.ADI_DE_REGS_YODA_DE_IA_SELECT_s()
    r1.RAM = 1
    r2 = newton.ADI_DE_REGS_YODA_DE_IA_ADDR_REG_s()
    r2.RAM_ADDR = 0

    memoryAddress = 0
    file_name = "De_config_all_bkdoor.hex"
    print( "INFO: Reading file " + file_name + " ..." )
    with open( file_name ) as ifile:
        line = ifile.readline()
        line = re.sub( r"\n", r"", line )
        while line:
            data = line.upper()
            data = int( data, 16 ) & newton.DE_RAM_MASK  # Parity is the MSB
            de_ram[memoryAddress] = data
            memoryAddress += 1
            line = ifile.readline()
            line = re.sub( r"\n", r"", line )
    ifile.close( )

    for memoryAddress in de_ram_temp.keys():
        de_ram[memoryAddress] = de_ram_temp[memoryAddress]

    cmd = newton.CMD_DUMP_ENGINE_RAM
    depth = newton.DE_RAM_DEPTH
    bitWidth = newton.DE_RAM_WIDTH
    byteWidth = newton.DE_RAM_WIDTH_BYTES
    addr = newton.DE_REGS_DE_IA_WRDATA_REG

    writeList = []
    r1.RAM = 1
    writeList.append( r1.VALUE16 )
    writeList.append( newton.DE_REGS_DE_IA_SELECT )
    r2.RAM_ADDR = 0
    writeList.append( r2.VALUE16 )
    writeList.append( newton.DE_REGS_DE_IA_ADDR_REG )
    byteCount = generateRegisterWriteListCommand( writeList, attr )
    totalByteCount += byteCount

    byteCount = depth * byteWidth
    totalByteCount += byteCount + 8
    generateCommandHeader( cmd, attr, addr, byteCount )
    for i in range(0, newton.DE_RAM_DEPTH):
        commandData.append( de_ram[i] & 0xFFFF )
        commandData.append( (de_ram[i] >> 16) & 0xFFFF )
        commandData.append( (de_ram[i] >> 32) & 0xFFFF )
        commandData.append( (de_ram[i] >> 48) & 0xFFFF )

    return totalByteCount

def processLps1RamFile( attributes ):
    cmd = newton.CMD_LPS1_RAM
    attr = attributes | newton.WRITE_ATTR
    totalByteCount = 0

    lps1_ram = {}
    for i in range(0, newton.LPS1_RAM_DEPTH):
        lps1_ram[i] = 0

    memoryAddress = 0
    file_name = "lps1_ram.hex"
    print( "INFO: Reading file " + file_name + " ..." )
    with open( file_name ) as ifile:
        line = ifile.readline()
        line = re.sub( r"\n", r"", line )
        while line:
            data = line.upper()
            data = int( data, 16 ) & newton.LPS1_RAM_MASK  # Parity is the MSB
            lps1_ram[memoryAddress] = data
            memoryAddress += 1
            line = ifile.readline()
            line = re.sub( r"\n", r"", line )
    ifile.close( )

    cmd = newton.CMD_LPS1_RAM
    depth = newton.LPS1_RAM_DEPTH
    bitWidth = newton.LPS1_RAM_WIDTH
    byteWidth = newton.LPS1_RAM_WIDTH_BYTES
    addr = newton.LPS1_REGS_LPSRAMDATA

    writeList = []
    r1 = newton.ADI_LPS_REGS_YODA_LPSRAMRDCMD_s()
    r1.LPS_RAM_READ_EN = 0
    r1.LPS_RAM_READ_RDY = 0
    writeList.append( r1.VALUE16 )
    writeList.append( newton.LPS1_REGS_LPSRAMRDCMD )
    r2 = newton.ADI_LPS_REGS_YODA_LPSRAMADDR_s()
    r2.LPS_RAM_ADDR = 0
    writeList.append( r2.VALUE16 )
    writeList.append( newton.LPS1_REGS_LPSRAMADDR )
    byteCount = generateRegisterWriteListCommand( writeList, attr )
    totalByteCount += byteCount

    byteCount = depth * byteWidth
    totalByteCount += byteCount + 8
    generateCommandHeader( cmd, attr, addr, byteCount )
    for i in range(0, newton.LPS1_RAM_DEPTH):
        commandData.append( lps1_ram[i] & 0xFFFF )
        commandData.append( (lps1_ram[i] >> 16) & 0x00FF )

    return totalByteCount

def processLps2RamFile( attributes ):
    cmd = newton.CMD_LPS2_RAM
    attr = attributes | newton.WRITE_ATTR
    totalByteCount = 0

    lps2_ram = {}
    for i in range(0, newton.LPS2_RAM_DEPTH):
        lps2_ram[i] = 0

    memoryAddress = 0
    file_name = "lps2_ram.hex"
    print( "INFO: Reading file " + file_name + " ..." )
    with open( file_name ) as ifile:
        line = ifile.readline()
        line = re.sub( r"\n", r"", line )
        while line:
            data = line.upper()
            data = int( data, 16 ) & newton.LPS2_RAM_MASK  # Parity is the MSB
            lps2_ram[memoryAddress] = data
            memoryAddress += 1
            line = ifile.readline()
            line = re.sub( r"\n", r"", line )
    ifile.close( )

    cmd = newton.CMD_LPS2_RAM
    depth = newton.LPS2_RAM_DEPTH
    bitWidth = newton.LPS2_RAM_WIDTH
    byteWidth = newton.LPS2_RAM_WIDTH_BYTES
    addr = newton.LPS2_REGS_LPSRAMDATA

    writeList = []
    r1 = newton.ADI_LPS_REGS_YODA_LPSRAMRDCMD_s()
    r1.LPS_RAM_READ_EN = 0
    r1.LPS_RAM_READ_RDY = 0
    writeList.append( r1.VALUE16 )
    writeList.append( newton.LPS2_REGS_LPSRAMRDCMD )
    r2 = newton.ADI_LPS_REGS_YODA_LPSRAMADDR_s()
    r2.LPS_RAM_ADDR = 0
    writeList.append( r2.VALUE16 )
    writeList.append( newton.LPS2_REGS_LPSRAMADDR )
    byteCount = generateRegisterWriteListCommand( writeList, attr )
    totalByteCount += byteCount

    byteCount = depth * byteWidth
    totalByteCount += byteCount + 8
    generateCommandHeader( cmd, attr, addr, byteCount )
    for i in range(0, newton.LPS2_RAM_DEPTH):
        commandData.append( lps2_ram[i] & 0xFFFF )
        commandData.append( (lps2_ram[i] >> 16) & 0x00FF )

    return totalByteCount

def generateGroupedCommandSimulation( frontdoor ):
    global registerWriteList

    attr = newton.GROUPED_ATTR | newton.WRITE_ATTR
    totalByteCount = 0
    registerWriteList = []

    if frontdoor == True:
        byteCount = processDatapathMemoryFiles( newton.WRITE_ATTR )
        byteCount = processLps2RamFile( newton.WRITE_ATTR )
        generateCommandHeader( newton.CMD_OPERATING_MODE, newton.MBX_UNSIGNED_SEQ_WFI, 0, 0 )
        generateCommandHeader( newton.CMD_GROUPED_DATA, attr, 0, totalByteCount )  # Actual type count filled in later by the writeFile routine.
        byteCount = extractRamAccesses( "test_csv.txt", attr )
        totalByteCount += byteCount
        processRegisterFileList( "De_config_all_bkdoor.csv", attr )
        processRegisterFileList( "test_csv.txt", attr )
        processRegisterFileList( "config_reg.txt", attr )
        byteCount = addRegisterWriteList( attr )
        totalByteCount += byteCount
        byteCount = processSeqRamFile( attr )
        totalByteCount += byteCount
        byteCount = processMapRamFile( attr )
        totalByteCount += byteCount
        byteCount = processWaveRamFile( attr )
        totalByteCount += byteCount
        byteCount = processDumpEngineMemoryFile( attr )
        totalByteCount += byteCount
        byteCount = processLps1RamFile( attr )
        totalByteCount += byteCount
    else:
        generateCommandHeader( newton.CMD_OPERATING_MODE, newton.WRITE_ATTR, 0, 0 )
        generateCommandHeader( newton.CMD_GROUPED_DATA, attr, 0, totalByteCount )  # Actual type count filled in later by the writeFile routine.
        processRegisterFileList( "De_config_all_bkdoor.csv", attr )
        processRegisterFileList( "test_csv.txt", attr )
        byteCount = process_wave_reg_txt( "wave_reg.txt", attr )
        totalByteCount += byteCount
        byteCount = addRegisterWriteList( attr )
        totalByteCount += byteCount

    return totalByteCount

if __name__ == "__main__":
    global commandData
    global simFilesFrontDoor
    global isGroupedCommand
    global hsp_fw_0p97

    maxSpiBytes = 256
    wordCount = 0
    seed = 1
    frontdoor = False
    isGroupedCommand = False
    commandData = []
    hsp_fw_0p97 = False
    simFilesFrontDoor = {}
    simFiles = {}

    args = docopt(__doc__, version='0.1')

    if args['--count']:
        wordCount = int( args['--count'] )
    if args['--seed']:
        seed = int( args['--seed'] )
    if args['--frontdoor']:
        frontdoor = True
    if args['--hsp_fw_0p97']:
        hsp_fw_0p97 = True

    random.seed( seed )

    simFilesFrontDoor["De_config_all_bkdoor.csv"] = "De_config_all_bkdoor.csv"
    simFilesFrontDoor["wave_reg.txt"] = "wave_reg.txt"
    simFilesFrontDoor["test_csv.txt"] = "test_csv.txt"

    if args['<target>'] == "grouped":
        isGroupedCommand = True
        if args['--sim']:
            totalByteCount = generateGroupedCommandSimulation( frontdoor )
        else:
            totalByteCount = generateGroupedCommand( args['<target>'], wordCount )
    else:
        totalByteCount = generateRamWriteCommand( args['<target>'], wordCount, 0 )

    writeFile( args['<file_name>'], totalByteCount )
    sys.exit( 0 )
32.122558
161
0.617086
4,108
36,170
5.205696
0.066212
0.022446
0.040402
0.021604
0.737573
0.697872
0.647884
0.605518
0.569511
0.558382
0
0.023164
0.291042
36,170
1,125
162
32.151111
0.810787
0.030578
0
0.674883
1
0
0.047323
0.003425
0
0
0.003083
0
0
1
0.021127
false
0
0.016432
0
0.055164
0.026995
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d6b2ed2144802e9da93811adc368ed32fd611400
1,503
py
Python
chconsole/storage/json_storage.py
mincode/chconsole
ab8ca8a38bd47ecb1aa7ff90225f57e042aaad6e
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
chconsole/storage/json_storage.py
mincode/chconsole
ab8ca8a38bd47ecb1aa7ff90225f57e042aaad6e
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
chconsole/storage/json_storage.py
mincode/chconsole
ab8ca8a38bd47ecb1aa7ff90225f57e042aaad6e
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
import os, json

__author__ = 'Manfred Minimair <manfred@minimair.org>'


class JSONStorage:
    """
    File storage for a dictionary.
    """
    file = ''  # file name of storage file
    data = None  # data dict
    indent = ' '  # indent prefix for pretty printing json files

    def __init__(self, path, name):
        """
        Initialize.
        :param path: path to the storage file; empty means the current directory.
        :param name: file name, json file
        """
        if path:
            os.makedirs(path, exist_ok=True)
        self.file = os.path.normpath(os.path.join(path, name))
        try:
            with open(self.file) as data_file:
                self.data = json.load(data_file)
        except FileNotFoundError:
            self.data = dict()
            self.dump()

    def dump(self):
        """
        Dump data into storage file.
        """
        with open(self.file, 'w') as out_file:
            json.dump(self.data, out_file, indent=self.indent)

    def get(self, item):
        """
        Get stored item.
        :param item: name, string, of item to get.
        :return: stored item; raises a KeyError if item does not exist.
        """
        return self.data[item]

    def set(self, item, value):
        """
        Set item's value; causes the data to be dumped into the storage file.
        :param item: name, string of item to set.
        :param value: value to set.
        """
        self.data[item] = value
        self.dump()
28.358491
80
0.558217
190
1,503
4.347368
0.363158
0.048426
0.033898
0.038741
0.065375
0.065375
0.065375
0
0
0
0
0
0.337991
1,503
52
81
28.903846
0.830151
0.348636
0
0.083333
0
0
0.051157
0.026797
0
0
0
0
0
1
0.166667
false
0
0.041667
0
0.416667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6b3222fc09c6aa5941c479f3ee01b911f781151
332
py
Python
scienz/setup.py
Vibrant-Planet/aorist
067e119ef4d0d40802ce74a8e47d882e557ce195
[ "MIT" ]
16
2021-08-14T10:20:16.000Z
2022-03-31T04:19:26.000Z
hub/setup.py
scie-nz/aorist
ac1e31251af7d851c4491a310b417de880b79d09
[ "MIT" ]
5
2021-08-15T23:19:10.000Z
2021-09-26T20:50:41.000Z
scienz/setup.py
Vibrant-Planet/aorist
067e119ef4d0d40802ce74a8e47d882e557ce195
[ "MIT" ]
1
2022-01-06T01:26:24.000Z
2022-01-06T01:26:24.000Z
from setuptools import setup

setup(
    name="scienz",
    version="0.0.1",
    packages=["scienz"],
    zip_safe=False,
    include_package_data=True,
    package_data={"scienz": ["scienz/*"]},
    long_description="""
    Common dataset definitions for aorist package.
    """,
    long_description_content_type="text/x-rst"
)
20.75
50
0.656627
39
332
5.384615
0.74359
0.104762
0
0
0
0
0
0
0
0
0
0.011194
0.192771
332
15
51
22.133333
0.772388
0
0
0
0
0
0.293051
0
0
0
0
0
0
1
0
true
0
0.076923
0
0.076923
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
d6b3ca6a1ac3ed90a5486b776195623690a10478
2,219
py
Python
Trabalho_02/Trabalho/ELM_Q01.py
gabriel-rc-201/Trabalhos-Inteligencia-Computacional
c334f2fd66a31bacab0470b390782ce38dc2a100
[ "MIT" ]
1
2021-07-09T19:32:16.000Z
2021-07-09T19:32:16.000Z
Trabalho_02/Trabalho/ELM_Q01.py
gabriel-rc-201/Trabalhos-Inteligencia-Computacional
c334f2fd66a31bacab0470b390782ce38dc2a100
[ "MIT" ]
null
null
null
Trabalho_02/Trabalho/ELM_Q01.py
gabriel-rc-201/Trabalhos-Inteligencia-Computacional
c334f2fd66a31bacab0470b390782ce38dc2a100
[ "MIT" ]
null
null
null
#!-*- coding: utf8 -*-
#coding: utf-8

"""
Student: Gabriel Ribeiro Camelo
Student ID: 401091
"""

import matplotlib.pyplot as pplt  # plots
import math  # math functions
import re  # regular expressions
import numpy as np  # matrices
from statistics import pstdev  # standard deviation
from scipy import stats  # contains zscore

# Functions for computing R2
subxy = lambda x, y: x - y
multxy = lambda x, y: x * y

def somaYy(y):  # builds the sum of (y - mean(y))^2
    acumulador = 0
    y_media = np.sum(y) / len(y)
    for k in range(len(y)):
        acumulador += (y[k] - y_media)**2
    return acumulador

# Data collection
arq = open("aerogerador.dat", "r")  # open the file that contains the data
x = []  # inputs
y = []  # targets
for line in arq:  # split x from y
    line = line.strip()  # strip the \n
    line = re.sub('\s+', ',', line)  # replace whitespace with commas
    X, Y = line.split(",")  # split on the comma, returning 2 values
    x.append(float(X))
    y.append(float(Y))
arq.close()  # close the file that contains the data

# Z-score normalization
xn = stats.zscore(x)

# add the weight that accounts for the bias
xb = []
for i in range(2250):
    xb.append(-1)
X = np.matrix([xb, xn])  # data matrix with the bias

# Matrix of random weights
def matPesos(qtdNeuronios, qtdAtributos):
    # returns a matrix of random numbers drawn from a normal distribution
    w = np.random.randn(qtdNeuronios, qtdAtributos + 1)
    return w

Neuronios = int(input("Number of neurons: "))
W = matPesos(Neuronios, 1)

# Activation function
phi = lambda u: (1 - math.exp(u)) / (1 + math.exp(u))  # logistic

# Hidden-neuron activations
U = np.array(W@X)
Z = list(map(phi, [valor for linha in U for valor in linha]))
Z = np.array(Z)
Z = Z.reshape(Neuronios, 2250)

# Weight matrix of the output-layer neurons
M = (y@Z.T) @ np.linalg.inv(Z@Z.T)

# Output-neuron activations
D = M@Z

# R2 computation
somaQe = sum(map(multxy, list(map(subxy, y, D)), list(map(subxy, y, D))))
R2 = 1 - (somaQe / somaYy(y))

# Results
print("R2: ", R2)

# plot
pplt.plot(x, D, color='red')
pplt.scatter(x, y, marker="*")
pplt.show()
24.384615
75
0.633168
344
2,219
4.078488
0.456395
0.009979
0.015681
0.01283
0.081967
0.034212
0
0
0
0
0
0.017741
0.237945
2,219
90
76
24.655556
0.811946
0.343398
0
0
0
0
0.038325
0
0
0
0
0
0
1
0.042553
false
0
0.12766
0
0.212766
0.021277
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6b7e37b488f062925a4fc8dd11c3dc8a00b2e5d
4,424
py
Python
tools/weixin.py
GingerWWW/news_spider
51e1437cf9a58071cc5bd2c12f854ae84f96a5d5
[ "MIT" ]
208
2018-02-11T01:58:08.000Z
2022-03-28T07:15:12.000Z
tools/weixin.py
DeteMin/news_spider
9e29525a8bcb2310fca3bb4f9ca4b99b39ecfc9c
[ "MIT" ]
19
2018-04-17T11:03:28.000Z
2022-03-17T00:02:20.000Z
tools/weixin.py
DeteMin/news_spider
9e29525a8bcb2310fca3bb4f9ca4b99b39ecfc9c
[ "MIT" ]
44
2018-02-26T09:47:41.000Z
2022-03-22T13:34:27.000Z
#!/usr/bin/env python
# encoding: utf-8

"""
@author: zhanghe
@software: PyCharm
@file: weixin.py
@time: 2018-02-10 17:55
"""

import re
import time
import hashlib

# from urlparse import urljoin  # PY2
# from urllib.parse import urljoin  # PY3
from future.moves.urllib.parse import urljoin

import execjs

from tools.char import un_escape
from config import current_config
from models.news import FetchResult
from news.items import FetchResultItem
from apps.client_db import db_session_mysql
from maps.platform import WEIXIN, WEIBO

BASE_DIR = current_config.BASE_DIR


def get_finger(content_str):
    """
    :param content_str:
    :return:
    """
    m = hashlib.md5()
    # 'unicode' exists only on PY2; on PY3 the text type is 'str'
    text_type = unicode if str is bytes else str
    m.update(content_str.encode('utf-8') if isinstance(content_str, text_type) else content_str)
    finger = m.hexdigest()
    return finger


def parse_weixin_js_body(html_body, url=''):
    """
    Parse the embedded js
    :param html_body:
    :param url:
    :return:
    """
    rule = r'<script type="text/javascript">.*?(var msgList.*?)seajs.use\("sougou/profile.js"\);.*?</script>'
    js_list = re.compile(rule, re.S).findall(html_body)
    if not js_list:
        print('parse error url: %s' % url)
    return ''.join(js_list)


def parse_weixin_article_id(html_body):
    rule = r'<script nonce="(\d+)" type="text\/javascript">'
    article_id_list = re.compile(rule, re.I).findall(html_body)
    return article_id_list[0]


def add_img_src(html_body):
    rule = r'data-src="(.*?)"'
    img_data_src_list = re.compile(rule, re.I).findall(html_body)
    print(img_data_src_list)
    for img_src in img_data_src_list:
        print(img_src)
        html_body = html_body.replace(img_src, '%(img_src)s" src="%(img_src)s' % {'img_src': img_src})
    return html_body


def get_img_src_list(html_body, host_name='/', limit=None):
    rule = r'src="(%s.*?)"' % host_name
    img_data_src_list = re.compile(rule, re.I).findall(html_body)
    if limit:
        return img_data_src_list[:limit]
    return img_data_src_list


def check_article_title_duplicate(article_title):
    """
    Check for a duplicate title
    :param article_title:
    :return:
    """
    session = db_session_mysql()
    article_id_count = session.query(FetchResult) \
        .filter(FetchResult.platform_id == WEIXIN,
                FetchResult.article_id == get_finger(article_title)) \
        .count()
    return article_id_count


class ParseJsWc(object):
    """
    Parse WeChat feed data
    """
    def __init__(self, js_body):
        self.js_body = js_body
        self._add_js_msg_list_fn()
        self.ctx = execjs.compile(self.js_body)
        # print(self.ctx)

    def _add_js_msg_list_fn(self):
        js_msg_list_fn = """
        function r_msg_list() {
            return msgList.list;
        };
        """
        self.js_body += js_msg_list_fn

    def parse_js_msg_list(self):
        msg_list = self.ctx.call('r_msg_list')
        app_msg_ext_info_list = [i['app_msg_ext_info'] for i in msg_list]
        comm_msg_info_date_time_list = [time.strftime("%Y-%m-%d %H:%M:%S",
                                                      time.localtime(i['comm_msg_info']['datetime']))
                                        for i in msg_list]
        # msg_id_list = [i['comm_msg_info']['id'] for i in msg_list]
        msg_data_list = [
            {
                # 'article_id': '%s_000' % msg_id_list[index],
                'article_id': get_finger(i['title']),
                'article_url': urljoin('https://mp.weixin.qq.com', un_escape(i['content_url'])),
                'article_title': i['title'],
                'article_abstract': i['digest'],
                'article_pub_time': comm_msg_info_date_time_list[index],
            }
            for index, i in enumerate(app_msg_ext_info_list)
        ]
        msg_ext_list = [i['multi_app_msg_item_list'] for i in app_msg_ext_info_list]
        for index_j, j in enumerate(msg_ext_list):
            for index_i, i in enumerate(j):
                msg_data_list.append(
                    {
                        # 'article_id': '%s_%03d' % (msg_id_list[index_j], index_i + 1),
                        'article_id': get_finger(i['title']),
                        'article_url': urljoin('https://mp.weixin.qq.com', un_escape(i['content_url'])),
                        'article_title': i['title'],
                        'article_abstract': i['digest'],
                        'article_pub_time': comm_msg_info_date_time_list[index_j],
                    }
                )
        return msg_data_list
30.937063
141
0.614828
615
4,424
4.092683
0.253659
0.038141
0.023838
0.033373
0.265793
0.232817
0.176798
0.176798
0.176798
0.162892
0
0.00729
0.255877
4,424
142
142
31.15493
0.75729
0.117089
0
0.116279
0
0.011628
0.167411
0.034954
0
0
0
0
0
1
0.104651
false
0
0.127907
0
0.348837
0.034884
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6be2f5eba8bd47a690f4ce82ab27016a99d70c3
126
py
Python
Python/_16_Numpy/_02_Shape_and_Reshape/solution.py
avtomato/HackerRank
96f9cf770ac57d782bb05cccc23764a529bb27ae
[ "MIT" ]
null
null
null
Python/_16_Numpy/_02_Shape_and_Reshape/solution.py
avtomato/HackerRank
96f9cf770ac57d782bb05cccc23764a529bb27ae
[ "MIT" ]
null
null
null
Python/_16_Numpy/_02_Shape_and_Reshape/solution.py
avtomato/HackerRank
96f9cf770ac57d782bb05cccc23764a529bb27ae
[ "MIT" ]
2
2019-09-21T16:04:13.000Z
2020-03-25T09:07:47.000Z
import numpy

arr = map(int, input().strip().split(' '))
d2_arr = numpy.array(list(arr))
d2_arr.shape = (3, 3)
print(d2_arr)
15.75
42
0.650794
22
126
3.590909
0.636364
0.189873
0
0
0
0
0
0
0
0
0
0.045872
0.134921
126
7
43
18
0.678899
0
0
0
0
0
0.007937
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0.2
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
d6bfbf2d051a74fc9be123eeb1ea06791a6a5509
1,359
py
Python
source/Objects/XTRA_Scaling_Parameters.py
afarahi/XTRA
6550b216264abaa3ed705835aca0981f2934e069
[ "MIT" ]
2
2018-11-01T12:38:56.000Z
2019-10-22T07:02:54.000Z
source/Objects/XTRA_Scaling_Parameters.py
afarahi/XTRA
6550b216264abaa3ed705835aca0981f2934e069
[ "MIT" ]
null
null
null
source/Objects/XTRA_Scaling_Parameters.py
afarahi/XTRA
6550b216264abaa3ed705835aca0981f2934e069
[ "MIT" ]
null
null
null
import os.path
import json


class Temprature_scaling:

    def __init__(self, label):
        fname = './parameters/Models/Txm/' + label + '_parameters.json'
        if not os.path.isfile(fname):
            print("Error: %s does not exist; using Tx scaling default parameters." % fname)
            exit(1)
        # fname = './parameters/Models/Txm/default_parameters.xml'
        with open(fname) as fp:
            _param = json.load(fp)
        # Parameters
        self.Norm = _param['a']
        self.M_slope = _param['M_slope']
        self.E_slope = _param['E_slope']
        self.M_p = _param['M_p']
        self.z_p = _param['z_p']
        self.sig = _param['sig']


class Luminocity_scaling:

    def __init__(self, label):
        fname = './parameters/Models/Lxm/' + label + '_parameters.json'
        if not os.path.isfile(fname):
            print("ERROR: %s does not exist; using Lx scaling default parameters." % fname)
            exit(1)
        # fname = './parameters/Models/Lxm/default_parameters.xml'
        with open(fname) as fp:
            _param = json.load(fp)
        # Parameters
        self.Norm = _param['a']
        self.M_slope = _param['M_slope']
        self.E_slope = _param['E_slope']
        self.M_p = _param['M_p']
        self.z_p = _param['z_p']
        self.sig = _param['sig']
22.278689
89
0.56365
173
1,359
4.17341
0.277457
0.083102
0.116343
0.049862
0.905817
0.905817
0.905817
0.905817
0.783934
0.642659
0
0.002116
0.304636
1,359
60
90
22.65
0.761905
0.099338
0
0.733333
0
0
0.211396
0.039637
0
0
0
0
0
1
0.066667
false
0
0.066667
0
0.2
0.066667
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
d6c240b9e93cb9ac402108e702d6f69b681de912
545
py
Python
script_concurent_cpd_example.py
gmetsov/slow-momentum-fast-reversion
861c13ff2ea1d9e0b24523356c574c3ceaacf5bb
[ "MIT" ]
1
2022-03-25T11:37:31.000Z
2022-03-25T11:37:31.000Z
script_concurent_cpd_example.py
gmetsov/slow-momentum-fast-reversion
861c13ff2ea1d9e0b24523356c574c3ceaacf5bb
[ "MIT" ]
null
null
null
script_concurent_cpd_example.py
gmetsov/slow-momentum-fast-reversion
861c13ff2ea1d9e0b24523356c574c3ceaacf5bb
[ "MIT" ]
null
null
null
import multiprocessing
import os

from settings.default import QUANDL_TICKERS, CPD_QUANDL_OUTPUT_FOLDER_DEFAULT

N_WORKERS = len(QUANDL_TICKERS)

if not os.path.exists(CPD_QUANDL_OUTPUT_FOLDER_DEFAULT):
    os.mkdir(CPD_QUANDL_OUTPUT_FOLDER_DEFAULT)

all_processes = [
    f'python script_cpd_example.py "{ticker}" "{os.path.join(CPD_QUANDL_OUTPUT_FOLDER_DEFAULT, ticker + ".csv")}" "1990-01-01" "2019-12-31"'
    for ticker in QUANDL_TICKERS
]

process_pool = multiprocessing.Pool(processes=N_WORKERS)
process_pool.map(os.system, all_processes)
32.058824
140
0.801835
81
545
5.061728
0.493827
0.087805
0.146341
0.204878
0.273171
0
0
0
0
0
0
0.032587
0.099083
545
16
141
34.0625
0.802444
0
0
0
0
0.083333
0.244037
0.126606
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d6c44db13e9cf80092cf19bc3a381fd343fa5385
1,248
py
Python
events/migrations/0049_auto_20210308_1449.py
horacexd/clist
9759dfea97b86514bec9825d2430abc36decacf0
[ "Apache-2.0" ]
166
2019-05-16T23:46:08.000Z
2022-03-31T05:20:23.000Z
events/migrations/0049_auto_20210308_1449.py
horacexd/clist
9759dfea97b86514bec9825d2430abc36decacf0
[ "Apache-2.0" ]
92
2020-01-18T22:51:53.000Z
2022-03-12T01:23:57.000Z
events/migrations/0049_auto_20210308_1449.py
VadVergasov/clist
4afcdfe88250d224043b28efa511749347cec71c
[ "Apache-2.0" ]
23
2020-02-09T17:38:43.000Z
2021-12-09T14:39:07.000Z
# Generated by Django 3.1.7 on 2021-03-08 14:49

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('events', '0048_auto_20210307_1644'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='email_conf',
            field=models.JSONField(blank=True, default=dict),
        ),
        migrations.AlterField(
            model_name='event',
            name='fields_info',
            field=models.JSONField(blank=True, default=dict),
        ),
        migrations.AlterField(
            model_name='event',
            name='limits',
            field=models.JSONField(blank=True, default=dict),
        ),
        migrations.AlterField(
            model_name='event',
            name='logins_paths',
            field=models.JSONField(blank=True, default=dict),
        ),
        migrations.AlterField(
            model_name='event',
            name='standings_urls',
            field=models.JSONField(blank=True, default=dict),
        ),
        migrations.AlterField(
            model_name='participant',
            name='addition_fields',
            field=models.JSONField(blank=True, default=dict),
        ),
    ]
28.363636
61
0.560897
118
1,248
5.813559
0.398305
0.174927
0.218659
0.253644
0.669096
0.669096
0.613703
0.555394
0.555394
0.555394
0
0.036643
0.322115
1,248
43
62
29.023256
0.774232
0.036058
0
0.621622
1
0
0.110741
0.019151
0
0
0
0
0
1
0
false
0
0.027027
0
0.108108
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
d6c46aa5da8525586e87f2b472570c93ecfc9915
369
py
Python
cowin_settings/models/cowin_custom_model_data.py
shangdinvxu/cowinaddons
4e9d69894cd80e5427ccc9bac6c37b8bd67cadd0
[ "MIT" ]
null
null
null
cowin_settings/models/cowin_custom_model_data.py
shangdinvxu/cowinaddons
4e9d69894cd80e5427ccc9bac6c37b8bd67cadd0
[ "MIT" ]
null
null
null
cowin_settings/models/cowin_custom_model_data.py
shangdinvxu/cowinaddons
4e9d69894cd80e5427ccc9bac6c37b8bd67cadd0
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from odoo import models, fields, api


class Cowin_custom_model_data(models.Model):
    _name = 'cowin_settings.custome_model_data'

    # name = fields.Char(string=u'ID')
    model_name = fields.Char(string=u'model ID')

    _sql_constraints = [
        # message translates to: "model_name identifier must be unique!!!"
        ('model_name_key', 'UNIQUE (model_name)', u'model_name标识名不能相同!!!')
    ]
23.0625
75
0.639566
47
369
4.723404
0.553191
0.162162
0.126126
0.18018
0.189189
0
0
0
0
0
0
0.003472
0.219512
369
16
76
23.0625
0.767361
0.146341
0
0
0
0
0.315436
0.110738
0
0
0
0
0
1
0
false
0
0.142857
0
0.714286
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d6c7f2bc5030b6e6a1c3bad3cfc0eb72b6f4212f
172
py
Python
iex_parser_test/__init__.py
Cedric-Kram/iex_parser
b5aebe79b2125681ab0606f4f59ec325aadeebb9
[ "Apache-2.0" ]
15
2019-08-15T07:22:44.000Z
2022-01-18T20:52:22.000Z
iex_parser_test/__init__.py
Cedric-Kram/iex_parser
b5aebe79b2125681ab0606f4f59ec325aadeebb9
[ "Apache-2.0" ]
5
2020-05-29T04:58:34.000Z
2022-01-31T07:27:20.000Z
iex_parser_test/__init__.py
Cedric-Kram/iex_parser
b5aebe79b2125681ab0606f4f59ec325aadeebb9
[ "Apache-2.0" ]
4
2020-09-08T15:03:20.000Z
2022-01-18T13:33:56.000Z
"""iex_parser""" from .parser import Parser from .messages import DEEP_1_0, TOPS_1_6, TOPS_1_5 __all__ = [ 'Parser', 'DEEP_1_0', 'TOPS_1_5', 'TOPS_1_6' ]
14.333333
50
0.645349
29
172
3.241379
0.413793
0.212766
0.12766
0.212766
0.234043
0
0
0
0
0
0
0.088889
0.215116
172
11
51
15.636364
0.607407
0.05814
0
0
0
0
0.192308
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
d6ca0b78e18a4bf98def2fc3af39ef75294bf852
14,129
py
Python
tweezers/io/TxtMpiSource.py
DollSimon/tweezers
7c9b3d781c53f7728526a8242aa9e1d671f15688
[ "BSD-2-Clause" ]
null
null
null
tweezers/io/TxtMpiSource.py
DollSimon/tweezers
7c9b3d781c53f7728526a8242aa9e1d671f15688
[ "BSD-2-Clause" ]
null
null
null
tweezers/io/TxtMpiSource.py
DollSimon/tweezers
7c9b3d781c53f7728526a8242aa9e1d671f15688
[ "BSD-2-Clause" ]
null
null
null
from pathlib import Path
import json
import re
import numpy as np
import os
from collections import OrderedDict

from .TxtMpiFile import TxtMpiFile
from .BaseSource import BaseSource
from tweezers.meta import MetaDict, UnitDict


class TxtMpiSource(BaseSource):
    """
    Data source for \*.txt files from the MPI with the old style header or the new JSON format.
    """

    data = None
    psd = None
    ts = None

    def __init__(self, data=None, psd=None, ts=None):
        """
        Args:
            path (:class:`pathlib.Path`): path to file to read, if the input is of a different type, it is
                given to :class:`pathlib.Path` to try to create an instance
        """

        super().__init__()

        # go through input
        if data:
            self.data = TxtMpiFile(data)
        if psd:
            self.psd = TxtMpiFile(psd)
        if ts:
            self.ts = TxtMpiFile(ts)

    @staticmethod
    def isDataFile(path):
        """
        Checks if a given file is a valid data file and returns its ID and type.

        Args:
            path (:class:`pathlib.Path`): file to check

        Returns:
            :class:`dict` with `id` and `type`
        """

        pPath = Path(path)
        m = re.match(r'^((?P<type>[A-Z]+)_)?(?P<id>(?P<trial>[0-9]{1,3})_Date_[0-9_]{19})\.txt$', pPath.name)
        if m:
            tipe = 'data'
            if m.group('type'):
                tipe = m.group('type').lower()
            res = {'id': m.group('id'), 'trial': m.group('trial'), 'type': tipe, 'path': pPath}
            return res
        else:
            return False

    @classmethod
    def getAllSources(cls, path):
        """
        Get a list of all IDs and their files that are at the given path and its subfolders.

        Args:
            path (:class:`pathlib.Path`): root path for searching

        Returns:
            `dir`
        """

        _path = Path(path)
        # get a list of all files and their properties
        files = cls.getAllFiles(_path)
        sources = OrderedDict()
        # sort files that belong to the same id
        for el in files:
            if el['id'] not in sources.keys():
                sources[el['id']] = cls()
            setattr(sources[el['id']], el['type'], TxtMpiFile(el['path']))
        return sources

    def getMetadata(self):
        """
        Return the metadata of the experiment.

        Returns:
            :class:`tweezers.MetaDict` and :class:`tweezers.UnitDict`
        """

        # keep variables local so they are not stored in memory
        meta, units = self.getDefaultMeta()

        # check each available file for header information
        # sequence is important since later calls overwrite earlier ones so if a header is present in "psd" and
        # "data", the value from "data" will be returned
        if self.ts:
            # get header data from file
            metaTmp, unitsTmp = self.ts.getMetadata()
            # make sure we don't override important stuff that by accident has the same name
            self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)
            self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)
            # set time series unit
            unitsTmp['timeseries'] = 'V'
            # update the dictionaries with newly found values
            meta.update(metaTmp)
            units.update(unitsTmp)
        if self.psd:
            metaTmp, unitsTmp = self.psd.getMetadata()
            # make sure we don't override important stuff that by accident has the same name
            # also, 'nSamples' and 'samplingRate' in reality refer to the underlying timeseries data
            self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)
            self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)
            # set psd unit
            unitsTmp['psd'] = 'V^2 / Hz'
            meta.update(metaTmp)
            units.update(unitsTmp)
        if self.data:
            metaTmp, unitsTmp = self.data.getMetadata()
            # rename variables for the sake of consistency and compatibility with Matlab and because the naming
            # is confusing: samplingRate is actually the acquisition rate since the DAQ card averages the data
            # already; the sampling rate should describe the actual time step between data points, not something
            # else
            if 'recordingRate' in metaTmp:
                self.renameKey('samplingRate', 'acquisitionRate', meta=metaTmp, units=unitsTmp)
                self.renameKey('recordingRate', 'samplingRate', meta=metaTmp, units=unitsTmp)
                self.renameKey('nSamples', 'nAcquisitionsPerSample', meta=metaTmp)
            # add trial number
            metaTmp['trial'] = self.data.getTrialNumber()
            # update dictionaries
            meta.update(metaTmp)
            units.update(unitsTmp)

        # add title string to metadata, used for plots
        self.setTitle(meta)

        # make sure all axes have the beadDiameter
        meta['pmY']['beadDiameter'] = meta['pmX']['beadDiameter']
        units['pmY']['beadDiameter'] = units['pmX']['beadDiameter']
        meta['aodY']['beadDiameter'] = meta['aodX']['beadDiameter']
        units['aodY']['beadDiameter'] = units['aodX']['beadDiameter']

        # add trap names
        meta['traps'] = meta.subDictKeys()

        return meta, units

    def getData(self):
        """
        Return the experiment data.

        Returns:
            :class:`pandas.DataFrame`
        """

        if not self.data:
            raise ValueError('No data file given.')
        return self.data.getData()

    def getDataSegment(self, tmin, tmax, chunkN=10000):
        """
        Returns the data between ``tmin`` and ``tmax``.

        Args:
            tmin (float): minimum data timestamp
            tmax (float): maximum data timestamp
            chunkN (int): number of rows to read per chunk

        Returns:
            :class:`pandas.DataFrame`
        """

        meta, units = self.getMetadata()
        nstart = int(meta.samplingRate * tmin)
        nrows = int(meta.samplingRate * (tmax - tmin))
        return self.data.getDataSegment(nstart, nrows)

    def getPsd(self):
        """
        Return the PSD of the thermal calibration of the experiment as computed by LabView.

        Returns:
            :class:`pandas.DataFrame`
        """

        if not self.psd:
            raise ValueError('No PSD file given.')
        # read psd file which also contains the fitting
        data = self.psd.getData()
        # ignore the fitting
        titles = [title for title, column in data.iteritems() if not title.endswith('Fit')]
        return data[titles]

    def getPsdFit(self):
        """
        Return the LabView fit of the Lorentzian to the PSD.

        Returns:
            :class:`pandas.DataFrame`
        """

        if not self.psd:
            raise ValueError('No PSD file given.')
        # the fit is in the psd file
        data = self.psd.getData()
        # only choose frequency and fit columns
        titles = [title for title, column in data.iteritems() if title.endswith('Fit') or title == 'f']
        return data[titles]

    def getTs(self):
        """
        Return the time series recorded for thermal calibration.

        Returns:
            :class:`pandas.DataFrame`
        """

        if not self.ts:
            raise ValueError('No time series file given.')
        data = self.ts.getData()
        # remove "Diff" from column headers
        columnHeader = [title.split('Diff')[0] for title in data.columns]
        data.columns = columnHeader
        return data

    @staticmethod
    def calculateForce(meta, units, data):
        """
        Calculate forces from Diff signal and calibration values.

        Args:
            meta (:class:`.MetaDict`): metadata
            units (:class:`.UnitDict`): unit metadata
            data (:class:`pandas.DataFrame`): data

        Returns:
            Updated versions of the input parameters

            * meta (:class:`.MetaDict`)
            * units (:class:`.UnitDict`)
            * data (:class:`pandas.DataFrame`)
        """

        # calculate force per trap and axis
        for trap in meta['traps']:
            m = meta[trap]
            data[trap + 'Force'] = (data[trap + 'Diff'] - m['zeroOffset']) \
                                   / m['displacementSensitivity'] \
                                   * m['stiffness']
            units[trap + 'Force'] = 'pN'
        # invert PM force, is not as expected in the raw data
        # data.pmYForce = -data.pmYForce
        # calculate mean force per axis, only meaningful for two traps
        data['xForce'] = (data.pmXForce + data.aodXForce) / 2
        data['yForce'] = (data.pmYForce - data.aodYForce) / 2
        units['xForce'] = 'pN'
        units['yForce'] = 'pN'
        return meta, units, data

    @staticmethod
    def postprocessData(meta, units, data):
        """
        Create time array, calculate forces etc.

        Args:
            meta (:class:`tweezers.MetaDict`): meta dictionary
            units (:class:`tweezers.UnitDict`): units dictionary
            data (:class:`pandas.DataFrame`): data

        Returns:
            Updated versions of the input parameters

            * meta (:class:`.MetaDict`)
            * units (:class:`.UnitDict`)
            * data (:class:`pandas.DataFrame`)
        """

        data['time'] = np.arange(0, meta['dt'] * len(data), meta['dt'])
        units['time'] = 's'
        # call via the class since there is no 'self' in a staticmethod
        meta, units, data = TxtMpiSource.calculateForce(meta, units, data)
        data['distance'] = np.sqrt(data.xDist**2 + data.yDist**2)
        units['distance'] = 'nm'
        return meta, units, data

    def setTitle(self, meta):
        """
        Set the 'title' key in the metadata dictionary based on date and trial number if they are available.
        This string is e.g. used for plots.

        Args:
            meta

        Returns:
            :class:`tweezers.MetaDict`
        """

        title = ''
        try:
            title += meta['date'] + ' '
        except KeyError:
            pass
        try:
            title += meta['time'] + ' '
        except KeyError:
            pass
        try:
            title += meta['trial']
        except KeyError:
            pass
        meta['title'] = title.strip()

    def save(self, container, path=None):
        """
        Writes the data of a :class:`tweezers.TweezersData` to disk. This preserves the `data` and
        `thermalCalibration` folder structure. `path` should be the folder that holds these subfolders.
        If it is empty, the original files will be overwritten.

        Args:
            container (:class:`tweezers.TweezersData`): data to write
            path (:class:`pathlib.Path`): path to a folder for the dataset, if not set, the original data
                will be overwritten
        """

        # only convert when a path was actually given, the fallback below handles None
        if path is not None and not isinstance(path, Path):
            path = Path(path)

        data = ['ts', 'psd', 'data']
        # list of input files and their data from the container, these are the ones we're writing back
        # this is also important for the laziness of the TweezerData object
        files = [[getattr(self, file), getattr(container, file)] for file in data if getattr(self, file)]
        if not files:
            return
        # get root path if not given
        if not path:
            path = files[0][0].path.parents[1]

        meta = container.meta
        meta['units'] = container.units

        # now write all of it
        for file in files:
            filePath = path / file[0].path.parent.name / file[0].path.name
            self.writeData(meta, file[1], filePath)

    def writeData(self, meta, data, path):
        """
        Write experiment data back to a target file. Note that this writes the data in an `UTF-8` encoding.
        Implementing this is not required for a data source but used here to convert the header to JSON.

        Args:
            meta (:class:`tweezers.MetaDict`): meta data to store
            data (:class:`pandas.DataFrame`): data to write back
            path (:class:`pathlib.Path`): path where to write the file
        """

        # ensure directory exists
        try:
            os.makedirs(str(path.parent))
        except FileExistsError:
            pass

        # write the data
        with path.open(mode='w', encoding='utf-8') as f:
            f.write(json.dumps(meta, indent=4, ensure_ascii=False, sort_keys=True))
            f.write("\n\n#### DATA ####\n\n")
        data.to_csv(path_or_buf=str(path), sep='\t', mode='a', index=False)

    def getDefaultMeta(self):
        """
        Set default values for metadata and units. This will be overwritten by values in the data files if
        they exist.

        Returns:
            :class:`tweezers.MetaDict` and :class:`tweezers.UnitDict`
        """

        meta = MetaDict()
        units = UnitDict()
        # meta[self.getStandardIdentifier('tsSamplingRate')] = 80000
        # # units[self.getStandardIdentifier('tsSamplingRate')] = 'Hz'
        return meta, units

    def renameKey(self, oldKey, newKey, meta=None, units=None):
        """
        Rename a key in the meta- and units-dictionaries. Does not work for nested dictionaries.

        Args:
            meta (:class:`tweezers.MetaDict`): meta dictionary
            units (:class:`tweezers.UnitDict`): units dictionary (can be an empty one if not required)
            oldKey (str): key to be renamed
            newKey (str): new key name
        """

        if meta:
            if oldKey not in meta:
                return
            meta.replaceKey(oldKey, newKey)
        if units:
            if oldKey not in units:
                return
            units.replaceKey(oldKey, newKey)
32.038549
119
0.55793
1,608
14,129
4.890547
0.246269
0.019837
0.025432
0.018311
0.212233
0.195956
0.160478
0.151322
0.127416
0.116734
0
0.003633
0.337674
14,129
440
120
32.111364
0.836717
0.386439
0
0.214286
0
0.005495
0.091051
0.015175
0
0
0
0
0
1
0.087912
false
0.021978
0.049451
0
0.241758
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6ca2315271e77162aceabe261d43740120a7244
232
py
Python
nslocapysation/constants.py
resmio/nslocapysation
6643adf359b17212e2e2774de52904123ec2fcfd
[ "MIT" ]
1
2018-11-25T16:56:31.000Z
2018-11-25T16:56:31.000Z
nslocapysation/constants.py
resmio/nslocapysation
6643adf359b17212e2e2774de52904123ec2fcfd
[ "MIT" ]
null
null
null
nslocapysation/constants.py
resmio/nslocapysation
6643adf359b17212e2e2774de52904123ec2fcfd
[ "MIT" ]
null
null
null
# keys
IMPLEMENTATION_FILE_PATHS_KEY = r'implementation_file_paths'
LPROJ_DIR_PATHS_KEY = r'lproj_file_paths'
KEY_KEY = r'key'
TRANSLATION_KEY = r'translation'

# file names
LOCALIZABLE_STRINGS_FILE_NAME = r'Localizable.strings'
19.333333
60
0.818966
34
232
5.147059
0.382353
0.091429
0.262857
0
0
0
0
0
0
0
0
0
0.103448
232
11
61
21.090909
0.841346
0.064655
0
0
0
0
0.349057
0.117925
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d6ca4d4ba42af57fc033e9368f2ae8ef9b4d183f
6,482
py
Python
shexter_client/shexterd.py
tetchel/shexter-client
b1db3ac072fc9a53403a15b1f41188e0a09220f4
[ "MIT" ]
3
2017-12-18T06:37:50.000Z
2018-02-23T08:31:25.000Z
shexter_client/shexterd.py
tetchel/shexter-client
b1db3ac072fc9a53403a15b1f41188e0a09220f4
[ "MIT" ]
null
null
null
shexter_client/shexterd.py
tetchel/shexter-client
b1db3ac072fc9a53403a15b1f41188e0a09220f4
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

from threading import Thread
from time import sleep
import logging

import shexter.requester
import shexter.platform as platform
import shexter.config

"""
This file is for the shexter daemon, which runs persistently.
Every 5 seconds, it polls the phone to see if there are unread messages.
If there are, it displays a notification to the user.
This file is meant to be run directly; not to be imported by any other file.
"""


def notify(msg: str, title=shexter.config.APP_NAME):
    print(title + ': ' + msg)
    if notifier:
        # Note swap of msg, title order
        notify_function(title, msg)


def _parse_contact_name(line: str):
    # print('parsing contact name from "{}"'.format(line))
    # The contact name is the first word after the first ']'
    try:
        return line.split(']')[1].strip().split()[0].rstrip(':')
    except Exception as e:
        print(e)
        print('Error parsing contact name from "{}"'.format(line))


def notify_unread(unread: str) -> None:
    unread_lines = unread.splitlines()
    # Remove the first line, which is just "Unread Messages:"
    unread_lines = unread_lines[1:]

    if len(unread_lines) > 1:
        notify_title = str(len(unread_lines)) + ' New Messages'
        notify_msg = 'Messages from '
        contact_names = []
        for line in unread_lines:
            contact_name = _parse_contact_name(line)
            # Don't repeat contacts
            if contact_name not in contact_names:
                notify_msg += contact_name + ', '
                contact_names.append(contact_name)
        # Remove last ', '
        notify_msg = notify_msg[:-2]
    elif len(unread_lines) == 0:
        # At this time, if the unread response was originally exactly one line,
        # it was because the phone rejected the request.
        notify_title = 'Approval Required'
        notify_msg = 'Approve this computer on your phone'
    else:
        contact_name = _parse_contact_name(unread_lines[0])
        notify_title = 'New Message'
        notify_msg = 'Message from ' + contact_name

    # A cool title would be the phone's hostname.
    notify(notify_msg, title=notify_title)


def init_notifier_win():
    try:
        import win10toast
        toaster = win10toast.ToastNotifier()
        toaster.show_toast(shexter.config.APP_NAME, 'Notifications enabled', duration=3, threaded=True)
        return toaster
    except ImportError as e:
        print(e)
        print('***** To use the ' + shexter.config.APP_NAME + ' daemon on Windows you must install win10toast'
              ' with "[pip | pip3] install win10toast"')


NOTIFY_LEN_S = 10


def notify_win(title: str, msg: str) -> None:
    # Notifier is a win10toast.ToastNotifier
    notifier.show_toast(title, msg, duration=NOTIFY_LEN_S, threaded=True)


"""
def build_notifier_macos():
    # Fuck this for now
    try:
        import gntp.notifier
    except ImportError:
        print('To use the ' + shexter.config.APP_NAME + ' daemon on OSX you must install Growl '
              '(see http://growl.info) and its python library with "pip3 install gntp"')
        quit()
"""

import subprocess

NOTIFY_SEND = 'notify-send'


def init_notifier_nix():
    try:
        subprocess.check_call([NOTIFY_SEND, shexter.config.APP_NAME, 'Notifications enabled', '-t', '3000'])
        return True
    except Exception as e:
        print(e)
        print('***** To use the ' + shexter.config.APP_NAME + ' daemon on Linux you must install notify-send,'
              ' eg "sudo apt-get install notify-send"')


def notify_nix(title: str, msg: str):
    # print('notify_nix {} {}'.format(title, msg))
    result = subprocess.getstatusoutput('notify-send "{}" "{}" -t {}'
                                        .format(title, msg, NOTIFY_LEN_S * 1000))
    if result[0] != 0:
        print('Error running notify-send:')
        print(result[1])


def init_notifier():
    """
    Initializes the 'notifier' and 'notify_function' globals, which are later called by notify
    The notifier is an object for the notify_platform functions to use
    """
    platf = platform.get_platform()
    global notifier, notify_function
    if platf == platform.Platform.WIN:
        notifier = init_notifier_win()
        notify_function = notify_win
    elif platf == platform.Platform.LINUX:
        notifier = init_notifier_nix()
        notify_function = notify_nix
    else:
        print('Sorry, notifications are not supported on your platform, which appears to be ' + platf)
        return None


# Must match response from phone in the case of no msgs.
NO_UNREAD_RESPONSE = 'No unread messages.'


def main(connectinfo: tuple):
    running = True
    logging.basicConfig(filename=shexter.config.APP_NAME.lower() + 'd.log', level=logging.DEBUG,
                        format='%(asctime)s - %(levelname)s - %(message)s')
    logger = logging.getLogger(shexter.config.APP_NAME)
    launched_msg = shexter.config.APP_NAME + ' daemon launched'
    logger.info(launched_msg)
    logger.info('ConnectInfo: ' + str(connectinfo))
    print(launched_msg + ' - CTRL + C to quit')
    try:
        while running:
            unread_result = shexter.requester.unread_command(connectinfo, silent=True)
            # print('result: ' + str(type(unread_result)) + ' ' + unread_result)
            if not unread_result:
                logger.info('Failed to connect to phone')
            elif unread_result != NO_UNREAD_RESPONSE:
                # new messages
                Thread(target=notify_unread, args=(unread_result,)).start()
                logger.info('Got at least 1 msg')
            else:
                logger.debug('No unread')
                # print('no unread')
            for i in range(5):
                # Shorter sleep to afford interrupting...
                # https://stackoverflow.com/questions/5114292/break-interrupt-a-time-sleep-in-python
                sleep(1)
    except (KeyboardInterrupt, EOFError):
        print('Exiting')
        quit(0)


_connectinfo = shexter.config.configure(False)
if not _connectinfo:
    print('Please run ' + shexter.config.APP_NAME + ' config first, so the daemon knows how to find your phone.')
    quit()

# Initialize globals
notifier = None
notify_function = None
init_notifier()
if not notifier:
    notify_function = print

# Call the main loop
main(_connectinfo)
32.248756
149
0.631749
805
6,482
4.959006
0.300621
0.039078
0.04008
0.0501
0.102455
0.082415
0.046343
0.035321
0.035321
0.035321
0
0.009705
0.268744
6,482
200
150
32.41
0.832489
0.149954
0
0.106195
0
0.00885
0.161117
0
0
0
0
0
0
1
0.079646
false
0
0.079646
0
0.19469
0.123894
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6caa6b944d454c9401a57591872b8b05ba06ad2
7,218
py
Python
info_epd/mixins/salah.py
ibnadam/info_epd
3838cc30187dc10fd2c79fba2d51357d3dbbf47a
[ "MIT" ]
null
null
null
info_epd/mixins/salah.py
ibnadam/info_epd
3838cc30187dc10fd2c79fba2d51357d3dbbf47a
[ "MIT" ]
null
null
null
info_epd/mixins/salah.py
ibnadam/info_epd
3838cc30187dc10fd2c79fba2d51357d3dbbf47a
[ "MIT" ]
null
null
null
import sys
import os
import logging
import time
import datetime

from PIL import (
    Image,
    ImageDraw,
    ImageFont
)

from config import *
from util import *

from info_epd import praytimes

DEBUG_PRAYTIMES = False


class SalahMixin:
    """Uses praytimes library for calculating prayer times.
    Make sure settings are correct for your location, Madhab, etc.
    Especially check:
      * Calculation method
      * Settings for Maghrib & midnight
    """
    calc_method = 'ISNA'
    time_fmt = '12h'

    def __init__(self):
        self._funcs['setup'].append(self.setup_praytimes)
        self._funcs['update'].append(self.update_praytimes)
        self._funcs['redraw'].append(self.redraw_praytimes)

    def setup_praytimes(self):
        logging.info("Setup praytimes...")
        self.pt = praytimes.PrayTimes()
        self.pt.setMethod(self.calc_method)
        params = dict(
            fajr=15,
            maghrib='0 min',
            isha=15,
            midnight='Jafari'  # doublecheck: seems to be more correct than non-jafari
        )
        self.pt.adjust(params)
        self.update_info['praytimes'] = {}
        self.update_info['praytimes']['pt'] = None
        self.update_info['praytimes']['curr'] = None
        self.update_info['praytimes']['curr_end'] = None
        self.update_info['praytimes']['next_time'] = None

    def update_praytimes(self):
        logging.info("Update praytimes...")
        today = get_today()
        tomorrow = get_tomorrow()
        now = get_now()
        coords = COORDS['Culver City']
        timezone = TIMEZONES['Los Angeles']
        dst = time.localtime().tm_isdst
        pt = self.pt.getTimes(today, coords, timezone, dst, self.time_fmt)
        fmt = '%I:%M%p'

        def to_time_obj(p1):
            p2 = datetime.datetime.strptime(pt[p1], fmt)

            def to_date_obj():
                return datetime.datetime(year=now.year, month=now.month, day=now.day,
                                         hour=p2.hour, minute=p2.minute)
            return to_date_obj

        fajr = to_time_obj('fajr')()
        sunrise = to_time_obj('sunrise')()
        dhuhr = to_time_obj('dhuhr')()
        asr = to_time_obj('asr')()
        maghrib = to_time_obj('maghrib')()
        isha = to_time_obj('isha')()
        midnight = to_time_obj('midnight')()

        # Assume maghrib lasts for 45 mins
        maghrib_end = maghrib + datetime.timedelta(minutes=45)

        # Figure out what applies to current time
        curr = {}
        curr['fajr'] = fajr <= now < sunrise
        after_fajr = sunrise <= now < dhuhr
        curr['dhuhr'] = dhuhr <= now < asr
        curr['asr'] = asr <= now < maghrib
        curr['maghrib'] = maghrib <= now < maghrib_end
        after_maghrib = maghrib_end <= now < isha

        # Check isha time (could be past 00:00)
        is_isha = False
        if not any((curr['fajr'], curr['dhuhr'], curr['asr'], curr['maghrib'],
                    after_fajr, after_maghrib)):
            # Either we are before fajr, or after isha
            after_isha = now >= isha
            if after_isha:
                m_hr = midnight.hour
                if m_hr < fajr.hour:
                    m_hr += 24
                m_min = midnight.minute
                n_hr = now.hour
                n_min = now.minute
                if n_hr < m_hr:
                    is_isha = True
                elif n_hr == m_hr:
                    if n_min < m_min:
                        is_isha = True
        curr['isha'] = is_isha

        # Figure out what comes next
        next_secs, next_time = secs_til_midnight(), 'midnight'
        next_prayer, pt['next_fajr'] = 'next_fajr', None
        curr_end = None
        if curr['fajr']:
            next_secs, next_time = (sunrise - now).seconds, pt['sunrise']
            curr_end = pt['sunrise']
            next_prayer = 'dhuhr'
        elif after_fajr:
            next_secs, next_time = (dhuhr - now).seconds, pt['dhuhr']
            next_prayer = 'dhuhr'
        elif curr['dhuhr']:
            next_secs, next_time = (asr - now).seconds, pt['asr']
            curr_end = pt['asr']
            next_prayer = 'asr'
        elif curr['asr']:
            next_secs, next_time = (maghrib - now).seconds, pt['maghrib']
            curr_end = pt['maghrib']
            next_prayer = 'maghrib'
        elif curr['maghrib']:
            maghrib_end_t = maghrib_end.strftime('%I:%M%p')
            next_secs, next_time = (maghrib_end - now).seconds, maghrib_end_t
            curr_end = maghrib_end_t
            next_prayer = 'isha'
        elif after_maghrib:
            next_secs, next_time = (isha - now).seconds, pt['isha']
            next_prayer = 'isha'
        elif curr['isha']:
            curr_end = pt['midnight']
        elif now < fajr:
            next_secs, next_time = (fajr - now).seconds, pt['fajr']
            next_prayer = 'fajr'

        # Need to get next day's times
        if next_prayer == 'next_fajr':
            next_pt = self.pt.getTimes(tomorrow, coords, timezone, dst, self.time_fmt)
            pt['next_fajr'] = next_pt['fajr']

        # Save info
        self.update_info['next_secs'] = next_secs
        self.update_info['praytimes']['pt'] = pt
        self.update_info['praytimes']['curr'] = curr
        self.update_info['praytimes']['curr_end'] = curr_end
        self.update_info['praytimes']['next_time'] = next_time
        self.update_info['praytimes']['next_prayer'] = next_prayer

    def redraw_praytimes(self):
        logging.info("Redraw praytimes...")
        if EPD_USED == EPD2in13:
            self.redraw_praytimes_partial()
        else:
            self.redraw_praytimes_full()

    def redraw_praytimes_partial(self):
        pinfo = self.update_info['praytimes']
        pt, curr, curr_end = pinfo['pt'], pinfo['curr'], pinfo['curr_end']
        next_upd, next_prayer = pinfo['next_time'], pinfo['next_prayer']
        h, w = self.epd.height, self.epd.width
        bmp = Image.open(os.path.join(imgdir, 'masjid.bmp'))
        self.image.paste(bmp, (2, w//2 + 25))
        # If have current end time then we have a current prayer time as well
        font = font24
        if curr_end:
            for p in curr:
                if curr[p]:
                    txt = f"{p.capitalize()}: {pt[p]}"
                    x, y = font.getsize(txt)
                    self.draw.rectangle([(5, 5), (x+15, y+15)], fill='black')
                    self.draw.text((10, 10), txt, font=font, fill='white')
                    self.draw.text((10, y+15), f'Ends {curr_end}', font=font18, fill=0)
        else:
            txt = f'Next update: {next_upd}'
            self.draw.text((10, 10), txt, font=font, fill=0)
        # Next prayer
        self.draw.text((55, w//2 + 10), 'Upcoming:', font=font18, fill=0)
        p = next_prayer
        n = 'fajr' if p == 'next_fajr' else p
        txt = f"{n.capitalize()}: {pt[p]}"
        self.draw.text((55, w//2 + 30), txt, font=font, fill=0)

    def redraw_praytimes_full(self):
        """To be implemented"""
34.371429
87
0.541563
876
7,218
4.27968
0.221461
0.040011
0.044812
0.067485
0.150173
0.081889
0.016538
0.016538
0.016538
0
0
0.013944
0.334303
7,218
209
88
34.535885
0.766285
0.077445
0
0.063291
0
0
0.104866
0
0
0
0
0
0
1
0.050633
false
0
0.056962
0.006329
0.139241
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6cba202761b7405679a5ca3b25903c5029105ec
5,871
py
Python
tests/test_denstatbank.py
gmohandas/denstatbank
c82cdbafb934de40d106c1f0f94ef58b0962e25c
[ "BSD-3-Clause-Clear" ]
1
2020-05-28T14:59:04.000Z
2020-05-28T14:59:04.000Z
tests/test_denstatbank.py
gmohandas/denstatbank
c82cdbafb934de40d106c1f0f94ef58b0962e25c
[ "BSD-3-Clause-Clear" ]
null
null
null
tests/test_denstatbank.py
gmohandas/denstatbank
c82cdbafb934de40d106c1f0f94ef58b0962e25c
[ "BSD-3-Clause-Clear" ]
null
null
null
import pandas as pd
import pytest

from denstatbank.denstatbank import StatBankClient
from denstatbank.utils import data_dict_to_df, add_list_to_dict

from .mock_responses import (
    mock_sub_resp_default, mock_sub_resp_2401, mock_tables_resp,
    mock_tableinfo_resp, mock_tableinfo_variable_resp, mock_data_resp,
    mock_data_resp_to_df, mock_data_resp_with_vars, mock_codes
)


@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
    """Remove requests.sessions.Session.request for all tests."""
    monkeypatch.delattr("requests.sessions.Session.request")


@pytest.fixture
def client():
    client = StatBankClient()
    return client


def test_base_request(client, monkeypatch):
    def mock_base_request(self, *args, **kwargs):
        return mock_tableinfo_resp

    monkeypatch.setattr(StatBankClient, "_base_request", mock_base_request)
    r = client._base_request('data', lang='en')
    assert r == mock_tableinfo_resp


def test_subjects(client, monkeypatch):
    def mock_subjects(self, subjects=None, include_tables=False, recursive=False):
        if subjects is None:
            return mock_sub_resp_default

    monkeypatch.setattr(StatBankClient, "subjects", mock_subjects)
    r = client.subjects()
    assert isinstance(r, list)
    d = r[0]
    assert isinstance(d, dict)
    assert 'id' in d.keys()
    assert 'description' in d.keys()


def test_subjects_returns_specified_subject(client, monkeypatch):
    def mock_subjects(self, subjects=None, include_tables=False, recursive=False):
        if subjects[0] == '2401':
            return mock_sub_resp_2401

    monkeypatch.setattr(StatBankClient, "subjects", mock_subjects)
    r = client.subjects(subjects=['2401'])
    assert isinstance(r, list)
    d = r[0]
    assert isinstance(d, dict)
    assert d['id'] == '2401'


def test_tables_returns_dict(client, monkeypatch):
    def mock_tables(self, subjects=None, past_days=None, include_inactive=False, as_df=True):
        return mock_tables_resp

    monkeypatch.setattr(StatBankClient, "tables", mock_tables)
    r = client.tables(as_df=False)
    assert isinstance(r, list)
    d = r[0]
    assert isinstance(d, dict)
    assert 'id' in d.keys()
    assert 'text' in d.keys()
    assert 'unit' in d.keys()
    assert 'updated' in d.keys()
    assert 'firstPeriod' in d.keys()
    assert 'latestPeriod' in d.keys()
    assert 'active' in d.keys()
    assert 'variables' in d.keys()


def test_tables_returns_df(client, monkeypatch):
    def mock_tables(self, subjects=None, past_days=None, include_inactive=False, as_df=True):
        return pd.DataFrame(mock_tables_resp)

    monkeypatch.setattr(StatBankClient, "tables", mock_tables)
    df = client.tables()
    assert isinstance(df, pd.DataFrame)
    assert 'id' in df.columns
    assert 'text' in df.columns
    assert 'unit' in df.columns
    assert 'updated' in df.columns
    assert 'firstPeriod' in df.columns
    assert 'latestPeriod' in df.columns
    assert 'active' in df.columns
    assert 'variables' in df.columns


def test_tableinfo_returns_dict(client, monkeypatch):
    def mock_tableinfo(self, table_id, variables_df=False):
        return mock_tableinfo_resp

    monkeypatch.setattr(StatBankClient, "tableinfo", mock_tableinfo)
    d = client.tableinfo('FOLK1A')
    assert isinstance(d, dict)
    assert d['id'] == 'FOLK1A'
    assert 'id' in d['variables'][0].keys()
    assert 'text' in d['variables'][0].keys()


def test_tableinfo_returns_variables_df(client, monkeypatch):
    def mock_tableinfo(self, table_id, variables_df):
        if variables_df:
            return pd.DataFrame(mock_tableinfo_variable_resp)

    monkeypatch.setattr(StatBankClient, "tableinfo", mock_tableinfo)
    df = client.tableinfo('FOLK1A', variables_df=True)
    assert isinstance(df, pd.DataFrame)
    print(df)
    assert 'id' in df.columns
    assert 'text' in df.columns
    assert 'variable' in df.columns
    assert len(df.columns.tolist()) == 3


def test_data_returns_dict(client, monkeypatch):
    def mock_data(self, table_id, as_df, variables=None, **kwargs):
        return mock_data_resp

    monkeypatch.setattr(StatBankClient, "data", mock_data)
    d = client.data(table_id='folk1a', as_df=False)
    assert isinstance(d, dict)
    assert 'dataset' in d.keys()
    dd = d['dataset']
    assert 'value' in dd.keys()
    assert isinstance(dd['value'], list)


def test_data_returns_df(client, monkeypatch):
    def mock_data(self, table_id, as_df=True, variables=None, **kwargs):
        return pd.DataFrame(mock_data_resp_to_df)

    monkeypatch.setattr(StatBankClient, "data", mock_data)
    d = client.data(table_id='folk1a')
    assert isinstance(d, pd.DataFrame)


def test_variables_dict(client):
    kon = client.variable_dict(code='køn', values=['M', 'K'])
    assert isinstance(kon, dict)
    assert 'code' in kon.keys()
    assert 'values' in kon.keys()
    assert kon['code'] == 'køn'
    assert isinstance(kon['values'], list)
    assert kon['values'] == ['M', 'K']

    tid = client.variable_dict(code='tid', values='2018')
    assert isinstance(tid, dict)
    assert 'code' in tid.keys()
    assert 'values' in tid.keys()
    assert tid['code'] == 'tid'
    assert isinstance(tid['values'], list)
    assert tid['values'] == ['2018']


def test_data_dict_to_df():
    df = data_dict_to_df(mock_data_resp_with_vars, mock_codes)
    assert isinstance(df, pd.DataFrame)
    assert isinstance(df.index, pd.MultiIndex)
    assert df.shape == (8, 1)


def test_add_list_to_dict():
    params = {'lang': 'en'}
    add_list_to_dict(params, subjects=['02'])
    assert 'subjects' in params.keys()
    assert isinstance(params['subjects'], list)
    assert params['subjects'] == ['02']

    with pytest.raises(Exception) as e:
        assert add_list_to_dict(params, subjects='03')
    assert str(e.value) == 'subjects must be a list.'
33.169492
93
0.69971
793
5,871
4.984868
0.145019
0.076904
0.019479
0.043005
0.477865
0.426764
0.384012
0.327599
0.327599
0.245636
0
0.00999
0.18157
5,871
176
94
33.357955
0.812695
0.009368
0
0.234483
0
0
0.081411
0.00568
0
0
0
0
0.42069
1
0.158621
false
0
0.034483
0.041379
0.262069
0.006897
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
1
d6cc154da2b05275462754e08a6a4fdca86447f1
473
py
Python
258-add-digits/258-add-digits.py
e-lin/LeetCode
1a00e187b5ccea3bddabf93a29164bf0ad928b7d
[ "Apache-2.0" ]
null
null
null
258-add-digits/258-add-digits.py
e-lin/LeetCode
1a00e187b5ccea3bddabf93a29164bf0ad928b7d
[ "Apache-2.0" ]
null
null
null
258-add-digits/258-add-digits.py
e-lin/LeetCode
1a00e187b5ccea3bddabf93a29164bf0ad928b7d
[ "Apache-2.0" ]
null
null
null
class Solution(object):
    def addDigits(self, num):
        """
        :type num: int
        :rtype: int
        """
        s = str(num)
        l = list(s)
        sum = 0
        for digit in l:
            sum += int(digit)
        if len(list(str(sum))) == 1:
            return sum
        else:
            return self.addDigits(sum)


def main():
    num = 38
    solution = Solution()
    print(solution.addDigits(num))


if __name__ == '__main__':
    main()
17.518519
38
0.46723
54
473
3.944444
0.518519
0
0
0
0
0
0
0
0
0
0
0.014286
0.408034
473
27
39
17.518519
0.746429
0
0
0
0
0
0.019231
0
0
0
0
0
0
0
null
null
0
0
null
null
0.058824
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
d6cc317a27628be12980edb90d20e0b73593f693
205
py
Python
data/copy_license.py
dashingsoft/pyarmor-server
8ac1995d3a0b4ca4592a2a9fa255a7a420f36bc7
[ "MIT" ]
28
2020-07-02T22:12:32.000Z
2022-03-30T03:21:34.000Z
data/copy_license.py
dashingsoft/pyarmor-server
8ac1995d3a0b4ca4592a2a9fa255a7a420f36bc7
[ "MIT" ]
3
2020-03-09T09:43:48.000Z
2020-10-19T05:23:37.000Z
data/copy_license.py
dashingsoft/pyarmor-server
8ac1995d3a0b4ca4592a2a9fa255a7a420f36bc7
[ "MIT" ]
13
2020-04-27T13:31:00.000Z
2022-01-16T05:49:06.000Z
import sys
from os.path import join, dirname

with open(join(dirname(sys.executable), 'license.lic'), 'rb') as fs:
    with open(join(sys._MEIPASS, 'license.lic'), 'wb') as fd:
        fd.write(fs.read())
29.285714
68
0.663415
33
205
4.090909
0.606061
0.162963
0.177778
0
0
0
0
0
0
0
0
0
0.160976
205
6
69
34.166667
0.784884
0
0
0
0
0
0.127451
0
0
0
0
0
0
1
0
true
0.2
0.4
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
0
0
0
4
d6d00e0815efbd0fa8cd9dea45c88de5c7194783
2,689
py
Python
src/web/sistema/templatetags/number_to_text.py
fossabot/SIStema
1427dda2082688a9482c117d0e24ad380fdc26a6
[ "MIT" ]
5
2018-03-08T17:22:27.000Z
2018-03-11T14:20:53.000Z
src/web/sistema/templatetags/number_to_text.py
fossabot/SIStema
1427dda2082688a9482c117d0e24ad380fdc26a6
[ "MIT" ]
263
2018-03-08T18:05:12.000Z
2022-03-11T23:26:20.000Z
src/web/sistema/templatetags/number_to_text.py
fossabot/SIStema
1427dda2082688a9482c117d0e24ad380fdc26a6
[ "MIT" ]
6
2018-03-12T19:48:19.000Z
2022-01-14T04:58:52.000Z
from django.template import Library

register = Library()

hundreds = [
    '', 'сто', 'двести', 'триста', 'четыреста',
    'пятьсот', 'шестьсот', 'семьсот', 'восемьсот', 'девятьсот'
]

first_decade = [
    '', ('одна', 'один'), ('две', 'два'), 'три', 'четыре',
    'пять', 'шесть', 'семь', 'восемь', 'девять'
]

second_decade = [
    'десять', 'одиннадцать', 'двенадцать', 'тринадцать', 'четырнадцать',
    'пятнадцать', 'шестнадцать', 'семнадцать', 'восемнадцать', 'девятнадцать'
]

decades = [
    '', 'десять', 'двадцать', 'тридцать', 'сорок',
    'пятьдесят', 'шестьдесят', 'семьдесят', 'восемьдесят', 'девяносто'
]


def pluralize(number, one, two, five):
    last_digit = number % 10
    prelast_digit = (number // 10) % 10
    if last_digit == 1 and prelast_digit != 1:
        return one
    if 2 <= last_digit <= 4 and prelast_digit != 1:
        return two
    return five


@register.filter(is_safe=False)
def russian_pluralize(value, arg='s'):
    if ',' not in arg:
        arg = ',' + arg
    bits = arg.split(',')
    if len(bits) > 3:
        return ''
    one, two, five = bits[:3]
    return pluralize(value, one, two, five)


@register.filter
def number_to_text(number, gender='male', return_text_for_zero=True):
    """
    Supports numbers less than 1 000 000 000
    """
    if number is None or number == 0:
        return 'ноль' if return_text_for_zero else ''

    text = []
    if number >= 1000000:
        millions = number // 1000000
        text.extend([number_to_text(millions, gender='male',
                                    return_text_for_zero=False),
                     'миллион' + pluralize(millions, '', 'а', 'ов')])
        # The original reduced with `number %= 100000`, which silently drops
        # the hundred-thousands digit; the correct modulus is 1000000.
        number %= 1000000
    if number >= 1000:
        thousands = number // 1000
        text.extend([number_to_text(thousands, gender='female',
                                    return_text_for_zero=False),
                     'тысяч' + pluralize(thousands, 'а', 'и', '')])
        number %= 1000
    if number >= 100:
        text.append(hundreds[number // 100])
        number %= 100
    if number == 0:
        pass
    elif number < 10:
        number_text = first_decade[number]
        if isinstance(number_text, (tuple, list)):
            number_text = number_text[1 if gender == 'male' else 0]
        text.append(number_text)
    elif number < 20:
        text.append(second_decade[number - 10])
    else:
        number_text = first_decade[number % 10]
        if isinstance(number_text, (tuple, list)):
            number_text = number_text[1 if gender == 'male' else 0]
        text.extend([decades[number // 10], number_text])
    return ' '.join(text)
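As a hedged usage sketch of the pluralize helper above (the asserts restate the selection rules in the code; they are illustrative, not tests from the original repo):

# 'минута' / 'минуты' / 'минут' are the three Russian plural forms of "minute".
assert pluralize(1, 'минута', 'минуты', 'минут') == 'минута'
assert pluralize(3, 'минута', 'минуты', 'минут') == 'минуты'
assert pluralize(11, 'минута', 'минуты', 'минут') == 'минут'   # 11-14 take the "five" form
assert pluralize(21, 'минута', 'минуты', 'минут') == 'минута'  # 21 takes the "one" form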
22.040984
92
0.564522
300
2,689
4.923333
0.39
0.067705
0.035207
0.046039
0.249831
0.13541
0.098849
0.098849
0.098849
0.098849
0
0.041907
0.290071
2,689
121
93
22.223141
0.731797
0.014875
0
0.09
0
0
0.128458
0
0
0
0
0
0
1
0.03
false
0.01
0.01
0
0.11
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6d066071d6280fd212953aa9288f12b50467f48
1,577
py
Python
sympy/solvers.py
Fernal73/LearnPython3
5288017c0dbf95633b84f1e6324f00dec6982d36
[ "MIT" ]
1
2021-12-17T11:03:13.000Z
2021-12-17T11:03:13.000Z
sympy/solvers.py
Fernal73/LearnPython3
5288017c0dbf95633b84f1e6324f00dec6982d36
[ "MIT" ]
1
2020-02-05T00:14:43.000Z
2020-02-06T09:22:49.000Z
sympy/solvers.py
Fernal73/LearnPython3
5288017c0dbf95633b84f1e6324f00dec6982d36
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from sympy import *

x, y, z = symbols('x y z')
init_printing(use_unicode=True)

print(Eq(x, y))
print(solveset(Eq(x**2, 1), x))
print(solveset(Eq(x**2 - 1, 0), x))
print(solveset(x**2 - 1, x))
print(solveset(x**2 - x, x))
print(solveset(x - x, x, domain=S.Reals))
print(solveset(sin(x) - 1, x, domain=S.Reals))
print(solveset(exp(x), x))       # No solution exists
print(solveset(cos(x) - x, x))   # Not able to find solution

print(linsolve([x + y + z - 1, x + y + 2*z - 3], (x, y, z)))
print(linsolve(Matrix(([1, 1, 1, 1], [1, 1, 2, 3])), (x, y, z)))
M = Matrix(((1, 1, 1, 1), (1, 1, 2, 3)))
system = A, b = M[:, :-1], M[:, -1]
print(linsolve(system, x, y, z))

a, b, c, d = symbols('a, b, c, d', real=True)
print(nonlinsolve([a**2 + a, a - b], [a, b]))
print(nonlinsolve([x*y - 1, x - 2], x, y))
print(nonlinsolve([x**2 + 1, y**2 + 1], [x, y]))
system = [x**2 - 2*y**2 - 2, x*y - 2]
vars = [x, y]
print(nonlinsolve(system, vars))
system = [exp(x) - sin(y), 1/y - 3]
print(nonlinsolve(system, vars))
print(nonlinsolve([x*y, x*y - x], [x, y]))
system = [a**2 + a*c, a - b]
print(nonlinsolve(system, [a, b]))

print(solve([x**2 - y**2/exp(x)], [x, y], dict=True))
print(solve([sin(x + y), cos(x - y)], [x, y]))
print(solveset(x**3 - 6*x**2 + 9*x, x))
print(roots(x**3 - 6*x**2 + 9*x, x))
print(solve(x*exp(x) - 1, x))

f, g = symbols('f g', cls=Function)
print(f(x).diff(x))
diffeq = Eq(f(x).diff(x, x) - 2*f(x).diff(x) + f(x), sin(x))
print(diffeq)
print(dsolve(diffeq, f(x)))
print(dsolve(f(x).diff(x)*(1 - sin(f(x))) - 1, f(x)))
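For orientation, several of these calls have well-known closed forms; a hedged sketch of the expected output (exact rendering varies by SymPy version):

# solveset(Eq(x**2, 1), x)            -> {-1, 1}
# solveset(x - x, x, domain=S.Reals)  -> Reals (the identity holds everywhere)
# solveset(exp(x), x)                 -> EmptySet (exp is never zero)
# solveset(x**3 - 6*x**2 + 9*x, x)    -> {0, 3}, since x**3 - 6*x**2 + 9*x == x*(x - 3)**2
# roots(x**3 - 6*x**2 + 9*x, x)       -> {0: 1, 3: 2}, i.e. the root 3 has multiplicity 2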
31.54
64
0.551046
326
1,577
2.659509
0.190184
0.046136
0.027682
0.027682
0.202999
0.184544
0.062284
0.062284
0.062284
0
0
0.045489
0.163602
1,577
49
65
32.183673
0.611827
0.055168
0
0.05
0
0
0.012113
0
0
0
0
0
0
1
0
false
0
0.025
0
0.025
0.725
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
1
d6d18a1cdcb8a5f838db2879bdd4174a5798f34c
421
py
Python
setup.py
kelonye/python-twitter
9f8293b53c4f91de3f14510af8d18ad2ad7e8867
[ "MIT" ]
2
2015-05-29T02:10:56.000Z
2015-11-07T12:54:41.000Z
setup.py
kelonye/python-twitter
9f8293b53c4f91de3f14510af8d18ad2ad7e8867
[ "MIT" ]
null
null
null
setup.py
kelonye/python-twitter
9f8293b53c4f91de3f14510af8d18ad2ad7e8867
[ "MIT" ]
null
null
null
#!/usr/bin/env python

from setuptools import setup, find_packages

setup(
    name='ptwitter',
    version='0.0.1',
    description="Tiny python library for Twitter's REST API.",
    author='Mitchel Kelonye',
    author_email='kelonyemitchel@gmail.com',
    url='https://github.com/kelonye/python-twitter',
    packages=['ptwitter', ],
    package_dir={'ptwitter': 'lib'},
    license='MIT License',
    zip_safe=True)
26.3125
62
0.674584
53
421
5.283019
0.773585
0
0
0
0
0
0
0
0
0
0
0.008571
0.168646
421
15
63
28.066667
0.791429
0.047506
0
0
0
0
0.415
0.06
0
0
0
0
0
1
0
true
0
0.083333
0
0.083333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
d6d4a59aaae6c52a8e517ab6fdf2ff853e934000
44
py
Python
python/testData/addImport/newFirstImportInBuiltinGroup/main.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/addImport/newFirstImportInBuiltinGroup/main.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/addImport/newFirstImportInBuiltinGroup/main.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
import sys

import a

# NOTE: `datetime` is intentionally left unimported; this file is IDE test
# data for the add-import quick-fix (see the testData/addImport path).
print(datetime, sys, a)
11
23
0.75
8
44
4.125
0.625
0
0
0
0
0
0
0
0
0
0
0
0.159091
44
4
23
11
0.891892
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0.333333
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
d6d5022d562b8e05c89e7d039fdd54b153bdd856
10,444
py
Python
resdk/tests/unit/test_utils.py
tristanbrown/resolwe-bio-py
c911defde8a5e7e902ad1adf4f9e480f17002c18
[ "Apache-2.0" ]
null
null
null
resdk/tests/unit/test_utils.py
tristanbrown/resolwe-bio-py
c911defde8a5e7e902ad1adf4f9e480f17002c18
[ "Apache-2.0" ]
null
null
null
resdk/tests/unit/test_utils.py
tristanbrown/resolwe-bio-py
c911defde8a5e7e902ad1adf4f9e480f17002c18
[ "Apache-2.0" ]
null
null
null
""" Unit tests for resdk/resources/utils.py file. """ # pylint: disable=missing-docstring, protected-access import unittest import six from mock import MagicMock, call, patch from resdk.resources import Collection, Data, Process, Relation, Sample from resdk.resources.utils import ( _print_input_line, endswith_colon, fill_spaces, find_field, get_collection_id, get_data_id, get_process_id, get_relation_id, get_resolwe, get_resource_collection, get_sample_id, get_samples, iterate_fields, iterate_schema, ) PROCESS_OUTPUT_SCHEMA = [ {'name': "fastq", 'type': "basic:file:", 'label': "Reads file"}, {'name': "bases", 'type': "basic:string:", 'label': "Number of bases"}, {'name': "options", 'label': "Options", 'group': [ {'name': "id", 'type': "basic:string:", 'label': "ID"}, {'name': "k", 'type': "basic:integer:", 'label': "k-mer size"} ]} ] OUTPUT = { 'fastq': {'file': "example.fastq.gz"}, 'bases': "75", 'options': { 'id': 'abc', 'k': 123} } class TestUtils(unittest.TestCase): def test_iterate_fields(self): result = list(iterate_fields(OUTPUT, PROCESS_OUTPUT_SCHEMA)) # result object is iterator - we use lists to pull all elements expected = [ ({ 'type': 'basic:string:', 'name': 'id', 'label': 'ID' }, { 'k': 123, 'id': 'abc' }), ({ 'type': 'basic:string:', 'name': 'bases', 'label': 'Number of bases' }, { 'options': { 'k': 123, 'id': 'abc' }, 'bases': '75', 'fastq': { 'file': 'example.fastq.gz' } }), ({ 'type': 'basic:file:', 'name': 'fastq', 'label': 'Reads file' }, { 'options': { 'k': 123, 'id': 'abc' }, 'bases': '75', 'fastq': { 'file': 'example.fastq.gz' } }), ({ 'type': 'basic:integer:', 'name': 'k', 'label': 'k-mer size' }, { 'k': 123, 'id': 'abc' }) ] six.assertCountEqual(self, result, expected) def test_iterate_fields_modif(self): """ Ensure that changing ``values`` inside iteration loop also changes ``OUTPUT`` values. 
""" for schema, values in iterate_fields(OUTPUT, PROCESS_OUTPUT_SCHEMA): field_name = schema['name'] if field_name == "bases": values[field_name] = str(int(values[field_name]) + 1) self.assertEqual(OUTPUT['bases'], "76") # Fix the OUTPUT to previous state: OUTPUT['bases'] = "75" def test_find_field(self): result = find_field(PROCESS_OUTPUT_SCHEMA, 'fastq') expected = {'type': 'basic:file:', 'name': 'fastq', 'label': 'Reads file'} self.assertEqual(result, expected) def test_iterate_schema(self): result1 = list(iterate_schema(OUTPUT, PROCESS_OUTPUT_SCHEMA, 'my_path')) result2 = list(iterate_schema(OUTPUT, PROCESS_OUTPUT_SCHEMA)) expected1 = [ ({'name': 'fastq', 'label': 'Reads file', 'type': 'basic:file:'}, {'fastq': {'file': 'example.fastq.gz'}, 'options': {'k': 123, 'id': 'abc'}, 'bases': '75'}, 'my_path.fastq'), ({'name': 'bases', 'label': 'Number of bases', 'type': 'basic:string:'}, {'fastq': {'file': 'example.fastq.gz'}, 'options': {'k': 123, 'id': 'abc'}, 'bases': '75'}, 'my_path.bases'), ({'name': 'id', 'label': 'ID', 'type': 'basic:string:'}, {'k': 123, 'id': 'abc'}, 'my_path.options.id'), ({'name': 'k', 'label': 'k-mer size', 'type': 'basic:integer:'}, {'k': 123, 'id': 'abc'}, 'my_path.options.k')] expected2 = [ ({'type': 'basic:file:', 'name': 'fastq', 'label': 'Reads file'}, {'fastq': {'file': 'example.fastq.gz'}, 'bases': '75', 'options': {'k': 123, 'id': 'abc'}}), ({'type': 'basic:string:', 'name': 'bases', 'label': 'Number of bases'}, {'fastq': {'file': 'example.fastq.gz'}, 'bases': '75', 'options': {'k': 123, 'id': 'abc'}}), ({'type': 'basic:string:', 'name': 'id', 'label': 'ID'}, {'k': 123, 'id': 'abc'}), ({'type': 'basic:integer:', 'name': 'k', 'label': 'k-mer size'}, {'k': 123, 'id': 'abc'})] self.assertEqual(result1, expected1) self.assertEqual(result2, expected2) def test_fill_spaces(self): result = fill_spaces("one_word", 12) self.assertEqual(result, "one_word ") @patch('resdk.resources.utils.print') def test_print_input_line(self, print_mock): _print_input_line(PROCESS_OUTPUT_SCHEMA, 0) calls = [ call(u'- fastq [basic:file:] - Reads file'), call(u'- bases [basic:string:] - Number of bases'), call(u'- options - Options'), call(u' - id [basic:string:] - ID'), call(u' - k [basic:integer:] - k-mer size')] self.assertEqual(print_mock.mock_calls, calls) def test_endswith_colon(self): schema = {'process_type': 'data:reads:fastq:single'} endswith_colon(schema, 'process_type') self.assertEqual(schema, {'process_type': u'data:reads:fastq:single:'}) def test_get_collection_id(self): collection = Collection(id=1, resolwe=MagicMock()) collection.id = 1 # this is overriden when initialized self.assertEqual(get_collection_id(collection), 1) self.assertEqual(get_collection_id(2), 2) def test_get_sample_id(self): sample = Sample(id=1, resolwe=MagicMock()) sample.id = 1 # this is overriden when initialized self.assertEqual(get_sample_id(sample), 1) self.assertEqual(get_sample_id(2), 2) def test_get_data_id(self): data = Data(id=1, resolwe=MagicMock()) data.id = 1 # this is overriden when initialized self.assertEqual(get_data_id(data), 1) self.assertEqual(get_data_id(2), 2) def test_get_process_id(self): process = Process(id=1, resolwe=MagicMock()) process.id = 1 # this is overriden when initialized self.assertEqual(get_process_id(process), 1) self.assertEqual(get_process_id(2), 2) def test_get_relation_id(self): relation = Relation(id=1, resolwe=MagicMock()) relation.id = 1 # this is overriden when initialized self.assertEqual(get_relation_id(relation), 1) 
self.assertEqual(get_relation_id(2), 2) def test_get_samples(self): collection = Collection(id=1, resolwe=MagicMock()) collection._samples = ['sample_1', 'sample_2'] self.assertEqual(get_samples(collection), ['sample_1', 'sample_2']) collection_1 = Collection(id=1, resolwe=MagicMock()) collection_1._samples = ['sample_1'] collection_2 = Collection(id=2, resolwe=MagicMock()) collection_2._samples = ['sample_2'] self.assertEqual(get_samples([collection_1, collection_2]), ['sample_1', 'sample_2']) data = Data(id=1, resolwe=MagicMock()) data._sample = 'sample_1' self.assertEqual(get_samples(data), ['sample_1']) data1 = Data(id=1, resolwe=MagicMock()) data1._sample = 'sample1' data2 = Data(id=2, resolwe=MagicMock()) data2._sample = 'sample2' self.assertEqual(get_samples([data1, data2]), ['sample1', 'sample2']) data = Data(id=1, resolwe=MagicMock(**{'sample.filter.return_value': None})) data._sample = None with self.assertRaises(TypeError): get_samples(data) sample = Sample(id=1, resolwe=MagicMock()) self.assertEqual(get_samples(sample), [sample]) sample_1 = Sample(id=1, resolwe=MagicMock()) sample_2 = Sample(id=3, resolwe=MagicMock()) self.assertEqual(get_samples([sample_1, sample_2]), [sample_1, sample_2]) def test_get_resource_collection(self): collection = Collection(id=1, resolwe=MagicMock()) collection.id = 1 # this is overriden when initialized self.assertEqual(get_resource_collection(collection), 1) relation = Relation(id=1, resolwe=MagicMock()) relation._hydrated_collection = Collection(id=2, resolwe=MagicMock()) relation._hydrated_collection.id = 2 # this is overriden when initialized self.assertEqual(get_resource_collection(relation), 2) data = Data(id=1, resolwe=MagicMock()) data._collections = [Collection(id=3, resolwe=MagicMock())] data._collections[0].id = 3 # this is overriden when initialized self.assertEqual(get_resource_collection(data), 3) sample = Sample(id=1, resolwe=MagicMock()) sample._collections = [Collection(id=4, resolwe=MagicMock())] sample._collections[0].id = 4 # this is overriden when initialized self.assertEqual(get_resource_collection(sample), 4) sample = Sample(id=1, resolwe=MagicMock()) sample._collections = [ Collection(id=5, resolwe=MagicMock()), Collection(id=6, resolwe=MagicMock()) ] sample._collections[0].id = 5 # this is overriden when initialized sample._collections[1].id = 6 # this is overriden when initialized self.assertEqual(get_resource_collection(sample), None) with self.assertRaises(LookupError): get_resource_collection(sample, fail_silently=False) def test_get_resolwe(self): # same resolwe object resolwe_mock = MagicMock() relation = Relation(id=1, resolwe=resolwe_mock) sample = Sample(id=1, resolwe=resolwe_mock) self.assertEqual(get_resolwe(relation, sample), resolwe_mock) relation = Relation(id=1, resolwe=MagicMock()) sample = Sample(id=1, resolwe=MagicMock()) with self.assertRaises(TypeError): get_resolwe(relation, sample) if __name__ == '__main__': unittest.main()
37.035461
95
0.567503
1,156
10,444
4.957612
0.128028
0.075903
0.069098
0.062991
0.555749
0.440586
0.362938
0.282673
0.238527
0.238527
0
0.021751
0.278054
10,444
281
96
37.16726
0.738329
0.065588
0
0.234742
0
0
0.162034
0.010294
0
0
0
0
0.15493
1
0.070423
false
0
0.023474
0
0.098592
0.023474
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6d7581ab54e0bc20bcf26b8c40b0005e45bc25c
1,136
py
Python
pa1-skeleton/submission/parse_block.py
yzhong94/cs276-spring-2019
a4780a9f88b8c535146040fe11bb513c91c5693b
[ "MIT" ]
null
null
null
pa1-skeleton/submission/parse_block.py
yzhong94/cs276-spring-2019
a4780a9f88b8c535146040fe11bb513c91c5693b
[ "MIT" ]
null
null
null
pa1-skeleton/submission/parse_block.py
yzhong94/cs276-spring-2019
a4780a9f88b8c535146040fe11bb513c91c5693b
[ "MIT" ]
null
null
null
import os


# Re-opens the BSBIIndex class defined earlier in the assignment skeleton.
class BSBIIndex(BSBIIndex):
    def parse_block(self, block_dir_relative):
        """Parses a tokenized text file into termID-docID pairs

        Parameters
        ----------
        block_dir_relative : str
            Relative Path to the directory that contains the files for the block

        Returns
        -------
        List[Tuple[Int, Int]]
            Returns all the td_pairs extracted from the block

        Should use self.term_id_map and self.doc_id_map to get termIDs and docIDs.
        These persist across calls to parse_block
        """
        ### Begin your code
        td_pairs = []
        block_dir = os.path.join(self.data_dir, block_dir_relative)
        for filename in os.listdir(block_dir):
            with open(os.path.join(block_dir, filename), 'r',
                      encoding="utf8", errors='ignore') as f:
                # IdMap lookups assign (and persist) integer ids for new keys.
                doc_id = self.doc_id_map[filename]
                for s in f.read().split():
                    term_id = self.term_id_map[s]
                    td_pairs.append((term_id, doc_id))
        return td_pairs
        ### End your code
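To make the shape of the returned pairs concrete, a minimal self-contained sketch of the same id-mapping idea (this IdMap stand-in is hypothetical, inferred from the docstring's description of term_id_map/doc_id_map):

class IdMap:
    # Minimal stand-in: assigns the next integer id on first lookup and persists it.
    def __init__(self):
        self._ids = {}

    def __getitem__(self, key):
        return self._ids.setdefault(key, len(self._ids))

term_id_map, doc_id_map = IdMap(), IdMap()
doc_id = doc_id_map['doc0.txt']
pairs = [(term_id_map[tok], doc_id) for tok in "the cat sat on the mat".split()]
print(pairs)  # [(0, 0), (1, 0), (2, 0), (3, 0), (0, 0), (4, 0)]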
39.172414
120
0.564261
140
1,136
4.321429
0.521429
0.052893
0.105785
0.042975
0.089256
0.089256
0
0
0
0
0
0.001337
0.341549
1,136
28
121
40.571429
0.807487
0.365317
0
0
0
0
0.023451
0
0
0
0
0
0
1
0.1
false
0
0
0
0.3
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6d796c86f20ac0784cd2ab3c3a72578c896d385
1,674
py
Python
test/test_core_initializer.py
t-k-/tinynn
969eb96020406885d081a961084d9328e2939622
[ "MIT" ]
27
2019-09-16T01:54:19.000Z
2022-03-14T04:50:26.000Z
test/test_core_initializer.py
t-k-/tinynn
969eb96020406885d081a961084d9328e2939622
[ "MIT" ]
null
null
null
test/test_core_initializer.py
t-k-/tinynn
969eb96020406885d081a961084d9328e2939622
[ "MIT" ]
6
2019-09-16T01:54:33.000Z
2022-03-07T12:00:55.000Z
"""test unit for core/initializer.py""" import runtime_path # isort:skip from core.initializer import * TEST_SHAPE = (100000, 1) TOR = 1e-2 def test_get_fans(): fan_in, fan_out = get_fans(shape=(100, 10)) assert fan_in == 100 and fan_out == 10 fan_in, fan_out = get_fans(shape=(64, 5, 5, 128)) assert fan_in == 5 * 5 * 128 assert fan_out == 64 def test_normal_init(): val = NormalInit(mean=0.0, std=1.0).init(TEST_SHAPE) assert -TOR <= val.mean() <= TOR assert 1.0 - TOR <= val.std() <= 1.0 + TOR def test_truncated_normal_init(): val = TruncatedNormalInit(mean=0.0, std=1.0).init(TEST_SHAPE) assert -TOR <= val.mean() <= TOR assert all(val >= -2.0) and all(val <= 2.0) def test_uniform_init(): val = UniformInit(-1.0, 1.0).init(TEST_SHAPE) assert all(val >= -1.0) and all(val <= 1.0) def test_constant_init(): val = ConstantInit(3.1).init(TEST_SHAPE) assert all(val == 3.1) def test_xavier_uniform_init(): val = XavierUniformInit().init(TEST_SHAPE) bound = np.sqrt(6.0 / np.sum(get_fans(TEST_SHAPE))) assert np.all(val >= -bound) and np.all(val <= bound) def test_xavier_normal_init(): val = XavierNormalInit().init(TEST_SHAPE) std = np.sqrt(2.0 / np.sum(get_fans(TEST_SHAPE))) assert std - TOR <= val.std() <= std + TOR def test_he_uniform_init(): val = HeUniformInit().init(TEST_SHAPE) bound = np.sqrt(6.0 / get_fans(TEST_SHAPE)[0]) assert np.all(val >= -bound) and np.all(val <= bound) def test_he_normal_init(): val = HeNormalInit().init(TEST_SHAPE) std = np.sqrt(2.0 / get_fans(TEST_SHAPE)[0]) assert std - TOR <= val.std() <= std + TOR
26.15625
65
0.643369
279
1,674
3.677419
0.204301
0.114035
0.101365
0.074074
0.518519
0.491228
0.440546
0.321637
0.177388
0.177388
0
0.052632
0.194146
1,674
63
66
26.571429
0.707932
0.026882
0
0.15
0
0
0
0
0
0
0
0
0.325
1
0.225
false
0
0.05
0
0.275
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
d6db1b844a289657730452a636a61c698c02aa89
1,117
py
Python
varex/commons/VCFEntry.py
weiyi-bitw/varex
765e8876c0ced480a47c0e523736bd31b7897644
[ "MIT" ]
null
null
null
varex/commons/VCFEntry.py
weiyi-bitw/varex
765e8876c0ced480a47c0e523736bd31b7897644
[ "MIT" ]
null
null
null
varex/commons/VCFEntry.py
weiyi-bitw/varex
765e8876c0ced480a47c0e523736bd31b7897644
[ "MIT" ]
null
null
null
class VCFEntry(object):
    def __init__(self, vkey, ssid, pid, ac,
                 passFilter=1, qual=-1, gq=-1, dp=-1, ad=-1):
        self.vkey = vkey
        if not ssid:
            self.ssid = "UNKNOWN"
        else:
            self.ssid = ssid
        self.pid = pid
        self.ac = ac
        self.passFilter = passFilter
        self.qual = qual
        self.gq = gq
        self.dp = dp
        self.ad = ad

    def __repr__(self):
        return "VCFEntry: (" + ', '.join(
            [str(x) for x in [self.vkey, self.ssid, self.pid, self.ac,
                              self.passFilter, self.qual, self.gq,
                              self.dp, self.ad]]) + ")"

    def __str__(self):
        return '\t'.join(
            [str(x) for x in [self.vkey, self.ssid, self.pid, self.ac,
                              self.passFilter, self.qual, self.gq,
                              self.dp, self.ad]])

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def sameEntry(self, other):
        return (isinstance(other, self.__class__) and
                self.vkey == other.vkey and
                self.ssid == other.ssid and
                self.pid == other.pid)
32.852941
162
0.584602
159
1,117
3.855346
0.220126
0.065253
0.053834
0.035889
0.430669
0.430669
0.430669
0.430669
0.430669
0.280587
0
0.00612
0.268577
1,117
33
163
33.848485
0.744186
0
0
0
0
0
0.020702
0
0
0
0
0
0
1
0.25
false
0.166667
0
0.208333
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
0
0
0
3
d6dd795a3bd40d81aa9b1e2a17911949cba95e34
14,321
py
Python
inventory/nova.py
forgeservicelab/ansible.account-cleanup
14a855f57c5d06c1114f18f561199f9a1534f707
[ "MIT" ]
null
null
null
inventory/nova.py
forgeservicelab/ansible.account-cleanup
14a855f57c5d06c1114f18f561199f9a1534f707
[ "MIT" ]
null
null
null
inventory/nova.py
forgeservicelab/ansible.account-cleanup
14a855f57c5d06c1114f18f561199f9a1534f707
[ "MIT" ]
null
null
null
#!/usr/bin/env python

# (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
# modified by Tomas Karasek <tomas.karasek@digile.fi>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import sys
import re
import os
import argparse
import subprocess
import yaml
import time
import md5
import itertools

import novaclient.client
import ansible.module_utils.openstack

try:
    import json
except ImportError:
    import simplejson as json

# This is a script getting dynamic inventory from Nova. Features:
# - you can refer to instances by their nova name in ansible{-playbook} calls
# - you can refer to single tenants, regions and openstack environments in
#   ansible{-playbook} calls
# - you can refer to a hostgroup when you pass the arbitrary --meta group=
#   in "nova boot"
# - it caches the state of the cloud
# - it tries to guess ansible_ssh_user based on name of image
#   ('\cubuntu' -> 'ubuntu', '\ccentos' -> 'cloud-user', ...)
# - allows to access machines by their private ip *
# - it will work with no additional configuration, just handling single tenant
#   from set OS_* environment variables (just like python-novaclient).
# - you can choose to heavy-configure it for multiple environments
# - it's configured from simple YAML (I dislike ConfigParser). See nova.yml
# - Nodes can be listed in inventory either by DNS name or IP address based
#   on setting.
#
# * I took a few ideas and some code from other pull requests
#   - https://github.com/ansible/ansible/pull/8657 by Monty Taylor
#   - https://github.com/ansible/ansible/pull/7444 by Carson Gee
#
# If Ansible fails to parse JSON, please run this with --list and observe.
#
# HOW CACHING WORKS:
# Cache of list of servers is kept per combination of (auth_url, region_name,
# project_id). Default max age is 300 seconds. You can set the age per section
# (openstack environment) in config.
#
# If you want to build the cache from cron, consider:
# */5 * * * * . /home/tomk/os/openrc.sh && \
#     ANSIBLE_NOVA_CONFIG=/home/tomk/.nova.yml \
#     /home/tomk/ansible/plugins/inventory/nova.py --refresh-cache
#
# HOW IS NOVA INVENTORY CONFIGURED:
# (Note: if you have env vars set from openrc.sh, you can run this without
# writing the config file. Defaults are sane. The values in the config file
# will rewrite the defaults.)
#
# To load configuration from a file, you must have the config file path in
# the environment variable ANSIBLE_NOVA_CONFIG.
#
# IN THE CONFIG FILE:
# The keys in the top level dict are names for different OS environments.
# The keys in a dict for an OS environment can be:
# - auth_url
# - region_name (can be a list)
# - project_id (can be a list)
# - username
# - api_key
# - service_type
# - auth_system
# - prefer_private (connect using private IPs)
# - cache_max_age (how long to consider cached data. In seconds)
# - resolve_ips (translate IP addresses to domain names)
#
# If you have a list in region and/or project, all the combinations
# will be listed.
#
# If you don't have a configfile, there will be one cloud section created,
# called 'openstack'.
#
# WHAT IS AVAILABLE AS A GROUP FOR ANSIBLE CALLS (how are nodes grouped):
# tenants, regions, clouds (top config section), groups by metadata key (nova
# boot --meta group=<name>).

CONFIG_ENV_VAR_NAME = 'ANSIBLE_NOVA_CONFIG'

NOVA_DEFAULTS = {
    'auth_system': os.environ.get('OS_AUTH_SYSTEM'),
    'service_type': 'compute',
    'username': os.environ.get('OS_USERNAME'),
    'api_key': os.environ.get('OS_PASSWORD'),
    'auth_url': os.environ.get('OS_AUTH_URL'),
    'project_id': os.environ.get('OS_TENANT_NAME'),
    'region_name': os.environ.get('OS_REGION_NAME'),
    'prefer_private': False,
    'version': '2',
    'cache_max_age': 300,
    'resolve_ips': True,
}

DEFAULT_CONFIG_KEY = 'openstack'

CACHE_DIR = '~/.ansible/tmp'

CONFIG = {}


def load_config():
    global CONFIG
    _config_file = os.environ.get(CONFIG_ENV_VAR_NAME)
    if _config_file:
        with open(_config_file) as f:
            CONFIG = yaml.load(f.read())
    if not CONFIG:
        CONFIG = {DEFAULT_CONFIG_KEY: {}}
    for section in CONFIG.values():
        for key in NOVA_DEFAULTS:
            if (key not in section):
                section[key] = NOVA_DEFAULTS[key]


def push(data, key, element):
    ''' Assist in adding items to a dictionary of lists '''
    if (not element) or (not key):
        return
    if key in data:
        data[key].append(element)
    else:
        data[key] = [element]


def to_safe(word):
    ''' Converts 'bad' characters in a string to underscores so they
        can be used as Ansible groups '''
    return re.sub(r"[^A-Za-z0-9\-]", "_", word)


def get_access_ip(server, prefer_private):
    ''' Find an IP for Ansible SSH for a host. '''
    private = ansible.module_utils.openstack.openstack_find_nova_addresses(
        getattr(server, 'addresses'), 'fixed', 'private')
    public = ansible.module_utils.openstack.openstack_find_nova_addresses(
        getattr(server, 'addresses'), 'floating', 'public')

    if prefer_private:
        return private[0]
    if server.accessIPv4:
        return server.accessIPv4
    if public:
        return public[0]
    else:
        return private[0]


def get_metadata(server):
    ''' Returns dictionary of all host metadata '''
    results = {}
    for key in vars(server):
        # Extract value
        value = getattr(server, key)
        # Generate sanitized key
        key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower()
        # Add value to instance result (exclude manager class)
        # TODO: maybe use value.__class__ or similar inside of key_name
        if key != 'os_manager':
            results[key] = value
    return results


def get_ssh_user(server, nova_client):
    ''' Try to guess ansible_ssh_user based on image name. '''
    try:
        image_name = nova_client.images.get(server.image['id']).name
        if 'ubuntu' in image_name.lower():
            return 'ubuntu'
        if 'centos' in image_name.lower():
            return 'cloud-user'
        if 'debian' in image_name.lower():
            return 'debian'
        if 'coreos' in image_name.lower():
            return 'coreos'
    except:
        pass


def get_nova_client(combination):
    ''' There is a bit more info in the combination than we need for the nova
        client, so we need to create a copy and delete keys that are not
        relevant. '''
    kwargs = dict(combination)
    del kwargs['name']
    del kwargs['prefer_private']
    del kwargs['cache_max_age']
    del kwargs['resolve_ips']
    return novaclient.client.Client(**kwargs)


def merge_update_to_result(result, update):
    ''' This will merge data from a nova servers.list call (in update)
        into an aggregating dict (in result) '''
    for host, specs in update['_meta']['hostvars'].items():
        # Can the same host be in two different listings? I hope not.
        result['_meta']['hostvars'][host] = dict(specs)

    # groups must be copied if not present, otherwise merged
    for group in update:
        if group == '_meta':
            continue
        if group not in result:
            # copy the list over
            result[group] = update[group][:]
        else:
            result[group] = list(set(update[group]) | set(result[group]))


def get_name(ip):
    ''' Gets the shortest domain name for an IP address '''
    # I first did this with gethostbyaddr but that did not return all the
    # names. Also, this won't work on Windows. But it can be turned off by
    # setting resolve_ips to false.
    command = "host %s" % ip
    p = subprocess.Popen(command.split(), stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    stdout, _ = p.communicate()
    if p.returncode != 0:
        return None
    names = []
    for l in stdout.split('\n'):
        if 'domain name pointer' not in l:
            continue
        names.append(l.split()[-1])
    return min(names, key=len)


def get_update(call_params):
    ''' Fetch host dicts and groups from a single nova_client.servers.list
        call. This is called for each element in the "cartesian product" of
        openstack environments, tenants and regions. '''
    update = {'_meta': {'hostvars': {}}}

    # Cycle on servers
    nova_client = get_nova_client(call_params)
    for server in nova_client.servers.list():
        access_ip = get_access_ip(server, call_params['prefer_private'])
        access_identifier = access_ip
        if call_params['resolve_ips']:
            dns_name = get_name(access_ip)
            if dns_name:
                access_identifier = dns_name

        # Push to a group for its name. This way we can use the nova name as
        # a target for ansible{-playbook}
        push(update, server.name, access_identifier)

        # Run through each metadata item and add the instance to it
        for key, value in server.metadata.iteritems():
            composed_key = to_safe('tag_{0}_{1}'.format(key, value))
            push(update, composed_key, access_identifier)

        # Do special handling of group for backwards compat
        # inventory update
        group = 'undefined'
        if 'group' in server.metadata:
            group = server.metadata['group']
        push(update, group, access_identifier)

        # Add vars to the _meta key for performance optimization in
        # Ansible 1.3+
        update['_meta']['hostvars'][access_identifier] = get_metadata(server)

        # guess username based on image name
        ssh_user = get_ssh_user(server, nova_client)
        if ssh_user:
            host_record = update['_meta']['hostvars'][access_identifier]
            host_record['ansible_ssh_user'] = ssh_user

        push(update, call_params['name'], access_identifier)
        push(update, call_params['project_id'], access_identifier)
        if call_params['region_name']:
            push(update, call_params['region_name'], access_identifier)

    return update


def expand_to_product(d):
    ''' this will transform
        {1: [2, 3, 4], 5: [6, 7]}
        to
        [{1: 2, 5: 6}, {1: 2, 5: 7}, {1: 3, 5: 6},
         {1: 3, 5: 7}, {1: 4, 5: 6}, {1: 4, 5: 7}]
    '''
    return (dict(itertools.izip(d, x))
            for x in itertools.product(*d.itervalues()))


def get_list_of_kwarg_combinations():
    ''' This will transform
        CONFIG = {'openstack': {version: '2', project_id: ['tenant1', 'tenant2'], ...},
                  'openstack_dev': {version: '2', project_id: 'tenant3', ...}}
        into
        [{'name': 'openstack', version: '2', project_id: 'tenant1', ...},
         {'name': 'openstack', version: '2', project_id: 'tenant2', ...},
         {'name': 'openstack_dev', version: '2', project_id: 'tenant3', ...}]

        The elements in the returned list can be (with little customization)
        used as **kwargs for the nova client.
    '''
    l = []
    for section in CONFIG:
        d = dict(CONFIG[section])
        d['name'] = section
        for key in d:
            # all single elements must become lists for the product to work
            if type(d[key]) is not list:
                d[key] = [d[key]]
        for one_call_kwargs in expand_to_product(d):
            l.append(one_call_kwargs)
    return l


def get_cache_filename(call_params):
    ''' cache filename is
        ~/.ansible/tmp/<md5(auth_url,project_id,region_name)>.nova.json '''
    id_to_hash = ("region_name: %(region_name)s, auth_url:%(auth_url)s,"
                  "project_id: %(project_id)s, resolve_ips: %(resolve_ips)s"
                  % call_params)
    return os.path.join(os.path.expanduser(CACHE_DIR),
                        md5.new(id_to_hash).hexdigest() + ".nova.json")


def cache_valid(call_params):
    ''' cache file is specific for (auth_url, project_id, region_name) '''
    cache_path = get_cache_filename(call_params)
    if os.path.isfile(cache_path):
        mod_time = os.path.getmtime(cache_path)
        current_time = time.time()
        if (mod_time + call_params['cache_max_age']) > current_time:
            return True
    return False


def update_cache(call_params):
    fn = get_cache_filename(call_params)
    content = get_update(call_params)
    with open(fn, 'w') as f:
        f.write(json.dumps(content, sort_keys=True, indent=2))


def load_from_cache(call_params):
    fn = get_cache_filename(call_params)
    with open(fn) as f:
        return json.loads(f.read())


def get_args(args_list):
    parser = argparse.ArgumentParser(
        description='Nova dynamic inventory for Ansible')
    g = parser.add_mutually_exclusive_group()
    g.add_argument('--list', action='store_true', default=True,
                   help='List instances (default: True)')
    g.add_argument('--host', action='store',
                   help='Get all the variables about a specific instance')
    parser.add_argument('--refresh-cache', action='store_true', default=False,
                        help=('Force refresh of cache by making API requests '
                              'to Nova (default: False - use cache files)'))
    return parser.parse_args(args_list)


def main(args_list):
    load_config()
    args = get_args(args_list)
    if args.host:
        print(json.dumps({}))
        return 0
    if args.list:
        output = {'_meta': {'hostvars': {}}}
        # we have to deal with every combination of
        # (cloud, region, project).
        for c in get_list_of_kwarg_combinations():
            if args.refresh_cache or (not cache_valid(c)):
                update_cache(c)
            update = load_from_cache(c)
            merge_update_to_result(output, update)
        print(json.dumps(output, sort_keys=True, indent=2))
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
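As a hedged illustration of the config shape the header comments describe (section, tenant, and region names here are hypothetical), the dict that yaml.load() would hand back for a two-region, two-tenant cloud:

# Hypothetical parsed result of an ANSIBLE_NOVA_CONFIG file; keys mirror the
# documented options, values are made up for illustration only.
EXAMPLE_CONFIG = {
    'mycloud': {
        'auth_url': 'https://keystone.example.com:5000/v2.0',
        'region_name': ['region-a', 'region-b'],
        'project_id': ['tenant1', 'tenant2'],
        'username': 'deploy',
        'api_key': 'secret',
        'prefer_private': False,
        'cache_max_age': 600,
        'resolve_ips': True,
    }
}
# get_list_of_kwarg_combinations() would expand this into
# 2 regions x 2 tenants = 4 nova client call parameter sets.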
33.46028
79
0.649047
1,994
14,321
4.521063
0.245236
0.019967
0.009318
0.009318
0.130006
0.086744
0.050804
0.033943
0.026179
0.016639
0
0.007915
0.241254
14,321
427
80
33.538642
0.821738
0.407863
0
0.061611
0
0
0.126424
0.002695
0
0
0
0.002342
0
1
0.085308
false
0.009479
0.066351
0
0.265403
0.009479
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6de6584aa755891b400cead530c2e7d4744bf03
7,003
py
Python
mltc/evaluator.py
BonnierNews/lukas-ner-model
1f7f688f9b0f1e7b7cb66c42f188358d27a0be09
[ "MIT" ]
null
null
null
mltc/evaluator.py
BonnierNews/lukas-ner-model
1f7f688f9b0f1e7b7cb66c42f188358d27a0be09
[ "MIT" ]
null
null
null
mltc/evaluator.py
BonnierNews/lukas-ner-model
1f7f688f9b0f1e7b7cb66c42f188358d27a0be09
[ "MIT" ]
null
null
null
from datetime import date

import numpy as np
from sklearn.metrics import (
    roc_curve,
    auc,
)
import torch
from torch.utils.data import DataLoader

from .metrics import accuracy_thresh, fbeta, pairwise_confusion_matrix

import pandas as pd
from tqdm import tqdm


class ModelEvaluator:
    """Class for evaluating and testing the text classification models.

    Evaluation is done with labeled data whilst testing/prediction is done
    with unlabeled data.
    """

    def __init__(self, args, processor, model, logger):
        self.args = args
        self.processor = processor
        self.model = model
        self.logger = logger
        self.device = "cpu"
        self.eval_dataloader: DataLoader

    def prepare_eval_data(self, file_name, parent_labels=None):
        """Creates a PyTorch Dataloader from a CSV file, which is used as
        input to the classifiers.
        """
        eval_examples = self.processor.get_examples(file_name, "eval", parent_labels)
        eval_features = self.processor.convert_examples_to_features(
            eval_examples, self.args["max_seq_length"]
        )
        self.eval_dataloader = self.processor.pack_features_in_dataloader(
            eval_features, self.args["eval_batch_size"], "eval"
        )

    def evaluate(self):
        """Evaluates a classifier using labeled data.

        Calculates and returns accuracy, precision, recall, F1 score and
        ROC AUC.
        """
        all_logits = None
        all_labels = None

        self.model.eval()
        eval_loss, eval_accuracy, eval_f1, eval_prec, eval_rec = 0, 0, 0, 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in self.eval_dataloader:
            batch = tuple(t.to(self.device) for t in batch)
            input_ids, input_mask, segment_ids, label_ids, parent_labels = batch

            with torch.no_grad():
                # parent_labels is of boolean type if there are no parent labels
                if parent_labels.dtype != torch.bool:
                    outputs = self.model(
                        input_ids,
                        segment_ids,
                        input_mask,
                        label_ids,
                        parent_labels=parent_labels,
                    )
                else:
                    outputs = self.model(input_ids, segment_ids, input_mask, label_ids)
                tmp_eval_loss, logits = outputs[:2]
                tmp_eval_accuracy = accuracy_thresh(logits, label_ids)

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            f1, prec, rec = fbeta(logits, label_ids)
            eval_f1 += f1
            eval_prec += prec
            eval_rec += rec

            if all_logits is None:
                all_logits = logits.detach().cpu().numpy()
            else:
                all_logits = np.concatenate(
                    (all_logits, logits.detach().cpu().numpy()), axis=0
                )

            if all_labels is None:
                all_labels = label_ids.detach().cpu().numpy()
            else:
                all_labels = np.concatenate(
                    (all_labels, label_ids.detach().cpu().numpy()), axis=0
                )

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        eval_f1 = eval_f1 / nb_eval_steps
        eval_prec = eval_prec / nb_eval_steps
        eval_rec = eval_rec / nb_eval_steps

        # ROC-AUC calculation
        # Compute ROC curve and ROC area for each class
        fpr = dict()
        tpr = dict()
        roc_auc = dict()
        confusion_matrices = []
        for i in range(len(self.processor.labels)):
            fpr[i], tpr[i], _ = roc_curve(all_labels[:, i], all_logits[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])
            # NOTE: label index 13 is hard-coded as the reference class here.
            confusion_matrices += [
                pairwise_confusion_matrix(
                    all_logits[:, [13, i]], all_labels[:, [13, i]]
                )
            ]

        # Compute micro-average ROC curve and ROC area
        fpr["micro"], tpr["micro"], _ = roc_curve(
            all_labels.ravel(), all_logits.ravel()
        )
        roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

        result = {
            "eval_loss": eval_loss,
            "eval_accuracy": eval_accuracy,
            "roc_auc": roc_auc,
            "eval_f1": eval_f1,
            "eval_prec": eval_prec,
            "eval_rec": eval_rec,
            # "confusion_matrices": confusion_matrices,
        }

        self.save_result(result)
        return result

    def save_result(self, result):
        """Saves the evaluation results as a text file."""
        d = date.today().strftime("%Y-%m-%d")
        output_eval_file = f"mltc/data/results/eval_results_{d}.txt"
        with open(output_eval_file, "w") as writer:
            self.logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                self.logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

    def predict(self, file_name):
        """Makes class predictions for unlabeled data.

        Returns the estimated probabilities for each of the labels.
        """
        test_examples = self.processor.get_examples(file_name, "test")
        test_features = self.processor.convert_examples_to_features(
            test_examples, self.args["max_seq_length"]
        )
        test_dataloader = self.processor.pack_features_in_dataloader(
            test_features, self.args["eval_batch_size"], "test"
        )

        # Hold input data for returning it
        input_data = [
            {"id": input_example.guid, "text": input_example.text_a}
            for input_example in test_examples
        ]

        self.logger.info("***** Running prediction *****")
        self.logger.info("  Num examples = %d", len(test_examples))
        self.logger.info("  Batch size = %d", self.args["eval_batch_size"])

        all_logits = None
        self.model.eval()
        for step, batch in enumerate(
            tqdm(test_dataloader, desc="Prediction Iteration")
        ):
            batch = tuple(t.to(self.device) for t in batch)
            input_ids, input_mask, segment_ids = batch

            with torch.no_grad():
                outputs = self.model(input_ids, segment_ids, input_mask)
                logits = outputs[0]

            logits = logits.sigmoid()
            if all_logits is None:
                all_logits = logits.detach().cpu().numpy()
            else:
                all_logits = np.concatenate(
                    (all_logits, logits.detach().cpu().numpy()), axis=0
                )

        return pd.merge(
            pd.DataFrame(input_data),
            pd.DataFrame(all_logits),
            left_index=True,
            right_index=True,
        )
34.497537
87
0.565472
821
7,003
4.585871
0.228989
0.033466
0.01753
0.022311
0.308632
0.244622
0.214343
0.127224
0.127224
0.115803
0
0.005798
0.334999
7,003
202
88
34.668317
0.802663
0.109096
0
0.136986
0
0
0.057129
0.006185
0
0
0
0
0
1
0.034247
false
0
0.054795
0
0.109589
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6df220a36694618699e66a95c74ab11a133b077
7,381
py
Python
synapse/lib/datapath.py
larrycameron80/synapse
24bf21c40b4a467e5dc28c8204aecaf502d5cddf
[ "Apache-2.0" ]
null
null
null
synapse/lib/datapath.py
larrycameron80/synapse
24bf21c40b4a467e5dc28c8204aecaf502d5cddf
[ "Apache-2.0" ]
null
null
null
synapse/lib/datapath.py
larrycameron80/synapse
24bf21c40b4a467e5dc28c8204aecaf502d5cddf
[ "Apache-2.0" ]
null
null
null
import collections

import xml.etree.ElementTree as x_etree

import synapse.common as s_common
import synapse.lib.syntax as s_syntax


class DataElem:

    def __init__(self, item, name=None, parent=None):
        self._d_name = name
        self._d_item = item
        self._d_parent = parent
        self._d_special = {'..': parent, '.': self}

    def _elem_valu(self):
        return self._d_item

    def _elem_step(self, step):
        try:
            item = self._d_item[step]
        except Exception as e:
            return None
        return initelem(item, name=step, parent=self)

    def name(self):
        return self._d_name

    def _elem_kids(self, step):
        # Most primitives only have 1 child at a given step...
        # However, we must handle the case of nested children
        # during this form of iteration to account for constructs
        # like XML/HTML ( See XmlDataElem )
        try:
            item = self._d_item[step]
        except Exception as e:
            return

        yield initelem(item, name=step, parent=self)

    def step(self, path):
        '''
        Step to the given DataElem within the tree.
        '''
        base = self
        for step in self._parse_path(path):

            spec = base._d_special.get(step)
            if spec is not None:
                base = spec
                continue

            base = base._elem_step(step)
            if base is None:
                return None

        return base

    def valu(self, path):
        '''
        Return the value of the element at the given path.
        '''
        if not path:
            return self._elem_valu()

        elem = self.step(path)
        if elem is None:
            return None

        return elem._elem_valu()

    def vals(self, path):
        '''
        Iterate the given path elements and yield values.

        Example:

            data = {'foo': [{'bar': 'lol'}, {'bar': 'heh'}]}

            root = s_datapath.initelem(data)

            for elem in root.iter('foo/*/bar'):
                dostuff(elem)  # elem is at value "lol" and "heh"

        '''
        for elem in self.iter(path):
            yield elem._elem_valu()

    def _elem_iter(self):
        # special case for dictionaries
        # to iterate children and keep track
        # of their names...
        if type(self._d_item) == dict:
            for name, item in self._d_item.items():
                yield initelem((name, item), name=self.name(), parent=self)
            return

        if isinstance(self._d_item, int):
            return

        if isinstance(self._d_item, str):
            return

        for i, item in enumerate(self._d_item):
            yield initelem(item, name=str(i), parent=self)

    def _elem_search(self, step):
        subs = self._elem_iter()
        todo = collections.deque(subs)

        while todo:
            elem = todo.popleft()
            #print('SEARCH: %r' % (elem.name(),))
            if elem.name() == step:
                yield elem

            for sube in elem._elem_iter():
                todo.append(sube)

    def iter(self, path):
        '''
        Iterate sub elements using the given path.

        Example:

            data = {'foo': [{'bar': 'lol'}, {'bar': 'heh'}]}

            root = s_datapath.initelem(data)

            for elem in root.iter('foo/*/bar'):
                dostuff(elem)  # elem is at value "lol" and "heh"

        '''
        steps = self._parse_path(path)
        if not steps:
            return

        omax = len(steps) - 1
        todo = collections.deque([(self, 0)])
        while todo:

            base, off = todo.popleft()
            step = steps[off]

            # some special syntax for "all kids" / iterables
            if step == '*':
                for elem in base._elem_iter():
                    if off == omax:
                        yield elem
                    else:
                        todo.append((elem, off + 1))
                continue

            # special "all kids with name" syntax ~foo
            # (including recursive kids within kids)
            # this syntax is mostly useful for XML like
            # hierarchical data structures.
            if step[0] == '~':
                for elem in base._elem_search(step[1:]):
                    if off == omax:
                        yield elem
                    else:
                        todo.append((elem, off + 1))
                continue

            for elem in base._elem_kids(step):
                if off == omax:
                    yield elem
                else:
                    todo.append((elem, off + 1))

    def _parse_path(self, path):
        off = 0
        steps = []
        plen = len(path)
        while off < plen:

            # eat the next (or possibly a first) slash
            _, off = s_syntax.nom(path, off, ('/',))
            if off >= plen:
                break

            if s_syntax.is_literal(path, off):
                elem, off = s_syntax.parse_literal(path, off)
                steps.append(elem)
                continue

            # eat until the next /
            elem, off = s_syntax.meh(path, off, ('/',))
            if not elem:
                continue

            steps.append(elem)

        return steps


class XmlDataElem(DataElem):

    def __init__(self, item, name=None, parent=None):
        DataElem.__init__(self, item, name=name, parent=parent)

    def _elem_kids(self, step):
        # TODO possibly make step fnmatch compat?
        # special case for iterating <tag> which recurses
        # to find all instances of that element.
        #if step[0] == '<' and step[-1] == '>':
            #allstep = step[1:-1]
            #todo = collections.deque(self._d_item)
            #while todo:
                #elem = todo.popleft()
        for xmli in self._d_item:
            if xmli.tag == step:
                yield XmlDataElem(xmli, name=step, parent=self)

    def _elem_tree(self):
        todo = collections.deque([self._d_item])
        while todo:
            elem = todo.popleft()
            yield elem
            for sube in elem:
                todo.append(sube)

    def _elem_step(self, step):
        # optional explicit syntax for dealing with colliding
        # attributes and sub elements.
        if step.startswith('$'):
            item = self._d_item.attrib.get(step[1:])
            if item is None:
                return None
            return initelem(item, name=step, parent=self)

        for xmli in self._d_item:
            if xmli.tag == step:
                return XmlDataElem(xmli, name=step, parent=self)

        item = self._d_item.attrib.get(step)
        if item is not None:
            return initelem(item, name=step, parent=self)

    def _elem_valu(self):
        return self._d_item.text

    def _elem_iter(self):
        for item in self._d_item:
            yield initelem(item, name=item.tag, parent=self)


# Special Element Handler Classes
elemcls = {
    x_etree.Element: XmlDataElem,
}


def initelem(item, name=None, parent=None):
    '''
    Construct a new DataElem from the given item using whichever
    DataElem class is most correct for the type.

    Example:

        elem = initelem(item)

    '''
    ecls = elemcls.get(type(item), DataElem)
    return ecls(item, name=name, parent=parent)
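A short usage sketch of the path syntax implemented above, using only the API shown (running it assumes the synapse.lib.syntax helpers are importable):

data = {'foo': [{'bar': 'lol'}, {'bar': 'heh'}]}
root = initelem(data)

# 'foo/*/bar' steps into 'foo', fans out over the list, then reads 'bar':
print(list(root.vals('foo/*/bar')))  # ['lol', 'heh']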
25.898246
75
0.520661
873
7,381
4.272623
0.200458
0.02815
0.041019
0.028954
0.407775
0.32252
0.276676
0.237802
0.237802
0.183914
0
0.003063
0.380843
7,381
284
76
25.989437
0.813129
0.22585
0
0.402685
0
0
0.001458
0
0
0
0
0.003521
0
1
0.127517
false
0
0.026846
0.020134
0.308725
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6e3ab9d5ea800c6d75075d9a0c1f51418a5e6e8
1,851
py
Python
addon/blenderAddon/test.py
kazulagi/plantFEM
20bf2df8202d41aa5dc25d1cc820385dabbc604f
[ "MIT" ]
21
2020-06-21T08:21:44.000Z
2022-01-13T04:28:30.000Z
addon/blenderAddon/test.py
kazulagi/plantFEM_binary
32acf059a6d778307211718c2a512ff796b81c52
[ "MIT" ]
5
2021-05-08T05:20:06.000Z
2022-03-25T05:39:29.000Z
addon/blenderAddon/test.py
kazulagi/plantFEM_binary
32acf059a6d778307211718c2a512ff796b81c52
[ "MIT" ]
4
2020-10-20T18:28:59.000Z
2021-12-15T08:35:25.000Z
bl_info = {
    "name": "plantFEM (Seed)",
    "author": "Haruka Tomobe",
    "version": (1, 0),
    "blender": (2, 80, 0),
    "location": "View3D > Add > Mesh > plantFEM Object",
    "description": "Adds a new plantFEM Object",
    "warning": "",
    "wiki_url": "",
    "category": "Add Mesh",
}

import bpy
from bpy.types import Operator
from bpy.props import FloatVectorProperty
from bpy_extras.object_utils import AddObjectHelper, object_data_add
from mathutils import Vector


class SAMPLE21_OT_CreateICOSphere(bpy.types.Operator):
    bl_idname = "object.sample21_create_icosphere"
    bl_label = "ICO Sphere"
    bl_description = "Add ICO Sphere."
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        bpy.ops.mesh.primitive_ico_sphere_add()
        print("Sample : Add ICO Sphere.")
        return {'FINISHED'}


class SAMPLE21_OT_CreateCube(bpy.types.Operator):
    bl_idname = "object.sample21_create_cube"
    bl_label = "Cube"
    bl_description = "Add Cube."
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        bpy.ops.mesh.primitive_cube_add()
        print("Sample : Add Cube")
        return {'FINISHED'}


def menu_fn(self, context):
    self.layout.separator()
    self.layout.operator(SAMPLE21_OT_CreateICOSphere.bl_idname)
    self.layout.operator(SAMPLE21_OT_CreateCube.bl_idname)


classes = [
    SAMPLE21_OT_CreateICOSphere,
    SAMPLE21_OT_CreateCube,
]


def register():
    for c in classes:
        bpy.utils.register_class(c)
    bpy.types.VIEW3D_MT_mesh_add.append(menu_fn)
    # Translated from Japanese: the sample add-on using two classes was enabled.
    print("The sample add-on using two classes has been enabled.")


def unregister():
    bpy.types.VIEW3D_MT_mesh_add.remove(menu_fn)
    for c in classes:
        bpy.utils.unregister_class(c)
    # Translated from Japanese: the sample add-on using two classes was disabled.
    print("The sample add-on using two classes has been disabled.")


if __name__ == "__main__":
    register()
26.070423
68
0.670989
222
1,851
5.342342
0.364865
0.05059
0.063238
0.030354
0.298482
0.251265
0.177066
0.177066
0.102867
0.102867
0
0.0171
0.210157
1,851
70
69
26.442857
0.794118
0
0
0.148148
0
0
0.22
0.062162
0
0
0
0
0
1
0.092593
false
0
0.092593
0
0.388889
0.074074
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6e3fe20726371350367db02dca201ae5e10d187
159
py
Python
ASSIGN-1/assignment_1/assignment_1/envs/__init__.py
ShivenTripathi/CS698-Deep-Reinforcement-Learning
184f7887cea3065d2bfa4ba05bfb249838c3dab4
[ "MIT" ]
null
null
null
ASSIGN-1/assignment_1/assignment_1/envs/__init__.py
ShivenTripathi/CS698-Deep-Reinforcement-Learning
184f7887cea3065d2bfa4ba05bfb249838c3dab4
[ "MIT" ]
null
null
null
ASSIGN-1/assignment_1/assignment_1/envs/__init__.py
ShivenTripathi/CS698-Deep-Reinforcement-Learning
184f7887cea3065d2bfa4ba05bfb249838c3dab4
[ "MIT" ]
null
null
null
from assignment_1.envs.gaussianBandit import gaussianBandit
from assignment_1.envs.bernoulliBandit import bernoulliBandit
from assignment_1.envs.RWE import RWE
53
61
0.893082
21
159
6.619048
0.380952
0.302158
0.323741
0.410072
0
0
0
0
0
0
0
0.02027
0.069182
159
3
62
53
0.918919
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
d6e576ee6650fb505b02888827826458811575c7
1,401
py
Python
tests/test_ftp_worker.py
BookOps-CAT/QCbotB
258a3dd4a0b346c0fee82fe332e368c0b5c91440
[ "MIT" ]
null
null
null
tests/test_ftp_worker.py
BookOps-CAT/QCbotB
258a3dd4a0b346c0fee82fe332e368c0b5c91440
[ "MIT" ]
15
2018-01-19T17:39:08.000Z
2020-12-16T17:57:12.000Z
tests/test_ftp_worker.py
BookOps-CAT/QCbotB
258a3dd4a0b346c0fee82fe332e368c0b5c91440
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

import unittest
from datetime import datetime, timedelta

from context import aged_out_report, find_todays_file


class Test_FTP_worker(unittest.TestCase):
    """ Test FTP_worker module functionality"""

    def test_find_today_file(self):
        self.assertIsNone(
            find_todays_file(None))
        todays_fh = 'BookOpsQC.{}'.format(
            datetime.strftime(datetime.now(), '%Y%m%d%H%M%S'))
        fh_list = []
        for i in range(5):
            fh_list.append(
                'BookOpsQC.{}'.format(
                    datetime.strftime(
                        datetime.now() - timedelta(days=1),
                        '%Y%m%d%H%M%S')))
        fh_list.append(todays_fh)
        self.assertEqual(
            find_todays_file(fh_list),
            todays_fh)

    def test_aged_out_report(self):
        fh1 = 'BookOpsQC.{}'.format(
            datetime.strftime(datetime.now() - timedelta(days=31),
                              '%Y%m%d%H%M%S'))
        self.assertTrue(
            aged_out_report(fh1))
        fh2 = 'BookOpsQC.{}'.format(
            datetime.strftime(datetime.now() - timedelta(days=13),
                              '%Y%m%d%H%M%S'))
        self.assertFalse(
            aged_out_report(fh2))
        fh3 = 'BookOpsQC.{}'.format(
            datetime.strftime(datetime.now() - timedelta(days=13),
                              '%Y%m%d'))
        self.assertFalse(
            aged_out_report(fh3))


if __name__ == '__main__':
    unittest.main()
30.456522
83
0.577445
169
1,401
4.56213
0.337278
0.045396
0.084306
0.201038
0.473411
0.405966
0.351492
0.329442
0.155642
0.155642
0
0.014793
0.276231
1,401
45
84
31.133333
0.745562
0.042113
0
0.060606
0
0
0.091386
0
0
0
0
0
0.151515
1
0.060606
false
0
0.090909
0
0.181818
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d6e578f9bf5ed9349829616902ed52c6a3a8e2e9
177
py
Python
get_version.py
sbacchio/tuneit
8d4771acbb56e07336e2aae66160a1f62777cb01
[ "BSD-3-Clause" ]
null
null
null
get_version.py
sbacchio/tuneit
8d4771acbb56e07336e2aae66160a1f62777cb01
[ "BSD-3-Clause" ]
null
null
null
get_version.py
sbacchio/tuneit
8d4771acbb56e07336e2aae66160a1f62777cb01
[ "BSD-3-Clause" ]
null
null
null
"Usage: python -m get_version ./setup.py" import setuptools import sys setuptools.setup = lambda *args, version=None, **kwargs: print(version) exec(open(sys.argv[1]).read())
19.666667
71
0.728814
26
177
4.923077
0.769231
0
0
0
0
0
0
0
0
0
0
0.006369
0.112994
177
8
72
22.125
0.808917
0.220339
0
0
0
0
0.220339
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0.2
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
d6e603f00a28c8cca2ac1585a148b957b90cf6ad
4,815
py
Python
java/javaClass.py
skylarkgit/sql2java
befd55180969b0ec68e242991c3260272d755cc9
[ "MIT" ]
2
2019-10-23T08:27:30.000Z
2019-10-23T09:58:45.000Z
java/javaClass.py
skylarkgit/sql2java
befd55180969b0ec68e242991c3260272d755cc9
[ "MIT" ]
null
null
null
java/javaClass.py
skylarkgit/sql2java
befd55180969b0ec68e242991c3260272d755cc9
[ "MIT" ]
null
null
null
#!/usr/bin/python3
from dialectUtil import *
from java.javaProperty import JAVAProperty
from java.javaSnippets import *
from java.javaLink import JAVALink
import constants as CONST

JAVA_PROPERTIES = {}
JAVA_PROPERTIES['JAVA_AUTO_IMPORTABLE'] = ['created_by', 'last_modified_by', 'created_date', 'last_modified_date']


class JAVAClass:
    def __init__(self, dbTable, project):
        self.project = project
        self.name = underScoreToCamelCase(dbTable.name).strip()
        self.properties = {}
        self.imports = set()
        self.foreignElements = {}
        self.dbTable = dbTable
        self.metaData = ' '
        for field in dbTable.fields:
            field = dbTable.fields[field]
            if field.fk is None:
                javaProperty = JAVAProperty(field, self)
                self.metaData += javaProperty.metaData + ' '
                isImportable = False
                for importable in JAVA_PROPERTIES['JAVA_AUTO_IMPORTABLE']:
                    if importable in javaProperty.metaData:
                        isImportable = True
                if not isImportable:
                    self.properties[javaProperty.name] = javaProperty

    def setForeign(self):
        for field in self.dbTable.fields:
            field = self.dbTable.fields[field]
            if field.fk is not None:
                link = JAVALink(field.fk, self)
                if link is not None:
                    self.foreignElements[link.localProperty] = link

    def save(self):
        extension = ''
        if 'created_by' in self.metaData:
            extension = ' extends Auditable<Long>'
        self.imports.add('javax.persistence.Entity')
        self.imports.add('com.fasterxml.jackson.annotation.JsonIgnoreProperties')
        self.imports.add('javax.persistence.PrePersist')
        for javaProperty in self.properties:
            javaProperty = self.properties[javaProperty]
            for importfile in javaProperty.imports:
                self.imports.add(importfile)
        code = JavaPackage(self.project.package + '.' + CONST.MODEL)
        code += self.getImports()
        body = '\n'.join(sorted(list(map(lambda token: self.properties[token].declare(), self.properties)), key=len))
        body += '\n'.join(list(map(lambda token: self.properties[token].setter(), self.properties)))
        body += '\n'.join(list(map(lambda token: self.properties[token].getter(), self.properties)))
        prePersistCode = ''
        if 'uuid' in self.metaData:
            prePersistCode += '\nuuid = UUID.randomUUID();\n'
        prePersist = '\n@PrePersist\npublic void prePersist(){{{0}}}\n'
        body += prePersist.format(prePersistCode)
        code += '\n'.join(classAnnotations(self))
        code += '@Entity\n@JsonIgnoreProperties({"hibernateLazyInitializer", "handler"})\n' + JavaScope('public', JavaClass(self.name + extension, body))
        filename = CONST.MODEL + '/' + self.name + '.java'
        with open(filename, 'w') as the_file:
            the_file.write(code)

    def saveRepo(self):
        code = JavaPackage(self.project.package + '.' + CONST.REPO)
        code += JavaImport('org.springframework.data.jpa.repository.JpaRepository')
        code += JavaImport(self.project.package + '.' + CONST.MODEL + '.' + self.name)
        code += 'public interface {0}Repository extends JpaRepository<{0}, Long> {{\n\n}}'.format(self.name)
        filename = CONST.REPO + '/' + self.name + 'Repository.java'
        with open(filename, 'w') as the_file:
            the_file.write(code)

    def saveDAO(self):
        code = JavaPackage(self.project.package + '.' + CONST.DAO)
        code += JavaImport('javax.persistence.EntityManager')
        code += JavaImport('org.hibernate.Session')
        code += JavaImport('org.springframework.stereotype.Repository')
        code += JavaImport('java.util.List')
        code += JavaImport('org.springframework.beans.factory.annotation.Autowired')
        code += JavaImport(self.project.package + '.' + CONST.REPO + '.' + self.name + 'Repository')
        code += JavaImport(self.project.package + '.' + CONST.MODEL + '.' + self.name)
        safeUpdateTemplate = 'if ({0}.get{1}() != null) {0}Persisted.set{1}({0}.get{1}());'
        safeUpdate = '\n'.join(list(map(lambda token: safeUpdateTemplate.format(firstSmall(self.name), camel(self.properties[token].name)), self.properties)))
        daoTemplate = open('./java/templates/dao.template.java').read()
        code += daoTemplate.format(self.name, firstSmall(self.name), safeUpdate)
        filename = CONST.DAO + '/' + self.name + 'Dao.java'
        with open(filename, 'w') as the_file:
            the_file.write(code)

    def getImports(self):
        return '\n'.join(list(map(lambda token: JavaImport(token), self.imports)))
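The class above leans on naming helpers imported star-style from dialectUtil, which is not part of this record; the following is a minimal sketch of what their call sites suggest they do, offered purely as an assumption:

# Hypothetical reimplementations of the dialectUtil helpers used above;
# the real module is not shown in this record, so these are assumptions.
def underScoreToCamelCase(name):
    # 'user_account' -> 'UserAccount'
    return ''.join(part.capitalize() for part in name.split('_'))

def camel(name):
    # assumed to produce the capitalized suffix used in get{1}/set{1} names
    return underScoreToCamelCase(name)

def firstSmall(name):
    # 'UserAccount' -> 'userAccount'
    return name[:1].lower() + name[1:] if name else name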
51.223404
158
0.622638
512
4,815
5.808594
0.267578
0.03228
0.036315
0.046402
0.276059
0.214862
0.174176
0.113988
0.113988
0.080363
0
0.002736
0.240914
4,815
94
159
51.223404
0.810944
0.003531
0
0.093023
0
0.011628
0.17549
0.098791
0
0
0
0
0
1
0.069767
false
0
0.337209
0.011628
0.430233
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
d6e6226b3673f32b61f24bf4e754297739bd3b2c
1,295
py
Python
gui1.py
aseemk11/Attendance-system-using-Face-recognization-
b7ca08a868c8b41d776eaa8e0c6ada7ab23df040
[ "Apache-2.0" ]
null
null
null
gui1.py
aseemk11/Attendance-system-using-Face-recognization-
b7ca08a868c8b41d776eaa8e0c6ada7ab23df040
[ "Apache-2.0" ]
null
null
null
gui1.py
aseemk11/Attendance-system-using-Face-recognization-
b7ca08a868c8b41d776eaa8e0c6ada7ab23df040
[ "Apache-2.0" ]
null
null
null
from tkinter import *
import os

main = Tk()
main.geometry('{}x{}'.format(550, 550))
main.wm_title("Welcome to Face Recognition Based Attendance System")

svalue3 = StringVar()  # defines the widget state as string
svalue2 = StringVar()

#imagePath = PhotoImage(file="facerec.png")
#widgetf = Label(main, image=imagePath).pack(side="bottom")
#imagePath1 = PhotoImage(file="efylogo.png")
#widgetf = Label(main, image=imagePath1).pack(side="top")

comments = """ Developed and Designed by Aseem Kanungo"""
widgets = Label(main, justify=CENTER, padx=10, text=comments).pack(side="bottom")

w = Entry(main, textvariable=svalue3)  # adds a single-line text-entry widget
w.pack()
w.place(x=200, y=75)


def fisher_dataset_button_fn():
    scholarid = svalue3.get()
    os.system('python 01_face_dataset.py {0}'.format(scholarid))


def camera(*args):
    # note: this handler invokes the same script as the Scholar ID button,
    # passing the selected camera number instead
    camerano = svalue2.get()
    os.system('python 01_face_dataset.py {0}'.format(camerano))


train_database_button = Button(main, text="Scholar ID",
                               command=fisher_dataset_button_fn,
                               justify=CENTER, padx=10)
train_database_button.pack()
train_database_button.place(x=200, y=110)

a = [0, 1]
popupMenu = OptionMenu(main, svalue2, *a)
Label(main, text="Choose a Camera").place(x=250, y=150)
popupMenu.place(x=250, y=160)

main.mainloop()
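The button callback shells out with os.system and interpolates the raw Entry text into the command line; a sketch of the same call via subprocess, which passes the scholar ID as a separate argument instead of through the shell:

# Sketch only: same effect as the os.system call above, but without
# shell interpolation of the user-supplied scholar ID.
import subprocess

def fisher_dataset_button_fn_safe():
    scholarid = svalue3.get()
    subprocess.run(['python', '01_face_dataset.py', scholarid])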
26.428571
115
0.713514
185
1,295
4.902703
0.502703
0.039691
0.062845
0.041896
0.13892
0.085998
0.085998
0.085998
0.085998
0.085998
0
0.04375
0.135135
1,295
48
116
26.979167
0.766071
0.197683
0
0
0
0
0.178295
0
0
0
0
0
0
1
0.068966
false
0
0.068966
0
0.137931
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6e6b0705f1fc5675687010c9694d9720c0c22e7
133
py
Python
app/extensions.py
vatsalag99/mapping_self-harm_risk_twitter
262c36f994c909714a738686b025633d832bc596
[ "MIT" ]
null
null
null
app/extensions.py
vatsalag99/mapping_self-harm_risk_twitter
262c36f994c909714a738686b025633d832bc596
[ "MIT" ]
1
2021-06-02T01:16:32.000Z
2021-06-02T01:16:32.000Z
app/extensions.py
vatsalag99/mapping_self-harm_risk_twitter
262c36f994c909714a738686b025633d832bc596
[ "MIT" ]
null
null
null
"""Extensions module - Set up for additional libraries can go in here.""" from flask_sqlalchemy import SQLAlchemy db = SQLAlchemy()
26.6
73
0.766917
18
133
5.611111
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.150376
133
5
74
26.6
0.893805
0.503759
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
d6e750c2e34d4d5c52e114e9bd8b810a1a5e2430
2,586
py
Python
src/tiden/apps/zookeeper/zookeeper_utils.py
mshonichev/example_pkg
556a703fe8ea4a7737b8cae9c5d4d19c1397a70b
[ "Apache-2.0" ]
14
2020-06-05T09:30:42.000Z
2022-01-19T00:26:48.000Z
src/tiden/apps/zookeeper/zookeeper_utils.py
mshonichev/example_pkg
556a703fe8ea4a7737b8cae9c5d4d19c1397a70b
[ "Apache-2.0" ]
6
2020-06-09T14:05:21.000Z
2021-03-18T13:55:15.000Z
src/tiden/apps/zookeeper/zookeeper_utils.py
mshonichev/example_pkg
556a703fe8ea4a7737b8cae9c5d4d19c1397a70b
[ "Apache-2.0" ]
1
2020-06-09T13:53:15.000Z
2020-06-09T13:53:15.000Z
#!/usr/bin/env python3
#
# Copyright 2017-2020 GridGain Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from threading import Thread
# from random import choices

from ...util import log_print, util_sleep
from .zookeeper import Zookeeper


class ZkNodesRestart(Thread):
    def __init__(self, zk, nodes_amount):
        super().__init__()
        self.setDaemon(True)
        # self.zk: Zookeeper = zk
        self.zk = zk
        self.nodes_amount = nodes_amount
        self.running = True
        self.order = 'seq'
        self.restart_timeout = 5

    def stop(self):
        log_print('Interrupting ZK nodes restarting thread', color='red')
        self.running = False

    def run(self):
        log_print('Starting ZK nodes restarts', color='green')
        while self.running:
            for node_id in self.__get_nodes_to_restart():
                log_print('Killing ZK node {}'.format(node_id), color='debug')
                self.zk.kill_node(node_id)
                util_sleep(self.restart_timeout)
                log_print('Starting ZK node {}'.format(node_id), color='debug')
                self.zk.start_node(node_id)

    def set_params(self, **kwargs):
        self.order = kwargs.get('order', self.order)
        self.restart_timeout = kwargs.get('restart_timeout', self.restart_timeout)
        self.nodes_amount = kwargs.get('nodes_amount', self.nodes_amount)
        log_print('Params set to:\norder={}\nrestart_timeout={}\nnodes_amount={}'
                  .format(self.order, self.restart_timeout, self.nodes_amount))

    def __get_nodes_to_restart(self):
        zk_nodes = list(self.zk.nodes.keys())
        zk_nodes = zk_nodes[:self.nodes_amount]
        # uncomment this when Python 3.7 will be used.
        # if self.order == 'rand':
        #     zk_nodes = choices(zk_nodes[:self.nodes_amount])
        return zk_nodes

    def __enter__(self):
        self.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
        self.join()
        if exc_type and exc_val and exc_tb:
            raise Exception(exc_tb)
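Because the class implements __enter__/__exit__, the restart thread is designed to wrap a test body in a with-statement; a usage sketch, assuming a started Zookeeper app instance zk and a hypothetical test body:

# Usage sketch: 'zk' is assumed to be a running Zookeeper app instance.
restarter = ZkNodesRestart(zk, nodes_amount=2)
restarter.set_params(restart_timeout=10)
with restarter:
    run_test_scenario()  # hypothetical test body executed under node churn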
34.026316
82
0.656999
351
2,586
4.635328
0.407407
0.043024
0.055317
0.019668
0.13153
0.08236
0.041795
0.041795
0.041795
0
0
0.008155
0.241299
2,586
75
83
34.48
0.821101
0.29041
0
0
0
0
0.119074
0.027563
0
0
0
0
0
1
0.170732
false
0
0.073171
0
0.292683
0.146341
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6e87832836db722cb4f55a3e89485349b743a75
859
py
Python
LaneTracking-master/main.py
cmflannery/openvision
884ae95325dba1db0d428179796efe03964d5f5b
[ "MIT" ]
null
null
null
LaneTracking-master/main.py
cmflannery/openvision
884ae95325dba1db0d428179796efe03964d5f5b
[ "MIT" ]
null
null
null
LaneTracking-master/main.py
cmflannery/openvision
884ae95325dba1db0d428179796efe03964d5f5b
[ "MIT" ]
null
null
null
from __future__ import division
import cv2
import track
import detect


def main(video_path):
    cap = cv2.VideoCapture(video_path)
    ticks = 0

    lt = track.LaneTracker(2, 0.1, 500)
    ld = detect.LaneDetector(180)
    while cap.isOpened():
        precTick = ticks
        ticks = cv2.getTickCount()
        dt = (ticks - precTick) / cv2.getTickFrequency()

        ret, frame = cap.read()

        predicted = lt.predict(dt)
        lanes = ld.detect(frame)

        if predicted is not None:
            cv2.line(frame,
                     (predicted[0][0], predicted[0][1]),
                     (predicted[0][2], predicted[0][3]),
                     (0, 0, 255), 5)
            cv2.line(frame,
                     (predicted[1][0], predicted[1][1]),
                     (predicted[1][2], predicted[1][3]),
                     (0, 0, 255), 5)

        lt.update(lanes)

        cv2.imshow('', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
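The file defines main() but never invokes it, and the preview window is not torn down on exit; a hedged sketch of the missing driver code (the video file name is illustrative, not from this record):

# Sketch of a driver for the function above.
if __name__ == '__main__':
    main('lane_video.mp4')   # hypothetical input file
    cv2.destroyAllWindows()  # close the preview window opened by imshow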
23.861111
115
0.571595
114
859
4.254386
0.438596
0.082474
0.049485
0.086598
0.028866
0
0
0
0
0
0
0.076555
0.270081
859
35
116
24.542857
0.69697
0
0
0
0
0
0.001164
0
0
0
0.004657
0
0
1
0.043478
false
0
0.173913
0
0.217391
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6e8a2cccf8950f95f736755d08af27a34195960
368
py
Python
app_server/config.py
brdavs/faker-trader
6681bb65bab6d024e2f9dffec62a7decf64c76cd
[ "MIT" ]
null
null
null
app_server/config.py
brdavs/faker-trader
6681bb65bab6d024e2f9dffec62a7decf64c76cd
[ "MIT" ]
null
null
null
app_server/config.py
brdavs/faker-trader
6681bb65bab6d024e2f9dffec62a7decf64c76cd
[ "MIT" ]
null
null
null
SEECRET = 'Extremely secretive seecret. You could not guess this one if your life depended on it.'
DATABASE = 'db/data.db'
DATABASE_PRICES = 'db/prices.db'
SESSION_TTL = 240
WEBSOCKETS_PORT = 7334
WEBSOCKETS_URI = 'ws://localhost:' + str(WEBSOCKETS_PORT)
DEFAULT_LEDGER = {
    'value': 10000,
    'asset_id': 1
}
DEFAULT_COIN_PRICE = 500
RECORDS_FOR_TIMEFRAME = 260
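A module of uppercase constants like this is usually handed to the web framework wholesale; a sketch assuming the server uses Flask-style configuration (the framework is not confirmed by this record):

# Sketch only: loads the uppercase names above into app.config,
# assuming a Flask-style server (an assumption, not shown here).
from flask import Flask

app = Flask(__name__)
app.config.from_object('app_server.config')
print(app.config['WEBSOCKETS_URI'])  # ws://localhost:7334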
24.533333
98
0.73913
53
368
4.924528
0.811321
0.061303
0
0
0
0
0
0
0
0
0
0.061093
0.154891
368
15
99
24.533333
0.778135
0
0
0
0
0
0.368564
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6e8aa9357aa361fc5853ac40c9c73710411ae7e
6,349
py
Python
telas/telaEditUser.py
JedersonLuz/BibWorld
c5638a57c35bdfcdaa6b66d7e2244e33afd5ae97
[ "MIT" ]
null
null
null
telas/telaEditUser.py
JedersonLuz/BibWorld
c5638a57c35bdfcdaa6b66d7e2244e33afd5ae97
[ "MIT" ]
null
null
null
telas/telaEditUser.py
JedersonLuz/BibWorld
c5638a57c35bdfcdaa6b66d7e2244e33afd5ae97
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'telas/telaEditUser.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!

from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import PyrebaseConnector as PC
import sys


class Ui_Form(object):
    def setupUi(self, Form):
        Form.setObjectName("Form")
        Form.resize(577, 502)
        Form.setFixedSize(577, 502)
        self.label = QtWidgets.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(80, 25, 401, 61))
        self.label.setObjectName("label")
        self.layoutWidget = QtWidgets.QWidget(Form)
        self.layoutWidget.setGeometry(QtCore.QRect(170, 120, 231, 261))
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.label_6 = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setFamily("KacstOne")
        font.setBold(True)
        font.setWeight(75)
        self.label_6.setFont(font)
        self.label_6.setObjectName("label_6")
        self.verticalLayout.addWidget(self.label_6)
        self.lineEdit_4 = QtWidgets.QLineEdit(self.layoutWidget)
        self.lineEdit_4.setObjectName("lineEdit_4")
        self.lineEdit_4.setDisabled(True)
        self.verticalLayout.addWidget(self.lineEdit_4)
        self.label_7 = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setFamily("KacstOne")
        font.setBold(True)
        font.setWeight(75)
        self.label_7.setFont(font)
        self.label_7.setObjectName("label_7")
        self.verticalLayout.addWidget(self.label_7)
        self.lineEdit_5 = QtWidgets.QLineEdit(self.layoutWidget)
        self.lineEdit_5.setObjectName("lineEdit_5")
        self.verticalLayout.addWidget(self.lineEdit_5)
        self.label_5 = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setFamily("KacstOne")
        font.setBold(True)
        font.setWeight(75)
        self.label_5.setFont(font)
        self.label_5.setObjectName("label_5")
        self.verticalLayout.addWidget(self.label_5)
        self.dateEdit = QtWidgets.QDateEdit(self.layoutWidget)
        self.dateEdit.setObjectName("dateEdit")
        self.verticalLayout.addWidget(self.dateEdit)
        self.label_4 = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setFamily("KacstOne")
        font.setBold(True)
        font.setWeight(75)
        self.label_4.setFont(font)
        self.label_4.setObjectName("label_4")
        self.verticalLayout.addWidget(self.label_4)
        self.comboBox = QtWidgets.QComboBox(self.layoutWidget)
        self.comboBox.setObjectName("comboBox")
        self.comboBox.addItem('Feminino')
        self.comboBox.addItem('Masculino')
        self.verticalLayout.addWidget(self.comboBox)
        self.buttonResetPass = QtWidgets.QPushButton(Form)
        self.buttonResetPass.setObjectName('buttonResetPass')
        self.buttonResetPass.setGeometry(QtCore.QRect(250, 410, 71, 31))
        self.buttonResetPass.setStyleSheet('background-color:#1f4c73')
        self.buttonResetPass.setFont(font)
        self.button_cadastrar = QtWidgets.QPushButton(Form)
        self.button_cadastrar.setGeometry(QtCore.QRect(330, 410, 71, 31))
        self.button_cadastrar.setStyleSheet('background-color:#1f4c73')
        font = QtGui.QFont()
        font.setFamily("KacstOne")
        font.setBold(True)
        font.setWeight(75)
        self.button_cadastrar.setFont(font)
        self.button_cadastrar.setObjectName("button_cadastrar")
        self.button_back = QtWidgets.QPushButton(Form)
        self.button_back.setGeometry(QtCore.QRect(170, 410, 71, 31))
        self.button_back.setStyleSheet('background-color:#1f4c73')
        font = QtGui.QFont()
        font.setFamily("KacstOne")
        font.setBold(True)
        font.setWeight(75)
        self.button_back.setFont(font)
        self.button_back.setObjectName("button_back")

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label.setText(_translate("Form", "TextLabel"))
        pixmap = QtGui.QPixmap("icons/iconEditUser.png")
        pixmap3 = pixmap.scaled(400, 80, QtCore.Qt.KeepAspectRatio)
        self.label.setPixmap(pixmap3)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label_6.setText(_translate("Form", "Email:"))
        self.label_7.setText(_translate("Form", "Nome de usuário:"))
        self.label_5.setText(_translate("Form", "Data de nascimento:"))
        self.label_4.setText(_translate("Form", "Sexo:"))
        self.button_cadastrar.setText(_translate("Form", "Salvar"))
        self.button_cadastrar.clicked.connect(self.UpdateUser)
        self.buttonResetPass.setText(_translate('Form', 'Mudar\nsenha'))
        self.buttonResetPass.clicked.connect(self.changePass)
        self.button_back.setText(_translate("Form", "Voltar"))

    def changePass(self):
        PC.pc.changePassword(PC.pc.auth.current_user['email'])
        self.messageBox('Enviamos um email para você com as instruções para cadastrar uma nova senha!', 'Alerta')

    def messageBox(self, textMessage, nameWin):
        infoBox = QMessageBox()
        infoBox.setIcon(QMessageBox.Information)
        infoBox.setText(textMessage)
        infoBox.setWindowTitle(nameWin)
        infoBox.setStandardButtons(QMessageBox.Ok)
        infoBox.exec_()

    def UpdateUser(self):
        erroVazio = 0
        if self.lineEdit_5.text() == '':
            erroVazio = 1
            self.messageBox('Campos obrigatórios!', 'Erro')
        if erroVazio == 0:
            PC.pc.updateUser(self.lineEdit_5.text(), self.dateEdit.text(), self.comboBox.currentText())
            self.messageBox('Dados atualizados!', 'Mensagem')


"""
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    Other = QtWidgets.QMainWindow()
    ui = Ui_Form()
    ui.setupUi(Other)
    Other.show()
    sys.exit(app.exec_())
"""
41.769737
113
0.673807
694
6,349
6.059078
0.260807
0.055648
0.051367
0.058977
0.265398
0.173603
0.151724
0.151724
0.151724
0.151724
0
0.028594
0.206804
6,349
152
114
41.769737
0.806394
0.030241
0
0.192
1
0
0.093383
0.015788
0
0
0
0
0
1
0.04
false
0.072
0.032
0
0.08
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
d6e9fedf25e708c35ea19099181ea08ee2952f97
1,303
py
Python
tests/unit/testing/test_scenarios.py
thoughteer/edera
c4ddb5d8a25906c3bd773c91afb3260fc0b704f2
[ "MIT" ]
3
2018-11-27T15:45:19.000Z
2018-12-21T20:32:10.000Z
tests/unit/testing/test_scenarios.py
thoughteer/edera
c4ddb5d8a25906c3bd773c91afb3260fc0b704f2
[ "MIT" ]
18
2018-12-02T18:38:59.000Z
2020-02-05T22:09:37.000Z
tests/unit/testing/test_scenarios.py
thoughteer/edera
c4ddb5d8a25906c3bd773c91afb3260fc0b704f2
[ "MIT" ]
null
null
null
import pytest

from edera import Condition
from edera import Task
from edera.exceptions import TargetVerificationError
from edera.testing import DefaultScenario
from edera.testing import ScenarioWithProvidedStubs


def test_default_scenario_works_correctly():

    class A(Task):

        def execute(self):
            raise RuntimeError

    class B(Task):

        class T(Condition):

            def check(self):
                raise RuntimeError

        target = T()

    class Z(Task):

        class T(Condition):

            def check(self):
                return False

        target = T()

    scenario = DefaultScenario()
    assert scenario.stub(Z(), {A(), B()}) == {
        A(): DefaultScenario(),
        B(): DefaultScenario(),
    }
    with pytest.raises(RuntimeError):
        scenario.run(A())
    with pytest.raises(RuntimeError):
        scenario.run(B())
    with pytest.raises(TargetVerificationError):
        scenario.run(Z())


def test_scenario_with_provided_stubs_works_correctly():

    class A(Task):
        pass

    class B(Task):
        pass

    class Z(Task):
        pass

    class S(ScenarioWithProvidedStubs):

        def run(self, subject):
            pass

    stubs = {A(): DefaultScenario()}
    assert S(stubs=stubs).stub(Z(), {A(), B()}) == stubs
19.742424
56
0.603991
138
1,303
5.630435
0.289855
0.057915
0.061776
0.056628
0.241956
0.18018
0.079794
0
0
0
0
0
0.288565
1,303
65
57
20.046154
0.838188
0
0
0.465116
0
0
0
0
0
0
0
0
0.046512
1
0.139535
false
0.093023
0.139535
0.023256
0.55814
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
2
d6eb33dfcb899869dc176ac912f3e58fd02bafeb
1,604
py
Python
nova/tests/unit/conductor/tasks/test_base.py
ebalduf/nova-backports
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
[ "Apache-2.0" ]
5
2016-04-28T16:20:38.000Z
2021-04-25T11:19:03.000Z
nova/tests/unit/conductor/tasks/test_base.py
woraser/nova
fc3890667e4971e3f0f35ac921c2a6c25f72adec
[ "Apache-2.0" ]
132
2017-03-27T11:31:52.000Z
2022-03-30T08:45:02.000Z
nova/tests/unit/conductor/tasks/test_base.py
woraser/nova
fc3890667e4971e3f0f35ac921c2a6c25f72adec
[ "Apache-2.0" ]
8
2017-03-27T07:50:38.000Z
2020-02-14T16:55:56.000Z
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from nova.conductor.tasks import base
from nova import test


class FakeTask(base.TaskBase):
    def __init__(self, context, instance, fail=False):
        super(FakeTask, self).__init__(context, instance)
        self.fail = fail

    def _execute(self):
        if self.fail:
            raise Exception
        else:
            pass


class TaskBaseTestCase(test.NoDBTestCase):
    def setUp(self):
        super(TaskBaseTestCase, self).setUp()
        self.task = FakeTask(mock.MagicMock(), mock.MagicMock())

    @mock.patch.object(FakeTask, 'rollback')
    def test_wrapper_exception(self, fake_rollback):
        self.task.fail = True
        try:
            self.task.execute()
        except Exception:
            pass
        fake_rollback.assert_called_once_with()

    @mock.patch.object(FakeTask, 'rollback')
    def test_wrapper_no_exception(self, fake_rollback):
        try:
            self.task.execute()
        except Exception:
            pass
        self.assertFalse(fake_rollback.called)
29.703704
78
0.666459
200
1,604
5.24
0.505
0.057252
0.024809
0.030534
0.156489
0.156489
0.156489
0.085878
0
0
0
0.003328
0.250623
1,604
53
79
30.264151
0.868552
0.340399
0
0.354839
0
0
0.01534
0
0
0
0
0
0.064516
1
0.16129
false
0.096774
0.096774
0
0.322581
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
d6ec2af6465219bbbd1c415984736e30d5b7e368
2,160
py
Python
flarmfuncs.py
acasadoalonso/RealTimeScoring
843fd151a9a963851f79549b8c9117ac37779578
[ "MIT" ]
null
null
null
flarmfuncs.py
acasadoalonso/RealTimeScoring
843fd151a9a963851f79549b8c9117ac37779578
[ "MIT" ]
null
null
null
flarmfuncs.py
acasadoalonso/RealTimeScoring
843fd151a9a963851f79549b8c9117ac37779578
[ "MIT" ]
null
null
null
#!/usr/bin/python3
import MySQLdb

from ognddbfuncs import getognchk

unkglider = []


def getflarmid(conn, registration):
    # get the FLARMID from the GLIDERS table on the database
    cursG = conn.cursor()  # set the cursor for searching the devices
    try:
        cursG.execute("select idglider, flarmtype from GLIDERS where registration = '"+registration+"' ;")
    except MySQLdb.Error as e:
        try:
            print(">>>MySQL Error [%d]: %s" % (e.args[0], e.args[1]))
        except IndexError:
            print(">>>MySQL Error: %s" % str(e))
        print(">>>MySQL error:", "select idglider, flarmtype from GLIDERS where registration = '"+registration+"' ;")
        print(">>>MySQL data :", registration)
        return("NOREG")
    rowg = cursG.fetchone()  # look for that registration on the OGN database
    if rowg is None:
        return("NOREG")
    idglider = rowg[0]       # flarmid to report
    flarmtype = rowg[1]      # flarmtype flarm/ica/ogn
    if not getognchk(idglider):  # check that the registration is on the table - sanity check
        if idglider not in unkglider:
            print("Warning: FLARM ID=", idglider, "not on OGN DDB")
            unkglider.append(idglider)
    if flarmtype == 'F':
        flarmid = "FLR"+idglider  # flarm
    elif flarmtype == 'I':
        flarmid = "ICA"+idglider  # ICA
    elif flarmtype == 'O':
        flarmid = "OGN"+idglider  # ogn tracker
    else:
        flarmid = "RND"+idglider  # undefined
    # print "GGG:", registration, rowg, flarmid
    return (flarmid)

# -----------------------------------------------------------


def chkflarmid(idglider):
    # check if the FLARM ID exists; if not, add it to the unkglider table
    glider = idglider[3:9]  # only the last 6 chars of the ID
    if not getognchk(glider):  # check that the registration is on the table - sanity check
        if idglider not in unkglider:
            print("Warning: FLARM ID=", idglider, "not on OGN DDB")
            unkglider.append(idglider)
        return (False)
    return (True)

# -----------------------------------------------------------
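getflarmid builds its SQL by string concatenation, so a registration containing a quote character breaks the query; a sketch of the same lookup with a parameterized execute, which the MySQLdb driver supports via %s placeholders:

# Sketch only: same query as above, but letting the driver escape the value.
def getflarmid_param(conn, registration):
    cursG = conn.cursor()
    cursG.execute(
        "select idglider, flarmtype from GLIDERS where registration = %s",
        (registration,))
    return cursG.fetchone()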
44.081633
121
0.572685
246
2,160
5.028455
0.373984
0.016168
0.036378
0.043654
0.315279
0.315279
0.315279
0.315279
0.21342
0.21342
0
0.00507
0.269444
2,160
48
122
45
0.778834
0.28287
0
0.243902
0
0
0.189295
0
0
0
0
0
0
1
0.04878
false
0
0.04878
0
0.170732
0.146341
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6ec4b20c2057ab10ad8e413862f2137de3b2c8f
1,916
py
Python
sf2_webapp/__main__.py
EdinburghGenomics/sf2-webapp
7deab0a6513f8586646e16edacbc19bcfc3b18ad
[ "MIT" ]
null
null
null
sf2_webapp/__main__.py
EdinburghGenomics/sf2-webapp
7deab0a6513f8586646e16edacbc19bcfc3b18ad
[ "MIT" ]
null
null
null
sf2_webapp/__main__.py
EdinburghGenomics/sf2-webapp
7deab0a6513f8586646e16edacbc19bcfc3b18ad
[ "MIT" ]
null
null
null
"""Edinburgh Genomics Online SF2 web application. examples: To start the tornado server: $ start_sf2_webapp More information is available at: - http://gitlab.genepool.private/hdunnda/sf2-webapp """ __version__="0.0.1" import os import tornado.options from tornado.options import define, options import sf2_webapp.controller import sf2_webapp.config import sf2_webapp.database define("dbconfig", default=None, help="Path to the database configuration file", type=str) define("webconfig", default=None, help="Path to the web configuration file", type=str) define("emailconfig", default=None, help="Path to the email configuration file", type=str) define("loggingconfig", default=None, help="Path to the logging configuration file", type=str) define("enable_cors", default=False, help="Flag to indicate that CORS should be enabled", type=bool) def main(): # type: () -> None """Command line entry point for the web application""" tornado.options.parse_command_line() assert (options.dbconfig is None or os.path.exists(options.dbconfig)), 'Error: database configuration file ' + str(options.dbconfig) + ' not found.' assert (options.webconfig is None or os.path.exists(options.webconfig)), 'Error: web configuration file ' + str(options.webconfig) + ' not found.' assert (options.emailconfig is None or os.path.exists(options.emailconfig)), 'Error: email configuration file ' + str(options.emailconfig) + ' not found.' assert (options.loggingconfig is None or os.path.exists(options.loggingconfig)), 'Error: logging configuration file ' + str(options.loggingconfig) + ' not found.' sf2_webapp.controller.run( enable_cors=options.enable_cors, db_config_fp=options.dbconfig, web_config_fp=options.webconfig, email_config_fp=options.emailconfig, logging_config_fp=options.loggingconfig ) if __name__ == "__main__": main()
34.214286
166
0.740605
252
1,916
5.507937
0.305556
0.097983
0.043228
0.054755
0.233429
0.146974
0.07781
0
0
0
0
0.00612
0.147182
1,916
55
167
34.836364
0.843329
0.138831
0
0
0
0
0.263126
0
0
0
0
0
0.148148
1
0.037037
false
0
0.222222
0
0.259259
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6ed4a83976491832fc89d30d7ac8104322085ec
828
py
Python
tesseractApi/blur.py
MTES-MCT/Dossier-Facile-API
7b6dd5ae96b28fb1b2b3dffbd5e1bf9c92f2ae48
[ "MIT" ]
2
2021-05-31T10:14:08.000Z
2022-03-02T10:28:06.000Z
tesseractApi/blur.py
MTES-MCT/Dossier-Facile-API
7b6dd5ae96b28fb1b2b3dffbd5e1bf9c92f2ae48
[ "MIT" ]
6
2021-12-14T13:29:41.000Z
2022-02-17T10:33:13.000Z
tesseractApi/blur.py
MTES-MCT/Dossier-Facile-API
7b6dd5ae96b28fb1b2b3dffbd5e1bf9c92f2ae48
[ "MIT" ]
3
2021-05-31T10:14:20.000Z
2022-03-02T10:28:47.000Z
#!/usr/bin/env python
# coding:utf-8
import cv2
from PIL import Image
from PyPDF2 import PdfFileReader
import logging

Image.MAX_IMAGE_PIXELS = None
log = logging.getLogger(__name__)


def variance_of_laplacian(image):
    # compute the Laplacian of the image and then return the focus
    # measure, which is simply the variance of the Laplacian
    return cv2.Laplacian(image, cv2.CV_64F).var()


def blur_factor(image):
    # load the image, convert it to grayscale, and compute the
    # focus measure of the image using the Variance of Laplacian
    # method
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return variance_of_laplacian(gray)


def count_pages(pdf_path):
    with open(pdf_path, 'rb') as f:
        pdf = PdfFileReader(f)
        information = pdf.getDocumentInfo()
        return pdf.getNumPages()
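blur_factor expects an already-decoded BGR image; a usage sketch that reads an image from disk and applies a threshold (the file name and the cutoff of 100 are illustrative rules of thumb, not from this record):

# Usage sketch: the threshold is a common heuristic, not project code.
image = cv2.imread('scan.jpg')  # hypothetical input file
if blur_factor(image) < 100:
    log.warning('image looks blurry')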
26.709677
66
0.725845
119
828
4.915966
0.529412
0.068376
0.097436
0
0
0
0
0
0
0
0
0.01506
0.198068
828
30
67
27.6
0.865964
0.32971
0
0
0
0
0.00365
0
0
0
0
0
0
1
0.1875
false
0
0.25
0.0625
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
d6efdd858055997f80aafaaffde2d0f802fbcec1
216
py
Python
copyImage.py
MauricioQueiros/dealingWithFiles
1a99753f928e5dbc4202d93c61ecce3f235f2344
[ "MIT" ]
null
null
null
copyImage.py
MauricioQueiros/dealingWithFiles
1a99753f928e5dbc4202d93c61ecce3f235f2344
[ "MIT" ]
null
null
null
copyImage.py
MauricioQueiros/dealingWithFiles
1a99753f928e5dbc4202d93c61ecce3f235f2344
[ "MIT" ]
null
null
null
# Copy an image
def copyImage(imagePathToCopy, imageNameToPaste):
    with open(imagePathToCopy, 'rb') as rf:
        with open(imageNameToPaste, 'wb') as wf:
            for line in rf:
                wf.write(line)
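Iterating "lines" of a binary file works, but it splits the stream on stray newline bytes rather than reading fixed chunks; the standard library offers an equivalent one-liner, shown here as an alternative sketch:

# Equivalent behavior using the standard library.
import shutil

def copyImage_shutil(imagePathToCopy, imageNameToPaste):
    shutil.copyfile(imagePathToCopy, imageNameToPaste)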
30.857143
49
0.62037
26
216
5.153846
0.692308
0.119403
0
0
0
0
0
0
0
0
0
0
0.282407
216
7
50
30.857143
0.864516
0.055556
0
0
0
0
0.019608
0
0
0
0
0
0
1
0.2
false
0
0
0
0.2
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
d6f068d6c5eae56035d6b920c42bb8ee0fe1c621
1,705
py
Python
ppcd/utils/loss_compute.py
geoyee/PdRSCD
4a1a7256320f006c15e3e5b5b238fdfba8198853
[ "Apache-2.0" ]
44
2021-04-21T02:41:55.000Z
2022-03-09T03:01:16.000Z
ppcd/utils/loss_compute.py
MinZHANG-WHU/PdRSCD
612976225201d78adc7ff99529ada17b41fedc5d
[ "Apache-2.0" ]
2
2021-09-30T07:52:47.000Z
2022-02-12T09:05:35.000Z
ppcd/utils/loss_compute.py
MinZHANG-WHU/PdRSCD
612976225201d78adc7ff99529ada17b41fedc5d
[ "Apache-2.0" ]
6
2021-07-23T02:18:39.000Z
2022-01-14T01:15:50.000Z
# import paddle


def check_logits_losses(logits_list, losses):
    # automatic loss weights and decay
    if 'ceof' not in losses.keys():
        losses['ceof'] = [1] * len(losses['type'])
    if 'decay' not in losses.keys():
        losses['decay'] = [1] * len(losses['type'])
    if len(losses['type']) == len(losses['ceof']) and \
       len(losses['type']) == len(losses['decay']):
        len_logits = len(logits_list)
        len_losses = len(losses['type'])
        if len_logits != len_losses:
            raise RuntimeError(
                'The length of logits_list should equal to the types of loss config: {} != {}.'
                .format(len_logits, len_losses))
    else:
        raise RuntimeError('The logits_list type/coef/decay should equal.')


def loss_computation(logits_list, labels, losses, epoch=None, batch=None):
    check_logits_losses(logits_list, losses)
    loss_list = []
    lab_m = False
    if len(labels) > 1:
        lab_m = True
        if len(labels) != len(logits_list):
            raise RuntimeError(
                'The length of logits_list should equal to labels: {} != {}.'
                .format(len(logits_list), len(labels)))
    for i in range(len(logits_list)):
        logits = logits_list[i]
        coef_i = losses['ceof'][i]
        loss_i = losses['type'][i]
        label_i = labels[i] if lab_m else labels[0]  # multi-label loss
        if epoch != None and (epoch != 0 and batch == 0):
            decay_i = losses['decay'][i] ** epoch
            # print(decay_i)
            loss_list.append(decay_i * coef_i * loss_i(logits, label_i))
        else:
            loss_list.append(coef_i * loss_i(logits, label_i))
    return loss_list
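loss_computation expects a losses dict keyed by 'type', 'ceof', and 'decay'; a construction sketch with two hypothetical loss callables (any callables taking (logits, label) would do, e.g. paddleseg-style loss objects):

# Sketch of the expected configuration shape; the loss callables are
# placeholders, not part of this record.
def ce_loss(logits, label):
    ...

def dice_loss(logits, label):
    ...

losses = {
    'type': [ce_loss, dice_loss],
    'ceof': [1.0, 0.5],   # per-output coefficients ('ceof' as spelled above)
    'decay': [0.9, 0.9],  # per-epoch geometric decay factors
}
# loss_list = loss_computation(logits_list, labels, losses, epoch=3, batch=0)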
39.651163
96
0.567155
222
1,705
4.171171
0.225225
0.118791
0.070194
0.048596
0.37581
0.228942
0.157667
0.110151
0.110151
0.110151
0
0.005029
0.300293
1,705
43
97
39.651163
0.771165
0.024633
0
0.111111
0
0
0.149041
0
0
0
0
0
0
1
0.055556
false
0
0
0
0.083333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6f151a961ded81e75f0e5f337aec384c9bf1232
28,150
py
Python
aiida_bigdft/PyBigDFT/BigDFT/LRTDDFT.py
adegomme/aiida-bigdft-plugin
dfd17f166a8cd547d3e581c7c3c9f4eb32bd2aab
[ "MIT" ]
2
2020-06-10T02:45:59.000Z
2020-08-05T18:55:05.000Z
aiida_bigdft/PyBigDFT/BigDFT/LRTDDFT.py
mikiec84/aiida-bigdft-plugin
ce6ddc69def97977fe0209861ea7f1637090b60f
[ "MIT" ]
6
2019-12-15T19:35:34.000Z
2021-05-07T15:32:18.000Z
aiida_bigdft/PyBigDFT/BigDFT/LRTDDFT.py
mikiec84/aiida-bigdft-plugin
ce6ddc69def97977fe0209861ea7f1637090b60f
[ "MIT" ]
1
2020-08-05T18:55:21.000Z
2020-08-05T18:55:21.000Z
import numpy as np
from futile.Utils import write

HaeV = 27.21138386


def _occ_and_virt(log):
    """
    Extract the number of occupied and empty orbitals from a logfile
    """
    norb = log.log['Total Number of Orbitals']
    if log.log['Spin treatment'] == 'Averaged':
        norbv = log.evals[0].info[0]-norb
        return (norb,), (norbv,)
    elif log.log['Spin treatment'] == 'Collinear':
        mpol = log.log['dft']['mpol']
        norbu = int((norb+mpol)/2)
        norbd = norb-norbu
        norbvu = log.evals[0].info[0]-norbu
        norbvd = log.evals[0].info[0]-norbd
        return (norbu, norbd), (norbvu, norbvd)
    else:
        raise ValueError('Information for the orbitals to be implemented')


def transition_indexes(np, nalpha, indexes):
    """
    Returns the list of the indices in the bigdft convention that correspond
    to the couple iorb-ialpha with given spin.

    Args:
        np (tuple): (norbu,norbd) occupied orbitals: when of length 1
            assumed spin averaged
        nalpha (tuple): (norbu, norbd) virtual orbitals: when of length 1
            assumed spin averaged
        indexes (list): list of tuples of (iorb,ialpha,ispin) desired
            indices in python convention (start from 0)
    """
    nspin = len(np)
    inds = []
    for iorb, ialpha, ispin in indexes:
        jspin = ispin if nspin == 2 else 0
        ind = ialpha+iorb*nalpha[jspin]  # local index of the spin subspace
        if ispin == 1:
            ind += np[0]*nalpha[0]  # spin 2 comes after spin one
        inds.append(ind)
    return inds


def _collection_indexes(np, nvirt_small):
    harvest = []
    for ispin in [0, 1]:
        jspin = ispin if len(np) == 2 else 0
        for ip in range(np[jspin]):
            for ialpha in range(nvirt_small[jspin]):
                harvest.append([ip, ialpha, ispin])
    return harvest


def _collection_indexes_iocc(iocc, nvirt, spin=None):
    """
    For each iocc and a selected spin provide the list of couples that are
    concerned, up to nvirt. If spin is None provide the list for all values
    of the spin
    """
    harvest = []
    for ispin in [0, 1]:
        jspin = ispin if len(nvirt) == 2 else 0
        if spin is not None and ispin != spin:
            continue
        for ialpha in range(nvirt[jspin]):
            harvest.append([iocc, ialpha, ispin])
    return harvest


class TransitionMatrix(np.matrix):
    """
    Matrix of Transition Quantities, might be either :class:`CouplingMatrix`
    or :class:`TransitionMultipoles`

    Args:
        matrix (matrix-like): data of the coupling matrix. If present also
            the number of orbitals should be provided.
        norb_occ (tuple): number of occupied orbitals per spin channel.
            Compulsory if ``matrix`` is specified.
        norb_virt (tuple): number of empty orbitals per spin channel.
            Compulsory if ``matrix`` is specified.
        log (Logfile): Instance of the logfile from which the coupling
            matrix calculation is performed. Automatically retrieves the
            ``norb_occ`` and ``norb_virt`` parameters. When ``log``
            parameter is present the parameter ``matrix`` is ignored.

    Raises:
        ValueError: if the file of the coupling matrix indicated by ``log``
            does not exist
    """

    def __new__(cls, matrix=None, norb_occ=None, norb_virt=None, log=None):
        """
        Create the object from the arguments and return the ``self`` instance
        """
        import os
        if log is not None:
            datadir = log.log.get('radical', '')
            datadir = 'data-'+datadir if len(datadir) > 0 else 'data'
            cmfile = os.path.join(log.srcdir, datadir, cls._filename)
            if not os.path.isfile(cmfile):
                raise ValueError('The file "'+cmfile+'" does not exist')
            norb, norbv = _occ_and_virt(log)
            write('Loading data with ', norb, ' occupied and ', norbv,
                  ' empty states, from file "', cmfile, '"')
            try:
                import pandas as pd
                write('Using pandas:')
                mat = pd.read_csv(cmfile, delim_whitespace=True, header=None)
            except ImportError:
                write('Using numpy:')
                mat = np.loadtxt(cmfile)
            write('done')
        else:
            mat = matrix
        return super(TransitionMatrix, cls).__new__(cls, mat)

    def __init__(self, *args, **kwargs):
        """
        Perform sanity checks on the loaded matrix
        """
        log = kwargs.get('log')
        if log is not None:
            self.norb_occ, self.norb_virt = _occ_and_virt(log)
        else:
            self.norb_occ = kwargs.get('norb_occ')
            self.norb_virt = kwargs.get('norb_virt')
        assert(self.shape[0] == self._total_transitions())
        write("Shape is conformal with the number of orbitals")
        self._sanity_check()

    def _total_transitions(self):
        ntot = 0
        for no, nv in zip(self.norb_occ, self.norb_virt):
            ntot += no*nv
        if len(self.norb_occ) == 1:
            ntot *= 2
        return ntot

    def _subindices(self, norb_occ, norb_virt):
        for i, (no, nv) in enumerate(zip(norb_occ, norb_virt)):
            assert(no <= self.norb_occ[i] and nv <= self.norb_virt[i])
        harvest = _collection_indexes(norb_occ, norb_virt)
        return np.array(transition_indexes(norb_occ, self.norb_virt, harvest))

    def _sanity_check(self):
        pass


class CouplingMatrix(TransitionMatrix):
    """
    Casida Coupling Matrix, extracted from the calculation performed by BigDFT
    """
    _filename = 'coupling_matrix.txt'

    def _sanity_check(self):
        write('Casida Matrix is symmetric',
              np.allclose(self, self.T, atol=1.e-12))

    def subportion(self, norb_occ, norb_virt):
        """Extract a subportion of the coupling matrix.

        Returns a Coupling Matrix which is made by only considering the
        first ``norb_occ`` and ``norb_virt`` orbitals

        Args:
            norb_occ (tuple): new number of occupied orbitals. Must be
                lower than the instance value
            norb_virt (tuple): new number of virtual orbitals. Must be
                lower than the instance value
        """
        inds = self._subindices(norb_occ, norb_virt)
        mat = np.array([row[0, inds] for row in self[inds]])
        return CouplingMatrix(matrix=mat, norb_occ=norb_occ,
                              norb_virt=norb_virt)

    def diagonalize(self):
        """
        Diagonalize the Coupling Matrix

        Returns:
            (np.matrix, np.matrix): tuple of the Eigenvalues and
            Eigenvectors of the coupling matrix, as returned by
            :meth:`numpy.linalg.eigh`. We perform the transpose of the
            matrix with eigenvectors to have them sorted as row vectors
        """
        write('Diagonalizing Coupling matrix of shape', self.shape)
        E2, C_E2 = np.linalg.eigh(self)
        write('Eigensystem solved')
        C_E2 = C_E2.T
        return E2, C_E2


class TransitionMultipoles(TransitionMatrix):
    """
    Transition dipoles, extracted from the calculation performed by BigDFT
    """
    _filename = 'transition_quantities.txt'

    def subportion(self, norb_occ, norb_virt):
        """Extract a subportion of the Transition Multipoles.

        Returns a set of transition multipoles which is made by only
        considering the first ``norb_occ`` and ``norb_virt`` orbitals

        Args:
            norb_occ (tuple): new number of occupied orbitals. Must be
                lower than the instance value
            norb_virt (tuple): new number of virtual orbitals. Must be
                lower than the instance value

        Returns:
            TransitionMultipoles: reduced transition multipoles
        """
        inds = self._subindices(norb_occ, norb_virt)
        mat = np.array(self[inds])
        return TransitionMultipoles(matrix=mat, norb_occ=norb_occ,
                                    norb_virt=norb_virt)

    def get_transitions(self):
        """
        Get the transition quantities as the dimensional objects which
        should contribute to the oscillator strengths.

        Returns:
            numpy.array: Transition quantities multiplied by the square
            root of the unperturbed transition energy
        """
        newdipole = []
        for line in self:
            newdipole.append(np.ravel(line[0, 0]*line[0, 1:]))
        return np.array(newdipole)


class TransitionDipoles(TransitionMultipoles):
    """
    Transition dipoles as provided in the version of the code < 1.8.0.
    Deprecated, to be used in some particular cases
    """
    _filename = 'transition_dipoles.txt'

    def get_transitions(self):
        return self


class Excitations():
    """LR Excited states of a system

    Definition of the excited states in the Casida Formalism

    Args:
        cm (CouplingMatrix): the matrix of coupling among transitions
        tm (TransitionMultipoles): scalar product of multipoles among
            transitions
    """

    def __init__(self, cm, tm):
        self.cm = cm
        self.tm = tm
        self.eigenvalues, self.eigenvectors = cm.diagonalize()
        # : array: transition quantities coming from the multipoles
        self.transitions = tm.get_transitions()
        scpr = np.array(np.dot(self.eigenvectors, self.transitions))
        #: array: oscillator strengths components of the transitions defined
        # as the square of $\int w_a(\mathbf r) r_i $
        self.oscillator_strenghts = np.array([t**2 for t in scpr[:, 0:3]])
        # : array: average of all the components of the OS
        self.avg_os = np.average(self.oscillator_strenghts, axis=1)
        self.alpha_prime = 2.0*self.oscillator_strenghts / \
            self.eigenvalues[:, np.newaxis]
        """
        array: elements of the integrand of the statical polarizability
        in the space of the excitations
        """
        self._indices_for_spin_comparison = \
            self._get_indices_for_spin_comparison()
        self.identify_singlet_and_triplets(1.e-5)

    def _get_indices_for_spin_comparison(self):
        inds = [[], []]
        inds0 = []
        # get the indices for comparison, take the minimum between the spins
        if len(self.cm.norb_occ) == 1:
            nocc = self.cm.norb_occ[0]
            nvirt = self.cm.norb_virt[0]
            nos = [nocc, nocc]
            nvs = [nvirt, nvirt]
        else:
            nocc = min(self.cm.norb_occ)
            nvirt = min(self.cm.norb_virt)
            nos = self.cm.norb_occ
            nvs = self.cm.norb_virt
        for ispin in [0, 1]:
            for a in range(nvirt):
                for p in range(nocc):
                    inds[ispin].append([p, a, ispin])
            for a in range(nvirt, nvs[ispin]):
                for p in range(nocc, nos[ispin]):
                    inds0.append([p, a, ispin])
        transA = transition_indexes(
            self.cm.norb_occ, self.cm.norb_virt, inds[0])
        transB = transition_indexes(
            self.cm.norb_occ, self.cm.norb_virt, inds[1])
        trans0 = transition_indexes(self.cm.norb_occ, self.cm.norb_virt,
                                    inds0)
        return transA, transB, trans0

    def spectrum_curves(self, omega, slice=None, weights=None):
        """Calculate spectrum curves.

        Provide the set of the curves associated to the weights. The
        resulting curves might then be used to draw the excitation spectra.

        Args:
            omega (array): the linspace used for the plotting, of shape
                ``(n,)``. Must be provided in Atomic Units
            slice (array): the lookup array that has to be considered. If
                not provided the entire range is assumed
            weights (array): the set of arrays used to weight the spectra.
                Must have shape ``(rank,m)``, where ``rank`` is equal to the
                number of eigenvalues. If m is absent it is assumed to be 1.
                When not specified, it defaults to the average oscillator
                strengths.

        Returns:
            array: a set of spectrum curves, of shape equal to ``(n,m)``,
            where ``n`` is the shape of ``omega`` and ``m`` the size of the
            second dimension of ``weights``.
        """
        if slice is None:
            oo = self.eigenvalues[:, np.newaxis] - omega**2
            wgts = weights if weights is not None else self.avg_os
        else:
            oo = self.eigenvalues[slice, np.newaxis] - omega**2
            oo = oo[0]
            wgts = weights if weights is not None else self.avg_os[slice]
        return np.dot(2.0/oo.T, wgts)

    def identify_singlet_and_triplets(self, tol=1.e-5):
        """
        Find the lookup tables that select the singlets and the triplets
        among the excitations

        Args:
            tol (float): tolerance to be applied to recognise the spin
                character
        """
        sings = []
        trips = []
        for exc in range(len(self.eigenvalues)):
            sing, trip = self.project_on_spin(exc, tol)
            if sing:
                sings.append(exc)
            if trip:
                trips.append(exc)
        if len(sings) > 0:
            self.singlets = (np.array(sings),)
            """array: lookup table of the singlet excitations"""
        if len(trips) > 0:
            self.triplets = (np.array(trips),)
            """array: lookup table of the triplet excitations"""

    def _project_on_occ(self, exc):
        """
        Project a given eigenvector on the occupied orbitals. In the spin
        averaged case consider all the spin indices nonetheless
        """
        norb_occ = self.cm.norb_occ
        norb_virt = self.cm.norb_virt
        pProj_spin = []
        for ispin, norb in enumerate(norb_occ):
            pProj = np.zeros(norb)
            for iorb in range(norb):
                harvest = _collection_indexes_iocc(
                    iorb, self.cm.norb_virt,
                    spin=None if len(norb_occ) == 1 else ispin)
                inds = np.array(transition_indexes(
                    norb_occ, norb_virt, harvest))
                pProj[iorb] = np.sum(np.ravel(self.eigenvectors[exc, inds])**2)
            pProj_spin.append(pProj)
        return pProj_spin

    def project_on_spin(self, exc, tol=1.e-8):
        """
        Control if an excitation has a Singlet or Triplet character

        Args:
            exc (int): index of the excitation to be controlled

        Returns:
            tuple (bool,bool): couple of booleans indicating if the
            excitation is a singlet or a triplet respectively
        """
        A, B, zero = [np.ravel(self.eigenvectors[exc, ind])
                      for ind in self._indices_for_spin_comparison]
        issinglet = np.linalg.norm(A-B) < tol
        istriplet = np.linalg.norm(A+B) < tol
        return issinglet, istriplet
        # print (self.eigenvalues[exc], np.linalg.norm(A), np.linalg.norm(B),
        #        A-B, A+B, np.linalg.norm(zero))

    def _get_threshold(self, pProj_spin, th_energies, tol):
        """
        Identify the energy which is associated to the threshold of a given
        excitation. The tolerance is used to discriminate the component
        """
        ths = -1.e100
        for proj, en in zip(pProj_spin, th_energies):
            norb = len(en)
            pProj = proj.tolist()
            pProj.reverse()
            imax = norb-1
            for val in pProj:
                if val > tol:
                    break
                imax -= 1
            ths = max(ths, en[imax])
        return ths

    def split_excitations(self, evals, tol, nexc='all'):
        """Separate the excitations in channels.

        This method classifies the excitations according to the channel
        they belong to, and determines whether a given excitation can be
        considered as belonging to a discrete part of the spectrum or not.

        Args:
            evals (BandArray): the eigenvalues as they are provided (for
                instance) from a `Logfile` class instance.
            tol (float): tolerance for determining the threshold
            nexc (int,str): number of excitations to be analyzed. If
                ``'all'`` then the entire set of excitations are analyzed.
        """
        self.determine_occ_energies(evals)
        self.identify_thresholds(self.occ_energies, tol, len(
            self.eigenvalues) if nexc == 'all' else nexc)

    def identify_thresholds(self, occ_energies, tol, nexc):
        """Identify the thresholds per excitation.

        For each of the first ``nexc`` excitations, identify the energy
        value of its corresponding threshold. This value is determined by
        projecting the excitation components on the occupied states and
        verifying that their norm for the highest energy level is below a
        given tolerance.

        Args:
            occ_energies (tuple of array-like): contains the list of the
                energies of the occupied states per spin channel
            tol (float): tolerance for determining the threshold
            nexc (int): number of excitations to be analyzed
        """
        # : Norm of the $w_p^a$ states associated to each excitation
        self.wp_norms = []
        threshold_energies = []
        for exc in range(nexc):
            proj = self._project_on_occ(exc)
            self.wp_norms.append(proj)
            threshold_energies.append(
                self._get_threshold(proj, occ_energies, tol))
        # : list: identified threshold for inspected excitations
        self.threshold_energies = np.array(threshold_energies)
        self.excitations_below_threshold = np.where(
            np.abs(self.threshold_energies) >= np.sqrt(
                self.eigenvalues[0:len(self.threshold_energies)]))
        """
        array: Indices of the excitations which lie below their
        corresponding threshold
        """
        self.excitations_above_threshold = np.where(
            np.abs(self.threshold_energies) <
            np.sqrt(self.eigenvalues[0:len(self.threshold_energies)]))
        """
        array: Indices of the excitations which lie above their
        corresponding threshold
        """

    def determine_occ_energies(self, evals):
        """
        Extract the occupied energy levels from a Logfile BandArray
        structure, provided the tuple of the number of occupied states

        Args:
            evals (BandArray): the eigenvalues as they are provided (for
                instance) from a `Logfile` class instance.
        """
        norb_occ = self.cm.norb_occ
        occ_energies = []
        # istart=0
        for ispin, norb in enumerate(norb_occ):  # range(len(norb_occ)):
            # istart:istart+norb_occ[ispin]]))
            occ_energies.append(np.array(evals[0][ispin][0:norb]))
            # istart+=norb_tot[ispin]
        # : array: energies of the occupied states out of the logfile
        self.occ_energies = occ_energies
        # : float: lowest threshold of the excitations. All excitations are
        # discrete below this level
        self.first_threshold = abs(
            max(np.max(self.occ_energies[0]), np.max(self.occ_energies[-1])))

    def plot_alpha(self, **kwargs):
        """Plot the imaginary part.

        Plot the real or imaginary part of the dynamical polarizability.

        Keyword Arguments:
            real (bool): True if real part has to be plotted. The imaginary
                part is plotted otherwise
            eta (float): Value of the complex imaginary part. Defaults to
                1.e-2.
            group (str): see :meth:`lookup`
            **kwargs: other arguments that might be passed to the
                :meth:`plot` method of the :mod:`matplotlib.pyplot` module.

        Returns:
            :mod:`matplotlib.pyplot`: the reference to
            :mod:`matplotlib.pyplot` module.
        """
        import matplotlib.pyplot as plt
        from futile.Utils import kw_pop
        emax = np.max(np.sqrt(self.eigenvalues))*HaeV
        kwargs, real = kw_pop('real', False, **kwargs)
        plt.xlim(xmax=emax)
        if real:
            plt.ylabel(r'$\mathrm{Re} \alpha$ (AU)', size=14)
        else:
            plt.ylabel(r'$\mathrm{Im} \alpha$', size=14)
        plt.yticks([])
        plt.xlabel(r'$\omega$ (eV)', size=14)
        if hasattr(self, 'first_threshold'):
            eps_h = self.first_threshold*HaeV
            plt.axvline(x=eps_h, color='black', linestyle='--')
        kwargs, eta = kw_pop('eta', 1.e-2, **kwargs)
        omega = np.linspace(0.0, emax, 5000)+2.0*eta*1j
        kwargs, group = kw_pop('group', 'all', **kwargs)
        slice = self.lookup(group)
        spectrum = self.spectrum_curves(omega, slice=slice)
        toplt = spectrum.real if real else spectrum.imag
        pltkwargs = dict(c='black', linewidth=1.5)
        pltkwargs.update(kwargs)
        plt.plot(omega*HaeV, toplt, **pltkwargs)
        return plt

    def lookup(self, group):
        """
        Identify the group of the excitations according to the argument

        Args:
            group (str): A string chosen between

                * ``"all"`` : provides the entire set of excitations
                  (:py:class:`None` instead of the lookup array)
                * ``"bt"`` : provides only the excitations below threshold
                * ``"at"`` : provides only the excitations above threshold
                * ``"singlets"`` : provides the index of the excitations
                  that have a singlet character
                * ``"triplets"`` : provides the index of the excitations
                  that have a triplet character
        """
        slice = None
        if group == 'bt':
            slice = self.excitations_below_threshold
        if group == 'at':
            slice = self.excitations_above_threshold
        if group == 'singlets':
            slice = self.singlets
        if group == 'triplets':
            slice = self.triplets
        return slice

    def plot_excitation_landscape(self, **kwargs):
        """
        Represent the excitation landscape as split in the excitations class

        Args:
            **kwargs: keyword arguments to be passed to the `pyplot`
                instance. The ``xlabel``, ``ylabel`` as well as ``xlim``
                are already set.

        Returns:
            :mod:`matplotlib.pyplot`: the reference to
            :mod:`matplotlib.pyplot` module.

        Example:
            >>> ex=Excitations(cm,tm)
            >>> ex.split_excitations(evals=...,tol=1.e-4,nexc=...)
            >>> ex.plot_excitation_landscape(title='Excitation landscape')
        """
        import matplotlib.pyplot as plt
        Emin = 0.0
        Emax = np.max(np.sqrt(self.eigenvalues))*HaeV
        for level in self.occ_energies[0]:
            eng_th = level*HaeV
            plt.plot((Emin, eng_th), (level, level), '--', c='red',
                     linewidth=1)
            plt.plot((eng_th, Emax), (level, level), '-', c='red',
                     linewidth=1)
            plt.scatter(abs(eng_th), level, marker='x', c='red')
        ind_bt = self.excitations_below_threshold
        exc_bt = np.sqrt(self.eigenvalues)[ind_bt]
        lev_bt = self.threshold_energies[ind_bt]
        plt.scatter(HaeV*exc_bt, lev_bt, s=16, marker='o', c='black')
        ind_at = self.excitations_above_threshold
        exc_at = np.sqrt(self.eigenvalues)[ind_at]
        lev_at = self.threshold_energies[ind_at]
        plt.scatter(HaeV*exc_at, lev_at, s=14, marker='s', c='blue')
        plt.xlabel('energy (eV)')
        plt.ylabel('Threshold energy (Ha)')
        plt.xlim(xmin=Emin-1, xmax=Emax)
        for attr, val in kwargs.items():
            if type(val) == dict:
                getattr(plt, attr)(**val)
            else:
                getattr(plt, attr)(val)
        return plt

    def dos_dict(self, group='all'):
        """Dictionary for DoS creation.

        Creates the keyword arguments that have to be passed to the
        `meth:BigDFT.DoS.append` method of the `DoS` class

        Args:
            group (str): see :meth:`lookup`

        Returns:
            :py:class:`dict`: keyword arguments that can be passed to the
            `meth:BigDFT.DoS.append` method of the :class:`DoS.DoS` class
        """
        ev = np.sqrt(self.eigenvalues)
        slice = self.lookup(group)
        if slice is not None:
            ev = ev[slice]
        return dict(energies=np.array([np.ravel(ev)]), units='AU')

    def dos(self, group='all', **kwargs):
        """Density of States of the Excitations.

        Provides an instance of the :class:`~BigDFT.DoS.DoS` class,
        corresponding to the Excitations instance.

        Args:
            group (str): see :meth:`lookup`
            **kwargs: other arguments that might be passed to the
                :class:`DoS.DoS` instantiation

        Returns:
            :class:`DoS.DoS`: instance of the Density of States class
        """
        from BigDFT.DoS import DoS
        kwa = self.dos_dict(group=group)
        kwa['energies'] = kwa['energies'][0]
        if hasattr(self, 'first_threshold'):
            kwa['fermi_level'] = self.first_threshold
        else:
            kwa['fermi_level'] = 0.0
        kwa.update(kwargs)
        return DoS(**kwa)

    def plot_Sminustwo(self, coord, alpha_ref=None, group='all'):
        """Inspect S-2 sum rule.

        Provides a handle to the plotting of the $S^{-2}$ sum rule, which
        should provide reference values for the static polarizability
        tensor.

        Args:
            coord (str): the coordinate used for inspection. May be ``'x'``,
                ``'y'`` or ``'z'``.
            alpha_ref (list): diagonal of the reference static
                polarizability tensor (for instance calculated via finite
                differences). If present the repartition of the contribution
                of the various groups of excitations is plotted.
            group (str): see :meth:`lookup`

        Returns:
            reference to :mod:`matplotlib.pyplot` module.
        """
        import matplotlib.pyplot as plt
        idir = ['x', 'y', 'z'].index(coord)
        fig, ax1 = plt.subplots()
        ax1.set_xlabel('energy (eV)', size=14)
        plt.ylabel(r'$\alpha_{'+coord+coord+r'}$ (AU)', size=14)
        if alpha_ref is not None:
            plt.axhline(y=alpha_ref[idir], color='r', linestyle='--')
        if hasattr(self, 'first_threshold'):
            eps_h = abs(HaeV*self.first_threshold)
            plt.axvline(x=eps_h, color='black', linestyle='--')
        e = np.sqrt(self.eigenvalues)*HaeV
        w_ii = self.alpha_prime[:, idir]
        slice = self.lookup(group)
        if slice is not None:
            e = e[slice]
            w_ii = w_ii[slice]
        ax1.plot(e, np.cumsum(w_ii))
        ax2 = ax1.twinx()
        ax2.plot(e, w_ii, color='grey', linestyle='-')
        plt.ylabel(r'$w_{'+coord+coord+r'}$ (AU)', size=14)
        return plt


def get_alpha_energy(log, norb, nalpha):
    return log.evals[0][0][norb+nalpha-1]


def identify_contributions(numOrb, na, exc, C_E2):
    pProj = np.zeros(numOrb*2)
    for p in range(numOrb):
        for spin in [0, 1]:
            # sum over all the virtual orbital and spin
            for alpha in range(na):
                # extract the value of the index of C_E2
                elements = transition_indexes(
                    [numOrb], [na], [[p, alpha, spin]])
                for el in elements:
                    pProj[p+numOrb*spin] += C_E2[exc][el]**2
    pProj = pProj[0:numOrb]+pProj[numOrb:2*numOrb]  # halves the components
    return pProj


def get_p_energy(log, norb):
    return log.evals[0][0][0:norb]


def get_threshold(pProj, th_energies, th_levels, tol):
    norb = len(th_energies)
    pProj = pProj.tolist()
    pProj.reverse()
    imax = norb-1
    for val in pProj:
        if val > tol:
            break
        imax -= 1
    return [th_levels[imax], th_energies[imax]]
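A usage sketch tying the classes together, assuming a BigDFT Logfile instance log whose run produced the coupling-matrix files (the variable log and the plt.show() call are illustrative, not part of this record):

# Usage sketch; 'log' is assumed to be a BigDFT Logfile with TDDFT output.
cm = CouplingMatrix(log=log)
tm = TransitionMultipoles(log=log)
ex = Excitations(cm, tm)
plt = ex.plot_alpha(eta=1.e-2, group='all')
plt.show()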
37.383798
79
0.590515
3,554
28,150
4.587226
0.164041
0.015948
0.010428
0.010121
0.25161
0.201435
0.171993
0.161197
0.141569
0.11961
0
0.008755
0.310231
28,150
752
80
37.433511
0.83087
0.357052
0
0.158602
0
0
0.049097
0.002947
0
0
0
0
0.005376
1
0.094086
false
0.002688
0.026882
0.008065
0.217742
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6f34e314e7e198246f5d865ff9cf3535ca580d5
1,290
py
Python
pi-pytorch/tutorials/sensor/eval.py
tongni1975/stackup-workshops
d83f1d5adcc0b133b10e22d1db295020af967bac
[ "MIT" ]
12
2018-07-21T14:38:55.000Z
2020-08-18T07:27:39.000Z
pi-pytorch/tutorials/sensor/eval.py
tongni1975/stackup-workshops
d83f1d5adcc0b133b10e22d1db295020af967bac
[ "MIT" ]
1
2019-07-28T03:17:44.000Z
2019-12-14T09:01:18.000Z
pi-pytorch/tutorials/sensor/eval.py
tongni1975/stackup-workshops
d83f1d5adcc0b133b10e22d1db295020af967bac
[ "MIT" ]
10
2018-06-12T07:54:07.000Z
2020-08-18T07:31:47.000Z
import torch
from torch.autograd import Variable
from sklearn.metrics import confusion_matrix, classification_report
import numpy as np
import time

# import our model and data
from rnn import RNN
from data import get_data

hidden_size = 10
learning_rate = 0.01
num_layers = 2
num_epochs = 1000
sequence_length = 10
batch_size = 32


def load_model(input_size):
    model = RNN(input_size, hidden_size, num_layers)
    # load on CPU only
    checkpoint = torch.load('checkpoint.pt', map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()
    print(model)
    print('model training loss', checkpoint['loss'])
    print('model training epoch', checkpoint['epoch'])
    return model


if __name__ == '__main__':
    X_train, X_test, y_train, y_test = get_data(sequence_length)
    input_size = X_train.shape[2]  # batch, seq_len, input_size
    model = load_model(input_size)

    inputs = Variable(X_test.float())
    tick = time.time()
    outputs = model(inputs)
    tock = time.time()

    # convert probabilities => 0 or 1
    y_pred = (outputs.detach().numpy() > 0.5).astype(np.int)

    print('prediction time: %.3fs' % (tock - tick))
    print(confusion_matrix(y_test.values, y_pred))
    print(classification_report(y_test.values, y_pred))
24.807692
67
0.712403
187
1,290
4.663102
0.438503
0.051606
0.03211
0.041284
0.036697
0
0
0
0
0
0
0.018886
0.17907
1,290
51
68
25.294118
0.804533
0.078295
0
0
0
0
0.092905
0
0
0
0
0
0
1
0.029412
false
0
0.205882
0
0.264706
0.176471
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6f3a4cb8ec73baf05b168d49488b8ba1e024e77
357
py
Python
semesterIII/FADP/Exp2.py
ShreerajRaul/ghriet
4ff8372ced0c871f019a61b735c60725b6d373d4
[ "MIT" ]
null
null
null
semesterIII/FADP/Exp2.py
ShreerajRaul/ghriet
4ff8372ced0c871f019a61b735c60725b6d373d4
[ "MIT" ]
null
null
null
semesterIII/FADP/Exp2.py
ShreerajRaul/ghriet
4ff8372ced0c871f019a61b735c60725b6d373d4
[ "MIT" ]
null
null
null
# Solve the quadratic equation ax**2 + bx + c = 0

# import complex math module
import cmath

a = int(input("Enter a:"))
b = int(input("Enter b:"))
c = int(input("Enter c:"))

# calculate the discriminant
d = (b**2) - (4*a*c)

# find the two solutions
sol1 = (-b - cmath.sqrt(d)) / (2*a)
sol2 = (-b + cmath.sqrt(d)) / (2*a)

print('The solutions are {0} and {1}'.format(sol1, sol2))
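cmath.sqrt returns a complex result even for a negative discriminant, so the script handles complex roots without special-casing; a non-interactive sketch of the same computation for such a case (the coefficients are illustrative):

# Same computation without input(), for a negative-discriminant case.
import cmath

a, b, c = 1, 2, 5                 # discriminant = 4 - 20 = -16
d = (b**2) - (4*a*c)
sol1 = (-b - cmath.sqrt(d)) / (2*a)
sol2 = (-b + cmath.sqrt(d)) / (2*a)
print(sol1, sol2)                 # (-1-2j) (-1+2j)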
29.75
55
0.641457
66
357
3.469697
0.530303
0.104803
0.170306
0.09607
0.113537
0.113537
0
0
0
0
0
0.038961
0.137255
357
12
55
29.75
0.704545
0.336134
0
0
0
0
0.223176
0
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6f3ad28cff5e5298f763872e2703f17b36665e5
712
py
Python
very_scratch/dl_vs_vbo.py
ibrahemesam/Fos
f2f284a2c7bdc24dafafebb8aa3141ebf225e451
[ "BSD-3-Clause" ]
2
2016-08-03T10:33:08.000Z
2021-06-23T18:50:14.000Z
scratch/very_scratch/dl_vs_vbo.py
fos/fos-legacy
db6047668781a0615abcebc7d55a7164f3105047
[ "BSD-3-Clause" ]
null
null
null
scratch/very_scratch/dl_vs_vbo.py
fos/fos-legacy
db6047668781a0615abcebc7d55a7164f3105047
[ "BSD-3-Clause" ]
1
2021-07-11T00:16:46.000Z
2021-07-11T00:16:46.000Z
import numpy as np

from fos.core.scene import Scene
from fos.core.plots import Plot
from fos.core.tracks import Tracks
from fos.core.points import Points

#data=200*np.random.rand(1000000,3)
#colors=np.random.rand(1000000,4)

data = [200*np.random.rand(int(np.round(30*np.random.rand()))+1, 3).astype('float32') for i in range(250000)]
colors = [np.random.rand(len(d), 4) for i, d in enumerate(data)]

#print('no of bytes',colors.nbytes + data.nbytes)

tr = Tracks(data, colors, lists=True)

#slot={0:{'actor':tr,'slot':(0, 800000)}}
#Scene(Plot(slot)).run()

#pts=Points([data],[colors],point_size=3.,lists=True)

slot = {0: {'actor': tr, 'slot': (0, 800000)}}
#       1:{'actor':pts,'slot':(0, 800000)}}

Scene(Plot(slot)).run()
24.551724
106
0.698034
126
712
3.936508
0.396825
0.080645
0.120968
0.060484
0.292339
0.215726
0.215726
0.129032
0.129032
0
0
0.092025
0.08427
712
28
107
25.428571
0.668712
0.370787
0
0
0
0
0.036446
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
d6f4d7ce78ee83568625e15f5b3230cc4dd9e69a
419
py
Python
src/ipyradiant/visualization/__init__.py
dfreeman06/ipyradiant
6298889eb0d28c0dda01c4fc9d422814b9858878
[ "BSD-3-Clause" ]
null
null
null
src/ipyradiant/visualization/__init__.py
dfreeman06/ipyradiant
6298889eb0d28c0dda01c4fc9d422814b9858878
[ "BSD-3-Clause" ]
7
2020-07-21T12:48:13.000Z
2020-07-24T15:27:26.000Z
src/ipyradiant/visualization/__init__.py
dfreeman06/ipyradiant
6298889eb0d28c0dda01c4fc9d422814b9858878
[ "BSD-3-Clause" ]
1
2020-07-20T20:45:49.000Z
2020-07-20T20:45:49.000Z
"""vis widgets """ # Copyright (c) 2020 ipyradiant contributors. # Distributed under the terms of the Modified BSD License. __all__ = [ "CytoscapeVisualizer", "DatashaderVisualizer", "VisualizerBase", "LayoutSelector", "NXBase", ] from .base import NXBase, VisualizerBase from .cytoscape import CytoscapeVisualizer from .datashader_vis import DatashaderVisualizer from .tools import LayoutSelector
24.647059
58
0.75895
40
419
7.825
0.675
0
0
0
0
0
0
0
0
0
0
0.011331
0.157518
419
16
59
26.1875
0.875354
0.26969
0
0
0
0
0.244966
0
0
0
0
0
0
1
0
false
0
0.363636
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
d6f5093886a911828813b1cbd57ad326b9f81128
11,780
py
Python
src/cascade/stats/compartmental.py
adolgert/cascade
2084e07c9ee5e901dd407b817220de882c7246a3
[ "MIT" ]
null
null
null
src/cascade/stats/compartmental.py
adolgert/cascade
2084e07c9ee5e901dd407b817220de882c7246a3
[ "MIT" ]
null
null
null
src/cascade/stats/compartmental.py
adolgert/cascade
2084e07c9ee5e901dd407b817220de882c7246a3
[ "MIT" ]
null
null
null
r""" .. _compartmental-modeling-tools: Compartmental Modeling Tools ---------------------------- These functions build theoretical distributions with which to understand how Dismod-AT works, and how disease processes work. 1. Specify a disease process by making simple Python functions that return disease rates as a function of time. You can specify a set of :math:`(\iota, \rho, \chi, \mu)`, or you can specify a set of :math:`(\iota, \rho, \chi, \omega)`. We'll call the former the total-mortality specification and the latter the other-mortality specification. There is a basic version of total mortality supplied for you in ``siler_default``. 2. Given a set of pure functions, solve the differential equations in order to determine prevalence over time. For the total mortality specification, this means running:: S, C, P = prevalence_solution(iota, rho, emr, total) The returned values are functions for susceptibles, with-condition, and prevalence of with-condition, :math:`P=C/(S+C)`. They are functions built by interpolating solutions to the core differential equation. For the other-mortality specification, this means running:: S, C = dismod_solution(iota, rho, emr, omega) It can be helpful to define the total alive as a function:: def lx(t): return S(t) + C(t) This is what we should use as a weighting function for computing integrands. 3. Create a set of demographic intervals (regions of ages) over which to compute averaged values from the continuous rates:: nx = (1/52) * np.array([1, 3, 52-3, 4*52, 5*52, 5*52], dtype=np.float) intervals = DemographicInterval(nx) observations, normalization = integrands_from_function( [incidence, emr, C], lx, intervals ) The resulting list of observations is a set of arrays that then can go to Dismod-AT. .. _differential-equations: Differential Equations ---------------------- .. _dismod-at-equation: DismodAT Differential Equation ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ These functions manipulate data for the compartmental model that the differential equation describes. Using :math:`S` as without this condition and alive, :math:`C` as with condition, and :math:`R` as removed, or dead, .. math:: \frac{dS}{d\tau} = -\iota S + \rho C -\omega S \frac{dC}{d\tau} = \iota S - \rho C - (\omega + \chi) C \frac{dR}{d\tau} = \omega (S+C) + \chi C. The time is cohort time, which we denote as :math:`\tau`. These functions work with continuous functions. They are designed for creating test data and analyzing interpolated data. .. _prevalence-only: Prevalence-Only Equation ^^^^^^^^^^^^^^^^^^^^^^^^ The prevalence-only form of the ODE. The full differential equation can be transformed into a space .. math:: P = \frac{C}{S+C} N = S + C. to yield two differential equations, one for the prevalence and one for the total mortality .. math:: P' = \iota (1-P) - \rho P - \chi (1-P) P N' = -\omega N - \chi C. The :math:`N` variable doesn't appear in the first equation, so it is independent and can be solved alone. Then the second equation is equivalent to total mortality .. math:: N' = -\mu N which indicates that :math:`\mu = \omega + \chi P`. """ import numpy as np from scipy.integrate import quad, solve_ivp from cascade.core.log import getLoggers CODELOG, MATHLOG = getLoggers(__name__) def build_derivative_prevalence(iota, rho, chi): r""" Given three functions for the basic rates, this creates a function that lets the ODE solver integrate the prevalence-only differential equation. 
Args: iota (function): Incidence rate rho (function): Remission rate chi (function): Excess mortality rate Returns: function: The arguments are time and a sequence of :math:`N` prevalence states, given as a :math:`(1, N)` array. """ def ode_right_hand_side(t, y): return iota(t) * (1 - y) - rho(t) * y - chi(t) * (1 - y) * y return ode_right_hand_side def build_derivative_total(mu): r""" Turns a mortality rate into an argument for the ODE solver. Args: mu (function): Total mortality rate Returns: function: The arguments are time and a sequence of :math:`N` prevalence states, given as a :math:`(1, N)` array. """ def ode_right_hand_side(t, y): return -mu(t) * y return ode_right_hand_side def build_derivative_full(iota, rho, chi, omega): r""" The Dismod-AT ODE Args: iota (function): Incidence rate rho (function): Remission rate chi (function): Excess mortality rate omega (function): Other-cause mortality Returns: function: The arguments are time and a sequence of :math:`N` prevalence states, given as a :math:`(2, N)` array. """ def ode_right_hand_side(t, y): sprime = -(iota(t) + omega(t)) * y[0, :] + rho(t) * y[1, :] cprime = iota(t) * y[0, :] - (rho(t) + omega(t) + chi(t)) * y[1, :] return np.vstack([sprime, cprime]) return ode_right_hand_side def omega_from_mu(mu, chi, P): r""" Given functions for :math:`(\mu, \chi, P)`, return a function for :math:`\omega`. Args: mu (function): Total mortality rate. chi (function): Excess mortality rate. P (function): Prevalence. Returns: function: Other-cause mortality. """ def omega(t): return mu(t) - chi(t) * P(t) return omega def mu_from_omega(omega, chi, P): r""" Given :math:`(\omega, \chi, P)`, return a function for total mortality, :math:`\mu`. Args: omega (function): Other-cause mortality chi (function): Excess mortality. P (function): Prevalence. Returns: function: Total mortality rate. """ def total_mortality(t): return omega(t) + chi(t) * P(t) return total_mortality def solve_differential_equation(f_derivatives, initial, oldest=120): r""" Solve differential equations between ages 0 and oldest. Uses ``numpy.integrate.solve_ivp`` underneath. Args: f_derivatives (function): A function that returns first derivatives of the differential equation. initial (np.array): A numpy array of initial values. Must be the same dimension as the returned by f_derivatives. oldest (float): Upper limit of integration. For instance, 100. Returns: Array of interpolation functions, of same length as input function's return values. """ bunch = solve_ivp(f_derivatives, t_span=(0, oldest), y0=initial, vectorized=True, dense_output=True) return bunch.sol SILER_CONSTANTS = [0, 0.2, 0.0002, 0.003, 1, 0.1, 0.015, 0.01] def siler_default(): r""" Construct a total mortality rate using the Siler distribution and default constants. """ return siler_time_dependent_hazard(SILER_CONSTANTS) def siler_time_dependent_hazard(constants): r""" This Siler distribution is a good approximation to what a real total mortality rate looks like. Both the equations and the parameters come from a paper [1] where they were fit to a Scandinavian country. We will use this as the one true mortality rate for this session. [1] V. Canudas-Romo and R. Schoen, “Age-specific contributions to changes in the period and cohort life expectancy,” Demogr. Res., vol. 13, pp. 63–82, 2005. Args: constants (np.array): List of constants. The first is time because this function can model change in a total mortality distribution over time. 
These are named according to the paper and are, in order, "t, a1, a2, a3, b1, b2, c1, c2". Returns: A function that returns mortality rate as a function of age. """ t, a1, a2, a3, b1, b2, c1, c2 = constants def siler(x): return a1 * np.exp(-b1 * x - c1 * t) + a2 * np.exp(b2 * x - c2 * t) + a3 * np.exp(-c2 * t) return siler def total_mortality_solution(mu): r"""Given a total mortality rate, as a function, return :math:`N=l(x)`.""" n_array = solve_differential_equation(build_derivative_total(mu), initial=np.array([1.0], dtype=float)) def total_pop(t): val = n_array(t)[0] if isinstance(val, np.ndarray): val[val < 0] = 0 elif val < 0: return 0.0 return val return total_pop def prevalence_solution(iota, rho, chi, mu): r"""This uses the single, prevalence-based equation.""" N = total_mortality_solution(mu) f_b = build_derivative_prevalence(iota, rho, chi) bunch = solve_differential_equation(f_b, initial=np.array([1e-6])) P = lambda t: bunch(t)[0] C = lambda t: P(t) * N(t) S = lambda t: (1 - P(t)) * N(t) return S, C, P def dismod_solution(iota, rho, chi, omega): r"""This solves the Dismod-AT equations.""" f_b = build_derivative_full(iota, rho, chi, omega) bunch = solve_differential_equation(f_b, initial=np.array([1.0 - 1e-6, 1e-6], dtype=np.float)) S = lambda t: bunch(t)[0] C = lambda t: bunch(t)[1] return S, C def average_over_interval(raw_rate, weight_function, intervals): r""" Construct demographic observations from a raw rate function. This is a one-dimensional function, presumably along the cohort time. It doesn't integrate over ages and years. Args: raw_rate (function): A function that returns a rate. weight_function (function): A function that returns a weight. This will usually be :math:`N`, the total population. intervals (DemographicInterval): Set of contiguous intervals over which to average the values. Returns: np.ndarray: List of integrand values. """ def averaging_function(t): return raw_rate(t) * weight_function(t) results = np.zeros(len(intervals), dtype=np.float) for interval_idx in range(len(intervals)): start = intervals.start[interval_idx] finish = intervals.finish[interval_idx] results[interval_idx] = quad(averaging_function, start, finish)[0] return results def integrand_normalization(weight_function, intervals): r""" Make the denominator for integrands. This is a one-dimensional function, presumably along the cohort time. It doesn't integrate over ages and years. Args: weight_function (function): Weights, usually population. intervals (DemographicInterval): Contiguous time periods. Returns: np.array: Integrated values of the weight function. """ def constant_rate(t): return 1.0 return average_over_interval(constant_rate, weight_function, intervals) def integrands_from_function(rates, weight_function, intervals): r""" Given a list of rate functions and a weight function, return their integrands on intervals. Args: rates (list[function]): A list of rate functions to integrate. weight_function (function): The weight function, usually population. intervals (DemographicInterval): A set of time intervals, here along the cohort time. Returns: (list[np.array], np.array): A list of integrands, followed by the integrand that is the weighting function. """ normalization = integrand_normalization(weight_function, intervals) rate_integrands = list() for rate in rates: rate_integrands.append(average_over_interval(rate, weight_function, intervals) / normalization) return rate_integrands, normalization
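Following the module's own step-1/step-2 recipe, a minimal end-to-end sketch. The constant rates iota, rho, and chi below are made-up illustrative values; only siler_default and prevalence_solution come from the module:

from cascade.stats.compartmental import prevalence_solution, siler_default

# Made-up constant rates, purely for illustration.
iota = lambda t: 0.01   # incidence
rho = lambda t: 0.0     # remission
chi = lambda t: 0.05    # excess mortality
mu = siler_default()    # the supplied total-mortality hazard

S, C, P = prevalence_solution(iota, rho, chi, mu)
print(P(50.0))  # prevalence of the condition at age 50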
29.672544
107
0.655178
1,652
11,780
4.598063
0.209443
0.031332
0.010532
0.012638
0.247367
0.196419
0.156398
0.13415
0.124408
0.097946
0
0.013298
0.240323
11,780
396
108
29.747475
0.835401
0.651358
0
0.063158
0
0
0
0
0
0
0
0
0
1
0.242105
false
0
0.031579
0.073684
0.526316
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
d6f5c1437a7d4728359f187da5b4e39080bc257e
722
py
Python
apps/badge/migrations/0010_auto_20161112_2017.py
lestrato/badgepack
7432c0ead1d5f63dd509620a0bb06bd76828b590
[ "MS-PL" ]
3
2016-10-21T01:35:46.000Z
2020-11-07T01:20:05.000Z
apps/badge/migrations/0010_auto_20161112_2017.py
lestrato/badgepack
7432c0ead1d5f63dd509620a0bb06bd76828b590
[ "MS-PL" ]
31
2016-10-31T19:28:53.000Z
2017-01-19T16:55:49.000Z
apps/badge/migrations/0010_auto_20161112_2017.py
lestrato/badgepack
7432c0ead1d5f63dd509620a0bb06bd76828b590
[ "MS-PL" ]
1
2020-11-07T01:20:07.000Z
2020-11-07T01:20:07.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-13 01:17
from __future__ import unicode_literals

from django.db import migrations, models
import randomslugfield.fields


class Migration(migrations.Migration):

    dependencies = [
        ('badge', '0009_auto_20161112_1649'),
    ]

    operations = [
        migrations.AddField(
            model_name='badgeclass',
            name='slug',
            field=randomslugfield.fields.RandomSlugField(
                blank=True, editable=False, length=7, max_length=7, unique=True),
        ),
        migrations.AlterField(
            model_name='badgeclass',
            name='image',
            field=models.ImageField(upload_to='uploads/badges/'),
        ),
    ]
26.740741
122
0.628809
76
722
5.815789
0.723684
0.095023
0.085973
0.104072
0
0
0
0
0
0
0
0.064935
0.253463
722
26
123
27.769231
0.755102
0.094183
0
0.210526
1
0
0.110599
0.03533
0
0
0
0
0
1
0
false
0
0.157895
0
0.315789
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d6f5c7c92dd0905f3aec94fff3ac8b43d06bc4f8
2,084
py
Python
pynars/utils/tools.py
AIxer/PyNARS
443b6a5e1c9779a1b861df1ca51ce5a190998d2e
[ "MIT" ]
null
null
null
pynars/utils/tools.py
AIxer/PyNARS
443b6a5e1c9779a1b861df1ca51ce5a190998d2e
[ "MIT" ]
null
null
null
pynars/utils/tools.py
AIxer/PyNARS
443b6a5e1c9779a1b861df1ca51ce5a190998d2e
[ "MIT" ]
null
null
null
import sys
from typing import Callable, List

try:
    sys.getsizeof(0)
    getsizeof = lambda x: sys.getsizeof(x)
except Exception:  # a bare except would also swallow KeyboardInterrupt
    # import resource
    getsizeof = lambda _: 1  # resource.getrusage(resource.RUSAGE_SELF).ru_maxrss


def get_size(obj, seen=None):
    """Recursively finds size of objects"""
    size = getsizeof(obj)
    if seen is None:
        seen = set()
    obj_id = id(obj)
    if obj_id in seen:
        return 0
    # Important mark as seen *before* entering recursion to gracefully handle
    # self-referential objects
    seen.add(obj_id)
    if isinstance(obj, dict):
        size += sum([get_size(v, seen) for v in obj.values()])
        size += sum([get_size(k, seen) for k in obj.keys()])
    elif hasattr(obj, '__dict__'):
        size += get_size(obj.__dict__, seen)
    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
        size += sum([get_size(i, seen) for i in obj])
    return size


def list_contains(base_list, obj_list):
    '''Return True if obj_list occurs as a contiguous sublist of base_list.'''
    if len(base_list) < len(obj_list):
        return False
    obj0 = obj_list[0]
    for i, base in enumerate(base_list[:len(base_list)+1 - len(obj_list)]):
        if base == obj0:
            if base_list[i: i+len(obj_list)] == obj_list:
                return True
    return False


def rand_seed(x: int):
    import random
    random.seed(x)

    import numpy as np
    np.random.seed(x)

    # if using pytorch, set its seed!
    # # import torch
    # # torch.manual_seed(x)
    # # torch.cuda.manual_seed(x)
    # # torch.cuda.manual_seed_all(x)


# find those variables with a common head of position. e.g. pos_search=[0],
# variables=[1, 1, 2, 2], and positions=[[0, 2, 0, 0], [0, 2, 1, 0], [0, 3, 0], [1, 0]],
# then return [1, 1, 2]
find_var_with_pos: Callable[[list, list, List[list]], list] = \
    lambda pos_search, variables, positions: [
        var for var, pos in zip(variables, positions)
        if pos[:len(pos_search)] == pos_search]

find_pos_with_pos: Callable[[list, List[list]], list] = \
    lambda pos_search, positions: [
        pos for pos in positions
        if pos[:len(pos_search)] == pos_search]
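A quick check of the helpers against the example already given in the module's own comment (the values below are copied from that comment; the list_contains call is an extra illustration):

variables = [1, 1, 2, 2]
positions = [[0, 2, 0, 0], [0, 2, 1, 0], [0, 3, 0], [1, 0]]
assert find_var_with_pos([0], variables, positions) == [1, 1, 2]
assert find_pos_with_pos([0], positions) == [[0, 2, 0, 0], [0, 2, 1, 0], [0, 3, 0]]
assert list_contains([1, 2, 3, 4], [2, 3]) is True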
33.079365
374
0.640595
323
2,084
3.96904
0.312694
0.043682
0.046802
0.032761
0.168487
0.168487
0.168487
0.054602
0
0
0
0.017337
0.225048
2,084
63
375
33.079365
0.776471
0.242802
0
0
0
0
0.010296
0
0
0
0
0
0
1
0.078947
false
0
0.105263
0
0.289474
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6f7adc0b1077f898d2e6f6848ac5efb072221fd
7,331
py
Python
funwalk/hyphae.py
cameronmartino/funwalk
24d4bcf6790322f53c0e066492303b4fd2e0980e
[ "MIT" ]
null
null
null
funwalk/hyphae.py
cameronmartino/funwalk
24d4bcf6790322f53c0e066492303b4fd2e0980e
[ "MIT" ]
null
null
null
funwalk/hyphae.py
cameronmartino/funwalk
24d4bcf6790322f53c0e066492303b4fd2e0980e
[ "MIT" ]
1
2019-12-16T19:45:03.000Z
2019-12-16T19:45:03.000Z
from __future__ import division

import numpy as np
import pandas as pd
import math
from tqdm import tqdm
from matplotlib.colors import rgb2hex


def MonodExt(k1, k2, kt, l, Stmp, ks=200):
    # source: Lejeune et al 1995, Morphology of Trichoderma reesei QM 9414
    # in Submerged Cultures
    return (k1 + k2*(l/(l+kt))) * (Stmp/(Stmp+ks))


class hyphal_walk(object):

    def __init__(self, minTheta=10, maxTheta=100, avgRate=28, H=1440, N=1200,
                 M=1e10, tstep=.0005, q=0.004, S0=5e5, k1=50, maxktip=None,
                 k2=None, kt=5, init_n=20, width=100,
                 set_start_center=True, use_monod=True, normal_theta=True):
        """
        minTheta = 10 #*pi/180 - minimum angle a branch can occur
        maxTheta = 100 #*pi/180 - maximum angle a branch can occur
        H = 1440 # number of hours
        N = 1200 # max simulation rounds
        M = 1e10 # max hyphae (carrying capacity)
        tstep = .0005 # time step (hours/step)
        q = 0.004 # branching frequency (maybe need to be scaled of what the time step is)
        S0 = 5e5 # intital conc. of substrate mg in whole grid (evenly dist.)
        k1 = 50 # (µm/h) initial tip extension rate, value estimated from Spohr et al 1998 figure 5
        maxktip = 2*k1 # (µm/h) maximum tip extension rate, value estimated from Spohr et al 1998 figure 5
        k2 = maxktip - k1 # (µm/h) difference between k1 and maxktip
        kt = 5 # saturation constant
        init_n = 20 # starting spores
        width = 100 # view window (um) (this is just 1 cm)
        set_start_center = True # if you want the model to start all spores at (0,0)
        """
        if maxktip is None:
            maxktip = k1
        if k2 is None:
            k2 = maxktip - k1
        self.minTheta = minTheta
        self.maxTheta = maxTheta
        #self.avgRate = avgRate
        self.H = H
        self.N = N
        self.M = M
        self.tstep = tstep
        self.q = q
        self.S0 = S0
        self.k1 = k1
        self.maxktip = maxktip
        self.k2 = k2
        self.kt = kt
        self.init_n = init_n
        self.width = width
        self.set_start_center = set_start_center
        self.use_monod = use_monod
        self.normal_theta = normal_theta
        self.hyphae = self.intialize_hyphae()
        self.Sgrid = self.intialize_subtrate()

    def intialize_hyphae(self):
        hyphae = {}
        centers = np.array([0, 0]).reshape(1, 2)
        # for each spore make and intital random walk direction (no movement yet)
        if self.normal_theta == True:
            theta_init = {i: angle_ for i, angle_
                          in enumerate(np.linspace(0, 360, self.init_n))}
        for spore_i in range(0, self.init_n):
            if self.set_start_center == False:
                rxy = np.random.uniform(0, round(self.width), 2) + centers
            else:
                rxy = centers
            if self.normal_theta == True:
                iTheta = theta_init[spore_i]
            else:
                iTheta = np.around(np.random.uniform(0, 360), 1)
            hyphae[spore_i] = {'x0': rxy[:, 0], 'y0': rxy[:, 1],
                               'x': rxy[:, 0], 'y': rxy[:, 1],
                               'angle': iTheta,
                               'biomass': 0, 't': 0, 'l': 0}
        return hyphae

    def intialize_subtrate(self, block_div=2):
        # make a substrate grid
        Sgrid = []
        size_of_block = round(self.width/block_div)
        grid_min = list(np.linspace(-self.width, self.width, size_of_block)[:-1])
        grid_max = list(np.linspace(-self.width, self.width, size_of_block)[1:])
        for i in range(len(grid_max)):
            Sgrid.append(pd.DataFrame(
                [[self.S0/len(grid_max)]*len(grid_max), grid_min, grid_max,
                 [grid_min[i]]*len(grid_max), [grid_max[i]]*len(grid_max)],
                index=['S', 'X_Gmin', 'X_Gmax', 'Y_Gmin', 'Y_Gmax']).T)
        Sgrid = pd.concat(Sgrid, axis=0).reset_index()
        return Sgrid

    def run_simulation(self):
        # run until i exceeds limits
        time_snapshot_hy = {}
        time_snapshot_sub = {}
        for i in tqdm(range(0, self.N)):
            bio_mass = 0
            if len(self.hyphae) >= self.M:
                # hit carrying cpacity of the system
                print('broke capacity first')
                break
            # otherwise continue to model
            for j in range(0, len(self.hyphae)):
                # find tip in substrate grid
                grid_index = self.Sgrid[
                    ((self.Sgrid['Y_Gmin'] <= self.hyphae[j]['y'][0]) &
                     (self.Sgrid['X_Gmin'] <= self.hyphae[j]['x'][0])) == True].index.max()
                if np.isnan(grid_index):
                    # left view space
                    continue
                if round(self.Sgrid.loc[grid_index, 'S']) != 0:
                    # get current extention
                    if self.use_monod == True:
                        ext = MonodExt(self.k1, self.k2, self.kt,
                                       self.hyphae[j]['l'],
                                       self.Sgrid.loc[grid_index, 'S'])
                    else:
                        ext = self.maxktip
                    # extend in x and y
                    dx = ext * self.tstep * np.cos(self.hyphae[j]['angle']*np.pi/180)  # new coordinate in x-axis
                    dy = ext * self.tstep * np.sin(self.hyphae[j]['angle']*np.pi/180)  # new coordinate in y-axis
                    # biomass created for hyphae j
                    dl_c = np.sqrt(dx**2 + dy**2)
                    # (constant to scale biomass density)
                    dl_c *= 1
                    bio_mass += dl_c
                    # subtract used substrate
                    if self.use_monod == True:
                        self.Sgrid.loc[grid_index, 'S'] = self.Sgrid.loc[grid_index, 'S'] - dl_c
                    # update location
                    self.hyphae[j]['x'] = self.hyphae[j]['x'] + dx
                    self.hyphae[j]['y'] = self.hyphae[j]['y'] + dy
                    self.hyphae[j]['l'] = np.sqrt(
                        (self.hyphae[j]['x'][0] - self.hyphae[j]['x0'][0])**2
                        + (self.hyphae[j]['y'][0] - self.hyphae[j]['y0'][0])**2)
                    self.hyphae[j]['biomass'] = self.hyphae[j]['biomass'] + dl_c
                    # randomly split
                    if np.random.uniform(0, 1) < self.q:
                        direction = [-1, 1][round(np.random.uniform(0, 1))]
                        newangle = direction*round(np.random.uniform(self.minTheta, self.maxTheta))
                        newangle += self.hyphae[j]['angle']
                        self.hyphae[len(self.hyphae)] = {
                            'x0': self.hyphae[j]['x'], 'y0': self.hyphae[j]['y'],
                            'x': self.hyphae[j]['x'], 'y': self.hyphae[j]['y'],
                            'angle': newangle,
                            'biomass': 0, 't': i, 'l': 0}
            time_snapshot_hy[i] = pd.DataFrame(self.hyphae.copy()).copy()
            time_snapshot_sub[i] = pd.DataFrame(self.Sgrid.copy()).copy()
        return time_snapshot_hy, time_snapshot_sub
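A minimal run of the model. N and init_n are reduced from the class defaults purely to keep the sketch fast; everything else keeps the defaults:

walk = hyphal_walk(N=50, init_n=5)
snap_hyphae, snap_substrate = walk.run_simulation()
print(len(snap_hyphae), 'snapshots recorded;', len(walk.hyphae), 'hyphal tips at the end')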
47.915033
116
0.506343
939
7,331
3.848775
0.257721
0.077476
0.063918
0.019923
0.192861
0.125069
0.075263
0.075263
0.075263
0.075263
0
0.04117
0.370482
7,331
153
117
47.915033
0.741928
0.214568
0
0.065421
0
0
0.026414
0
0
0
0
0
0
1
0.046729
false
0
0.056075
0.009346
0.149533
0.009346
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6f9290567678db771e6a8c50fd70341df8c77a3
1,636
py
Python
Tree/(CODECHEF) Binary_Operations.py
XitizVerma/Data-Structures-and-Algorithms-Advanced
610225eeb7e0b4ade229ec86355901ad1ca38784
[ "MIT" ]
1
2020-08-27T06:59:52.000Z
2020-08-27T06:59:52.000Z
Tree/(CODECHEF) Binary_Operations.py
XitizVerma/Data-Structures-and-Algorithms-Advanced
610225eeb7e0b4ade229ec86355901ad1ca38784
[ "MIT" ]
null
null
null
Tree/(CODECHEF) Binary_Operations.py
XitizVerma/Data-Structures-and-Algorithms-Advanced
610225eeb7e0b4ade229ec86355901ad1ca38784
[ "MIT" ]
null
null
null
import sys
sys.setrecursionlimit(10**6)


class Node:
    def __init__(self, val, pos):
        self.left = None
        self.right = None
        self.pos = pos
        self.val = val


def insert(node, val, pos):
    if node is None:
        print(pos)
        return Node(val, pos)
    if val < node.val:
        # move to left child
        node.left = insert(node.left, val, 2*pos)
    else:
        # move to right child
        node.right = insert(node.right, val, 2*pos+1)
    return node


def minValueNode(node):
    current = node
    while current.left is not None:
        current = current.left
    return current


def delete(node, val, case=True):
    if node is None:
        return node
    # search
    if val < node.val:
        # move to left child
        node.left = delete(node.left, val, case)
    elif val > node.val:
        # move to right child
        node.right = delete(node.right, val, case)
    else:
        # here found
        if case:
            print(node.pos)
        # Now delete node and replacement
        if node.left is None and node.right is None:
            # check left child, if None
            node = None
        elif node.left is None:
            node = node.right
        elif node.right is None:
            node = node.left
        else:
            temp = minValueNode(node.right)
            node.val = temp.val
            node.right = delete(node.right, temp.val, False)
    return node


root = None


def main(q):
    global root
    oper, elem = input().split()
    if oper == 'i':
        root = insert(root, int(elem), 1)
    else:
        root = delete(root, int(elem), True)
    if q > 1:
        main(q-1)


main(int(input()))
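The tree labels nodes with heap-style positions (root 1, children 2*pos and 2*pos+1) and prints the position on insert and delete. A standalone sketch of the same calls the stdin loop would make, with hypothetical values 5, 3, 8:

root = None
root = insert(root, 5, 1)   # prints 1 (root)
root = insert(root, 3, 1)   # prints 2 (left child of root)
root = insert(root, 8, 1)   # prints 3 (right child of root)
root = delete(root, 3)      # prints 2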
27.266667
81
0.56846
232
1,636
3.991379
0.228448
0.097192
0.032397
0.045356
0.182505
0.12959
0.075594
0.075594
0.075594
0.075594
0
0.008182
0.327628
1,636
60
82
27.266667
0.833636
0.094743
0
0.203704
0
0
0.000679
0
0
0
0
0
0
1
0.092593
false
0
0.018519
0
0.222222
0.037037
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6fa1a37daf66eeb44908ffa65e9185aef668bb0
814
py
Python
kubernetes/generate.py
gottaegbert/penter
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
[ "MIT" ]
13
2020-01-04T07:37:38.000Z
2021-08-31T05:19:58.000Z
kubernetes/generate.py
gottaegbert/penter
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
[ "MIT" ]
3
2020-06-05T22:42:53.000Z
2020-08-24T07:18:54.000Z
kubernetes/generate.py
gottaegbert/penter
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
[ "MIT" ]
9
2020-10-19T04:53:06.000Z
2021-08-31T05:20:01.000Z
from kubernetes import client, config
import json


# Generate the YAML/JSON manifest (original comment: 生成YML)
def main():
    pod = create_pod("dev")
    print(json.dumps(client.ApiClient().sanitize_for_serialization(pod)))


def create_pod(environment):
    return client.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=client.V1ObjectMeta(
            name="test-pod",
        ),
        spec=client.V1PodSpec(
            containers=[
                client.V1Container(
                    name="test-container",
                    image="nginx",
                    env=[
                        client.V1EnvVar(
                            name="ENV",
                            value=environment,
                        )
                    ]
                )
            ]
        )
    )


if __name__ == '__main__':
    main()
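For reference, roughly the manifest main() prints, sketched by hand from the object construction above rather than captured from a run (key order may differ):

# {"apiVersion": "v1", "kind": "Pod",
#  "metadata": {"name": "test-pod"},
#  "spec": {"containers": [{"env": [{"name": "ENV", "value": "dev"}],
#                           "image": "nginx", "name": "test-container"}]}}
#
# Passing a different environment only changes the ENV value:
pod = create_pod("prod")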
22
73
0.443489
63
814
5.52381
0.634921
0.051724
0
0
0
0
0
0
0
0
0
0.013514
0.454545
814
36
74
22.611111
0.77027
0.006143
0
0
0
0
0.057001
0
0
0
0
0
0
1
0.068966
false
0
0.068966
0.034483
0.172414
0.034483
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6fb376cc1a2cdba245a1630f216a970be324900
1,680
py
Python
manim/animation/update.py
philschatz/manim
e3359a571d9a02a08979b3e037ddada3e874eb7c
[ "MIT" ]
5
2019-02-22T14:10:08.000Z
2022-03-13T01:03:49.000Z
manim/animation/update.py
philschatz/manim
e3359a571d9a02a08979b3e037ddada3e874eb7c
[ "MIT" ]
17
2021-04-10T13:47:17.000Z
2021-05-17T21:25:30.000Z
manim/animation/update.py
philschatz/manim
e3359a571d9a02a08979b3e037ddada3e874eb7c
[ "MIT" ]
1
2021-03-31T20:46:51.000Z
2021-03-31T20:46:51.000Z
"""Animations that update mobjects.""" __all__ = ["UpdateFromFunc", "UpdateFromAlphaFunc", "MaintainPositionRelativeTo"] import operator as op import typing from ..animation.animation import Animation if typing.TYPE_CHECKING: from ..mobject.mobject import Mobject class UpdateFromFunc(Animation): """ update_function of the form func(mobject), presumably to be used when the state of one mobject is dependent on another simultaneously animated mobject """ def __init__( self, mobject: "Mobject", update_function: typing.Callable[["Mobject"], typing.Any], suspend_mobject_updating: bool = False, **kwargs ) -> None: self.update_function = update_function super().__init__( mobject, suspend_mobject_updating=suspend_mobject_updating, **kwargs ) def interpolate_mobject(self, alpha: float) -> None: self.update_function(self.mobject) class UpdateFromAlphaFunc(UpdateFromFunc): def interpolate_mobject(self, alpha: float) -> None: self.update_function(self.mobject, alpha) class MaintainPositionRelativeTo(Animation): def __init__( self, mobject: "Mobject", tracked_mobject: "Mobject", **kwargs ) -> None: self.tracked_mobject = tracked_mobject self.diff = op.sub( mobject.get_center(), tracked_mobject.get_center(), ) super().__init__(mobject, **kwargs) def interpolate_mobject(self, alpha: float) -> None: target = self.tracked_mobject.get_center() location = self.mobject.get_center() self.mobject.shift(target - location + self.diff)
28.965517
81
0.674405
175
1,680
6.217143
0.348571
0.077206
0.058824
0.060662
0.217831
0.171875
0.171875
0.171875
0.125
0.125
0
0
0.225595
1,680
57
82
29.473684
0.83628
0.108929
0
0.189189
0
0
0.059264
0.017711
0
0
0
0
0
1
0.135135
false
0
0.108108
0
0.324324
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6fb8c0182b842a2a3c6c84fa4f6e7466f61a46b
5,583
py
Python
nets/mobilenet.py
DanielWong0623/Dog-Face-Recognition-PyTorch
70920a65617d9b6de59919d8920e4a1a133d58d3
[ "MIT" ]
null
null
null
nets/mobilenet.py
DanielWong0623/Dog-Face-Recognition-PyTorch
70920a65617d9b6de59919d8920e4a1a133d58d3
[ "MIT" ]
null
null
null
nets/mobilenet.py
DanielWong0623/Dog-Face-Recognition-PyTorch
70920a65617d9b6de59919d8920e4a1a133d58d3
[ "MIT" ]
null
null
null
import torch
import torch.nn as nn


def _make_divisible(ch, divisor=8, min_ch=None):
    if min_ch is None:
        min_ch = divisor
    new_ch = max(min_ch, int(ch + divisor / 2) // divisor * divisor)
    if new_ch < 0.9 * ch:
        new_ch += divisor
    return new_ch


# ----------------------
# MobileNetV1
# ----------------------

# Standard convolution + BN + ReLU (original comment: 普通卷积+BN+ReLU)
def conv_bn(inp, oup, stride=1):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6()
    )


# Depthwise convolution + BN + ReLU (original comment: DW卷积+BN+ReLU)
def conv_dw(inp, oup, stride=1):
    return nn.Sequential(
        nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
        nn.BatchNorm2d(inp),
        nn.ReLU6(),
        # PW
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(),
    )


class MobileNetV1(nn.Module):
    def __init__(self):
        super(MobileNetV1, self).__init__()
        self.stage1 = nn.Sequential(
            # H, W, C
            # 224, 224, 3 -> 112, 112, 32
            conv_bn(3, 32, 2),
            # 112, 112, 32 -> 112, 112, 64
            conv_dw(32, 64, 1),
            # 112, 112, 64 -> 56, 56, 128
            conv_dw(64, 128, 2),
            conv_dw(128, 128, 1),
            # 56, 56, 128 -> 28, 28, 256
            conv_dw(128, 256, 2),
            conv_dw(256, 256, 1),
        )
        self.stage2 = nn.Sequential(
            # 28, 28, 256 -> 14, 14, 512
            conv_dw(256, 512, 2),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
        )
        self.stage3 = nn.Sequential(
            # 14, 14, 512 -> 7, 7, 1024
            conv_dw(512, 1024, 2),
            conv_dw(1024, 1024, 1),
        )
        # 7, 7, 1024 -> 1, 1, 1024
        self.avg = nn.AdaptiveAvgPool2d((1, 1))
        # 1, 1, 1024 -> 1, 1, 1000
        self.fc = nn.Linear(1024, 1000)

    def forward(self, x):
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.avg(x)
        x = x.view(-1, 1024)
        x = self.fc(x)
        return x


# ----------------------
# MobileNet V2
# ----------------------

class ConvBNReLU(nn.Sequential):
    def __init__(self, in_channel, out_channel, kernel_size=3, stride=1, groups=1):
        padding = (kernel_size - 1) // 2
        super(ConvBNReLU, self).__init__(
            nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding,
                      groups=groups, bias=False),
            nn.BatchNorm2d(out_channel),
            nn.ReLU6(inplace=True)
        )


class InvertedResidual(nn.Module):
    def __init__(self, in_channel, out_channel, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        hidden_channel = in_channel * expand_ratio
        self.use_shortcut = stride == 1 and in_channel == out_channel

        layers = []
        if expand_ratio != 1:
            # 1x1 pointwise conv
            layers.append(ConvBNReLU(in_channel, hidden_channel, kernel_size=1))
        layers.extend([
            # 3x3 depthwise conv
            ConvBNReLU(hidden_channel, hidden_channel, stride=stride,
                       groups=hidden_channel),
            # 1x1 pointwise conv(linear)
            nn.Conv2d(hidden_channel, out_channel, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channel),
        ])
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        if self.use_shortcut:
            return x + self.conv(x)
        else:
            return self.conv(x)


class MobileNetV2(nn.Module):
    def __init__(self, num_classes=1000, alpha=1.0, round_nearest=8):
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = _make_divisible(32 * alpha, round_nearest)
        last_channel = _make_divisible(1280 * alpha, round_nearest)

        inverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

        features = []
        # conv1 layer
        features.append(ConvBNReLU(3, input_channel, stride=2))
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * alpha, round_nearest)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride,
                                      expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, last_channel, 1))
        # combine feature layers
        self.features = nn.Sequential(*features)

        # building classifier
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(last_channel, num_classes)
        )

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x


if __name__ == '__main__':
    model = MobileNetV2()
    # model = MobileNetV1()
    input = torch.randn(1, 3, 224, 224)
    out = model(input)
    print(out.shape)
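A quick comparison sketch of the two variants; the parameter counts are computed at runtime, not quoted from the file:

v1, v2 = MobileNetV1(), MobileNetV2()
for name, m in [("V1", v1), ("V2", v2)]:
    n_params = sum(p.numel() for p in m.parameters())
    print(name, f"{n_params/1e6:.1f}M parameters")
print(v1(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 1000])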
30.675824
105
0.51818
684
5,583
4.049708
0.209064
0.030325
0.019495
0.039711
0.177256
0.150181
0.098556
0.053791
0.053791
0.053791
0
0.095736
0.348916
5,583
181
106
30.845304
0.6663
0.106574
0
0.144
0
0
0.001675
0
0
0
0
0
0
1
0.08
false
0
0.016
0.016
0.184
0.008
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6fc1bb2c513a94df13f8c3502baea0cf10d6404
538
py
Python
xbrr/edinet/reader/aspects/stock.py
5laps2go/xbrr
4c0824b53bfe971111d60e6c1ff4e36f4f4845a3
[ "MIT" ]
null
null
null
xbrr/edinet/reader/aspects/stock.py
5laps2go/xbrr
4c0824b53bfe971111d60e6c1ff4e36f4f4845a3
[ "MIT" ]
null
null
null
xbrr/edinet/reader/aspects/stock.py
5laps2go/xbrr
4c0824b53bfe971111d60e6c1ff4e36f4f4845a3
[ "MIT" ]
null
null
null
from xbrr.base.reader.base_parser import BaseParser
from xbrr.edinet.reader.element_value import ElementValue


class Stock(BaseParser):

    def __init__(self, reader):
        tags = {
            # dividend per share (original comment: 一株配当)
            "dividend_paid": "jpcrp_cor:DividendPaidPerShareSummaryOfBusinessResults",
            # dividends from surplus (original comment: 剰余金の配当)
            "dividends_surplus": "jppfs_cor:DividendsFromSurplus",
            # share buyback (original comment: 自社株買い)
            "purchase_treasury_stock": "jppfs_cor:PurchaseOfTreasuryStock",
        }

        super().__init__(reader, ElementValue, tags)
38.428571
96
0.665428
48
538
7.104167
0.6875
0.046921
0
0
0
0
0
0
0
0
0
0
0.247212
538
13
97
41.384615
0.841975
0.031599
0
0
0
0
0.32882
0.270793
0
0
0
0
0
1
0.1
false
0
0.2
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6fc289ed39bfadb67662fbe1b9dc0d8849d8f8c
2,031
py
Python
ClusterAuthors.py
wjs018/JitaChat
0b289f7f6e65c1a8b0627e7c3573307580042d1c
[ "MIT" ]
null
null
null
ClusterAuthors.py
wjs018/JitaChat
0b289f7f6e65c1a8b0627e7c3573307580042d1c
[ "MIT" ]
null
null
null
ClusterAuthors.py
wjs018/JitaChat
0b289f7f6e65c1a8b0627e7c3573307580042d1c
[ "MIT" ]
null
null
null
"""This program first reads in the sqlite database made by ParseAuthors.py. Then, after just a little data cleaning, it undergoes PCA decomposition. After being decomposed via PCA, the author data is then clustered by way of a K-means clustering algorithm. The number of clusters can be set by changing the value of n_clusters.""" import sqlite3 import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.decomposition import PCA from sklearn.cluster import KMeans if __name__ == '__main__': # Filepath of sqlite database made by ParseAuthors.py db_path = '/media/sf_G_DRIVE/jita1407/authors.sqlite' # Load this into a dataframe conn = sqlite3.connect(db_path, detect_types=sqlite3.PARSE_DECLTYPES) dataframe = pd.read_sql_query("SELECT * FROM Authors", conn) conn.close() # Get rid of some redundant data to make analysis cleaner and more straightforward dataframe = dataframe.drop(['int_skew', 'unique_messages'], axis=1) # Separate out our list of Authors from the data about them authors = dataframe.ix[:,1].copy() data = dataframe.ix[:,2:7].copy() # Set up our PCA decomposition pca = PCA() pca.fit(data.as_matrix()) # Transform our data into features calculated by PCA transformed = pca.transform(data.as_matrix()) # Cluster our data according to K-means n_clusters = 2 # number of clusters to organize data into n_init = 20 # number of times to replicate clustering n_jobs = 1 # number of processors to use for clustering (-1 for all) kmeans = KMeans(n_clusters=n_clusters, n_init=n_init, n_jobs=n_jobs).fit(transformed) # Get the results of the clustering centers = kmeans.cluster_centers_ labels = kmeans.labels_ # Make some plots # Plot explained variance for each PCA component #plt.bar(np.arange(len(pca.explained_variance_)), pca.explained_variance_)
31.734375
89
0.690793
286
2,031
4.776224
0.493007
0.023426
0.026354
0.029283
0.04978
0.04978
0
0
0
0
0
0.010336
0.237814
2,031
64
90
31.734375
0.872093
0.476613
0
0
0
0
0.088995
0.039234
0
0
0
0
0
1
0
false
0
0.26087
0
0.26087
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6fcc73481c40c481a1a9aa8423bcb41c20526de
718
py
Python
Leetcode/304. Range Sum Query 2D - Immutable/solution1.py
asanoviskhak/Outtalent
c500e8ad498f76d57eb87a9776a04af7bdda913d
[ "MIT" ]
51
2020-07-12T21:27:47.000Z
2022-02-11T19:25:36.000Z
Leetcode/304. Range Sum Query 2D - Immutable/solution1.py
CrazySquirrel/Outtalent
8a10b23335d8e9f080e5c39715b38bcc2916ff00
[ "MIT" ]
null
null
null
Leetcode/304. Range Sum Query 2D - Immutable/solution1.py
CrazySquirrel/Outtalent
8a10b23335d8e9f080e5c39715b38bcc2916ff00
[ "MIT" ]
32
2020-07-27T13:54:24.000Z
2021-12-25T18:12:50.000Z
from typing import List  # needed when running outside the Leetcode harness


class NumMatrix:
    def __init__(self, matrix: List[List[int]]):
        if not matrix or not matrix[0]:
            return None
        m, n = len(matrix), len(matrix[0])
        # dp[r][c] holds the sum of the rectangle from (0, 0) to (r-1, c-1)
        self.dp = [[0] * (n + 1) for _ in range(m + 1)]
        for r in range(m):
            for c in range(n):
                self.dp[r + 1][c + 1] = (self.dp[r + 1][c] + self.dp[r][c + 1]
                                         + matrix[r][c] - self.dp[r][c])

    def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
        return (self.dp[row2 + 1][col2 + 1] - self.dp[row1][col2 + 1]
                - self.dp[row2 + 1][col1] + self.dp[row1][col1])


# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
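A worked example on a small, made-up matrix: the 2x2 block covering rows 0-1, columns 0-1 sums to 3 + 0 + 5 + 6 = 14.

obj = NumMatrix([[3, 0, 1],
                 [5, 6, 3]])
assert obj.sumRegion(0, 0, 1, 1) == 14
assert obj.sumRegion(0, 2, 1, 2) == 1 + 3  # right-hand column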
42.235294
116
0.568245
122
718
3.295082
0.327869
0.134328
0.069652
0.039801
0.087065
0
0
0
0
0
0
0.056075
0.254875
718
16
117
44.875
0.695327
0.182451
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0
0.1
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
d6fd117c0e7ff9f0666672a6e3ed6dee73755e6c
2,951
py
Python
backend/app/ColorConsole.py
GJCav/thywy
3c458bccdd23bab78b6a8bd65603c7845e643d70
[ "MIT" ]
8
2022-01-23T07:30:06.000Z
2022-02-15T03:39:25.000Z
backend/app/ColorConsole.py
Dr-Left/thuwy
3c458bccdd23bab78b6a8bd65603c7845e643d70
[ "MIT" ]
5
2022-01-21T03:31:22.000Z
2022-03-04T00:01:59.000Z
backend/app/ColorConsole.py
Dr-Left/thuwy
3c458bccdd23bab78b6a8bd65603c7845e643d70
[ "MIT" ]
2
2022-01-23T08:09:46.000Z
2022-02-24T05:55:02.000Z
""" 格式: \033[0m -> 默认字体显示 \033[显示方式;前景色;背景色m -> 格式 三个参数顺序不敏感,因为值各不相同 显示方式列表: 0 - 默认值 1 - 高亮 4 - 下划线 5 - 闪烁 7 - 反显 8 - 不可见 前景色: 30 - 黑色 31 - 红色 32 - 绿色 33 - 黄色 34 - 蓝色 35 - 梅色 36 - 青色 37 - 白色 背景色: 40 - 黑色 前景色+10即可 """ from copy import copy as _copy METHOD_DEFAULT = -1 METHOD_BOLD = 1 METHOD_UNDERLINE = 4 METHOD_FLASH = 5 METHOD_REVERSE = 7 METHOD_HIDE = 8 FORE_BLACK = 30 FORE_RED = 31 FORE_GREEN = 32 FORE_YELLOW = 33 FORE_BLUE = 34 FORE_PLUM = 35 FORE_CYAN = 36 FORE_WHITE = 37 FORE_DEFAULT = -1 BACK_BLACK = 40 BACK_RED = 41 BACK_GREEN = 42 BACK_YELLOW = 43 BACK_BLUE = 44 BACK_PLUM = 45 BACK_CYAN = 46 BACK_WHITE = 47 BACK_DEFAULT = -1 def _ColorDecoratorAll(content, method, foreColor, backColor): rtn = "\033[" if method != METHOD_DEFAULT: rtn += str(method) if foreColor != FORE_DEFAULT: rtn += ";" + str(foreColor) if backColor != BACK_DEFAULT: rtn += ";" + str(backColor) rtn += "m" + content + "\033[0m" return rtn class _StrDecorator: method = METHOD_DEFAULT foreColor = FORE_DEFAULT backColor = BACK_DEFAULT def __init__( self, method=METHOD_DEFAULT, foreColor=FORE_DEFAULT, backColor=BACK_DEFAULT ): self.method = method self.foreColor = foreColor self.backColor = backColor def __add__(self, ano): rtn = _copy(self) if ano.method != METHOD_DEFAULT: rtn.method = ano.method if ano.foreColor != FORE_DEFAULT: rtn.foreColor = ano.foreColor if ano.backColor != BACK_DEFAULT: rtn.backColor = ano.backColor return rtn def __call__(self, str): return _ColorDecoratorAll(str, self.method, self.foreColor, self.backColor) # Fore color Black = _StrDecorator(foreColor=FORE_BLACK) Red = _StrDecorator(foreColor=FORE_RED) Green = _StrDecorator(foreColor=FORE_GREEN) Yellow = _StrDecorator(foreColor=FORE_YELLOW) Blue = _StrDecorator(foreColor=FORE_BLUE) Plum = _StrDecorator(foreColor=FORE_PLUM) Cyan = _StrDecorator(foreColor=FORE_CYAN) White = _StrDecorator(foreColor=FORE_WHITE) # Method Bold = _StrDecorator(method=METHOD_BOLD) Underline = _StrDecorator(method=METHOD_UNDERLINE) Flash = _StrDecorator(method=METHOD_FLASH) Reverse = _StrDecorator(method=METHOD_REVERSE) Hide = _StrDecorator(method=METHOD_HIDE) # Back Color BackBlack = _StrDecorator(backColor=BACK_BLACK) BackRed = _StrDecorator(backColor=BACK_RED) BackGreen = _StrDecorator(backColor=BACK_GREEN) BackYellow = _StrDecorator(backColor=BACK_YELLOW) BackBlue = _StrDecorator(backColor=BACK_BLUE) BackPlum = _StrDecorator(backColor=BACK_PLUM) BackCyan = _StrDecorator(backColor=BACK_CYAN) BackWhite = _StrDecorator(backColor=BACK_WHITE) # Some short cuts FontInfo = _StrDecorator() # All default FontStrength = _copy(Bold) FontWarining = Yellow + Bold FontError = Red + Bold
22.18797
83
0.689597
367
2,951
5.280654
0.280654
0.080495
0.103199
0.022704
0.060888
0.060888
0.060888
0.060888
0.060888
0
0
0.034677
0.218231
2,951
132
84
22.356061
0.805375
0.120637
0
0.024691
0
0
0.005821
0
0
0
0
0
0
1
0.049383
false
0
0.012346
0.012346
0.148148
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ba334945cd720fa1ed44ce5324c0dde69e532940
5,765
py
Python
src/pnmol/kernels.py
schmidtjonathan/pnmol-experiments
e07396079e2f2038011f3a377022482991090c5a
[ "MIT" ]
1
2022-02-24T18:25:43.000Z
2022-02-24T18:25:43.000Z
src/pnmol/kernels.py
schmidtjonathan/pnmol-experiments
e07396079e2f2038011f3a377022482991090c5a
[ "MIT" ]
null
null
null
src/pnmol/kernels.py
schmidtjonathan/pnmol-experiments
e07396079e2f2038011f3a377022482991090c5a
[ "MIT" ]
null
null
null
import abc
from functools import cached_property, partial

import jax
import jax.numpy as jnp


class Kernel(abc.ABC):
    """Covariance kernel interface."""

    @abc.abstractmethod
    def __call__(self, X, Y):
        raise NotImplementedError


class _PairwiseKernel(Kernel):

    @partial(jax.jit, static_argnums=(0,))
    def __call__(self, X, Y):
        # Single element of the Gram matrix:
        # X.shape=(d,), Y.shape=(d,) -> K.shape = ()
        if X.ndim == Y.ndim <= 1:
            return self.pairwise(X, Y)

        # Diagonal of the Gram matrix:
        # X.shape=(N,d), Y.shape=(N,d) -> K.shape = (N,)
        if X.shape == Y.shape:
            return self._evaluate_inner(X, Y)

        # Full Gram matrix:
        # X.shape=(N,d), Y.shape=(d,K) -> K.shape = (N,K)
        return self._evaluate_outer(X, Y)

    @abc.abstractmethod
    def pairwise(self, x, y):
        raise NotImplementedError

    @cached_property
    def _evaluate_inner(self):
        return jax.jit(jax.vmap(self.pairwise, (0, 0), 0))

    @cached_property
    def _evaluate_outer(self):
        _pairwise_row = jax.jit(jax.vmap(self.pairwise, (0, None), 0))
        return jax.jit(jax.vmap(_pairwise_row, (None, 1), 1))

    def __str__(self):
        return f"{self.__class__.__name__}()"

    def __add__(self, other):
        @jax.jit
        def pairwise_new(x, y):
            return self.pairwise(x, y) + other.pairwise(x, y)

        return Lambda(pairwise_new)


class Lambda(_PairwiseKernel):
    def __init__(self, fun, /):
        self._lambda_fun = jax.jit(fun)

    @partial(jax.jit, static_argnums=(0,))
    def pairwise(self, x, y):
        return self._lambda_fun(x, y)


class _RadialKernel(_PairwiseKernel):
    r"""Radial kernels.

    k(x,y) = output_scale * \varphi(\|x-y\|*input_scale)
    """

    def __init__(
        self,
        *,
        output_scale=1.0,
        input_scale=1.0,
    ):
        self._output_scale = output_scale
        self._input_scale = input_scale

    @property
    def output_scale(self):
        return self._output_scale

    @property
    def output_scale_squared(self):
        return self.output_scale ** 2

    @property
    def input_scale(self):
        return self._input_scale

    @property
    def input_scale_squared(self):
        return self.input_scale ** 2

    @abc.abstractmethod
    def pairwise(self, X, Y):
        raise NotImplementedError

    @partial(jax.jit, static_argnums=0)
    def _distance_squared_l2(self, X, Y):
        return (X - Y).dot(X - Y)


class SquareExponential(_RadialKernel):
    @partial(jax.jit, static_argnums=0)
    def pairwise(self, x, y):
        dist_squared = self._distance_squared_l2(x, y) * self.input_scale_squared
        return self.output_scale_squared * jnp.exp(-dist_squared / 2.0)


class Matern52(_RadialKernel):
    # Careful! Matern52 is not differentiable at x=y!
    # Therefore, it is likely unusable for PNMOL...
    @partial(jax.jit, static_argnums=(0,))
    def pairwise(self, x, y):
        dist_unscaled = self._distance_squared_l2(x, y)
        dist_scaled = jnp.sqrt(5.0 * dist_unscaled * self.input_scale_squared)
        A = 1 + dist_scaled + dist_scaled ** 2.0 / 3.0
        B = jnp.exp(-dist_scaled)
        return self.output_scale_squared * A * B


class Polynomial(_PairwiseKernel):
    """k(x,y) = (x.T @ y + c)^d"""

    def __init__(self, *, order=2, const=1.0):
        self._order = order
        self._const = const

    @property
    def order(self):
        return self._order

    @property
    def const(self):
        return self._const

    @partial(jax.jit, static_argnums=(0,))
    def pairwise(self, x, y):
        return (x.dot(y) + self.const) ** self.order


class WhiteNoise(_PairwiseKernel):
    def __init__(self, *, output_scale=1.0):
        self._output_scale = output_scale

    @property
    def output_scale(self):
        return self._output_scale

    @partial(jax.jit, static_argnums=(0,))
    def pairwise(self, x, y):
        return self.output_scale ** 2 * jnp.all(x == y)


class _StackedKernel(Kernel):
    def __init__(self, *, kernel_list):
        self.kernel_list = kernel_list

    @partial(jax.jit, static_argnums=0)
    def __call__(self, X, Y):
        gram_matrix_list = [k(X, Y) for k in self.kernel_list]

        # Diagonal of the Gram matrix:
        # Concatenate the results together
        if X.shape == Y.shape:
            return jnp.concatenate(gram_matrix_list)

        # Full Gram matrix:
        # Block diag the gram matrix
        return jax.scipy.linalg.block_diag(*gram_matrix_list)


def duplicate(kernel, num):
    """Create a stack of kernels such that the Gram matrix becomes block diagonal.

    The blocks are all identical.
    """
    return _StackedKernel(kernel_list=[kernel] * num)


def mle_input_scale(*, mesh_points, data, kernel_type, input_scale_trials):
    scale_to_log_lklhd = partial(
        input_scale_to_log_likelihood,
        data=data,
        kernel_type=kernel_type,
        mesh_points=mesh_points,
    )
    scale_to_log_lklhd_optimised = jax.jit(jax.vmap(scale_to_log_lklhd))
    log_likelihood_values = scale_to_log_lklhd_optimised(input_scale=input_scale_trials)
    index_max = jnp.argmax(log_likelihood_values)
    return input_scale_trials[index_max]


@partial(jax.jit, static_argnums=3)
def input_scale_to_log_likelihood(input_scale, mesh_points, data, kernel_type):
    kernel = kernel_type(input_scale=input_scale)
    K = kernel(mesh_points, mesh_points.T)
    return log_likelihood(gram_matrix=K, y=data, n=data.shape[0])


@jax.jit
def log_likelihood(gram_matrix, y, n):
    a = y @ jnp.linalg.solve(gram_matrix, y)
    b = jnp.log(jnp.linalg.det(gram_matrix))
    c = n * jnp.log(2 * jnp.pi)
    return -0.5 * (a + b + c)
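A minimal Gram-matrix sketch with the square-exponential kernel, exercising the three shape cases handled by _PairwiseKernel.__call__ (the scale values and mesh points below are illustrative):

import jax.numpy as jnp

k = SquareExponential(output_scale=1.0, input_scale=2.0)
X = jnp.array([[0.0], [1.0], [2.0]])   # three 1-d mesh points, shape (3, 1)
K = k(X, X.T)                          # full Gram matrix, shape (3, 3)
diag = k(X, X)                         # diagonal only, shape (3,)
print(K.shape, diag)                   # the diagonal of an SE kernel is all ones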
27.193396
88
0.640937
802
5,765
4.337905
0.174564
0.016097
0.018971
0.049152
0.425122
0.28399
0.245473
0.190572
0.159241
0.125898
0
0.01118
0.239722
5,765
211
89
27.322275
0.782569
0.113096
0
0.311111
0
0
0.005333
0.005333
0
0
0
0
0
1
0.237037
false
0
0.02963
0.103704
0.533333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
ba3469f6edcb9686d5729fdce8d6db4a402b74d8
148
py
Python
pkgs/ops-pkg/src/genie/libs/ops/msdp/ios/msdp.py
jbronikowski/genielibs
200a34e5fe4838a27b5a80d5973651b2e34ccafb
[ "Apache-2.0" ]
94
2018-04-30T20:29:15.000Z
2022-03-29T13:40:31.000Z
pkgs/ops-pkg/src/genie/libs/ops/msdp/ios/msdp.py
jbronikowski/genielibs
200a34e5fe4838a27b5a80d5973651b2e34ccafb
[ "Apache-2.0" ]
67
2018-12-06T21:08:09.000Z
2022-03-29T18:00:46.000Z
pkgs/ops-pkg/src/genie/libs/ops/msdp/ios/msdp.py
jbronikowski/genielibs
200a34e5fe4838a27b5a80d5973651b2e34ccafb
[ "Apache-2.0" ]
49
2018-06-29T18:59:03.000Z
2022-03-10T02:07:59.000Z
# super class
from genie.libs.ops.msdp.iosxe.msdp import Msdp as MsdpXE


class Msdp(MsdpXE):
    '''Msdp Ops Object'''
    pass
18.5
58
0.601351
20
148
4.45
0.65
0
0
0
0
0
0
0
0
0
0
0
0.297297
148
8
59
18.5
0.855769
0.189189
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
ba3553670d430c80e3adc22fd5128171a993576f
740
py
Python
tests/api/fixtures.py
eroberts9789/virtool-workflow
18219eec2b9b934cedd3770ac319f40305c165f2
[ "MIT" ]
5
2020-09-24T20:29:08.000Z
2022-03-17T14:50:56.000Z
tests/api/fixtures.py
eroberts9789/virtool-workflow
18219eec2b9b934cedd3770ac319f40305c165f2
[ "MIT" ]
126
2020-10-01T23:38:34.000Z
2022-03-31T08:26:28.000Z
tests/api/fixtures.py
eroberts9789/virtool-workflow
18219eec2b9b934cedd3770ac319f40305c165f2
[ "MIT" ]
5
2020-09-29T21:29:46.000Z
2021-07-27T20:34:58.000Z
import aiohttp
import pytest
from aiohttp import web

from virtool_workflow.api.client import JobApiHttpSession
from tests.api.mocks.mock_api import mock_routes


@pytest.fixture
def loop(event_loop):
    return event_loop


@pytest.fixture
async def jobs_api_url():
    return "/api"


@pytest.fixture
async def mock_jobs_api_app(loop):
    app = web.Application(loop=loop)

    for route_table in mock_routes:
        app.add_routes(route_table)

    return app


@pytest.fixture
async def http(mock_jobs_api_app, aiohttp_client) -> aiohttp.ClientSession:
    """Create an http client for accessing the mocked Jobs API."""
    session = await aiohttp_client(mock_jobs_api_app, auto_decompress=False)
    return JobApiHttpSession(session)
21.764706
76
0.768919
106
740
5.150943
0.386792
0.064103
0.098901
0.115385
0
0
0
0
0
0
0
0
0.156757
740
33
77
22.424242
0.875
0
0
0.190476
0
0
0.0059
0
0
0
0
0
0
1
0.047619
false
0
0.238095
0.047619
0.47619
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ba389c274378dda4739cb2b43943f78e0f632ed1
1,359
py
Python
src/pbn_api/migrations/0021_auto_20210808_1204.py
iplweb/django-bpp
85f183a99d8d5027ae4772efac1e4a9f21675849
[ "BSD-3-Clause" ]
1
2017-04-27T19:50:02.000Z
2017-04-27T19:50:02.000Z
src/pbn_api/migrations/0021_auto_20210808_1204.py
mpasternak/django-bpp
434338821d5ad1aaee598f6327151aba0af66f5e
[ "BSD-3-Clause" ]
41
2019-11-07T00:07:02.000Z
2022-02-27T22:09:39.000Z
src/pbn_api/migrations/0021_auto_20210808_1204.py
iplweb/bpp
f027415cc3faf1ca79082bf7bacd4be35b1a6fdf
[ "BSD-3-Clause" ]
null
null
null
# Generated by Django 3.0.14 on 2021-08-08 10:04

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ("pbn_api", "0020_przemapuj"),
    ]

    operations = [
        migrations.AlterField(
            model_name="scientist",
            name="lastName",
            field=models.TextField(blank=True, db_index=True, null=True),
        ),
        migrations.AlterField(
            model_name="scientist",
            name="name",
            field=models.TextField(blank=True, db_index=True, null=True),
        ),
        migrations.AlterField(
            model_name="scientist",
            name="orcid",
            field=models.TextField(blank=True, db_index=True, null=True),
        ),
        migrations.AlterField(
            model_name="scientist",
            name="pbnId",
            field=models.TextField(blank=True, db_index=True, null=True),
        ),
        migrations.AlterField(
            model_name="scientist",
            name="polonUid",
            field=models.TextField(blank=True, db_index=True, null=True),
        ),
        migrations.AlterField(
            model_name="scientist",
            name="qualifications",
            field=models.TextField(
                blank=True, db_index=True, null=True, verbose_name="Tytuł"
            ),
        ),
    ]
29.543478
74
0.557763
133
1,359
5.586466
0.308271
0.161507
0.201884
0.234186
0.726783
0.726783
0.670256
0.670256
0.670256
0.670256
0
0.021739
0.323032
1,359
45
75
30.2
0.78587
0.033848
0
0.615385
1
0
0.094584
0
0
0
0
0
0
1
0
false
0
0.025641
0
0.102564
0
0
0
0
null
0
1
1
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
ba38dbf48279ca33d67ba94668726fa34f3bcd92
11,047
py
Python
all_functions/configs/proxy_scraper/pygoogle-0.6/googletest.py
Heroku-elasa/-heroku-buildpack-python-ieee-new
06ec2fda04d9e478ed2506400e460489b0ca91ab
[ "MIT" ]
null
null
null
all_functions/configs/proxy_scraper/pygoogle-0.6/googletest.py
Heroku-elasa/-heroku-buildpack-python-ieee-new
06ec2fda04d9e478ed2506400e460489b0ca91ab
[ "MIT" ]
15
2021-03-18T20:25:13.000Z
2022-03-02T14:54:33.000Z
all_functions/configs/proxy_scraper/pygoogle-0.6/googletest.py
Heroku-elasa/heroku-buildpack-python-ieee-new
06ec2fda04d9e478ed2506400e460489b0ca91ab
[ "MIT" ]
1
2017-03-04T16:48:55.000Z
2017-03-04T16:48:55.000Z
"""Unit test for google.py""" __author__ = "Mark Pilgrim (f8dy@diveintomark.org)" __version__ = "$Revision: 1.4 $" __date__ = "$Date: 2004/02/06 21:00:53 $" __copyright__ = "Copyright (c) 2002 Mark Pilgrim" __license__ = "Python" import google import unittest import sys, os import GoogleSOAPFacade from StringIO import StringIO class BaseClass(unittest.TestCase): q = "python unit testing" url = "http://www.python.org/" phrase = "ptyhon" searchparams = {"func":"doGoogleSearch"} luckyparams = {} luckyparams.update(searchparams) luckyparams.update({"feelingLucky":1}) metaparams = {} metaparams.update(searchparams) metaparams.update({"showMeta":1}) reverseparams = {} reverseparams.update(searchparams) reverseparams.update({"reverseOrder":1}) cacheparams = {"func":"doGetCachedPage"} spellingparams = {"func":"doSpellingSuggestion"} envkey = "GOOGLE_LICENSE_KEY" badkey = "a" class Redirector(BaseClass): def setUp(self): self.savestdout = sys.stdout self.output = StringIO() sys.stdout = self.output def tearDown(self): sys.stdout = self.savestdout class CommandLineTest(Redirector): def lastOutput(self): self.output.seek(0) rc = self.output.read() self.output.seek(0) return rc def testVersion(self): """-v should print version""" google.main(["-v"]) commandLineAnswer = self.lastOutput() google._version() self.assertEqual(commandLineAnswer, self.lastOutput()) def testVersionLong(self): """--version should print version""" google.main(["--version"]) commandLineAnswer = self.lastOutput() google._version() self.assertEqual(commandLineAnswer, self.lastOutput()) def testHelp(self): """-h should print usage""" google.main(["-h"]) commandLineAnswer = self.lastOutput() google._usage() self.assertEqual(commandLineAnswer, self.lastOutput()) def testHelpLong(self): """--help should print usage""" google.main(["--help"]) commandLineAnswer = self.lastOutput() google._usage() self.assertEqual(commandLineAnswer, self.lastOutput()) def testSearch(self): """-s should search""" google.main(["-s %s" % self.q]) commandLineAnswer = self.lastOutput() google._output(google.doGoogleSearch(self.q), self.searchparams) self.assertEqual(commandLineAnswer, self.lastOutput()) def testSearchLong(self): """--search should search""" google.main(["--search", self.q]) commandLineAnswer = self.lastOutput() google._output(google.doGoogleSearch(self.q), self.searchparams) self.assertEqual(commandLineAnswer, self.lastOutput()) def testSearchDefault(self): """no options + search phrase should search""" google.main([self.q]) commandLineAnswer = self.lastOutput() google._output(google.doGoogleSearch(self.q), self.searchparams) self.assertEqual(commandLineAnswer, self.lastOutput()) def testNoOptions(self): """no options at all should print usage""" google.main([]) commandLineAnswer = self.lastOutput() google._usage() self.assertEqual(commandLineAnswer, self.lastOutput()) def testCache(self): """-c should retrieve cache""" google.main(["-c", self.url]) commandLineAnswer = self.lastOutput() google._output(google.doGetCachedPage(self.url), self.cacheparams) self.assertEqual(commandLineAnswer, self.lastOutput()) def testCacheLong(self): """--cache should retrieve cache""" google.main(["--cache", self.url]) commandLineAnswer = self.lastOutput() google._output(google.doGetCachedPage(self.url), self.cacheparams) self.assertEqual(commandLineAnswer, self.lastOutput()) def testSpelling(self): """-p should check spelling""" google.main(["-p", self.phrase]) commandLineAnswer = self.lastOutput() google._output(google.doSpellingSuggestion(self.phrase), 
self.spellingparams) self.assertEqual(commandLineAnswer, self.lastOutput()) def testSpellingLong(self): """--spelling should check spelling""" google.main(["--spelling", self.phrase]) commandLineAnswer = self.lastOutput() google._output(google.doSpellingSuggestion(self.phrase), self.spellingparams) self.assertEqual(commandLineAnswer, self.lastOutput()) def testLucky(self): """-l should return only first result""" google.main(["-l", "-s", self.q]) commandLineAnswer = self.lastOutput() google._output(google.doGoogleSearch(self.q), self.luckyparams) self.assertEqual(commandLineAnswer, self.lastOutput()) def testLucky1(self): """-1 should return only first result""" google.main(["-1", "-s", self.q]) commandLineAnswer = self.lastOutput() google._output(google.doGoogleSearch(self.q), self.luckyparams) self.assertEqual(commandLineAnswer, self.lastOutput()) def testLuckyLong(self): """--lucky should return only first result""" google.main(["--lucky", "-s", self.q]) commandLineAnswer = self.lastOutput() google._output(google.doGoogleSearch(self.q), self.luckyparams) self.assertEqual(commandLineAnswer, self.lastOutput()) def testMeta(self): """-m should return meta information""" google.main(["-m", "-s", self.q]) commandLineAnswer = self.lastOutput() commandLineAnswer = commandLineAnswer[:commandLineAnswer.index('searchTime')] google._output(google.doGoogleSearch(self.q), self.metaparams) realAnswer = self.lastOutput() realAnswer = realAnswer[:realAnswer.index('searchTime')] self.assertEqual(commandLineAnswer, realAnswer) def testMetaLong(self): """--meta should return meta information""" google.main(["--meta", "-s", self.q]) commandLineAnswer = self.lastOutput() commandLineAnswer = commandLineAnswer[:commandLineAnswer.index('searchTime')] google._output(google.doGoogleSearch(self.q), self.metaparams) realAnswer = self.lastOutput() realAnswer = realAnswer[:realAnswer.index('searchTime')] self.assertEqual(commandLineAnswer, realAnswer) def testReverse(self): """-r should reverse results""" google.main(["-r", "-s", self.q]) commandLineAnswer = self.lastOutput() google._output(google.doGoogleSearch(self.q), self.reverseparams) self.assertEqual(commandLineAnswer, self.lastOutput()) def testReverseLong(self): """--reverse should reverse results""" google.main(["--reverse", "-s", self.q]) commandLineAnswer = self.lastOutput() google._output(google.doGoogleSearch(self.q), self.reverseparams) self.assertEqual(commandLineAnswer, self.lastOutput()) class LicenseKeyTest(Redirector): licensefile = "googlekey.txt" licensebackup = "googlekey.txt.bak" def safeRename(self, dirname, old, new): if dirname: old = os.path.join(dirname, old) new = os.path.join(dirname, new) try: os.rename(old, new) except OSError: pass def safeDelete(self, dirname, filename): if dirname: filename = os.path.join(dirname, filename) try: os.remove(filename) except OSError: pass def createfile(self, dirname, filename, content): if dirname: filename = os.path.join(dirname, filename) fsock = open(filename, "w") fsock.write(content) fsock.close() def rememberKeys(self): self.moduleLicenseKey = google.LICENSE_KEY self.envLicenseKey = os.environ.get(self.envkey, None) self.safeRename(os.environ["HOME"], self.licensefile, self.licensebackup) self.safeRename("", self.licensefile, self.licensebackup) self.safeRename(google._getScriptDir(), self.licensefile, self.licensebackup) def restoreKeys(self): google.LICENSE_KEY = self.moduleLicenseKey if self.envLicenseKey: os.environ[self.envkey] = self.envLicenseKey self.safeDelete(os.environ["HOME"], 
self.licensefile) self.safeRename(os.environ["HOME"], self.licensebackup, self.licensefile) self.safeDelete("", self.licensefile) self.safeRename("", self.licensebackup, self.licensefile) self.safeDelete(google._getScriptDir(), self.licensefile) self.safeRename(google._getScriptDir(), self.licensebackup, self.licensefile) def clearKeys(self): google.setLicense(None) if os.environ.get(self.envkey): del os.environ[self.envkey] def setUp(self): Redirector.setUp(self) self.rememberKeys() self.clearKeys() def tearDown(self): Redirector.tearDown(self) self.clearKeys() self.restoreKeys() def testNoKey(self): """having no license key should raise google.NoLicenseKey""" self.assertRaises(google.NoLicenseKey, google.doGoogleSearch, q=self.q) def testPassInvalidKey(self): """passing invalid license key should fail with faultType""" self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q, license_key=self.badkey) def testSetInvalidKey(self): """setting invalid module-level license key should fail with faultType""" google.setLicense(self.badkey) self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q) def testEnvInvalidKey(self): """invalid environment variable license key should fail with faultType""" os.environ[self.envkey] = self.badkey self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q) def testHomeDirKey(self): """invalid license key in home directory should fail with faultType""" self.createfile(os.environ["HOME"], self.licensefile, self.badkey) self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q) def testCurDirKey(self): """invalid license key in current directory should fail with faultType""" self.createfile("", self.licensefile, self.badkey) self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q) def testScriptDirKey(self): """invalid license key in script directory should fail with faultType""" self.createfile(google._getScriptDir(), self.licensefile, self.badkey) self.assertRaises(GoogleSOAPFacade.faultType, google.doGoogleSearch, q=self.q) if __name__ == "__main__": unittest.main()
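The Redirector class above swaps sys.stdout for a StringIO buffer so that the command-line path and the direct API path can be captured and compared. A minimal sketch of the same capture-and-compare idea, assuming Python 3's io.StringIO and contextlib.redirect_stdout rather than the Python 2 StringIO module used by the original:

# Minimal sketch (assumption: Python 3 stdlib only) of the
# capture-and-compare pattern used by Redirector/CommandLineTest above.
import io
import unittest
from contextlib import redirect_stdout


def greet():
    print("hello")


class CaptureTest(unittest.TestCase):
    def test_greet_output(self):
        buf = io.StringIO()
        with redirect_stdout(buf):   # route print() into the buffer
            greet()
        self.assertEqual(buf.getvalue(), "hello\n")


if __name__ == "__main__":
    unittest.main()

contextlib.redirect_stdout restores the original stream automatically when the with-block exits, which removes the need for the explicit setUp/tearDown bookkeeping the Python 2 code performs by hand.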
37.962199
111
0.650674
1,080
11,047
6.603704
0.181481
0.074593
0.156478
0.088194
0.646242
0.563657
0.495794
0.460881
0.449103
0.439708
0
0.003384
0.224314
11,047
290
112
38.093103
0.828918
0.094505
0
0.404651
0
0
0.047431
0.002331
0
0
0
0
0.12093
1
0.172093
false
0.013953
0.023256
0
0.27907
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ba38dbf6245155e0bd5d6fb74ada7b2d40f61c9a
1,591
py
Python
services/processdata/processdata/server.py
matheusmercadante/space-hub
6956d4fad5c92f2ce5903852bdd77e124d7941ef
[ "RSA-MD" ]
null
null
null
services/processdata/processdata/server.py
matheusmercadante/space-hub
6956d4fad5c92f2ce5903852bdd77e124d7941ef
[ "RSA-MD" ]
null
null
null
services/processdata/processdata/server.py
matheusmercadante/space-hub
6956d4fad5c92f2ce5903852bdd77e124d7941ef
[ "RSA-MD" ]
null
null
null
import sys
import asyncio

import tornado.ioloop
from classes.rabbitmq_tornado import TornadoAdapter
from tornado import gen

from services.read_sheet import read_sheet

RABBIT_URI = "amqp://guest:guest@localhost:5672/"


@gen.coroutine
def handle_message(logger, message):
    logger.info("File request {}".format(message))
    res = read_sheet(message)
    logger.info("File result {}".format(res))
    return res


if __name__ == "__main__":
    if sys.platform == 'win32':
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

    configuration = dict(
        publish=dict(
            outgoing_1=dict(
                exchange="processdata-rpc",
                exchange_type="direct",
                routing_key="processdata",
                queue="process-data-finished",
                durable=True,
                auto_delete=False,
                prefetch_count=1
            )
        ),
        receive=dict(
            incoming=dict(
                exchange="processdata-rpc",
                exchange_type="direct",
                routing_key="processdata",
                queue="process-data-comming",
                durable=True,
                auto_delete=False,
                prefetch_count=1
            )
        )
    )

    # Using Tornado IO Loop
    io_loop = tornado.ioloop.IOLoop.current()
    rabbit_connection = TornadoAdapter(rabbitmq_url=RABBIT_URI,
                                       configuration=configuration,
                                       io_loop=io_loop)
    rabbit_connection.receive(handler=handle_message,
                              queue=configuration["receive"]["incoming"]["queue"])
    io_loop.start()
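To exercise handle_message end to end, one could publish a test message into the queue the adapter consumes. A minimal sketch, assuming the pika client library (not used by the service itself) and reusing the exchange/routing-key names from the configuration above; the payload is a hypothetical value, since the shape read_sheet expects is not shown here:

# Minimal sketch (assumption: pika is installed and the service above is
# running). Publishes one message to the exchange/routing key that feeds
# the "process-data-comming" queue declared in `configuration`.
import pika

connection = pika.BlockingConnection(
    pika.URLParameters("amqp://guest:guest@localhost:5672/"))
channel = connection.channel()
channel.basic_publish(
    exchange="processdata-rpc",
    routing_key="processdata",
    body=b"path/to/sheet.xlsx",  # hypothetical payload; read_sheet's contract is not shown
)
connection.close()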
30.596154
109
0.615336
160
1,591
5.9
0.4625
0.03178
0.036017
0.044492
0.256356
0.256356
0.256356
0.256356
0.17161
0.17161
0
0.007909
0.284727
1,591
52
110
30.596154
0.821617
0.013199
0
0.272727
0
0
0.128107
0.035054
0
0
0
0
0
1
0.022727
false
0
0.136364
0
0.181818
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ba3a19f10a71c2771193a02d9bae8cb86fc3ea41
16,428
py
Python
second-floor.py
levabd/smart-climat-daemon
8ff273eeb74fb03ea04fda11b0128fa13d35b500
[ "MIT" ]
null
null
null
second-floor.py
levabd/smart-climat-daemon
8ff273eeb74fb03ea04fda11b0128fa13d35b500
[ "MIT" ]
1
2021-06-02T03:55:13.000Z
2021-06-02T03:55:13.000Z
second-floor.py
levabd/smart-climat-daemon
8ff273eeb74fb03ea04fda11b0128fa13d35b500
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
import json
import argparse
import re
import datetime

import paramiko
import requests

# cmd ['ssh', 'smart',
#      'mkdir -p /home/levabd/smart-home-temp-humidity-monitor;
#      cat - > /home/levabd/smart-home-temp-humidity-monitor/lr.json']
from btlewrap import available_backends, BluepyBackend
from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, \
    MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY

br_state = {}
cb_state = {}
f = open('/home/pi/smart-climat-daemon/ac_br_state.json')
br_state = json.load(f)
f = open('/home/pi/smart-climat-daemon/ac_cb_state.json')
cb_state = json.load(f)

dummy_ac_url = 'http://smart.levabd.pp.ua:2002'


def valid_mitemp_mac(mac, pat=re.compile(r"[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}")):
    """Check for valid mac addresses."""
    if not pat.match(mac.upper()):
        raise argparse.ArgumentTypeError(
            'The MAC address "{}" seems to be in the wrong format'.format(mac))
    return mac


# turn_on_humidifier():
#     """Turn on humidifier on a first floor."""
#     hummidifier_plug = chuangmi_plug.ChuangmiPlug(
#         ip='192.168.19.61',
#         token='14f5b868a58ef4ffaef6fece61c65b16',
#         start_id=0,
#         debug=1,
#         lazy_discover=True,
#         model='chuangmi.plug.m1')
#     hummidifier_plug.on()
#
#
# def turn_off_humidifier():
#     """Turn off humidifier on a first floor."""
#     hummidifier_plug = chuangmi_plug.ChuangmiPlug(
#         ip='192.168.19.61',
#         token='14f5b868a58ef4ffaef6fece61c65b16',
#         start_id=0,
#         debug=1,
#         lazy_discover=True,
#         model='chuangmi.plug.m1')
#     hummidifier_plug.off()


def check_if_ac_off(room):
    """Check if AC is turned off."""
    status_url = dummy_ac_url
    if room == 'br':
        status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=27fbc501b51b47663e77c46816a'
    elif room == 'cb':
        status_url = 'http://smart.levabd.pp.ua:2002/status-office?key=27fbc501b51b47663e77c46816a'
    response = requests.get(status_url, timeout=(20, 30))
    if 'Pow' in response.json():
        print(response.json()['Pow'])
        if response.json()['Pow'] == "ON":
            return False
        return True
    return None


def check_if_ac_heat(room):
    """Check if AC is turned on for automated heating."""
    status_url = dummy_ac_url
    if room == 'br':
        status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=27fbc501b51b47663e77c46816a'
    elif room == 'cb':
        status_url = 'http://smart.levabd.pp.ua:2002/status-office?key=27fbc501b51b47663e77c46816a'
    response = requests.get(status_url, timeout=(20, 30))
    print(response.json())
    if 'Pow' in response.json():
        if (response.json()['Pow'] == "ON") and (response.json()['Mod'] == "HEAT"):
            return True
        return False
    return None


def check_if_ac_cool(room):
    """Check if AC is turned on for automated cooling."""
    status_url = dummy_ac_url
    if room == 'br':
        status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=27fbc501b51b47663e77c46816a'
    elif room == 'cb':
        status_url = 'http://smart.levabd.pp.ua:2002/status-office?key=27fbc501b51b47663e77c46816a'
    response = requests.get(status_url, timeout=(20, 30))
    print(response.json())
    if 'Pow' in response.json():
        if (response.json()['Pow'] == "ON") and (response.json()['Mod'] == "COOL"):
            return True
        return False
    return None


def set_cool_temp_ac(room, temp):
    """Set AC temperature for cooling if AC is already cooling."""
    state = {}
    state = br_state if room == 'br' else cb_state  # 'cb'
    if (not state['wasTurnedCool'] == 1 and check_if_ac_cool(room)) or (check_if_ac_heat('br')):
        return
    temp_url = dummy_ac_url
    if room == 'br':
        temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-bedroom?key=27fbc501b51b47663e77c46816a&temp='
    elif room == 'cb':
        temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-office?key=27fbc501b51b47663e77c46816a&temp='
    response = requests.get(temp_url + temp)
    print(response)


def turn_on_cool_ac(room):
    """Turn on AC for cooling if it was not already on."""
    state = {}
    state = br_state if room == 'br' else cb_state  # 'cb'
    ac_cool = check_if_ac_cool(room)
    if ((state['wasTurnedCool'] == 1) and not state['triedTurnedCool'] == 1) or (ac_cool is None) or (check_if_ac_heat('br')):
        return
    if ac_cool and (state['triedTurnedCool'] == 1):
        if room == 'br':
            br_state['triedTurnedOff'] = 0
            br_state['wasTurnedOff'] = 0
            br_state['triedTurnedCool'] = 0
            br_state['wasTurnedCool'] = 1
            br_state['triedTurnedHeat'] = 0
            br_state['wasTurnedHeat'] = 0
            with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
                json.dump(br_state, file)
        elif room == 'cb':
            cb_state['triedTurnedOff'] = 0
            cb_state['wasTurnedOff'] = 0
            cb_state['triedTurnedCool'] = 0
            cb_state['wasTurnedCool'] = 1
            cb_state['triedTurnedHeat'] = 0
            cb_state['wasTurnedHeat'] = 0
            with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
                json.dump(cb_state, file)
        return
    cool_url = dummy_ac_url
    turn_on_url = dummy_ac_url
    temp_url = dummy_ac_url
    if room == 'br':
        turn_on_url = 'http://smart.levabd.pp.ua:2002/powerOn-bedroom?key=27fbc501b51b47663e77c46816a'
        cool_url = 'http://smart.levabd.pp.ua:2002/cool-bedroom?autoFan=false&key=27fbc501b51b47663e77c46816a'
        temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-bedroom?key=27fbc501b51b47663e77c46816a&temp=26'
    elif room == 'cb':
        turn_on_url = 'http://smart.levabd.pp.ua:2002/powerOn-office?key=27fbc501b51b47663e77c46816a'
        cool_url = 'http://smart.levabd.pp.ua:2002/cool-office?autoFan=false&key=27fbc501b51b47663e77c46816a'
        temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-office?key=27fbc501b51b47663e77c46816a&temp=26'
    if room == 'br':
        br_state['triedTurnedCool'] = 1
        br_state['wasTurnedCool'] = 0
        with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
            json.dump(br_state, file)
    elif room == 'cb':
        cb_state['triedTurnedCool'] = 1
        cb_state['wasTurnedCool'] = 0
        with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
            json.dump(cb_state, file)
    response = requests.get(temp_url)
    print(response)
    response = requests.get(cool_url)
    print(response)
    response = requests.get(turn_on_url)
    print(response)


def turn_on_heat_ac(room):
    """Turn on AC for heating if it was not already on."""
    state = {}
    state = br_state if room == 'br' else cb_state  # 'cb'
    ac_heat = check_if_ac_heat(room)
    if ((state['wasTurnedHeat'] == 1) and not state['triedTurnedHeat'] == 1) or (ac_heat is None):
        return
    if ac_heat and (state['triedTurnedHeat'] == 1):
        if room == 'br':
            br_state['triedTurnedOff'] = 0
            br_state['wasTurnedOff'] = 0
            br_state['triedTurnedCool'] = 0
            br_state['wasTurnedCool'] = 0
            br_state['triedTurnedHeat'] = 0
            br_state['wasTurnedHeat'] = 1
            with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
                json.dump(br_state, file)
        elif room == 'cb':
            cb_state['triedTurnedOff'] = 0
            cb_state['wasTurnedOff'] = 0
            cb_state['triedTurnedCool'] = 0
            cb_state['wasTurnedCool'] = 0
            cb_state['triedTurnedHeat'] = 0
            cb_state['wasTurnedHeat'] = 1
            with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
                json.dump(cb_state, file)
        return
    heat_url = dummy_ac_url
    turn_on_url = dummy_ac_url
    temp_url = dummy_ac_url
    if room == 'br':
        turn_on_url = 'http://smart.levabd.pp.ua:2002/powerOn-bedroom?key=27fbc501b51b47663e77c46816a'
        heat_url = 'http://smart.levabd.pp.ua:2002/heat-bedroom?key=27fbc501b51b47663e77c46816a'
        temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-bedroom?key=27fbc501b51b47663e77c46816a&temp=25'
    elif room == 'cb':
        turn_on_url = 'http://smart.levabd.pp.ua:2002/powerOn-office?key=27fbc501b51b47663e77c46816a'
        heat_url = 'http://smart.levabd.pp.ua:2002/heat-office?autoFan=false&key=27fbc501b51b47663e77c46816a'
        temp_url = 'http://smart.levabd.pp.ua:2002/setTemp-office?key=27fbc501b51b47663e77c46816a&temp=25'
    if room == 'br':
        br_state['triedTurnedHeat'] = 1
        br_state['wasTurnedHeat'] = 0
        with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
            json.dump(br_state, file)
    elif room == 'cb':
        cb_state['triedTurnedHeat'] = 1
        cb_state['wasTurnedHeat'] = 0
        with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
            json.dump(cb_state, file)
    response = requests.get(temp_url)
    print(response)
    response = requests.get(heat_url)
    print(response)
    response = requests.get(turn_on_url)
    print(response)


def turn_off_ac(room):
    """Turn off AC."""
    state = {}
    state = br_state if room == 'br' else cb_state  # 'cb'
    ac_off = check_if_ac_off(room)
    if ((state['wasTurnedOff'] == 1) and not state['triedTurnedOff'] == 1) or (ac_off is None):
        return
    if ac_off and (state['triedTurnedCool'] == 1):
        if room == 'br':
            br_state['triedTurnedOff'] = 0
            br_state['wasTurnedOff'] = 1
            br_state['triedTurnedCool'] = 0
            br_state['wasTurnedCool'] = 0
            br_state['triedTurnedHeat'] = 0
            br_state['wasTurnedHeat'] = 0
            with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
                json.dump(br_state, file)
        elif room == 'cb':
            cb_state['triedTurnedOff'] = 0
            cb_state['wasTurnedOff'] = 1
            cb_state['triedTurnedCool'] = 0
            cb_state['wasTurnedCool'] = 0
            cb_state['triedTurnedHeat'] = 0
            cb_state['wasTurnedHeat'] = 0
            with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
                json.dump(cb_state, file)
    turn_url = dummy_ac_url
    if room == 'br':
        turn_url = 'http://smart.levabd.pp.ua:2002/powerOff-bedroom?key=27fbc501b51b47663e77c46816a'
    elif room == 'cb':
        turn_url = 'http://smart.levabd.pp.ua:2002/powerOff-office?key=27fbc501b51b47663e77c46816a'
    if room == 'br':
        br_state['triedTurnedOff'] = 1
        br_state['wasTurnedOff'] = 0
        with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
            json.dump(br_state, file)
    elif room == 'cb':
        cb_state['triedTurnedOff'] = 1
        cb_state['wasTurnedOff'] = 0
        with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
            json.dump(cb_state, file)
    response = requests.get(turn_url)
    print(response)


def record_temp_humid(temperature, humidity, room):
    """Record temperature and humidity data for web interface monitor."""
    dicty = {
        "temperature": temperature,
        "humidity": humidity
    }
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect('smart.levabd.pp.ua', port=2001, username='levabd', password='vapipu280.')
    sftp = ssh.open_sftp()
    with sftp.open('smart-home-temp-humidity-monitor/' + room + '.json', 'w') as outfile:
        json.dump(dicty, outfile)
    ssh.close()


def poll_temp_humidity(room):
    """Poll data from the sensor."""
    today = datetime.datetime.today()
    backend = BluepyBackend
    mac = '58:2d:34:38:be:2e' if room == 'br' else '58:2d:34:39:27:4e'  # 'cb'
    poller = MiTempBtPoller(mac, backend)
    temperature = poller.parameter_value(MI_TEMPERATURE)
    humidity = poller.parameter_value(MI_HUMIDITY)
    print("Month: {}".format(today.month))
    print("Getting data from Mi Temperature and Humidity Sensor")
    print("FW: {}".format(poller.firmware_version()))
    print("Name: {}".format(poller.name()))
    print("Battery: {}".format(poller.parameter_value(MI_BATTERY)))
    print("Temperature: {}".format(poller.parameter_value(MI_TEMPERATURE)))
    print("Humidity: {}".format(poller.parameter_value(MI_HUMIDITY)))
    return (today, temperature, humidity)


# scan(args):
#     """Scan for sensors."""
#     backend = _get_backend(args)
#     print('Scanning for 10 seconds...')
#     devices = mitemp_scanner.scan(backend, 10)
#     devices = []
#     print('Found {} devices:'.format(len(devices)))
#     for device in devices:
#         print('  {}'.format(device))


def list_backends(_):
    """List all available backends."""
    backends = [b.__name__ for b in available_backends()]
    print('\n'.join(backends))


def main():
    """Main function."""
    # check bedroom
    (today, temperature, humidity) = poll_temp_humidity('br')
    # if (humidity > 49) and (today.month < 10) and (today.month > 4):
    #     turn_off_humidifier()
    # if (humidity < 31) and (today.month < 10) and (today.month > 4):
    #     turn_on_humidifier()
    # if (humidity < 31) and ((today.month > 9) or (today.month < 5)):
    #     turn_on_humidifier()
    # if (humidity > 49) and ((today.month > 9) or (today.month < 5)):
    #     turn_off_humidifier()
    # # Prevent Sleep of Xiaomi Smart Plug
    # hummidifier_plug = chuangmi_plug.ChuangmiPlug(
    #     ip='192.168.19.59',
    #     token='14f5b868a58ef4ffaef6fece61c65b16',
    #     start_id=0,
    #     debug=0,
    #     lazy_discover=True,
    #     model='chuangmi.plug.m1')
    # print(hummidifier_plug.status())

    # Record temperature and humidity for monitor
    record_temp_humid(temperature, humidity, 'br')

    # clear env at night
    if today.hour == 3:
        br_state['triedTurnedOff'] = 0
        br_state['wasTurnedOff'] = 0
        br_state['triedTurnedCool'] = 0
        br_state['wasTurnedCool'] = 0
        br_state['triedTurnedHeat'] = 0
        br_state['wasTurnedHeat'] = 0
        cb_state['triedTurnedOff'] = 0
        cb_state['wasTurnedOff'] = 0
        cb_state['triedTurnedCool'] = 0
        cb_state['wasTurnedCool'] = 0
        cb_state['triedTurnedHeat'] = 0
        cb_state['wasTurnedHeat'] = 0
        with open('/home/pi/smart-climat-daemon/ac_br_state.json', 'w') as file:
            json.dump(br_state, file)
        with open('/home/pi/smart-climat-daemon/ac_cb_state.json', 'w') as file:
            json.dump(cb_state, file)

    # if (temperature > 24.0) and (today.month < 6) and (today.month > 3) and (today.hour < 11) and (today.hour > 3):
    #     turn_on_cool_ac('br')
    if (temperature > 32) and (today.hour < 24) and (today.hour > 7):
        turn_on_cool_ac('br')
    if (temperature > 25.3) and (today.month < 10) and (today.month > 4) and (today.hour < 8) and (today.hour > 4):
        turn_on_cool_ac('br')
    if (temperature < 22) and (today.month == 10) and (today.hour < 9):
        turn_on_heat_ac('br')
    if (temperature < 22) and (today.month == 10) and (today.hour > 22):
        turn_on_heat_ac('br')
    if (temperature > 25) and (today.month == 10) and (today.hour < 9):
        turn_off_ac('br')
    if (temperature > 25) and (today.month == 10) and (today.hour > 22):
        turn_off_ac('br')
    if (today.month == 10) and (today.hour == 0) and (today.minute == 0):
        turn_off_ac('br')
    if (temperature < 23.3) and (today.hour < 8) and (today.hour > 4) and (not(check_if_ac_heat('br'))):
        turn_off_ac('br')
    if (temperature < 19) and (today.hour < 24) and (today.hour > 8) and (not(check_if_ac_heat('br'))):
        turn_off_ac('br')
    # if (temperature < 20) and ((today.month > 9) or (today.month < 5)) and (today.hour < 24) and (today.hour > 9):
    #     turn_on_heat_ac()
    # if (temperature > 22) and ((today.month > 9) or (today.month < 5)):
    #     turn_off_ac()

    # record the office room numbers
    (_, temperature, humidity) = poll_temp_humidity('cb')
    record_temp_humid(temperature, humidity, 'cb')


if __name__ == '__main__':
    main()
40.562963
126
0.623204
2,166
16,428
4.560018
0.110342
0.036145
0.031589
0.036448
0.754885
0.691202
0.664068
0.616786
0.597854
0.57315
0
0.064908
0.229121
16,428
404
127
40.663366
0.715019
0.164475
0
0.589041
0
0.034247
0.294749
0.060597
0
0
0
0
0
1
0.041096
false
0.003425
0.027397
0
0.126712
0.065068
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ba3ad00158e6db261842bb50d50fbeca583ec7db
2,880
py
Python
swot_simulator/error/orbital.py
CNES/swot_simulator
92d0bb4a274ec9923265567968beea3be4283e61
[ "BSD-3-Clause" ]
17
2020-05-28T08:20:11.000Z
2022-03-25T07:40:48.000Z
swot_simulator/error/orbital.py
CNES/swot_simulator
92d0bb4a274ec9923265567968beea3be4283e61
[ "BSD-3-Clause" ]
7
2021-07-21T02:15:52.000Z
2021-11-14T10:46:41.000Z
swot_simulator/error/orbital.py
CNES/swot_simulator
92d0bb4a274ec9923265567968beea3be4283e61
[ "BSD-3-Clause" ]
8
2020-05-17T13:53:43.000Z
2022-03-25T07:40:58.000Z
# Copyright (c) 2021 CNES/JPL
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""
Orbital error
-------------
"""
from typing import Dict, Tuple

import dask.array as da
import numpy as np

from .. import random_signal
from .. import settings
from .. import VOLUMETRIC_MEAN_RADIUS

#: Signal amplitude of the orbital error in micro-radians
AMPLITUDE = 100

#: Delta T of the spatial sampling in seconds
DT = 60


def _orbital_error_spectrum(
        orbit_duration: np.timedelta64,
        rng: np.random.Generator) -> Tuple[np.ndarray, float]:
    """Calculate orbital error spectrum

    Args:
        orbit_duration (float): Orbit duration in fractional days
        rng (np.random.Generator): Random number generator

    Returns:
        tuple: (yg, fmaxr)
    """
    df = 1 / (1000 * 86400)
    spatial_frequency = np.arange(df, 1 / DT, df)
    orbital_frequency = 1 / float(
        orbit_duration.astype("timedelta64[us]").astype("float64") * 1e-6)
    sigma_peak = orbital_frequency / 1000
    ps_orbital = np.exp(-0.5 * (spatial_frequency - orbital_frequency)**2 /
                        sigma_peak**2)
    ps_orbital[ps_orbital < 1 / 1000] = 0.
    ps_orbital /= np.sum(ps_orbital * df)
    ps_orbital *= AMPLITUDE**2
    return random_signal.gen_psd_1d(spatial_frequency,
                                    ps_orbital,
                                    rng,
                                    alpha=10)


class Orbital:
    """
    Simulate the orbital error

    Args:
        parameters (Parameters): Simulation parameters.
        orbit_duration (np.timedelta64): Orbit duration.
    """
    def __init__(self, parameters: settings.Parameters,
                 orbit_duration: np.timedelta64) -> None:
        yg, self.fmaxr = _orbital_error_spectrum(orbit_duration,
                                                 parameters.rng())
        self.yg = da.from_array(yg, name="orbital_error").persist()
        assert parameters.height is not None
        height = parameters.height * 1e-3
        self.conversion_factor = (1 + height / VOLUMETRIC_MEAN_RADIUS) * 1e-3

    def generate(
        self,
        time: np.ndarray,
        x_ac: np.ndarray,
    ) -> Dict[str, np.ndarray]:
        """Generate orbital error

        Args:
            time (np.ndarray): time vector

        Returns:
            np.ndarray: orbital error
        """
        time = time.astype("datetime64[us]").astype("float64") * 1e-6
        xg = np.linspace(0, 0.5 / self.fmaxr * self.yg.shape[0],
                         self.yg.shape[0])
        error_orbital = np.interp(np.mod(time, xg.max()), xg,
                                  self.yg.compute())
        return {
            "simulated_error_orbital":
            x_ac * error_orbital[:, np.newaxis] * self.conversion_factor,
        }
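The spectrum construction in _orbital_error_spectrum can be checked in isolation: a Gaussian peak centred on the orbital frequency is thresholded, normalised to unit integral over frequency, then scaled so that its integral equals AMPLITUDE**2. A standalone numpy sketch with an illustrative orbit duration (an assumed toy value, not a SWOT parameter):

# Standalone sketch (numpy only, toy numbers) of the PSD construction used
# by _orbital_error_spectrum above: Gaussian peak at the orbital frequency,
# thresholded, normalised, then scaled by AMPLITUDE**2.
import numpy as np

AMPLITUDE = 100          # micro-radians, as above
DT = 60                  # seconds, as above
orbit_seconds = 6060.0   # illustrative orbit duration (~101 min), an assumption

df = 1 / (1000 * 86400)
spatial_frequency = np.arange(df, 1 / DT, df)
orbital_frequency = 1 / orbit_seconds
sigma_peak = orbital_frequency / 1000

ps_orbital = np.exp(-0.5 * (spatial_frequency - orbital_frequency)**2
                    / sigma_peak**2)
ps_orbital[ps_orbital < 1 / 1000] = 0.0   # drop the negligible tails
ps_orbital /= np.sum(ps_orbital * df)     # unit integral over frequency
ps_orbital *= AMPLITUDE**2                # total variance = AMPLITUDE**2

# the integral of the PSD now equals AMPLITUDE**2
print(np.sum(ps_orbital * df))            # ~10000.0

The module itself then hands this PSD to random_signal.gen_psd_1d to draw a random realisation, and generate() interpolates that realisation periodically in time (np.mod(time, xg.max())), so the error repeats with the orbit.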
30.315789
77
0.587847
340
2,880
4.835294
0.382353
0.058394
0.036496
0.047445
0.105839
0
0
0
0
0
0
0.0335
0.305556
2,880
94
78
30.638298
0.7885
0.246181
0
0
0
0
0.03848
0.011203
0
0
0
0
0.020833
1
0.0625
false
0
0.125
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0