hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
c4a03463130fb943338bd0c3471073a7d727db33
14,885
py
Python
tests/compression/level-timing-brotli.py
tabulon-ext/dedupsqlfs
9dfbed17450e7f2a499a7381e0368d08ae3c700d
[ "MIT" ]
22
2015-04-09T09:00:00.000Z
2022-03-23T00:16:04.000Z
tests/compression/level-timing-brotli.py
tabulon-ext/dedupsqlfs
9dfbed17450e7f2a499a7381e0368d08ae3c700d
[ "MIT" ]
119
2015-02-11T21:39:27.000Z
2021-07-27T23:04:49.000Z
tests/compression/level-timing-brotli.py
tabulon-ext/dedupsqlfs
9dfbed17450e7f2a499a7381e0368d08ae3c700d
[ "MIT" ]
7
2016-03-16T11:53:45.000Z
2022-02-24T13:47:31.000Z
#/usr/bin/env python3 # -*- coding: utf8 -*- import sys import os import time import random import struct import io import hashlib dirname = "dedupsqlfs" # Figure out the directy which is the prefix # path-of-current-file/.. curpath = os.path.abspath( sys.argv[0] ) if os.path.islink(curpath): curpath = os.readlink(curpath) currentdir = os.path.dirname( curpath ) basedir = os.path.abspath( os.path.join( currentdir, "..", ".." ) ) dynloaddir = os.path.abspath( os.path.join( basedir, "lib-dynload" ) ) sys.path.insert( 0, dynloaddir ) sys.path.insert( 0, basedir ) nROUNDS = 5 cROUNDS = range(nROUNDS) blockSize = 1024*128 blockCnt = 16 dataMin = ord('A') dataMax = ord('Z') processData = () processDataLength = 0 compressedData = {} dataHash = '' def generate_data(): global processData, processDataLength, dataHash rnd = random.SystemRandom() md5 = hashlib.md5() for cnt in range(blockCnt): b = io.BytesIO() for n in range(blockSize): if n % 1024 == 0: sys.stdout.write(".") sys.stdout.flush() b.write(struct.pack('B', rnd.randint(dataMin, dataMax))) processData += (b.getvalue(),) processDataLength += b.tell() md5.update(b.getvalue()) print("") dataHash = md5.digest() return def generate_file_data(file_path): global processData, blockCnt, processDataLength, dataHash md5 = hashlib.md5() f = open(file_path, 'rb') f.seek(0, 2) nblk = f.tell() / blockSize processDataLength = f.tell() f.seek(0, 0) last_p = 0 while True: block = f.read(blockSize) if block: processData += (block,) blockCnt += 1 md5.update(block) else: break p = int(20.0 * blockCnt / nblk) if p > last_p: last_p = p sys.stdout.write(".") sys.stdout.flush() f.close() print("") dataHash = md5.digest() return if len(sys.argv) > 1: print("Get file data") generate_file_data(sys.argv[1]) else: print("Generate random data") generate_data() print("Done") def do_simple_ctest(method, name): global compressedData dt = 0.0 lcdata = 0.0 nblk = len(processData) ldata = processDataLength * nROUNDS for n in range(nROUNDS): 
sys.stdout.write(".") sys.stdout.flush() last_p = 0 iblk = 0 for data in processData: t1 = time.time() cdata = method(data) t2 = time.time() dt += t2 - t1 if n == 0: if not compressedData.get(name): compressedData[ name ] = () compressedData[ name ] += (cdata,) lcdata += len(cdata) p = int(20.0 * iblk / nblk) if p > last_p: last_p = p sys.stdout.write("*") sys.stdout.flush() iblk += 1 print("") return dt / nROUNDS, ldata / nROUNDS, 100.0 * lcdata / ldata def do_level_ctest(method, name, level): global compressedData key = name + "_" + str(level) dt = 0.0 lcdata = 0.0 nblk = len(processData) ldata = processDataLength * nROUNDS for n in range(nROUNDS): sys.stdout.write(".") sys.stdout.flush() last_p = 0 iblk = 0 for data in processData: t1 = time.time() cdata = method(data, level) t2 = time.time() dt += t2 - t1 if n == 0: if not compressedData.get(key): compressedData[ key ] = () compressedData[ key ] += (cdata,) lcdata += len(cdata) p = int(20.0 * iblk / nblk) if p > last_p: last_p = p sys.stdout.write("*") sys.stdout.flush() iblk += 1 print("") return dt / nROUNDS, ldata / nROUNDS, 100.0 * lcdata / ldata def do_level_ctest_lzma(method, name, level): global compressedData key = name + "_" + str(level) dt = 0.0 lcdata = 0.0 nblk = len(processData) ldata = processDataLength * nROUNDS for n in range(nROUNDS): sys.stdout.write(".") sys.stdout.flush() last_p = 0 iblk = 0 for data in processData: t1 = time.time() cdata = method(data, preset=level) t2 = time.time() dt += t2 - t1 if n == 0: if not compressedData.get(key): compressedData[ key ] = () compressedData[ key ] += (cdata,) lcdata += len(cdata) p = int(20.0 * iblk / nblk) if p > last_p: last_p = p sys.stdout.write("*") sys.stdout.flush() iblk += 1 print("") return dt / nROUNDS, ldata / nROUNDS, 100.0 * lcdata / ldata def do_level_ctest_brotli(method, name, level): global compressedData key = name + "_" + str(level) dt = 0.0 lcdata = 0.0 nblk = len(processData) ldata = processDataLength * nROUNDS for n in 
range(nROUNDS): sys.stdout.write(".") sys.stdout.flush() last_p = 0 iblk = 0 for data in processData: t1 = time.time() cdata = method(data, 0, level) t2 = time.time() dt += t2 - t1 if n == 0: if not compressedData.get(key): compressedData[ key ] = () compressedData[ key ] += (cdata,) lcdata += len(cdata) p = int(20.0 * iblk / nblk) if p > last_p: last_p = p sys.stdout.write("*") sys.stdout.flush() iblk += 1 print("") return dt / nROUNDS, ldata / nROUNDS, 100.0 * lcdata / ldata def do_simple_dtest(method, name): global compressedData dt = 0.0 ldata = 0.0 cdataAll = compressedData[ name ] nblk = len(cdataAll) for n in range(nROUNDS): sys.stdout.write(".") sys.stdout.flush() md5 = hashlib.md5() last_p = 0 iblk = 0 for cdata in cdataAll: t1 = time.time() data = method(cdata) t2 = time.time() dt += t2 - t1 md5.update(data) ldata += len(data) p = int(20.0 * iblk / nblk) if p > last_p: last_p = p sys.stdout.write("*") sys.stdout.flush() iblk += 1 dh = md5.digest() if dh != dataHash: raise ValueError("Original data hash != decompressed! %r != %r" % (dataHash, dh,)) print("") return dt / nROUNDS, ldata / nROUNDS def do_level_dtest(method, name, level): global compressedData dt = 0.0 ldata = 0.0 key = name + "_" + str(level) cdataAll = compressedData[ key ] nblk = len(cdataAll) for n in range(nROUNDS): sys.stdout.write(".") sys.stdout.flush() md5 = hashlib.md5() last_p = 0 iblk = 0 for cdata in cdataAll: t1 = time.time() data = method(cdata) t2 = time.time() dt += t2 - t1 md5.update(data) ldata += len(data) p = int(20.0 * iblk / nblk) if p > last_p: last_p = p sys.stdout.write("*") sys.stdout.flush() iblk += 1 dh = md5.digest() if dh != dataHash: raise ValueError("Original data hash != decompressed! 
%r != %r" % (dataHash, dh,)) print("") return dt / nROUNDS, ldata / nROUNDS COMPRESSION_SUPPORTED=[ ('brotli' , range(0,12), do_level_ctest_brotli,), ('zlib' , range(1,10), do_level_ctest,), ] DECOMPRESSION_SUPPORTED=[ ('brotli' , range(0,12), do_level_dtest,), ('zlib' , range(1,10), do_level_dtest,), ] CTIMING={} DTIMING={} if len(sys.argv) > 1: print("Do %s compressions of file data to collect mean time and speed..." % nROUNDS) else: print("Do %s compressions of random string to collect mean time and speed..." % nROUNDS) print("\n") for c, levels, test_func in COMPRESSION_SUPPORTED: print("Test %r" % c) if c in sys.modules: del sys.modules[c] if c == 'lz4h': if 'lz3' in sys.modules: del sys.modules['lz4'] m = __import__('lz4') m = m.compressHC else: m = __import__(c) m = m.compress if not levels: dt, ldata, ratio_proc = test_func(m, c) CTIMING[ c ] = (dt, ratio_proc, ldata / dt / 1024.0 / 1024.0,) else: CTIMING[ c ] = {} for level in levels: print("-- level: %r" % level) dt, ldata, ratio_proc = test_func(m, c, level) CTIMING[c][level] = (dt, ratio_proc, ldata / dt / 1024.0 / 1024.0,) print("\nResults:\n") for c, results in CTIMING.items(): if type(results) is tuple: dt = results[0] ratio_proc = results[1] speed_mbps = results[2] print("Compression %r: %.6f sec, %.2f %% ratio, %.2f Mb/s" % (c, dt, ratio_proc, speed_mbps,)) else: data = results.values() min_v = 10**10 max_v = 0 for lvl, _results in results.items(): dt = _results[0] ratio_proc = _results[1] speed_mbps = _results[2] if dt > max_v: max_v = dt if dt < min_v: min_v = dt dv = max_v - min_v print("Compression %r:" % c) for lvl, _results in results.items(): dt = _results[0] ratio_proc = _results[1] speed_mbps = _results[2] print("-- level: %s, time: %.6f sec, about %.2f %% times, %.2f %% ratio, %.2f Mb/s" % ( lvl, dt, (dt-min_v)*100.0/max_v, ratio_proc, speed_mbps)) print("\nTable of times:") cmps = CTIMING.keys() _cmps = ["level"] _cmps.extend(cmps) print("\t".join("%-9s" % c for c in _cmps)) for level in 
range(0,10): row = ["%8s" % level] for c in cmps: results = CTIMING[c] if type(results) is tuple: row.append("%.6f" % results[0]) else: found = False for lvl, r in results.items(): if lvl == level: row.append("%.6f" % r[0]) found = True if not found: row.append(" "*8) print("\t".join(row)) print("\nTable of ratio in %:") print("\t".join("%-9s" % c for c in _cmps)) for level in range(0,21): row = ["%8s" % level] for c in cmps: results = CTIMING[c] if type(results) is tuple: eff = "%.2f" % results[1] row.append("%8s" % eff) else: found = False for lvl, r in results.items(): if lvl == level: eff = "%.2f" % r[1] row.append("%8s" % eff) found = True if not found: row.append(" "*8) print("\t".join(row)) print("\nTable of speed in Mb/s:") print("\t".join("%-9s" % c for c in _cmps)) for level in range(0,21): row = ["%8s" % level] for c in cmps: results = CTIMING[c] if type(results) is tuple: eff = "%.2f" % results[2] row.append("%8s" % eff) else: found = False for lvl, r in results.items(): if lvl == level: eff = "%.2f" % r[2] row.append("%8s" % eff) found = True if not found: row.append(" "*8) print("\t".join(row)) print("\n") if len(sys.argv) > 1: print("Do %s decompressions of file data to collect mean time and speed..." % nROUNDS) else: print("Do %s decompressions of random string to collect mean time and speed..." 
% nROUNDS) print("\n") for c, levels, test_func in DECOMPRESSION_SUPPORTED: print("Test %r" % c) if c in sys.modules: del sys.modules[c] if c == 'lz4h': if 'lz4' in sys.modules: del sys.modules['lz4'] m = __import__('lz4') m = m.decompress else: m = __import__(c) m = m.decompress if not levels: dt, ldata = test_func(m, c) DTIMING[ c ] = (dt, ldata / dt / 1024.0 / 1024.0,) else: DTIMING[ c ] = {} for level in levels: print("-- level: %r" % level) dt, ldata = test_func(m, c, level) DTIMING[c][level] = (dt, ldata / dt / 1024.0 / 1024.0,) print("\nResults:\n") for c, results in DTIMING.items(): if type(results) is tuple: dt = results[0] speed_mbps = results[1] print("Decompression %r: %.6f sec, %.2f Mb/s" % (c, dt, speed_mbps,)) else: data = results.values() min_v = 10**10 max_v = 0 for lvl, _results in results.items(): dt = _results[0] speed_mbps = _results[1] if dt > max_v: max_v = dt if dt < min_v: min_v = dt dv = max_v - min_v print("Decompression %r:" % c) for lvl, _results in results.items(): dt = _results[0] speed_mbps = _results[1] print("-- level: %s, time: %.6f sec, about %.2f %% times, %.2f Mb/s" % ( lvl, dt, (dt-min_v)*100.0/max_v, speed_mbps)) print("\nTable of times:") cmps = DTIMING.keys() _cmps = ["level"] _cmps.extend(cmps) print("\t".join("%-9s" % c for c in _cmps)) for level in range(0,21): row = ["%8s" % level] for c in cmps: results = DTIMING[c] if type(results) is tuple: row.append("%.6f" % results[0]) else: found = False for lvl, r in results.items(): if lvl == level: row.append("%.6f" % r[0]) found = True if not found: row.append(" "*8) print("\t".join(row)) print("\nTable of speed in Mb/s:") print("\t".join("%-9s" % c for c in _cmps)) for level in range(0,21): row = ["%8s" % level] for c in cmps: results = DTIMING[c] if type(results) is tuple: eff = "%.2f" % results[1] row.append("%8s" % eff) else: found = False for lvl, r in results.items(): if lvl == level: eff = "%.2f" % r[1] row.append("%8s" % eff) found = True if not found: row.append(" 
"*8) print("\t".join(row))
23.969404
102
0.49392
1,833
14,885
3.932351
0.107474
0.034961
0.027192
0.033019
0.78399
0.758324
0.732658
0.70616
0.682436
0.668701
0
0.033811
0.37212
14,885
620
103
24.008065
0.737428
0.007256
0
0.738758
0
0.004283
0.070873
0
0
0
0
0
0
1
0.017131
false
0
0.023555
0
0.057816
0.09636
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c4c162bb2a4b1ae73be8ace69d762a917c4f5109
3,819
py
Python
venv/Lib/site-packages/numpy/typing/tests/data/reveal/array_constructors.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
41
2021-06-19T13:57:18.000Z
2021-12-02T17:08:53.000Z
venv/Lib/site-packages/numpy/typing/tests/data/reveal/array_constructors.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
5
2021-05-07T10:31:27.000Z
2021-05-07T10:33:37.000Z
venv/Lib/site-packages/numpy/typing/tests/data/reveal/array_constructors.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
4
2021-07-02T03:09:51.000Z
2021-11-25T13:00:10.000Z
from typing import List, Any import numpy as np class SubClass(np.ndarray): ... i8: np.int64 A: np.ndarray B: SubClass C: List[int] def func(i: int, j: int, **kwargs: Any) -> SubClass: ... reveal_type(np.asarray(A)) # E: ndarray reveal_type(np.asarray(B)) # E: ndarray reveal_type(np.asarray(C)) # E: ndarray reveal_type(np.asanyarray(A)) # E: ndarray reveal_type(np.asanyarray(B)) # E: SubClass reveal_type(np.asanyarray(B, dtype=int)) # E: ndarray reveal_type(np.asanyarray(C)) # E: ndarray reveal_type(np.ascontiguousarray(A)) # E: ndarray reveal_type(np.ascontiguousarray(B)) # E: ndarray reveal_type(np.ascontiguousarray(C)) # E: ndarray reveal_type(np.asfortranarray(A)) # E: ndarray reveal_type(np.asfortranarray(B)) # E: ndarray reveal_type(np.asfortranarray(C)) # E: ndarray reveal_type(np.require(A)) # E: ndarray reveal_type(np.require(B)) # E: SubClass reveal_type(np.require(B, requirements=None)) # E: SubClass reveal_type(np.require(B, dtype=int)) # E: ndarray reveal_type(np.require(B, requirements="E")) # E: ndarray reveal_type(np.require(B, requirements=["ENSUREARRAY"])) # E: ndarray reveal_type(np.require(B, requirements={"F", "E"})) # E: ndarray reveal_type(np.require(B, requirements=["C", "OWNDATA"])) # E: SubClass reveal_type(np.require(B, requirements="W")) # E: SubClass reveal_type(np.require(B, requirements="A")) # E: SubClass reveal_type(np.require(C)) # E: ndarray reveal_type(np.linspace(0, 10)) # E: numpy.ndarray reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[numpy.ndarray, Any] reveal_type(np.logspace(0, 10)) # E: numpy.ndarray reveal_type(np.geomspace(1, 10)) # E: numpy.ndarray reveal_type(np.zeros_like(A)) # E: numpy.ndarray reveal_type(np.zeros_like(C)) # E: numpy.ndarray reveal_type(np.zeros_like(B)) # E: SubClass reveal_type(np.zeros_like(B, dtype=np.int64)) # E: numpy.ndarray reveal_type(np.ones_like(A)) # E: numpy.ndarray reveal_type(np.ones_like(C)) # E: numpy.ndarray reveal_type(np.ones_like(B)) # E: SubClass 
reveal_type(np.ones_like(B, dtype=np.int64)) # E: numpy.ndarray reveal_type(np.empty_like(A)) # E: numpy.ndarray reveal_type(np.empty_like(C)) # E: numpy.ndarray reveal_type(np.empty_like(B)) # E: SubClass reveal_type(np.empty_like(B, dtype=np.int64)) # E: numpy.ndarray reveal_type(np.full_like(A, i8)) # E: numpy.ndarray reveal_type(np.full_like(C, i8)) # E: numpy.ndarray reveal_type(np.full_like(B, i8)) # E: SubClass reveal_type(np.full_like(B, i8, dtype=np.int64)) # E: numpy.ndarray reveal_type(np.ones(1)) # E: numpy.ndarray reveal_type(np.ones([1, 1, 1])) # E: numpy.ndarray reveal_type(np.full(1, i8)) # E: numpy.ndarray reveal_type(np.full([1, 1, 1], i8)) # E: numpy.ndarray reveal_type(np.indices([1, 2, 3])) # E: numpy.ndarray reveal_type(np.indices([1, 2, 3], sparse=True)) # E: tuple[numpy.ndarray] reveal_type(np.fromfunction(func, (3, 5))) # E: SubClass reveal_type(np.identity(10)) # E: numpy.ndarray reveal_type(np.atleast_1d(A)) # E: numpy.ndarray reveal_type(np.atleast_1d(C)) # E: numpy.ndarray reveal_type(np.atleast_1d(A, A)) # E: list[numpy.ndarray] reveal_type(np.atleast_1d(A, C)) # E: list[numpy.ndarray] reveal_type(np.atleast_1d(C, C)) # E: list[numpy.ndarray] reveal_type(np.atleast_2d(A)) # E: numpy.ndarray reveal_type(np.atleast_3d(A)) # E: numpy.ndarray reveal_type(np.vstack([A, A])) # E: numpy.ndarray reveal_type(np.vstack([A, C])) # E: numpy.ndarray reveal_type(np.vstack([C, C])) # E: numpy.ndarray reveal_type(np.hstack([A, A])) # E: numpy.ndarray reveal_type(np.stack([A, A])) # E: numpy.ndarray reveal_type(np.stack([A, A], axis=0)) # E: numpy.ndarray reveal_type(np.stack([A, A], out=B)) # E: SubClass reveal_type(np.block([[A, A], [A, A]])) # E: numpy.ndarray reveal_type(np.block(C)) # E: numpy.ndarray
37.07767
77
0.704111
649
3,819
4.003082
0.104777
0.26174
0.314088
0.394919
0.886451
0.8495
0.670901
0.575058
0.285219
0.118168
0
0.01632
0.11757
3,819
102
78
37.441176
0.754599
0.268133
0
0
0
0
0.008804
0
0
0
0
0
0
1
0.013158
false
0
0.026316
0
0.052632
0
0
0
0
null
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c4c450b8c9f27e462a4277d0d77893791a443c1e
190
py
Python
locomotion/__init__.py
alaukiknpant/locomotion
e7c9486afc02d3ad1f6ef15bc33f96d5bee6054f
[ "MIT" ]
null
null
null
locomotion/__init__.py
alaukiknpant/locomotion
e7c9486afc02d3ad1f6ef15bc33f96d5bee6054f
[ "MIT" ]
null
null
null
locomotion/__init__.py
alaukiknpant/locomotion
e7c9486afc02d3ad1f6ef15bc33f96d5bee6054f
[ "MIT" ]
null
null
null
import locomotion.extendedDTW as extendedDTW from locomotion.animal import * import locomotion.trajectory as trajectory import locomotion.heatmap as heatmap import locomotion.write as write
31.666667
44
0.863158
24
190
6.833333
0.375
0.390244
0
0
0
0
0
0
0
0
0
0
0.105263
190
5
45
38
0.964706
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
c4d0713a8e060147581c16fea1a962e0490d2574
42
py
Python
research/DailyPriceInfo/test_create_home_data_dir.py
davidcui79/price_models
5307eebeca3d09b8d78b63586947b31ff835f5f3
[ "Apache-2.0" ]
null
null
null
research/DailyPriceInfo/test_create_home_data_dir.py
davidcui79/price_models
5307eebeca3d09b8d78b63586947b31ff835f5f3
[ "Apache-2.0" ]
null
null
null
research/DailyPriceInfo/test_create_home_data_dir.py
davidcui79/price_models
5307eebeca3d09b8d78b63586947b31ff835f5f3
[ "Apache-2.0" ]
null
null
null
import utils utils.get_path_to_data_dir()
14
28
0.857143
8
42
4
0.875
0
0
0
0
0
0
0
0
0
0
0
0.071429
42
3
28
14
0.820513
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
f20abd90a38fb85c6596d3dae65acfd2035e48d4
155
py
Python
configurabledockerspawner/__init__.py
zimcke/dockerspawner
c98201233198e2c3561f8427763eb6060b8277a5
[ "BSD-3-Clause" ]
null
null
null
configurabledockerspawner/__init__.py
zimcke/dockerspawner
c98201233198e2c3561f8427763eb6060b8277a5
[ "BSD-3-Clause" ]
null
null
null
configurabledockerspawner/__init__.py
zimcke/dockerspawner
c98201233198e2c3561f8427763eb6060b8277a5
[ "BSD-3-Clause" ]
null
null
null
from ._version import __version__ from .configurabledockerspawner import ConfigurableDockerSpawner __all__ = ['__version__', 'ConfigurableDockerSpawner']
31
64
0.851613
11
155
10.818182
0.454545
0
0
0
0
0
0
0
0
0
0
0
0.083871
155
4
65
38.75
0.838028
0
0
0
0
0
0.232258
0.16129
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
f21d443c4ccdb6e407d1c50e12d00c0746ff8821
49
py
Python
sql_to_pypika/exceptions.py
pahwaranger/sql_to_pypika
55ecf48aa96ddee4b16042459b75251e94cfbe6c
[ "MIT" ]
null
null
null
sql_to_pypika/exceptions.py
pahwaranger/sql_to_pypika
55ecf48aa96ddee4b16042459b75251e94cfbe6c
[ "MIT" ]
null
null
null
sql_to_pypika/exceptions.py
pahwaranger/sql_to_pypika
55ecf48aa96ddee4b16042459b75251e94cfbe6c
[ "MIT" ]
null
null
null
class ExpressionSyntaxError(Exception): pass
16.333333
39
0.795918
4
49
9.75
1
0
0
0
0
0
0
0
0
0
0
0
0.142857
49
2
40
24.5
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
1ef493a058f7d2c6fc1df4a0a421597863aa328c
120
py
Python
draftjs_exporter_markdown/styles.py
thibaudcolas/draftjs_exporter_markdown
00d8ae4ff1c63a80d6a9bd92375d413c75090a74
[ "MIT" ]
2
2020-07-30T20:35:27.000Z
2020-11-16T08:22:03.000Z
draftjs_exporter_markdown/styles.py
thibaudcolas/draftjs_exporter_markdown
00d8ae4ff1c63a80d6a9bd92375d413c75090a74
[ "MIT" ]
14
2018-09-16T12:13:16.000Z
2022-01-25T20:24:40.000Z
draftjs_exporter_markdown/styles.py
thibaudcolas/draftjs_exporter_markdown
00d8ae4ff1c63a80d6a9bd92375d413c75090a74
[ "MIT" ]
2
2018-09-14T23:05:22.000Z
2020-11-16T08:22:10.000Z
from .markdown import inline def inline_style(mark): return lambda props: inline([mark, props['children'], mark])
20
64
0.725
16
120
5.375
0.6875
0
0
0
0
0
0
0
0
0
0
0
0.15
120
5
65
24
0.843137
0
0
0
0
0
0.066667
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
0
0
0
5
4816232c5e64f9e84e6105e2800fa9ab4ec3f09e
76
py
Python
python-scripts/fsdepth/__init__.py
adityaka/misc_scripts
b28f71eb9b7eb429b44aeb9cb34f12355023125e
[ "BSD-3-Clause" ]
1
2018-01-16T18:21:07.000Z
2018-01-16T18:21:07.000Z
python-scripts/fsdepth/__init__.py
adityaka/misc_scripts
b28f71eb9b7eb429b44aeb9cb34f12355023125e
[ "BSD-3-Clause" ]
1
2017-05-09T07:13:52.000Z
2017-06-12T05:24:08.000Z
python-scripts/fsdepth/__init__.py
adityaka/misc_scripts
b28f71eb9b7eb429b44aeb9cb34f12355023125e
[ "BSD-3-Clause" ]
1
2021-09-03T14:17:00.000Z
2021-09-03T14:17:00.000Z
from depthfinder import find_max_depth __all__ = [find_max_depth]
10.857143
39
0.736842
10
76
4.8
0.7
0.291667
0.5
0
0
0
0
0
0
0
0
0
0.223684
76
6
40
12.666667
0.813559
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
48360ee00c85a788959b933414eca13100d7b65b
17,204
py
Python
keylime/tpm/tpm2_objects_test.py
THS-on/keylime
bb904fc98d9674832e630542d211e71102873b4d
[ "Apache-2.0" ]
192
2019-05-08T14:43:50.000Z
2022-03-28T20:21:28.000Z
keylime/tpm/tpm2_objects_test.py
THS-on/keylime
bb904fc98d9674832e630542d211e71102873b4d
[ "Apache-2.0" ]
694
2019-04-18T14:08:36.000Z
2022-03-31T13:55:37.000Z
keylime/tpm/tpm2_objects_test.py
THS-on/keylime
bb904fc98d9674832e630542d211e71102873b4d
[ "Apache-2.0" ]
97
2019-04-17T19:04:00.000Z
2022-03-20T18:19:28.000Z
""" SPDX-License-Identifier: Apache-2.0 Copyright 2021 Red Hat, Inc. """ import base64 import unittest from cryptography.hazmat.backends import default_backend from cryptography.x509 import load_der_x509_certificate from keylime.tpm.tpm2_objects import ( ek_low_tpm2b_public_from_pubkey, get_tpm2b_public_name, get_tpm2b_public_object_attributes, object_attributes_description, pubkey_from_tpm2b_public, OA_FIXEDTPM, OA_STCLEAR, OA_FIXEDPARENT, OA_SENSITIVEDATAORIGIN, OA_USERWITHAUTH, OA_ADMINWITHPOLICY, OA_NODA, OA_ENCRYPTEDDUPLICATION, OA_RESTRICTED, OA_DECRYPT, OA_SIGN_ENCRYPT, ) class TestTpm2Objects(unittest.TestCase): def test_get_tpm2b_public_name(self): test_pub = base64.b64decode( "ARgAAQALAAUAcgAAABAAFAALCAAAAAAAAQDJBIF+SxeEt8TAwcnMZIvJWs3luBARcI" "HXC7I/XH7ZXbwLyispm/tpvhRw0w60JbwF4om1LbApQbG9cWR7AOi3ykv5bOgszsIG" "DOYJNfWuylW2uQBvMPEeF+ysrCjFTl5HOhXEpaz+E//juoKS2Jh9zYr2kt8rnGAJyj" "a10LUsYNt4h6eyeLVrsZIckkKP4tZwPOokfdX+6YCtGy5Y1buTvBSGNWa+VGo6hZVD" "649mg6EHyv0geSHXojx0Iqjsl/NQXzOCvyuaf6CBu9pkiIZCePlrl2uD1tXEdX0ipB" "B9Fppc/5cJQ2NyJOuvi4MUK5y38QpwnZwd4Utr2WdyEPoF" ) test_pub_correct_name = ( "000b347dbfebe5bdbc55f6782a3cba91610f9d1b554a1aef07b4db28cf36da9390" "09" ) new_name = get_tpm2b_public_name(test_pub) self.assertEqual(new_name, test_pub_correct_name) def test_get_tpm2b_public_object_attributes(self): test_pub = base64.b64decode( "ARgAAQALAAUAcgAAABAAFAALCAAAAAAAAQDJBIF+SxeEt8TAwcnMZIvJWs3luBARcI" "HXC7I/XH7ZXbwLyispm/tpvhRw0w60JbwF4om1LbApQbG9cWR7AOi3ykv5bOgszsIG" "DOYJNfWuylW2uQBvMPEeF+ysrCjFTl5HOhXEpaz+E//juoKS2Jh9zYr2kt8rnGAJyj" "a10LUsYNt4h6eyeLVrsZIckkKP4tZwPOokfdX+6YCtGy5Y1buTvBSGNWa+VGo6hZVD" "649mg6EHyv0geSHXojx0Iqjsl/NQXzOCvyuaf6CBu9pkiIZCePlrl2uD1tXEdX0ipB" "B9Fppc/5cJQ2NyJOuvi4MUK5y38QpwnZwd4Utr2WdyEPoF" ) expected_attributes = ( OA_RESTRICTED | OA_USERWITHAUTH | OA_SIGN_ENCRYPT | OA_FIXEDTPM | OA_FIXEDPARENT | OA_SENSITIVEDATAORIGIN ) new_attributes = get_tpm2b_public_object_attributes(test_pub) self.assertEqual(new_attributes, 
expected_attributes) # Testing tpm2b_public_from_pubkey # These example certificates were standard EK certificates from a valid TPM, # so these fields are selected according to # TCG EK Credential Profile For TPM Family 2.0 # Level 0, Version 2.3, Revision 2" # Both are from the Low Ranges # The RSA set is according to Template L-1, section B.3.3 # The EC set is according to Template L-2, section B.3.4 def test_tpm2b_public_from_pubkey_rsa(self): test_rsa_cert = base64.b64decode( "MIIEnDCCA4SgAwIBAgIEL8wtHjANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCRE" "UxITAfBgNVBAoMGEluZmluZW9uIFRlY2hub2xvZ2llcyBBRzEaMBgGA1UECwwRT1BU" "SUdBKFRNKSBUUE0yLjAxNTAzBgNVBAMMLEluZmluZW9uIE9QVElHQShUTSkgUlNBIE" "1hbnVmYWN0dXJpbmcgQ0EgMDM1MB4XDTE4MDMwMTE0MTkzM1oXDTMzMDMwMTE0MTkz" "M1owADCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALaIriXJCSUKdvWRDY" "dRbtdTK8i7eCJwHV8NhQ8Cor8NKoVmrnOdDhGqXlrKyJTueA9D2P4yQlWZI+tD9PCV" "CHCQiGmqxxHQXgzCzx6z+57HTUNPDi16K6ZFPNs3UkhAQxeGLOy36XD35zpfgadtvc" "lxJC8L+UgKfXVAM3/oMj4cDXa4cbVKhlfIQXD9OhcNjvESPWVFw0dj7Q6HM0jEkezM" "ew5sJ3I+LET1cIIhUlXvX8fWLu2MHx9+6LIBjkN8SuMLjKBQZjh+rEbHoFuG7Ib9pN" "ucrPAycid4EBBQB65j9irZ8C+ZdUUkKM5hsDhcenm/0AdfqAGXsFtsEa8DuDECAwEA" "AaOCAZgwggGUMFsGCCsGAQUFBwEBBE8wTTBLBggrBgEFBQcwAoY/aHR0cDovL3BraS" "5pbmZpbmVvbi5jb20vT3B0aWdhUnNhTWZyQ0EwMzUvT3B0aWdhUnNhTWZyQ0EwMzUu" "Y3J0MA4GA1UdDwEB/wQEAwIAIDBYBgNVHREBAf8ETjBMpEowSDEWMBQGBWeBBQIBDA" "tpZDo0OTQ2NTgwMDEaMBgGBWeBBQICDA9TTEIgOTY3MCBUUE0yLjAxEjAQBgVngQUC" "AwwHaWQ6MDczZjAMBgNVHRMBAf8EAjAAMFAGA1UdHwRJMEcwRaBDoEGGP2h0dHA6Ly" "9wa2kuaW5maW5lb24uY29tL09wdGlnYVJzYU1mckNBMDM1L09wdGlnYVJzYU1mckNB" "MDM1LmNybDAVBgNVHSAEDjAMMAoGCCqCFABEARQBMB8GA1UdIwQYMBaAFM53FTtuEQ" "ykrilxoJhR70mTJiAqMBAGA1UdJQQJMAcGBWeBBQgBMCEGA1UdCQQaMBgwFgYFZ4EF" "AhAxDTALDAMyLjACAQACAXQwDQYJKoZIhvcNAQELBQADggEBAIJ7pvW3yj2wAHO1fq" "zOeKg/xQjBMZ2hdpqVmhc+gU7F7zCMF85iWodISkThp9aa6p7VptkNcp5BNE1ojx+3" "1aJZRAFTCV0b0QxKXELTVsQLvBVmKGtFuaP3FPDVJYIOnQtb8uF+2LduF5P9K6oXdF" 
"TFuh1kG8GU/UUnltA7h6u2qhnj5uvFEDz7pxX1lt/GbI1nTYB+0SYtveIglpFyZK71" "0FH9UAvvR8byEbK+adE+teBUOexdXhTC1ZmPZmTvHSqmeRV3UTZFZRnyOTBnN8QlN0" "pMVmwFTak931PqxV0xOSXkMcvTre39jzkhEJ+VMb5EOMFfsVn+b4snob9jank=" ) correct_rsa_obj = base64.b64decode( "AToAAQALAAMAsgAgg3GXZ0SEs/gakMyNRqXXJP1S124GUgtk8qHaGzMUaaoABgCAAE" "MAEAgAAAAAAAEAtoiuJckJJQp29ZENh1Fu11MryLt4InAdXw2FDwKivw0qhWauc50O" "EapeWsrIlO54D0PY/jJCVZkj60P08JUIcJCIaarHEdBeDMLPHrP7nsdNQ08OLXorpk" "U82zdSSEBDF4Ys7LfpcPfnOl+Bp229yXEkLwv5SAp9dUAzf+gyPhwNdrhxtUqGV8hB" "cP06Fw2O8RI9ZUXDR2PtDoczSMSR7Mx7Dmwncj4sRPVwgiFSVe9fx9Yu7YwfH37osg" "GOQ3xK4wuMoFBmOH6sRsegW4bshv2k25ys8DJyJ3gQEFAHrmP2KtnwL5l1RSQozmGw" "OFx6eb/QB1+oAZewW2wRrwO4MQ==" ) test_rsa_cert = load_der_x509_certificate( test_rsa_cert, backend=default_backend() ) new_rsa_obj = ek_low_tpm2b_public_from_pubkey( test_rsa_cert.public_key() ) self.assertEqual(new_rsa_obj.hex(), correct_rsa_obj.hex()) def test_tpm2b_public_from_pubkey_ec(self): test_ec_cert = base64.b64decode( "MIIDEDCCAragAwIBAgIEcYSJiTAKBggqhkjOPQQDAjCBgzELMAkGA1UEBhMCREUxIT" "AfBgNVBAoMGEluZmluZW9uIFRlY2hub2xvZ2llcyBBRzEaMBgGA1UECwwRT1BUSUdB" "KFRNKSBUUE0yLjAxNTAzBgNVBAMMLEluZmluZW9uIE9QVElHQShUTSkgRUNDIE1hbn" "VmYWN0dXJpbmcgQ0EgMDM1MB4XDTE4MDMwMTE0MTkxNloXDTMzMDMwMTE0MTkxNlow" "ADBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABNK9AtBnW5bwNG2ZIWDrM8w/h03Ht2" "lp3MUosV05DeBHWZEZfmKsHMBqpqDsIKkEgclQawA4BFR5YUvSdrSUDTGjggGYMIIB" "lDBbBggrBgEFBQcBAQRPME0wSwYIKwYBBQUHMAKGP2h0dHA6Ly9wa2kuaW5maW5lb2" "4uY29tL09wdGlnYUVjY01mckNBMDM1L09wdGlnYUVjY01mckNBMDM1LmNydDAOBgNV" "HQ8BAf8EBAMCAAgwWAYDVR0RAQH/BE4wTKRKMEgxFjAUBgVngQUCAQwLaWQ6NDk0Nj" "U4MDAxGjAYBgVngQUCAgwPU0xCIDk2NzAgVFBNMi4wMRIwEAYFZ4EFAgMMB2lkOjA3" "M2YwDAYDVR0TAQH/BAIwADBQBgNVHR8ESTBHMEWgQ6BBhj9odHRwOi8vcGtpLmluZm" "luZW9uLmNvbS9PcHRpZ2FFY2NNZnJDQTAzNS9PcHRpZ2FFY2NNZnJDQTAzNS5jcmww" "FQYDVR0gBA4wDDAKBggqghQARAEUATAfBgNVHSMEGDAWgBQ2WY8i7ITDxPZA0hwWfQ" "uRE3uQpDAQBgNVHSUECTAHBgVngQUIATAhBgNVHQkEGjAYMBYGBWeBBQIQMQ0wCwwD" 
"Mi4wAgEAAgF0MAoGCCqGSM49BAMCA0gAMEUCIQCdCv3+G+KsM4OiT3SgKqvE8r5ktD" "I5elC9xTmS9mDA3AIgcckalMvQVTst1pGMEyAI+OoXTnYA1sBRm27WJ6sZag8=" ) correct_ec_obj = base64.b64decode( "AHoAIwALAAMAsgAgg3GXZ0SEs/gakMyNRqXXJP1S124GUgtk8qHaGzMUaaoABgCAAE" "MAEAADABAAINK9AtBnW5bwNG2ZIWDrM8w/h03Ht2lp3MUosV05DeBHACBZkRl+Yqwc" "wGqmoOwgqQSByVBrADgEVHlhS9J2tJQNMQ==" ) test_ec_cert = load_der_x509_certificate( test_ec_cert, backend=default_backend() ) new_ec_obj = ek_low_tpm2b_public_from_pubkey(test_ec_cert.public_key()) self.assertEqual(new_ec_obj.hex(), correct_ec_obj.hex()) def test_pubkey_from_tpm2b_public_rsa(self): test_rsa_cert = base64.b64decode( "MIIEnDCCA4SgAwIBAgIEL8wtHjANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCRE" "UxITAfBgNVBAoMGEluZmluZW9uIFRlY2hub2xvZ2llcyBBRzEaMBgGA1UECwwRT1BU" "SUdBKFRNKSBUUE0yLjAxNTAzBgNVBAMMLEluZmluZW9uIE9QVElHQShUTSkgUlNBIE" "1hbnVmYWN0dXJpbmcgQ0EgMDM1MB4XDTE4MDMwMTE0MTkzM1oXDTMzMDMwMTE0MTkz" "M1owADCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALaIriXJCSUKdvWRDY" "dRbtdTK8i7eCJwHV8NhQ8Cor8NKoVmrnOdDhGqXlrKyJTueA9D2P4yQlWZI+tD9PCV" "CHCQiGmqxxHQXgzCzx6z+57HTUNPDi16K6ZFPNs3UkhAQxeGLOy36XD35zpfgadtvc" "lxJC8L+UgKfXVAM3/oMj4cDXa4cbVKhlfIQXD9OhcNjvESPWVFw0dj7Q6HM0jEkezM" "ew5sJ3I+LET1cIIhUlXvX8fWLu2MHx9+6LIBjkN8SuMLjKBQZjh+rEbHoFuG7Ib9pN" "ucrPAycid4EBBQB65j9irZ8C+ZdUUkKM5hsDhcenm/0AdfqAGXsFtsEa8DuDECAwEA" "AaOCAZgwggGUMFsGCCsGAQUFBwEBBE8wTTBLBggrBgEFBQcwAoY/aHR0cDovL3BraS" "5pbmZpbmVvbi5jb20vT3B0aWdhUnNhTWZyQ0EwMzUvT3B0aWdhUnNhTWZyQ0EwMzUu" "Y3J0MA4GA1UdDwEB/wQEAwIAIDBYBgNVHREBAf8ETjBMpEowSDEWMBQGBWeBBQIBDA" "tpZDo0OTQ2NTgwMDEaMBgGBWeBBQICDA9TTEIgOTY3MCBUUE0yLjAxEjAQBgVngQUC" "AwwHaWQ6MDczZjAMBgNVHRMBAf8EAjAAMFAGA1UdHwRJMEcwRaBDoEGGP2h0dHA6Ly" "9wa2kuaW5maW5lb24uY29tL09wdGlnYVJzYU1mckNBMDM1L09wdGlnYVJzYU1mckNB" "MDM1LmNybDAVBgNVHSAEDjAMMAoGCCqCFABEARQBMB8GA1UdIwQYMBaAFM53FTtuEQ" "ykrilxoJhR70mTJiAqMBAGA1UdJQQJMAcGBWeBBQgBMCEGA1UdCQQaMBgwFgYFZ4EF" "AhAxDTALDAMyLjACAQACAXQwDQYJKoZIhvcNAQELBQADggEBAIJ7pvW3yj2wAHO1fq" 
"zOeKg/xQjBMZ2hdpqVmhc+gU7F7zCMF85iWodISkThp9aa6p7VptkNcp5BNE1ojx+3" "1aJZRAFTCV0b0QxKXELTVsQLvBVmKGtFuaP3FPDVJYIOnQtb8uF+2LduF5P9K6oXdF" "TFuh1kG8GU/UUnltA7h6u2qhnj5uvFEDz7pxX1lt/GbI1nTYB+0SYtveIglpFyZK71" "0FH9UAvvR8byEbK+adE+teBUOexdXhTC1ZmPZmTvHSqmeRV3UTZFZRnyOTBnN8QlN0" "pMVmwFTak931PqxV0xOSXkMcvTre39jzkhEJ+VMb5EOMFfsVn+b4snob9jank=" ) test_rsa_cert = load_der_x509_certificate( test_rsa_cert, backend=default_backend() ) correct_rsa_obj = base64.b64decode( "AToAAQALAAMAsgAgg3GXZ0SEs/gakMyNRqXXJP1S124GUgtk8qHaGzMUaaoABgCAAE" "MAEAgAAAAAAAEAtoiuJckJJQp29ZENh1Fu11MryLt4InAdXw2FDwKivw0qhWauc50O" "EapeWsrIlO54D0PY/jJCVZkj60P08JUIcJCIaarHEdBeDMLPHrP7nsdNQ08OLXorpk" "U82zdSSEBDF4Ys7LfpcPfnOl+Bp229yXEkLwv5SAp9dUAzf+gyPhwNdrhxtUqGV8hB" "cP06Fw2O8RI9ZUXDR2PtDoczSMSR7Mx7Dmwncj4sRPVwgiFSVe9fx9Yu7YwfH37osg" "GOQ3xK4wuMoFBmOH6sRsegW4bshv2k25ys8DJyJ3gQEFAHrmP2KtnwL5l1RSQozmGw" "OFx6eb/QB1+oAZewW2wRrwO4MQ==" ) new_rsa_pubkey = pubkey_from_tpm2b_public(correct_rsa_obj) correct_rsa_pubkey = test_rsa_cert.public_key() new_rsa_pubkey_n = new_rsa_pubkey.public_numbers() correct_rsa_pubkey_n = correct_rsa_pubkey.public_numbers() self.assertEqual(new_rsa_pubkey.key_size, correct_rsa_pubkey.key_size) self.assertEqual(new_rsa_pubkey_n.e, correct_rsa_pubkey_n.e) self.assertEqual(new_rsa_pubkey_n.n, correct_rsa_pubkey_n.n) def test_pubkey_from_tpm2b_public_rsa_without_encryption(self): new_rsa_pubkey = pubkey_from_tpm2b_public( bytes.fromhex( "01180001000b00050072000000100014000b0800000000000100cac43903c6" "16bba049ce413c961c901b56181392c7999e672e6e5ecdd7a625d4702c3d78" "deac81e1372b0ca1894ac0f16add636bb53d3d5b112d8f3b169ccadef6bac0" "d909067d1ff81dae34b26cd538a52fa20ee7bbf3b16214417d35bde80cbb0f" "1b3267fd6211ecfb652f771f7eaeff560b91ef2f374ab1d37bba5a7a1c7cd4" "4961cdd7351ee060947f43244f45fc42ea6a1ea783aaa18dc8cce90d9a97f8" "da09e72637a0167fdbf4cc0d09f2f752d864d45bd34ed387acc0bcddca26c6" "1ebe9056013a35cd1d8011336af93579afa424fe50fd7e2b03270518505710" 
"82fcae891e2897e3117fd28bd03d2d2ffdfcfa0ff95f76af9383e3c9e59fe4" "dde753" ) ) new_rsa_pubkey_n = new_rsa_pubkey.public_numbers() self.assertEqual(new_rsa_pubkey.key_size, 2048) self.assertEqual(new_rsa_pubkey_n.e, 65537) self.assertEqual( str(new_rsa_pubkey_n.n), "255968986296679270326283402717529063492526907681140893873754141432" "890531031973586937300971300465026177966018575012122367284728088154" "485873651193407172159946655006581809152369460009001515677703036255" "837234635576083087037905135410736640524495731191518154258439490758" "531740360767515943902821573272461751306668217217601399605319344343" "524504419559281243744525835687758392857402638332592577865592671234" "679107983328133731582503713366603521336278457142403979969779706740" "010077961630324526931687863526905140593203113247551679416434551326" "587069716966112452602019925398408142602185862884082705845069125895" "71106286823536420841299", ) def test_pubkey_from_tpm2b_public_ec(self): test_ec_cert = base64.b64decode( "MIIDEDCCAragAwIBAgIEcYSJiTAKBggqhkjOPQQDAjCBgzELMAkGA1UEBhMCREUxIT" "AfBgNVBAoMGEluZmluZW9uIFRlY2hub2xvZ2llcyBBRzEaMBgGA1UECwwRT1BUSUdB" "KFRNKSBUUE0yLjAxNTAzBgNVBAMMLEluZmluZW9uIE9QVElHQShUTSkgRUNDIE1hbn" "VmYWN0dXJpbmcgQ0EgMDM1MB4XDTE4MDMwMTE0MTkxNloXDTMzMDMwMTE0MTkxNlow" "ADBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABNK9AtBnW5bwNG2ZIWDrM8w/h03Ht2" "lp3MUosV05DeBHWZEZfmKsHMBqpqDsIKkEgclQawA4BFR5YUvSdrSUDTGjggGYMIIB" "lDBbBggrBgEFBQcBAQRPME0wSwYIKwYBBQUHMAKGP2h0dHA6Ly9wa2kuaW5maW5lb2" "4uY29tL09wdGlnYUVjY01mckNBMDM1L09wdGlnYUVjY01mckNBMDM1LmNydDAOBgNV" "HQ8BAf8EBAMCAAgwWAYDVR0RAQH/BE4wTKRKMEgxFjAUBgVngQUCAQwLaWQ6NDk0Nj" "U4MDAxGjAYBgVngQUCAgwPU0xCIDk2NzAgVFBNMi4wMRIwEAYFZ4EFAgMMB2lkOjA3" "M2YwDAYDVR0TAQH/BAIwADBQBgNVHR8ESTBHMEWgQ6BBhj9odHRwOi8vcGtpLmluZm" "luZW9uLmNvbS9PcHRpZ2FFY2NNZnJDQTAzNS9PcHRpZ2FFY2NNZnJDQTAzNS5jcmww" "FQYDVR0gBA4wDDAKBggqghQARAEUATAfBgNVHSMEGDAWgBQ2WY8i7ITDxPZA0hwWfQ" "uRE3uQpDAQBgNVHSUECTAHBgVngQUIATAhBgNVHQkEGjAYMBYGBWeBBQIQMQ0wCwwD" 
"Mi4wAgEAAgF0MAoGCCqGSM49BAMCA0gAMEUCIQCdCv3+G+KsM4OiT3SgKqvE8r5ktD" "I5elC9xTmS9mDA3AIgcckalMvQVTst1pGMEyAI+OoXTnYA1sBRm27WJ6sZag8=" ) correct_ec_obj = base64.b64decode( "AHoAIwALAAMAsgAgg3GXZ0SEs/gakMyNRqXXJP1S124GUgtk8qHaGzMUaaoABgCAAE" "MAEAADABAAINK9AtBnW5bwNG2ZIWDrM8w/h03Ht2lp3MUosV05DeBHACBZkRl+Yqwc" "wGqmoOwgqQSByVBrADgEVHlhS9J2tJQNMQ==" ) test_ec_cert = load_der_x509_certificate( test_ec_cert, backend=default_backend() ) new_ec_pubkey = pubkey_from_tpm2b_public(correct_ec_obj) correct_ec_pubkey = test_ec_cert.public_key() new_ec_pubkey_n = new_ec_pubkey.public_numbers() correct_ec_pubkey_n = correct_ec_pubkey.public_numbers() self.assertEqual( new_ec_pubkey_n.curve.name, correct_ec_pubkey_n.curve.name ) self.assertEqual(new_ec_pubkey_n.x, correct_ec_pubkey_n.x) self.assertEqual(new_ec_pubkey_n.y, correct_ec_pubkey_n.y) def test_pubkey_from_tpm2b_public_ec_without_encryption(self): new_ec_pubkey = pubkey_from_tpm2b_public( bytes.fromhex( "00580023000b00050072000000100018000b000300100020c74568135840f4" "97ad575ebeabe6d01f3f098b5a768111ab423d5f26b259a4f000205ec0f586" "b53e348bc916b43a015e6ceefd947d685e59ff65357499f2c4788cba" ) ) new_ec_pubkey_n = new_ec_pubkey.public_numbers() self.assertEqual(new_ec_pubkey_n.curve.name, "secp256r1") self.assertEqual( str(new_ec_pubkey_n.x), "901328876186929754842544537316510944104832864446891914011641755043" "34705501424", ) self.assertEqual( str(new_ec_pubkey_n.y), "428583369628394219355595706223697775291854911504755996137787899503" "32157332666", ) def test_object_attributes_description(self): with self.subTest(attrs="sign-encrypt"): val = object_attributes_description((OA_SIGN_ENCRYPT)) self.assertEqual(val, "sign-encrypt") with self.subTest(attrs="<empty>"): val = object_attributes_description((0)) self.assertEqual(val, "") with self.subTest(attrs="<all>"): val = object_attributes_description( ( OA_FIXEDTPM | OA_STCLEAR | OA_FIXEDPARENT | OA_SENSITIVEDATAORIGIN | OA_USERWITHAUTH | OA_ADMINWITHPOLICY | OA_NODA | 
OA_ENCRYPTEDDUPLICATION | OA_RESTRICTED | OA_DECRYPT | OA_SIGN_ENCRYPT ) ) self.assertEqual( val, "fixed-tpm | st-clear | fixed-parent | sensitive-data-origin | " "user-with-auth | admin-with-policy | no-da | " "encrypted-duplication | restricted | decrypt | sign-encrypt", ) if __name__ == "__main__": unittest.main()
52.773006
80
0.738084
987
17,204
12.519757
0.273556
0.018694
0.018937
0.015295
0.759408
0.73772
0.705754
0.674678
0.669337
0.66084
0
0.166137
0.213148
17,204
325
81
52.935385
0.746694
0.025401
0
0.506803
0
0
0.537369
0.525012
0
0
0
0
0.064626
1
0.030612
false
0
0.017007
0
0.05102
0
0
0
1
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
486006912fb29109a08932b398b278205c067141
8,388
py
Python
expense_manager/app.py
sonalimahajan12/Automation-scripts
9bcf8c71bc103ff6a3392a4528220781446b5b7a
[ "MIT" ]
496
2020-10-07T15:45:34.000Z
2022-03-29T16:40:30.000Z
expense_manager/app.py
sonalimahajan12/Automation-scripts
9bcf8c71bc103ff6a3392a4528220781446b5b7a
[ "MIT" ]
550
2020-10-07T15:31:53.000Z
2022-03-20T22:00:38.000Z
expense_manager/app.py
sonalimahajan12/Automation-scripts
9bcf8c71bc103ff6a3392a4528220781446b5b7a
[ "MIT" ]
388
2020-10-07T15:45:21.000Z
2022-03-27T14:54:46.000Z
import db import tkinter as tk from tkinter.ttk import Button, Frame, Entry, Label LARGE_FONT = ("Verdana", 32) class ExpenseManager: def __init__(self, master): self.frame = Frame(master) self.frame.pack() self.main_window() # display function calls for database update deletion and listing added or deleted# def added(self, boxaile): myLabel = Label(boxaile, text="The value has been inserted") myLabel.grid(row=4, column=0) def delete(self, boxaile): myLabel = Label(boxaile, text="The value was deleted") myLabel.grid(row=4, column=0) def display_all(self, database): select_all = database return select_all def insert(self, database, val1, val2, val3): goods = val1.get() price = val2.get() date = val3.get() insertion = database(goods, price, date) return insertion def find_expense(self, database, val1, val2): goods = val1.get() price = val2.get() find = database(goods, price) return find def delete_expense(self, database, val1, val2): goods = val1.get() price = val2.get() delete = database(goods, price) return delete # MAIN WINDOW def main_window(self): button1 = Button(self.frame, text="Groceries expenses", command=self.groceries) button1.pack() button2 = Button(self.frame, text="Household expenses", command=self.household) button2.pack() button3 = Button(self.frame, text="Entertainment expenses", command=self.entertainment) button3.pack() button4 = Button(self.frame, text="Other expenses", command=self.other) button4.pack() button5 = Button(self.frame, text="EXIT", command=exit) button5.pack() # INSERT VALUES def groceries(self): top = tk.Toplevel(self.frame) top.title('Groceries expenses') Label(top, text="Name of good").grid(row=1, column=0, sticky=tk.W, pady=2) Label(top, text="Price").grid(row=2, column=0, sticky=tk.W, pady=2) Label(top, text="Date of purchase").grid(row=3, column=0, sticky=tk.W, pady=2) e1 = Entry(top) e1.grid(row=1, column=1, sticky=tk.W, pady=2) e2 = Entry(top) e2.grid(row=2, column=1, sticky=tk.W, pady=2) e3 = Entry(top) e3.grid(row=3, 
column=1, sticky=tk.W, pady=2) text = tk.Text(top, width=40, height=10) text.grid(row=5, column=1, columnspan=2) # BUTTONS B1 = Button(top, text="Insert Values", command=lambda: (self.insert(db.insert_groceries, e1, e2, e3), self.added(top))) B1.grid(row=1, column=2) B2 = Button(top, text="Select All", command=lambda: (text.delete(1.0, tk.END), text.insert(tk.END, self.display_all(db.select_all_groceries())))) B2.grid(row=2, column=2) B3 = Button(top, text="Find value", command=lambda: (text.delete(1.0, tk.END), text.insert(tk.END, self.find_expense(db.select_grocery, e1, e2)))) B3.grid(row=2, column=3) B3 = Button(top, text="Delete expense", command=lambda: (self.delete_expense(db.delete_grocery, e1, e2), self.delete(top))) B3.grid(row=4, column=2) B5 = Button(top, text="Exit", command=exit) B5.grid(row=4, column=3) def household(self): top = tk.Toplevel(self.frame) top.title('Household expenses') Label(top, text="Name of good").grid(row=1, column=0, sticky=tk.W, pady=2) Label(top, text="Price").grid(row=2, column=0, sticky=tk.W, pady=2) Label(top, text="Date of purchase").grid(row=3, column=0, sticky=tk.W, pady=2) e1 = Entry(top) e1.grid(row=1, column=1, sticky=tk.W, pady=2) e2 = Entry(top) e2.grid(row=2, column=1, sticky=tk.W, pady=2) e3 = Entry(top) e3.grid(row=3, column=1, sticky=tk.W, pady=2) text = tk.Text(top, width=40, height=10) text.grid(row=5, column=1, columnspan=2) # BUTTONS### B1 = Button(top, text="Insert Values", command=lambda: (self.insert(db.insert_household, e1, e2, e3), self.added(top))) B1.grid(row=1, column=2) B2 = Button(top, text="Select All", command=lambda: (text.delete(1.0, tk.END), text.insert(tk.END, self.display_all(db.select_all_household())))) B2.grid(row=2, column=2) B3 = Button(top, text="Find value", command=lambda: (text.delete(1.0, tk.END), text.insert(tk.END, self.find_expense(db.select_household, e1, e2)))) B3.grid(row=2, column=3) B3 = Button(top, text="Delete expense", command=lambda: 
(self.delete_expense(db.delete_household, e1, e2), self.delete(top))) B3.grid(row=4, column=2) B5 = Button(top, text="Exit", command=exit) B5.grid(row=4, column=3) def entertainment(self): top = tk.Toplevel(self.frame) top.title('Entertainment expenses') Label(top, text="Name of good").grid(row=1, column=0, sticky=tk.W, pady=2) Label(top, text="Price").grid(row=2, column=0, sticky=tk.W, pady=2) Label(top, text="Date of purchase").grid(row=3, column=0, sticky=tk.W, pady=2) e1 = Entry(top) e1.grid(row=1, column=1, sticky=tk.W, pady=2) e2 = Entry(top) e2.grid(row=2, column=1, sticky=tk.W, pady=2) e3 = Entry(top) e3.grid(row=3, column=1, sticky=tk.W, pady=2) text = tk.Text(top, width=40, height=10) text.grid(row=5, column=1, columnspan=2) # BUTTONS B1 = Button(top, text="Insert Values", command=lambda: (self.insert(db.insert_entertrainment, e1, e2, e3), self.added(top))) B1.grid(row=1, column=2) B2 = Button(top, text="Select All", command=lambda: (text.delete(1.0, tk.END), text.insert(tk.END, self.display_all(db.select_all_entertrainment())))) B2.grid(row=2, column=2) B3 = Button(top, text="Find value", command=lambda: (text.delete(1.0, tk.END), text.insert(tk.END, self.find_expense(db.select_entertainment, e1, e2)))) B3.grid(row=2, column=3) B3 = Button(top, text="Delete expense", command=lambda: (self.delete_expense(db.delete_entertainment, e1, e2), self.delete(top))) B3.grid(row=4, column=2) B5 = Button(top, text="Exit", command=exit) B5.grid(row=4, column=3) def other(self): top = tk.Toplevel(self.frame) top.title('Entertainment expenses') Label(top, text="Name of good").grid(row=1, column=0, sticky=tk.W, pady=2) Label(top, text="Price").grid(row=2, column=0, sticky=tk.W, pady=2) Label(top, text="Date of purchase").grid(row=3, column=0, sticky=tk.W, pady=2) e1 = Entry(top) e1.grid(row=1, column=1, sticky=tk.W, pady=2) e2 = Entry(top) e2.grid(row=2, column=1, sticky=tk.W, pady=2) e3 = Entry(top) e3.grid(row=3, column=1, sticky=tk.W, pady=2) text = tk.Text(top, 
width=40, height=10) text.grid(row=5, column=1, columnspan=2) # BUTTONS### B1 = Button(top, text="Insert Values", command=lambda: (self.insert(db.insert_other, e1, e2, e3), self.added(top))) B1.grid(row=1, column=2) B2 = Button(top, text="Select All", command=lambda: ( text.delete(1.0, tk.END), text.insert(tk.END, self.display_all(db.select_all_other())))) B2.grid(row=2, column=2) B3 = Button(top, text="Find value", command=lambda: ( text.delete(1.0, tk.END), text.insert(tk.END, self.find_expense(db.select_other, e1, e2)))) B3.grid(row=2, column=3) B3 = Button(top, text="Delete expense", command=lambda: (self.delete_expense(db.delete_other, e1, e2), self.delete(top))) B3.grid(row=4, column=2) B5 = Button(top, text="Exit", command=exit) B5.grid(row=4, column=3) def main(): # db.create_tables(connection) root = tk.Tk() root.geometry('600x500') root.title("Expense Manager") ExpenseManager(root) root.mainloop() main()
36.951542
112
0.587506
1,214
8,388
4.022241
0.096376
0.071677
0.044235
0.063895
0.754045
0.754045
0.74913
0.73889
0.707762
0.707762
0
0.045579
0.257153
8,388
226
113
37.115044
0.738084
0.019909
0
0.573171
0
0
0.069348
0
0
0
0
0
0
1
0.079268
false
0
0.018293
0
0.128049
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4869ae3220d88a488bed1f55f55c9c2f073c3970
15,119
py
Python
tests/notifications_python_client/test_notifications_api_client.py
currycoder/notifications-python-client
3d67a48e1b792e061739ec79a69505f8086b7455
[ "MIT" ]
null
null
null
tests/notifications_python_client/test_notifications_api_client.py
currycoder/notifications-python-client
3d67a48e1b792e061739ec79a69505f8086b7455
[ "MIT" ]
null
null
null
tests/notifications_python_client/test_notifications_api_client.py
currycoder/notifications-python-client
3d67a48e1b792e061739ec79a69505f8086b7455
[ "MIT" ]
null
null
null
from __future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__ import absolute_import import base64 import io from future import standard_library from mock import Mock standard_library.install_aliases() from tests.conftest import TEST_HOST from notifications_python_client import prepare_upload def test_get_notification_by_id(notifications_client, rmock): endpoint = "{0}/v2/notifications/{1}".format(TEST_HOST, "123") rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) notifications_client.get_notification_by_id(123) assert rmock.called def test_get_received_texts(notifications_client, rmock): endpoint = "{0}/v2/received-text-messages".format(TEST_HOST) rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) notifications_client.get_received_texts() assert rmock.called def test_get_received_texts_older_than(notifications_client, rmock): endpoint = "{0}/v2/received-text-messages?older_than={1}".format(TEST_HOST, "older_id") rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) notifications_client.get_received_texts(older_than="older_id") assert rmock.called def test_get_all_received_texts_iterator_calls_get_received_texts(notifications_client, rmock): endpoint = "{0}/v2/received-text-messages".format(TEST_HOST) rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) list(notifications_client.get_received_texts_iterator()) assert rmock.called def test_get_all_notifications_by_type_and_status(notifications_client, rmock): endpoint = "{0}/v2/notifications?status={1}&template_type={2}".format(TEST_HOST, "status", "type") rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) notifications_client.get_all_notifications("status", "type") assert rmock.called def test_get_all_notifications_by_type(notifications_client, rmock): endpoint = "{0}/v2/notifications?template_type={1}".format(TEST_HOST, "type") 
rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) notifications_client.get_all_notifications(template_type="type") assert rmock.called def test_get_all_notifications_by_reference(notifications_client, rmock): endpoint = "{0}/v2/notifications?reference={1}".format(TEST_HOST, "reference") rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) notifications_client.get_all_notifications(reference="reference") assert rmock.called def test_get_all_notifications_by_older_than(notifications_client, rmock): endpoint = "{0}/v2/notifications?older_than={1}".format(TEST_HOST, "older_than") rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) notifications_client.get_all_notifications(older_than="older_than") assert rmock.called def test_get_all_notifications_by_status(notifications_client, rmock): endpoint = "{0}/v2/notifications?status={1}".format(TEST_HOST, "status") rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) notifications_client.get_all_notifications(status="status") assert rmock.called def test_get_all_notifications(notifications_client, rmock): endpoint = "{0}/v2/notifications".format(TEST_HOST) rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) notifications_client.get_all_notifications() assert rmock.called def test_create_sms_notification(notifications_client, rmock): endpoint = "{0}/v2/notifications/sms".format(TEST_HOST) rmock.request( "POST", endpoint, json={"status": "success"}, status_code=200) notifications_client.send_sms_notification( phone_number="07700 900000", template_id="456" ) assert rmock.last_request.json() == { 'template_id': '456', 'phone_number': '07700 900000' } def test_create_sms_notification_with_personalisation(notifications_client, rmock): endpoint = "{0}/v2/notifications/sms".format(TEST_HOST) rmock.request( "POST", endpoint, json={"status": "success"}, status_code=200) notifications_client.send_sms_notification( 
phone_number="07700 900000", template_id="456", personalisation={'name': 'chris'} ) assert rmock.last_request.json() == { 'template_id': '456', 'phone_number': '07700 900000', 'personalisation': {'name': 'chris'} } def test_create_sms_notification_with_sms_sender_id(notifications_client, rmock): endpoint = "{0}/v2/notifications/sms".format(TEST_HOST) rmock.request( "POST", endpoint, json={"status": "success"}, status_code=200) notifications_client.send_sms_notification( phone_number="07700 900000", template_id="456", sms_sender_id="789" ) assert rmock.last_request.json() == { 'template_id': '456', 'phone_number': '07700 900000', 'sms_sender_id': '789' } def test_create_email_notification(notifications_client, rmock): endpoint = "{0}/v2/notifications/email".format(TEST_HOST) rmock.request( "POST", endpoint, json={"status": "success"}, status_code=200) notifications_client.send_email_notification( email_address="to@example.com", template_id="456") assert rmock.last_request.json() == { 'template_id': '456', 'email_address': 'to@example.com' } def test_create_email_notification_with_email_reply_to_id(notifications_client, rmock): endpoint = "{0}/v2/notifications/email".format(TEST_HOST) rmock.request( "POST", endpoint, json={"status": "success"}, status_code=200) notifications_client.send_email_notification( email_address="to@example.com", template_id="456", email_reply_to_id="789") assert rmock.last_request.json() == { 'template_id': '456', 'email_address': 'to@example.com', 'email_reply_to_id': '789' } def test_create_email_notification_with_personalisation(notifications_client, rmock): endpoint = "{0}/v2/notifications/email".format(TEST_HOST) rmock.request( "POST", endpoint, json={"status": "success"}, status_code=200) notifications_client.send_email_notification( email_address="to@example.com", template_id="456", personalisation={'name': 'chris'} ) assert rmock.last_request.json() == { 'template_id': '456', 'email_address': 'to@example.com', 'personalisation': 
{'name': 'chris'} } def test_create_email_notification_with_document_stream_upload(notifications_client, rmock): endpoint = "{0}/v2/notifications/email".format(TEST_HOST) rmock.request( "POST", endpoint, json={"status": "success"}, status_code=200) if hasattr(io, 'BytesIO'): mock_file = io.BytesIO(b'file-contents') else: mock_file = io.StringIO('file-contents') notifications_client.send_email_notification( email_address="to@example.com", template_id="456", personalisation={ 'name': 'chris', 'doc': prepare_upload(mock_file) } ) assert rmock.last_request.json() == { 'template_id': '456', 'email_address': 'to@example.com', 'personalisation': { 'name': 'chris', 'doc': {'file': 'ZmlsZS1jb250ZW50cw=='} } } def test_create_email_notification_with_document_file_upload(notifications_client, rmock): endpoint = "{0}/v2/notifications/email".format(TEST_HOST) rmock.request( "POST", endpoint, json={"status": "success"}, status_code=200) with open('tests/test_files/test.pdf', 'rb') as f: notifications_client.send_email_notification( email_address="to@example.com", template_id="456", personalisation={ 'name': 'chris', 'doc': prepare_upload(f) } ) assert rmock.last_request.json() == { 'template_id': '456', 'email_address': 'to@example.com', 'personalisation': { 'name': 'chris', 'doc': {'file': 'JVBERi0xLjUgdGVzdAo='} } } def test_create_letter_notification(notifications_client, rmock): endpoint = "{0}/v2/notifications/letter".format(TEST_HOST) rmock.request( "POST", endpoint, json={"status": "success"}, status_code=200) notifications_client.send_letter_notification( template_id="456", personalisation={'address_line_1': 'Foo', 'address_line_2': 'Bar', 'postcode': 'SW1 1AA'} ) assert rmock.last_request.json() == { 'template_id': '456', 'personalisation': { 'address_line_1': 'Foo', 'address_line_2': 'Bar', 'postcode': 'SW1 1AA' } } def test_create_letter_notification_with_reference(notifications_client, rmock): endpoint = "{0}/v2/notifications/letter".format(TEST_HOST) rmock.request( 
"POST", endpoint, json={"status": "success"}, status_code=200) notifications_client.send_letter_notification( template_id="456", personalisation={'address_line_1': 'Foo', 'address_line_2': 'Bar', 'postcode': 'SW1 1AA'}, reference='Baz' ) assert rmock.last_request.json() == { 'template_id': '456', 'personalisation': { 'address_line_1': 'Foo', 'address_line_2': 'Bar', 'postcode': 'SW1 1AA' }, 'reference': 'Baz' } def test_send_precompiled_letter_notification(notifications_client, rmock, mocker): endpoint = "{0}/v2/notifications/letter".format(TEST_HOST) rmock.request( "POST", endpoint, json={"status": "success"}, status_code=200) mock_file = Mock( read=Mock(return_value=b'file_contents'), ) notifications_client.send_precompiled_letter_notification( reference='Baz', pdf_file=mock_file ) assert rmock.last_request.json() == { 'reference': 'Baz', 'content': base64.b64encode(b'file_contents').decode('utf-8') } def test_send_precompiled_letter_notification_sets_postage(notifications_client, rmock, mocker): endpoint = "{0}/v2/notifications/letter".format(TEST_HOST) rmock.request( "POST", endpoint, json={"status": "success"}, status_code=200) mock_file = Mock( read=Mock(return_value=b'file_contents'), ) notifications_client.send_precompiled_letter_notification( reference='Baz', pdf_file=mock_file, postage='first' ) assert rmock.last_request.json() == { 'reference': 'Baz', 'content': base64.b64encode(b'file_contents').decode('utf-8'), 'postage': 'first' } def test_get_all_notifications_iterator_calls_get_notifications(notifications_client, rmock): endpoint = "{0}/v2/notifications".format(TEST_HOST) rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) list(notifications_client.get_all_notifications_iterator()) assert rmock.called def test_get_all_notifications_iterator_stops_if_empty_notification_list_returned( notifications_client, rmock ): responses = [ _generate_response('79f9c6ce-cd6a-4b47-a3e7-41e155f112b0', [1, 2]), 
_generate_response('3e8f2f0a-0f2b-4d1b-8a01-761f14a281bb') ] endpoint = "{0}/v2/notifications".format(TEST_HOST) rmock.request( "GET", endpoint, responses ) list(notifications_client.get_all_notifications_iterator()) assert rmock.call_count == 2 def test_get_all_notifications_iterator_gets_more_notifications_with_correct_id( notifications_client, rmock ): responses = [ _generate_response('79f9c6ce-cd6a-4b47-a3e7-41e155f112b0', [1, 2]), _generate_response('ea179232-3190-410d-b8ab-23dfecdd3157', [3, 4]), _generate_response('3e8f2f0a-0f2b-4d1b-8a01-761f14a281bb') ] endpoint = "{0}/v2/notifications".format(TEST_HOST) rmock.request("GET", endpoint, responses) list(notifications_client.get_all_notifications_iterator()) assert rmock.call_count == 3 def test_get_template(notifications_client, rmock): endpoint = "{0}/v2/template/{1}".format(TEST_HOST, "123") rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) notifications_client.get_template(123) assert rmock.called def test_get_template_version(notifications_client, rmock): endpoint = "{0}/v2/template/{1}/version/{2}".format(TEST_HOST, "123", 1) rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) notifications_client.get_template_version(123, 1) assert rmock.called def test_post_template_preview(notifications_client, rmock): endpoint = "{0}/v2/template/{1}/preview".format(TEST_HOST, "123") rmock.request( "POST", endpoint, json={"status": "success"}, status_code=200) notifications_client.post_template_preview(123, personalisation={'name': 'chris'}) assert rmock.called assert rmock.last_request.json() == { 'personalisation': {'name': 'chris'} } def test_get_all_templates(notifications_client, rmock): endpoint = "{0}/v2/templates".format(TEST_HOST) rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) notifications_client.get_all_templates() assert rmock.called def test_get_all_templates_by_type(notifications_client, rmock): endpoint = 
"{0}/v2/templates?type={1}".format(TEST_HOST, 'type') rmock.request( "GET", endpoint, json={"status": "success"}, status_code=200) notifications_client.get_all_templates('type') assert rmock.called def test_get_pdf_for_letter(notifications_client, rmock): endpoint = "{0}/v2/notifications/{1}/pdf".format(TEST_HOST, "123") rmock.request( "GET", endpoint, content=b'foo', status_code=200) response = notifications_client.get_pdf_for_letter('123') assert response.read() == b'foo' assert rmock.called def _generate_response(next_link_uuid, notifications=[]): return { 'json': { 'notifications': notifications, 'links': { 'next': 'http://localhost:6011/v2/notifications?older_than={}'.format(next_link_uuid) } }, 'status_code': 200 }
28.419173
102
0.655136
1,675
15,119
5.613134
0.096119
0.125292
0.079132
0.074452
0.864603
0.834716
0.805041
0.762604
0.698894
0.675495
0
0.040435
0.214829
15,119
531
103
28.472693
0.751579
0
0
0.62069
0
0
0.191018
0.062107
0
0
0
0
0.081281
1
0.078818
false
0
0.024631
0.002463
0.105911
0.002463
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
487a7e3fa468c05012dd66ee14f67e7ad21bd567
50
py
Python
cacheobj/__init__.py
youknowone/cacheobj
566fa71e5194a29879d880c37bf21b6fa83b1466
[ "BSD-2-Clause-FreeBSD" ]
1
2016-08-01T17:50:11.000Z
2016-08-01T17:50:11.000Z
cacheobj/__init__.py
youknowone/cacheobj
566fa71e5194a29879d880c37bf21b6fa83b1466
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
cacheobj/__init__.py
youknowone/cacheobj
566fa71e5194a29879d880c37bf21b6fa83b1466
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
from .core import CacheObject, SimpleCacheObject
16.666667
48
0.84
5
50
8.4
1
0
0
0
0
0
0
0
0
0
0
0
0.12
50
2
49
25
0.954545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
48845d95b4825fc29c5c786687f87174fa2bb38b
27
py
Python
urbanoctowaddle/__init__.py
TaiSakuma/urbanoctowaddle
0d297f0c47c97cc34d8816c78121b555efd79e7c
[ "BSD-3-Clause" ]
null
null
null
urbanoctowaddle/__init__.py
TaiSakuma/urbanoctowaddle
0d297f0c47c97cc34d8816c78121b555efd79e7c
[ "BSD-3-Clause" ]
null
null
null
urbanoctowaddle/__init__.py
TaiSakuma/urbanoctowaddle
0d297f0c47c97cc34d8816c78121b555efd79e7c
[ "BSD-3-Clause" ]
null
null
null
from .waddle import Waddle
13.5
26
0.814815
4
27
5.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.148148
27
1
27
27
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
6f864f4bf619de60e8ff7535839671c436fb107f
25
py
Python
angrmanagement/plugins/varec/__init__.py
DennyDai/angr-management
8a4ba5dafbf2f4d2ba558528a0d1ae099a199a04
[ "BSD-2-Clause" ]
474
2015-08-10T17:47:15.000Z
2022-03-31T21:10:55.000Z
angrmanagement/plugins/varec/__init__.py
DennyDai/angr-management
8a4ba5dafbf2f4d2ba558528a0d1ae099a199a04
[ "BSD-2-Clause" ]
355
2015-08-17T09:35:53.000Z
2022-03-31T21:29:52.000Z
angrmanagement/plugins/varec/__init__.py
DennyDai/angr-management
8a4ba5dafbf2f4d2ba558528a0d1ae099a199a04
[ "BSD-2-Clause" ]
95
2015-08-11T14:36:12.000Z
2022-03-31T23:01:01.000Z
from .varec import VaRec
12.5
24
0.8
4
25
5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.16
25
1
25
25
0.952381
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
6f9b1d8719e611290765afa25096acc5c0a8e6b3
39
py
Python
vdffit/io/psp/__init__.py
dstansby/vdffit
1fe6adc5c5ef2ace266f91fa6e29af91544ca768
[ "BSD-2-Clause" ]
null
null
null
vdffit/io/psp/__init__.py
dstansby/vdffit
1fe6adc5c5ef2ace266f91fa6e29af91544ca768
[ "BSD-2-Clause" ]
null
null
null
vdffit/io/psp/__init__.py
dstansby/vdffit
1fe6adc5c5ef2ace266f91fa6e29af91544ca768
[ "BSD-2-Clause" ]
null
null
null
from .mag import * from .span import *
13
19
0.692308
6
39
4.5
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.205128
39
2
20
19.5
0.870968
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
82fc2438679a48ba8938354f6576223c1e9b01d7
1,866
py
Python
fabfile.py
snowcloud/ct-snowhite
0692890cdf9f6eccefc127a30f8d6124107453a7
[ "BSD-3-Clause" ]
null
null
null
fabfile.py
snowcloud/ct-snowhite
0692890cdf9f6eccefc127a30f8d6124107453a7
[ "BSD-3-Clause" ]
null
null
null
fabfile.py
snowcloud/ct-snowhite
0692890cdf9f6eccefc127a30f8d6124107453a7
[ "BSD-3-Clause" ]
null
null
null
from __future__ import with_statement from fabric.api import * def up_reqs(upgrade=False): """docstring for up_reqs: fab up_reqs:upgrade """ upgrade = '--upgrade' if upgrade == 'upgrade' else '' local('pip install -r requirements.txt') def pull_codebase(): print("updating code...") # local('cd ~/virtualenvs/icnp && git pull') local('pip install -r requirements.txt --upgrade') def use_head(): proj='snowhite' local('cd ~/virtualenvs/%s && git checkout master' % proj) local('cd ~/virtualenvs/%s/src/ct-blog && git checkout master' % proj) local('cd ~/virtualenvs/%s/src/ct-framework && git checkout master' % proj) local('cd ~/virtualenvs/%s/src/ct-groups && git checkout master' % proj) local('cd ~/virtualenvs/%s/src/ct-template && git checkout master' % proj) local('cd ~/virtualenvs/%s/src/ct-wikiapp && git checkout master' % proj) local('cd ~/virtualenvs/%s/src/sc-utils && git checkout master' % proj) def use_dev(): proj='snowhite' warn("NEEDS A git checkout -b newone origin/newone IN EACH APP BEFORE FIRST TIME") local('cd ~/virtualenvs/%s && git checkout restyle-newlook' % proj) local('cd ~/virtualenvs/%s/src/ct-blog && git checkout master' % proj) local('cd ~/virtualenvs/%s/src/ct-framework && git checkout restyle' % proj) local('cd ~/virtualenvs/%s/src/ct-groups && git checkout restyle' % proj) local('cd ~/virtualenvs/%s/src/ct-template && git checkout dev' % proj) local('cd ~/virtualenvs/%s/src/ct-wikiapp && git checkout master' % proj) local('cd ~/virtualenvs/%s/src/sc-utils && git checkout master' % proj) """ cd ~/virtualenvs/snowhite/src/ct-framework git checkout -b dev origin/dev cd ~/virtualenvs/snowhite/src/ct-groups git checkout -b dev origin/dev cd ~/virtualenvs/snowhite/src/ct-template git checkout -b newone origin/newone """
42.409091
86
0.680064
263
1,866
4.78327
0.239544
0.18601
0.214626
0.211447
0.762321
0.72337
0.58744
0.58744
0.58744
0.58744
0
0
0.160236
1,866
43
87
43.395349
0.802808
0
0
0.296296
0
0
0.625162
0.237354
0
0
0
0
0
0
null
null
0
0.074074
null
null
0.037037
0
0
0
null
0
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
d213a10a64c30490801a13f44c9a3ac222032339
160
py
Python
src/misc/test_ft.py
ahmedwaqar/pyFTreetool
31decb2dc30a94d0b4a1d73d09c0e4e561af6a2a
[ "MIT" ]
null
null
null
src/misc/test_ft.py
ahmedwaqar/pyFTreetool
31decb2dc30a94d0b4a1d73d09c0e4e561af6a2a
[ "MIT" ]
8
2021-03-29T01:03:34.000Z
2021-05-02T15:03:30.000Z
src/misc/test_ft.py
ahmedwaqar/pyFTreetool
31decb2dc30a94d0b4a1d73d09c0e4e561af6a2a
[ "MIT" ]
null
null
null
import FTree as ft z = ft.Gates() G1 = z.and_gate([['E1']],[['E2']],[['E3']]) G2 = z.or_gate([['E1']],[['E2']],G1) G3 = z.and_gate([['E1']],G2,G1) print(G3)
16
43
0.5
30
160
2.566667
0.533333
0.233766
0.207792
0.25974
0
0
0
0
0
0
0
0.092857
0.125
160
9
44
17.777778
0.457143
0
0
0
0
0
0.075
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.166667
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
d236e6b6e3129638231ebe26fbbc25ce6ebaa673
130
py
Python
jaymap/tests/__init__.py
jreese/jaymap
e79b4a375a4c4b4ddccbe57f16631adf9eea8a7c
[ "MIT" ]
1
2021-03-05T19:32:59.000Z
2021-03-05T19:32:59.000Z
jaymap/tests/__init__.py
jreese/jaymap
e79b4a375a4c4b4ddccbe57f16631adf9eea8a7c
[ "MIT" ]
1
2021-01-18T23:43:05.000Z
2021-01-18T23:43:05.000Z
jaymap/tests/__init__.py
jreese/jaymap
e79b4a375a4c4b4ddccbe57f16631adf9eea8a7c
[ "MIT" ]
null
null
null
# Copyright 2021 John Reese # Licensed under the MIT license from .types.base import BaseTypes from .types.core import CoreTypes
21.666667
33
0.8
19
130
5.473684
0.842105
0.173077
0
0
0
0
0
0
0
0
0
0.036364
0.153846
130
5
34
26
0.909091
0.430769
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d238695c931da0d9ae9e082968d71167e1c13417
304
py
Python
app/core/events.py
plugns/boleto-FastAPI
44e6383d4057e61b908924647fce94853c6ef493
[ "MIT" ]
null
null
null
app/core/events.py
plugns/boleto-FastAPI
44e6383d4057e61b908924647fce94853c6ef493
[ "MIT" ]
null
null
null
app/core/events.py
plugns/boleto-FastAPI
44e6383d4057e61b908924647fce94853c6ef493
[ "MIT" ]
null
null
null
from typing import Callable from fastapi import FastAPI from loguru import logger from app.core.settings.app import AppSettings def create_start_app_handler(app: FastAPI,settings: AppSettings,) -> Callable: # type: ignore return def create_stop_app_handler(app: FastAPI) -> Callable: return
23.384615
94
0.786184
41
304
5.682927
0.463415
0.077253
0.111588
0.171674
0
0
0
0
0
0
0
0
0.148026
304
13
95
23.384615
0.899614
0.039474
0
0.25
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.5
0.25
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
5
d23a59c80f47dc4aac23adb51f2828f32b19c04d
97
py
Python
client/builder.py
seanthegeek/phishforall
5fcfffd2c225f50304903d69ec0e5392756dc690
[ "Apache-2.0" ]
11
2015-10-25T23:05:06.000Z
2021-08-23T18:49:16.000Z
client/builder.py
seanthegeek/tattle
5fcfffd2c225f50304903d69ec0e5392756dc690
[ "Apache-2.0" ]
null
null
null
client/builder.py
seanthegeek/tattle
5fcfffd2c225f50304903d69ec0e5392756dc690
[ "Apache-2.0" ]
5
2016-09-06T16:37:08.000Z
2019-11-09T15:15:06.000Z
from subprocess import check_call args = ["pyinstaller", "phishforall.spec"] check_call(args)
13.857143
42
0.762887
12
97
6
0.75
0.25
0.361111
0
0
0
0
0
0
0
0
0
0.123711
97
6
43
16.166667
0.847059
0
0
0
0
0
0.278351
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
d273b59b712807e698657e090d45db5565dd927d
690
py
Python
infra/libs/git2/data/__init__.py
allaparthi/monorail
e18645fc1b952a5a6ff5f06e0c740d75f1904473
[ "BSD-3-Clause" ]
2
2021-04-13T21:22:18.000Z
2021-09-07T02:11:57.000Z
infra/libs/git2/data/__init__.py
allaparthi/monorail
e18645fc1b952a5a6ff5f06e0c740d75f1904473
[ "BSD-3-Clause" ]
21
2020-09-06T02:41:05.000Z
2022-03-02T04:40:01.000Z
infra/libs/git2/data/__init__.py
allaparthi/monorail
e18645fc1b952a5a6ff5f06e0c740d75f1904473
[ "BSD-3-Clause" ]
null
null
null
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. ################################################################################ # Commit ################################################################################ # Exceptions from infra.libs.git2.data.commit import PartialCommit from infra.libs.git2.data.commit import UnexpectedHeader # Classes from infra.libs.git2.data.commit import CommitUser from infra.libs.git2.data.commit import CommitTimestamp from infra.libs.git2.data.commit import CommitData # Data from infra.libs.git2.data.commit import NULL_TIMESTAMP
36.315789
80
0.62029
81
690
5.271605
0.518519
0.126464
0.18267
0.238876
0.4637
0.4637
0.4637
0
0
0
0
0.016051
0.097101
690
18
81
38.333333
0.669342
0.269565
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d281d06f8bcd1391977f09677a53524ff3309337
103
py
Python
main.py
yedhrab/YSelenium
2e4beec2062008dddf6cc67dcbd23b9624c07882
[ "MIT" ]
1
2019-05-03T18:37:18.000Z
2019-05-03T18:37:18.000Z
main.py
yedhrab/YSelenium
2e4beec2062008dddf6cc67dcbd23b9624c07882
[ "MIT" ]
null
null
null
main.py
yedhrab/YSelenium
2e4beec2062008dddf6cc67dcbd23b9624c07882
[ "MIT" ]
null
null
null
"""Temel Çalıştırma Dosyası """ # import kısmı değiştirilerek çalışır from controllers import kariyer
17.166667
37
0.796117
11
103
7.454545
0.909091
0
0
0
0
0
0
0
0
0
0
0
0.135922
103
5
38
20.6
0.921348
0.592233
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
96471164a3a38ffd244bc0c268dc3cbf5fd5624c
202
py
Python
cct/core2/instrument/components/__init__.py
awacha/cct
be1adbed2533df15c778051f3f4f9da0749c873a
[ "BSD-3-Clause" ]
1
2015-11-04T16:37:39.000Z
2015-11-04T16:37:39.000Z
cct/core2/instrument/components/__init__.py
awacha/cct
be1adbed2533df15c778051f3f4f9da0749c873a
[ "BSD-3-Clause" ]
null
null
null
cct/core2/instrument/components/__init__.py
awacha/cct
be1adbed2533df15c778051f3f4f9da0749c873a
[ "BSD-3-Clause" ]
1
2020-03-05T02:50:43.000Z
2020-03-05T02:50:43.000Z
from . import samples, beamstop, component, devicemanager,devicestatus, interpreter, io, motors, geometry, calibrants, \ scan, auth, datareduction, projects, expose, notifier, sensors, transmission
67.333333
120
0.782178
20
202
7.9
1
0
0
0
0
0
0
0
0
0
0
0
0.123762
202
2
121
101
0.892655
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
96562f0a8b7d8d5544b019af7c212b8d209d00c2
12,868
py
Python
Sketches/MPS/Kids/GestureRecognition/Patterns.py
sparkslabs/kamaelia_orig
24b5f855a63421a1f7c6c7a35a7f4629ed955316
[ "Apache-2.0" ]
12
2015-10-20T10:22:01.000Z
2021-07-19T10:09:44.000Z
Sketches/MPS/Kids/GestureRecognition/Patterns.py
sparkslabs/kamaelia_orig
24b5f855a63421a1f7c6c7a35a7f4629ed955316
[ "Apache-2.0" ]
2
2015-10-20T10:22:55.000Z
2017-02-13T11:05:25.000Z
Sketches/MPS/Kids/GestureRecognition/Patterns.py
sparkslabs/kamaelia_orig
24b5f855a63421a1f7c6c7a35a7f4629ed955316
[ "Apache-2.0" ]
6
2015-03-09T12:51:59.000Z
2020-03-01T13:06:21.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # STROKE PATTERNS & MULTI-STROKE GRAMMARS # patterns are a dictionary. # for each output symbol, there is a list of possible patterns. # each pattern is a list of points that the stroke is expect to pass through # and information about how the path, up to that point may curve. # [(x,y,00), (x1,y1,c1), (x2,y2,c2), ...] 
# x,y = coordinates in 1.0x1.0 normalised square that the stroke is expected to pass through # c = expected curvature: 0 = none/either # +1 = bulges out to the RHS of this segment # -1 = bulges out to the LHS of this segment # For the first point, there is no preceeding segment to look at the curvature of, # so specify a curvature of 00 BCK = chr(8) patterns = { "a" : [ (( 5.0, 0.3), [(0.9, 1.0, 00), (0.4, 1.0, +1), (0.0, 0.4, +1), (0.4, 0.0, +1), (0.9, 0.5, +1), (0.9, 0.9, 0), (0.9, 0.5, 0), (1.0, 0.0, +1)] ), (( 5.0, 0.3), [(0.4, 1.0, 00), (0.3, 1.0, +1), (0.0, 0.4, +1), (0.2, 0.0, +1), (0.4, 0.5, +1), (0.5, 0.8, 0), (0.6, 0.5, 0), (1.0, 0.0, +1)] ), ], "b" : [ (( 9.0, 0.5), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.0, 0.0, 0), (0.0, 0.2, 0), (0.5, 0.6, -1), (1.0, 0.3, -1), (0.5, 0.0, -1), (0.0, 0.0, -1)] ), (( 9.0, 0.5), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.0, 0.2, 0), (0.5, 0.0, +1), (1.0, 0.2, +1), (0.5, 0.5, +1), (0.2, 0.3, +1)] ), (( 9.0, 0.5), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.0, 0.0, 0), (0.0, 0.1, 0), (0.5, 0.3, -1), (1.0, 0.2, -1), (0.5, 0.0, -1), (0.0, 0.0, -1)] ), (( 9.0, 0.5), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.0, 0.2, 0), (0.5, 0.0, +1), (1.0, 0.1, +1), (0.5, 0.3, +1), (0.2, 0.2, +1)] ), ], "c" : [ (( 5.0, 0.3), [(1.0, 0.9, 00), (0.5, 1.0, +1), (0.0, 0.5, +1), (0.5, 0.0, +1), (1.0, 0.1, +1)] ), (( 5.0, 0.3), [(0.5, 1.0, 00), (0.0, 0.5, +1), (0.5, 0.0, +1), (1.0, 0.1, +1)] ), ], "d" : [ (( 9.0, 1.5), [(0.8, 0.5, 00), (0.4, 0.5, +1), (0.0, 0.2, +1), (0.5, 0.0, +1), (0.9, 0.5, +1), (0.9, 1.0, +1), (0.9, 0.5, +1), (1.0, 0.0, +1)] ), (( 9.0, 1.5), [(0.8, 0.5, 00), (0.4, 0.5, +1), (0.0, 0.2, +1), (0.5, 0.0, +1), (1.0, 0.5, +1), (1.0, 1.0, +1)] ), ], "e" : [ (( 3.0, 0.3), [(0.1, 0.5, 00), (1.0, 0.7, +1), (0.5, 1.0, +1), (0.0, 0.5, +1), (0.5, 0.0, +1), (0.9, 0.1, +1)] ), ], # f - see grammar rules for optional dash "f" : [ (( 4.0, 1.5), [(1.0, 1.0, 00), (0.5, 0.8, +1), (0.5, 0.5, +1), (0.4, 0.0, -1), (0.0, 0.3, -1), (0.5, 0.5, -1), (1.0, 0.5, 0)] ), ], "f0": [ (( 
8.0, 2.0), [(1.0, 1.0, 00), (0.5, 1.0, +1), (0.0, 0.8, +1), (0.0, 0.5, 0), (0.0, 0.0, 0)] ), (( 8.0, 2.0), [(1.0, 1.0, 00), (0.5, 0.8, +1), (0.5, 0.5, +1), (0.5, 0.2, -1), (0.0, 0.0, -1)] ), ], "g" : [ (( 5.0, 0.3), [(1.0, 1.0, 00), (0.5, 1.0, +1), (0.0, 0.8, +1), (0.5, 0.6, +1), (1.0, 0.9, +1), (1.0, 1.0, 0), (1.0, 0.5, -1), (0.5, 0.0, -1), (0.0, 0.1, -1)] ), (( 5.0, 0.3), [(1.0, 1.0, 00), (0.5, 1.0, +1), (0.0, 0.8, +1), (0.5, 0.6, +1), (1.0, 0.8, +1), (1.0, 0.5, 0), (0.5, 0.0, -1), (0.0, 0.1, -1)] ), (( 5.0, 0.3), [(1.0, 1.0, 00), (0.7, 1.0, +1), (0.4, 0.8, +1), (0.7, 0.6, +1), (1.0, 0.9, +1), (1.0, 1.0, 0), (1.0, 0.5, -1), (0.5, 0.0, -1), (0.0, 0.2, -1)] ), ], "h" : [ (( 8.0, 1.5), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.0, 0.0, 0), (0.0, 0.3, 0), (0.5, 0.4, -1), (1.0, 0.3, -1), (1.0, 0.0, -1)] ) ], # i - see grammar rules # j - see grammar rules for optional '.' "j" : [ (( 8.0, 1.5), [(1.0, 1.0, 00), (1.0, 0.5, 0), (0.0, 0.0, -1)] ) ], "k" : [ (( 8.0, 1.5), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.0, 0.0, 0), (0.0, 0.2, 0), (0.9, 0.6, -1), (0.1, 0.3, +1), (1.0, 0.0, +1)] ), (( 8.0, 1.5), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.0, 0.0, 0), (0.0, 0.3, 0), (0.5, 0.5, -1), (0.8, 0.4, -1), (0.5, 0.3, -1), (0.0, 0.3, -1), (1.0, 0.0, -1)] ), ], "l" : [ ((999., 0.5), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.0, 0.0, 0), (0.5, 0.0, 0), (1.0, 0.0, 0)] ), ((999., 3.0), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.0, 0.0, 0)] ), ], "m" : [ (( 3.0, 0.3), [(0.0, 0.0, 00), (0.0, 0.5, 0), (0.2, 1.0, -1), (0.5, 0.6, -1), (0.5, 0.2, 0), (0.5, 0.6, 0), (0.7, 1.0, -1), (1.0, 0.5, -1), (1.0, 0.0, 0)] ), (( 3.0, 0.3), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.0, 0.0, 0), (0.0, 0.5, 0), (0.2, 1.0, -1), (0.5, 0.6, -1), (0.5, 0.2, 0), (0.5, 0.6, 0), (0.7, 1.0, -1), (1.0, 0.5, -1), (1.0, 0.2, 0)] ), (( 3.0, 0.3), [(0.0, 0.0, 00), (0.0, 0.5, 0), (0.2, 1.0, -1), (0.5, 0.6, 0), (0.7, 1.0, 0), (1.0, 0.5, -1), (1.0, 0.0, 0)] ), (( 3.0, 0.3), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.0, 0.0, 0), (0.0, 0.5, 0), (0.2, 1.0, -1), (0.5, 0.6, 
-1), (0.7, 1.0, 0), (1.0, 0.5, -1), (1.0, 0.0, 0)] ), ], "n" : [ (( 3.0, 0.3), [(0.0, 0.0, 00), (0.0, 0.5, -1), (0.5, 1.0, -1), (1.0, 0.5, -1), (1.0, 0.0, -1)] ), (( 3.0, 0.3), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.0, 0.0, 0), (0.0, 0.5, -1), (0.5, 1.0, -1), (1.0, 0.5, -1), (1.0, 0.0, -1)] ), ], "o" : [ (( 3.0, 0.3), [(0.5, 1.0, 00), (1.0, 0.5, -1), (0.5, 0.0, -1), (0.0, 0.5, -1), (0.5, 1.0, -1)] ), (( 3.0, 0.3), [(1.0, 0.5, 00), (0.5, 0.0, -1), (0.0, 0.5, -1), (0.5, 1.0, -1), (1.0, 0.5, -1)] ), (( 3.0, 0.3), [(0.5, 0.0, 00), (0.0, 0.5, -1), (0.5, 1.0, -1), (1.0, 0.5, -1), (0.5, 0.0, -1)] ), (( 3.0, 0.3), [(0.0, 0.5, 00), (0.5, 1.0, -1), (1.0, 0.5, -1), (0.5, 0.0, -1), (0.0, 0.5, -1)] ), (( 3.0, 0.3), [(0.5, 1.0, 00), (0.0, 0.5, -1), (0.5, 0.0, -1),(1.0, 0.5, -1), (0.5, 1.0, -1)] ), (( 3.0, 0.3), [(1.0, 0.5, 00), (0.5, 1.0, -1), (0.0, 0.5, -1), (0.5, 0.0, -1), (1.0, 0.5, -1)] ), (( 3.0, 0.3), [(0.5, 0.0, 00), (1.0, 0.5, -1), (0.5, 1.0, -1), (0.0, 0.5, -1), (0.5, 0.0, -1)] ), (( 3.0, 0.3), [(0.0, 0.5, 00), (0.5, 0.0, -1), (1.0, 0.5, -1), (0.5, 1.0, -1), (0.0, 0.5, -1)] ), ], "p" : [ (( 9.0, 1.5), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.0, 0.0, 0), (0.0, 0.5, 0), (0.5, 1.0, -1), (1.0, 0.8, -1), (0.5, 0.6, -1), (0.2, 0.6, -1)] ), (( 9.0, 1.5), [(0.0, 0.0, 0), (0.0, 0.5, 0), (0.5, 1.0, -1), (1.0, 0.8, -1), (0.5, 0.6, -1), (0.2, 0.6, -1)] ), ], "q" : [ (( 9.0, 1.5), [(1.0, 1.0, 00), (0.5, 0.9, +1), (0.0, 0.8, +1), (0.5, 0.6, +1), (0.9, 0.9, +1), (1.0, 0.5, 0), (1.0, 0.0, 0)] ), (( 5.0, 0.6), [(0.6, 1.0, 00), (0.3, 0.9, +1), (0.0, 0.8, +1), (0.3, 0.6, +1), (0.6, 0.9, +1), (0.5, 0.5, 0), (0.5, 0.0, 0), (1.0, 0.3, 0)] ), ], "r" : [ (( 5.0, 0.5), [(0.0, 0.9, 00), (0.0, 0.5, 0), (0.0, 0.0, 0), (0.0, 0.7, 0), (0.5, 1.0, -1), (1.0, 0.9, -1)] ), (( 5.0, 1.0), [(0.0, 0.0, 0), (0.0, 0.5, 0), (1.0, 1.0, -1)] ), ], "s" : [ (( 5.0, 0.3), [(1.0, 0.9, 00), (0.5, 1.0, 0), (0.0, 0.7, +1), (0.5, 0.5, +1), (1.0, 0.3, -1), (0.5, 0.0, -1), (0.0, 0.1, 0)] ), ], # t - see grammar rules for multi 
stroke version "t" : [ (( 5.0, 0.3), [(0.5, 1.0, 00), (0.5, 0.5, 0), (0.2, 0.0, -1), (0.0, 0.2, -1), (0.5, 0.4, 0), (1.0, 0.5, 0)] ), ], "u" : [ (( 5.0, 0.6), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.5, 0.0, +1), (1.0, 0.5, +1), (1.0, 1.0, 0)] ), (( 5.0, 0.6), [(0.0, 1.0, 00), (0.0, 0.5, 0), (0.4, 0.0, +1), (0.8, 0.5, +1), (0.8, 1.0, 0), (0.8, 0.5, 0), (1.0, 0.0, +1)] ), ], "v" : [ (( 5.0, 0.6), [(0.0, 1.0, 00), (0.3, 0.5, 0), (0.5, 0.0, 0), (0.7, 0.5, 0), (1.0, 1.0, 0)] ), ], "w" : [ (( 3.0, 0.3), [(0.0, 1.0, 00), (0.2, 0.0, 0), (0.5, 0.5, 0), (0.8, 0.0, 0), (1.0, 1.0, 0)] ), (( 3.0, 0.3), [(0.0, 1.0, 00), (0.2, 0.0, 0), (0.5, 1.0, 0), (0.8, 0.0, 0), (1.0, 1.0, 0)] ), ], # x - see grammar rules for additional versions "x" : [ (( 3.0, 0.6), [(0.0, 1.0, 00), (0.5, 0.5, -1), (0.0, 0.0, -1), (0.5, 0.5, +1), (1.0, 1.0, -1), (0.5, 0.5, +1), (1.0, 0.0, +1)] ), ], "y" : [ (( 8.0, 1.5), [(0.0, 1.0, 00), (0.5, 0.5, +1), (1.0, 1.0, +1), (1.0, 0.5, 0), (0.5, 0.0, -1)] ), ], "z" : [ (( 5.0, 0.5), [(0.0, 1.0, 00), (0.5, 1.0, 0), (1.0, 1.0, 0), (0.5, 0.5, 0), (0.0, 0.0, 0), (0.5, 0.0, 0), (1.0, 0.0, 0)] ), ], " " : [ (( 0.3, 0.0), [(0.0, 0.0, 00), (0.5, 0.0, 0), (1.0, 0.0, 0)] ), ], BCK : [ (( 0.3, 0.0), [(1.0, 0.0, 00), (0.5, 0.0, 0), (0.0, 0.0, 0)] ), ], "\\": [ (( 1.5, 0.3), [(0.0, 1.0, 00), (0.5, 0.5, 0), (1.0, 0.0, 0)] ), ], "@": [ (( 2.0, 0.5), [(0.7, 0.7, 00), (0.3, 0.5, +1), (0.4, 0.4, +1), (0.7, 0.7, 0), (0.8, 0.3, +1), (1.0, 0.7, +1), (0.5, 1.0, +1), (0.0, 0.5, +1), (0.5, 0.0, +1)] ), ], "&": [ (( 5.0, 1.0), [(1.0, 0.0, 00), (0.5, 0.4, 0), (0.0, 0.8, -1), (0.5, 1.0, -1), (1.0, 0.8, -1), (0.5, 0.5, 0), (0.0, 0.2, 0), (0.5, 0.0, +1), (0.8, 0.2, +1)] ), (( 5.0, 1.0), [(1.0, 0.0, 00), (0.5, 0.4, 0), (0.0, 0.8, -1), (0.3, 1.0, -1), (0.6, 0.8, -1), (0.3, 0.5, 0), (0.0, 0.2, 0), (0.3, 0.0, +1), (0.5, 0.2, +1)] ), ], "'": [ (( 8.0, 0.5), [(0.0, 1.0, 00), (0.5, 1.0, 0), (1.0, 1.0, 0), (1.0, 0.5, 0), (1.0, 0.0, 0)] ), ], ",": [ (( 5.0, 0.3), [(1.0, 1.0, 00), (1.0, 0.5, 0), (1.0, 0.0, 
0), (0.5, 0.0, 0), (0.0, 0.0, 0)] ), ], "\n": [ (( 5.0, 0.3), [(1.0, 1.0, 00), (0.5, 0.5, 0), (0.0, 0.0, 0)] ), ], "?0": [ (( 9.0, 1.5), [(0.2, 0.8, 00), (0.5, 1.0, -1), (1.0, 0.6, -1), (0.5, 0.2, -1), (0.5, 0.0, +1) ] ), ], "?1": [ (( 9.0, 1.5), [(0.0, 1.0, 00), (0.5, 1.0, -1), (1.0, 0.7, -1), (0.0, 0.5, -1), (0.0, 0.0, +1) ] ), ], } # also need to add a simple follow-on grammar # specify that, for this pattern, we override if: # the previous symbol was X # this pattern falls within minimum and maxmum bounding boxes, relative to the previous pattern # the new symbol will actually be. overrides like this will not become the 'previous' pattern # bounding boxes # we use BCK+"X" to substitute a symbol # we can specify patterns with
56.438596
100
0.303388
2,609
12,868
1.496359
0.075125
0.295082
0.199027
0.13832
0.591701
0.560195
0.533043
0.510758
0.486936
0.436988
0
0.294966
0.385608
12,868
227
101
56.687225
0.198836
0.162185
0
0.386905
0
0
0.003726
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
9676d96ebd6feb3380f4e7f5ca8bd8e3ed1dc22f
121
py
Python
uos3/configUp/admin.py
Axpere/telecommand-server
aa9bb61b127d914bd77a3bbe7ec39ef0dfc9f9ff
[ "MIT" ]
null
null
null
uos3/configUp/admin.py
Axpere/telecommand-server
aa9bb61b127d914bd77a3bbe7ec39ef0dfc9f9ff
[ "MIT" ]
1
2019-06-19T17:20:47.000Z
2019-06-19T17:20:47.000Z
uos3/configUp/admin.py
MNahad/telecommand-server
96d2f1e59cb4de581f6f1bbb3a61ed1b7062f91f
[ "MIT" ]
1
2020-04-22T20:39:49.000Z
2020-04-22T20:39:49.000Z
from django.contrib import admin from .models import config # Register your models here. admin.site.register(config)
13.444444
32
0.785124
17
121
5.588235
0.647059
0
0
0
0
0
0
0
0
0
0
0
0.14876
121
8
33
15.125
0.92233
0.214876
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
96a17abf511133dd2ca08b4656825414b0dc1e0f
196
py
Python
django_priority_batch/__init__.py
dblenkus/django-priority-batch
8cc051b1196e90ef3ccc9d14d1ea277d46e31891
[ "Apache-2.0" ]
1
2018-10-16T10:56:53.000Z
2018-10-16T10:56:53.000Z
django_priority_batch/__init__.py
dblenkus/django-priority-batch
8cc051b1196e90ef3ccc9d14d1ea277d46e31891
[ "Apache-2.0" ]
3
2018-10-19T10:42:01.000Z
2018-10-21T10:14:56.000Z
django_priority_batch/__init__.py
dblenkus/django-priority-batch
8cc051b1196e90ef3ccc9d14d1ea277d46e31891
[ "Apache-2.0" ]
4
2018-10-19T08:03:48.000Z
2020-02-03T19:49:51.000Z
""".. Ignore pydocstyle D400. ===================== Django Priority Batch ===================== TODO. """ from .middleware import Middleware from .prioritized_batcher import PrioritizedBatcher
16.333333
51
0.612245
16
196
7.4375
0.8125
0
0
0
0
0
0
0
0
0
0
0.017241
0.112245
196
11
52
17.818182
0.666667
0.510204
0
0
0
0
0
0
0
0
0
0.090909
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
1
0
1
0
0
5
736e47018488375d8115d696e9eecfd3f5f618de
489
py
Python
website/admin.py
jrdbnntt-com/com_jrdbnntt_wedding
101c825f420076e36ea598332abc87da403910be
[ "MIT" ]
null
null
null
website/admin.py
jrdbnntt-com/com_jrdbnntt_wedding
101c825f420076e36ea598332abc87da403910be
[ "MIT" ]
null
null
null
website/admin.py
jrdbnntt-com/com_jrdbnntt_wedding
101c825f420076e36ea598332abc87da403910be
[ "MIT" ]
null
null
null
from website.models.guest import init as admin_init_guest from website.models.mail.subscription_group import init as admin_init_email_subscription_group from website.models.mail.template import init as admin_init_email_template from website.models.reservation.admin import init as admin_init_reservation from website.models.task import init as admin_init_task admin_init_reservation() admin_init_guest() admin_init_email_template() admin_init_email_subscription_group() admin_init_task()
40.75
94
0.879346
75
489
5.373333
0.2
0.223325
0.210918
0.210918
0.404467
0.129032
0
0
0
0
0
0
0.07362
489
11
95
44.454545
0.889625
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
736f77995897b45e349223f3271f946829312bdd
127
py
Python
dsrt/data/__init__.py
sbarham/dsrt
bc664739f2f52839461d3e72773b71146fd56a9a
[ "MIT" ]
1
2019-02-11T10:05:33.000Z
2019-02-11T10:05:33.000Z
dsrt/data/__init__.py
sbarham/dsrt
bc664739f2f52839461d3e72773b71146fd56a9a
[ "MIT" ]
null
null
null
dsrt/data/__init__.py
sbarham/dsrt
bc664739f2f52839461d3e72773b71146fd56a9a
[ "MIT" ]
null
null
null
from . Properties import Properties from . SampleSet import SampleSet from . DataSet import DataSet from . Corpus import Corpus
31.75
35
0.818898
16
127
6.5
0.375
0
0
0
0
0
0
0
0
0
0
0
0.149606
127
4
36
31.75
0.962963
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7376bc085f7dca34e618b69efa9ccfeb363c7f2e
336
py
Python
src/coin_wizard/utils.py
magneticchen/CoinWizard
17f4070f47cbb185d8d2df116e09924d9adcbfa8
[ "MIT" ]
5
2021-02-24T11:27:50.000Z
2022-03-21T14:57:13.000Z
src/coin_wizard/utils.py
magneticchen/CoinWizard
17f4070f47cbb185d8d2df116e09924d9adcbfa8
[ "MIT" ]
null
null
null
src/coin_wizard/utils.py
magneticchen/CoinWizard
17f4070f47cbb185d8d2df116e09924d9adcbfa8
[ "MIT" ]
null
null
null
#!/usr/bin/python3 def translate_pair_to_splited(pair_name): if len(pair_name) != 6: return pair_name return pair_name[0:3].upper() + '_' + pair_name[3:6].upper() def translate_pair_to_unsplited(pair_name): if len(pair_name) != 7: return pair_name return pair_name[0:3].lower()+ pair_name[4:7].lower()
28
64
0.672619
55
336
3.8
0.363636
0.382775
0.267943
0.172249
0.488038
0.488038
0.287081
0.287081
0
0
0
0.039855
0.178571
336
11
65
30.545455
0.717391
0.050595
0
0.25
0
0
0.003145
0
0
0
0
0
0
1
0.25
false
0
0
0
0.75
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
73b3d4d52214da616ffce29df3dd1b7785ffb53d
213
py
Python
Practicas/Practica3/Documento/delShitFiles.py
DSarceno/2022LabSimu201900109
a75ab5baf1de706101dfb5689aecf349c3a68763
[ "MIT" ]
null
null
null
Practicas/Practica3/Documento/delShitFiles.py
DSarceno/2022LabSimu201900109
a75ab5baf1de706101dfb5689aecf349c3a68763
[ "MIT" ]
null
null
null
Practicas/Practica3/Documento/delShitFiles.py
DSarceno/2022LabSimu201900109
a75ab5baf1de706101dfb5689aecf349c3a68763
[ "MIT" ]
null
null
null
import subprocess import sys name = sys.argv[1] subprocess.call(['rm', name + '.aux']) subprocess.call(['rm', name + '.log']) subprocess.call(['rm', name + '.out']) subprocess.call(['rm', name + '.synctex.gz'])
21.3
45
0.633803
29
213
4.655172
0.448276
0.414815
0.474074
0.592593
0
0
0
0
0
0
0
0.005319
0.117371
213
9
46
23.666667
0.712766
0
0
0
0
0
0.14554
0
0
0
0
0
0
1
0
false
0
0.285714
0
0.285714
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
73cd6824027661bdf15c276c288b1481f0f267bc
27,107
py
Python
pezLogger/src/base/ANSI.py
YesmynameisPerry/pezLogger
1b5ff39d4d038463f0fa8d9a8eb9b4bb1d3c0a11
[ "MIT" ]
null
null
null
pezLogger/src/base/ANSI.py
YesmynameisPerry/pezLogger
1b5ff39d4d038463f0fa8d9a8eb9b4bb1d3c0a11
[ "MIT" ]
null
null
null
pezLogger/src/base/ANSI.py
YesmynameisPerry/pezLogger
1b5ff39d4d038463f0fa8d9a8eb9b4bb1d3c0a11
[ "MIT" ]
null
null
null
# ANSI COLOUR CONSTANTS # CONTROL ANSI_CONTROL_RESET: str = "\u001b[0m" ANSI_CONTROL_BOLD: str = "\u001b[1m" ANSI_CONTROL_FAINT: str = "\u001b[2m" ANSI_CONTROL_ITALIC: str = "\u001b[3m" ANSI_CONTROL_UNDERLINE: str = "\u001b[4m" ANSI_CONTROL_INVERT: str = "\u001b[7m" ANSI_CONTROL_STRIKE_THROUGH: str = "\u001b[9m" # 8 COLOUR FOREGROUND ANSI_FOREGROUND_BLACK: str = "\u001b[30m" ANSI_FOREGROUND_RED: str = "\u001b[31m" ANSI_FOREGROUND_GREEN: str = "\u001b[32m" ANSI_FOREGROUND_YELLOW: str = "\u001b[33m" ANSI_FOREGROUND_BLUE: str = "\u001b[34m" ANSI_FOREGROUND_MAGENTA: str = "\u001b[35m" ANSI_FOREGROUND_CYAN: str = "\u001b[36m" ANSI_FOREGROUND_WHITE: str = "\u001b[37m" # 16 COLOUR FOREGROUND: ANSI_FOREGROUND_BRIGHT_BLACK: str = "\u001b[30;1m" ANSI_FOREGROUND_BRIGHT_RED: str = "\u001b[31;1m" ANSI_FOREGROUND_BRIGHT_GREEN: str = "\u001b[32;1m" ANSI_FOREGROUND_BRIGHT_YELLOW: str = "\u001b[33;1m" ANSI_FOREGROUND_BRIGHT_BLUE: str = "\u001b[34;1m" ANSI_FOREGROUND_BRIGHT_MAGENTA: str = "\u001b[35;1m" ANSI_FOREGROUND_BRIGHT_CYAN: str = "\u001b[36;1m" ANSI_FOREGROUND_BRIGHT_WHITE: str = "\u001b[37;1m" # 256 COLOUR FOREGROUND: ANSI_FOREGROUND_256_0: str = "\u001b[38;5;0m" ANSI_FOREGROUND_256_1: str = "\u001b[38;5;1m" ANSI_FOREGROUND_256_2: str = "\u001b[38;5;2m" ANSI_FOREGROUND_256_3: str = "\u001b[38;5;3m" ANSI_FOREGROUND_256_4: str = "\u001b[38;5;4m" ANSI_FOREGROUND_256_5: str = "\u001b[38;5;5m" ANSI_FOREGROUND_256_6: str = "\u001b[38;5;6m" ANSI_FOREGROUND_256_7: str = "\u001b[38;5;7m" ANSI_FOREGROUND_256_8: str = "\u001b[38;5;8m" ANSI_FOREGROUND_256_9: str = "\u001b[38;5;9m" ANSI_FOREGROUND_256_10: str = "\u001b[38;5;10m" ANSI_FOREGROUND_256_11: str = "\u001b[38;5;11m" ANSI_FOREGROUND_256_12: str = "\u001b[38;5;12m" ANSI_FOREGROUND_256_13: str = "\u001b[38;5;13m" ANSI_FOREGROUND_256_14: str = "\u001b[38;5;14m" ANSI_FOREGROUND_256_15: str = "\u001b[38;5;15m" ANSI_FOREGROUND_256_16: str = "\u001b[38;5;16m" ANSI_FOREGROUND_256_17: str = "\u001b[38;5;17m" ANSI_FOREGROUND_256_18: str 
= "\u001b[38;5;18m" ANSI_FOREGROUND_256_19: str = "\u001b[38;5;19m" ANSI_FOREGROUND_256_20: str = "\u001b[38;5;20m" ANSI_FOREGROUND_256_21: str = "\u001b[38;5;21m" ANSI_FOREGROUND_256_22: str = "\u001b[38;5;22m" ANSI_FOREGROUND_256_23: str = "\u001b[38;5;23m" ANSI_FOREGROUND_256_24: str = "\u001b[38;5;24m" ANSI_FOREGROUND_256_25: str = "\u001b[38;5;25m" ANSI_FOREGROUND_256_26: str = "\u001b[38;5;26m" ANSI_FOREGROUND_256_27: str = "\u001b[38;5;27m" ANSI_FOREGROUND_256_28: str = "\u001b[38;5;28m" ANSI_FOREGROUND_256_29: str = "\u001b[38;5;29m" ANSI_FOREGROUND_256_30: str = "\u001b[38;5;30m" ANSI_FOREGROUND_256_31: str = "\u001b[38;5;31m" ANSI_FOREGROUND_256_32: str = "\u001b[38;5;32m" ANSI_FOREGROUND_256_33: str = "\u001b[38;5;33m" ANSI_FOREGROUND_256_34: str = "\u001b[38;5;34m" ANSI_FOREGROUND_256_35: str = "\u001b[38;5;35m" ANSI_FOREGROUND_256_36: str = "\u001b[38;5;36m" ANSI_FOREGROUND_256_37: str = "\u001b[38;5;37m" ANSI_FOREGROUND_256_38: str = "\u001b[38;5;38m" ANSI_FOREGROUND_256_39: str = "\u001b[38;5;39m" ANSI_FOREGROUND_256_40: str = "\u001b[38;5;40m" ANSI_FOREGROUND_256_41: str = "\u001b[38;5;41m" ANSI_FOREGROUND_256_42: str = "\u001b[38;5;42m" ANSI_FOREGROUND_256_43: str = "\u001b[38;5;43m" ANSI_FOREGROUND_256_44: str = "\u001b[38;5;44m" ANSI_FOREGROUND_256_45: str = "\u001b[38;5;45m" ANSI_FOREGROUND_256_46: str = "\u001b[38;5;46m" ANSI_FOREGROUND_256_47: str = "\u001b[38;5;47m" ANSI_FOREGROUND_256_48: str = "\u001b[38;5;48m" ANSI_FOREGROUND_256_49: str = "\u001b[38;5;49m" ANSI_FOREGROUND_256_50: str = "\u001b[38;5;50m" ANSI_FOREGROUND_256_51: str = "\u001b[38;5;51m" ANSI_FOREGROUND_256_52: str = "\u001b[38;5;52m" ANSI_FOREGROUND_256_53: str = "\u001b[38;5;53m" ANSI_FOREGROUND_256_54: str = "\u001b[38;5;54m" ANSI_FOREGROUND_256_55: str = "\u001b[38;5;55m" ANSI_FOREGROUND_256_56: str = "\u001b[38;5;56m" ANSI_FOREGROUND_256_57: str = "\u001b[38;5;57m" ANSI_FOREGROUND_256_58: str = "\u001b[38;5;58m" ANSI_FOREGROUND_256_59: str = "\u001b[38;5;59m" 
ANSI_FOREGROUND_256_60: str = "\u001b[38;5;60m" ANSI_FOREGROUND_256_61: str = "\u001b[38;5;61m" ANSI_FOREGROUND_256_62: str = "\u001b[38;5;62m" ANSI_FOREGROUND_256_63: str = "\u001b[38;5;63m" ANSI_FOREGROUND_256_64: str = "\u001b[38;5;64m" ANSI_FOREGROUND_256_65: str = "\u001b[38;5;65m" ANSI_FOREGROUND_256_66: str = "\u001b[38;5;66m" ANSI_FOREGROUND_256_67: str = "\u001b[38;5;67m" ANSI_FOREGROUND_256_68: str = "\u001b[38;5;68m" ANSI_FOREGROUND_256_69: str = "\u001b[38;5;69m" ANSI_FOREGROUND_256_70: str = "\u001b[38;5;70m" ANSI_FOREGROUND_256_71: str = "\u001b[38;5;71m" ANSI_FOREGROUND_256_72: str = "\u001b[38;5;72m" ANSI_FOREGROUND_256_73: str = "\u001b[38;5;73m" ANSI_FOREGROUND_256_74: str = "\u001b[38;5;74m" ANSI_FOREGROUND_256_75: str = "\u001b[38;5;75m" ANSI_FOREGROUND_256_76: str = "\u001b[38;5;76m" ANSI_FOREGROUND_256_77: str = "\u001b[38;5;77m" ANSI_FOREGROUND_256_78: str = "\u001b[38;5;78m" ANSI_FOREGROUND_256_79: str = "\u001b[38;5;79m" ANSI_FOREGROUND_256_80: str = "\u001b[38;5;80m" ANSI_FOREGROUND_256_81: str = "\u001b[38;5;81m" ANSI_FOREGROUND_256_82: str = "\u001b[38;5;82m" ANSI_FOREGROUND_256_83: str = "\u001b[38;5;83m" ANSI_FOREGROUND_256_84: str = "\u001b[38;5;84m" ANSI_FOREGROUND_256_85: str = "\u001b[38;5;85m" ANSI_FOREGROUND_256_86: str = "\u001b[38;5;86m" ANSI_FOREGROUND_256_87: str = "\u001b[38;5;87m" ANSI_FOREGROUND_256_88: str = "\u001b[38;5;88m" ANSI_FOREGROUND_256_89: str = "\u001b[38;5;89m" ANSI_FOREGROUND_256_90: str = "\u001b[38;5;90m" ANSI_FOREGROUND_256_91: str = "\u001b[38;5;91m" ANSI_FOREGROUND_256_92: str = "\u001b[38;5;92m" ANSI_FOREGROUND_256_93: str = "\u001b[38;5;93m" ANSI_FOREGROUND_256_94: str = "\u001b[38;5;94m" ANSI_FOREGROUND_256_95: str = "\u001b[38;5;95m" ANSI_FOREGROUND_256_96: str = "\u001b[38;5;96m" ANSI_FOREGROUND_256_97: str = "\u001b[38;5;97m" ANSI_FOREGROUND_256_98: str = "\u001b[38;5;98m" ANSI_FOREGROUND_256_99: str = "\u001b[38;5;99m" ANSI_FOREGROUND_256_100: str = "\u001b[38;5;100m" ANSI_FOREGROUND_256_101: str 
= "\u001b[38;5;101m" ANSI_FOREGROUND_256_102: str = "\u001b[38;5;102m" ANSI_FOREGROUND_256_103: str = "\u001b[38;5;103m" ANSI_FOREGROUND_256_104: str = "\u001b[38;5;104m" ANSI_FOREGROUND_256_105: str = "\u001b[38;5;105m" ANSI_FOREGROUND_256_106: str = "\u001b[38;5;106m" ANSI_FOREGROUND_256_107: str = "\u001b[38;5;107m" ANSI_FOREGROUND_256_108: str = "\u001b[38;5;108m" ANSI_FOREGROUND_256_109: str = "\u001b[38;5;109m" ANSI_FOREGROUND_256_110: str = "\u001b[38;5;110m" ANSI_FOREGROUND_256_111: str = "\u001b[38;5;111m" ANSI_FOREGROUND_256_112: str = "\u001b[38;5;112m" ANSI_FOREGROUND_256_113: str = "\u001b[38;5;113m" ANSI_FOREGROUND_256_114: str = "\u001b[38;5;114m" ANSI_FOREGROUND_256_115: str = "\u001b[38;5;115m" ANSI_FOREGROUND_256_116: str = "\u001b[38;5;116m" ANSI_FOREGROUND_256_117: str = "\u001b[38;5;117m" ANSI_FOREGROUND_256_118: str = "\u001b[38;5;118m" ANSI_FOREGROUND_256_119: str = "\u001b[38;5;119m" ANSI_FOREGROUND_256_120: str = "\u001b[38;5;120m" ANSI_FOREGROUND_256_121: str = "\u001b[38;5;121m" ANSI_FOREGROUND_256_122: str = "\u001b[38;5;122m" ANSI_FOREGROUND_256_123: str = "\u001b[38;5;123m" ANSI_FOREGROUND_256_124: str = "\u001b[38;5;124m" ANSI_FOREGROUND_256_125: str = "\u001b[38;5;125m" ANSI_FOREGROUND_256_126: str = "\u001b[38;5;126m" ANSI_FOREGROUND_256_127: str = "\u001b[38;5;127m" ANSI_FOREGROUND_256_128: str = "\u001b[38;5;128m" ANSI_FOREGROUND_256_129: str = "\u001b[38;5;129m" ANSI_FOREGROUND_256_130: str = "\u001b[38;5;130m" ANSI_FOREGROUND_256_131: str = "\u001b[38;5;131m" ANSI_FOREGROUND_256_132: str = "\u001b[38;5;132m" ANSI_FOREGROUND_256_133: str = "\u001b[38;5;133m" ANSI_FOREGROUND_256_134: str = "\u001b[38;5;134m" ANSI_FOREGROUND_256_135: str = "\u001b[38;5;135m" ANSI_FOREGROUND_256_136: str = "\u001b[38;5;136m" ANSI_FOREGROUND_256_137: str = "\u001b[38;5;137m" ANSI_FOREGROUND_256_138: str = "\u001b[38;5;138m" ANSI_FOREGROUND_256_139: str = "\u001b[38;5;139m" ANSI_FOREGROUND_256_140: str = "\u001b[38;5;140m" ANSI_FOREGROUND_256_141: str 
= "\u001b[38;5;141m" ANSI_FOREGROUND_256_142: str = "\u001b[38;5;142m" ANSI_FOREGROUND_256_143: str = "\u001b[38;5;143m" ANSI_FOREGROUND_256_144: str = "\u001b[38;5;144m" ANSI_FOREGROUND_256_145: str = "\u001b[38;5;145m" ANSI_FOREGROUND_256_146: str = "\u001b[38;5;146m" ANSI_FOREGROUND_256_147: str = "\u001b[38;5;147m" ANSI_FOREGROUND_256_148: str = "\u001b[38;5;148m" ANSI_FOREGROUND_256_149: str = "\u001b[38;5;149m" ANSI_FOREGROUND_256_150: str = "\u001b[38;5;150m" ANSI_FOREGROUND_256_151: str = "\u001b[38;5;151m" ANSI_FOREGROUND_256_152: str = "\u001b[38;5;152m" ANSI_FOREGROUND_256_153: str = "\u001b[38;5;153m" ANSI_FOREGROUND_256_154: str = "\u001b[38;5;154m" ANSI_FOREGROUND_256_155: str = "\u001b[38;5;155m" ANSI_FOREGROUND_256_156: str = "\u001b[38;5;156m" ANSI_FOREGROUND_256_157: str = "\u001b[38;5;157m" ANSI_FOREGROUND_256_158: str = "\u001b[38;5;158m" ANSI_FOREGROUND_256_159: str = "\u001b[38;5;159m" ANSI_FOREGROUND_256_160: str = "\u001b[38;5;160m" ANSI_FOREGROUND_256_161: str = "\u001b[38;5;161m" ANSI_FOREGROUND_256_162: str = "\u001b[38;5;162m" ANSI_FOREGROUND_256_163: str = "\u001b[38;5;163m" ANSI_FOREGROUND_256_164: str = "\u001b[38;5;164m" ANSI_FOREGROUND_256_165: str = "\u001b[38;5;165m" ANSI_FOREGROUND_256_166: str = "\u001b[38;5;166m" ANSI_FOREGROUND_256_167: str = "\u001b[38;5;167m" ANSI_FOREGROUND_256_168: str = "\u001b[38;5;168m" ANSI_FOREGROUND_256_169: str = "\u001b[38;5;169m" ANSI_FOREGROUND_256_170: str = "\u001b[38;5;170m" ANSI_FOREGROUND_256_171: str = "\u001b[38;5;171m" ANSI_FOREGROUND_256_172: str = "\u001b[38;5;172m" ANSI_FOREGROUND_256_173: str = "\u001b[38;5;173m" ANSI_FOREGROUND_256_174: str = "\u001b[38;5;174m" ANSI_FOREGROUND_256_175: str = "\u001b[38;5;175m" ANSI_FOREGROUND_256_176: str = "\u001b[38;5;176m" ANSI_FOREGROUND_256_177: str = "\u001b[38;5;177m" ANSI_FOREGROUND_256_178: str = "\u001b[38;5;178m" ANSI_FOREGROUND_256_179: str = "\u001b[38;5;179m" ANSI_FOREGROUND_256_180: str = "\u001b[38;5;180m" ANSI_FOREGROUND_256_181: str 
= "\u001b[38;5;181m" ANSI_FOREGROUND_256_182: str = "\u001b[38;5;182m" ANSI_FOREGROUND_256_183: str = "\u001b[38;5;183m" ANSI_FOREGROUND_256_184: str = "\u001b[38;5;184m" ANSI_FOREGROUND_256_185: str = "\u001b[38;5;185m" ANSI_FOREGROUND_256_186: str = "\u001b[38;5;186m" ANSI_FOREGROUND_256_187: str = "\u001b[38;5;187m" ANSI_FOREGROUND_256_188: str = "\u001b[38;5;188m" ANSI_FOREGROUND_256_189: str = "\u001b[38;5;189m" ANSI_FOREGROUND_256_190: str = "\u001b[38;5;190m" ANSI_FOREGROUND_256_191: str = "\u001b[38;5;191m" ANSI_FOREGROUND_256_192: str = "\u001b[38;5;192m" ANSI_FOREGROUND_256_193: str = "\u001b[38;5;193m" ANSI_FOREGROUND_256_194: str = "\u001b[38;5;194m" ANSI_FOREGROUND_256_195: str = "\u001b[38;5;195m" ANSI_FOREGROUND_256_196: str = "\u001b[38;5;196m" ANSI_FOREGROUND_256_197: str = "\u001b[38;5;197m" ANSI_FOREGROUND_256_198: str = "\u001b[38;5;198m" ANSI_FOREGROUND_256_199: str = "\u001b[38;5;199m" ANSI_FOREGROUND_256_200: str = "\u001b[38;5;200m" ANSI_FOREGROUND_256_201: str = "\u001b[38;5;201m" ANSI_FOREGROUND_256_202: str = "\u001b[38;5;202m" ANSI_FOREGROUND_256_203: str = "\u001b[38;5;203m" ANSI_FOREGROUND_256_204: str = "\u001b[38;5;204m" ANSI_FOREGROUND_256_205: str = "\u001b[38;5;205m" ANSI_FOREGROUND_256_206: str = "\u001b[38;5;206m" ANSI_FOREGROUND_256_207: str = "\u001b[38;5;207m" ANSI_FOREGROUND_256_208: str = "\u001b[38;5;208m" ANSI_FOREGROUND_256_209: str = "\u001b[38;5;209m" ANSI_FOREGROUND_256_210: str = "\u001b[38;5;210m" ANSI_FOREGROUND_256_211: str = "\u001b[38;5;211m" ANSI_FOREGROUND_256_212: str = "\u001b[38;5;212m" ANSI_FOREGROUND_256_213: str = "\u001b[38;5;213m" ANSI_FOREGROUND_256_214: str = "\u001b[38;5;214m" ANSI_FOREGROUND_256_215: str = "\u001b[38;5;215m" ANSI_FOREGROUND_256_216: str = "\u001b[38;5;216m" ANSI_FOREGROUND_256_217: str = "\u001b[38;5;217m" ANSI_FOREGROUND_256_218: str = "\u001b[38;5;218m" ANSI_FOREGROUND_256_219: str = "\u001b[38;5;219m" ANSI_FOREGROUND_256_220: str = "\u001b[38;5;220m" ANSI_FOREGROUND_256_221: str 
= "\u001b[38;5;221m" ANSI_FOREGROUND_256_222: str = "\u001b[38;5;222m" ANSI_FOREGROUND_256_223: str = "\u001b[38;5;223m" ANSI_FOREGROUND_256_224: str = "\u001b[38;5;224m" ANSI_FOREGROUND_256_225: str = "\u001b[38;5;225m" ANSI_FOREGROUND_256_226: str = "\u001b[38;5;226m" ANSI_FOREGROUND_256_227: str = "\u001b[38;5;227m" ANSI_FOREGROUND_256_228: str = "\u001b[38;5;228m" ANSI_FOREGROUND_256_229: str = "\u001b[38;5;229m" ANSI_FOREGROUND_256_230: str = "\u001b[38;5;230m" ANSI_FOREGROUND_256_231: str = "\u001b[38;5;231m" ANSI_FOREGROUND_256_232: str = "\u001b[38;5;232m" ANSI_FOREGROUND_256_233: str = "\u001b[38;5;233m" ANSI_FOREGROUND_256_234: str = "\u001b[38;5;234m" ANSI_FOREGROUND_256_235: str = "\u001b[38;5;235m" ANSI_FOREGROUND_256_236: str = "\u001b[38;5;236m" ANSI_FOREGROUND_256_237: str = "\u001b[38;5;237m" ANSI_FOREGROUND_256_238: str = "\u001b[38;5;238m" ANSI_FOREGROUND_256_239: str = "\u001b[38;5;239m" ANSI_FOREGROUND_256_240: str = "\u001b[38;5;240m" ANSI_FOREGROUND_256_241: str = "\u001b[38;5;241m" ANSI_FOREGROUND_256_242: str = "\u001b[38;5;242m" ANSI_FOREGROUND_256_243: str = "\u001b[38;5;243m" ANSI_FOREGROUND_256_244: str = "\u001b[38;5;244m" ANSI_FOREGROUND_256_245: str = "\u001b[38;5;245m" ANSI_FOREGROUND_256_246: str = "\u001b[38;5;246m" ANSI_FOREGROUND_256_247: str = "\u001b[38;5;247m" ANSI_FOREGROUND_256_248: str = "\u001b[38;5;248m" ANSI_FOREGROUND_256_249: str = "\u001b[38;5;249m" ANSI_FOREGROUND_256_250: str = "\u001b[38;5;250m" ANSI_FOREGROUND_256_251: str = "\u001b[38;5;251m" ANSI_FOREGROUND_256_252: str = "\u001b[38;5;252m" ANSI_FOREGROUND_256_253: str = "\u001b[38;5;253m" ANSI_FOREGROUND_256_254: str = "\u001b[38;5;254m" ANSI_FOREGROUND_256_255: str = "\u001b[38;5;255m" # 8 COLOUR BACKGROUND ANSI_BACKGROUND_BLACK: str = "\u001b[40m" ANSI_BACKGROUND_RED: str = "\u001b[41m" ANSI_BACKGROUND_GREEN: str = "\u001b[42m" ANSI_BACKGROUND_YELLOW: str = "\u001b[43m" ANSI_BACKGROUND_BLUE: str = "\u001b[44m" ANSI_BACKGROUND_MAGENTA: str = "\u001b[45m" 
ANSI_BACKGROUND_CYAN: str = "\u001b[46m" ANSI_BACKGROUND_WHITE: str = "\u001b[47m" # 16 COLOUR BACKGROUND ANSI_BACKGROUND_BRIGHT_BLACK: str = "\u001b[40;1m" ANSI_BACKGROUND_BRIGHT_RED: str = "\u001b[41;1m" ANSI_BACKGROUND_BRIGHT_GREEN: str = "\u001b[42;1m" ANSI_BACKGROUND_BRIGHT_YELLOW: str = "\u001b[43;1m" ANSI_BACKGROUND_BRIGHT_BLUE: str = "\u001b[44;1m" ANSI_BACKGROUND_BRIGHT_MAGENTA: str = "\u001b[45;1m" ANSI_BACKGROUND_BRIGHT_CYAN: str = "\u001b[46;1m" ANSI_BACKGROUND_BRIGHT_WHITE: str = "\u001b[47;1m" # 256 COLOUR BACKGROUND: ANSI_BACKGROUND_256_0: str = "\u001b[48;5;0m" ANSI_BACKGROUND_256_1: str = "\u001b[48;5;1m" ANSI_BACKGROUND_256_2: str = "\u001b[48;5;2m" ANSI_BACKGROUND_256_3: str = "\u001b[48;5;3m" ANSI_BACKGROUND_256_4: str = "\u001b[48;5;4m" ANSI_BACKGROUND_256_5: str = "\u001b[48;5;5m" ANSI_BACKGROUND_256_6: str = "\u001b[48;5;6m" ANSI_BACKGROUND_256_7: str = "\u001b[48;5;7m" ANSI_BACKGROUND_256_8: str = "\u001b[48;5;8m" ANSI_BACKGROUND_256_9: str = "\u001b[48;5;9m" ANSI_BACKGROUND_256_10: str = "\u001b[48;5;10m" ANSI_BACKGROUND_256_11: str = "\u001b[48;5;11m" ANSI_BACKGROUND_256_12: str = "\u001b[48;5;12m" ANSI_BACKGROUND_256_13: str = "\u001b[48;5;13m" ANSI_BACKGROUND_256_14: str = "\u001b[48;5;14m" ANSI_BACKGROUND_256_15: str = "\u001b[48;5;15m" ANSI_BACKGROUND_256_16: str = "\u001b[48;5;16m" ANSI_BACKGROUND_256_17: str = "\u001b[48;5;17m" ANSI_BACKGROUND_256_18: str = "\u001b[48;5;18m" ANSI_BACKGROUND_256_19: str = "\u001b[48;5;19m" ANSI_BACKGROUND_256_20: str = "\u001b[48;5;20m" ANSI_BACKGROUND_256_21: str = "\u001b[48;5;21m" ANSI_BACKGROUND_256_22: str = "\u001b[48;5;22m" ANSI_BACKGROUND_256_23: str = "\u001b[48;5;23m" ANSI_BACKGROUND_256_24: str = "\u001b[48;5;24m" ANSI_BACKGROUND_256_25: str = "\u001b[48;5;25m" ANSI_BACKGROUND_256_26: str = "\u001b[48;5;26m" ANSI_BACKGROUND_256_27: str = "\u001b[48;5;27m" ANSI_BACKGROUND_256_28: str = "\u001b[48;5;28m" ANSI_BACKGROUND_256_29: str = "\u001b[48;5;29m" ANSI_BACKGROUND_256_30: str = 
"\u001b[48;5;30m" ANSI_BACKGROUND_256_31: str = "\u001b[48;5;31m" ANSI_BACKGROUND_256_32: str = "\u001b[48;5;32m" ANSI_BACKGROUND_256_33: str = "\u001b[48;5;33m" ANSI_BACKGROUND_256_34: str = "\u001b[48;5;34m" ANSI_BACKGROUND_256_35: str = "\u001b[48;5;35m" ANSI_BACKGROUND_256_36: str = "\u001b[48;5;36m" ANSI_BACKGROUND_256_37: str = "\u001b[48;5;37m" ANSI_BACKGROUND_256_38: str = "\u001b[48;5;38m" ANSI_BACKGROUND_256_39: str = "\u001b[48;5;39m" ANSI_BACKGROUND_256_40: str = "\u001b[48;5;40m" ANSI_BACKGROUND_256_41: str = "\u001b[48;5;41m" ANSI_BACKGROUND_256_42: str = "\u001b[48;5;42m" ANSI_BACKGROUND_256_43: str = "\u001b[48;5;43m" ANSI_BACKGROUND_256_44: str = "\u001b[48;5;44m" ANSI_BACKGROUND_256_45: str = "\u001b[48;5;45m" ANSI_BACKGROUND_256_46: str = "\u001b[48;5;46m" ANSI_BACKGROUND_256_47: str = "\u001b[48;5;47m" ANSI_BACKGROUND_256_48: str = "\u001b[48;5;48m" ANSI_BACKGROUND_256_49: str = "\u001b[48;5;49m" ANSI_BACKGROUND_256_50: str = "\u001b[48;5;50m" ANSI_BACKGROUND_256_51: str = "\u001b[48;5;51m" ANSI_BACKGROUND_256_52: str = "\u001b[48;5;52m" ANSI_BACKGROUND_256_53: str = "\u001b[48;5;53m" ANSI_BACKGROUND_256_54: str = "\u001b[48;5;54m" ANSI_BACKGROUND_256_55: str = "\u001b[48;5;55m" ANSI_BACKGROUND_256_56: str = "\u001b[48;5;56m" ANSI_BACKGROUND_256_57: str = "\u001b[48;5;57m" ANSI_BACKGROUND_256_58: str = "\u001b[48;5;58m" ANSI_BACKGROUND_256_59: str = "\u001b[48;5;59m" ANSI_BACKGROUND_256_60: str = "\u001b[48;5;60m" ANSI_BACKGROUND_256_61: str = "\u001b[48;5;61m" ANSI_BACKGROUND_256_62: str = "\u001b[48;5;62m" ANSI_BACKGROUND_256_63: str = "\u001b[48;5;63m" ANSI_BACKGROUND_256_64: str = "\u001b[48;5;64m" ANSI_BACKGROUND_256_65: str = "\u001b[48;5;65m" ANSI_BACKGROUND_256_66: str = "\u001b[48;5;66m" ANSI_BACKGROUND_256_67: str = "\u001b[48;5;67m" ANSI_BACKGROUND_256_68: str = "\u001b[48;5;68m" ANSI_BACKGROUND_256_69: str = "\u001b[48;5;69m" ANSI_BACKGROUND_256_70: str = "\u001b[48;5;70m" ANSI_BACKGROUND_256_71: str = "\u001b[48;5;71m" 
ANSI_BACKGROUND_256_72: str = "\u001b[48;5;72m" ANSI_BACKGROUND_256_73: str = "\u001b[48;5;73m" ANSI_BACKGROUND_256_74: str = "\u001b[48;5;74m" ANSI_BACKGROUND_256_75: str = "\u001b[48;5;75m" ANSI_BACKGROUND_256_76: str = "\u001b[48;5;76m" ANSI_BACKGROUND_256_77: str = "\u001b[48;5;77m" ANSI_BACKGROUND_256_78: str = "\u001b[48;5;78m" ANSI_BACKGROUND_256_79: str = "\u001b[48;5;79m" ANSI_BACKGROUND_256_80: str = "\u001b[48;5;80m" ANSI_BACKGROUND_256_81: str = "\u001b[48;5;81m" ANSI_BACKGROUND_256_82: str = "\u001b[48;5;82m" ANSI_BACKGROUND_256_83: str = "\u001b[48;5;83m" ANSI_BACKGROUND_256_84: str = "\u001b[48;5;84m" ANSI_BACKGROUND_256_85: str = "\u001b[48;5;85m" ANSI_BACKGROUND_256_86: str = "\u001b[48;5;86m" ANSI_BACKGROUND_256_87: str = "\u001b[48;5;87m" ANSI_BACKGROUND_256_88: str = "\u001b[48;5;88m" ANSI_BACKGROUND_256_89: str = "\u001b[48;5;89m" ANSI_BACKGROUND_256_90: str = "\u001b[48;5;90m" ANSI_BACKGROUND_256_91: str = "\u001b[48;5;91m" ANSI_BACKGROUND_256_92: str = "\u001b[48;5;92m" ANSI_BACKGROUND_256_93: str = "\u001b[48;5;93m" ANSI_BACKGROUND_256_94: str = "\u001b[48;5;94m" ANSI_BACKGROUND_256_95: str = "\u001b[48;5;95m" ANSI_BACKGROUND_256_96: str = "\u001b[48;5;96m" ANSI_BACKGROUND_256_97: str = "\u001b[48;5;97m" ANSI_BACKGROUND_256_98: str = "\u001b[48;5;98m" ANSI_BACKGROUND_256_99: str = "\u001b[48;5;99m" ANSI_BACKGROUND_256_100: str = "\u001b[48;5;100m" ANSI_BACKGROUND_256_101: str = "\u001b[48;5;101m" ANSI_BACKGROUND_256_102: str = "\u001b[48;5;102m" ANSI_BACKGROUND_256_103: str = "\u001b[48;5;103m" ANSI_BACKGROUND_256_104: str = "\u001b[48;5;104m" ANSI_BACKGROUND_256_105: str = "\u001b[48;5;105m" ANSI_BACKGROUND_256_106: str = "\u001b[48;5;106m" ANSI_BACKGROUND_256_107: str = "\u001b[48;5;107m" ANSI_BACKGROUND_256_108: str = "\u001b[48;5;108m" ANSI_BACKGROUND_256_109: str = "\u001b[48;5;109m" ANSI_BACKGROUND_256_110: str = "\u001b[48;5;110m" ANSI_BACKGROUND_256_111: str = "\u001b[48;5;111m" ANSI_BACKGROUND_256_112: str = "\u001b[48;5;112m" 
ANSI_BACKGROUND_256_113: str = "\u001b[48;5;113m" ANSI_BACKGROUND_256_114: str = "\u001b[48;5;114m" ANSI_BACKGROUND_256_115: str = "\u001b[48;5;115m" ANSI_BACKGROUND_256_116: str = "\u001b[48;5;116m" ANSI_BACKGROUND_256_117: str = "\u001b[48;5;117m" ANSI_BACKGROUND_256_118: str = "\u001b[48;5;118m" ANSI_BACKGROUND_256_119: str = "\u001b[48;5;119m" ANSI_BACKGROUND_256_120: str = "\u001b[48;5;120m" ANSI_BACKGROUND_256_121: str = "\u001b[48;5;121m" ANSI_BACKGROUND_256_122: str = "\u001b[48;5;122m" ANSI_BACKGROUND_256_123: str = "\u001b[48;5;123m" ANSI_BACKGROUND_256_124: str = "\u001b[48;5;124m" ANSI_BACKGROUND_256_125: str = "\u001b[48;5;125m" ANSI_BACKGROUND_256_126: str = "\u001b[48;5;126m" ANSI_BACKGROUND_256_127: str = "\u001b[48;5;127m" ANSI_BACKGROUND_256_128: str = "\u001b[48;5;128m" ANSI_BACKGROUND_256_129: str = "\u001b[48;5;129m" ANSI_BACKGROUND_256_130: str = "\u001b[48;5;130m" ANSI_BACKGROUND_256_131: str = "\u001b[48;5;131m" ANSI_BACKGROUND_256_132: str = "\u001b[48;5;132m" ANSI_BACKGROUND_256_133: str = "\u001b[48;5;133m" ANSI_BACKGROUND_256_134: str = "\u001b[48;5;134m" ANSI_BACKGROUND_256_135: str = "\u001b[48;5;135m" ANSI_BACKGROUND_256_136: str = "\u001b[48;5;136m" ANSI_BACKGROUND_256_137: str = "\u001b[48;5;137m" ANSI_BACKGROUND_256_138: str = "\u001b[48;5;138m" ANSI_BACKGROUND_256_139: str = "\u001b[48;5;139m" ANSI_BACKGROUND_256_140: str = "\u001b[48;5;140m" ANSI_BACKGROUND_256_141: str = "\u001b[48;5;141m" ANSI_BACKGROUND_256_142: str = "\u001b[48;5;142m" ANSI_BACKGROUND_256_143: str = "\u001b[48;5;143m" ANSI_BACKGROUND_256_144: str = "\u001b[48;5;144m" ANSI_BACKGROUND_256_145: str = "\u001b[48;5;145m" ANSI_BACKGROUND_256_146: str = "\u001b[48;5;146m" ANSI_BACKGROUND_256_147: str = "\u001b[48;5;147m" ANSI_BACKGROUND_256_148: str = "\u001b[48;5;148m" ANSI_BACKGROUND_256_149: str = "\u001b[48;5;149m" ANSI_BACKGROUND_256_150: str = "\u001b[48;5;150m" ANSI_BACKGROUND_256_151: str = "\u001b[48;5;151m" ANSI_BACKGROUND_256_152: str = "\u001b[48;5;152m" 
# ANSI escape sequences selecting 256-color-palette backgrounds
# (SGR "48;5;<n>m") for palette indices 153 through 255.
ANSI_BACKGROUND_256_153: str = "\u001b[48;5;153m"
ANSI_BACKGROUND_256_154: str = "\u001b[48;5;154m"
ANSI_BACKGROUND_256_155: str = "\u001b[48;5;155m"
ANSI_BACKGROUND_256_156: str = "\u001b[48;5;156m"
ANSI_BACKGROUND_256_157: str = "\u001b[48;5;157m"
ANSI_BACKGROUND_256_158: str = "\u001b[48;5;158m"
ANSI_BACKGROUND_256_159: str = "\u001b[48;5;159m"
ANSI_BACKGROUND_256_160: str = "\u001b[48;5;160m"
ANSI_BACKGROUND_256_161: str = "\u001b[48;5;161m"
ANSI_BACKGROUND_256_162: str = "\u001b[48;5;162m"
ANSI_BACKGROUND_256_163: str = "\u001b[48;5;163m"
ANSI_BACKGROUND_256_164: str = "\u001b[48;5;164m"
ANSI_BACKGROUND_256_165: str = "\u001b[48;5;165m"
ANSI_BACKGROUND_256_166: str = "\u001b[48;5;166m"
ANSI_BACKGROUND_256_167: str = "\u001b[48;5;167m"
ANSI_BACKGROUND_256_168: str = "\u001b[48;5;168m"
ANSI_BACKGROUND_256_169: str = "\u001b[48;5;169m"
ANSI_BACKGROUND_256_170: str = "\u001b[48;5;170m"
ANSI_BACKGROUND_256_171: str = "\u001b[48;5;171m"
ANSI_BACKGROUND_256_172: str = "\u001b[48;5;172m"
ANSI_BACKGROUND_256_173: str = "\u001b[48;5;173m"
ANSI_BACKGROUND_256_174: str = "\u001b[48;5;174m"
ANSI_BACKGROUND_256_175: str = "\u001b[48;5;175m"
ANSI_BACKGROUND_256_176: str = "\u001b[48;5;176m"
ANSI_BACKGROUND_256_177: str = "\u001b[48;5;177m"
ANSI_BACKGROUND_256_178: str = "\u001b[48;5;178m"
ANSI_BACKGROUND_256_179: str = "\u001b[48;5;179m"
ANSI_BACKGROUND_256_180: str = "\u001b[48;5;180m"
ANSI_BACKGROUND_256_181: str = "\u001b[48;5;181m"
ANSI_BACKGROUND_256_182: str = "\u001b[48;5;182m"
ANSI_BACKGROUND_256_183: str = "\u001b[48;5;183m"
ANSI_BACKGROUND_256_184: str = "\u001b[48;5;184m"
ANSI_BACKGROUND_256_185: str = "\u001b[48;5;185m"
ANSI_BACKGROUND_256_186: str = "\u001b[48;5;186m"
ANSI_BACKGROUND_256_187: str = "\u001b[48;5;187m"
ANSI_BACKGROUND_256_188: str = "\u001b[48;5;188m"
ANSI_BACKGROUND_256_189: str = "\u001b[48;5;189m"
ANSI_BACKGROUND_256_190: str = "\u001b[48;5;190m"
ANSI_BACKGROUND_256_191: str = "\u001b[48;5;191m"
ANSI_BACKGROUND_256_192: str = "\u001b[48;5;192m"
ANSI_BACKGROUND_256_193: str = "\u001b[48;5;193m"
ANSI_BACKGROUND_256_194: str = "\u001b[48;5;194m"
ANSI_BACKGROUND_256_195: str = "\u001b[48;5;195m"
ANSI_BACKGROUND_256_196: str = "\u001b[48;5;196m"
ANSI_BACKGROUND_256_197: str = "\u001b[48;5;197m"
ANSI_BACKGROUND_256_198: str = "\u001b[48;5;198m"
ANSI_BACKGROUND_256_199: str = "\u001b[48;5;199m"
ANSI_BACKGROUND_256_200: str = "\u001b[48;5;200m"
ANSI_BACKGROUND_256_201: str = "\u001b[48;5;201m"
ANSI_BACKGROUND_256_202: str = "\u001b[48;5;202m"
ANSI_BACKGROUND_256_203: str = "\u001b[48;5;203m"
ANSI_BACKGROUND_256_204: str = "\u001b[48;5;204m"
ANSI_BACKGROUND_256_205: str = "\u001b[48;5;205m"
ANSI_BACKGROUND_256_206: str = "\u001b[48;5;206m"
ANSI_BACKGROUND_256_207: str = "\u001b[48;5;207m"
ANSI_BACKGROUND_256_208: str = "\u001b[48;5;208m"
ANSI_BACKGROUND_256_209: str = "\u001b[48;5;209m"
ANSI_BACKGROUND_256_210: str = "\u001b[48;5;210m"
ANSI_BACKGROUND_256_211: str = "\u001b[48;5;211m"
ANSI_BACKGROUND_256_212: str = "\u001b[48;5;212m"
ANSI_BACKGROUND_256_213: str = "\u001b[48;5;213m"
ANSI_BACKGROUND_256_214: str = "\u001b[48;5;214m"
ANSI_BACKGROUND_256_215: str = "\u001b[48;5;215m"
ANSI_BACKGROUND_256_216: str = "\u001b[48;5;216m"
ANSI_BACKGROUND_256_217: str = "\u001b[48;5;217m"
ANSI_BACKGROUND_256_218: str = "\u001b[48;5;218m"
ANSI_BACKGROUND_256_219: str = "\u001b[48;5;219m"
ANSI_BACKGROUND_256_220: str = "\u001b[48;5;220m"
ANSI_BACKGROUND_256_221: str = "\u001b[48;5;221m"
ANSI_BACKGROUND_256_222: str = "\u001b[48;5;222m"
ANSI_BACKGROUND_256_223: str = "\u001b[48;5;223m"
ANSI_BACKGROUND_256_224: str = "\u001b[48;5;224m"
ANSI_BACKGROUND_256_225: str = "\u001b[48;5;225m"
ANSI_BACKGROUND_256_226: str = "\u001b[48;5;226m"
ANSI_BACKGROUND_256_227: str = "\u001b[48;5;227m"
ANSI_BACKGROUND_256_228: str = "\u001b[48;5;228m"
ANSI_BACKGROUND_256_229: str = "\u001b[48;5;229m"
ANSI_BACKGROUND_256_230: str = "\u001b[48;5;230m"
ANSI_BACKGROUND_256_231: str = "\u001b[48;5;231m"
ANSI_BACKGROUND_256_232: str = "\u001b[48;5;232m"
ANSI_BACKGROUND_256_233: str = "\u001b[48;5;233m"
ANSI_BACKGROUND_256_234: str = "\u001b[48;5;234m"
ANSI_BACKGROUND_256_235: str = "\u001b[48;5;235m"
ANSI_BACKGROUND_256_236: str = "\u001b[48;5;236m"
ANSI_BACKGROUND_256_237: str = "\u001b[48;5;237m"
ANSI_BACKGROUND_256_238: str = "\u001b[48;5;238m"
ANSI_BACKGROUND_256_239: str = "\u001b[48;5;239m"
ANSI_BACKGROUND_256_240: str = "\u001b[48;5;240m"
ANSI_BACKGROUND_256_241: str = "\u001b[48;5;241m"
ANSI_BACKGROUND_256_242: str = "\u001b[48;5;242m"
ANSI_BACKGROUND_256_243: str = "\u001b[48;5;243m"
ANSI_BACKGROUND_256_244: str = "\u001b[48;5;244m"
ANSI_BACKGROUND_256_245: str = "\u001b[48;5;245m"
ANSI_BACKGROUND_256_246: str = "\u001b[48;5;246m"
ANSI_BACKGROUND_256_247: str = "\u001b[48;5;247m"
ANSI_BACKGROUND_256_248: str = "\u001b[48;5;248m"
ANSI_BACKGROUND_256_249: str = "\u001b[48;5;249m"
ANSI_BACKGROUND_256_250: str = "\u001b[48;5;250m"
ANSI_BACKGROUND_256_251: str = "\u001b[48;5;251m"
ANSI_BACKGROUND_256_252: str = "\u001b[48;5;252m"
ANSI_BACKGROUND_256_253: str = "\u001b[48;5;253m"
ANSI_BACKGROUND_256_254: str = "\u001b[48;5;254m"
ANSI_BACKGROUND_256_255: str = "\u001b[48;5;255m"
47.723592
52
0.756557
4,897
27,107
3.854809
0.109863
0.233512
0.230545
0.149176
0
0
0
0
0
0
0
0.299859
0.082709
27,107
567
53
47.80776
0.45932
0.005829
0
0
0
0
0.311333
0
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
73dc98fa4a40013b6702b97f9295f12338fa55ef
135
py
Python
ethmeet/__init__.py
sourcerer2/bBot
9992d50f05e097d43d29ef9fc1ea39c1a0608860
[ "Apache-2.0" ]
1
2021-01-05T05:08:38.000Z
2021-01-05T05:08:38.000Z
ethmeet/__init__.py
sourcerer2/bBot
9992d50f05e097d43d29ef9fc1ea39c1a0608860
[ "Apache-2.0" ]
2
2021-06-03T00:00:05.000Z
2021-06-03T00:00:48.000Z
ethmeet/__init__.py
sourcerer2/browserBot
9992d50f05e097d43d29ef9fc1ea39c1a0608860
[ "Apache-2.0" ]
null
null
null
from .attend import AttendGoogle, AttendZoom from .driver import Driver from .create import CreateGoogle from .login import LoginGoogle
33.75
44
0.844444
17
135
6.705882
0.588235
0
0
0
0
0
0
0
0
0
0
0
0.118519
135
4
45
33.75
0.957983
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
fb4e4fd0a8584c62f6fbaeedaa31af253333a34b
145
py
Python
setup.py
mbenadda/django-cas-mb
081779f89397cd7caac4dd2077d8280955987b9d
[ "MIT" ]
null
null
null
setup.py
mbenadda/django-cas-mb
081779f89397cd7caac4dd2077d8280955987b9d
[ "MIT" ]
null
null
null
setup.py
mbenadda/django-cas-mb
081779f89397cd7caac4dd2077d8280955987b9d
[ "MIT" ]
null
null
null
#!/usr/bin/env python
"""Packaging script: declares the package data shipped with django_cas_ng."""
import codecs  # present in the original file; kept even though unused here

from setuptools import setup

setup(
    package_data={
        "django_cas_ng": [
            "locale/*/LC_MESSAGES/*",
            "py.typed",
        ],
    },
)
20.714286
77
0.731034
21
145
4.857143
0.904762
0
0
0
0
0
0
0
0
0
0
0
0.089655
145
6
78
24.166667
0.772727
0.137931
0
0
0
0
0.346774
0.177419
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
fb653d352e33b65ee0f314713ea40a4d70f82297
59
py
Python
lynx_code/quark_hash.py
enkrypter/Lynx-wallet
166b7e5810f017a6e12bf96e54b0d44767b2a901
[ "MIT" ]
2
2019-09-19T10:57:19.000Z
2019-10-29T20:39:26.000Z
lynx_code/quark_hash.py
enkrypter/Lynx-wallet
166b7e5810f017a6e12bf96e54b0d44767b2a901
[ "MIT" ]
4
2019-11-17T17:40:13.000Z
2020-01-22T12:13:02.000Z
lynx_code/quark_hash.py
enkrypter/Lynx-wallet
166b7e5810f017a6e12bf96e54b0d44767b2a901
[ "MIT" ]
2
2019-10-04T01:58:18.000Z
2019-10-21T02:06:04.000Z
# -*- coding: utf-8 -*- from quark_hash import getPoWHash
14.75
33
0.677966
8
59
4.875
1
0
0
0
0
0
0
0
0
0
0
0.020408
0.169492
59
3
34
19.666667
0.77551
0.355932
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
fb94dd69659a59bc048a3618189431e3691f4906
27
py
Python
rsw/version.py
metaperl/revshareworks
923daad4fd30ca2142d2239fd4638c659eb66aa1
[ "MIT" ]
null
null
null
rsw/version.py
metaperl/revshareworks
923daad4fd30ca2142d2239fd4638c659eb66aa1
[ "MIT" ]
null
null
null
rsw/version.py
metaperl/revshareworks
923daad4fd30ca2142d2239fd4638c659eb66aa1
[ "MIT" ]
null
null
null
# Date-style (YYYY.MM.DD) package version string.
__version__ = '2015.12.24'
13.5
26
0.703704
4
27
3.75
1
0
0
0
0
0
0
0
0
0
0
0.333333
0.111111
27
1
27
27
0.291667
0
0
0
0
0
0.37037
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
fbaf5efdbe3ee0b83e91d9aea1acd650c3c92d0b
252
py
Python
__checkShellExecutionMode.py
simdevex/01.Basics
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
[ "MIT" ]
null
null
null
__checkShellExecutionMode.py
simdevex/01.Basics
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
[ "MIT" ]
null
null
null
__checkShellExecutionMode.py
simdevex/01.Basics
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
[ "MIT" ]
null
null
null
'''
A Python program to determine whether a Python shell is executing in
32-bit or 64-bit mode on the OS.
'''
import platform
import struct

# Both lines report the interpreter's bitness and should agree:
# platform.architecture() yields e.g. '64bit', while calcsize("P")
# is the pointer size in bytes, so * 8 prints 32 or 64.
print(platform.architecture()[0])
print(struct.calcsize("P") * 8)
31.5
89
0.738095
44
252
4.227273
0.727273
0.075269
0.096774
0.16129
0
0
0
0
0
0
0
0.066667
0.166667
252
7
90
36
0.819048
0.603175
0
0
0
0
0.01087
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0.666667
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
fbb2f7f2e6297504001c334ed9d23e412104b41c
101
py
Python
tests/test_import.py
yetzu/magics-python
6562398beac6e9dea14525f0a13cd648130013fa
[ "Apache-2.0" ]
1
2021-12-23T12:52:24.000Z
2021-12-23T12:52:24.000Z
tests/test_import.py
yetzu/magics-python
6562398beac6e9dea14525f0a13cd648130013fa
[ "Apache-2.0" ]
null
null
null
tests/test_import.py
yetzu/magics-python
6562398beac6e9dea14525f0a13cd648130013fa
[ "Apache-2.0" ]
null
null
null
from importlib import util


def test_import():
    """The Magics extension must be discoverable under its own module name."""
    spec = util.find_spec("Magics")
    assert spec.name == "Magics"
16.833333
52
0.712871
14
101
5
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.158416
101
5
53
20.2
0.823529
0
0
0
0
0
0.118812
0
0
0
0
0
0.333333
1
0.333333
true
0
0.666667
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
0
0
0
5
83fe14a6cb20c33295972738b4d1d4d8b1e71e27
112
py
Python
manabi/apps/utils/__init__.py
aehlke/manabi
1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b
[ "MIT" ]
14
2015-10-03T07:34:28.000Z
2021-09-20T07:10:29.000Z
manabi/apps/utils/__init__.py
aehlke/manabi
1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b
[ "MIT" ]
23
2019-10-25T08:47:23.000Z
2022-01-30T02:00:45.000Z
manabi/apps/utils/__init__.py
aehlke/manabi
1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b
[ "MIT" ]
7
2016-10-04T08:10:36.000Z
2021-09-20T07:10:33.000Z
#from django.template.base import add_to_builtins #add_to_builtins('manabi.apps.utils.templatetags.dictaccess')
37.333333
61
0.848214
16
112
5.6875
0.8125
0.10989
0.285714
0
0
0
0
0
0
0
0
0
0.044643
112
2
62
56
0.850467
0.964286
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
f7aed8849a07e00fa202151150b213d26e494b3e
56
py
Python
bitmovin/__init__.py
camberbridge/bitmovin-python
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
[ "Unlicense" ]
44
2016-12-12T17:37:23.000Z
2021-03-03T09:48:48.000Z
bitmovin/__init__.py
camberbridge/bitmovin-python
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
[ "Unlicense" ]
38
2017-01-09T14:45:45.000Z
2022-02-27T18:04:33.000Z
bitmovin/__init__.py
camberbridge/bitmovin-python
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
[ "Unlicense" ]
27
2017-02-02T22:49:31.000Z
2019-11-21T07:04:57.000Z
from .bitmovin import Bitmovin from .resources import *
18.666667
30
0.803571
7
56
6.428571
0.571429
0
0
0
0
0
0
0
0
0
0
0
0.142857
56
2
31
28
0.9375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f7ca87937f0f8de268ce3f9cd9d9575399e7c763
193
py
Python
actions/lib/downtimes.py
userlocalhost/stackstorm-datadog
6c70d6023f63e6d5d805ceb6dd3bc1edeea8123d
[ "Apache-2.0" ]
164
2015-01-17T16:08:33.000Z
2021-08-03T02:34:07.000Z
actions/lib/downtimes.py
userlocalhost/stackstorm-datadog
6c70d6023f63e6d5d805ceb6dd3bc1edeea8123d
[ "Apache-2.0" ]
442
2015-01-01T11:19:01.000Z
2017-09-06T23:26:17.000Z
actions/lib/downtimes.py
userlocalhost/stackstorm-datadog
6c70d6023f63e6d5d805ceb6dd3bc1edeea8123d
[ "Apache-2.0" ]
202
2015-01-13T00:37:40.000Z
2020-11-07T11:30:10.000Z
from base import DatadogBaseAction
from datadog import api


class DatadogScheduleMonitorDowntime(DatadogBaseAction):
    """Action that schedules a monitor downtime through the Datadog API."""

    def _run(self, **kwargs):
        # Pass every action parameter straight through to the API client.
        return api.Downtime.create(**kwargs)
24.125
56
0.772021
20
193
7.4
0.75
0
0
0
0
0
0
0
0
0
0
0
0.150259
193
7
57
27.571429
0.902439
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0.2
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
f7df3c5ec150c9cab1314d08eaf8eafea92c78b5
413
py
Python
stuff/py/pycore/utils.py
clarete/memetalk
2604ba4b59b1f4e945557b1af4b9a829bcdc7501
[ "BSD-2-Clause" ]
null
null
null
stuff/py/pycore/utils.py
clarete/memetalk
2604ba4b59b1f4e945557b1af4b9a829bcdc7501
[ "BSD-2-Clause" ]
null
null
null
stuff/py/pycore/utils.py
clarete/memetalk
2604ba4b59b1f4e945557b1af4b9a829bcdc7501
[ "BSD-2-Clause" ]
null
null
null
from pdb import set_trace as br SEP = '_' def behavior_label(name): return name + SEP + 'Behavior' def cclass_label(name): return name + SEP + 'CompiledClass' def class_label(name): return name # + SEP + "Class" def cfun_label(owner_name, name): return owner_name + SEP + name + SEP + 'CompiledFunction' def fun_label(cfun_label, name): return cfun_label + SEP + name + SEP + "Function"
21.736842
61
0.682809
58
413
4.672414
0.362069
0.154982
0.221402
0.210332
0.243542
0
0
0
0
0
0
0
0.208232
413
18
62
22.944444
0.828746
0.03632
0
0
0
0
0.116162
0
0
0
0
0
0
1
0.416667
false
0
0.083333
0.416667
0.916667
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
f7ea6b2e892e45ba4a2968716b36c2c50388693c
94,639
py
Python
src/genie/libs/parser/iosxr/tests/test_show_xconnect.py
alsyz/genieparser
e80a219851aa074482f9cccee1cb9fb42216e225
[ "Apache-2.0" ]
1
2021-10-01T05:41:06.000Z
2021-10-01T05:41:06.000Z
src/genie/libs/parser/iosxr/tests/test_show_xconnect.py
alsyz/genieparser
e80a219851aa074482f9cccee1cb9fb42216e225
[ "Apache-2.0" ]
4
2021-03-24T04:25:38.000Z
2021-03-28T04:31:21.000Z
src/genie/libs/parser/iosxr/tests/test_show_xconnect.py
alsyz/genieparser
e80a219851aa074482f9cccee1cb9fb42216e225
[ "Apache-2.0" ]
1
2021-04-05T22:05:15.000Z
2021-04-05T22:05:15.000Z
# Python import unittest from unittest.mock import Mock from pyats.topology import Device from genie.metaparser.util.exceptions import SchemaEmptyParserError from genie.libs.parser.iosxr.show_xconnect import (ShowL2vpnXconnect, ShowL2vpnXconnectDetail, ShowL2vpnXconnectSummary, ShowL2VpnXconnectBrief, ShowL2vpnXconnectMp2mpDetail) # ========================================== # Unit test for 'show l2vpn xconnect brief' # ========================================== class TestShowL2vpnXconnectBrief(unittest.TestCase): '''Unit test for 'show l2vpn xconnect brief' ''' maxDiff = None empty_output = {'execute.return_value': ''} golden_parsed_output1 = { 'atom': { 'like_to_like': { 'efp': { 'down': 0, 'unr': 0, 'up': 10 }, 'total': { 'down': 0, 'unr': 0, 'up': 10 } }, 'total': { 'down': 0, 'unr': 0, 'up': 10 } }, 'locally_switching': { 'like_to_like': { 'efp': { 'down': 0, 'unr': 0, 'up': 3 }, 'efp_invalid_ac': { 'down': 0, 'unr': 1, 'up': 0 }, 'invalid_ac': { 'down': 0, 'unr': 1, 'up': 0 }, 'total': { 'down': 0, 'unr': 2, 'up': 3 } }, 'total': { 'down': 0, 'unr': 2, 'up': 3 } } } golden_output1 = { 'execute.return_value': ''' RP/0/RP0/CPU0:ios# show l2vpn xconnect brief Mon Sep 19 10:52:27.818 UTC Locally Switching Like-to-Like UP DOWN UNR Invalid AC 0 0 1 EFP/Invalid AC 0 0 1 EFP 3 0 0 Total 3 0 2 Total 3 0 2 AToM Like-to-Like UP DOWN UNR EFP 10 0 0 Total 10 0 0 Total 10 0 0 ''' } golden_parsed_output2 = { 'atom': { 'like_to_like': { 'efp': { 'down': 0, 'unr': 0, 'up': 32 }, 'total': { 'down': 0, 'unr': 0, 'up': 32 } }, 'total': { 'down': 0, 'unr': 0, 'up': 32 } }, 'locally_switching': { 'like_to_like': { 'ether': { 'down': 0, 'unr': 0, 'up': 1 }, 'total': { 'down': 0, 'unr': 0, 'up': 1 } }, 'total': { 'down': 0, 'unr': 0, 'up': 1 } } } golden_output2 = { 'execute.return_value': ''' RP/0/RP0/CPU0:SIT-540#show l2vpn xconnect brief Sat Aug 4 14:48:34.079 IST Locally Switching Like-to-Like UP DOWN UNR Ether 1 0 0 Total 1 0 0 Total 1 0 0 AToM Like-to-Like UP DOWN UNR EFP 32 0 
0 Total 32 0 0 Total 32 0 0 ''' } golden_parsed_output3 = {'total': {'down': 0, 'unr': 0, 'up': 0}} golden_output3 = { 'execute.return_value': ''' [2019-10-08 09:30:35,071] +++ R2_xr: executing command 'show l2vpn xconnect brief' +++ show l2vpn xconnect brief Tue Oct 8 16:30:05.044 UTC Total: 0 UP, 0 DOWN, 0 UNRESOLVED ''' } def test_empty(self): self.device = Mock(**self.empty_output) obj = ShowL2VpnXconnectBrief(device=self.device) with self.assertRaises(SchemaEmptyParserError): parsed_output = obj.parse() def test_golden1(self): self.device = Mock(**self.golden_output1) obj = ShowL2VpnXconnectBrief(device=self.device) parsed_output = obj.parse() self.assertEqual(parsed_output, self.golden_parsed_output1) def test_golden2(self): self.device = Mock(**self.golden_output2) obj = ShowL2VpnXconnectBrief(device=self.device) parsed_output = obj.parse() self.assertEqual(parsed_output, self.golden_parsed_output2) def test_golden3(self): self.device = Mock(**self.golden_output3) obj = ShowL2VpnXconnectBrief(device=self.device) parsed_output = obj.parse() self.assertEqual(parsed_output, self.golden_parsed_output3) # ================================================== # Unit test for 'show l2vpn xconnect' # ================================================== class TestShowL2vpnXconnect(unittest.TestCase): """Unit test for 'show l2vpn xconnect' """ device = Device(name='aDevice') empty_output = {'execute.return_value': ''} golden_parsed_output = { 'groups': { 'Test_XCONN_Group': { 'name': { '1000': { 'status': 'DN', 'segment1': { 'GigabitEthernet0/0/0/5.1000': { 'status': 'UP', 'segment2': { '10.4.1.206 1000': { 'status': 'DN', }, }, }, }, }, '2000': { 'status': 'DN', 'segment1': { 'GigabitEthernet0/0/0/5.2000': { 'status': 'UP', 'segment2': { '10.4.1.206 2000': { 'status': 'DN', }, }, }, }, }, }, }, 'Test_XCONN_Group2': { 'name': { '3000': { 'status': 'UR', 'segment1': { 'GigabitEthernet0/0/0/5.3000': { 'status': 'UR', 'segment2': { '10.4.1.206 3000': { 'status': 'DN', }, }, 
}, }, }, }, }, }, } golden_output = {'execute.return_value': ''' XRv01_NUC# show l2vpn xconnect Legend: ST = State, UP = Up, DN = Down, AD = Admin Down, UR = Unresolved, SB = Standby, SR = Standby Ready, (PP) = Partially Programmed XConnect Segment 1 Segment 2 Group Name ST Description ST Description ST ---------------------- -------------------- --------------------------- Test_XCONN_Group 1000 DN Gi0/0/0/5.1000 UP 10.4.1.206 1000 DN --------------------------------------------------------------------------- Test_XCONN_Group 2000 DN Gi0/0/0/5.2000 UP 10.4.1.206 2000 DN --------------------------------------------------------------------------- Test_XCONN_Group2 3000 UR Gi0/0/0/5.3000 UR 10.4.1.206 3000 DN --------------------------------------------------------------------------- '''} golden_parsed_output2 = { 'groups': { 'L2TPV3_V4_XC_GRP': { 'name': { 'L2TPV3_P2P_1': { 'status': 'UP', 'segment1': { 'GigabitEthernet0/2/0/1.2': { 'status': 'UP', 'segment2': { '10.154.26.26 100': { 'status': 'UP', }, }, }, }, }, 'L2TPV3_P2P_2': { 'status': 'UP', 'segment1': { 'GigabitEthernet0/2/0/1.3': { 'status': 'UP', 'segment2': { '10.154.26.26 200': { 'status': 'UP', }, }, }, }, }, }, }, }, } golden_output2 = {'execute.return_value': ''' RP/0/RSP0/CPU0:router# show l2vpn xconnect Wed May 21 09:06:47.944 UTC Legend: ST = State, UP = Up, DN = Down, AD = Admin Down, UR = Unresolved, SB = Standby, SR = Standby Ready, (PP) = Partially Programmed XConnect Segment 1 Segment 2 Group Name ST Description ST Description ST ------------------------ ----------------------------- --------------------------- L2TPV3_V4_XC_GRP L2TPV3_P2P_1 UP Gi0/2/0/1.2 UP 10.154.26.26 100 UP -------------------------------------------------------------------------------------- L2TPV3_V4_XC_GRP L2TPV3_P2P_2 UP Gi0/2/0/1.3 UP 10.154.26.26 200 UP -------------------------------------------------------------------------------------- '''} golden_parsed_output3 = { 'groups': { 'pe1-to-pe2': { 'name': { 
'vpws_bl1_pe2': { 'segment1': { 'TenGigabitEthernet0/0/0/3/1.200': { 'segment2': { 'EVPN 12222,32222,10.4.1.1': { 'status': 'UP'} }, 'status': 'UP'} }, 'status': 'UP'}, 'vpws_pe1_pe1': { 'segment1': { 'TenGigabitEthernet0/0/0/3/1.100': { 'segment2': { 'EVPN 11111,31111,10.4.1.1': { 'status': 'UP'} }, 'status': 'UP'} }, 'status': 'UP'} } } } } golden_output3 = {'execute.return_value': ''' show l2vpn xconnect Fri Sep 27 17:02:50.459 EDT Legend: ST = State, UP = Up, DN = Down, AD = Admin Down, UR = Unresolved, SB = Standby, SR = Standby Ready, (PP) = Partially Programmed XConnect Segment 1 Segment 2 Group Name ST Description ST Description ST ------------------------ ----------------------------- ----------------------------- pe1-to-pe2 vpws_bl1_pe2 UP Te0/0/0/3/1.200 UP EVPN 12222,32222,10.4.1.1 UP ---------------------------------------------------------------------------------------- pe1-to-pe2 vpws_pe1_pe1 UP Te0/0/0/3/1.100 UP EVPN 11111,31111,10.4.1.1 UP ---------------------------------------------------------------------------------------- '''} golden_parsed_output4 = { 'groups': { 'genie_wqst': { 'name': { 'wsq_wqxt_ups2_cm2_21314': { 'status': 'UR', 'segment1': { 'Bundle-Ether2.61': { 'status': 'UR', 'segment2': { 'EVPN 21314,31314,10.4.1.1': { 'status': 'DN', }, }, }, }, }, }, }, 'genie_CM-QF-CF': { 'name': { 'G2-2-2-34-422': { 'status': 'UP', 'segment1': { 'GigabitEthernet2/2/2/34.422': { 'status': 'UP', 'segment2': { 'EVPN 3223,4112,10.1.21.93': { 'status': 'UP', }, }, }, }, }, }, }, 'genie_CM-3-EDQF': { 'name': { 'G2-2-2-34-322': { 'status': 'UP', 'segment1': { 'GigabitEthernet2/2/2/34.322': { 'status': 'UP', 'segment2': { '10.154.219.82 9593211': { 'status': 'UP', }, }, }, }, }, }, }, }, } golden_output4 = {'execute.return_value': ''' show l2vpn xconnect Mon Oct 7 16:22:44.651 EDT Legend: ST = State, UP = Up, DN = Down, AD = Admin Down, UR = Unresolved, SB = Standby, SR = Standby Ready, (PP) = Partially Programmed XConnect Segment 1 Segment 2 Group Name 
ST Description ST Description ST ------------------------ ----------------------------- ----------------------------- genie_wqst wsq_wqxt_ups2_cm2_21314 UR BE2.61 UR EVPN 21314,31314,10.4.1.1 DN ---------------------------------------------------------------------------------------- genie_CM-QF-CF G2-2-2-34-422 UP Gi2/2/2/34.422 UP EVPN 3223,4112,10.1.21.93 UP ---------------------------------------------------------------------------------------- genie_CM-3-EDQF G2-2-2-34-322 UP Gi2/2/2/34.322 UP 10.154.219.82 9593211 UP ---------------------------------------------------------------------------------------- '''} golden_parsed_output5 = { 'groups': { 'up-udpsf5-genie': { 'name': { 'up-udpsf5-genie': { 'status': 'UR', 'segment1': { '10.154.219.82 2015030201': { 'status': 'UR', 'segment2': { 'Nonexistent': { 'status': 'UR', }, }, }, }, }, }, }, 'up-udpsf2-genie': { 'name': { 'up-udpsf2-genie': { 'status': 'DN', 'segment1': { 'TenGigabitEthernet0/4/0/5': { 'status': 'UP', 'segment2': { '10.154.219.83 1152': { 'status': 'DN', }, }, }, }, }, }, }, 'UP-udpsf5genie-port': { 'name': { 'U-1-5-1-3': { 'status': 'UR', 'segment1': { '10.154.219.84 4293089094': { 'status': 'UR', 'segment2': { 'Nonexistent': { 'status': 'UR', }, }, }, }, }, }, }, }, } golden_output5 = {'execute.return_value': ''' show l2vpn xconnect Mon Oct 21 11:03:04.538 EDT Legend: ST = State, UP = Up, DN = Down, AD = Admin Down, UR = Unresolved, SB = Standby, SR = Standby Ready, (PP) = Partially Programmed XConnect Segment 1 Segment 2 Group Name ST Description ST Description ST ------------------------ ----------------------------- ----------------------------- up-udpsf5-genie up-udpsf5-genie UR 10.154.219.82 2015030201 UR Nonexistent UR ---------------------------------------------------------------------------------------- up-udpsf2-genie up-udpsf2-genie DN Te0/4/0/5 UP 10.154.219.83 1152 DN ---------------------------------------------------------------------------------------- UP-udpsf5genie-port 
U-1-5-1-3 UR 10.154.219.84 4293089094 UR Nonexistent UR ---------------------------------------------------------------------------------------- '''} golden_parsed_output6 = { "groups": { "vpws": { "name": { "vpws": { "status": "UR", "segment1": { "TenGigabitEthernet0/0/15/0": { "status": "UR", "segment2": { "EVPN 302,302,0.0.0.0": { "status": "DN" } } } } }, "vrp_vpws_2": { "status": "DN", "segment1": { "Bundle-Ether2.78": { "status": "UP", "segment2": { "EVPN 12345,67895,10.4.2.1": { "status": "DN" } } } } } } } } } golden_output6 = {'execute.return_value': ''' show l2vpn xconnect Thu Oct 24 11:40:08.221 EDT Legend: ST = State, UP = Up, DN = Down, AD = Admin Down, UR = Unresolved, SB = Standby, SR = Standby Ready, (PP) = Partially Programmed XConnect Segment 1 Segment 2 Group Name ST Description ST Description ST ------------------------ ----------------------------- ----------------------------- vpws vpws UR Te0/0/15/0 UR EVPN 302,302,0.0.0.0 DN ---------------------------------------------------------------------------------------- vpws vrp_vpws_2 DN BE2.78 UP EVPN 12345,67895,10.4.2.1 DN ---------------------------------------------------------------------------------------- '''} def test_empty(self): self.device = Mock(**self.empty_output) obj = ShowL2vpnXconnect(device=self.device) with self.assertRaises(SchemaEmptyParserError): parsed_output = obj.parse() def test_golden(self): self.maxDiff = None self.device = Mock(**self.golden_output) obj = ShowL2vpnXconnect(device=self.device) parsed_output = obj.parse() self.assertEqual(parsed_output, self.golden_parsed_output) def test_golden2(self): self.maxDiff = None self.device = Mock(**self.golden_output2) obj = ShowL2vpnXconnect(device=self.device) parsed_output = obj.parse() self.assertEqual(parsed_output, self.golden_parsed_output2) def test_golden3(self): self.maxDiff = None self.device = Mock(**self.golden_output3) obj = ShowL2vpnXconnect(device=self.device) parsed_output = obj.parse() 
self.assertEqual(parsed_output, self.golden_parsed_output3) def test_golden4(self): self.maxDiff = None self.device = Mock(**self.golden_output4) obj = ShowL2vpnXconnect(device=self.device) parsed_output = obj.parse() self.assertEqual(parsed_output, self.golden_parsed_output4) def test_golden5(self): self.maxDiff = None self.device = Mock(**self.golden_output5) obj = ShowL2vpnXconnect(device=self.device) parsed_output = obj.parse() self.assertEqual(parsed_output, self.golden_parsed_output5) def test_golden6(self): self.maxDiff = None self.device = Mock(**self.golden_output6) obj = ShowL2vpnXconnect(device=self.device) parsed_output = obj.parse() self.assertEqual(parsed_output, self.golden_parsed_output6) # ================================================== # Unit test for 'show l2vpn xconnect detail' # ================================================== class TestShowL2vpnXconnectDetail(unittest.TestCase): """Unit test for 'show l2vpn xconnect detail' """ device = Device(name='aDevice') empty_output = {'execute.return_value': ''} golden_parsed_output = { 'group': { 'tjub_xc': { 'xc': { 'siva_p2p': { 'state': 'down', 'interworking': 'none', 'monitor_session': { 'pw-span-test': { 'state': 'configured', }, }, 'ac': { 'GigabitEthernet1/5/1/2': { 'state': 'up', 'type': 'Ethernet', 'mtu': 2611, 'xc_id': '0x6111112', 'interworking': 'none', 'msti': 0, 'statistics': { 'packet_totals': { 'send': 100, }, 'byte_totals': { 'send': 20798, }, }, }, }, 'pw': { 'neighbor': { '10.19.2.2': { 'id': { 2: { 'state': 'down ( local ready )', 'pw_class': 'not set', 'xc_id': '0x6111112', 'encapsulation': 'MPLS', 'protocol': 'LDP', 'type': 'Ethernet', 'control_word': 'enabled', 'interworking': 'none', 'backup_disable_delay': 0, 'sequencing': 'not set', 'mpls': { 'label': { 'local': '41116', 'remote': 'unknown', }, 'group_id': { 'local': '0x6111411', 'remote': '1x1', }, 'interface': { 'local': 'GigabitEthernet1/5/1/2', 'remote': 'unknown', }, 'monitor_interface': { 'local': 'pw-span-test', 
'remote': 'GigabitEthernet1/4/1/2', }, 'mtu': { 'local': '2611', 'remote': 'unknown', }, 'control_word': { 'local': 'enabled', 'remote': 'unknown', }, 'pw_type': { 'local': 'Ethernet', 'remote': 'unknown', }, 'vccv_cv_type': { 'local': '1x3', 'remote': '1x1', 'local_type': ['LSP ping verification'], 'remote_type': ['none'], }, 'vccv_cc_type': { 'local': '1x4', 'remote': '1x1', 'local_type': ['control word', 'router alert label'], 'remote_type': ['none'], }, }, 'create_time': '21/11/2008 11:35:17 (11:64:42 ago)', 'last_time_status_changed': '21/01/2008 21:37:15 (01:10:34 ago)', 'statistics': { 'packet_totals': { 'receive': 0, }, 'byte_totals': { 'receive': 0, }, }, }, }, }, }, }, 'backup_pw': { 'neighbor': { '10.66.3.3': { 'id': { 3: { 'state': 'up ( established )', 'pw_class': 'not set', 'xc_id': '1x1', 'encapsulation': 'MPLS', 'protocol': 'LDP', 'type': 'Ethernet', 'control_word': 'enabled', 'interworking': 'none', 'backup_disable_delay': 0, 'sequencing': 'not set', 'mpls': { 'label': { 'local': '41117', 'remote': '27114', }, 'group_id': { 'local': 'unassigned', 'remote': '1x6111511', }, 'interface': { 'local': 'unknown', 'remote': 'GigabitEthernet1/5/1/3', }, 'mtu': { 'local': '2611', 'remote': '2611', }, 'control_word': { 'local': 'enabled', 'remote': 'enabled', }, 'pw_type': { 'local': 'Ethernet', 'remote': 'Ethernet', }, 'vccv_cv_type': { 'local': '1x3', 'remote': '1x3', 'local_type': ['LSP ping verification'], 'remote_type': ['LSP ping verification'], }, 'vccv_cc_type': { 'local': '1x4', 'remote': '1x4', 'local_type': ['control word', 'router alert label'], 'remote_type': ['control word', 'router alert label'], }, }, 'create_time': '21/11/2008 11:45:44 (00:32:54 ago)', 'last_time_status_changed': '20/11/2008 21:45:48 (00:44:49 ago)', 'statistics': { 'packet_totals': { 'receive': 0, }, 'byte_totals': { 'receive': 0, }, }, }, }, }, }, }, }, }, }, }, } golden_output = {'execute.return_value': ''' show l2vpn xconnect detail Sat Sep 28 10:09:46.728 UTC Group 
tjub_xc, XC siva_p2p, state is down; Interworking none Monitor-Session: pw-span-test, state is configured AC: GigabitEthernet1/5/1/2, state is up Type Ethernet MTU 2611; XC ID 0x6111112; interworking none; MSTi 0 Statistics: packet totals: send 100 byte totals: send 20798 PW: neighbor 10.19.2.2, PW ID 2, state is down ( local ready ) PW class not set, XC ID 0x6111112 Encapsulation MPLS, protocol LDP PW type Ethernet, control word enabled, interworking none PW backup disable delay 0 sec Sequencing not set MPLS Local Remote ------------ ------------------------------ ----------------------------- Label 41116 unknown Group ID 0x6111411 1x1 Interface GigabitEthernet1/5/1/2 unknown Interface pw-span-test GigabitEthernet1/4/1/2 MTU 2611 unknown Control word enabled unknown PW type Ethernet unknown VCCV CV type 1x3 1x1 (none) (LSP ping verification) VCCV CC type 1x4 1x1 (none) (control word) (router alert label) ------------ ------------------------------ ----------------------------- Create time: 21/11/2008 11:35:17 (11:64:42 ago) Last time status changed: 21/01/2008 21:37:15 (01:10:34 ago) Statistics: packet totals: receive 0 byte totals: receive 0 Backup PW: PW: neighbor 10.66.3.3, PW ID 3, state is up ( established ) Backup for neighbor 10.19.2.2 PW ID 2 ( active ) PW class not set, XC ID 1x1 Encapsulation MPLS, protocol LDP PW type Ethernet, control word enabled, interworking none PW backup disable delay 0 sec Sequencing not set MPLS Local Remote ------------ ------------------------------ ----------------------------- Label 41117 27114 Group ID unassigned 1x6111511 Interface unknown GigabitEthernet1/5/1/3 MTU 2611 2611 Control word enabled enabled PW type Ethernet Ethernet VCCV CV type 1x3 1x3 (LSP ping verification) (LSP ping verification) VCCV CC type 1x4 1x4 (control word) (control word) (router alert label) (router alert label) ------------ ------------------------------ ----------------------------- Backup PW for neighbor 10.19.2.2 PW ID 2 Create time: 
21/11/2008 11:45:44 (00:32:54 ago) Last time status changed: 20/11/2008 21:45:48 (00:44:49 ago) Statistics: packet totals: receive 0 byte totals: receive 0 '''} golden_parsed_output2 = { 'group': { 'qf2-to-tqjof2': { 'xc': { 'genie_bo3_vqt53_422': { 'state': 'up', 'interworking': 'none', 'ac': { 'TenGigE1/1/1/4/2.311': { 'state': 'up', 'type': 'VLAN', 'num_ranges': 1, 'vlan_ranges': ['311', '311'], 'rewrite_tags': '', 'mtu': 2611, 'xc_id': '1x3', 'interworking': 'none', 'statistics': { 'packet_totals': { 'receive': 4, 'send': 0, }, 'byte_totals': { 'receive': 291, 'send': 0, }, 'drops': { 'illegal_vlan': 0, 'illegal_length': 0, }, }, }, }, 'evpn': { 'neighbor': { '78.81.320.94': { 'id': { 'evi 21311': { 'state': 'up ( established )', 'ac_id': 41311, 'xc_id': '1xd1111112', 'encapsulation': 'MPLS', 'source_address': '78.81.320.99', 'encap_type': 'Ethernet', 'control_word': 'enabled', 'sequencing': 'not set', 'lsp': 'Up', 'evpn': { 'label': { 'local': '211124', 'remote': '211121', }, 'mtu': { 'local': '2611', 'remote': 'unknown', }, 'control_word': { 'local': 'enabled', 'remote': 'enabled', }, 'ac_id': { 'local': '31311', 'remote': '41311', }, 'evpn_type': { 'local': 'Ethernet', 'remote': 'Ethernet', }, }, 'create_time': '25/10/2019 14:17:28 (2x1e ago)', 'last_time_status_changed': '25/10/2019 15:13:33 (2x1e ago)', 'statistics': { 'packet_totals': { 'receive': 0, 'send': 4, }, 'byte_totals': { 'receive': 0, 'send': 291, }, }, }, }, }, }, }, }, }, }, 'qfw-to-tqjof2': { 'xc': { 'xstu_bo3_vqt2_211': { 'state': 'up', 'interworking': 'none', 'ac': { 'TenGigE1/1/1/4/2.211': { 'state': 'up', 'type': 'VLAN', 'num_ranges': 1, 'vlan_ranges': ['211', '211'], 'rewrite_tags': '', 'mtu': 2611, 'xc_id': '1x2', 'interworking': 'none', 'statistics': { 'packet_totals': { 'receive': 4, 'send': 0, }, 'byte_totals': { 'receive': 291, 'send': 0, }, 'drops': { 'illegal_vlan': 0, 'illegal_length': 0, }, }, }, }, 'evpn': { 'neighbor': { '78.81.321.93': { 'id': { 'evi 21211': { 'state': 'up ( 
established )', 'ac_id': 41211, 'xc_id': '1xd111113', 'encapsulation': 'MPLS', 'source_address': '78.81.321.99', 'encap_type': 'Ethernet', 'control_word': 'enabled', 'sequencing': 'not set', 'lsp': 'Up', 'evpn': { 'label': { 'local': '211123', 'remote': '211111', }, 'mtu': { 'local': '2611', 'remote': 'unknown', }, 'control_word': { 'local': 'enabled', 'remote': 'enabled', }, 'ac_id': { 'local': '31211', 'remote': '31211', }, 'evpn_type': { 'local': 'Ethernet', 'remote': 'Ethernet', }, }, 'create_time': '25/10/2019 15:10:17 (2x1e ago)', 'last_time_status_changed': '25/10/2019 15:15:33 (2x1e ago)', 'statistics': { 'packet_totals': { 'receive': 0, 'send': 4, }, 'byte_totals': { 'receive': 0, 'send': 291, }, }, }, }, }, }, }, }, }, }, }, } golden_output2 = {'execute.return_value': ''' show l2vpn xconnect detail Fri Oct 4 15:37:35.184 EDT Group qf2-to-tqjof2, XC genie_bo3_vqt53_422, state is up; Interworking none AC: TenGigE1/1/1/4/2.311, state is up Type VLAN; Num Ranges: 1 Rewrite Tags: [] VLAN ranges: [311, 311] MTU 2611; XC ID 1x3; interworking none Statistics: packets: received 4, sent 0 bytes: received 291, sent 0 drops: illegal VLAN 0, illegal length 0 EVPN: neighbor 78.81.320.94, PW ID: evi 21311, ac-id 41311, state is up ( established ) XC ID 1xd1111112 Encapsulation MPLS Source address 78.81.320.99 Encap type Ethernet, control word enabled Sequencing not set LSP : Up EVPN Local Remote ------------ ------------------------------ ----------------------------- Label 211124 211121 MTU 2611 unknown Control word enabled enabled AC ID 31311 41311 EVPN type Ethernet Ethernet ------------ ------------------------------ ----------------------------- Create time: 25/10/2019 14:17:28 (2x1e ago) Last time status changed: 25/10/2019 15:13:33 (2x1e ago) Statistics: packets: received 0, sent 4 bytes: received 0, sent 291 Group qfw-to-tqjof2, XC xstu_bo3_vqt2_211, state is up; Interworking none AC: TenGigE1/1/1/4/2.211, state is up Type VLAN; Num Ranges: 1 Rewrite Tags: [] 
VLAN ranges: [211, 211] MTU 2611; XC ID 1x2; interworking none Statistics: packets: received 4, sent 0 bytes: received 291, sent 0 drops: illegal VLAN 0, illegal length 0 EVPN: neighbor 78.81.321.93, PW ID: evi 21211, ac-id 41211, state is up ( established ) XC ID 1xd111113 Encapsulation MPLS Source address 78.81.321.99 Encap type Ethernet, control word enabled Sequencing not set LSP : Up EVPN Local Remote ------------ ------------------------------ ----------------------------- Label 211123 211111 MTU 2611 unknown Control word enabled enabled AC ID 31211 31211 EVPN type Ethernet Ethernet ------------ ------------------------------ ----------------------------- Create time: 25/10/2019 15:10:17 (2x1e ago) Last time status changed: 25/10/2019 15:15:33 (2x1e ago) Statistics: packets: received 0, sent 4 bytes: received 0, sent 291 '''} golden_parsed_output3 = { 'group': { 'CLIENT': { 'xc': { 'C1': { 'state': 'up', 'interworking': 'none', 'ac': { 'GigabitEthernet200/0/0/1.3109': { 'state': 'up, active in RG-ID 10', 'type': 'VLAN', 'num_ranges': 1, 'rewrite_tags': '', 'vlan_ranges': ['3109', '3109'], 'mtu': 1500, 'xc_id': '0x120000e', 'interworking': 'none', 'statistics': { 'packet_totals': { 'receive': 3711214, 'send': 3707556 }, 'byte_totals': { 'receive': 566159136, 'send': 793161693 }, 'drops': { 'illegal_vlan': 0, 'illegal_length': 0 } } } }, 'pw': { 'neighbor': { '192.168.1.1': { 'id': { 1384496: { 'state': 'up ( established )', 'pw_class': 'not set', 'xc_id': '0xa0000003', 'encapsulation': 'MPLS', 'protocol': 'LDP', 'source_address': '192.168.0.47', 'type': 'Ethernet', 'control_word': 'disabled', 'interworking': 'none', 'backup_disable_delay': 0, 'sequencing': 'not set', 'lsp': 'Up', 'status_tlv': 'not set', 'mpls': { 'label': { 'local': '24047', 'remote': '1784' }, 'group_id': { 'local': '0x4002580', 'remote': '0x7' }, 'interface': { 'local': 'GigabitEthernet200/0/0/1.3109', 'remote': 'C1' }, 'mtu': { 'local': '1500', 'remote': '1500' }, 'control_word': { 
'local': 'disabled', 'remote': 'disabled' }, 'pw_type': { 'local': 'Ethernet', 'remote': 'Ethernet' }, 'vccv_cv_type': { 'local': '0x2', 'remote': '0x2', 'local_type': ['LSP ping verification'], 'remote_type': ['LSP ping verification'] }, 'vccv_cc_type': { 'local': '0x6', 'remote': '0x2', 'local_type': ['router alert label', 'TTL expiry'], 'remote_type': ['router alert label'] } }, 'create_time': '08/12/2020 01:02:44 (2w0d ago)', 'last_time_status_changed': '12/12/2020 14:05:44 (1w3d ago)', 'statistics': { 'packet_totals': { 'receive': 3707556, 'send': 3711214 }, 'byte_totals': { 'receive': 793161693, 'send': 566159136 } } } } } } } }, 'C2': { 'state': 'up', 'interworking': 'none', 'ac': { 'GigabitEthernet100/0/0/5.3100': { 'state': 'up, active in RG-ID 10', 'type': 'VLAN', 'num_ranges': 1, 'rewrite_tags': '', 'vlan_ranges': ['3100', '3100'], 'mtu': 9198, 'xc_id': '0x1200008', 'interworking': 'none', 'statistics': { 'packet_totals': { 'receive': 0, 'send': 225798 }, 'byte_totals': { 'receive': 0, 'send': 13547880 }, 'drops': { 'illegal_vlan': 0, 'illegal_length': 0 } } } }, 'pw': { 'neighbor': { '192.168.0.51': { 'id': { 1542017: { 'state': 'up ( established )', 'pw_class': 'not set', 'xc_id': '0xa0000005', 'encapsulation': 'MPLS', 'protocol': 'LDP', 'source_address': '192.168.0.47', 'type': 'Ethernet', 'control_word': 'disabled', 'interworking': 'none', 'backup_disable_delay': 0, 'sequencing': 'not set', 'lsp': 'Up', 'status_tlv': 'not set', 'mpls': { 'label': { 'local': '24043', 'remote': '26256' }, 'group_id': { 'local': '0x4001980', 'remote': '0x4002b40' }, 'monitor_interface': { 'local': 'GigabitEthernet100/0/0/5.3100', 'remote': 'GigabitEthernet300/0/0/23.571' }, 'mtu': { 'local': '9198', 'remote': '9198' }, 'control_word': { 'local': 'disabled', 'remote': 'disabled' }, 'pw_type': { 'local': 'Ethernet', 'remote': 'Ethernet' }, 'vccv_cv_type': { 'local': '0x2', 'remote': '0x2', 'local_type': ['LSP ping verification'], 'remote_type': ['LSP ping verification'] 
}, 'vccv_cc_type': { 'local': '0x6', 'remote': '0x6', 'local_type': ['router alert label', 'TTL expiry'], 'remote_type': ['router alert label', 'TTL expiry'] } }, 'create_time': '08/12/2020 01:02:44 (2w0d ago)', 'last_time_status_changed': '11/12/2020 12:45:30 (1w4d ago)', 'statistics': { 'packet_totals': { 'receive': 225798, 'send': 0 }, 'byte_totals': { 'receive': 13547880, 'send': 0 } } } } } } }, 'backup_pw': { 'neighbor': { '192.168.0.52': { 'id': { 1542017: { 'state': 'standby ( all ready )', 'pw_class': 'not set', 'xc_id': '0xa0000007', 'encapsulation': 'MPLS', 'protocol': 'LDP', 'source_address': '192.168.0.47', 'type': 'Ethernet', 'control_word': 'disabled', 'interworking': 'none', 'sequencing': 'not set', 'lsp': 'Up', 'status_tlv': 'not set', 'mpls': { 'label': { 'local': '24044', 'remote': '25981' }, 'group_id': { 'local': '0x4001980', 'remote': '0x4002b00' }, 'interface': { 'local': 'GigabitEthernet100/0/0/5.3100', 'remote': 'GigabitEthernet300/0/0/23.571' }, 'mtu': { 'local': '9198', 'remote': '9198' }, 'control_word': { 'local': 'disabled', 'remote': 'disabled' }, 'pw_type': { 'local': 'Ethernet', 'remote': 'Ethernet' }, 'vccv_cv_type': { 'local': '0x2', 'remote': '0x2', 'local_type': ['LSP ping verification'], 'remote_type': ['LSP ping verification'] }, 'vccv_cc_type': { 'local': '0x6', 'remote': '0x6', 'local_type': ['router alert label', 'TTL expiry'], 'remote_type': ['router alert label', 'TTL expiry'] } }, 'create_time': '08/12/2020 01:02:44 (2w0d ago)', 'last_time_status_changed': '08/12/2020 01:06:55 (2w0d ago)' } } } } } }, 'C3': { 'state': 'up', 'interworking': 'none', 'ac': { 'GigabitEthernet100/0/0/6.3100': { 'state': 'up, active in RG-ID 10', 'type': 'VLAN', 'num_ranges': 1, 'rewrite_tags': '', 'vlan_ranges': ['3100', '3100'], 'mtu': 9198, 'xc_id': '0x120000a', 'interworking': 'none', 'statistics': { 'packet_totals': { 'receive': 50709266, 'send': 81925195 }, 'byte_totals': { 'receive': 20472200681, 'send': 29487822535 }, 'drops': { 
'illegal_vlan': 0, 'illegal_length': 0 } } } }, 'pw': { 'neighbor': { '192.168.0.51': { 'id': { 1542550: { 'state': 'up ( established )', 'pw_class': 'not set', 'xc_id': '0xa0000009', 'encapsulation': 'MPLS', 'protocol': 'LDP', 'source_address': '192.168.0.47', 'type': 'Ethernet', 'control_word': 'disabled', 'interworking': 'none', 'backup_disable_delay': 0, 'sequencing': 'not set', 'lsp': 'Up', 'status_tlv': 'not set', 'mpls': { 'label': { 'local': '24045', 'remote': '24029' }, 'group_id': { 'local': '0x4001940', 'remote': '0x4000180' }, 'monitor_interface': { 'local': 'GigabitEthernet100/0/0/6.3100', 'remote': 'TenGigE0/0/0/3.214' }, 'mtu': { 'local': '9198', 'remote': '9198' }, 'control_word': { 'local': 'disabled', 'remote': 'disabled' }, 'pw_type': { 'local': 'Ethernet', 'remote': 'Ethernet' }, 'vccv_cv_type': { 'local': '0x2', 'remote': '0x2', 'local_type': ['LSP ping verification'], 'remote_type': ['LSP ping verification'] }, 'vccv_cc_type': { 'local': '0x6', 'remote': '0x6', 'local_type': ['router alert label', 'TTL expiry'], 'remote_type': ['router alert label', 'TTL expiry'] } }, 'create_time': '08/12/2020 01:02:44 (2w0d ago)', 'last_time_status_changed': '08/12/2020 01:12:15 (2w0d ago)', 'statistics': { 'packet_totals': { 'receive': 81925195, 'send': 50709266 }, 'byte_totals': { 'receive': 29487822535, 'send': 20472200681 } } } } } } } } } } } } golden_output3 = { 'execute.return_value': ''' show l2vpn xconnect detail Tue Dec 22 16:40:24.524 AST Group CLIENT, XC C1, state is up; Interworking none AC: GigabitEthernet200/0/0/1.3109, state is up, active in RG-ID 10 Type VLAN; Num Ranges: 1 Rewrite Tags: [] VLAN ranges: [3109, 3109] MTU 1500; XC ID 0x120000e; interworking none Statistics: packets: received 3711214, sent 3707556 bytes: received 566159136, sent 793161693 drops: illegal VLAN 0, illegal length 0 PW: neighbor 192.168.1.1, PW ID 1384496, state is up ( established ) PW class not set, XC ID 0xa0000003 Encapsulation MPLS, protocol LDP Source address 
192.168.0.47 PW type Ethernet, control word disabled, interworking none PW backup disable delay 0 sec Sequencing not set LSP : Up PW Status TLV in use MPLS Local Remote ------------ ------------------------------ ----------------------------- Label 24047 1784 Group ID 0x4002580 0x7 Interface GigabitEthernet200/0/0/1.3109 C1 MTU 1500 1500 Control word disabled disabled PW type Ethernet Ethernet VCCV CV type 0x2 0x2 (LSP ping verification) (LSP ping verification) VCCV CC type 0x6 0x2 (router alert label) (router alert label) (TTL expiry) ------------ ------------------------------ ----------------------------- Incoming Status (PW Status TLV): Status code: 0x0 (Up) in Notification message Outgoing Status (PW Status TLV): Status code: 0x0 (Up) in Notification message MIB cpwVcIndex: 2684354563 Create time: 08/12/2020 01:02:44 (2w0d ago) Last time status changed: 12/12/2020 14:05:44 (1w3d ago) Last time PW went down: 12/12/2020 14:00:30 (1w3d ago) Statistics: packets: received 3707556, sent 3711214 bytes: received 793161693, sent 566159136 Group CLIENT, XC C2, state is up; Interworking none AC: GigabitEthernet100/0/0/5.3100, state is up, active in RG-ID 10 Type VLAN; Num Ranges: 1 Rewrite Tags: [] VLAN ranges: [3100, 3100] MTU 9198; XC ID 0x1200008; interworking none Statistics: packets: received 0, sent 225798 bytes: received 0, sent 13547880 drops: illegal VLAN 0, illegal length 0 PW: neighbor 192.168.0.51, PW ID 1542017, state is up ( established ) PW class not set, XC ID 0xa0000005 Encapsulation MPLS, protocol LDP Source address 192.168.0.47 PW type Ethernet, control word disabled, interworking none PW backup disable delay 0 sec Sequencing not set LSP : Up PW Status TLV in use MPLS Local Remote ------------ ------------------------------ ----------------------------- Label 24043 26256 Group ID 0x4001980 0x4002b40 Interface GigabitEthernet100/0/0/5.3100 GigabitEthernet300/0/0/23.571 MTU 9198 9198 Control word disabled disabled PW type Ethernet Ethernet VCCV CV type 
0x2 0x2 (LSP ping verification) (LSP ping verification) VCCV CC type 0x6 0x6 (router alert label) (router alert label) (TTL expiry) (TTL expiry) ------------ ------------------------------ ----------------------------- Incoming Status (PW Status TLV): Status code: 0x0 (Up) in Notification message Outgoing Status (PW Status TLV): Status code: 0x0 (Up) in Notification message MIB cpwVcIndex: 2684354565 Create time: 08/12/2020 01:02:44 (2w0d ago) Last time status changed: 11/12/2020 12:45:30 (1w4d ago) Last time PW went down: 11/12/2020 12:44:41 (1w4d ago) MAC withdraw messages: sent 0, received 0 Statistics: packets: received 225798, sent 0 bytes: received 13547880, sent 0 Backup PW: PW: neighbor 192.168.0.52, PW ID 1542017, state is standby ( all ready ) Backup for neighbor 192.168.0.51 PW ID 1542017 ( inactive ) PW class not set, XC ID 0xa0000007 Encapsulation MPLS, protocol LDP Source address 192.168.0.47 PW type Ethernet, control word disabled, interworking none Sequencing not set LSP : Up PW Status TLV in use MPLS Local Remote ------------ ------------------------------ ----------------------------- Label 24044 25981 Group ID 0x4001980 0x4002b00 Interface GigabitEthernet100/0/0/5.3100 GigabitEthernet300/0/0/23.571 MTU 9198 9198 Control word disabled disabled PW type Ethernet Ethernet VCCV CV type 0x2 0x2 (LSP ping verification) (LSP ping verification) VCCV CC type 0x6 0x6 (router alert label) (router alert label) (TTL expiry) (TTL expiry) ------------ ------------------------------ ----------------------------- Incoming Status (PW Status TLV): Status code: 0x20 (Standby) in Notification message Outgoing Status (PW Status TLV): Status code: 0x0 (Up) in Notification message MIB cpwVcIndex: 2684354567 Create time: 08/12/2020 01:02:44 (2w0d ago) Last time status changed: 08/12/2020 01:06:55 (2w0d ago) MAC withdraw messages: sent 0, received 0 Group CLIENT, XC C3, state is up; Interworking none AC: GigabitEthernet100/0/0/6.3100, state is up, active in RG-ID 10 Type 
VLAN; Num Ranges: 1 Rewrite Tags: [] VLAN ranges: [3100, 3100] MTU 9198; XC ID 0x120000a; interworking none Statistics: packets: received 50709266, sent 81925195 bytes: received 20472200681, sent 29487822535 drops: illegal VLAN 0, illegal length 0 PW: neighbor 192.168.0.51, PW ID 1542550, state is up ( established ) PW class not set, XC ID 0xa0000009 Encapsulation MPLS, protocol LDP Source address 192.168.0.47 PW type Ethernet, control word disabled, interworking none PW backup disable delay 0 sec Sequencing not set LSP : Up PW Status TLV in use MPLS Local Remote ------------ ------------------------------ ----------------------------- Label 24045 24029 Group ID 0x4001940 0x4000180 Interface GigabitEthernet100/0/0/6.3100 TenGigE0/0/0/3.214 MTU 9198 9198 Control word disabled disabled PW type Ethernet Ethernet VCCV CV type 0x2 0x2 (LSP ping verification) (LSP ping verification) VCCV CC type 0x6 0x6 (router alert label) (router alert label) (TTL expiry) (TTL expiry) ------------ ------------------------------ ----------------------------- Incoming Status (PW Status TLV): Status code: 0x0 (Up) in Notification message Outgoing Status (PW Status TLV): Status code: 0x0 (Up) in Notification message MIB cpwVcIndex: 2684354569 Create time: 08/12/2020 01:02:44 (2w0d ago) Last time status changed: 08/12/2020 01:12:15 (2w0d ago) Statistics: packets: received 81925195, sent 50709266 bytes: received 29487822535, sent 20472200681 ''' } def test_empty(self): self.device = Mock(**self.empty_output) obj = ShowL2vpnXconnectDetail(device=self.device) with self.assertRaises(SchemaEmptyParserError): parsed_output = obj.parse() def test_golden(self): self.maxDiff = None self.device = Mock(**self.golden_output) obj = ShowL2vpnXconnectDetail(device=self.device) parsed_output = obj.parse() self.assertEqual(parsed_output, self.golden_parsed_output) def test_golden2(self): self.maxDiff = None self.device = Mock(**self.golden_output2) obj = ShowL2vpnXconnectDetail(device=self.device) 
parsed_output = obj.parse() self.assertEqual(parsed_output, self.golden_parsed_output2) def test_golden3(self): self.maxDiff = None self.device = Mock(**self.golden_output3) obj = ShowL2vpnXconnectDetail(device=self.device) parsed_output = obj.parse() self.assertEqual(parsed_output, self.golden_parsed_output3) # ================================================== # Unit test for 'show l2vpn xconnect summary' # ================================================== class TestShowL2vpnXconnectSummary(unittest.TestCase): """Unit test for 'show l2vpn xconnect summary' """ device = Device(name='aDevice') empty_output = {'execute.return_value': ''} golden_parsed_output = { 'number_of_groups': { 'total': 0, }, 'number_of_xconnects': { 'total': 0, 'up': 0, 'down': 0, 'unresolved': 0, 'partially_programmed': 0, 'ac_pw': 0, 'ac_ac': 0, 'pw_pw': 0, 'monitor_session_pw': 0, }, 'number_of_admin_down_segments': { 'total': 0, }, 'number_of_mp2mp_xconnects': { 'total': 0, 'up': 0, 'down': 0, 'advertised': 0, 'non_advertised': 0, }, 'number_of_ce_connections': { 'total': 0, 'advertised': 0, 'non_advertised': 0, }, 'backup_pw': { 'configured': 0, 'up': 0, 'down': 0, 'admin_down': 0, 'unresolved': 0, 'standby': 0, 'standby_ready': 0, }, 'backup_interface': { 'configured': 0, 'up': 0, 'down': 0, 'admin_down': 0, 'unresolved': 0, 'standby': 0, }, } golden_output = {'execute.return_value': ''' Device#show l2vpn xconnect summary Thu Sep 26 11:00:09.210 EDT Number of groups: 0 Number of xconnects: 0 Up: 0 Down: 0 Unresolved: 0 Partially-programmed: 0 AC-PW: 0 AC-AC: 0 PW-PW: 0 Monitor-Session-PW: 0 Number of Admin Down segments: 0 Number of MP2MP xconnects: 0 Up 0 Down 0 Advertised: 0 Non-Advertised: 0 Number of CE Connections: 0 Advertised: 0 Non-Advertised: 0 Backup PW: Configured : 0 UP : 0 Down : 0 Admin Down : 0 Unresolved : 0 Standby : 0 Standby Ready: 0 Backup Interface: Configured : 0 UP : 0 Down : 0 Admin Down : 0 Unresolved : 0 Standby : 0 Device# '''} def test_empty(self): 
self.device = Mock(**self.empty_output) obj = ShowL2vpnXconnectSummary(device=self.device) with self.assertRaises(SchemaEmptyParserError): parsed_output = obj.parse() def test_golden(self): self.maxDiff = None self.device = Mock(**self.golden_output) obj = ShowL2vpnXconnectSummary(device=self.device) parsed_output = obj.parse() self.assertEqual(parsed_output, self.golden_parsed_output) # ================================================== # Unit test for 'show l2vpn xconnect mp2mp detail' # ================================================== class TestShowL2vpnXconnectMp2mpDetail(unittest.TestCase): """Unit test for 'show l2vpn xconnect mp2mp detail' """ device = Device(name='aDevice') empty_output = {'execute.return_value': ''} golden_parsed_output = { 'group': { 'gr1': { 'mp2mp': { 'mp1': { 'state': 'up', 'vpn_id': 100, 'vpn_mtu': 1500, 'l2_encapsulation': 'VLAN', 'auto_discovery': { 'BGP': { 'state': 'Advertised', 'event_name': 'Service Connected', 'route_distinguisher': '(auto) 10.36.3.3:32770', }, }, 'import_route_targets': ['10.16.2.2:100'], 'export_route_targets': ['10.16.2.2:100'], 'signaling_protocol': { 'BGP': { 'ce_range': 10, }, }, }, }, 'xc': { 'mp1.1:2': { 'state': 'up', 'interworking': 'none', 'local_ce_id': 1, 'remote_ce_id': 2, 'discovery_state': 'Advertised', 'ac': { 'GigabitEthernet0/1/0/1.1': { 'state': 'up', 'type': 'VLAN', 'num_ranges': 1, 'vlan_ranges': ['1', '1'], 'mtu': 1500, 'xc_id': '0x2000013', 'interworking': 'none', }, }, 'pw': { 'neighbor': { '10.4.1.1': { 'id': { 65538: { 'state': 'up ( established )', 'pw_class': 'not set', 'xc_id': '0x2000013', 'encapsulation': 'MPLS', 'protocol': 'BGP', 'mpls': { 'label': { 'local': '16031', 'remote': '16045', }, 'mtu': { 'local': '1500', 'remote': '1500', }, 'control_word': { 'local': 'enabled', 'remote': 'enabled', }, 'pw_type': { 'local': 'Ethernet VLAN', 'remote': 'Ethernet VLAN', }, 'ce_id': { 'local': '1', 'remote': '2', }, }, }, }, }, }, }, }, }, }, }, } golden_output = 
{'execute.return_value': ''' show l2vpn xconnect mp2mp detail Group gr1, MP2MP mp1, state: up VPN ID: 100 VPN MTU: 1500 L2 Encapsulation: VLAN Auto Discovery: BGP, state is Advertised (Service Connected) Route Distinguisher: (auto) 10.36.3.3:32770 Import Route Targets: 10.16.2.2:100 Export Route Targets: 10.16.2.2:100 Signaling protocol:BGP CE Range:10 Group gr1, XC mp1.1:2, state is up; Interworking none Local CE ID: 1, Remote CE ID: 2, Discovery State: Advertised AC: GigabitEthernet0/1/0/1.1, state is up Type VLAN; Num Ranges: 1 VLAN ranges: [1, 1] MTU 1500; XC ID 0x2000013; interworking none PW: neighbor 10.4.1.1, PW ID 65538, state is up ( established ) PW class not set, XC ID 0x2000013 Encapsulation MPLS, Auto-discovered (BGP), protocol BGP MPLS Local Remote ------------ ------------------------------ ----------------------------- Label 16031 16045 MTU 1500 1500 Control word enabled enabled PW type Ethernet VLAN Ethernet VLAN CE-ID 1 2 '''} def test_empty(self): self.device = Mock(**self.empty_output) obj = ShowL2vpnXconnectMp2mpDetail(device=self.device) with self.assertRaises(SchemaEmptyParserError): parsed_output = obj.parse() def test_golden(self): self.maxDiff = None self.device = Mock(**self.golden_output) obj = ShowL2vpnXconnectMp2mpDetail(device=self.device) parsed_output = obj.parse() self.assertEqual(parsed_output, self.golden_parsed_output) if __name__ == '__main__': unittest.main()
47.060666
109
0.271125
5,545
94,639
4.533093
0.090712
0.003819
0.016908
0.013606
0.826743
0.778246
0.735917
0.684317
0.646642
0.618038
0
0.111243
0.617209
94,639
2,011
110
47.060666
0.582604
0.009922
0
0.547863
0
0.013847
0.330967
0.041858
0
0
0.003289
0
0.011439
1
0.011439
false
0
0.004214
0
0.041541
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
79286ebbc601d1ba9dbd06ba57fa3f20d16a1802
5,010
py
Python
tests/unit_test/models/model_test.py
himalaya-singh-sheoran/kairon
23859173a3a8d4951ae02e1451fdbe25f17ce1fa
[ "Apache-2.0" ]
null
null
null
tests/unit_test/models/model_test.py
himalaya-singh-sheoran/kairon
23859173a3a8d4951ae02e1451fdbe25f17ce1fa
[ "Apache-2.0" ]
null
null
null
tests/unit_test/models/model_test.py
himalaya-singh-sheoran/kairon
23859173a3a8d4951ae02e1451fdbe25f17ce1fa
[ "Apache-2.0" ]
null
null
null
import pytest from kairon.api.models import HttpActionConfigRequest, HttpActionParameters class TestBotModels: def test_http_action_params_valid(self): assert HttpActionParameters(key="param1", value="param1", parameter_type="slot") assert HttpActionParameters(key="param1", value="param1", parameter_type="value") HttpActionParameters(key="key", value="", parameter_type="value") HttpActionParameters(key="key", value=None, parameter_type="value") assert HttpActionParameters(key="param1", value="param1", parameter_type="sender_id") assert HttpActionParameters(key="param1", value="", parameter_type="sender_id") assert HttpActionParameters(key="param1", parameter_type="sender_id") def test_http_action_params_invalid(self): with pytest.raises(ValueError, match=r".*key cannot be empty.*"): HttpActionParameters(key="", value="param1", parameter_type="slot") with pytest.raises(ValueError, match=r".*key cannot be empty.*"): HttpActionParameters(key=None, value="param1", parameter_type="slot") with pytest.raises(ValueError, match=r".*Provide name of the slot as value.*"): HttpActionParameters(key="key", value="", parameter_type="slot") with pytest.raises(ValueError, match=r".*Provide name of the slot as value.*"): HttpActionParameters(key="key", value=None, parameter_type="slot") with pytest.raises(ValueError, match=r".*parameter_type\n value is not a valid enumeration member.*"): HttpActionParameters(key="key", value="value", parameter_type="unknown_type") def test_http_action_config_request_valid(self): HttpActionConfigRequest( auth_token="", action_name="test_action", response="response", http_url="http://www.google.com", request_method="GET", http_params_list=[] ) HttpActionConfigRequest( auth_token=None, action_name="test_action", response="response", http_url="http://www.google.com", request_method="GET", http_params_list=[] ) def test_http_action_config_request_invalid(self): with pytest.raises(ValueError, match=r".*none is not an allowed value.*"): 
HttpActionConfigRequest(auth_token="", action_name=None, response="response", http_url="http://www.google.com", request_method="GET", http_params_list=[]) with pytest.raises(ValueError, match=r".*action_name is required*"): HttpActionConfigRequest(auth_token="", action_name="", response="response", http_url="http://www.google.com", request_method="GET", http_params_list=[]) with pytest.raises(ValueError, match=r".*none is not an allowed value.*"): HttpActionConfigRequest(auth_token="", action_name="http_action", response=None, http_url="http://www.google.com", request_method="GET", http_params_list=[]) with pytest.raises(ValueError, match=r".*URL is malformed.*"): HttpActionConfigRequest(auth_token="", action_name="http_action", response="response", http_url="", request_method="GET", http_params_list=[]) with pytest.raises(ValueError, match=r".*none is not an allowed value.*"): HttpActionConfigRequest(auth_token="", action_name="http_action", response="response", http_url=None, request_method="GET", http_params_list=[]) with pytest.raises(ValueError, match=r".URL is malformed.*"): HttpActionConfigRequest(auth_token="", action_name="http_action", response="response", http_url="www.google.com", request_method="GET", http_params_list=[]) with pytest.raises(ValueError, match=r".*Invalid HTTP method.*"): HttpActionConfigRequest(auth_token="", action_name="http_action", response="response", http_url="http://www.google.com", request_method="OPTIONS", http_params_list=[]) with pytest.raises(ValueError, match=r".*Invalid HTTP method.*"): HttpActionConfigRequest(auth_token="", action_name="http_action", response="response", http_url="http://www.google.com", request_method="", http_params_list=[]) with pytest.raises(ValueError, match=r".*none is not an allowed value.*"): HttpActionConfigRequest(auth_token="", action_name="http_action", response="response", http_url="http://www.google.com", request_method=None, http_params_list=[])
61.851852
113
0.613573
516
5,010
5.742248
0.125969
0.047249
0.075599
0.122848
0.890314
0.83969
0.81944
0.802902
0.683091
0.666554
0
0.002695
0.259481
5,010
80
114
62.625
0.795957
0
0
0.506849
0
0
0.192814
0
0
0
0
0
0.068493
1
0.054795
false
0
0.027397
0
0.09589
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
f70f7266246a72a1f47bb872e52660dc84524048
167
py
Python
REL/ner/__init__.py
theblackcat102/REL
9daaf924d3b7ee75ba0738fd218ddbaeab989bd8
[ "MIT" ]
210
2020-02-27T14:10:57.000Z
2022-03-30T01:32:52.000Z
REL/ner/__init__.py
theblackcat102/REL
9daaf924d3b7ee75ba0738fd218ddbaeab989bd8
[ "MIT" ]
69
2020-03-06T09:58:43.000Z
2022-03-31T16:24:35.000Z
REL/ner/__init__.py
cnnlabs/REL
7e680a13fb26cb23d9ba9ea45efd01cb4c6c7871
[ "MIT" ]
57
2020-02-28T15:52:33.000Z
2022-03-16T11:28:19.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from REL.ner.base import NERBase, Span from REL.ner.flair_wrapper import load_flair_ner from REL.ner.ngram import Cmns
23.857143
48
0.748503
29
167
4.206897
0.655172
0.172131
0.245902
0
0
0
0
0
0
0
0
0.013699
0.125749
167
6
49
27.833333
0.821918
0.257485
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
f711004432df38ae3c8341225cdbbae91d9826b0
10
py
Python
tasks/EPAM/python_course/foundation-python/l7/m7-19-image.py
AleksNeStu/projects
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
[ "Apache-2.0" ]
2
2022-01-19T18:01:35.000Z
2022-02-06T06:54:38.000Z
tasks/EPAM/python_course/foundation-python/l7/m7-5-logs.py
AleksNeStu/projects
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
[ "Apache-2.0" ]
null
null
null
tasks/EPAM/python_course/foundation-python/l7/m7-5-logs.py
AleksNeStu/projects
1a4c68dfbdcb77228f0f3617e58fd18fcb1f5dbb
[ "Apache-2.0" ]
null
null
null
"""Talk"""
10
10
0.4
1
10
4
1
0
0
0
0
0
0
0
0
0
0
0
0
10
1
10
10
0.4
0.4
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
f71781d481d127f72294f8baec04d9d74461c11a
121
py
Python
okta/models/usergroup/__init__.py
rkhleics/oktasdk-python
da8183444704c6d16831d1edd619390e9120dd70
[ "Apache-2.0" ]
1
2020-09-09T12:59:19.000Z
2020-09-09T12:59:19.000Z
okta/models/usergroup/__init__.py
torchbox/oktasdk-python
da8183444704c6d16831d1edd619390e9120dd70
[ "Apache-2.0" ]
null
null
null
okta/models/usergroup/__init__.py
torchbox/oktasdk-python
da8183444704c6d16831d1edd619390e9120dd70
[ "Apache-2.0" ]
2
2017-11-02T22:12:57.000Z
2019-09-16T08:02:23.000Z
from .UserGroup import UserGroup from .UserGroupProfile import UserGroupProfile from .UserGroupRule import UserGroupRule
30.25
46
0.876033
12
121
8.833333
0.416667
0
0
0
0
0
0
0
0
0
0
0
0.099174
121
3
47
40.333333
0.972477
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f73eacd68651aa35688f3850987079d8a3b7f40f
150
py
Python
exercises/darts/darts.py
southpush/python
048191583ed2cf668c6180d851d100f277a74101
[ "MIT" ]
null
null
null
exercises/darts/darts.py
southpush/python
048191583ed2cf668c6180d851d100f277a74101
[ "MIT" ]
null
null
null
exercises/darts/darts.py
southpush/python
048191583ed2cf668c6180d851d100f277a74101
[ "MIT" ]
null
null
null
def score(x, y): distance = x ** 2 + y ** 2 return 10 if distance <= 1 ** 2 else 5 if distance <= 5 ** 2 else 1 if distance <= 10 ** 2 else 0
37.5
101
0.546667
29
150
2.827586
0.448276
0.365854
0
0
0
0
0
0
0
0
0
0.134615
0.306667
150
3
102
50
0.653846
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
f7729be2b7b190482850d62c49c758aaaa3bd521
114
py
Python
source/spacy_ner/__init__.py
wwalterr/finance
412bd2f161f7d11c0c832313a9a09068fc002a4b
[ "MIT" ]
null
null
null
source/spacy_ner/__init__.py
wwalterr/finance
412bd2f161f7d11c0c832313a9a09068fc002a4b
[ "MIT" ]
null
null
null
source/spacy_ner/__init__.py
wwalterr/finance
412bd2f161f7d11c0c832313a9a09068fc002a4b
[ "MIT" ]
null
null
null
from .ner import spacy_ner from .ner_validation import ner_validation __all__ = ['spacy_ner', 'ner_validation']
19
42
0.789474
16
114
5.0625
0.375
0.481481
0
0
0
0
0
0
0
0
0
0
0.122807
114
5
43
22.8
0.81
0
0
0
0
0
0.201754
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
f79379a2377689454c0ed78c62ed59070ccbf1e0
151
py
Python
examples/py_native/bin.py
andrefmrocha/bazel
0d2e409ec8eeadfde90b1860935af65d76c90966
[ "Apache-2.0" ]
2
2018-02-20T12:56:23.000Z
2021-06-06T11:35:03.000Z
examples/py_native/bin.py
andrefmrocha/bazel
0d2e409ec8eeadfde90b1860935af65d76c90966
[ "Apache-2.0" ]
14
2021-06-12T01:31:36.000Z
2021-06-23T21:33:54.000Z
examples/py_native/bin.py
andrefmrocha/bazel
0d2e409ec8eeadfde90b1860935af65d76c90966
[ "Apache-2.0" ]
3
2018-02-20T12:56:28.000Z
2021-06-12T01:25:03.000Z
"""A tiny example binary for the native Python rules of Bazel.""" from examples.py_native.lib import GetNumber print "The number is %d" % GetNumber()
30.2
65
0.748344
24
151
4.666667
0.875
0
0
0
0
0
0
0
0
0
0
0
0.152318
151
4
66
37.75
0.875
0
0
0
0
0
0.186047
0
0
0
0
0
0
0
null
null
0
0.5
null
null
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
1
0
5
f79d9aab175817f1624b6c06ea5f1497971ef23d
82
py
Python
conftest.py
VC19-SDK/pyverificac19
a6c5550b3445b147577e9a0cc7f21a8151989870
[ "MIT" ]
8
2021-12-20T14:57:34.000Z
2022-01-14T01:24:45.000Z
conftest.py
VC19-SDK/pyverificac19
a6c5550b3445b147577e9a0cc7f21a8151989870
[ "MIT" ]
21
2021-12-20T09:55:57.000Z
2022-03-07T08:48:37.000Z
conftest.py
VC19-SDK/pyverificac19
a6c5550b3445b147577e9a0cc7f21a8151989870
[ "MIT" ]
2
2022-01-04T21:23:01.000Z
2022-02-04T10:32:54.000Z
from tests.setup import run_all def pytest_sessionstart(session): run_all()
13.666667
33
0.768293
12
82
5
0.833333
0.2
0
0
0
0
0
0
0
0
0
0
0.158537
82
5
34
16.4
0.869565
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
5
e3954daeec42e8af59565f770fce70e74d67a1b6
109
py
Python
accesslink-API/accesslink/__init__.py
mendelson/polar-data-analysis
04c7b8615d88e3966e8a71c4353ad23c61ff022d
[ "MIT" ]
115
2017-10-26T16:59:51.000Z
2022-03-29T13:56:48.000Z
accesslink-API/accesslink/__init__.py
mendelson/polar-data-analysis
04c7b8615d88e3966e8a71c4353ad23c61ff022d
[ "MIT" ]
14
2018-01-08T10:02:05.000Z
2022-02-17T16:05:01.000Z
accesslink-API/accesslink/__init__.py
mendelson/polar-data-analysis
04c7b8615d88e3966e8a71c4353ad23c61ff022d
[ "MIT" ]
61
2017-10-27T10:38:17.000Z
2022-03-11T20:03:52.000Z
#!/usr/bin/env python """Python wrapper for Polar Open AccessLink API""" from .accesslink import AccessLink
21.8
50
0.761468
15
109
5.533333
0.8
0
0
0
0
0
0
0
0
0
0
0
0.12844
109
4
51
27.25
0.873684
0.59633
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e3a750ce9bab40e341d30bdf6359886617910798
48
py
Python
tests/__init__.py
NKI-AI/xdrt
ca3e83459dd76521bac597465c815cf6a3da35ad
[ "Apache-2.0" ]
10
2020-12-02T01:25:21.000Z
2022-03-31T11:26:10.000Z
tests/__init__.py
NKI-AI/xdrt
ca3e83459dd76521bac597465c815cf6a3da35ad
[ "Apache-2.0" ]
5
2020-12-30T14:13:27.000Z
2022-03-31T15:06:13.000Z
tests/__init__.py
NKI-AI/xdrt
ca3e83459dd76521bac597465c815cf6a3da35ad
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 """Unit test package for xdr."""
16
32
0.645833
8
48
3.875
1
0
0
0
0
0
0
0
0
0
0
0.02439
0.145833
48
2
33
24
0.731707
0.833333
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
e3aa0f11ea3d41f62c98ec9ebb89011b2256a903
46,962
py
Python
opc/lookup_table.py
Nerdyvedi/py-opcr1
cd4ea89711c7d973194452d061b4a86b5cf1f6cb
[ "MIT" ]
2
2020-04-07T20:51:25.000Z
2020-06-13T13:29:42.000Z
opc/lookup_table.py
Nerdyvedi/py-opcr1
cd4ea89711c7d973194452d061b4a86b5cf1f6cb
[ "MIT" ]
null
null
null
opc/lookup_table.py
Nerdyvedi/py-opcr1
cd4ea89711c7d973194452d061b4a86b5cf1f6cb
[ "MIT" ]
null
null
null
""" Lookup Tables stored as python dictionaries for easy use """ OPC_LOOKUP = [ 0.01, 0.01, 0.01, 0.01, 0.01, 0.04, 0.11, 0.17, 0.22, 0.27, 0.31, 0.35, 0.38, 0.42, 0.45, 0.48, 0.51, 0.54, 0.57, 0.6, 0.62, 0.65, 0.67, 0.69, 0.72, 0.74, 0.76, 0.78, 0.81, 0.83, 0.85, 0.87, 0.89, 0.91, 0.93, 0.94, 0.96, 0.98, 1.0, 1.02, 1.03, 1.05, 1.07, 1.08, 1.1, 1.12, 1.13, 1.15, 1.17, 1.18, 1.2, 1.21, 1.23, 1.24, 1.26, 1.27, 1.29, 1.3, 1.32, 1.33, 1.34, 1.36, 1.37, 1.39, 1.4, 1.41, 1.43, 1.44, 1.45, 1.47, 1.48, 1.49, 1.5, 1.52, 1.53, 1.54, 1.56, 1.57, 1.58, 1.59, 1.6, 1.62, 1.63, 1.64, 1.65, 1.66, 1.68, 1.69, 1.7, 1.71, 1.72, 1.73, 1.75, 1.76, 1.77, 1.78, 1.79, 1.8, 1.81, 1.82, 1.83, 1.85, 1.86, 1.87, 1.88, 1.89, 1.9, 1.91, 1.92, 1.93, 1.94, 1.95, 1.96, 1.97, 1.98, 1.99, 2.0, 2.01, 2.02, 2.03, 2.04, 2.05, 2.06, 2.07, 2.08, 2.09, 2.1, 2.11, 2.12, 2.13, 2.14, 2.15, 2.16, 2.17, 2.18, 2.19, 2.2, 2.21, 2.22, 2.23, 2.24, 2.24, 2.25, 2.26, 2.27, 2.28, 2.29, 2.3, 2.31, 2.32, 2.33, 2.34, 2.34, 2.35, 2.36, 2.37, 2.38, 2.39, 2.4, 2.41, 2.41, 2.42, 2.43, 2.44, 2.45, 2.46, 2.47, 2.47, 2.48, 2.49, 2.5, 2.51, 2.52, 2.53, 2.53, 2.54, 2.55, 2.56, 2.57, 2.58, 2.58, 2.59, 2.6, 2.61, 2.62, 2.62, 2.63, 2.64, 2.65, 2.66, 2.67, 2.67, 2.68, 2.69, 2.7, 2.71, 2.71, 2.72, 2.73, 2.74, 2.74, 2.75, 2.76, 2.77, 2.78, 2.78, 2.79, 2.8, 2.81, 2.81, 2.82, 2.83, 2.84, 2.85, 2.85, 2.86, 2.87, 2.88, 2.88, 2.89, 2.9, 2.91, 2.91, 2.92, 2.93, 2.94, 2.94, 2.95, 2.96, 2.96, 2.97, 2.98, 2.99, 2.99, 3.0, 3.01, 3.02, 3.02, 3.03, 3.04, 3.05, 3.05, 3.06, 3.07, 3.07, 3.08, 3.09, 3.1, 3.1, 3.11, 3.12, 3.12, 3.13, 3.14, 3.14, 3.15, 3.16, 3.17, 3.17, 3.18, 3.19, 3.19, 3.2, 3.21, 3.21, 3.22, 3.23, 3.24, 3.24, 3.25, 3.26, 3.26, 3.27, 3.28, 3.28, 3.29, 3.3, 3.3, 3.31, 3.32, 3.32, 3.33, 3.34, 3.34, 3.35, 3.36, 3.36, 3.37, 3.38, 3.38, 3.39, 3.4, 3.4, 3.41, 3.42, 3.42, 3.43, 3.44, 3.44, 3.45, 3.46, 3.46, 3.47, 3.48, 3.48, 3.49, 3.5, 3.5, 3.51, 3.51, 3.52, 3.53, 3.53, 3.54, 3.55, 3.55, 3.56, 3.57, 3.57, 3.58, 3.58, 3.59, 3.6, 3.6, 3.61, 
3.62, 3.62, 3.63, 3.64, 3.64, 3.65, 3.65, 3.66, 3.67, 3.67, 3.68, 3.69, 3.69, 3.7, 3.7, 3.71, 3.72, 3.72, 3.73, 3.73, 3.74, 3.75, 3.75, 3.76, 3.77, 3.77, 3.78, 3.78, 3.79, 3.8, 3.8, 3.81, 3.81, 3.82, 3.83, 3.83, 3.84, 3.84, 3.85, 3.86, 3.86, 3.87, 3.87, 3.88, 3.89, 3.89, 3.9, 3.9, 3.91, 3.92, 3.92, 3.93, 3.93, 3.94, 3.95, 3.95, 3.96, 3.96, 3.97, 3.97, 3.98, 3.99, 3.99, 4.0, 4.0, 4.01, 4.02, 4.02, 4.03, 4.03, 4.04, 4.04, 4.05, 4.06, 4.06, 4.07, 4.07, 4.08, 4.08, 4.09, 4.1, 4.1, 4.11, 4.11, 4.12, 4.12, 4.13, 4.14, 4.14, 4.15, 4.15, 4.16, 4.16, 4.17, 4.18, 4.18, 4.19, 4.19, 4.2, 4.2, 4.21, 4.22, 4.22, 4.23, 4.23, 4.24, 4.24, 4.25, 4.25, 4.26, 4.27, 4.27, 4.28, 4.28, 4.29, 4.29, 4.3, 4.3, 4.31, 4.32, 4.32, 4.33, 4.33, 4.34, 4.34, 4.35, 4.35, 4.36, 4.36, 4.37, 4.38, 4.38, 4.39, 4.39, 4.4, 4.4, 4.41, 4.41, 4.42, 4.42, 4.43, 4.44, 4.44, 4.45, 4.45, 4.46, 4.46, 4.47, 4.47, 4.48, 4.48, 4.49, 4.49, 4.5, 4.5, 4.51, 4.52, 4.52, 4.53, 4.53, 4.54, 4.54, 4.55, 4.55, 4.56, 4.56, 4.57, 4.57, 4.58, 4.58, 4.59, 4.59, 4.6, 4.61, 4.61, 4.62, 4.62, 4.63, 4.63, 4.64, 4.64, 4.65, 4.65, 4.66, 4.66, 4.67, 4.67, 4.68, 4.68, 4.69, 4.69, 4.7, 4.7, 4.71, 4.71, 4.72, 4.73, 4.73, 4.74, 4.74, 4.75, 4.75, 4.76, 4.76, 4.77, 4.77, 4.78, 4.78, 4.79, 4.79, 4.8, 4.8, 4.81, 4.81, 4.82, 4.82, 4.83, 4.83, 4.84, 4.84, 4.85, 4.85, 4.86, 4.86, 4.87, 4.87, 4.88, 4.88, 4.89, 4.89, 4.9, 4.9, 4.91, 4.91, 4.92, 4.92, 4.93, 4.93, 4.94, 4.94, 4.95, 4.95, 4.96, 4.96, 4.97, 4.97, 4.98, 4.98, 4.99, 4.99, 5.0, 5.0, 5.01, 5.01, 5.02, 5.02, 5.03, 5.03, 5.04, 5.04, 5.05, 5.05, 5.06, 5.06, 5.07, 5.07, 5.08, 5.08, 5.09, 5.09, 5.1, 5.1, 5.11, 5.11, 5.12, 5.12, 5.13, 5.13, 5.13, 5.14, 5.14, 5.15, 5.15, 5.16, 5.16, 5.17, 5.17, 5.18, 5.18, 5.19, 5.19, 5.2, 5.2, 5.21, 5.21, 5.22, 5.22, 5.23, 5.23, 5.24, 5.24, 5.25, 5.25, 5.26, 5.26, 5.26, 5.27, 5.27, 5.28, 5.28, 5.29, 5.29, 5.3, 5.3, 5.31, 5.31, 5.32, 5.32, 5.33, 5.33, 5.34, 5.34, 5.35, 5.35, 5.35, 5.36, 5.36, 5.37, 5.37, 5.38, 5.38, 5.39, 5.39, 5.4, 5.4, 5.41, 5.41, 5.42, 5.42, 
5.43, 5.43, 5.43, 5.44, 5.44, 5.45, 5.45, 5.46, 5.46, 5.47, 5.47, 5.48, 5.48, 5.49, 5.49, 5.49, 5.5, 5.5, 5.51, 5.51, 5.52, 5.52, 5.53, 5.53, 5.54, 5.54, 5.55, 5.55, 5.55, 5.56, 5.56, 5.57, 5.57, 5.58, 5.58, 5.59, 5.59, 5.6, 5.6, 5.61, 5.61, 5.61, 5.62, 5.62, 5.63, 5.63, 5.64, 5.64, 5.65, 5.65, 5.66, 5.66, 5.66, 5.67, 5.67, 5.68, 5.68, 5.69, 5.69, 5.7, 5.7, 5.71, 5.71, 5.71, 5.72, 5.72, 5.73, 5.73, 5.74, 5.74, 5.75, 5.75, 5.75, 5.76, 5.76, 5.77, 5.77, 5.78, 5.78, 5.79, 5.79, 5.79, 5.8, 5.8, 5.81, 5.81, 5.82, 5.82, 5.83, 5.83, 5.83, 5.84, 5.84, 5.85, 5.85, 5.86, 5.86, 5.87, 5.87, 5.87, 5.88, 5.88, 5.89, 5.89, 5.9, 5.9, 5.91, 5.91, 5.91, 5.92, 5.92, 5.93, 5.93, 5.94, 5.94, 5.95, 5.95, 5.95, 5.96, 5.96, 5.97, 5.97, 5.98, 5.98, 5.98, 5.99, 5.99, 6.0, 6.0, 6.01, 6.01, 6.02, 6.02, 6.02, 6.03, 6.03, 6.04, 6.04, 6.05, 6.05, 6.05, 6.06, 6.06, 6.07, 6.07, 6.08, 6.08, 6.08, 6.09, 6.09, 6.1, 6.1, 6.11, 6.11, 6.12, 6.12, 6.12, 6.13, 6.13, 6.14, 6.14, 6.15, 6.15, 6.15, 6.16, 6.16, 6.17, 6.17, 6.18, 6.18, 6.18, 6.19, 6.19, 6.2, 6.2, 6.21, 6.21, 6.21, 6.22, 6.22, 6.23, 6.23, 6.24, 6.24, 6.24, 6.25, 6.25, 6.26, 6.26, 6.26, 6.27, 6.27, 6.28, 6.28, 6.29, 6.29, 6.29, 6.3, 6.3, 6.31, 6.31, 6.32, 6.32, 6.32, 6.33, 6.33, 6.34, 6.34, 6.35, 6.35, 6.35, 6.36, 6.36, 6.37, 6.37, 6.37, 6.38, 6.38, 6.39, 6.39, 6.4, 6.4, 6.4, 6.41, 6.41, 6.42, 6.42, 6.42, 6.43, 6.43, 6.44, 6.44, 6.45, 6.45, 6.45, 6.46, 6.46, 6.47, 6.47, 6.47, 6.48, 6.48, 6.49, 6.49, 6.5, 6.5, 6.5, 6.51, 6.51, 6.52, 6.52, 6.52, 6.53, 6.53, 6.54, 6.54, 6.55, 6.55, 6.55, 6.56, 6.56, 6.57, 6.57, 6.57, 6.58, 6.58, 6.59, 6.59, 6.59, 6.6, 6.6, 6.61, 6.61, 6.62, 6.62, 6.62, 6.63, 6.63, 6.64, 6.64, 6.64, 6.65, 6.65, 6.66, 6.66, 6.66, 6.67, 6.67, 6.68, 6.68, 6.68, 6.69, 6.69, 6.7, 6.7, 6.71, 6.71, 6.71, 6.72, 6.72, 6.73, 6.73, 6.73, 6.74, 6.74, 6.75, 6.75, 6.75, 6.76, 6.76, 6.77, 6.77, 6.77, 6.78, 6.78, 6.79, 6.79, 6.79, 6.8, 6.8, 6.81, 6.81, 6.81, 6.82, 6.82, 6.83, 6.83, 6.83, 6.84, 6.84, 6.85, 6.85, 6.85, 6.86, 6.86, 6.87, 6.87, 6.88, 
6.88, 6.88, 6.89, 6.89, 6.9, 6.9, 6.9, 6.91, 6.91, 6.92, 6.92, 6.92, 6.93, 6.93, 6.94, 6.94, 6.94, 6.95, 6.95, 6.95, 6.96, 6.96, 6.97, 6.97, 6.97, 6.98, 6.98, 6.99, 6.99, 6.99, 7.0, 7.0, 7.01, 7.01, 7.01, 7.02, 7.02, 7.03, 7.03, 7.03, 7.04, 7.04, 7.05, 7.05, 7.05, 7.06, 7.06, 7.07, 7.07, 7.07, 7.08, 7.08, 7.09, 7.09, 7.09, 7.1, 7.1, 7.11, 7.11, 7.11, 7.12, 7.12, 7.12, 7.13, 7.13, 7.14, 7.14, 7.14, 7.15, 7.15, 7.16, 7.16, 7.16, 7.17, 7.17, 7.18, 7.18, 7.18, 7.19, 7.19, 7.2, 7.2, 7.2, 7.21, 7.21, 7.21, 7.22, 7.22, 7.23, 7.23, 7.23, 7.24, 7.24, 7.25, 7.25, 7.25, 7.26, 7.26, 7.27, 7.27, 7.27, 7.28, 7.28, 7.28, 7.29, 7.29, 7.3, 7.3, 7.3, 7.31, 7.31, 7.32, 7.32, 7.32, 7.33, 7.33, 7.34, 7.34, 7.34, 7.35, 7.35, 7.35, 7.36, 7.36, 7.37, 7.37, 7.37, 7.38, 7.38, 7.39, 7.39, 7.39, 7.4, 7.4, 7.4, 7.41, 7.41, 7.42, 7.42, 7.42, 7.43, 7.43, 7.44, 7.44, 7.44, 7.45, 7.45, 7.45, 7.46, 7.46, 7.47, 7.47, 7.47, 7.48, 7.48, 7.49, 7.49, 7.49, 7.5, 7.5, 7.5, 7.51, 7.51, 7.52, 7.52, 7.52, 7.53, 7.53, 7.53, 7.54, 7.54, 7.55, 7.55, 7.55, 7.56, 7.56, 7.57, 7.57, 7.57, 7.58, 7.58, 7.58, 7.59, 7.59, 7.6, 7.6, 7.6, 7.61, 7.61, 7.61, 7.62, 7.62, 7.63, 7.63, 7.63, 7.64, 7.64, 7.64, 7.65, 7.65, 7.66, 7.66, 7.66, 7.67, 7.67, 7.67, 7.68, 7.68, 7.69, 7.69, 7.69, 7.7, 7.7, 7.7, 7.71, 7.71, 7.72, 7.72, 7.72, 7.73, 7.73, 7.73, 7.74, 7.74, 7.75, 7.75, 7.75, 7.76, 7.76, 7.76, 7.77, 7.77, 7.78, 7.78, 7.78, 7.79, 7.79, 7.79, 7.8, 7.8, 7.81, 7.81, 7.81, 7.82, 7.82, 7.82, 7.83, 7.83, 7.84, 7.84, 7.84, 7.85, 7.85, 7.85, 7.86, 7.86, 7.87, 7.87, 7.87, 7.88, 7.88, 7.88, 7.89, 7.89, 7.9, 7.9, 7.9, 7.91, 7.91, 7.91, 7.92, 7.92, 7.93, 7.93, 7.93, 7.94, 7.94, 7.94, 7.95, 7.95, 7.95, 7.96, 7.96, 7.97, 7.97, 7.97, 7.98, 7.98, 7.98, 7.99, 7.99, 8.0, 8.0, 8.0, 8.01, 8.01, 8.01, 8.02, 8.02, 8.02, 8.03, 8.03, 8.04, 8.04, 8.04, 8.05, 8.05, 8.05, 8.06, 8.06, 8.07, 8.07, 8.07, 8.08, 8.08, 8.08, 8.09, 8.09, 8.09, 8.1, 8.1, 8.11, 8.11, 8.11, 8.12, 8.12, 8.12, 8.13, 8.13, 8.13, 8.14, 8.14, 8.15, 8.15, 8.15, 8.16, 8.16, 8.16, 8.17, 
8.17, 8.18, 8.18, 8.18, 8.19, 8.19, 8.19, 8.2, 8.2, 8.2, 8.21, 8.21, 8.22, 8.22, 8.22, 8.23, 8.23, 8.23, 8.24, 8.24, 8.24, 8.25, 8.25, 8.26, 8.26, 8.26, 8.27, 8.27, 8.27, 8.28, 8.28, 8.28, 8.29, 8.29, 8.3, 8.3, 8.3, 8.31, 8.31, 8.31, 8.32, 8.32, 8.32, 8.33, 8.33, 8.33, 8.34, 8.34, 8.35, 8.35, 8.35, 8.36, 8.36, 8.36, 8.37, 8.37, 8.37, 8.38, 8.38, 8.39, 8.39, 8.39, 8.4, 8.4, 8.4, 8.41, 8.41, 8.41, 8.42, 8.42, 8.42, 8.43, 8.43, 8.44, 8.44, 8.44, 8.45, 8.45, 8.45, 8.46, 8.46, 8.46, 8.47, 8.47, 8.48, 8.48, 8.48, 8.49, 8.49, 8.49, 8.5, 8.5, 8.5, 8.51, 8.51, 8.51, 8.52, 8.52, 8.53, 8.53, 8.53, 8.54, 8.54, 8.54, 8.55, 8.55, 8.55, 8.56, 8.56, 8.56, 8.57, 8.57, 8.58, 8.58, 8.58, 8.59, 8.59, 8.59, 8.6, 8.6, 8.6, 8.61, 8.61, 8.61, 8.62, 8.62, 8.63, 8.63, 8.63, 8.64, 8.64, 8.64, 8.65, 8.65, 8.65, 8.66, 8.66, 8.66, 8.67, 8.67, 8.67, 8.68, 8.68, 8.69, 8.69, 8.69, 8.7, 8.7, 8.7, 8.71, 8.71, 8.71, 8.72, 8.72, 8.72, 8.73, 8.73, 8.74, 8.74, 8.74, 8.75, 8.75, 8.75, 8.76, 8.76, 8.76, 8.77, 8.77, 8.77, 8.78, 8.78, 8.78, 8.79, 8.79, 8.8, 8.8, 8.8, 8.81, 8.81, 8.81, 8.82, 8.82, 8.82, 8.83, 8.83, 8.83, 8.84, 8.84, 8.84, 8.85, 8.85, 8.85, 8.86, 8.86, 8.87, 8.87, 8.87, 8.88, 8.88, 8.88, 8.89, 8.89, 8.89, 8.9, 8.9, 8.9, 8.91, 8.91, 8.91, 8.92, 8.92, 8.93, 8.93, 8.93, 8.94, 8.94, 8.94, 8.95, 8.95, 8.95, 8.96, 8.96, 8.96, 8.97, 8.97, 8.97, 8.98, 8.98, 8.98, 8.99, 8.99, 9.0, 9.0, 9.0, 9.01, 9.01, 9.01, 9.02, 9.02, 9.02, 9.03, 9.03, 9.03, 9.04, 9.04, 9.04, 9.05, 9.05, 9.05, 9.06, 9.06, 9.06, 9.07, 9.07, 9.08, 9.08, 9.08, 9.09, 9.09, 9.09, 9.1, 9.1, 9.1, 9.11, 9.11, 9.11, 9.12, 9.12, 9.12, 9.13, 9.13, 9.13, 9.14, 9.14, 9.14, 9.15, 9.15, 9.16, 9.16, 9.16, 9.17, 9.17, 9.17, 9.18, 9.18, 9.18, 9.19, 9.19, 9.19, 9.2, 9.2, 9.2, 9.21, 9.21, 9.21, 9.22, 9.22, 9.22, 9.23, 9.23, 9.23, 9.24, 9.24, 9.25, 9.25, 9.25, 9.26, 9.26, 9.26, 9.27, 9.27, 9.27, 9.28, 9.28, 9.28, 9.29, 9.29, 9.29, 9.3, 9.3, 9.3, 9.31, 9.31, 9.31, 9.32, 9.32, 9.32, 9.33, 9.33, 9.33, 9.34, 9.34, 9.35, 9.35, 9.35, 9.36, 9.36, 9.36, 9.37, 
9.37, 9.37, 9.38, 9.38, 9.38, 9.39, 9.39, 9.39, 9.4, 9.4, 9.4, 9.41, 9.41, 9.41, 9.42, 9.42, 9.42, 9.43, 9.43, 9.43, 9.44, 9.44, 9.44, 9.45, 9.45, 9.45, 9.46, 9.46, 9.47, 9.47, 9.47, 9.48, 9.48, 9.48, 9.49, 9.49, 9.49, 9.5, 9.5, 9.5, 9.51, 9.51, 9.51, 9.52, 9.52, 9.52, 9.53, 9.53, 9.53, 9.54, 9.54, 9.54, 9.55, 9.55, 9.55, 9.56, 9.56, 9.56, 9.57, 9.57, 9.57, 9.58, 9.58, 9.58, 9.59, 9.59, 9.59, 9.6, 9.6, 9.6, 9.61, 9.61, 9.62, 9.62, 9.62, 9.63, 9.63, 9.63, 9.64, 9.64, 9.64, 9.65, 9.65, 9.65, 9.66, 9.66, 9.66, 9.67, 9.67, 9.67, 9.68, 9.68, 9.68, 9.69, 9.69, 9.69, 9.7, 9.7, 9.7, 9.71, 9.71, 9.71, 9.72, 9.72, 9.72, 9.73, 9.73, 9.73, 9.74, 9.74, 9.74, 9.75, 9.75, 9.75, 9.76, 9.76, 9.76, 9.77, 9.77, 9.77, 9.78, 9.78, 9.78, 9.79, 9.79, 9.79, 9.8, 9.8, 9.81, 9.81, 9.81, 9.82, 9.82, 9.82, 9.83, 9.83, 9.83, 9.84, 9.84, 9.84, 9.85, 9.85, 9.85, 9.86, 9.86, 9.86, 9.87, 9.87, 9.87, 9.88, 9.88, 9.88, 9.89, 9.89, 9.89, 9.9, 9.9, 9.9, 9.91, 9.91, 9.91, 9.92, 9.92, 9.92, 9.93, 9.93, 9.93, 9.94, 9.94, 9.94, 9.95, 9.95, 9.95, 9.96, 9.96, 9.96, 9.97, 9.97, 9.97, 9.98, 9.98, 9.98, 9.99, 9.99, 9.99, 10.0, 10.0, 10.0, 10.01, 10.01, 10.01, 10.02, 10.02, 10.02, 10.03, 10.03, 10.03, 10.04, 10.04, 10.04, 10.05, 10.05, 10.05, 10.06, 10.06, 10.06, 10.07, 10.07, 10.07, 10.08, 10.08, 10.08, 10.09, 10.09, 10.09, 10.1, 10.1, 10.1, 10.11, 10.11, 10.11, 10.12, 10.12, 10.12, 10.13, 10.13, 10.13, 10.14, 10.14, 10.14, 10.15, 10.15, 10.15, 10.16, 10.16, 10.16, 10.17, 10.17, 10.17, 10.18, 10.18, 10.18, 10.19, 10.19, 10.19, 10.2, 10.2, 10.2, 10.21, 10.21, 10.21, 10.22, 10.22, 10.22, 10.23, 10.23, 10.23, 10.24, 10.24, 10.24, 10.25, 10.25, 10.25, 10.26, 10.26, 10.26, 10.27, 10.27, 10.27, 10.28, 10.28, 10.28, 10.29, 10.29, 10.29, 10.3, 10.3, 10.3, 10.31, 10.31, 10.31, 10.32, 10.32, 10.32, 10.33, 10.33, 10.33, 10.34, 10.34, 10.34, 10.35, 10.35, 10.35, 10.36, 10.36, 10.36, 10.37, 10.37, 10.37, 10.38, 10.38, 10.38, 10.39, 10.39, 10.39, 10.4, 10.4, 10.4, 10.41, 10.41, 10.41, 10.42, 10.42, 10.42, 10.43, 10.43, 
10.43, 10.44, 10.44, 10.44, 10.45, 10.45, 10.45, 10.46, 10.46, 10.46, 10.47, 10.47, 10.47, 10.48, 10.48, 10.48, 10.49, 10.49, 10.49, 10.5, 10.5, 10.5, 10.51, 10.51, 10.51, 10.52, 10.52, 10.52, 10.53, 10.53, 10.53, 10.54, 10.54, 10.54, 10.55, 10.55, 10.55, 10.56, 10.56, 10.56, 10.57, 10.57, 10.57, 10.58, 10.58, 10.58, 10.59, 10.59, 10.59, 10.6, 10.6, 10.6, 10.61, 10.61, 10.61, 10.62, 10.62, 10.62, 10.63, 10.63, 10.63, 10.64, 10.64, 10.64, 10.65, 10.65, 10.65, 10.66, 10.66, 10.66, 10.67, 10.67, 10.67, 10.68, 10.68, 10.68, 10.69, 10.69, 10.69, 10.7, 10.7, 10.7, 10.71, 10.71, 10.71, 10.71, 10.72, 10.72, 10.72, 10.73, 10.73, 10.73, 10.74, 10.74, 10.74, 10.75, 10.75, 10.75, 10.76, 10.76, 10.76, 10.77, 10.77, 10.77, 10.78, 10.78, 10.78, 10.79, 10.79, 10.79, 10.8, 10.8, 10.8, 10.81, 10.81, 10.81, 10.82, 10.82, 10.82, 10.83, 10.83, 10.83, 10.84, 10.84, 10.84, 10.85, 10.85, 10.85, 10.86, 10.86, 10.86, 10.87, 10.87, 10.87, 10.88, 10.88, 10.88, 10.89, 10.89, 10.89, 10.9, 10.9, 10.9, 10.91, 10.91, 10.91, 10.92, 10.92, 10.92, 10.93, 10.93, 10.93, 10.94, 10.94, 10.94, 10.94, 10.95, 10.95, 10.95, 10.96, 10.96, 10.96, 10.97, 10.97, 10.97, 10.98, 10.98, 10.98, 10.99, 10.99, 10.99, 11.0, 11.0, 11.0, 11.01, 11.01, 11.01, 11.02, 11.02, 11.02, 11.03, 11.03, 11.03, 11.04, 11.04, 11.04, 11.05, 11.05, 11.05, 11.06, 11.06, 11.06, 11.07, 11.07, 11.07, 11.08, 11.08, 11.08, 11.09, 11.09, 11.09, 11.1, 11.1, 11.1, 11.11, 11.11, 11.11, 11.12, 11.12, 11.12, 11.12, 11.13, 11.13, 11.13, 11.14, 11.14, 11.14, 11.15, 11.15, 11.15, 11.16, 11.16, 11.16, 11.17, 11.17, 11.17, 11.18, 11.18, 11.18, 11.19, 11.19, 11.19, 11.2, 11.2, 11.2, 11.21, 11.21, 11.21, 11.22, 11.22, 11.22, 11.23, 11.23, 11.23, 11.24, 11.24, 11.24, 11.25, 11.25, 11.25, 11.26, 11.26, 11.26, 11.27, 11.27, 11.27, 11.27, 11.28, 11.28, 11.28, 11.29, 11.29, 11.29, 11.3, 11.3, 11.3, 11.31, 11.31, 11.31, 11.32, 11.32, 11.32, 11.33, 11.33, 11.33, 11.34, 11.34, 11.34, 11.35, 11.35, 11.35, 11.36, 11.36, 11.36, 11.37, 11.37, 11.37, 11.38, 11.38, 
11.38, 11.39, 11.39, 11.39, 11.4, 11.4, 11.4, 11.41, 11.41, 11.41, 11.41, 11.42, 11.42, 11.42, 11.43, 11.43, 11.43, 11.44, 11.44, 11.44, 11.45, 11.45, 11.45, 11.46, 11.46, 11.46, 11.47, 11.47, 11.47, 11.48, 11.48, 11.48, 11.49, 11.49, 11.49, 11.5, 11.5, 11.5, 11.51, 11.51, 11.51, 11.52, 11.52, 11.52, 11.53, 11.53, 11.53, 11.54, 11.54, 11.54, 11.54, 11.55, 11.55, 11.55, 11.56, 11.56, 11.56, 11.57, 11.57, 11.57, 11.58, 11.58, 11.58, 11.59, 11.59, 11.59, 11.6, 11.6, 11.6, 11.61, 11.61, 11.61, 11.62, 11.62, 11.62, 11.63, 11.63, 11.63, 11.64, 11.64, 11.64, 11.65, 11.65, 11.65, 11.66, 11.66, 11.66, 11.67, 11.67, 11.67, 11.67, 11.68, 11.68, 11.68, 11.69, 11.69, 11.69, 11.7, 11.7, 11.7, 11.71, 11.71, 11.71, 11.72, 11.72, 11.72, 11.73, 11.73, 11.73, 11.74, 11.74, 11.74, 11.75, 11.75, 11.75, 11.76, 11.76, 11.76, 11.77, 11.77, 11.77, 11.78, 11.78, 11.78, 11.79, 11.79, 11.79, 11.79, 11.8, 11.8, 11.8, 11.81, 11.81, 11.81, 11.82, 11.82, 11.82, 11.83, 11.83, 11.83, 11.84, 11.84, 11.84, 11.85, 11.85, 11.85, 11.86, 11.86, 11.86, 11.87, 11.87, 11.87, 11.88, 11.88, 11.88, 11.89, 11.89, 11.89, 11.9, 11.9, 11.9, 11.9, 11.91, 11.91, 11.91, 11.92, 11.92, 11.92, 11.93, 11.93, 11.93, 11.94, 11.94, 11.94, 11.95, 11.95, 11.95, 11.96, 11.96, 11.96, 11.97, 11.97, 11.97, 11.98, 11.98, 11.98, 11.99, 11.99, 11.99, 12.0, 12.0, 12.0, 12.01, 12.01, 12.01, 12.01, 12.02, 12.02, 12.02, 12.03, 12.03, 12.03, 12.04, 12.04, 12.04, 12.05, 12.05, 12.05, 12.06, 12.06, 12.06, 12.07, 12.07, 12.07, 12.08, 12.08, 12.08, 12.09, 12.09, 12.09, 12.1, 12.1, 12.1, 12.11, 12.11, 12.11, 12.12, 12.12, 12.12, 12.12, 12.13, 12.13, 12.13, 12.14, 12.14, 12.14, 12.15, 12.15, 12.15, 12.16, 12.16, 12.16, 12.17, 12.17, 12.17, 12.18, 12.18, 12.18, 12.19, 12.19, 12.19, 12.2, 12.2, 12.2, 12.21, 12.21, 12.21, 12.22, 12.22, 12.22, 12.23, 12.23, 12.23, 12.23, 12.24, 12.24, 12.24, 12.25, 12.25, 12.25, 12.26, 12.26, 12.26, 12.27, 12.27, 12.27, 12.28, 12.28, 12.28, 12.29, 12.29, 12.29, 12.3, 12.3, 12.3, 12.31, 12.31, 12.31, 12.32, 12.32, 
12.32, 12.33, 12.33, 12.33, 12.33, 12.34, 12.34, 12.34, 12.35, 12.35, 12.35, 12.36, 12.36, 12.36, 12.37, 12.37, 12.37, 12.38, 12.38, 12.38, 12.39, 12.39, 12.39, 12.4, 12.4, 12.4, 12.41, 12.41, 12.41, 12.42, 12.42, 12.42, 12.43, 12.43, 12.43, 12.44, 12.44, 12.44, 12.44, 12.45, 12.45, 12.45, 12.46, 12.46, 12.46, 12.47, 12.47, 12.47, 12.48, 12.48, 12.48, 12.49, 12.49, 12.49, 12.5, 12.5, 12.5, 12.51, 12.51, 12.51, 12.52, 12.52, 12.52, 12.53, 12.53, 12.53, 12.54, 12.54, 12.54, 12.54, 12.55, 12.55, 12.55, 12.56, 12.56, 12.56, 12.57, 12.57, 12.57, 12.58, 12.58, 12.58, 12.59, 12.59, 12.59, 12.6, 12.6, 12.6, 12.61, 12.61, 12.61, 12.62, 12.62, 12.62, 12.63, 12.63, 12.63, 12.64, 12.64, 12.64, 12.65, 12.65, 12.65, 12.65, 12.66, 12.66, 12.66, 12.67, 12.67, 12.67, 12.68, 12.68, 12.68, 12.69, 12.69, 12.69, 12.7, 12.7, 12.7, 12.71, 12.71, 12.71, 12.72, 12.72, 12.72, 12.73, 12.73, 12.73, 12.74, 12.74, 12.74, 12.75, 12.75, 12.75, 12.76, 12.76, 12.76, 12.76, 12.77, 12.77, 12.77, 12.78, 12.78, 12.78, 12.79, 12.79, 12.79, 12.8, 12.8, 12.8, 12.81, 12.81, 12.81, 12.82, 12.82, 12.82, 12.83, 12.83, 12.83, 12.84, 12.84, 12.84, 12.85, 12.85, 12.85, 12.86, 12.86, 12.86, 12.86, 12.87, 12.87, 12.87, 12.88, 12.88, 12.88, 12.89, 12.89, 12.89, 12.9, 12.9, 12.9, 12.91, 12.91, 12.91, 12.92, 12.92, 12.92, 12.93, 12.93, 12.93, 12.94, 12.94, 12.94, 12.95, 12.95, 12.95, 12.96, 12.96, 12.96, 12.97, 12.97, 12.97, 12.97, 12.98, 12.98, 12.98, 12.99, 12.99, 12.99, 13.0, 13.0, 13.0, 13.01, 13.01, 13.01, 13.02, 13.02, 13.02, 13.03, 13.03, 13.03, 13.04, 13.04, 13.04, 13.05, 13.05, 13.05, 13.06, 13.06, 13.06, 13.07, 13.07, 13.07, 13.08, 13.08, 13.08, 13.09, 13.09, 13.09, 13.09, 13.1, 13.1, 13.1, 13.11, 13.11, 13.11, 13.12, 13.12, 13.12, 13.13, 13.13, 13.13, 13.14, 13.14, 13.14, 13.15, 13.15, 13.15, 13.16, 13.16, 13.16, 13.17, 13.17, 13.17, 13.18, 13.18, 13.18, 13.19, 13.19, 13.19, 13.2, 13.2, 13.2, 13.2, 13.21, 13.21, 13.21, 13.22, 13.22, 13.22, 13.23, 13.23, 13.23, 13.24, 13.24, 13.24, 13.25, 13.25, 13.25, 
13.26, 13.26, 13.26, 13.27, 13.27, 13.27, 13.28, 13.28, 13.28, 13.29, 13.29, 13.29, 13.3, 13.3, 13.3, 13.31, 13.31, 13.31, 13.32, 13.32, 13.32, 13.33, 13.33, 13.33, 13.33, 13.34, 13.34, 13.34, 13.35, 13.35, 13.35, 13.36, 13.36, 13.36, 13.37, 13.37, 13.37, 13.38, 13.38, 13.38, 13.39, 13.39, 13.39, 13.4, 13.4, 13.4, 13.41, 13.41, 13.41, 13.42, 13.42, 13.42, 13.43, 13.43, 13.43, 13.44, 13.44, 13.44, 13.45, 13.45, 13.45, 13.45, 13.46, 13.46, 13.46, 13.47, 13.47, 13.47, 13.48, 13.48, 13.48, 13.49, 13.49, 13.49, 13.5, 13.5, 13.5, 13.51, 13.51, 13.51, 13.52, 13.52, 13.52, 13.53, 13.53, 13.53, 13.54, 13.54, 13.54, 13.55, 13.55, 13.55, 13.56, 13.56, 13.56, 13.57, 13.57, 13.57, 13.58, 13.58, 13.58, 13.59, 13.59, 13.59, 13.59, 13.6, 13.6, 13.6, 13.61, 13.61, 13.61, 13.62, 13.62, 13.62, 13.63, 13.63, 13.63, 13.64, 13.64, 13.64, 13.65, 13.65, 13.65, 13.66, 13.66, 13.66, 13.67, 13.67, 13.67, 13.68, 13.68, 13.68, 13.69, 13.69, 13.69, 13.7, 13.7, 13.7, 13.71, 13.71, 13.71, 13.72, 13.72, 13.72, 13.73, 13.73, 13.73, 13.74, 13.74, 13.74, 13.74, 13.75, 13.75, 13.75, 13.76, 13.76, 13.76, 13.77, 13.77, 13.77, 13.78, 13.78, 13.78, 13.79, 13.79, 13.79, 13.8, 13.8, 13.8, 13.81, 13.81, 13.81, 13.82, 13.82, 13.82, 13.83, 13.83, 13.83, 13.84, 13.84, 13.84, 13.85, 13.85, 13.85, 13.86, 13.86, 13.86, 13.87, 13.87, 13.87, 13.88, 13.88, 13.88, 13.89, 13.89, 13.89, 13.9, 13.9, 13.9, 13.91, 13.91, 13.91, 13.91, 13.92, 13.92, 13.92, 13.93, 13.93, 13.93, 13.94, 13.94, 13.94, 13.95, 13.95, 13.95, 13.96, 13.96, 13.96, 13.97, 13.97, 13.97, 13.98, 13.98, 13.98, 13.99, 13.99, 13.99, 14.0, 14.0, 14.0, 14.01, 14.01, 14.01, 14.02, 14.02, 14.02, 14.03, 14.03, 14.03, 14.04, 14.04, 14.04, 14.05, 14.05, 14.05, 14.06, 14.06, 14.06, 14.07, 14.07, 14.07, 14.08, 14.08, 14.08, 14.09, 14.09, 14.09, 14.1, 14.1, 14.1, 14.11, 14.11, 14.11, 14.12, 14.12, 14.12, 14.12, 14.13, 14.13, 14.13, 14.14, 14.14, 14.14, 14.15, 14.15, 14.15, 14.16, 14.16, 14.16, 14.17, 14.17, 14.17, 14.18, 14.18, 14.18, 14.19, 14.19, 14.19, 14.2, 
14.2, 14.2, 14.21, 14.21, 14.21, 14.22, 14.22, 14.22, 14.23, 14.23, 14.23, 14.24, 14.24, 14.24, 14.25, 14.25, 14.25, 14.26, 14.26, 14.26, 14.27, 14.27, 14.27, 14.28, 14.28, 14.28, 14.29, 14.29, 14.29, 14.3, 14.3, 14.3, 14.31, 14.31, 14.31, 14.32, 14.32, 14.32, 14.33, 14.33, 14.33, 14.34, 14.34, 14.34, 14.35, 14.35, 14.35, 14.36, 14.36, 14.36, 14.37, 14.37, 14.37, 14.38, 14.38, 14.38, 14.39, 14.39, 14.39, 14.4, 14.4, 14.4, 14.41, 14.41, 14.41, 14.42, 14.42, 14.42, 14.43, 14.43, 14.43, 14.44, 14.44, 14.44, 14.45, 14.45, 14.45, 14.46, 14.46, 14.46, 14.46, 14.47, 14.47, 14.47, 14.48, 14.48, 14.48, 14.49, 14.49, 14.49, 14.5, 14.5, 14.5, 14.51, 14.51, 14.51, 14.52, 14.52, 14.52, 14.53, 14.53, 14.53, 14.54, 14.54, 14.54, 14.55, 14.55, 14.55, 14.56, 14.56, 14.56, 14.57, 14.57, 14.57, 14.58, 14.58, 14.58, 14.59, 14.59, 14.59, 14.6, 14.6, 14.6, 14.61, 14.61, 14.61, 14.62, 14.62, 14.62, 14.63, 14.63, 14.63, 14.64, 14.64, 14.64, 14.65, 14.65, 14.65, 14.66, 14.66, 14.66, 14.67, 14.67, 14.67, 14.68, 14.68, 14.68, 14.69, 14.69, 14.69, 14.7, 14.7, 14.7, 14.71, 14.71, 14.71, 14.72, 14.72, 14.72, 14.73, 14.73, 14.73, 14.74, 14.74, 14.74, 14.75, 14.75, 14.75, 14.76, 14.76, 14.76, 14.77, 14.77, 14.77, 14.78, 14.78, 14.78, 14.79, 14.79, 14.79, 14.8, 14.8, 14.8, 14.81, 14.81, 14.81, 14.82, 14.82, 14.82, 14.83, 14.83, 14.83, 14.84, 14.84, 14.84, 14.85, 14.85, 14.85, 14.86, 14.86, 14.86, 14.87, 14.87, 14.87, 14.88, 14.88, 14.89, 14.89, 14.89, 14.9, 14.9, 14.9, 14.91, 14.91, 14.91, 14.92, 14.92, 14.92, 14.93, 14.93, 14.93, 14.94, 14.94, 14.94, 14.95, 14.95, 14.95, 14.96, 14.96, 14.96, 14.97, 14.97, 14.97, 14.98, 14.98, 14.98, 14.99, 14.99, 14.99, 15.0, 15.0, 15.0, 15.01, 15.01, 15.01, 15.02, 15.02, 15.02, 15.03, 15.03, 15.03, 15.04, 15.04, 15.04, 15.05, 15.05, 15.05, 15.06, 15.06, 15.06, 15.07, 15.07, 15.07, 15.08, 15.08, 15.08, 15.09, 15.09, 15.09, 15.1, 15.1, 15.1, 15.11, 15.11, 15.11, 15.12, 15.12, 15.12, 15.13, 15.13, 15.13, 15.14, 15.14, 15.14, 15.15, 15.15, 15.15, 15.16, 15.16, 
15.16, 15.17, 15.17, 15.17, 15.18, 15.18, 15.19, 15.19, 15.19, 15.2, 15.2, 15.2, 15.21, 15.21, 15.21, 15.22, 15.22, 15.22, 15.23, 15.23, 15.23, 15.24, 15.24, 15.24, 15.25, 15.25, 15.25, 15.26, 15.26, 15.26, 15.27, 15.27, 15.27, 15.28, 15.28, 15.28, 15.29, 15.29, 15.29, 15.3, 15.3, 15.3, 15.31, 15.31, 15.31, 15.32, 15.32, 15.32, 15.33, 15.33, 15.33, 15.34, 15.34, 15.34, 15.35, 15.35, 15.36, 15.36, 15.36, 15.37, 15.37, 15.37, 15.38, 15.38, 15.38, 15.39, 15.39, 15.39, 15.4, 15.4, 15.4, 15.41, 15.41, 15.41, 15.42, 15.42, 15.42, 15.43, 15.43, 15.43, 15.44, 15.44, 15.44, 15.45, 15.45, 15.45, 15.46, 15.46, 15.46, 15.47, 15.47, 15.47, 15.48, 15.48, 15.48, 15.49, 15.49, 15.5, 15.5, 15.5, 15.51, 15.51, 15.51, 15.52, 15.52, 15.52, 15.53, 15.53, 15.53, 15.54, 15.54, 15.54, 15.55, 15.55, 15.55, 15.56, 15.56, 15.56, 15.57, 15.57, 15.57, 15.58, 15.58, 15.58, 15.59, 15.59, 15.59, 15.6, 15.6, 15.61, 15.61, 15.61, 15.62, 15.62, 15.62, 15.63, 15.63, 15.63, 15.64, 15.64, 15.64, 15.65, 15.65, 15.65, 15.66, 15.66, 15.66, 15.67, 15.67, 15.67, 15.68, 15.68, 15.68, 15.69, 15.69, 15.69, 15.7, 15.7, 15.71, 15.71, 15.71, 15.72, 15.72, 15.72, 15.73, 15.73, 15.73, 15.74, 15.74, 15.74, 15.75, 15.75, 15.75, 15.76, 15.76, 15.76, 15.77, 15.77, 15.77, 15.78, 15.78, 15.78, 15.79, 15.79, 15.8, 15.8, 15.8, 15.81, 15.81, 15.81, 15.82, 15.82, 15.82, 15.83, 15.83, 15.83, 15.84, 15.84, 15.84, 15.85, 15.85, 15.85, 15.86, 15.86, 15.86, 15.87, 15.87, 15.88, 15.88, 15.88, 15.89, 15.89, 15.89, 15.9, 15.9, 15.9, 15.91, 15.91, 15.91, 15.92, 15.92, 15.92, 15.93, 15.93, 15.93, 15.94, 15.94, 15.95, 15.95, 15.95, 15.96, 15.96, 15.96, 15.97, 15.97, 15.97, 15.98, 15.98, 15.98, 15.99, 15.99, 15.99, 16.0, 16.0, 16.0, 16.01, 16.01, 16.02, 16.02, 16.02, 16.03, 16.03, 16.03, 16.04, 16.04, 16.04, 16.05, 16.05, 16.05, 16.06, 16.06, 16.06, 16.07, 16.07, 16.08, 16.08, 16.08, 16.09, 16.09, 16.09, 16.1, 16.1, 16.1, 16.11, 16.11, 16.11, 16.12, 16.12, 16.12, 16.13, 16.13, 16.13, 16.14, 16.14, 16.15, 16.15, 16.15, 16.16, 16.16, 
16.16, 16.17, 16.17, 16.17, 16.18, 16.18, 16.18, 16.19, 16.19, 16.2, 16.2, 16.2, 16.21, 16.21, 16.21, 16.22, 16.22, 16.22, 16.23, 16.23, 16.23, 16.24, 16.24, 16.24, 16.25, 16.25, 16.26, 16.26, 16.26, 16.27, 16.27, 16.27, 16.28, 16.28, 16.28, 16.29, 16.29, 16.29, 16.3, 16.3, 16.31, 16.31, 16.31, 16.32, 16.32, 16.32, 16.33, 16.33, 16.33, 16.34, 16.34, 16.34, 16.35, 16.35, 16.35, 16.36, 16.36, 16.37, 16.37, 16.37, 16.38, 16.38, 16.38, 16.39, 16.39, 16.39, 16.4, 16.4, 16.41, 16.41, 16.41, 16.42, 16.42, 16.42, 16.43, 16.43, 16.43, 16.44, 16.44, 16.44, 16.45, 16.45, 16.46, 16.46, 16.46, 16.47, 16.47, 16.47, 16.48, 16.48, 16.48, 16.49, 16.49, 16.49, 16.5, 16.5, 16.51, 16.51, 16.51, 16.52, 16.52, 16.52, 16.53, 16.53, 16.53, 16.54, 16.54, 16.55, 16.55, 16.55, 16.56, 16.56, 16.56, 16.57, 16.57, 16.57, 16.58, 16.58, 16.58, 16.59, 16.59, 16.6, 16.6, 16.6, 16.61, 16.61, 16.61, 16.62, 16.62, 16.62, 16.63, 16.63, 16.64, 16.64, 16.64, 16.65, 16.65, 16.65, 16.66, 16.66, 16.66, 16.67, 16.67, 16.68, 16.68, 16.68, 16.69, 16.69, 16.69, 16.7, 16.7, 16.7, 16.71, 16.71, 16.72, 16.72, 16.72, 16.73, 16.73, 16.73, 16.74, 16.74, 16.74, 16.75, 16.75, 16.76, 16.76, 16.76, 16.77, 16.77, 16.77, 16.78, 16.78, 16.78, 16.79, 16.79, 16.8, 16.8, 16.8, 16.81, 16.81, 16.81, 16.82, 16.82, 16.83, 16.83, 16.83, 16.84, 16.84, 16.84, 16.85, 16.85, 16.85, 16.86, 16.86, 16.87, 16.87, 16.87, 16.88, 16.88, 16.88, 16.89, 16.89, 16.9, 16.9, 16.9, 16.91, 16.91, 16.91, 16.92, 16.92, 16.92, 16.93, 16.93, 16.94, 16.94, 16.94, 16.95, 16.95, 16.95, 16.96, 16.96, 16.97, 16.97, 16.97, 16.98, 16.98, 16.98, 16.99, 16.99, 16.99, 17.0, 17.0, 17.01, 17.01, 17.01, 17.02, 17.02, 17.02, 17.03, 17.03, 17.04, 17.04, 17.04, 17.05, 17.05, 17.05, 17.06, 17.06, 17.07, 17.07, 17.07, 17.08, 17.08, 17.08, 17.09, 17.09, 17.1, 17.1, 17.1, 17.11, 17.11, 17.11, 17.12, 17.12, 17.13, 17.13, 17.13, 17.14, 17.14, 17.14, 17.15, 17.15, 17.16, 17.16, 17.16, 17.17, 17.17, 17.17, 17.18, 17.18, 17.19, 17.19, 17.19, 17.2, 17.2, 17.2, 17.21, 17.21, 
17.22, 17.22, 17.22, 17.23, 17.23, 17.23, 17.24, 17.24, 17.25, 17.25, 17.25, 17.26, 17.26, 17.26, 17.27, 17.27, 17.28, 17.28, 17.28, 17.29, 17.29, 17.29, 17.3, 17.3, 17.31, 17.31, 17.31, 17.32, 17.32, 17.32, 17.33, 17.33, 17.34, 17.34, 17.34, 17.35, 17.35, 17.36, 17.36, 17.36, 17.37, 17.37, 17.37, 17.38, 17.38, 17.39, 17.39, 17.39, 17.4, 17.4, 17.4, 17.41, 17.41, 17.42, 17.42, 17.42, 17.43, 17.43, 17.44, 17.44, 17.44, 17.45, 17.45, 17.45, 17.46, 17.46, 17.47, 17.47, 17.47, 17.48, 17.48, 17.48, 17.49, 17.49, 17.5, 17.5 ]
11.445771
56
0.301712
8,203
46,962
1.727173
0.013532
0.002259
0.002964
0.003388
0.788608
0.096767
0.038679
0.0024
0.001129
0
0
0.630828
0.523679
46,962
4,102
57
11.448562
0.002548
0.001192
0
0.937531
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
1
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
54262b012b34ab188c1a9cc5978065464eead361
23
py
Python
server/views/__init__.py
kamujun/flask-vue-experiments
057cb25a481ffa0ea9b1b27b131d3d8bf814dfe7
[ "MIT" ]
1
2018-09-07T00:00:58.000Z
2018-09-07T00:00:58.000Z
server/views/__init__.py
kamujun/flask-vue-experiments
057cb25a481ffa0ea9b1b27b131d3d8bf814dfe7
[ "MIT" ]
null
null
null
server/views/__init__.py
kamujun/flask-vue-experiments
057cb25a481ffa0ea9b1b27b131d3d8bf814dfe7
[ "MIT" ]
null
null
null
from . root import root
23
23
0.782609
4
23
4.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.173913
23
1
23
23
0.947368
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
54284e667d8b13510f3ca35e4b4c70b577c2e51d
64
py
Python
malariagen_data/__init__.py
malariagen/malariagen-data-python
d5df2f5de023a2033ae0690b73e8729bb079e100
[ "MIT" ]
2
2021-01-20T10:32:14.000Z
2021-08-06T14:45:01.000Z
malariagen_data/__init__.py
malariagen/malariagen-data-python
d5df2f5de023a2033ae0690b73e8729bb079e100
[ "MIT" ]
158
2021-01-20T11:31:08.000Z
2022-03-31T13:15:20.000Z
malariagen_data/__init__.py
malariagen/malariagen-data-python
d5df2f5de023a2033ae0690b73e8729bb079e100
[ "MIT" ]
9
2021-01-20T16:37:14.000Z
2022-03-28T07:24:20.000Z
# flake8: noqa from .ag3 import Ag3 from .util import SiteClass
16
27
0.765625
10
64
4.9
0.7
0
0
0
0
0
0
0
0
0
0
0.056604
0.171875
64
3
28
21.333333
0.867925
0.1875
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
58377e81f5c2c1f09b404144148beb994bc035ef
651
py
Python
adventure_game/models/contracts/i_room.py
Def4l71diot/adventure-game-base
6f62ecf07980beba8804114b007d6bd7a4f55cc7
[ "MIT" ]
2
2017-10-23T14:40:35.000Z
2017-10-23T14:40:38.000Z
adventure_game/models/contracts/i_room.py
Def4l71diot/A-Day-in-the-Kremlin
6f62ecf07980beba8804114b007d6bd7a4f55cc7
[ "MIT" ]
null
null
null
adventure_game/models/contracts/i_room.py
Def4l71diot/A-Day-in-the-Kremlin
6f62ecf07980beba8804114b007d6bd7a4f55cc7
[ "MIT" ]
null
null
null
from abc import ABCMeta, abstractmethod class IRoom(metaclass=ABCMeta): @property @abstractmethod def id(self): pass @property @abstractmethod def name(self): pass @property @abstractmethod def description(self): pass @description.setter @abstractmethod def description(self, value): pass @property @abstractmethod def exits(self): pass @property @abstractmethod def items(self): pass @property @abstractmethod def puzzles(self): pass @abstractmethod def check_if_completed(self): pass
15.139535
39
0.603687
60
651
6.516667
0.383333
0.347826
0.383632
0.370844
0.337596
0
0
0
0
0
0
0
0.325653
651
42
40
15.5
0.890661
0
0
0.666667
0
0
0
0
0
0
0
0
0
1
0.242424
false
0.242424
0.030303
0
0.30303
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
5
583f7f1e858068a1ac77dbc8f38e1d3abf24d633
332
py
Python
main.py
fivestarsky/github_spider
3ab036f1fe8502195ab5431c6cfb5f9f1ab537e0
[ "Apache-2.0" ]
1
2021-12-10T01:48:04.000Z
2021-12-10T01:48:04.000Z
main.py
fivestarsky/github_spider
3ab036f1fe8502195ab5431c6cfb5f9f1ab537e0
[ "Apache-2.0" ]
null
null
null
main.py
fivestarsky/github_spider
3ab036f1fe8502195ab5431c6cfb5f9f1ab537e0
[ "Apache-2.0" ]
2
2021-12-16T08:54:01.000Z
2022-02-18T04:22:45.000Z
import scheduler.dynaconf_config_scheduler if __name__ == '__main__': # 请勿删除 scheduler.dynaconf_config_scheduler.start_up_dynaconf_config_scheduler() # 用于周期测试scheduler.dynaconf_config_scheduler.start_up_dynaconf_config_scheduler() # while True: # print(settings.GITHUB_TOKENS_ITER) # time.sleep(1)
30.181818
84
0.76506
37
332
6.216216
0.567568
0.304348
0.5
0.278261
0.46087
0.46087
0.46087
0.46087
0
0
0
0.003571
0.156627
332
11
85
30.181818
0.817857
0.457831
0
0
0
0
0.045714
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
585fed5f2c03c5a43bf7196a51f68374e69e16f3
75
py
Python
face_recognition_toolbox/__init__.py
evd995/face_recognition_toolbox
cf79002639b1979c29750cbd7a7294abd92a5e22
[ "MIT" ]
2
2019-06-06T16:30:33.000Z
2019-06-21T03:40:24.000Z
face_recognition_toolbox/__init__.py
evd995/face_recognition_toolbox
cf79002639b1979c29750cbd7a7294abd92a5e22
[ "MIT" ]
null
null
null
face_recognition_toolbox/__init__.py
evd995/face_recognition_toolbox
cf79002639b1979c29750cbd7a7294abd92a5e22
[ "MIT" ]
null
null
null
# Import tool to get the descriptor of an image from .utils import predict
25
47
0.786667
13
75
4.538462
0.923077
0
0
0
0
0
0
0
0
0
0
0
0.186667
75
2
48
37.5
0.967213
0.6
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
586f45d3f74840aeb0202fae41b4022e04be28e7
45,902
py
Python
elpis/transformer/test_transformer.py
guillaume-wisniewski/elpis
550c350fd0098751b9a502a253bc4066f15c47db
[ "Apache-2.0" ]
118
2018-11-25T22:00:11.000Z
2022-03-18T10:18:33.000Z
elpis/transformer/test_transformer.py
guillaume-wisniewski/elpis
550c350fd0098751b9a502a253bc4066f15c47db
[ "Apache-2.0" ]
189
2019-01-25T01:37:59.000Z
2022-02-16T02:31:23.000Z
elpis/transformer/test_transformer.py
guillaume-wisniewski/elpis
550c350fd0098751b9a502a253bc4066f15c47db
[ "Apache-2.0" ]
34
2018-11-28T20:31:38.000Z
2022-01-27T12:20:59.000Z
import pytest import json import shutil from pathlib import Path from . import DataTransformer, DataTransformerAbstractFactory TEST_FACTORY_TDTAF = '__TEST__TEMP_FACTORY_TDTAF' @pytest.fixture def remove_dtaf(): """ A pytest fixture that stores the collection of DataTransformerAbstractFactory names before the test is run, then deletes any DataTransformerAbstractFactory that were created for the test when the test ends. """ s_before = set(DataTransformerAbstractFactory._transformer_factories.keys()) yield None s_after = set(DataTransformerAbstractFactory._transformer_factories.keys()) for name in (s_after - s_before): DataTransformerAbstractFactory._transformer_factories.pop(name) return @pytest.fixture def tdtaf(remove_dtaf): """ A pytest fixture that yields a temporary Test DataTransformerAbstractFactory (tdtaf) that lives as long as the test runs. """ return DataTransformerAbstractFactory(TEST_FACTORY_TDTAF) ############################################################################### #### Test Factory #### ############################################################################### def test_factory_new(remove_dtaf): """ Check if a new factory can be created. """ TEST_FACTORY_NAME_CREATE = '__TEST_FACTORY_CREATE_TEST' DataTransformerAbstractFactory(TEST_FACTORY_NAME_CREATE) # White-box testing assert TEST_FACTORY_NAME_CREATE in DataTransformerAbstractFactory._transformer_factories t = type(DataTransformerAbstractFactory._transformer_factories[TEST_FACTORY_NAME_CREATE]) assert t == DataTransformerAbstractFactory return def test_factory_new_twice(remove_dtaf): """ Check if a two new factories can be created. 
""" DataTransformerAbstractFactory('__TEST_FACTORY_1') DataTransformerAbstractFactory('__TEST_FACTORY_2') # White-box testing names = DataTransformerAbstractFactory._transformer_factories.keys() assert '__TEST_FACTORY_1' in names assert '__TEST_FACTORY_2' in names return def test_factory_same_name(remove_dtaf): """ Raise an error when a new factory is created with an existing name. """ DataTransformerAbstractFactory('__TEST_FACTORY_SAME_NAME') with pytest.raises(ValueError): DataTransformerAbstractFactory('__TEST_FACTORY_SAME_NAME') return def test_factory_audio_extention(tdtaf): """ Check the setting and getting of the audio extentions. """ assert tdtaf.get_audio_extention() == 'wav' tdtaf.set_audio_extention('mp3') assert tdtaf.get_audio_extention() == 'mp3' return def test_factory_default_context(tdtaf): """ Check the default default context for importers and exporters is the empty {}. """ assert tdtaf._import_context == {} assert tdtaf._export_context == {} return def test_factory_set_default_context(tdtaf): """ Check the setting of the default context for importers and exporters. """ tdtaf.set_default_context({ 'field1': 'value1', 'field2': 'value2' }) assert tdtaf._import_context == { 'field1': 'value1', 'field2': 'value2' } assert tdtaf._export_context == { 'field1': 'value1', 'field2': 'value2' } return def test_factory_set_default_context_copy(tdtaf): """ Check the setting of the default context for importers and exporters is a copy. """ d = { 'field1': 'value1', 'field2': 'value2' } tdtaf.set_default_context(d) assert tdtaf._import_context is not d assert tdtaf._export_context is not d assert tdtaf._import_context is not tdtaf._export_context return def test_factory_set_default_context_twice(tdtaf): """ Attempting to set a default context twice is ambiguous and will raise an error. 
""" tdtaf.set_default_context({}) with pytest.raises(RuntimeError): tdtaf.set_default_context({}) return def test_factory_set_default_context_non_json(tdtaf): """ Non-JSONable types will raise an error. """ class Obj: # Non-JSONable type pass with pytest.raises(TypeError): tdtaf.set_default_context({'obj': Obj()}) return ############################################################################### #### Test Import Decorators #### ############################################################################### def test_factory_import_files(tdtaf): """ Test if the import_files fucntion can be registerd. """ @tdtaf.import_files('test') def import_test_files(file_paths, context, add_annotation, temp_dir): pass # White-box testing assert 'import_test_files' in tdtaf._attributes assert 'test' in tdtaf._import_extension_callbacks assert tdtaf._attributes['import_test_files'] is import_test_files return def test_factory_import_files_correct_arguments(tdtaf): """ Test if the import_files decorator raises an error when the arguments of the decorated function are not correct. """ with pytest.raises(RuntimeError): @tdtaf.import_files('test') def import_test_files(must, have, four, arguments, only): # pylint: disable=unused-variable pass return def test_factory_import_files_twice(tdtaf): """ Test if the import_files fucntion can be registerd twice on different file extentions. 
""" # Black-box testing @tdtaf.import_files('test1') def import_test1_files(file_paths, context, add_annotation, temp_dir): # pylint: disable=unused-variable pass @tdtaf.import_files('test2') def import_test2_files(file_paths, context, add_annotation, temp_dir): # pylint: disable=unused-variable pass # White-box testing assert 'import_test1_files' in tdtaf._attributes assert 'import_test2_files' in tdtaf._attributes assert 'test1' in tdtaf._import_extension_callbacks assert 'test2' in tdtaf._import_extension_callbacks return def test_factory_import_files_twice_same_ext(tdtaf): """ Test if when the import_files fucntion can be registerd twice on the same file extentions, an error is raised. """ @tdtaf.import_files('test') def import_test_files1(a, b, c, d): # pylint: disable=unused-variable pass with pytest.raises(RuntimeError): @tdtaf.import_files('test') def import_test_files2(a, b, c, d): # pylint: disable=unused-variable pass return def test_factory_import_directory(tdtaf): """ Test if the import_directory fucntion can be registerd. """ @tdtaf.import_directory def import_test_dir(dir_path, context, add_annotation, add_audio, temp_dir): pass # White-box testing assert 'import_test_dir' in tdtaf._attributes assert tdtaf._import_directory_callback is import_test_dir return def test_factory_import_directory_correct_arguments(tdtaf): """ Test if the import_directory decorator raises an error when the arguments of the decorated function are not correct. """ with pytest.raises(RuntimeError): @tdtaf.import_directory def import_test_dir(must, have, five, arguments, only, no, more, no_, less): # pylint: disable=unused-variable pass return def test_factory_import_directory_twice(tdtaf): """ Raise an error if the import_directory decorator is used twice. On importing a directory, it would be ambiguous as to which funciton to use. 
""" @tdtaf.import_directory def f1(a, b, c, d, e): # pylint: disable=unused-variable pass with pytest.raises(RuntimeError): @tdtaf.import_directory def f2(a, b, c, d, e): # pylint: disable=unused-variable pass return def test_factory_import_files_import_directory(tdtaf): """ If import_files and import_directory decorators are used together then raise an error. It becomes ambiguous as to which one to use. """ @tdtaf.import_files('test') def f1(a, b, c, d): # pylint: disable=unused-variable pass with pytest.raises(RuntimeError): @tdtaf.import_directory def f2(a, b, c, d, e): # pylint: disable=unused-variable pass return def test_factory_import_directory_import_files(tdtaf): """ If import_files and import_directory decorators are used together then raise an error. It becomes ambiguous as to which one to use. Test other way to test_factory_import_files_import_directory. """ @tdtaf.import_directory def f1(a, b, c, d, e): # pylint: disable=unused-variable pass with pytest.raises(RuntimeError): @tdtaf.import_files('test') def f2(a, b, c, d): # pylint: disable=unused-variable pass return ############################################################################### #### Test Export Decorators #### ############################################################################### def test_factory_export(tdtaf): """ Test if a function using the export decorator can be registerd. """ @tdtaf.export def export(annotations, context, output_dir, temp_dir): pass # White-box testing assert 'export' in tdtaf._attributes assert tdtaf._export_callback is export assert tdtaf._attributes['export'] is export return def test_factory_export_files_twice(tdtaf): """ Raise an error if the export deforator is used more than once. Specifying two export functions makes choosing between them ambiguous. 
""" @tdtaf.export def export1(annotations, context, output_dir, temp_dir): # pylint: disable=unused-variable pass with pytest.raises(RuntimeError): @tdtaf.export def export2(annotations, context, output_dir, temp_dir): # pylint: disable=unused-variable pass return ############################################################################### #### Test Import Settings #### ############################################################################### def test_factory_import_setting(tdtaf): """ Test if the import_setting fucntion can be registerd. """ tdtaf.import_setting('field1', str) assert tdtaf._import_ui_config == { 'field1': { 'type': 'str', 'ui': None } } assert tdtaf._import_context == { 'field1': None } assert tdtaf._export_ui_config == {} assert tdtaf._export_context == {} return def test_factory_import_setting_with_default(tdtaf): """ Check the default value is stored correctly. """ tdtaf.import_setting('field1', str, default='value1') assert tdtaf._import_ui_config == { 'field1': { 'type': 'str', 'ui': None } } assert tdtaf._import_context == { 'field1': 'value1' } assert tdtaf._export_ui_config == {} assert tdtaf._export_context == {} return def test_factory_import_setting_with_ui(tdtaf): """ Check the default value is ui. """ ui_config = { 'type': 'textbox', 'label': 'field1', 'placeholder': 'e.g. value here' } tdtaf.import_setting('field1', str, ui=ui_config) assert tdtaf._import_ui_config == { 'field1': { 'type': 'str', 'ui': { 'type': 'textbox', 'label': 'field1', 'placeholder': 'e.g. value here' } } } assert tdtaf._import_context == { 'field1': None } assert tdtaf._export_ui_config == {} assert tdtaf._export_context == {} return def test_factory_import_setting_conflict_default_config(tdtaf): """ If a key in the default config is set, an error should be raised if there is an attempt to create a setting with a name that is in the list of keys. 
""" tdtaf.set_default_context({ 'field1': 'value1' }) with pytest.raises(ValueError): tdtaf.import_setting('field1', str) return def test_factory_import_setting_twice(tdtaf): """ Check if two import settings can be specified. """ tdtaf.import_setting('field1', str) tdtaf.import_setting('field2', str) assert tdtaf._import_ui_config == { 'field1': { 'type': 'str', 'ui': None }, 'field2': { 'type': 'str', 'ui': None } } assert tdtaf._import_context == { 'field1': None, 'field2': None } assert tdtaf._export_ui_config == {} assert tdtaf._export_context == {} return def test_factory_import_setting_same_field_name(tdtaf): """ Raise an error if import_settings is used twice with the same field name. """ tdtaf.import_setting('field1', str) with pytest.raises(ValueError): tdtaf.import_setting('field1', str) return def test_factory_import_capable_default(tdtaf): """ Without specifying an importing function, the DataTransformer is not import capable. """ assert tdtaf.is_import_capable() == False return def test_factory_import_files_import_capable(tdtaf): """ Specifying an import_files decorated function will make the DataTransformer import capable. """ @tdtaf.import_files('test') def import_test_files(a, b, c, d): # pylint: disable=unused-variable pass assert tdtaf.is_import_capable() == True return def test_factory_import_directory_import_capable(tdtaf): """ Specifying an import_directory decorated function will make the DataTransformer import capable. """ @tdtaf.import_directory def import_test_dir(a, b, c, d, e): # pylint: disable=unused-variable pass assert tdtaf.is_import_capable() == True return ############################################################################### #### Test Export Settings #### ############################################################################### def test_factory_export_setting(tdtaf): """ Test if the export_setting fucntion can be registerd. 
""" tdtaf.export_setting('field1', str) assert tdtaf._export_ui_config == { 'field1': { 'type': 'str', 'ui': None } } assert tdtaf._export_context == { 'field1': None } assert tdtaf._import_ui_config == {} assert tdtaf._import_context == {} return def test_factory_export_setting_with_default(tdtaf): """ Check the default value is stored correctly. """ tdtaf.export_setting('field1', str, default='value1') assert tdtaf._export_ui_config == { 'field1': { 'type': 'str', 'ui': None } } assert tdtaf._export_context == { 'field1': 'value1' } assert tdtaf._import_ui_config == {} assert tdtaf._import_context == {} return def test_factory_export_setting_with_ui(tdtaf): """ Check the default value is ui. """ ui_config = { 'type': 'textbox', 'label': 'field1', 'placeholder': 'e.g. value here' } tdtaf.export_setting('field1', str, ui=ui_config) assert tdtaf._export_ui_config == { 'field1': { 'type': 'str', 'ui': { 'type': 'textbox', 'label': 'field1', 'placeholder': 'e.g. value here' } } } assert tdtaf._export_context == { 'field1': None } assert tdtaf._import_ui_config == {} assert tdtaf._import_context == {} return def test_factory_export_setting_conflict_default_config(tdtaf): """ If a key in the default config is set, an error should be raised if there is an attempt to create a setting with a name that is in the list of keys. """ tdtaf.set_default_context({ 'field1': 'value1' }) with pytest.raises(ValueError): tdtaf.export_setting('field1', str) return def test_factory_export_setting_twice(tdtaf): """ Check if two export settings can be specified. 
""" tdtaf.export_setting('field1', str) tdtaf.export_setting('field2', str) assert tdtaf._export_ui_config == { 'field1': { 'type': 'str', 'ui': None }, 'field2': { 'type': 'str', 'ui': None } } assert tdtaf._export_context == { 'field1': None, 'field2': None } assert tdtaf._import_ui_config == {} assert tdtaf._import_context == {} return def test_factory_export_setting_same_field_name(tdtaf): """ Raise an error if export_settings is used twice with the same field name. """ tdtaf.export_setting('field1', str) with pytest.raises(ValueError): tdtaf.export_setting('field1', str) return def test_factory_export_capable_default(tdtaf): """ Without specifying an export decorated function the DataTransformer is not export capable. """ assert tdtaf.is_export_capable() == False return def test_factory_export_export_capable(tdtaf): """ Specifying an export decorated function will make the DataTransformer export capable. """ @tdtaf.export def export(a, b, c, d): # pylint: disable=unused-variable pass assert tdtaf.is_export_capable() == True return ############################################################################### #### Test General Settings #### ############################################################################### def test_factory_general_setting(tdtaf): """ Test if the general_setting fucntion can be registerd. """ tdtaf.general_setting('field1', str) assert tdtaf._import_ui_config == { 'field1': { 'type': 'str', 'ui': None } } assert tdtaf._import_context == { 'field1': None } assert tdtaf._export_ui_config == { 'field1': { 'type': 'str', 'ui': None } } assert tdtaf._export_context == { 'field1': None } return def test_factory_general_setting_with_default(tdtaf): """ Check the default value is stored correctly. 
""" tdtaf.general_setting('field1', str, default='value1') assert tdtaf._import_ui_config == { 'field1': { 'type': 'str', 'ui': None } } assert tdtaf._import_context == { 'field1': 'value1' } assert tdtaf._export_ui_config == { 'field1': { 'type': 'str', 'ui': None } } assert tdtaf._export_context == { 'field1': 'value1' } return def test_factory_general_setting_with_ui(tdtaf): """ Check the default value is ui. """ ui_config = { 'type': 'textbox', 'label': 'field1', 'placeholder': 'e.g. value here' } tdtaf.general_setting('field1', str, ui=ui_config) assert tdtaf._import_ui_config == { 'field1': { 'type': 'str', 'ui': { 'type': 'textbox', 'label': 'field1', 'placeholder': 'e.g. value here' } } } assert tdtaf._import_context == { 'field1': None } assert tdtaf._export_ui_config == { 'field1': { 'type': 'str', 'ui': { 'type': 'textbox', 'label': 'field1', 'placeholder': 'e.g. value here' } } } assert tdtaf._export_context == { 'field1': None } return def test_factory_general_setting_conflict_default_config(tdtaf): """ If a key in the default config is set, an error should be raised if there is an attempt to create a setting with a name that is in the list of keys. """ tdtaf.set_default_context({ 'field1': 'value1' }) with pytest.raises(ValueError): tdtaf.general_setting('field1', str) return def test_factory_general_setting_conflict_import_setting(tdtaf): """ If a field is specified in the import settings then a field with the same name is specified in the general settings, an error is raised. This is because it is ambiguous as to use the import setting or the general setting for the importer. """ tdtaf.import_setting('field1', str) with pytest.raises(ValueError): tdtaf.general_setting('field1', str) return def test_factory_general_setting_conflict_export_setting(tdtaf): """ If a field is specified in the export settings then a field with the same name is specified in the general settings, an error is raised. 
This is because it is ambiguous as to use the export setting or the general setting for the exporter. """ tdtaf.export_setting('field1', str) with pytest.raises(ValueError): tdtaf.general_setting('field1', str) return def test_factory_general_setting_twice(tdtaf): """ Check if two general settings can be specified. """ tdtaf.general_setting('field1', str) tdtaf.general_setting('field2', str) assert tdtaf._import_ui_config == { 'field1': { 'type': 'str', 'ui': None }, 'field2': { 'type': 'str', 'ui': None } } assert tdtaf._import_context == { 'field1': None, 'field2': None } assert tdtaf._export_ui_config == { 'field1': { 'type': 'str', 'ui': None }, 'field2': { 'type': 'str', 'ui': None } } assert tdtaf._export_context == { 'field1': None, 'field2': None } return def test_factory_general_setting_same_field_name(tdtaf): """ Raise an error if general_settings is used twice with the same field name. """ tdtaf.general_setting('field1', str) with pytest.raises(ValueError): tdtaf.general_setting('field1', str) return ############################################################################### #### Test UI Settings #### ############################################################################### def test_factory_default_ui_configs(tdtaf): """ Check that there are no ui configurations on creation. """ assert tdtaf._import_ui_config == {} assert tdtaf._export_ui_config == {} return # TODO: Extensive testing needs to be done here. ############################################################################### #### Test Reprocess Audio Decorators #### ############################################################################### def test_factory_replace_reprocess_audio(tdtaf): """ Test if the replace_reprocess_audio fucntion can be registered. 
""" @tdtaf.replace_reprocess_audio def reprocess(audio_paths, output_dir_path, add_audio, temp_dir): pass # White-box testing assert tdtaf._audio_processing_callback is reprocess return def test_factory_replace_reprocess_audio_twice(tdtaf): """ The replace_reprocess_audio function can only be registered once. Registering it twice raises an error. """ @tdtaf.replace_reprocess_audio def reprocess1(audio_paths, output_dir_path, add_audio, temp_dir): # pylint: disable=unused-variable pass with pytest.raises(RuntimeError): @tdtaf.replace_reprocess_audio def reprocess2(audio_paths, output_dir_path, add_audio, temp_dir): # pylint: disable=unused-variable pass return ############################################################################### #### Test Factory Decorator Attributes #### ############################################################################### def test_factory_attr_name_is_existing_name_import_file(tdtaf): """ Raise an error when a function has the name of an arrtibute in the DataTransformer object. """ with pytest.raises(NameError): @tdtaf.import_files('test') def __doc__(a,b,c,d): # pylint: disable=unused-variable pass return def test_factory_attr_name_is_existing_name_import_directory(tdtaf): """ Raise an error when a function has the name of an arrtibute in the DataTransformer object. """ with pytest.raises(NameError): @tdtaf.import_directory def __doc__(a,b,c,d): # pylint: disable=unused-variable pass return def test_factory_attr_name_is_existing_name_export(tdtaf): """ Raise an error when a function has the name of an arrtibute in the DataTransformer object. """ with pytest.raises(NameError): @tdtaf.export def __doc__(a,b,c,d): # pylint: disable=unused-variable pass return def test_factory_two_attributes_with_same_name(tdtaf): """ If two functions are defined with the same name then raise an error. 
""" @tdtaf.import_files('test1') def f(a,b,c,d): # pylint: disable=unused-variable pass with pytest.raises(NameError): @tdtaf.import_files('test2') def f(a,b,c,d): # pylint: disable=function-redefined pass return ############################################################################### #### Test Building #### ############################################################################### from . import make_importer, make_exporter def test_build_importer(tdtaf, tmpdir): """ Check that the build_importer method constructs a DataTransformer that is import capable. """ @tdtaf.import_directory def import_dir(path, ctx, add_anno, add_audio, temp_dir): # pylint: disable=unused-variable pass dt = tdtaf.build_importer( str(tmpdir.mkdir('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda a: None, ) assert type(dt) == DataTransformer return def test_build_exporter(tdtaf, tmpdir): """ Check that the build_exporter method constructs a DataTransformer that is export capable. """ @tdtaf.export def ex(path, ctx, add_anno, temp_dir): # pylint: disable=unused-variable pass path_to_ctm_file = tmpdir.join('f.ctm') path_to_ctm_file.write('') path_to_audio_file = tmpdir.join('f.wav') path_to_audio_file.write('') path_to_output_file = tmpdir.join('f.output') path_to_output_file.write('') dt = tdtaf.build_exporter( str(path_to_ctm_file), str(path_to_audio_file), str(path_to_output_file), str(tmpdir.mkdir('temporary')), lambda ctx: None, ) assert type(dt) == DataTransformer return def test_make_importer(tdtaf, tmpdir): """ Use the make_importer function to create an importer data transformer. 
""" @tdtaf.import_directory def import_dir(path, ctx, add_anno, add_audio, temp_dir): # pylint: disable=unused-variable pass dt = make_importer( # pylint: disable=unused-variable TEST_FACTORY_TDTAF, str(tmpdir.mkdir('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) assert type(dt) == DataTransformer return def test_make_importer_non_existant(tmpdir): """ Using the make_importer requesting a name that has not been registered will raise an error. """ with pytest.raises(ValueError): dt = make_importer( # pylint: disable=unused-variable '__TEST_DOES_NOT_EXIST', str(tmpdir.mkdir('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) return def test_make_importer_from_export_only(tdtaf, tmpdir): """ Raise an error if the DataTransformerAbstractFactory associated with the name passed to the make_importer function is only capable of being an exporter. """ @tdtaf.export def ex(path, ctx, add_anno, temp_dir): # pylint: disable=unused-variable pass with pytest.raises(ValueError): dt = make_importer( # pylint: disable=unused-variable TEST_FACTORY_TDTAF, str(tmpdir.mkdir('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) return def test_make_exporter_from_import_only(tdtaf, tmpdir): """ Raise an error if the DataTransformerAbstractFactory associated with the name passed to the make_exporter function is only capable of being an importer. 
""" @tdtaf.import_directory def import_dir(path, ctx, add_anno, add_audio, temp_dir): # pylint: disable=unused-variable pass with pytest.raises(ValueError): dt = make_exporter( # pylint: disable=unused-variable TEST_FACTORY_TDTAF, str(tmpdir.mkdir('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) # TODO: change arguments for the exporter return def test_make_exporter(tdtaf, tmpdir): """ Use the make_exporter function to create an importer data transformer. """ @tdtaf.export def ex(path, ctx, add_anno, temp_dir): # pylint: disable=unused-variable pass dt = make_exporter( TEST_FACTORY_TDTAF, str(tmpdir.mkdir('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) # TODO: change arguments for the exporter assert type(dt) == DataTransformer return def test_make_exporter_non_existant(tmpdir): """ Using the make_exporter requesting a name that has not been registered will raise an error. """ with pytest.raises(ValueError): dt = make_exporter( # pylint: disable=unused-variable '__TEST_DOES_NOT_EXIST', str(tmpdir.mkdir('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) # TODO: change arguments for the exporter return ############################################################################### #### Test DataTransformer (Importer) #### ############################################################################### def test_dt_name(tdtaf, tmpdir): """ Check that the DataTransformer has the name specified. 
""" @tdtaf.import_directory def import_dir(path, ctx, add_anno, add_audio, temp_dir): # pylint: disable=unused-variable pass dt = make_importer( TEST_FACTORY_TDTAF, str(tmpdir.mkdir('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) assert dt.get_name() == TEST_FACTORY_TDTAF assert dt.get_state()['name'] == TEST_FACTORY_TDTAF return def test_dt_change_setting_callback(tdtaf, tmpdir): """ When a setting (context) is changed, the callback is triggered and notifying external objects of the change. """ @tdtaf.import_directory def import_dir(path, ctx, add_anno, add_audio, temp_dir): # pylint: disable=unused-variable pass tdtaf.general_setting('field', str) callback_called = False callback_field = None def callback(ctx): nonlocal callback_called nonlocal callback_field callback_called = True # pylint: disable=unused-variable callback_field = ctx['field'] # pylint: disable=unused-variable dt = make_importer( TEST_FACTORY_TDTAF, str(tmpdir.mkdir('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), callback ) dt.context['field'] = 'updated_value' assert callback_called == True assert callback_field == 'updated_value' return def test_dt_import_files_direct_call(tdtaf, tmpdir): """ Check that function decorated with import_files can be called directly by name from the DataTransformer object. The only parameter these functions accept directly is a list of files. 
""" ran_importer = False collection = tmpdir.mkdir('collection') file_list = [ str(collection.join(f'file{i}.test')) for i in range(3) ] for i in range(3): collection.join(f'file{i}.test').write('') @tdtaf.import_files('test') def import_test_files(paths, ctx, add_anno, temp_dir): # pylint: disable=unused-variable nonlocal ran_importer assert paths == file_list ran_importer = True # pylint: disable=unused-variable dt = make_importer( TEST_FACTORY_TDTAF, str(collection), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) dt.import_test_files(file_list) assert ran_importer == True return def test_dt_import_files_called_by_importer(tdtaf, tmpdir): """ Check that function decorated with import_files can be called indirectly by the DataTransformer's process function. """ ran_importer = False collection = tmpdir.mkdir('collection') file_list = [ str(collection.join(f'file{i}.test')) for i in range(3) ] for i in range(3): collection.join(f'file{i}.test').write('') shutil.copyfile('/recordings/transcribed/1_1_1.wav', f'{collection}/1_1_1.wav') @tdtaf.import_files('test') def import_test_files(paths, ctx, add_anno, temp_dir): # pylint: disable=unused-variable nonlocal ran_importer assert set(paths) == set(file_list) ran_importer = True # pylint: disable=unused-variable dt = make_importer( TEST_FACTORY_TDTAF, str(collection), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) dt.process() assert ran_importer == True return def test_dt_import_directory_direct_call(tdtaf, tmpdir): """ Check that the funtion decorated with import_directory can be called directly by name from the DataTransformer object. The only parameter these functions accept directly is a path to a directory. 
""" ran_importer = False collection = tmpdir.mkdir('collection') file_set = { f'file{i}.test' for i in range(3) } for i in range(3): collection.join(f'file{i}.test').write('') @tdtaf.import_directory def import_test_dir(path, ctx, add_anno, add_audio, temp_dir): # pylint: disable=unused-variable nonlocal ran_importer path = Path(path) assert { f.name for f in path.iterdir() } == file_set ran_importer = True # pylint: disable=unused-variable dt = make_importer( TEST_FACTORY_TDTAF, str(collection), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) dt.import_test_dir() assert ran_importer == True return def test_dt_import_directory_called_by_importer(tdtaf, tmpdir): """ Check that function decorated with import_directory can be called indirectly by the DataTransformer's process function. """ ran_importer = False collection = tmpdir.mkdir('collection') file_set = { f'file{i}.test' for i in range(3) } for i in range(3): collection.join(f'file{i}.test').write('') @tdtaf.import_directory def import_test_dir(path, ctx, add_anno, add_audio, temp_dir): # pylint: disable=unused-variable nonlocal ran_importer path = Path(path) assert { f.name for f in path.iterdir() } == file_set ran_importer = True # pylint: disable=unused-variable dt = make_importer( TEST_FACTORY_TDTAF, str(collection), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) dt.process() assert ran_importer == True return def test_dt_import_only_dirs(tdtaf, tmpdir): """ No files so no import happens. Tries and tricks the importer. """ ran_importer = False collection = tmpdir.mkdir('collection') for i in range(3): collection.mkdir(f'file{i}.test') # These are directories! 
@tdtaf.import_files('test') def import_test_files(path, ctx, add_anno, temp_dir): # pylint: disable=unused-variable nonlocal ran_importer #should never run ran_importer = True # pylint: disable=unused-variable dt = make_importer( TEST_FACTORY_TDTAF, str(collection), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) dt.process() assert ran_importer == False return def test_dt_import_path_non_existant(tdtaf, tmpdir): """ Raise an error if the import directory given does not exist. """ @tdtaf.import_files('test') def import_test_files(path, ctx, add_anno, temp_dir): # pylint: disable=unused-variable pass collection = tmpdir.join('collection') with pytest.raises(RuntimeError): dt = make_importer( TEST_FACTORY_TDTAF, str(collection), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) return def test_dt_import_missing_resampled(tdtaf, tmpdir): """ Raise an error if the resampled directory given does not exist. """ @tdtaf.import_files('test') def import_test_files(path, ctx, add_anno, temp_dir): # pylint: disable=unused-variable pass with pytest.raises(RuntimeError): dt = make_importer( TEST_FACTORY_TDTAF, str(tmpdir.mkdir('collection')), str(tmpdir.join('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) return def test_dt_import_missing_temporary(tdtaf, tmpdir): """ Raise an error if the temporary directory given does not exist. 
""" @tdtaf.import_files('test') def import_test_files(path, ctx, add_anno, temp_dir): # pylint: disable=unused-variable pass with pytest.raises(RuntimeError): dt = make_importer( TEST_FACTORY_TDTAF, str(tmpdir.mkdir('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.join('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) return def test_dt_import_files_has_context(tdtaf, tmpdir): """ Check that funcitons decorated with import_files gets the correct import context. """ ran_importer = False collection = tmpdir.mkdir('collection') collection.join(f'file0.test').write('') @tdtaf.import_files('test') def import_f(paths, ctx, add_anno, temp_dir): # pylint: disable=unused-variable nonlocal ran_importer assert 'field' in ctx ran_importer = True # pylint: disable=unused-variable tdtaf.import_setting('field', str) dt = make_importer( TEST_FACTORY_TDTAF, str(collection), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) assert dt.context['field'] == None dt.process() assert ran_importer == True return def test_dt_import_files_add_annotaion(tdtaf, tmpdir): """ Check that annotaions are added when the callback is used. 
""" ran_importer = False collection = tmpdir.mkdir('collection') collection.join(f'file0.test').write('') @tdtaf.import_files('test') def import_test_files(paths, ctx, add_anno, temp_dir): # pylint: disable=unused-variable nonlocal ran_importer add_anno('some_file', { 'audio_file_name': 'some_file.wav', 'transcript': 'la la la', 'start_ms': 0, 'stop_ms': 1100 }) ran_importer = True dt = make_importer( TEST_FACTORY_TDTAF, str(tmpdir.join('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) dt.process() assert ran_importer == True assert 'some_file' in dt._annotation_store assert dt._annotation_store['some_file'] == [{ 'audio_file_name': 'some_file.wav', 'transcript': 'la la la', 'start_ms': 0, 'stop_ms': 1100 }] return def test_dt_import_directory_has_context(tdtaf, tmpdir): """ Check that funcitons decorated with import_directory gets the correct import context. """ ran_importer = False @tdtaf.import_directory def import_dir(path, ctx, add_anno, add_audio, temp_dir): # pylint: disable=unused-variable nonlocal ran_importer assert 'field' in ctx ran_importer = True # pylint: disable=unused-variable tdtaf.import_setting('field', str) dt = make_importer( TEST_FACTORY_TDTAF, str(tmpdir.mkdir('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) assert dt.context['field'] == None dt.process() assert ran_importer == True return def test_dt_import_directory_add_annotaion(tdtaf, tmpdir): """ Check that annotaions are added when the callback is used. 
""" @tdtaf.import_directory def import_dir(path, ctx, add_anno, add_audio, temp_dir): # pylint: disable=unused-variable add_anno('some_file', { 'audio_file_name': 'some_file.wav', 'transcript': 'la la la', 'start_ms': 0, 'stop_ms': 1100 }) dt = make_importer( TEST_FACTORY_TDTAF, str(tmpdir.mkdir('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) dt.process() assert 'some_file' in dt._annotation_store assert dt._annotation_store['some_file'] == [{ 'audio_file_name': 'some_file.wav', 'transcript': 'la la la', 'start_ms': 0, 'stop_ms': 1100 }] return def test_dt_add_annotaion_wrong_type(tdtaf, tmpdir): """ Raise an error if the dictionary given to add_annotation contains an incorrect field or missing a field. """ @tdtaf.import_directory def import_dir(path, ctx, add_anno, add_audio, temp_dir): # pylint: disable=unused-variable add_anno('some_file', { 'wrong': 'some_file.wav', 'transcript': 'la la la', 'start_ms': 0, 'stop_ms': 1100 }) dt = make_importer( TEST_FACTORY_TDTAF, str(tmpdir.mkdir('collection')), str(tmpdir.mkdir('resampled')), str(tmpdir.mkdir('temporary')), str(tmpdir.join('annotaions.json')), lambda ctx: None ) with pytest.raises(TypeError): dt.process() return ############################################################################### #### Test DataTransformer (Importer) #### ############################################################################### # def test_dt_export_path_non_existant(): # """ # Raise an error if the directory given as the output path does not exist. # """ # pass # def test_dt_exporter_has_context(tdtaf, tmpdir): # """ # Check that funcitons decorated with export_directory gets the correct export # context. 
# """ # @tdtaf.export # def ex(path, ctx, add_anno): # pylint: disable=unused-variable # pass # tdtaf.export_setting('field', str) # dt = make_exporter( # TEST_FACTORY_TDTAF, # str(tmpdir.join('transcription.ctm')), # str(tmpdir.join('audio.wav')), # str(tmpdir.mkdir('temporary')), # str(tmpdir.join('transcription.ctm')), # lambda ctx: None # ) # # TODO: Change arguments above for exporter # assert dt.context['field'] == None # return # def test_dt_default_audio_resampler(): # """ # Check that the default audio resampler produces new audio files. # """ # pass
29.481053
118
0.603699
5,194
45,902
5.119561
0.061802
0.037231
0.031778
0.059907
0.816254
0.750367
0.703697
0.655259
0.633523
0.614494
0
0.005092
0.255523
45,902
1,557
119
29.481053
0.773037
0.240708
0
0.709979
0
0
0.099054
0.006274
0
0
0
0.003211
0.113306
1
0.132017
false
0.04158
0.223493
0
0.43659
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
589c3f245115df7b1cdf6841b9205e3ed2e91831
48
py
Python
travelperk_http_python/oauth/missing_code_exception.py
namelivia/travelperk-http-python
c6cbd88c999a49f7d61ae040029ca3e91ce72cae
[ "MIT" ]
2
2021-08-30T12:34:26.000Z
2021-08-31T07:56:12.000Z
travelperk_http_python/oauth/missing_code_exception.py
namelivia/travelperk-http-python
c6cbd88c999a49f7d61ae040029ca3e91ce72cae
[ "MIT" ]
6
2021-07-15T16:13:06.000Z
2022-03-03T09:45:07.000Z
travelperk_http_python/oauth/missing_code_exception.py
namelivia/travelperk-http-python
c6cbd88c999a49f7d61ae040029ca3e91ce72cae
[ "MIT" ]
null
null
null
class MissingCodeException(Exception): pass
16
38
0.791667
4
48
9.5
1
0
0
0
0
0
0
0
0
0
0
0
0.145833
48
2
39
24
0.926829
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
54651434cc33d296d6dff266f6f624aceb731cfe
42
py
Python
pyTrivialCache/__init__.py
roberto-reale/pyTrivialCache
bb3d855d7335a456b0a15323e49aa0b40b04c38c
[ "MIT" ]
null
null
null
pyTrivialCache/__init__.py
roberto-reale/pyTrivialCache
bb3d855d7335a456b0a15323e49aa0b40b04c38c
[ "MIT" ]
null
null
null
pyTrivialCache/__init__.py
roberto-reale/pyTrivialCache
bb3d855d7335a456b0a15323e49aa0b40b04c38c
[ "MIT" ]
null
null
null
from pyTrivialCache import pyTrivialCache
21
41
0.904762
4
42
9.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.095238
42
1
42
42
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
54819ae918c05b0c1d4009c5d78d5e708cc34f02
7,636
py
Python
project/forms.py
Tech-Matrix/Mentor-Mentee-tool
461acb1365566bd8c75c5cd4f95a52034e759a69
[ "MIT" ]
null
null
null
project/forms.py
Tech-Matrix/Mentor-Mentee-tool
461acb1365566bd8c75c5cd4f95a52034e759a69
[ "MIT" ]
null
null
null
project/forms.py
Tech-Matrix/Mentor-Mentee-tool
461acb1365566bd8c75c5cd4f95a52034e759a69
[ "MIT" ]
null
null
null
from flask_wtf import FlaskForm from flask_wtf.file import FileField, FileAllowed from flask_login import current_user from wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField, SelectField, SelectMultipleField from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError from wtforms.widgets import TextArea from project.models import User import phonenumbers from flask_login import login_user, current_user, logout_user, login_required expertise_l = [("Engineering", "Engineering"), ("Commerce", "Commerce"), ("Medical", "Medical"), ("Arts", "Arts")] class RegistrationForm(FlaskForm): fullname = StringField('Full Name', validators=[DataRequired(), Length(min=2, max=50)]) username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)]) phone = StringField('Phone', validators=[DataRequired(), Length(min=8, max=15)]) email = StringField('Email', validators=[DataRequired(), Email()]) password = PasswordField('Password', validators=[DataRequired()]) confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')]) submit = SubmitField('Sign Up') def validate_username(self, username): user = User.query.filter_by(username=username.data).first() if user: raise ValidationError('That username is taken. Please choose a different one.') def validate_email(self, email): user = User.query.filter_by(email=email.data).first() if user: raise ValidationError('That email is taken. 
Please choose a different one.') # def validate_phone(self, phone): # try: # input_number = phonenumbers.parse(phone.data) # if not (phonenumbers.is_valid_number(input_number)): # raise ValidationError('Invalid phone number.') # except: # input_number = phonenumbers.parse("+91" + phone.data) # if not (phonenumbers.is_valid_number(input_number)): # raise ValidationError('Invalid phone number.') class LoginForm(FlaskForm): email = StringField('Email', validators=[DataRequired(), Email()]) password = PasswordField('Password', validators=[DataRequired()]) remember = BooleanField('Remember Me') submit = SubmitField('Login') class MenteeForm(FlaskForm): fullname = StringField('Full Name', validators=[DataRequired(), Length(min=2, max=50)]) username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)]) phone = StringField('Phone', validators=[DataRequired(), Length(min=8, max=15)]) email = StringField('Email', validators=[DataRequired(), Email()]) city = SelectField('city', choices=[('bangalore', 'Bangalore'), ('chennai', 'Chennai'), ('hyderabad', 'Hyderabad'), ('delhi', 'Delhi')]) gender = SelectField('gender', choices=[("Prefer Not To Tell", "Prefer Not To Tell"), ('Male', 'Male'), ('Female', 'Female')]) gender_pref = SelectField('gender preference', choices=[('Male', 'Male'), ('Female', 'Female')]) language_pref = SelectField("language preference", choices=[("English", "English"), ("Hindi", "Hindi"), ("Kannada", "Kannada")]) aspiration = SelectField('aspiration', choices=expertise_l) hobbies = TextAreaField("Tell us your hobbies", widget=TextArea(), validators=[DataRequired(), Length(min=2)]) b1 = TextAreaField("Qusetion 1", widget=TextArea(), validators=[DataRequired(), Length(min=10)]) b2 = TextAreaField("Qusetion 2", widget=TextArea(), validators=[DataRequired(), Length(min=10)]) submit2 = SubmitField('Update') def validate_username(self, username): user = User.query.filter_by(username=username.data).first() if user: if not(current_user and 
current_user.username == user.username): print(1) raise ValidationError('That username is taken. Please choose a different one.') def validate_email(self, email): user = User.query.filter_by(email=email.data).first() if user: if not (current_user and current_user.email == user.email): print(2) raise ValidationError('That email is taken. Please choose a different one.') # def validate_phone(self, phone): # try: # input_number = phonenumbers.parse(phone.data) # if not (phonenumbers.is_valid_number(input_number)): # raise ValidationError('Invalid phone number.') # except: # input_number = phonenumbers.parse("+91" + phone.data) # if not (phonenumbers.is_valid_number(input_number)): # raise ValidationError('Invalid phone number.') class MentorForm(FlaskForm): fullname = StringField('Full Name', validators=[DataRequired(), Length(min=2, max=50)]) username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)]) phone = StringField('Phone', validators=[DataRequired(), Length(min=5, max=15)]) email = StringField('Email', validators=[DataRequired(), Email()]) gender = SelectField('gender', choices=[('Male', 'Male'), ('Female', 'Female')]) city = SelectField('city', choices=[('bangalore', 'Bangalore'), ('chennai', 'Chennai'), ('hyderabad', 'Hyderabad'), ('delhi', 'Delhi')]) language = SelectField('language', choices=[("English", "English"), ("Hindi", "Hindi"), ("Kannada", "Kannada")]) expertise = SelectField('expertise', choices=expertise_l) hobbies = TextAreaField("Tell us your hobbies", widget=TextArea(), validators=[DataRequired(), Length(min=2)]) b1 = TextAreaField("Qusetion 1", widget=TextArea(), validators=[DataRequired(), Length(min=2)]) b2 = TextAreaField("Qusetion 2", widget=TextArea(), validators=[DataRequired(), Length(min=2)]) submit = SubmitField('Update') def validate_username(self, username): user = User.query.filter_by(username=username.data).first() if user: if not (current_user and current_user.username == user.username): raise 
ValidationError('That username is taken. Please choose a different one.') def validate_email(self, email): user = User.query.filter_by(email=email.data).first() if user: if not (current_user and current_user.email == user.email): raise ValidationError('That email is taken. Please choose a different one.') # def validate_phone(self, phone): # try: # input_number = phonenumbers.parse(phone.data) # if not (phonenumbers.is_valid_number(input_number)): # raise ValidationError('Invalid phone number.') # except: # input_number = phonenumbers.parse("+91" + phone.data) # if not (phonenumbers.is_valid_number(input_number)): # raise ValidationError('Invalid phone number.') class ContactForm(FlaskForm): name = StringField("Name") email = StringField("Email") subject = StringField("Subject") message = TextAreaField("Message") submit = SubmitField("Send") class ConnectForm(FlaskForm): connect = SubmitField(label='Connect') class FindForm(FlaskForm): find = SubmitField(label='Find Mentors') class DisconnectForm(FlaskForm): disconnect = SubmitField(label='Disconnect!')
49.908497
132
0.644709
782
7,636
6.216113
0.171356
0.099568
0.086402
0.095659
0.731125
0.720016
0.720016
0.700267
0.689364
0.689364
0
0.008699
0.217129
7,636
153
133
49.908497
0.80445
0.160948
0
0.539216
0
0
0.164263
0
0
0
0
0
0
1
0.058824
false
0.04902
0.088235
0
0.656863
0.019608
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
49a89e1053eec8eb5aa5c91fc45bd6423c271052
37
py
Python
src/server_design/algorithms/compressor/designSolutions/sol_836.py
robertpardillo/Funnel
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
[ "MIT" ]
1
2021-05-18T16:10:49.000Z
2021-05-18T16:10:49.000Z
src/server_design/algorithms/compressor/designSolutions/sol_836.py
robertpardillo/Funnel
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
[ "MIT" ]
null
null
null
src/server_design/algorithms/compressor/designSolutions/sol_836.py
robertpardillo/Funnel
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
[ "MIT" ]
null
null
null
def sol836(design_parameters): pass
18.5
31
0.810811
5
37
5.8
1
0
0
0
0
0
0
0
0
0
0
0.090909
0.108108
37
2
32
18.5
0.787879
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
5
49bc31733fef810932bd3d9720dc144383f19e08
47
py
Python
models/model_utils/__init__.py
Jungyhuk/plotcoder
4c5fe923dc69227c58d93f55b8a89fd8bb960703
[ "MIT" ]
10
2021-08-06T13:01:11.000Z
2022-01-26T16:58:48.000Z
models/model_utils/__init__.py
Jungyhuk/plotcoder
4c5fe923dc69227c58d93f55b8a89fd8bb960703
[ "MIT" ]
1
2021-09-29T07:54:17.000Z
2021-10-06T00:23:01.000Z
models/model_utils/__init__.py
Jungyhuk/plotcoder
4c5fe923dc69227c58d93f55b8a89fd8bb960703
[ "MIT" ]
2
2021-08-06T13:01:19.000Z
2021-10-05T14:16:59.000Z
from .logger import * from .supervisor import *
23.5
25
0.765957
6
47
6
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.148936
47
2
25
23.5
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
49cb9daf7069e629075ac627f8b8d62746e292fd
222
py
Python
cobweb/settings/__init__.py
CobwebOrg/cobweb-django
14241326860620dbaa64f7eefc6d4b393f80d23c
[ "MIT" ]
7
2017-09-14T18:52:58.000Z
2020-05-18T21:01:20.000Z
cobweb/settings/__init__.py
CobwebOrg/cobweb-django
14241326860620dbaa64f7eefc6d4b393f80d23c
[ "MIT" ]
151
2017-09-14T18:46:02.000Z
2022-02-10T09:18:44.000Z
cobweb/settings/__init__.py
CobwebOrg/cobweb-django
14241326860620dbaa64f7eefc6d4b393f80d23c
[ "MIT" ]
1
2017-10-29T19:37:29.000Z
2017-10-29T19:37:29.000Z
""" Settings for cobweb Django site. By default use production – test and debug environments should invoke the respective modules, which import production then make minimal changes. """ from .production import * # noqa
22.2
79
0.774775
30
222
5.766667
0.9
0
0
0
0
0
0
0
0
0
0
0
0.166667
222
9
80
24.666667
0.92973
0.828829
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
49ddb8b1140c03fa230d0d7c312ed3cb59b7088c
216
py
Python
backend/views/misc.py
y-a-r-g/idea-color-themes
c770dd14ee6cca58a5078c8885a7d8adac34965b
[ "MIT" ]
19
2015-04-29T16:16:07.000Z
2018-08-21T18:18:18.000Z
backend/views/misc.py
sdvoynikov/idea-color-themes
c770dd14ee6cca58a5078c8885a7d8adac34965b
[ "MIT" ]
2
2015-08-14T20:20:12.000Z
2015-10-21T17:15:28.000Z
backend/views/misc.py
sdvoynikov/idea-color-themes
c770dd14ee6cca58a5078c8885a7d8adac34965b
[ "MIT" ]
5
2015-07-04T02:29:05.000Z
2018-02-21T06:28:35.000Z
from django.http import HttpResponseRedirect from backend.views import view __author__ = 'sdvoynikov' @view(path=r'^twitter/?$') def index(_): return HttpResponseRedirect('https://twitter.com/IdeaColorThemes')
24
70
0.773148
24
216
6.75
0.791667
0
0
0
0
0
0
0
0
0
0
0
0.101852
216
9
70
24
0.835052
0
0
0
0
0
0.258065
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0.166667
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
49e9cfa50e802458fd042b431bef365c300f638e
22
py
Python
dmpling/__init__.py
tsitsimis/dmpling
1b78f9b4e36dd2619807adc123b683c427369656
[ "MIT" ]
6
2018-09-25T08:11:53.000Z
2022-01-05T04:45:27.000Z
dmpling/__init__.py
tsitsimis/dmpling
1b78f9b4e36dd2619807adc123b683c427369656
[ "MIT" ]
null
null
null
dmpling/__init__.py
tsitsimis/dmpling
1b78f9b4e36dd2619807adc123b683c427369656
[ "MIT" ]
2
2019-05-30T12:48:49.000Z
2020-10-21T08:14:54.000Z
from dmpling import *
11
21
0.772727
3
22
5.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.181818
22
1
22
22
0.944444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
49ef837e4634aa35bae5bbed10e6ad844f504de8
188
py
Python
basis_set_exchange/cli/__init__.py
lomalaspina/basis_set_exchange
c0c61e56c065d21a32e66c0285cc75a737bc7e68
[ "BSD-3-Clause" ]
108
2018-07-09T14:23:49.000Z
2022-03-30T08:26:15.000Z
basis_set_exchange/cli/__init__.py
susilehtola/basis_set_exchange
0185cecc56a67ad561167290fd56ac86c0c76ce7
[ "BSD-3-Clause" ]
230
2018-06-01T15:15:49.000Z
2022-03-30T12:02:11.000Z
basis_set_exchange/cli/__init__.py
susilehtola/basis_set_exchange
0185cecc56a67ad561167290fd56ac86c0c76ce7
[ "BSD-3-Clause" ]
38
2018-07-20T15:16:47.000Z
2022-03-30T08:32:45.000Z
# Python argcomplete searches for this string. If found, # it enables autocompletion # PYTHON_ARGCOMPLETE_OK from .bse_cli import run_bse_cli from .bsecurate_cli import run_bsecurate_cli
26.857143
56
0.835106
28
188
5.321429
0.642857
0.228188
0.161074
0
0
0
0
0
0
0
0
0
0.12766
188
6
57
31.333333
0.908537
0.542553
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
49f1bdbc821625496133d472c982502cb7aeb633
42
py
Python
link_analysis/__main__.py
Rexarrior/ALT
9271682f433f5c9675cf345026decc1d0d359391
[ "Apache-2.0" ]
1
2019-06-16T10:04:18.000Z
2019-06-16T10:04:18.000Z
link_analysis/__main__.py
robot-lab/judyst-link-analysis
9271682f433f5c9675cf345026decc1d0d359391
[ "Apache-2.0" ]
44
2018-10-11T22:27:59.000Z
2018-11-24T17:43:02.000Z
link_analysis/__main__.py
Rexarrior/ALT
9271682f433f5c9675cf345026decc1d0d359391
[ "Apache-2.0" ]
2
2019-04-05T22:52:53.000Z
2019-04-06T05:18:41.000Z
def main(): print("it's main") main()
10.5
22
0.547619
7
42
3.285714
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.214286
42
3
23
14
0.69697
0
0
0
0
0
0.214286
0
0
0
0
0
0
1
0.333333
true
0
0
0
0.333333
0.333333
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
0
0
5
b7389435ef9f67c5ed1d0f2a15ff6f3ccb97dd1d
163
py
Python
src/CAI/__init__.py
BjornFJohansson/CodonAdaptationIndex
ad60ad556895dca6755ebda76e19184ec0d6fe8a
[ "MIT" ]
null
null
null
src/CAI/__init__.py
BjornFJohansson/CodonAdaptationIndex
ad60ad556895dca6755ebda76e19184ec0d6fe8a
[ "MIT" ]
null
null
null
src/CAI/__init__.py
BjornFJohansson/CodonAdaptationIndex
ad60ad556895dca6755ebda76e19184ec0d6fe8a
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """docstring.""" from CAI._version import version as __version__ from .CAI import RSCU, relative_adaptiveness, CAI
20.375
49
0.711656
22
163
5
0.727273
0.127273
0
0
0
0
0
0
0
0
0
0.007092
0.134969
163
7
50
23.285714
0.77305
0.325153
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3f9c0afa04d802e9a1bedd7315d5771a4ed2af29
1,756
py
Python
pose_trackers/lighttrack/graph/unit_test/test_keypoints_to_graph_triplet.py
rcourivaud/video-to-pose3D
b908014fe2c531c075c11cee72bb798120f970c2
[ "MIT" ]
574
2019-07-12T08:35:18.000Z
2022-03-28T06:37:44.000Z
pose_trackers/lighttrack/graph/unit_test/test_keypoints_to_graph_triplet.py
rcourivaud/video-to-pose3D
b908014fe2c531c075c11cee72bb798120f970c2
[ "MIT" ]
55
2019-07-11T11:31:16.000Z
2022-03-11T23:54:54.000Z
pose_trackers/lighttrack/graph/unit_test/test_keypoints_to_graph_triplet.py
rcourivaud/video-to-pose3D
b908014fe2c531c075c11cee72bb798120f970c2
[ "MIT" ]
123
2019-09-06T07:08:40.000Z
2022-03-26T21:50:28.000Z
''' Author: Guanghan Ning E-mail: guanghan.ning@jd.com October 22th, 2018 Unit test for data preparation ''' import os import sys sys.path.append(os.path.abspath("../utils/")) from keypoints_to_graph_triplet import * import pickle def test_load_data_for_gcn_train(): dataset_str = "posetrack_18" dataset_split_str = "train" graph_triplet_list_all = load_data_for_gcn(dataset_str, dataset_split_str) print("graph_pair_list Top 10: {}".format(graph_triplet_list_all[0:10])) print("number of graph triplets collected: {}".format(len(graph_triplet_list_all))) output_folder = "." data_out_path = '{}/posetrack_train_data_triplet.pickle'.format(output_folder) with open(data_out_path, 'wb') as handle: pickle.dump(graph_triplet_list_all, handle) with open('./posetrack_train_data_triplet.pickle', 'rb') as handle: restore = pickle.load(handle) print(restore == graph_triplet_list_all) def test_load_data_for_gcn_val(): dataset_str = "posetrack_18" dataset_split_str = "val" graph_triplet_list_all = load_data_for_gcn(dataset_str, dataset_split_str) print("graph_pair_list Top 10: {}".format(graph_triplet_list_all[0:10])) print("number of graph triplets collected: {}".format(len(graph_triplet_list_all))) output_folder = "." data_out_path = '{}/posetrack_val_data_triplet.pickle'.format(output_folder) with open(data_out_path, 'wb') as handle: pickle.dump(graph_triplet_list_all, handle) with open('./posetrack_val_data_triplet.pickle', 'rb') as handle: restore = pickle.load(handle) print(restore == graph_triplet_list_all) if __name__ == "__main__": test_load_data_for_gcn_train() test_load_data_for_gcn_val()
33.132075
87
0.731777
253
1,756
4.648221
0.256917
0.112245
0.136054
0.161565
0.836735
0.823129
0.743197
0.681973
0.681973
0.681973
0
0.013477
0.154897
1,756
52
88
33.769231
0.778976
0.057517
0
0.529412
0
0
0.204169
0.089516
0
0
0
0
0
1
0.058824
false
0
0.117647
0
0.176471
0.176471
0
0
0
null
0
0
1
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
3fa21ab9e78dccea06c65a19d306e6a4996aa5cd
2,559
py
Python
jupyterlab_git/tests/test_settings.py
EnisBerk/jupyterlab-git
a558c76570b2d75e01717d769771e52e58bfd439
[ "BSD-3-Clause" ]
1
2021-02-12T17:10:21.000Z
2021-02-12T17:10:21.000Z
jupyterlab_git/tests/test_settings.py
EnisBerk/jupyterlab-git
a558c76570b2d75e01717d769771e52e58bfd439
[ "BSD-3-Clause" ]
1
2021-04-08T11:17:12.000Z
2021-04-08T11:17:12.000Z
jupyterlab_git/tests/test_settings.py
EnisBerk/jupyterlab-git
a558c76570b2d75e01717d769771e52e58bfd439
[ "BSD-3-Clause" ]
null
null
null
import json from pathlib import Path from unittest.mock import Mock, patch from packaging.version import parse import pytest import tornado from jupyterlab_git import __version__ from jupyterlab_git.handlers import GitSettingsHandler from .testutils import NS, assert_http_error, maybe_future @patch("jupyterlab_git.git.execute") async def test_git_get_settings_success(mock_execute, jp_fetch, jp_root_dir): # Given git_version = "2.10.3" jlab_version = "2.1.42-alpha.24" mock_execute.return_value = maybe_future( (0, "git version {}.os_platform.42".format(git_version), "") ) # When response = await jp_fetch( NS, "settings", method="GET", params={"version": jlab_version} ) # Then mock_execute.assert_called_once_with(["git", "--version"], cwd=".") assert response.code == 200 payload = json.loads(response.body) assert payload == { "frontendVersion": str(parse(jlab_version)), "gitVersion": git_version, "serverRoot": jp_root_dir.as_posix(), "serverVersion": str(parse(__version__)), } @patch("jupyterlab_git.git.execute") async def test_git_get_settings_no_git(mock_execute, jp_fetch, jp_root_dir): # Given jlab_version = "2.1.42-alpha.24" mock_execute.side_effect = FileNotFoundError( "[Errno 2] No such file or directory: 'git'" ) # When response = await jp_fetch( NS, "settings", method="GET", params={"version": jlab_version} ) # Then mock_execute.assert_called_once_with(["git", "--version"], cwd=".") assert response.code == 200 payload = json.loads(response.body) assert payload == { "frontendVersion": str(parse(jlab_version)), "gitVersion": None, "serverRoot": jp_root_dir.as_posix(), "serverVersion": str(parse(__version__)), } @patch("jupyterlab_git.git.execute") async def test_git_get_settings_no_jlab(mock_execute, jp_fetch, jp_root_dir): # Given git_version = "2.10.3" mock_execute.return_value = maybe_future( (0, "git version {}.os_platform.42".format(git_version), "") ) # When response = await jp_fetch(NS, "settings", method="GET") # Then 
mock_execute.assert_called_once_with(["git", "--version"], cwd=".") assert response.code == 200 payload = json.loads(response.body) assert payload == { "frontendVersion": None, "gitVersion": git_version, "serverRoot": jp_root_dir.as_posix(), "serverVersion": str(parse(__version__)), }
28.433333
77
0.66823
318
2,559
5.075472
0.257862
0.068154
0.033457
0.039033
0.799876
0.799876
0.799876
0.799876
0.78005
0.745973
0
0.017621
0.201641
2,559
89
78
28.752809
0.772394
0.018367
0
0.596774
0
0
0.179856
0.031175
0
0
0
0
0.16129
1
0
false
0
0.145161
0
0.145161
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
3fb717fa83008baed3a75f10281034e2636b8010
146
py
Python
notifications/admin.py
Crystal-Mackey/twitterclone
2f8bce58309bd46982f7fce519076cba30466268
[ "MIT" ]
null
null
null
notifications/admin.py
Crystal-Mackey/twitterclone
2f8bce58309bd46982f7fce519076cba30466268
[ "MIT" ]
null
null
null
notifications/admin.py
Crystal-Mackey/twitterclone
2f8bce58309bd46982f7fce519076cba30466268
[ "MIT" ]
null
null
null
from django.contrib import admin from notifications.models import Notification # Register your models here. admin.site.register(Notification)
20.857143
45
0.821918
18
146
6.666667
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.123288
146
7
46
20.857143
0.9375
0.178082
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3fcaa48350708c10719ced44756a2c99ba1b9f23
172
py
Python
src/Africa/admin.py
MCN10/Demo1
3ff6b18f6da9f6b6b634513f9654c2c643014072
[ "Apache-2.0" ]
null
null
null
src/Africa/admin.py
MCN10/Demo1
3ff6b18f6da9f6b6b634513f9654c2c643014072
[ "Apache-2.0" ]
null
null
null
src/Africa/admin.py
MCN10/Demo1
3ff6b18f6da9f6b6b634513f9654c2c643014072
[ "Apache-2.0" ]
null
null
null
from django.contrib import admin from .models import * # Register your models here. admin.site.register(Country) admin.site.register(Dish) admin.site.register(Ingredient)
21.5
32
0.802326
24
172
5.75
0.541667
0.195652
0.369565
0
0
0
0
0
0
0
0
0
0.098837
172
7
33
24.571429
0.890323
0.151163
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
3fe8c4b9b035830ea9e68a1829c19e3ade232e6a
78
py
Python
ComunioScore/messenger/__init__.py
bierschi/ComunioScore
7a7065bf1788f1672d3bfc0e9be1811002427b48
[ "MIT" ]
4
2019-09-24T16:37:01.000Z
2021-09-08T09:41:55.000Z
ComunioScore/messenger/__init__.py
bierschi/ComunioScore
7a7065bf1788f1672d3bfc0e9be1811002427b48
[ "MIT" ]
null
null
null
ComunioScore/messenger/__init__.py
bierschi/ComunioScore
7a7065bf1788f1672d3bfc0e9be1811002427b48
[ "MIT" ]
1
2019-10-05T18:07:03.000Z
2019-10-05T18:07:03.000Z
from ComunioScore.messenger.comunioscore_telegram import ComunioScoreTelegram
39
77
0.923077
7
78
10.142857
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.051282
78
1
78
78
0.959459
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b74c874da96395052ab1298de1a2d92a843b7399
171
py
Python
lib/JumpScale/baselib/dnsman/__init__.py
rudecs/jumpscale_core7
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
[ "Apache-2.0" ]
null
null
null
lib/JumpScale/baselib/dnsman/__init__.py
rudecs/jumpscale_core7
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
[ "Apache-2.0" ]
4
2016-08-25T12:08:39.000Z
2018-04-12T12:36:01.000Z
lib/JumpScale/baselib/dnsman/__init__.py
rudecs/jumpscale_core7
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
[ "Apache-2.0" ]
3
2016-03-08T07:49:34.000Z
2018-10-19T13:56:43.000Z
from JumpScale import j def cb(): from .dnsFactory import DNSFactory return DNSFactory() j.base.loader.makeAvailable(j, 'tools') j.tools._register('dnsman', cb)
19
39
0.725146
23
171
5.347826
0.608696
0.097561
0
0
0
0
0
0
0
0
0
0
0.152047
171
8
40
21.375
0.848276
0
0
0
0
0
0.064327
0
0
0
0
0
0
1
0.166667
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b779dc23b17aaf28acbb573f59e0923f8a0f8229
46
py
Python
runserver.py
obsoleter/allangles
8cacdd845cfedcebedd45be302b374c4bad5ab2e
[ "MIT" ]
null
null
null
runserver.py
obsoleter/allangles
8cacdd845cfedcebedd45be302b374c4bad5ab2e
[ "MIT" ]
null
null
null
runserver.py
obsoleter/allangles
8cacdd845cfedcebedd45be302b374c4bad5ab2e
[ "MIT" ]
null
null
null
from allangles import app app.run(debug=True)
15.333333
25
0.804348
8
46
4.625
0.875
0
0
0
0
0
0
0
0
0
0
0
0.108696
46
2
26
23
0.902439
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b77e977cbc23d31dc9bdbaf60410bce47fc7001f
134
py
Python
surrortg/__init__.py
cedricsellin/surrogate
03f13d297fdd74d7e5d164821039acc38d0b2103
[ "MIT" ]
2
2020-11-25T00:29:07.000Z
2020-12-01T20:24:47.000Z
surrortg/__init__.py
cedricsellin/surrogate
03f13d297fdd74d7e5d164821039acc38d0b2103
[ "MIT" ]
1
2021-04-10T11:59:23.000Z
2021-04-10T11:59:23.000Z
surrortg/__init__.py
cedricsellin/surrogate
03f13d297fdd74d7e5d164821039acc38d0b2103
[ "MIT" ]
2
2021-01-28T17:43:13.000Z
2021-03-29T13:19:38.000Z
from .game import Game from .game_io import GameIO from .config_parser import get_config from .network.ge_api_client import ApiClient
26.8
44
0.843284
22
134
4.909091
0.590909
0.148148
0
0
0
0
0
0
0
0
0
0
0.119403
134
4
45
33.5
0.915254
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b7a7d213ee14ef9203ca0cc55c773066720fc8d9
2,322
py
Python
tests/test_calculus.py
hirnimeshrampuresoftware/quaternion
dac0c5cd5ea286e3583b71e1a1ee1620e7e7a03d
[ "MIT" ]
1
2021-06-04T01:42:53.000Z
2021-06-04T01:42:53.000Z
tests/test_calculus.py
hirnimeshrampuresoftware/quaternion
dac0c5cd5ea286e3583b71e1a1ee1620e7e7a03d
[ "MIT" ]
null
null
null
tests/test_calculus.py
hirnimeshrampuresoftware/quaternion
dac0c5cd5ea286e3583b71e1a1ee1620e7e7a03d
[ "MIT" ]
1
2019-12-06T23:57:14.000Z
2019-12-06T23:57:14.000Z
#!/usr/bin/env python from __future__ import print_function, division, absolute_import import math import numpy as np import quaternion from numpy import * import pytest try: import scipy has_scipy = True except: has_scipy = False eps = np.finfo(float).eps @pytest.mark.skipif(not has_scipy, reason="Scipy is not installed") def test_subset_interpolation(): from quaternion.calculus import spline t = np.linspace(0, 10, 100) f = np.sin(t) for i1, i2 in [[0, 100], [0, -1], [10, -10], [11, -10], [10, -11], [11, -11], [21, -21], [31, -31]]: f_out = spline(f, t, t_out=t[i1:i2]) f_sub = f[i1:i2] assert np.allclose(f_out, f_sub, atol=2*eps, rtol=2*eps) @pytest.mark.skipif(not has_scipy, reason="Scipy is not installed") def test_differentiation(): from quaternion.calculus import spline t = np.linspace(0, 10, 1000) f = np.sin(t) fprime = np.cos(t) assert np.allclose(spline(f, t, spline_degree=5, derivative_order=1), fprime, atol=1e-11, rtol=1e-11) f = np.exp(1j*t) fprime = 1j*np.exp(1j*t) assert np.allclose(spline(f, t, spline_degree=5, derivative_order=1), fprime, atol=1e-11, rtol=1e-11) @pytest.mark.skipif(not has_scipy, reason="Scipy is not installed") def test_antiderivative(): from quaternion.calculus import spline t = np.linspace(0, 10, 1000) f = np.cos(t) fint = np.sin(t) assert np.allclose(spline(f, t, spline_degree=5, derivative_order=-1), fint, atol=1e-11, rtol=1e-11) f = np.exp(1j*t) fint = 1j*(1-np.exp(1j*t)) assert np.allclose(spline(f, t, spline_degree=5, derivative_order=-1), fint, atol=1e-11, rtol=1e-11) @pytest.mark.skipif(not has_scipy, reason="Scipy is not installed") def test_integral(): from quaternion.calculus import spline t = np.linspace(0, 10, 1000) f = np.cos(t) fint = np.sin(t) assert np.allclose(spline(f, t, spline_degree=5, definite_integral_bounds=(t[0], t[-1])), fint[-1]-fint[0], atol=1e-11, rtol=1e-11) f = np.exp(1j*t) fint = 1j*(1-np.exp(1j*t)) # print(max(abs(spline(f, t, spline_degree=5, derivative_order=-1)-fint))) assert 
np.allclose(spline(f, t, spline_degree=5, definite_integral_bounds=(t[0], t[-1])), fint[-1]-fint[0], atol=1e-11, rtol=1e-11)
30.96
105
0.646856
393
2,322
3.725191
0.198473
0.032787
0.043716
0.06694
0.726776
0.726776
0.726776
0.726776
0.726776
0.726776
0
0.0704
0.192506
2,322
74
106
31.378378
0.7104
0.040052
0
0.566038
0
0
0.039515
0
0
0
0
0
0.132075
1
0.075472
false
0
0.207547
0
0.283019
0.018868
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4d01222aca997886f3c4b13fabd5bfffb54c1658
16
py
Python
py1805.py
gjxgjxgjx/flask1805
f3de70e319a81da5f64a73c52f329ea80976445e
[ "Apache-2.0" ]
null
null
null
py1805.py
gjxgjxgjx/flask1805
f3de70e319a81da5f64a73c52f329ea80976445e
[ "Apache-2.0" ]
null
null
null
py1805.py
gjxgjxgjx/flask1805
f3de70e319a81da5f64a73c52f329ea80976445e
[ "Apache-2.0" ]
null
null
null
print("sfhahfd")
16
16
0.75
2
16
6
1
0
0
0
0
0
0
0
0
0
0
0
0
16
1
16
16
0.75
0
0
0
0
0
0.411765
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
4d08447009fbb640ae22b588b707ca08071906f9
145
py
Python
src/iotpackage/__init__.py
dilawer11/iot-device-fingerprinting
afd13eca4323b9d9180dbf6f407a50650b768c24
[ "MIT" ]
3
2022-01-18T21:58:37.000Z
2022-02-08T18:19:26.000Z
src/iotpackage/__init__.py
dilawer11/iot-device-fingerprinting
afd13eca4323b9d9180dbf6f407a50650b768c24
[ "MIT" ]
null
null
null
src/iotpackage/__init__.py
dilawer11/iot-device-fingerprinting
afd13eca4323b9d9180dbf6f407a50650b768c24
[ "MIT" ]
1
2022-03-25T18:17:02.000Z
2022-03-25T18:17:02.000Z
__all__ = ['Utils', 'ModelTraining', 'PCAP2CSV', 'PreProcessing', 'Utils', 'FeatureExtraction', 'FeatureSelection', 'HostNameLookup', 'Filters']
72.5
144
0.724138
10
145
10.1
0.9
0
0
0
0
0
0
0
0
0
0
0.007463
0.075862
145
1
145
145
0.746269
0
0
0
0
0
0.675862
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4d092bec9832bba0b1a7ff2b4756aadc71d88a4b
24,214
py
Python
tests/test_0023-ttree-versions.py
eic/uproot4
deb8d88c2643521f372bf5005c51af8926016c7e
[ "BSD-3-Clause" ]
133
2020-05-08T21:34:11.000Z
2022-03-07T18:12:58.000Z
tests/test_0023-ttree-versions.py
eic/uproot4
deb8d88c2643521f372bf5005c51af8926016c7e
[ "BSD-3-Clause" ]
269
2020-05-13T02:42:24.000Z
2022-03-24T20:24:16.000Z
tests/test_0023-ttree-versions.py
eic/uproot4
deb8d88c2643521f372bf5005c51af8926016c7e
[ "BSD-3-Clause" ]
45
2020-05-15T17:48:04.000Z
2022-03-18T19:23:07.000Z
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE from __future__ import absolute_import import numpy import pytest import skhep_testdata import uproot truth = { "n": [ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, ], "b": [ True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, ], "ab": [ [False, True, False], [True, False, True], [False, True, False], [True, False, True], [False, True, False], [True, False, True], [False, True, False], [True, False, True], [False, True, False], [True, False, True], [False, True, False], [True, False, True], [False, True, False], [True, False, True], [False, True, False], [True, False, True], [False, True, False], [True, False, True], [False, True, False], [True, False, True], [False, True, False], [True, False, True], [False, True, False], [True, False, True], [False, True, False], [True, False, True], [False, True, False], [True, False, True], [False, True, False], [True, False, True], ], "Ab": [ [], [True], [True, True], [True, True, True], [True, True, True, True], [], [False], [False, False], [False, False, False], [False, False, False, False], [], [True], [True, True], [True, True, True], [True, True, True, True], [], [False], [False, False], [False, False, False], [False, False, False, False], [], [True], [True, True], [True, True, True], [True, True, True, True], [], [False], [False, False], [False, False, False], [False, False, False, False], ], "i1": [ -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, ], "ai1": [ [-14, -13, -12], [-13, -12, -11], [-12, -11, -10], [-11, -10, -9], [-10, -9, -8], [-9, -8, -7], [-8, -7, -6], [-7, -6, -5], [-6, -5, -4], [-5, -4, -3], [-4, -3, -2], [-3, -2, -1], [-2, -1, 0], [-1, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 
4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17], ], "Ai1": [ [], [-15], [-15, -13], [-15, -13, -11], [-15, -13, -11, -9], [], [-10], [-10, -8], [-10, -8, -6], [-10, -8, -6, -4], [], [-5], [-5, -3], [-5, -3, -1], [-5, -3, -1, 1], [], [0], [0, 2], [0, 2, 4], [0, 2, 4, 6], [], [5], [5, 7], [5, 7, 9], [5, 7, 9, 11], [], [10], [10, 12], [10, 12, 14], [10, 12, 14, 16], ], "u1": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, ], "au1": [ [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17], [16, 17, 18], [17, 18, 19], [18, 19, 20], [19, 20, 21], [20, 21, 22], [21, 22, 23], [22, 23, 24], [23, 24, 25], [24, 25, 26], [25, 26, 27], [26, 27, 28], [27, 28, 29], [28, 29, 30], [29, 30, 31], [30, 31, 32], ], "Au1": [ [], [0], [0, 2], [0, 2, 4], [0, 2, 4, 6], [], [5], [5, 7], [5, 7, 9], [5, 7, 9, 11], [], [10], [10, 12], [10, 12, 14], [10, 12, 14, 16], [], [15], [15, 17], [15, 17, 19], [15, 17, 19, 21], [], [20], [20, 22], [20, 22, 24], [20, 22, 24, 26], [], [25], [25, 27], [25, 27, 29], [25, 27, 29, 31], ], "i2": [ -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, ], "ai2": [ [-14, -13, -12], [-13, -12, -11], [-12, -11, -10], [-11, -10, -9], [-10, -9, -8], [-9, -8, -7], [-8, -7, -6], [-7, -6, -5], [-6, -5, -4], [-5, -4, -3], [-4, -3, -2], [-3, -2, -1], [-2, -1, 0], [-1, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17], ], "Ai2": [ [], [-15], [-15, -13], [-15, -13, -11], [-15, -13, -11, -9], [], [-10], [-10, -8], [-10, -8, -6], [-10, -8, -6, -4], [], [-5], 
[-5, -3], [-5, -3, -1], [-5, -3, -1, 1], [], [0], [0, 2], [0, 2, 4], [0, 2, 4, 6], [], [5], [5, 7], [5, 7, 9], [5, 7, 9, 11], [], [10], [10, 12], [10, 12, 14], [10, 12, 14, 16], ], "u2": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, ], "au2": [ [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17], [16, 17, 18], [17, 18, 19], [18, 19, 20], [19, 20, 21], [20, 21, 22], [21, 22, 23], [22, 23, 24], [23, 24, 25], [24, 25, 26], [25, 26, 27], [26, 27, 28], [27, 28, 29], [28, 29, 30], [29, 30, 31], [30, 31, 32], ], "Au2": [ [], [0], [0, 2], [0, 2, 4], [0, 2, 4, 6], [], [5], [5, 7], [5, 7, 9], [5, 7, 9, 11], [], [10], [10, 12], [10, 12, 14], [10, 12, 14, 16], [], [15], [15, 17], [15, 17, 19], [15, 17, 19, 21], [], [20], [20, 22], [20, 22, 24], [20, 22, 24, 26], [], [25], [25, 27], [25, 27, 29], [25, 27, 29, 31], ], "i4": [ -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, ], "ai4": [ [-14, -13, -12], [-13, -12, -11], [-12, -11, -10], [-11, -10, -9], [-10, -9, -8], [-9, -8, -7], [-8, -7, -6], [-7, -6, -5], [-6, -5, -4], [-5, -4, -3], [-4, -3, -2], [-3, -2, -1], [-2, -1, 0], [-1, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17], ], "Ai4": [ [], [-15], [-15, -13], [-15, -13, -11], [-15, -13, -11, -9], [], [-10], [-10, -8], [-10, -8, -6], [-10, -8, -6, -4], [], [-5], [-5, -3], [-5, -3, -1], [-5, -3, -1, 1], [], [0], [0, 2], [0, 2, 4], [0, 2, 4, 6], [], [5], [5, 7], [5, 7, 9], [5, 7, 9, 11], [], [10], [10, 12], [10, 12, 14], [10, 12, 14, 16], ], "u4": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, ], 
"au4": [ [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17], [16, 17, 18], [17, 18, 19], [18, 19, 20], [19, 20, 21], [20, 21, 22], [21, 22, 23], [22, 23, 24], [23, 24, 25], [24, 25, 26], [25, 26, 27], [26, 27, 28], [27, 28, 29], [28, 29, 30], [29, 30, 31], [30, 31, 32], ], "Au4": [ [], [0], [0, 2], [0, 2, 4], [0, 2, 4, 6], [], [5], [5, 7], [5, 7, 9], [5, 7, 9, 11], [], [10], [10, 12], [10, 12, 14], [10, 12, 14, 16], [], [15], [15, 17], [15, 17, 19], [15, 17, 19, 21], [], [20], [20, 22], [20, 22, 24], [20, 22, 24, 26], [], [25], [25, 27], [25, 27, 29], [25, 27, 29, 31], ], "i8": [ -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, ], "ai8": [ [-14, -13, -12], [-13, -12, -11], [-12, -11, -10], [-11, -10, -9], [-10, -9, -8], [-9, -8, -7], [-8, -7, -6], [-7, -6, -5], [-6, -5, -4], [-5, -4, -3], [-4, -3, -2], [-3, -2, -1], [-2, -1, 0], [-1, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17], ], "Ai8": [ [], [-15], [-15, -13], [-15, -13, -11], [-15, -13, -11, -9], [], [-10], [-10, -8], [-10, -8, -6], [-10, -8, -6, -4], [], [-5], [-5, -3], [-5, -3, -1], [-5, -3, -1, 1], [], [0], [0, 2], [0, 2, 4], [0, 2, 4, 6], [], [5], [5, 7], [5, 7, 9], [5, 7, 9, 11], [], [10], [10, 12], [10, 12, 14], [10, 12, 14, 16], ], "u8": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, ], "au8": [ [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17], [16, 17, 18], [17, 18, 19], [18, 19, 20], [19, 20, 21], [20, 21, 22], [21, 22, 23], [22, 23, 24], [23, 24, 25], 
[24, 25, 26], [25, 26, 27], [26, 27, 28], [27, 28, 29], [28, 29, 30], [29, 30, 31], [30, 31, 32], ], "Au8": [ [], [0], [0, 2], [0, 2, 4], [0, 2, 4, 6], [], [5], [5, 7], [5, 7, 9], [5, 7, 9, 11], [], [10], [10, 12], [10, 12, 14], [10, 12, 14, 16], [], [15], [15, 17], [15, 17, 19], [15, 17, 19, 21], [], [20], [20, 22], [20, 22, 24], [20, 22, 24, 26], [], [25], [25, 27], [25, 27, 29], [25, 27, 29, 31], ], "f4": [ -14.899999618530273, -13.899999618530273, -12.899999618530273, -11.899999618530273, -10.899999618530273, -9.899999618530273, -8.899999618530273, -7.900000095367432, -6.900000095367432, -5.900000095367432, -4.900000095367432, -3.9000000953674316, -2.9000000953674316, -1.899999976158142, -0.8999999761581421, 0.10000000149011612, 1.100000023841858, 2.0999999046325684, 3.0999999046325684, 4.099999904632568, 5.099999904632568, 6.099999904632568, 7.099999904632568, 8.100000381469727, 9.100000381469727, 10.100000381469727, 11.100000381469727, 12.100000381469727, 13.100000381469727, 14.100000381469727, ], "af4": [ [-13.899999618530273, -12.899999618530273, -11.899999618530273], [-12.899999618530273, -11.899999618530273, -10.899999618530273], [-11.899999618530273, -10.899999618530273, -9.899999618530273], [-10.899999618530273, -9.899999618530273, -8.899999618530273], [-9.899999618530273, -8.899999618530273, -7.900000095367432], [-8.899999618530273, -7.900000095367432, -6.900000095367432], [-7.900000095367432, -6.900000095367432, -5.900000095367432], [-6.900000095367432, -5.900000095367432, -4.900000095367432], [-5.900000095367432, -4.900000095367432, -3.9000000953674316], [-4.900000095367432, -3.9000000953674316, -2.9000000953674316], [-3.9000000953674316, -2.9000000953674316, -1.899999976158142], [-2.9000000953674316, -1.899999976158142, -0.8999999761581421], [-1.899999976158142, -0.8999999761581421, 0.10000000149011612], [-0.8999999761581421, 0.10000000149011612, 1.100000023841858], [0.10000000149011612, 1.100000023841858, 2.0999999046325684], [1.100000023841858, 
2.0999999046325684, 3.0999999046325684], [2.0999999046325684, 3.0999999046325684, 4.099999904632568], [3.0999999046325684, 4.099999904632568, 5.099999904632568], [4.099999904632568, 5.099999904632568, 6.099999904632568], [5.099999904632568, 6.099999904632568, 7.099999904632568], [6.099999904632568, 7.099999904632568, 8.100000381469727], [7.099999904632568, 8.100000381469727, 9.100000381469727], [8.100000381469727, 9.100000381469727, 10.100000381469727], [9.100000381469727, 10.100000381469727, 11.100000381469727], [10.100000381469727, 11.100000381469727, 12.100000381469727], [11.100000381469727, 12.100000381469727, 13.100000381469727], [12.100000381469727, 13.100000381469727, 14.100000381469727], [13.100000381469727, 14.100000381469727, 15.100000381469727], [14.100000381469727, 15.100000381469727, 16.100000381469727], [15.100000381469727, 16.100000381469727, 17.100000381469727], ], "Af4": [ [], [-15.0], [-15.0, -13.899999618530273], [-15.0, -13.899999618530273, -12.800000190734863], [-15.0, -13.899999618530273, -12.800000190734863, -11.699999809265137], [], [-10.0], [-10.0, -8.899999618530273], [-10.0, -8.899999618530273, -7.800000190734863], [-10.0, -8.899999618530273, -7.800000190734863, -6.699999809265137], [], [-5.0], [-5.0, -3.9000000953674316], [-5.0, -3.9000000953674316, -2.799999952316284], [-5.0, -3.9000000953674316, -2.799999952316284, -1.7000000476837158], [], [0.0], [0.0, 1.100000023841858], [0.0, 1.100000023841858, 2.200000047683716], [0.0, 1.100000023841858, 2.200000047683716, 3.299999952316284], [], [5.0], [5.0, 6.099999904632568], [5.0, 6.099999904632568, 7.199999809265137], [5.0, 6.099999904632568, 7.199999809265137, 8.300000190734863], [], [10.0], [10.0, 11.100000381469727], [10.0, 11.100000381469727, 12.199999809265137], [10.0, 11.100000381469727, 12.199999809265137, 13.300000190734863], ], "f8": [ -14.9, -13.9, -12.9, -11.9, -10.9, -9.9, -8.9, -7.9, -6.9, -5.9, -4.9, -3.9000000000000004, -2.9000000000000004, -1.9000000000000004, 
-0.9000000000000004, 0.09999999999999964, 1.0999999999999996, 2.0999999999999996, 3.0999999999999996, 4.1, 5.1, 6.1, 7.1, 8.1, 9.1, 10.1, 11.1, 12.1, 13.1, 14.1, ], "af8": [ [-13.9, -12.9, -11.9], [-12.9, -11.9, -10.9], [-11.9, -10.9, -9.9], [-10.9, -9.9, -8.9], [-9.9, -8.9, -7.9], [-8.9, -7.9, -6.9], [-7.9, -6.9, -5.9], [-6.9, -5.9, -4.9], [-5.9, -4.9, -3.9000000000000004], [-4.9, -3.9000000000000004, -2.9000000000000004], [-3.9000000000000004, -2.9000000000000004, -1.9000000000000004], [-2.9000000000000004, -1.9000000000000004, -0.9000000000000004], [-1.9000000000000004, -0.9000000000000004, 0.09999999999999964], [-0.9000000000000004, 0.09999999999999964, 1.0999999999999996], [0.09999999999999964, 1.0999999999999996, 2.0999999999999996], [1.0999999999999996, 2.0999999999999996, 3.0999999999999996], [2.0999999999999996, 3.0999999999999996, 4.1], [3.0999999999999996, 4.1, 5.1], [4.1, 5.1, 6.1], [5.1, 6.1, 7.1], [6.1, 7.1, 8.1], [7.1, 8.1, 9.1], [8.1, 9.1, 10.1], [9.1, 10.1, 11.1], [10.1, 11.1, 12.1], [11.1, 12.1, 13.1], [12.1, 13.1, 14.1], [13.1, 14.1, 15.1], [14.1, 15.1, 16.1], [15.1, 16.1, 17.1], ], "Af8": [ [], [-15.0], [-15.0, -13.9], [-15.0, -13.9, -12.8], [-15.0, -13.9, -12.8, -11.7], [], [-10.0], [-10.0, -8.9], [-10.0, -8.9, -7.8], [-10.0, -8.9, -7.8, -6.7], [], [-5.0], [-5.0, -3.9], [-5.0, -3.9, -2.8], [-5.0, -3.9, -2.8, -1.7], [], [0.0], [0.0, 1.1], [0.0, 1.1, 2.2], [0.0, 1.1, 2.2, 3.3], [], [5.0], [5.0, 6.1], [5.0, 6.1, 7.2], [5.0, 6.1, 7.2, 8.3], [], [10.0], [10.0, 11.1], [10.0, 11.1, 12.2], [10.0, 11.1, 12.2, 13.3], ], "str": [ "hey-0", "hey-1", "hey-2", "hey-3", "hey-4", "hey-5", "hey-6", "hey-7", "hey-8", "hey-9", "hey-10", "hey-11", "hey-12", "hey-13", "hey-14", "hey-15", "hey-16", "hey-17", "hey-18", "hey-19", "hey-20", "hey-21", "hey-22", "hey-23", "hey-24", "hey-25", "hey-26", "hey-27", "hey-28", "hey-29", ], } @pytest.mark.parametrize( "version", [ "5.23.02", # 2009-02-26, TTree version 16, TBranch version 11 "5.24.00", # 2009-06-30, TTree 
version 16, TBranch version 11 "5.25.02", # 2009-10-01, TTree version 17, TBranch version 12 "5.26.00", # 2009-12-14, TTree version 18, TBranch version 12 "5.27.02", # 2010-04-27, TTree version 18, TBranch version 12 "5.28.00", # 2010-12-15, TTree version 18, TBranch version 12 "5.29.02", # 2011-04-21, TTree version 18, TBranch version 12 "5.30.00", # 2011-06-28, TTree version 19, TBranch version 12 "6.08.04", # 2017-01-13, TTree version 19, TBranch version 12 "6.10.05", # 2017-07-28, TTree version 19, TBranch version 12 is this 6.10.04? "6.14.00", # 2018-06-13, TTree version 20, TBranch version 13 "6.16.00", # 2019-01-23, TTree version 20, TBranch version 13 "6.18.00", # 2019-06-25, TTree version 20, TBranch version 13 "6.20.04", # 2020-04-01, TTree version 20, TBranch version 13 ], ) def test(version): with uproot.open( skhep_testdata.data_path("uproot-sample-{0}-uncompressed.root".format(version)) )["sample"] as sample: arrays = sample.arrays(sample.keys(), library="np") assert set(arrays.keys()) == set(truth.keys()) for key in truth.keys(): if isinstance( sample[key].interpretation, uproot.interpretation.jagged.AsJagged ): assert [row.tolist() for row in arrays[key]] == truth[key] else: assert arrays[key].tolist() == truth[key] assert sample.file._streamers is None
20.695726
89
0.330553
2,742
24,214
2.915755
0.056893
0.069794
0.094309
0.12833
0.845904
0.816385
0.617261
0.382739
0.382739
0.382739
0
0.476761
0.469522
24,214
1,169
90
20.71343
0.14566
0.032461
0
0.775862
0
0
0.017384
0.001495
0
0
0
0
0.003448
1
0.000862
false
0
0.00431
0
0.005172
0
0
0
0
null
0
0
0
1
1
0
0
0
0
0
1
0
0
0
0
0
1
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4d211004c137beda06e2dec11f456c9ebf165a5f
114
py
Python
ufdl-core-app/src/ufdl/core_app/migrations/hardware/__init__.py
waikato-ufdl/ufdl-backend
776fc906c61eba6c2f2e6324758e7b8a323e30d7
[ "Apache-2.0" ]
null
null
null
ufdl-core-app/src/ufdl/core_app/migrations/hardware/__init__.py
waikato-ufdl/ufdl-backend
776fc906c61eba6c2f2e6324758e7b8a323e30d7
[ "Apache-2.0" ]
85
2020-07-24T00:04:28.000Z
2022-02-10T10:35:15.000Z
ufdl-core-app/src/ufdl/core_app/migrations/hardware/__init__.py
waikato-ufdl/ufdl-backend
776fc906c61eba6c2f2e6324758e7b8a323e30d7
[ "Apache-2.0" ]
null
null
null
""" Functions for working with the known set of hardware generations. """ from ._hardware import iterate_hardware
22.8
65
0.789474
15
114
5.866667
0.866667
0
0
0
0
0
0
0
0
0
0
0
0.140351
114
4
66
28.5
0.897959
0.570175
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4d22e8c7e60c6f23e4e3bc6c29b184a01ff13735
143
py
Python
dashboard/admin.py
soham2109/covidhelpindia
dac656fbed35cae9f391f88d58b272d9dcf38db2
[ "MIT" ]
null
null
null
dashboard/admin.py
soham2109/covidhelpindia
dac656fbed35cae9f391f88d58b272d9dcf38db2
[ "MIT" ]
null
null
null
dashboard/admin.py
soham2109/covidhelpindia
dac656fbed35cae9f391f88d58b272d9dcf38db2
[ "MIT" ]
null
null
null
from django.contrib import admin from dashboard.models import resource_entry # Register your models here. admin.site.register(resource_entry)
23.833333
43
0.839161
20
143
5.9
0.65
0.220339
0
0
0
0
0
0
0
0
0
0
0.104895
143
5
44
28.6
0.921875
0.181818
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
4d56342172e515c9253ba773100afaa38ae1e791
131
py
Python
server/clean.py
SarthakRout/gradeLess
bff63dd60d1d2833733e6a64289a00b3585c4e8b
[ "MIT" ]
null
null
null
server/clean.py
SarthakRout/gradeLess
bff63dd60d1d2833733e6a64289a00b3585c4e8b
[ "MIT" ]
null
null
null
server/clean.py
SarthakRout/gradeLess
bff63dd60d1d2833733e6a64289a00b3585c4e8b
[ "MIT" ]
null
null
null
if __name__ == "__main__": open('response.txt', 'w').close() open('coord.txt', 'w').close() open('answerkey.txt', 'w').close()
21.833333
35
0.603053
18
131
3.944444
0.555556
0.169014
0.380282
0.366197
0
0
0
0
0
0
0
0
0.10687
131
5
36
26.2
0.606838
0
0
0
0
0
0.346154
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
12b34e039dd89d0cd164a882058df1971a99ec25
4,706
py
Python
epytope/Data/pssms/arb/mat/B_5401_9.py
christopher-mohr/epytope
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
[ "BSD-3-Clause" ]
7
2021-02-01T18:11:28.000Z
2022-01-31T19:14:07.000Z
epytope/Data/pssms/arb/mat/B_5401_9.py
christopher-mohr/epytope
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
[ "BSD-3-Clause" ]
22
2021-01-02T15:25:23.000Z
2022-03-14T11:32:53.000Z
epytope/Data/pssms/arb/mat/B_5401_9.py
christopher-mohr/epytope
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
[ "BSD-3-Clause" ]
4
2021-05-28T08:50:38.000Z
2022-03-14T11:45:32.000Z
B_5401_9 = {0: {'A': -0.7967467562851485, 'C': 0.6218890927041457, 'E': -0.748218303598456, 'D': -0.6064658241429575, 'G': -0.7075696032638925, 'F': 1.4955508588523267, 'I': -0.190616361816582, 'H': -0.06456372848408708, 'K': -0.5674431320840968, 'M': 1.3239576833310382, 'L': 0.7937233501441889, 'N': -0.0027954544036147564, 'Q': -0.2970988882232281, 'P': -0.45028997770049467, 'S': -0.4542312549252247, 'R': -0.7646632151935266, 'T': -0.33479761986744644, 'W': 0.9135908749726865, 'V': -0.21914577672328875, 'Y': -0.3098523598022598}, 1: {'A': -0.4714143119753974, 'C': -0.9971256438325977, 'E': -1.1844983262784883, 'D': -1.0905764186795208, 'G': -4.0, 'F': -1.01487017803411, 'I': -0.9873072971240872, 'H': -1.1682941884424098, 'K': -1.1682941884424098, 'M': -0.15724945047899022, 'L': -0.9335327003145286, 'N': -0.6921675249086519, 'Q': -0.6921675249086519, 'P': 1.4259750892112353, 'S': -1.0866148276961354, 'R': -1.2230806342177059, 'T': -0.9627230207207713, 'W': -1.0189332958136124, 'V': -0.9641663996807898, 'Y': -0.9926879551163937}, 2: {'A': 0.7325107944575395, 'C': 0.29623173237523137, 'E': -0.3463199063562832, 'D': -0.3528091987962721, 'G': -0.4436080437830357, 'F': 0.24037457186672137, 'I': 0.30848143084530877, 'H': 0.4500107661843465, 'K': -0.5588810281914541, 'M': 0.45120286132365434, 'L': -0.14921835220640783, 'N': 0.07139256144320993, 'Q': -0.4669694669682261, 'P': -0.32966636892111467, 'S': -0.12406327123918356, 'R': -0.22003055081372977, 'T': 0.5253682294863599, 'W': 0.4076676259060657, 'V': -0.013520236611408398, 'Y': 0.33368682611560785}, 3: {'A': -0.32470441488186513, 'C': 0.35983131005844826, 'E': 0.023728399561002363, 'D': -0.29398083298775124, 'G': -0.3920426332212583, 'F': 0.2826881468985426, 'I': 0.22028590307233986, 'H': 0.5613958881492079, 'K': -0.5262307937555406, 'M': -0.06171129286099271, 'L': 0.4285883786091315, 'N': 0.46363496232224927, 'Q': -0.4463639120585317, 'P': -0.27929592025759564, 'S': 0.43019461527080544, 'R': -0.39323140399676454, 'T': 
0.03453994456641586, 'W': -0.4071943931821516, 'V': 0.2913701382349469, 'Y': -0.295726276016406}, 4: {'A': 0.03195952708422954, 'C': 0.7538733646825564, 'E': -0.7547890989360855, 'D': -0.12211607453782398, 'G': -0.1446995609146599, 'F': 0.2693336089527726, 'I': 0.3211572017932036, 'H': 0.3772265988198759, 'K': -0.29279614821190136, 'M': 0.7370583963284685, 'L': 0.2421050015230475, 'N': -0.37261986518660917, 'Q': -0.315310464966722, 'P': 0.21890234745700435, 'S': -0.13685583229793058, 'R': -0.2698128247055473, 'T': 0.47451543071535174, 'W': -0.3031845155031329, 'V': 0.0157976019316292, 'Y': -0.25086234910426647}, 5: {'A': 0.028417839945420176, 'C': -0.20193528768913796, 'E': -0.4466121085852556, 'D': -0.2525999236614306, 'G': 0.30800555356896553, 'F': 0.4466857896442223, 'I': -0.2383720936199336, 'H': -0.24122428102513188, 'K': -0.04013823236146156, 'M': -0.40791679815664383, 'L': 0.01713417578132417, 'N': 0.020199759714222987, 'Q': -0.025239683828625514, 'P': -0.3281030543479335, 'S': 0.4927275154608222, 'R': -0.47246384984216216, 'T': -0.10561132092945691, 'W': 0.27445293063752846, 'V': 0.18233252249318557, 'Y': -0.21353436951225688}, 6: {'A': 0.26004671363062154, 'C': 0.8099803859508007, 'E': -0.27885629550943, 'D': 0.051513203790618, 'G': -0.6745221645947497, 'F': 0.5483927720773524, 'I': 0.10040070340943318, 'H': 0.10472392842493776, 'K': -0.2362091477816544, 'M': -0.3307442597528809, 'L': -0.2597849167480332, 'N': 0.5478119817563006, 'Q': -0.6204955502246708, 'P': 0.14387965304623204, 'S': 0.07095077292389135, 'R': -0.6579094050205798, 'T': -0.16807317634771707, 'W': -0.018183475998687835, 'V': 0.44669985723367833, 'Y': 0.48969599630173083}, 7: {'A': 0.32244872773243227, 'C': -0.2725734720506514, 'E': -0.013021241684917518, 'D': -0.5821404383465739, 'G': -0.44344640485847203, 'F': -0.6654236181854725, 'I': 0.5296323108052593, 'H': -0.299498337343457, 'K': -0.4701230550961857, 'M': -0.41730021418189284, 'L': -0.1247186571935289, 'N': 0.20486665265354187, 'Q': 
-0.13350033396287797, 'P': 1.0896349174242896, 'S': 0.3573133995737231, 'R': -0.42379499588706887, 'T': 0.007837528607374382, 'W': -0.13053892771501727, 'V': 0.06652189147617324, 'Y': -0.12503075888110285}, 8: {'A': 1.7676678831508756, 'C': -0.13213373363052033, 'E': -4.0, 'D': -4.0, 'G': -4.0, 'F': -0.18002200669975046, 'I': 0.5903410936876187, 'H': -1.235731591704771, 'K': -1.2403230616567722, 'M': -0.2892787337604548, 'L': -0.5243382529625826, 'N': -4.0, 'Q': -4.0, 'P': -4.0, 'S': -0.13213373363052033, 'R': -1.0968825990341307, 'T': 0.369651084433176, 'W': -0.6206531401078962, 'V': 0.7323036343671341, 'Y': -0.6115641616848615}, -1: {'slope': 0.12914872901318256, 'intercept': -0.5555588594363089}}
4,706
4,706
0.695495
559
4,706
5.851521
0.368515
0.005503
0.001834
0.002446
0
0
0
0
0
0
0
0.710229
0.079686
4,706
1
4,706
4,706
0.045024
0
0
0
0
0
0.041215
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
1
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
12ba43d4d503eb95869c3d20fca399e08254deca
45
py
Python
runserver.py
waisuan/minblog_af
513d387f0df49435b417dcf7462ce6538f46ed19
[ "MIT" ]
null
null
null
runserver.py
waisuan/minblog_af
513d387f0df49435b417dcf7462ce6538f46ed19
[ "MIT" ]
null
null
null
runserver.py
waisuan/minblog_af
513d387f0df49435b417dcf7462ce6538f46ed19
[ "MIT" ]
null
null
null
from minblog2 import app app.run(debug=True)
15
24
0.8
8
45
4.5
0.875
0
0
0
0
0
0
0
0
0
0
0.025
0.111111
45
2
25
22.5
0.875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
12c9e3fe733ba77fe07050ba26ca3204f2260fc4
27,378
py
Python
free_flyer/free_flyer.py
sfang27/CoCo
9cded5a51f5bb71098ec3895e28ec8aa8e87c6ed
[ "MIT" ]
null
null
null
free_flyer/free_flyer.py
sfang27/CoCo
9cded5a51f5bb71098ec3895e28ec8aa8e87c6ed
[ "MIT" ]
null
null
null
free_flyer/free_flyer.py
sfang27/CoCo
9cded5a51f5bb71098ec3895e28ec8aa8e87c6ed
[ "MIT" ]
null
null
null
import os import cvxpy as cp import yaml import pickle import numpy as np import sys import pdb from numpy import sqrt import numpy as np import scipy.stats as stats # for cdf calculations sys.path.insert(1, os.environ['MLOPT']) from core import Problem class FreeFlyer(Problem): """Class to setup + solve free-flyer problems.""" def __init__(self, config=None, solver=cp.MOSEK): """Constructor for FreeFlyer class. Args: config: full path to config file. if None, load default config. solver: solver object to be used by cvxpy """ super().__init__() ## TODO(pculbertson): allow different sets of params to vary. if config is None: #use default config relative_path = os.path.dirname(os.path.abspath(__file__)) config = relative_path + '/config/default.p' config_file = open(config,"rb") _, prob_params, self.sampled_params = pickle.load(config_file) config_file.close() self.init_problem(prob_params) def init_problem(self,prob_params): # setup problem params self.n = 2; self.m = 2 self.N, self.Ak, self.Bk, self.Q, self.R, self.n_obs, \ self.posmin, self.posmax, self.velmin, self.velmax, \ self.umin, self.umax = prob_params self.H = 64 self.W = int(self.posmax[0] / self.posmax[1] * self.H) self.H, self.W = 32, 32 # mod for IRA - assuming actuation risk is normal, and iid in each dimension self.Delta = 0.1 # risk bounds TODO: maybe train on this param too? 
self.dt = 1 # time step size self.actSD = 0.01 # variance of actuation noise self.initSD = 0.00 # risk allocation params self.iraEps = 1e-4 # termination for risk reallocation self.iraAlpha = 0.5 # proportion of unused risk to redistribute self.iraEqTol = 1e-5 # tolerance for equality # calculate standard deviations ahead of time self.iraSD = sqrt([self.initSD**2+(ii*self.dt)*self.actSD**2 for ii in range(self.N)]) # time limit for risk allocation self.timeLimit = 1e3 self.init_bin_problem() self.init_mlopt_problem() def init_bin_problem(self): cons = [] # Variables x = cp.Variable((2*self.n,self.N)) # state u = cp.Variable((self.m,self.N-1)) # control y = cp.Variable((4*self.n_obs,self.N-1), boolean=True) self.bin_prob_variables = {'x':x, 'u':u, 'y':y} # Parameters x0 = cp.Parameter(2*self.n) xg = cp.Parameter(2*self.n) obstacles = cp.Parameter((4, self.n_obs)) self.bin_prob_parameters = {'x0': x0, 'xg': xg, 'obstacles': obstacles} # chance constraint safety margins N steps, n_obs obstacles # using parameters so we can perform IRA with the same problem instance # Initialise with 0 for each value, equivalent to deterministic planning # assume like they do that there's no risk at t0 self.bin_margin_parameters = cp.Parameter((self.N-1,self.n_obs)) cons += [x[:,0] == x0] # Dynamics constraints for ii in range(self.N-1): cons += [x[:,ii+1] - (self.Ak @ x[:,ii] + self.Bk @ u[:,ii]) == np.zeros(2*self.n)] M = 100. 
# big M value for i_obs in range(self.n_obs): for i_dim in range(self.n): o_min = obstacles[self.n*i_dim,i_obs] o_max = obstacles[self.n*i_dim+1,i_obs] for i_t in range(self.N-1): yvar_min = 4*i_obs + self.n*i_dim yvar_max = 4*i_obs + self.n*i_dim + 1 # adding safety margin # cons += [x[i_dim,i_t+1] <= o_min + M*y[yvar_min,i_t]] # cons += [-x[i_dim,i_t+1] <= -o_max + M*y[yvar_max,i_t]] cons += [x[i_dim,i_t+1] <= o_min + M*y[yvar_min,i_t] - self.bin_margin_parameters[i_t,i_obs]] cons += [-x[i_dim,i_t+1] <= -o_max + M*y[yvar_max,i_t] - self.bin_margin_parameters[i_t,i_obs]] for i_t in range(self.N-1): yvar_min, yvar_max = 4*i_obs, 4*(i_obs+1) cons += [sum([y[ii,i_t] for ii in range(yvar_min,yvar_max)]) <= 3] # Region bounds for kk in range(self.N): for jj in range(self.n): cons += [self.posmin[jj] - x[jj,kk] <= 0] cons += [x[jj,kk] - self.posmax[jj] <= 0] # Velocity constraints for kk in range(self.N): for jj in range(self.n): cons += [self.velmin - x[self.n+jj,kk] <= 0] cons += [x[self.n+jj,kk] - self.velmax <= 0] # Control constraints for kk in range(self.N-1): cons += [cp.norm(u[:,kk]) <= self.umax] lqr_cost = 0. 
# l2-norm of lqr_cost for kk in range(self.N): lqr_cost += cp.quad_form(x[:,kk]-xg, self.Q) for kk in range(self.N-1): lqr_cost += cp.quad_form(u[:,kk], self.R) self.bin_prob = cp.Problem(cp.Minimize(lqr_cost), cons) def init_mlopt_problem(self): cons = [] # Variables x = cp.Variable((2*self.n,self.N)) # state u = cp.Variable((self.m,self.N-1)) # control self.mlopt_prob_variables = {'x':x, 'u':u} # Parameters x0 = cp.Parameter(2*self.n) xg = cp.Parameter(2*self.n) obstacles = cp.Parameter((4, self.n_obs)) y = cp.Parameter((4*self.n_obs,self.N-1)) self.mlopt_prob_parameters = {'x0': x0, 'xg': xg, 'obstacles': obstacles, 'y':y} # chance constraint safety margins N steps, n_obs obstacles # using parameters so we can perform IRA with the same problem instance # Initialise with 0 for each value, equivalent to deterministic planning # This uses their assumption that there is no risk at t0 self.mlopt_margin_parameters = cp.Parameter((self.N-1,self.n_obs)) cons += [x[:,0] == x0] # Dynamics constraints for ii in range(self.N-1): cons += [x[:,ii+1] - (self.Ak @ x[:,ii] + self.Bk @ u[:,ii]) == np.zeros(2*self.n)] M = 100. 
# big M value for i_obs in range(self.n_obs): for i_dim in range(self.n): o_min = obstacles[self.n*i_dim,i_obs] o_max = obstacles[self.n*i_dim+1,i_obs] for i_t in range(self.N-1): yvar_min = 4*i_obs + self.n*i_dim yvar_max = 4*i_obs + self.n*i_dim + 1 # cons += [x[i_dim,i_t+1] <= o_min + M*y[yvar_min,i_t]] # cons += [-x[i_dim,i_t+1] <= -o_max + M*y[yvar_max,i_t]] cons += [x[i_dim,i_t+1] <= o_min + M*y[yvar_min,i_t]-self.mlopt_margin_parameters[i_t,i_obs]] cons += [-x[i_dim,i_t+1] <= -o_max + M*y[yvar_max,i_t]-self.mlopt_margin_parameters[i_t,i_obs]] for i_t in range(self.N-1): yvar_min, yvar_max = 4*i_obs, 4*(i_obs+1) cons += [sum([y[ii,i_t] for ii in range(yvar_min,yvar_max)]) <= 3] # Region bounds for kk in range(self.N): for jj in range(self.n): cons += [self.posmin[jj] - x[jj,kk] <= 0] cons += [x[jj,kk] - self.posmax[jj] <= 0] # Velocity constraints for kk in range(self.N): for jj in range(self.n): cons += [self.velmin - x[self.n+jj,kk] <= 0] cons += [x[self.n+jj,kk] - self.velmax <= 0] # Control constraints for kk in range(self.N-1): cons += [cp.norm(u[:,kk]) <= self.umax] M = 1000. # big M value lqr_cost = 0. # l2-norm of lqr_cost for kk in range(self.N): lqr_cost += cp.quad_form(x[:,kk]-xg, self.Q) for kk in range(self.N-1): lqr_cost += cp.quad_form(u[:,kk], self.R) self.mlopt_prob = cp.Problem(cp.Minimize(lqr_cost), cons) def solve_micp(self, params, solver=cp.MOSEK): """High-level method to solve parameterized MICP. Args: params: Dict of param values; keys are self.sampled_params, values are numpy arrays of specific param values. solver: cvxpy Solver object; defaults to Mosek. """ # set cvxpy parameters to their values for p in self.sampled_params: self.bin_prob_parameters[p].value = params[p] ## TODO(pculbertson): allow different sets of params to vary. 
# solve problem with cvxpy prob_success, cost, solve_time = False, np.Inf, np.Inf if solver == cp.MOSEK: # See: https://docs.mosek.com/9.1/dotnetfusion/param-groups.html#doc-param-groups msk_param_dict = {} with open(os.path.join(os.environ['MLOPT'], 'config/mosek.yaml')) as file: msk_param_dict = yaml.load(file, Loader=yaml.FullLoader) self.bin_prob.solve(solver=solver, mosek_params=msk_param_dict) elif solver == cp.GUROBI: grb_param_dict = {} with open(os.path.join(os.environ['MLOPT'], 'config/gurobi.yaml')) as file: grb_param_dict = yaml.load(file, Loader=yaml.FullLoader) self.bin_prob.solve(solver=solver, **grb_param_dict) solve_time = self.bin_prob.solver_stats.solve_time x_star, u_star, y_star = None, None, None if self.bin_prob.status == 'optimal' or self.bin_prob.status == 'optimal_inaccurate': prob_success = True cost = self.bin_prob.value x_star = self.bin_prob_variables['x'].value u_star = self.bin_prob_variables['u'].value y_star = self.bin_prob_variables['y'].value.astype(int) # Clear any saved params for p in self.sampled_params: self.bin_prob_parameters[p].value = None return prob_success, cost, solve_time, (x_star, u_star, y_star) def solve_ccmicp(self, params, solver=cp.MOSEK): """High-level method to solve parameterized cc-MICP. Args: params: Dict of param values; keys are self.sampled_params, values are numpy arrays of specific param values. solver: cvxpy Solver object; defaults to Mosek. """ # set cvxpy parameters to their values for p in self.sampled_params: self.bin_prob_parameters[p].value = params[p] obstacles = self.bin_prob_parameters['obstacles'] ## TODO(pculbertson): allow different sets of params to vary. 
# solve problem with cvxpy prob_success, cost, solve_time = False, np.Inf, np.Inf solve_time = 0 # initial even risk allocation - assuming no risk at t0 riskAlloc = [[self.Delta/((self.N-1) * self.n_obs) \ for i_obs in range(self.n_obs)] for i_t in range(self.N-1)] riskUsed = [[0 \ for i_obs in range(self.n_obs)] for i_t in range(self.N-1)] riskActive = [[0 \ for i_obs in range(self.n_obs)] for i_t in range(self.N-1)] # IRA loop prevCost = np.Inf # initialise incumbent cost to be inf while(solve_time < self.timeLimit): # convert risk allocation to margins margins = np.zeros((self.N - 1, self.n_obs)) for i_t in range(self.N - 1): for i_obs in range(self.n_obs): margins[i_t, i_obs] = stats.norm.isf(riskAlloc[i_t][i_obs],0, self.iraSD[i_t+1]) self.bin_margin_parameters.value = margins if solver == cp.MOSEK: # See: https://docs.mosek.com/9.1/dotnetfusion/param-groups.html#doc-param-groups msk_param_dict = {} with open(os.path.join(os.environ['MLOPT'], 'config/mosek.yaml')) as file: msk_param_dict = yaml.load(file, Loader=yaml.FullLoader) self.bin_prob.solve(solver=solver, mosek_params=msk_param_dict) elif solver == cp.GUROBI: grb_param_dict = {} with open(os.path.join(os.environ['MLOPT'], 'config/gurobi.yaml')) as file: grb_param_dict = yaml.load(file, Loader=yaml.FullLoader) self.bin_prob.solve(solver=solver, **grb_param_dict) if self.bin_prob.solver_stats.solve_time is not None: solve_time += self.bin_prob.solver_stats.solve_time x_star, u_star, y_star = None, None, None if self.bin_prob.status == 'optimal' or self.bin_prob.status == 'optimal_inaccurate': prob_success = True cost = self.bin_prob.value x_star = self.bin_prob_variables['x'].value u_star = self.bin_prob_variables['u'].value y_star = self.bin_prob_variables['y'].value.astype(int) # check for convergence and termination if prevCost - cost < self.iraEps: break else: # no convergence, reallocate risk prevCost = cost # calculate risk used for i_t in range(self.N-1): for i_obs in range(self.n_obs): # find max 
risk used currRiskUsed = 0 for i_dim in range(self.n): o_min = obstacles[self.n * i_dim, i_obs].value o_max = obstacles[self.n * i_dim + 1, i_obs].value yvar_min = 4*i_obs + self.n*i_dim yvar_max = 4*i_obs + self.n*i_dim + 1 if y_star[yvar_min,i_t] ==0: margin = o_min - x_star[i_dim,i_t+1] currRiskUsed = max(currRiskUsed, stats.norm.sf(margin,0,self.iraSD[i_t])) if y_star[yvar_max,i_t] ==0: margin = - o_max + x_star[i_dim,i_t+1] currRiskUsed = max(currRiskUsed, stats.norm.sf(margin,0,self.iraSD[i_t])) riskUsed[i_t][i_obs] = currRiskUsed # work out difference between risk allocated and risk used, collect unused risk riskResidual = 0 act_num = 0 for i_t in range(self.N-1): for i_obs in range(self.n_obs): riskActive[i_t][i_obs] = (riskAlloc[i_t][i_obs]-riskUsed[i_t][i_obs] <= self.iraEqTol) if not(riskActive[i_t][i_obs]): riskResidual += (1-self.iraAlpha)*(riskAlloc[i_t][i_obs]-riskUsed[i_t][i_obs]) riskAlloc[i_t][i_obs] = self.iraAlpha*riskAlloc[i_t][i_obs] + \ (1-self.iraAlpha)*(riskUsed[i_t][i_obs]) else: act_num += 1 # redistribute collected risk if act_num == 0: break riskRealloc = riskResidual/ act_num for i_t in range(self.N-1): for i_obs in range(self.n_obs): if riskActive[i_t][i_obs]: riskAlloc[i_t][i_obs] = riskAlloc[i_t][i_obs] + riskRealloc else: break # Clear any saved params for p in self.sampled_params: self.bin_prob_parameters[p].value = None return prob_success, cost, solve_time, (x_star, u_star, y_star) def solve_pinned(self, params, strat, solver=cp.MOSEK): """High-level method to solve MICP with pinned params & integer values. Args: params: Dict of param values; keys are self.sampled_params, values are numpy arrays of specific param values. strat: numpy integer array, corresponding to integer values for the desired strategy. solver: cvxpy Solver object; defaults to Mosek. 
""" # set cvxpy params to their values for p in self.sampled_params: self.mlopt_prob_parameters[p].value = params[p] self.mlopt_prob_parameters['y'].value = strat ## TODO(pculbertson): allow different sets of params to vary. # solve problem with cvxpy prob_success, cost, solve_time = False, np.Inf, np.Inf self.mlopt_prob.solve(solver=solver) solve_time = self.mlopt_prob.solver_stats.solve_time x_star, u_star, y_star = None, None, strat if self.mlopt_prob.status == 'optimal': prob_success = True cost = self.mlopt_prob.value x_star = self.mlopt_prob_variables['x'].value u_star = self.mlopt_prob_variables['u'].value # Clear any saved params for p in self.sampled_params: self.mlopt_prob_parameters[p].value = None self.mlopt_prob_parameters['y'].value = None return prob_success, cost, solve_time, (x_star, u_star, y_star) def solve_ccpinned(self, params, strat, solver=cp.MOSEK): """High-level method to solve MICP with pinned params & integer values. Args: params: Dict of param values; keys are self.sampled_params, values are numpy arrays of specific param values. strat: numpy integer array, corresponding to integer values for the desired strategy. solver: cvxpy Solver object; defaults to Mosek. """ # set cvxpy params to their values obstacles = self.mlopt_prob_parameters['obstacles'] for p in self.sampled_params: self.mlopt_prob_parameters[p].value = params[p] self.mlopt_prob_parameters['y'].value = strat solve_time_list = [] # initial even risk allocation - assuming no risk at t0 riskAlloc = [[self.Delta/((self.N-1) * self.n_obs) \ for i_obs in range(self.n_obs)] for i_t in range(self.N-1)] riskUsed = [[0 \ for i_obs in range(self.n_obs)] for i_t in range(self.N-1)] riskActive = [[0 \ for i_obs in range(self.n_obs)] for i_t in range(self.N-1)] ## TODO(pculbertson): allow different sets of params to vary. 
# IRA loop prevCost = np.Inf # initialise incumbent cost to be inf while(sum(solve_time_list)< self.timeLimit): # convert risk allocation to margins for i_t in range(self.N -1): for i_obs in range(self.n_obs): self.mlopt_margin_parameters[i_t,i_obs].value = \ stats.norm.isf(riskAlloc[i_t][i_obs],0, self.iraSD[i_t+1]) # solve problem with cvxpy prob_success, cost, solve_time = False, np.Inf, np.Inf self.mlopt_prob.solve(solver=solver) solve_time = self.mlopt_prob.solver_stats.solve_time solve_time_list.append(solve_time) x_star, u_star, y_star = None, None, strat if self.mlopt_prob.status == 'optimal': prob_success = True cost = self.mlopt_prob.value x_star = self.mlopt_prob_variables['x'].value u_star = self.mlopt_prob_variables['u'].value # check for convergence and termination if prevCost - cost < self.iraEps: break else: # no convergence, reallocate risk prevCost = cost # calculate risk used for i_t in range(self.N-1): for i_obs in range(self.n_obs): o_min = obstacles[self.n * i_dim, i_obs].value o_max = obstacles[self.n * i_dim + 1, i_obs].value # find max risk used currRiskUsed = 0 for i_dim in range(self.n): yvar_min = 4*i_obs + self.n*i_dim yvar_max = 4*i_obs + self.n*i_dim + 1 if y_star[yvar_min,i_t] ==0: margin = o_min - x_star[i_dim,i_t+1] currRiskUsed = max(currRiskUsed, stats.norm.sf(margin,0,self.iraSD[i_t])) if y_star[yvar_max,i_t] ==0: margin = - o_max + x_star[i_dim,i_t+1] currRiskUsed = max(currRiskUsed, stats.norm.sf(margin,0,self.iraSD[i_t])) riskUsed[i_t][i_obs] = currRiskUsed # work out difference between risk allocated and risk used, collect unused risk riskResidual = 0 for i_t in range(self.N-1): for i_obs in range(self.n_obs): riskActive[i_t][i_obs] = (riskAlloc[i_t][i_obs]-riskUsed[i_t][i_obs] <= self.iraEqTol) if not(riskActive[i_t][i_obs]): riskResidual += (1-self.iraAlpha)*(riskAlloc[i_t][i_obs]-riskUsed[i_t][i_obs]) riskAlloc[i_t][i_obs] = self.iraAlpha*riskAlloc + \ (1-self.iraAlpha)*(riskUsed[i_t][i_obs]) # redistribute collected risk 
riskRealloc = riskResidual/sum(riskActive) for i_t in range(self.N-1): for i_obs in range(self.n_obs): if riskActive[i_t][i_obs]: riskAlloc[i_t][i_obs] = riskAlloc + riskRealloc else: break solve_time = sum(solve_time_list) # Clear any saved params for p in self.sampled_params: self.mlopt_prob_parameters[p].value = None self.mlopt_prob_parameters['y'].value = None return prob_success, cost, solve_time, (x_star, u_star, y_star), solve_time_list def which_M(self, x, obstacles, eq_tol=1e-5, ineq_tol=1e-5): """Method to check which big-M constraints are active. Args: x: numpy array of size [2*self.n, self.N], state trajectory. obstacles: numpy array of size [4, self.n_obs] eq_tol: tolerance for equality constraints, default of 1e-5. ineq_tol : tolerance for ineq. constraints, default of 1e-5. Returns: violations: list of which logical constraints are violated. """ violations = [] # list of obstacle big-M violations for i_obs in range(self.n_obs): curr_violations = [] # violations for current obstacle for i_t in range(self.N-1): for i_dim in range(self.n): o_min = obstacles[self.n*i_dim,i_obs] if (x[i_dim,i_t+1] - o_min > ineq_tol): curr_violations.append(self.n*i_dim + 2*self.n*i_t) o_max = obstacles[self.n*i_dim+1,i_obs] if (-x[i_dim,i_t+1] + o_max > ineq_tol): curr_violations.append(self.n*i_dim+1 + 2*self.n*i_t) curr_violations = list(set(curr_violations)) curr_violations.sort() violations.append(curr_violations) return violations def construct_features(self, params, prob_features, ii_obs=None): """Helper function to construct feature vector from parameter vector. Args: params: Dict of param values; keys are self.sampled_params, values are numpy arrays of specific param values. prob_features: list of strings, desired features for classifier. 
ii_obs: index of obstacle strategy being queried; appends one-hot encoding to end of feature vector """ feature_vec = np.array([]) ## TODO(pculbertson): make this not hardcoded x0, xg = params['x0'], params['xg'] obstacles = params['obstacles'] for feature in prob_features: if feature == "x0": feature_vec = np.hstack((feature_vec, x0)) elif feature == "xg": feature_vec = np.hstack((feature_vec, xg)) elif feature == "obstacles": feature_vec = np.hstack((feature_vec, np.reshape(obstacles, (4*self.n_obs)))) elif feature == "obstacles_map": continue else: print('Feature {} is unknown'.format(feature)) # Append one-hot encoding to end if ii_obs is not None: one_hot = np.zeros(self.n_obs) one_hot[ii_obs] = 1. feature_vec = np.hstack((feature_vec, one_hot)) return feature_vec def construct_cnn_features(self, params, prob_features, ii_obs=None): """Helper function to construct 3xHxW image for CNN with obstacles shaded in blue and ii_obs shaded in red Args: params: Dict of param values; keys are self.sampled_params, values are numpy arrays of specific param values. prob_features: list of strings, desired features for classifier. 
ii_obs: index of obstacle strategy being queried; appends one-hot encoding to end of feature vector """ if "obstacles_map" not in prob_features: return None obstacles = params['obstacles'] # W_H_ratio = self.posmax[0] / self.posmax[1] # H = 32 # W = int(W_H_ratio * H) H, W = self.H, self.W posmin, posmax = self.posmin, self.posmax table_img = np.ones((3,H,W)) # If a particular obstacle requested, shade that in last obs_list = [ii for ii in range(self.n_obs) if ii is not ii_obs] if ii_obs is not None: obs_list.append(ii_obs) for ll in obs_list: obs = obstacles[:,ll] row_range = range(int(float(obs[2])/posmax[1]*H), int(float(obs[3])/posmax[1]*H)) col_range = range(int(float(obs[0])/posmax[0]*W), int(float(obs[1])/posmax[0]*W)) row_range = range(np.maximum(row_range[0],0), np.minimum(row_range[-1],H)) col_range = range(np.maximum(col_range[0],0), np.minimum(col_range[-1],W)) # 0 out RG channels, leaving only B channel on table_img[:2, row_range[0]:row_range[-1], col_range[0]:col_range[-1]] = 0. if ii_obs is not None and ll is ii_obs: # 0 out all channels and then turn R channel on table_img[:, row_range[0]:row_range[-1], col_range[0]:col_range[-1]] = 0. table_img[0, row_range[0]:row_range[-1], col_range[0]:col_range[-1]] = 1. return table_img
43.526232
114
0.547703
3,755
27,378
3.821305
0.099867
0.042163
0.04523
0.049341
0.771273
0.753014
0.732595
0.720886
0.707018
0.689874
0
0.0137
0.346811
27,378
628
115
43.595541
0.788682
0.216086
0
0.640751
0
0
0.01554
0
0
0
0
0.009554
0
1
0.029491
false
0
0.029491
0
0.08311
0.002681
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
12e57e220eace5c206a9fbc2fc1eaa9ebf812c13
57
py
Python
hamcode/__init__.py
peach-lasagna/True-Hamming-Code
2dff4ef9462e5fd96f61e3597f7e5bcd5fb4a552
[ "MIT" ]
null
null
null
hamcode/__init__.py
peach-lasagna/True-Hamming-Code
2dff4ef9462e5fd96f61e3597f7e5bcd5fb4a552
[ "MIT" ]
null
null
null
hamcode/__init__.py
peach-lasagna/True-Hamming-Code
2dff4ef9462e5fd96f61e3597f7e5bcd5fb4a552
[ "MIT" ]
null
null
null
from .encoder import encode from .decoder import decode
19
28
0.807018
8
57
5.75
0.75
0
0
0
0
0
0
0
0
0
0
0
0.157895
57
2
29
28.5
0.958333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4231220512472592b183e654029087274688f69b
199
py
Python
polyjit/experiments/polly/__init__.py
PolyJIT/polyjit.experiments
4ac51473a20f86d4b07b598ac4c9b09df0c8fcb6
[ "MIT" ]
null
null
null
polyjit/experiments/polly/__init__.py
PolyJIT/polyjit.experiments
4ac51473a20f86d4b07b598ac4c9b09df0c8fcb6
[ "MIT" ]
null
null
null
polyjit/experiments/polly/__init__.py
PolyJIT/polyjit.experiments
4ac51473a20f86d4b07b598ac4c9b09df0c8fcb6
[ "MIT" ]
null
null
null
from . import openmp from . import openmpvect from . import polly from . import pollyperformance from . import vectorize __all__ = ["openmp", "openmpvect", "polly", "pollyperformance", "vectorize"]
24.875
76
0.748744
21
199
6.904762
0.380952
0.344828
0
0
0
0
0
0
0
0
0
0
0.140704
199
7
77
28.428571
0.847953
0
0
0
0
0
0.231156
0
0
0
0
0
0
1
0
false
0
0.833333
0
0.833333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
424030c0ba7bdb5fb9a2cf78ab5c565f0cdc39a7
142
py
Python
ultrachronic/__init__.py
yoavram/ultrachronic
eabd55c6c97da606688a73244faee7572ae381d0
[ "MIT" ]
1
2017-01-08T10:38:56.000Z
2017-01-08T10:38:56.000Z
ultrachronic/__init__.py
yoavram/ultrachronic
eabd55c6c97da606688a73244faee7572ae381d0
[ "MIT" ]
1
2017-01-25T14:43:43.000Z
2017-01-26T06:13:49.000Z
ultrachronic/__init__.py
yoavram/ultrachronic
eabd55c6c97da606688a73244faee7572ae381d0
[ "MIT" ]
null
null
null
from .ultrachronic import jsonify_result, repeat from ._version import get_versions __version__ = get_versions()['version'] del get_versions
23.666667
48
0.823944
18
142
6
0.555556
0.305556
0.333333
0
0
0
0
0
0
0
0
0
0.105634
142
5
49
28.4
0.850394
0
0
0
0
0
0.049296
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
4255c6a20888edb49e5fa9d11a2a5ff4bbcd98e4
427
py
Python
shard/utils/replica.py
ridi/django-shard-library
405e1c213420e095f776d8d2969a147bb0793d9c
[ "BSD-3-Clause" ]
17
2018-03-12T11:37:14.000Z
2021-12-09T15:30:52.000Z
shard/utils/replica.py
ridi/django-shard-library
405e1c213420e095f776d8d2969a147bb0793d9c
[ "BSD-3-Clause" ]
12
2018-03-12T10:39:39.000Z
2018-08-21T03:26:09.000Z
shard/utils/replica.py
ridi/django-shard-library
405e1c213420e095f776d8d2969a147bb0793d9c
[ "BSD-3-Clause" ]
3
2018-03-12T10:32:11.000Z
2021-04-02T06:24:14.000Z
from django.conf import settings __all__ = ('get_replica_count', ) SHARD_REPLICA_COUNT_SETTING = 'SHARD_REPLICA_COUNT_SETTING' DEFAULT_REPLICA_COUNT = 512 def get_replica_count(shard_group: str) -> int: replica_settings = _get_shard_replica_settings() return replica_settings.get(shard_group, DEFAULT_REPLICA_COUNT) def _get_shard_replica_settings(): return getattr(settings, SHARD_REPLICA_COUNT_SETTING, {})
26.6875
67
0.807963
56
427
5.571429
0.339286
0.269231
0.163462
0.230769
0.185897
0
0
0
0
0
0
0.007937
0.114754
427
15
68
28.466667
0.81746
0
0
0
0
0
0.103045
0.063232
0
0
0
0
0
1
0.222222
false
0
0.111111
0.111111
0.555556
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
425a37d4d26be1eb87beeec1f1c88e6ddbbc5495
285
py
Python
tests/test_loader.py
nk2673/thai-tokenizer
bbed18b6c05117c917d67ba603f70f0b1e97b3db
[ "MIT" ]
4
2021-08-17T06:36:45.000Z
2022-03-09T05:34:29.000Z
tests/test_loader.py
nk2673/thai-tokenizer
bbed18b6c05117c917d67ba603f70f0b1e97b3db
[ "MIT" ]
1
2021-02-08T11:00:17.000Z
2021-02-08T11:00:17.000Z
tests/test_loader.py
nk2673/thai-tokenizer
bbed18b6c05117c917d67ba603f70f0b1e97b3db
[ "MIT" ]
2
2021-02-08T10:18:56.000Z
2021-02-12T10:09:36.000Z
import unittest from thai_tokenizer.loader import is_thai, contains_thai class TestLoader(unittest.TestCase): def setUp(self): # TODO: pass def test_is_thai(self): # TODO: pass def test_contains_thai(self): # TODO: pass
15.833333
56
0.614035
34
285
4.941176
0.5
0.142857
0.214286
0.178571
0.22619
0
0
0
0
0
0
0
0.312281
285
17
57
16.764706
0.857143
0.059649
0
0.333333
0
0
0
0
0
0
0
0.058824
0
1
0.333333
false
0.333333
0.222222
0
0.666667
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
1
0
0
0
0
0
5
c41d4728211f474b0156b7a62eb0d3151131e879
149
py
Python
django_evolution/admin.py
clones/django-evolution
34b1131873da463d22801d8b845a72f35cb367bd
[ "BSD-3-Clause" ]
1
2016-05-09T04:23:18.000Z
2016-05-09T04:23:18.000Z
django_evolution/admin.py
clones/django-evolution
34b1131873da463d22801d8b845a72f35cb367bd
[ "BSD-3-Clause" ]
null
null
null
django_evolution/admin.py
clones/django-evolution
34b1131873da463d22801d8b845a72f35cb367bd
[ "BSD-3-Clause" ]
null
null
null
from django.contrib import admin from django_evolution.models import Version, Evolution admin.site.register(Version) admin.site.register(Evolution)
24.833333
54
0.845638
20
149
6.25
0.5
0.16
0.272
0
0
0
0
0
0
0
0
0
0.080537
149
5
55
29.8
0.912409
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
c446370bc0121345e5160cedb29638b000fb6fbc
64,826
py
Python
app/backend/wells/models.py
cedar-technologies/gwells
9023034698a9c25e5a49193242678c1aee3c6f4d
[ "Apache-2.0" ]
null
null
null
app/backend/wells/models.py
cedar-technologies/gwells
9023034698a9c25e5a49193242678c1aee3c6f4d
[ "Apache-2.0" ]
null
null
null
app/backend/wells/models.py
cedar-technologies/gwells
9023034698a9c25e5a49193242678c1aee3c6f4d
[ "Apache-2.0" ]
null
null
null
""" Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from django.db import models from model_utils import FieldTracker from django.db import models from django.core.validators import MinValueValidator from decimal import Decimal import uuid from gwells.models import AuditModel, ProvinceStateCode, ScreenIntakeMethodCode, ScreenMaterialCode,\ ScreenOpeningCode, ScreenBottomCode, ScreenTypeCode, ScreenAssemblyTypeCode from gwells.models.lithology import ( LithologyDescriptionCode, LithologyColourCode, LithologyHardnessCode, LithologyMaterialCode, BedrockMaterialCode, BedrockMaterialDescriptorCode, LithologyStructureCode, LithologyMoistureCode, SurficialMaterialCode) from registries.models import Person from submissions.models import WellActivityCode class DecommissionMethodCode(AuditModel): decommission_method_code = models.CharField(primary_key=True, max_length=10, editable=False, verbose_name="Code") description = models.CharField(max_length=255, verbose_name="Description") display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'decommission_method_code' ordering = ['display_order'] def __str__(self): return self.description class BCGS_Numbers(AuditModel): bcgs_id = models.BigIntegerField(primary_key=True, editable=False) bcgs_number = models.CharField(max_length=20, verbose_name="BCGS Mapsheet Number") class Meta: db_table = 'bcgs_number' def __str__(self): return 
self.bcgs_number class ObsWellStatusCode(AuditModel): """ Observation Well Status. """ obs_well_status_code = models.CharField( primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=255) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'obs_well_status_code' ordering = ['display_order', 'obs_well_status_code'] def save(self, *args, **kwargs): self.validate() super(WellStatusCode, self).save(*args, **kwargs) class YieldEstimationMethodCode(AuditModel): """ The method used to estimate the yield of a well, e.g. Air Lifting, Bailing, Pumping. """ yield_estimation_method_code = models.CharField(primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'yield_estimation_method_code' ordering = ['display_order', 'description'] def __str__(self): return self.description class WaterQualityCharacteristic(AuditModel): """ The characteristic of the well water, e.g. Fresh, Salty, Clear. """ water_quality_characteristic_guid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) code = models.CharField(max_length=10, unique=True) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() class Meta: db_table = 'water_quality_characteristic' ordering = ['display_order', 'description'] def __str__(self): return self.description class DevelopmentMethodCode(AuditModel): """ How the well was developed in order to remove the fine sediment and other organic or inorganic material that immediately surrounds the well screen, the drill hole or the intake area at the bottom of the well, e.g. air lifting, pumping, bailing. 
""" development_method_code = models.CharField(primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'development_method_code' ordering = ['display_order', 'description'] def __str__(self): return self.description class FilterPackMaterialSizeCode(AuditModel): """ The size of material used to pack a well filter, e.g. 1.0 - 2.0 mm, 2.0 - 4.0 mm, 4.0 - 8.0 mm. """ filter_pack_material_size_code = models.CharField(primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'filter_pack_material_size_code' ordering = ['display_order', 'description'] def __str__(self): return self.description class FilterPackMaterialCode(AuditModel): """ The material used to pack a well filter, e.g. Very coarse sand, Very fine gravel, Fine gravel. """ filter_pack_material_code = models.CharField(primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'filter_pack_material_code' ordering = ['display_order', 'description'] def __str__(self): return self.description class LinerMaterialCode(AuditModel): """ Liner material installed in a well to protect the well pump or other works in the well from damage. 
""" liner_material_code = models.CharField( primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'liner_material_code' ordering = ['display_order', 'description'] def __str__(self): return self.description class SurfaceSealMethodCode(AuditModel): """ Method used to install the surface seal in the annular space around the outside of the outermost casing and between mulitple casings of a well. """ surface_seal_method_code = models.CharField(primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'surface_seal_method_code' ordering = ['display_order', 'description'] def __str__(self): return self.description class SurfaceSealMaterialCode(AuditModel): """ Sealant material used that is installed in the annular space around the outside of the outermost casing and between multiple casings of a well. """ surface_seal_material_code = models.CharField(primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'surface_seal_material_code' ordering = ['display_order', 'description'] def __str__(self): return self.description class DrillingMethodCode(AuditModel): """ The method used to drill a well. For example, air rotary, dual rotary, cable tool, excavating, other. 
""" drilling_method_code = models.CharField(primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'drilling_method_code' ordering = ['display_order', 'description'] def __str__(self): return self.description # TODO: Remove this class - now using registries.something # Not a Code table, but a representative sample of data to support search class DrillingCompany(AuditModel): """ Companies who perform drilling. """ drilling_company_guid = models.UUIDField( primary_key=True, default=uuid.uuid4, editable=False) drilling_company_code = models.CharField( max_length=10, blank=True, null=True) name = models.CharField(max_length=200) class Meta: db_table = 'drilling_company' verbose_name_plural = 'Drilling Companies' def __str__(self): return self.name class LandDistrictCode(AuditModel): """ Lookup of Legal Land Districts. """ land_district_code = models.CharField( primary_key=True, max_length=10, editable=False) name = models.CharField(max_length=255) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'land_district_code' ordering = ['display_order', 'name'] def __str__(self): return self.name class LicencedStatusCode(AuditModel): """ LicencedStatusCode of Well. 
""" licenced_status_code = models.CharField( primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=255) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'licenced_status_code' ordering = ['display_order', 'licenced_status_code'] def save(self, *args, **kwargs): self.validate() super(LicencedStatusCode, self).save(*args, **kwargs) class IntendedWaterUseCode(AuditModel): """ Usage of Wells (water supply). """ intended_water_use_code = models.CharField( primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'intended_water_use_code' ordering = ['display_order', 'description'] def __str__(self): return self.description class GroundElevationMethodCode(AuditModel): """ The method used to determine the ground elevation of a well. Some examples of methods to determine ground elevation include: GPS, Altimeter, Differential GPS, Level, 1:50,000 map, 1:20,000 map, 1:10,000 map, 1:5,000 map. """ ground_elevation_method_code = models.CharField(primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'ground_elevation_method_code' ordering = ['display_order', 'description'] def __str__(self): return self.description class WellClassCode(AuditModel): """ Class of Well type. 
""" well_class_code = models.CharField( primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'well_class_code' ordering = ['display_order', 'description'] def __str__(self): return self.description class WellStatusCode(AuditModel): """ Well Status. """ well_status_code = models.CharField( primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=255) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'well_status_code' ordering = ['display_order', 'well_status_code'] def save(self, *args, **kwargs): self.validate() super(WellStatusCode, self).save(*args, **kwargs) class WellSubclassCode(AuditModel): """ Subclass of Well type; we use GUID here as Django doesn't support multi-column PK's """ well_subclass_guid = models.UUIDField( primary_key=True, default=uuid.uuid4, editable=False) well_class = models.ForeignKey(WellClassCode, null=True, db_column='well_class_code', on_delete=models.PROTECT, blank=True) well_subclass_code = models.CharField(max_length=10) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'well_subclass_code' ordering = ['display_order', 'description'] def validate_unique(self, exclude=None): qs = Room.objects.filter(name=self.well_subclass_code) if qs.filter(well_class__well_class_code=self.well_class__well_class_code).exists(): raise ValidationError('Code must be unique per Well Class') def save(self, *args, **kwargs): self.validate_unique() super(WellSubclassCode, 
              self).save(*args, **kwargs)

    def __str__(self):
        return self.description


class WellYieldUnitCode(AuditModel):
    """
    Units of Well Yield.
    """
    well_yield_unit_code = models.CharField(
        primary_key=True, max_length=10, editable=False)
    description = models.CharField(max_length=100)
    display_order = models.PositiveIntegerField()
    effective_date = models.DateTimeField(blank=True, null=True)
    expiry_date = models.DateTimeField(blank=True, null=True)

    class Meta:
        db_table = 'well_yield_unit_code'
        ordering = ['display_order', 'description']

    def __str__(self):
        return self.description


class Well(AuditModel):
    """
    Well information.

    Central record of a groundwater well: ownership, legal location,
    construction/alteration/decommission details, and physical
    characteristics.  The primary key is the auto-assigned well tag number;
    well_guid is a non-primary surrogate identifier.
    """
    well_guid = models.UUIDField(
        primary_key=False, default=uuid.uuid4, editable=False)
    well_tag_number = models.AutoField(
        primary_key=True, verbose_name='Well Tag Number')
    identification_plate_number = models.PositiveIntegerField(
        unique=True, blank=True, null=True,
        verbose_name="Well Identification Plate Number")

    # --- Owner contact details ---
    owner_full_name = models.CharField(
        max_length=200, verbose_name='Owner Name')
    owner_mailing_address = models.CharField(
        max_length=100, verbose_name='Mailing Address')
    owner_city = models.CharField(max_length=100, verbose_name='Town/City')
    owner_province_state = models.ForeignKey(
        ProvinceStateCode, db_column='province_state_code',
        on_delete=models.CASCADE, blank=True, verbose_name='Province',
        null=True)
    owner_postal_code = models.CharField(
        max_length=10, blank=True, verbose_name='Postal Code')

    # --- Classification code-table links ---
    well_class = models.ForeignKey(WellClassCode, null=True,
                                   db_column='well_class_code',
                                   on_delete=models.CASCADE,
                                   verbose_name='Well Class')
    well_subclass = models.ForeignKey(WellSubclassCode,
                                      db_column='well_subclass_guid',
                                      on_delete=models.CASCADE, blank=True,
                                      null=True,
                                      verbose_name='Well Subclass')
    intended_water_use = models.ForeignKey(IntendedWaterUseCode,
                                           db_column='intended_water_use_code',
                                           on_delete=models.CASCADE,
                                           blank=True, null=True,
                                           verbose_name='Intended Water Use')
    well_status = models.ForeignKey(WellStatusCode,
                                    db_column='well_status_code',
                                    on_delete=models.CASCADE, blank=True,
                                    null=True, verbose_name='Well Status')
    licenced_status = models.ForeignKey(LicencedStatusCode,
                                        db_column='licenced_status_code',
                                        on_delete=models.CASCADE, blank=True,
                                        null=True,
                                        verbose_name='Licenced Status')

    # --- Legal location of the well ---
    street_address = models.CharField(
        max_length=100, blank=True, verbose_name='Street Address')
    city = models.CharField(max_length=50, blank=True,
                            verbose_name='Town/City')
    legal_lot = models.CharField(max_length=10, blank=True,
                                 verbose_name='Lot')
    legal_plan = models.CharField(
        max_length=20, blank=True, verbose_name='Plan')
    legal_district_lot = models.CharField(
        max_length=20, blank=True, verbose_name='District Lot')
    legal_block = models.CharField(
        max_length=10, blank=True, verbose_name='Block')
    legal_section = models.CharField(
        max_length=10, blank=True, verbose_name='Section')
    legal_township = models.CharField(
        max_length=20, blank=True, verbose_name='Township')
    legal_range = models.CharField(
        max_length=10, blank=True, verbose_name='Range')
    land_district = models.ForeignKey(LandDistrictCode,
                                      db_column='land_district_code',
                                      on_delete=models.CASCADE, blank=True,
                                      null=True,
                                      verbose_name='Land District')
    legal_pid = models.CharField(max_length=9, blank=True, null=True,
                                 verbose_name='Property Identification Description (PID)')
    well_location_description = models.CharField(
        max_length=500, blank=True,
        verbose_name='Description of Well Location')

    # --- Lifecycle dates ---
    construction_start_date = models.DateTimeField(
        null=True, verbose_name="Construction Start Date")
    construction_end_date = models.DateTimeField(
        null=True, verbose_name="Construction Date")
    alteration_start_date = models.DateTimeField(
        null=True, verbose_name="Alteration Start Date")
    alteration_end_date = models.DateTimeField(
        null=True, verbose_name="Alteration Date")
    decommission_start_date = models.DateTimeField(
        null=True, verbose_name="Decommission Start Date")
    decommission_end_date = models.DateTimeField(
        null=True, verbose_name="Decommission Date")

    drilling_company = models.ForeignKey(DrillingCompany,
                                         db_column='drilling_company_guid',
                                         on_delete=models.CASCADE, blank=True,
                                         null=True,
                                         verbose_name='Drilling Company')
    well_identification_plate_attached = models.CharField(
        max_length=500, blank=True, null=True,
        verbose_name='Well Identification Plate Is Attached')

    # --- Geographic position / elevation ---
    latitude = models.DecimalField(
        max_digits=8, decimal_places=6, blank=True, null=True,
        verbose_name='Latitude')
    longitude = models.DecimalField(
        max_digits=9, decimal_places=6, blank=True, null=True,
        verbose_name='Longitude')
    ground_elevation = models.DecimalField(
        max_digits=10, decimal_places=2, blank=True, null=True,
        verbose_name='Ground Elevation')
    ground_elevation_method = models.ForeignKey(GroundElevationMethodCode,
                                                db_column='ground_elevation_method_code',
                                                on_delete=models.CASCADE,
                                                blank=True, null=True,
                                                verbose_name='Elevation Determined By')

    # --- Construction details ---
    drilling_method = models.ForeignKey(DrillingMethodCode,
                                        db_column='drilling_method_code',
                                        on_delete=models.CASCADE, blank=True,
                                        null=True,
                                        verbose_name='Drilling Method')
    other_drilling_method = models.CharField(
        max_length=50, blank=True, null=True,
        verbose_name='Specify Other Drilling Method')
    # Boolean encodes orientation: True = vertical, False = horizontal.
    well_orientation = models.BooleanField(default=True,
                                           verbose_name='Orientation of Well',
                                           choices=((True, 'vertical'),
                                                    (False, 'horizontal')))
    surface_seal_material = models.ForeignKey(SurfaceSealMaterialCode,
                                              db_column='surface_seal_material_code',
                                              on_delete=models.CASCADE,
                                              blank=True, null=True,
                                              verbose_name='Surface Seal Material')
    surface_seal_length = models.DecimalField(
        max_digits=5, decimal_places=2, blank=True, null=True,
        verbose_name='Surface Seal Length')
    surface_seal_thickness = models.DecimalField(
        max_digits=7, decimal_places=2, blank=True, null=True,
        verbose_name='Surface Seal Thickness')
    surface_seal_method = models.ForeignKey(SurfaceSealMethodCode,
                                            db_column='surface_seal_method_code',
                                            on_delete=models.CASCADE,
                                            blank=True, null=True,
                                            verbose_name='Surface Seal Installation Method')
    backfill_type = models.CharField(
        max_length=250, blank=True, null=True,
        verbose_name="Backfill Material Above Surface Seal")
    backfill_depth = models.DecimalField(
        max_digits=7, decimal_places=2, blank=True, null=True,
        verbose_name='Backfill Depth')

    # --- Liner ---
    liner_material = models.ForeignKey(LinerMaterialCode,
                                       db_column='liner_material_code',
                                       on_delete=models.CASCADE, blank=True,
                                       null=True,
                                       verbose_name='Liner Material')
    liner_diameter = models.DecimalField(max_digits=7, decimal_places=2,
                                         blank=True, null=True,
                                         verbose_name='Liner Diameter',
                                         validators=[MinValueValidator(Decimal('0.00'))])
    liner_thickness = models.DecimalField(max_digits=5, decimal_places=3,
                                          blank=True, null=True,
                                          verbose_name='Liner Thickness',
                                          validators=[MinValueValidator(Decimal('0.00'))])
    liner_from = models.DecimalField(max_digits=7, decimal_places=2,
                                     blank=True, null=True,
                                     verbose_name='Liner From',
                                     validators=[MinValueValidator(Decimal('0.00'))])
    # 'to' depths are required to be strictly positive (min 0.01),
    # unlike the 'from' depths which allow 0.00.
    liner_to = models.DecimalField(max_digits=7, decimal_places=2,
                                   blank=True, null=True,
                                   verbose_name='Liner To',
                                   validators=[MinValueValidator(Decimal('0.01'))])

    # --- Screen ---
    screen_intake_method = models.ForeignKey(ScreenIntakeMethodCode,
                                             db_column='screen_intake_method_code',
                                             on_delete=models.CASCADE,
                                             blank=True, null=True,
                                             verbose_name='Intake Method')
    screen_type = models.ForeignKey(ScreenTypeCode,
                                    db_column='screen_type_code',
                                    on_delete=models.CASCADE, blank=True,
                                    null=True, verbose_name='Type')
    screen_material = models.ForeignKey(ScreenMaterialCode,
                                        db_column='screen_material_code',
                                        on_delete=models.CASCADE, blank=True,
                                        null=True, verbose_name='Material')
    other_screen_material = models.CharField(
        max_length=50, blank=True,
        verbose_name='Specify Other Screen Material')
    screen_opening = models.ForeignKey(ScreenOpeningCode,
                                       db_column='screen_opening_code',
                                       on_delete=models.CASCADE, blank=True,
                                       null=True, verbose_name='Opening')
    screen_bottom = models.ForeignKey(ScreenBottomCode,
                                      db_column='screen_bottom_code',
                                      on_delete=models.CASCADE, blank=True,
                                      null=True,
                                      verbose_name='Bottom')
    other_screen_bottom = models.CharField(
        max_length=50, blank=True,
        verbose_name='Specify Other Screen Bottom')

    # --- Filter pack ---
    filter_pack_from = models.DecimalField(max_digits=7, decimal_places=2,
                                           blank=True, null=True,
                                           verbose_name='Filter Pack From',
                                           validators=[MinValueValidator(Decimal('0.00'))])
    filter_pack_to = models.DecimalField(max_digits=7, decimal_places=2,
                                         blank=True, null=True,
                                         verbose_name='Filter Pack To',
                                         validators=[MinValueValidator(Decimal('0.01'))])
    filter_pack_thickness = models.DecimalField(max_digits=5,
                                                decimal_places=3, blank=True,
                                                null=True,
                                                verbose_name='Filter Pack Thickness',
                                                validators=[MinValueValidator(Decimal('0.00'))])
    filter_pack_material = models.ForeignKey(FilterPackMaterialCode,
                                             db_column='filter_pack_material_code',
                                             on_delete=models.CASCADE,
                                             blank=True, null=True,
                                             verbose_name='Filter Pack Material')
    filter_pack_material_size = models.ForeignKey(FilterPackMaterialSizeCode,
                                                  db_column='filter_pack_material_size_code',
                                                  on_delete=models.CASCADE,
                                                  blank=True, null=True,
                                                  verbose_name='Filter Pack Material Size')

    # --- Development ---
    development_method = models.ForeignKey(DevelopmentMethodCode,
                                           db_column='development_method_code',
                                           on_delete=models.CASCADE,
                                           blank=True, null=True,
                                           verbose_name='Developed By')
    development_hours = models.DecimalField(max_digits=9, decimal_places=2,
                                            blank=True, null=True,
                                            verbose_name='Development Total Duration',
                                            validators=[MinValueValidator(Decimal('0.00'))])
    development_notes = models.CharField(
        max_length=255, blank=True, verbose_name='Development Notes')

    # --- Water quality ---
    water_quality_characteristics = models.ManyToManyField(
        WaterQualityCharacteristic, db_table='well_water_quality', blank=True,
        verbose_name='Obvious Water Quality Characteristics')
    water_quality_colour = models.CharField(
        max_length=60, blank=True, verbose_name='Water Quality Colour')
    water_quality_odour = models.CharField(
        max_length=60, blank=True, verbose_name='Water Quality Odour')

    # --- Depths / yield measurements ---
    total_depth_drilled = models.DecimalField(
        max_digits=7, decimal_places=2, blank=True, null=True,
        verbose_name='Total Depth Drilled')
    finished_well_depth = models.DecimalField(
        max_digits=7, decimal_places=2, blank=True, null=True,
        verbose_name='Finished Well Depth')
    final_casing_stick_up = models.DecimalField(
        max_digits=6, decimal_places=3, blank=True, null=True,
        verbose_name='Final Casing Stick Up')
    bedrock_depth = models.DecimalField(
        max_digits=7, decimal_places=2, blank=True, null=True,
        verbose_name='Depth to Bedrock')
    water_supply_system_name = models.CharField(
        max_length=80, blank=True, null=True,
        verbose_name='Water Supply System Name')
    water_supply_system_well_name = models.CharField(
        max_length=80, blank=True, null=True,
        verbose_name='Water Supply System Well Name')
    static_water_level = models.DecimalField(
        max_digits=7, decimal_places=2, blank=True, null=True,
        verbose_name='Static Water Level (BTOC)')
    well_yield = models.DecimalField(
        max_digits=8, decimal_places=3, blank=True, null=True,
        verbose_name='Estimated Well Yield')
    artesian_flow = models.DecimalField(
        max_digits=7, decimal_places=2, blank=True, null=True,
        verbose_name='Artesian Flow')
    artesian_pressure = models.DecimalField(
        max_digits=7, decimal_places=2, blank=True, null=True,
        verbose_name='Artesian Pressure')
    well_cap_type = models.CharField(
        max_length=40, blank=True, null=True, verbose_name='Well Cap')
    well_disinfected = models.BooleanField(
        default=False, verbose_name='Well Disinfected',
        choices=((False, 'No'), (True, 'Yes')))

    comments = models.CharField(max_length=3000, blank=True, null=True)
    alternative_specs_submitted = \
        models.BooleanField(default=False,
                            verbose_name='Alternative specs submitted (if required)',
                            choices=((False, 'No'), (True, 'Yes')))

    well_yield_unit = models.ForeignKey(
        WellYieldUnitCode, db_column='well_yield_unit_code',
        on_delete=models.CASCADE, blank=True, null=True)
    # want to be integer in future
    diameter = models.CharField(max_length=9, blank=True)

    observation_well_number = models.CharField(
        max_length=3, blank=True, null=True,
verbose_name="Observation Well Number") observation_well_status = models.ForeignKey( ObsWellStatusCode, db_column='obs_well_status_code', blank=True, null="True", verbose_name="Observation Well Status") ems = models.CharField(max_length=10, blank=True, null=True, verbose_name="Environmental Monitoring System (EMS) ID") utm_zone_code = models.CharField( max_length=10, blank=True, null=True, verbose_name="Zone") utm_northing = models.IntegerField( blank=True, null=True, verbose_name="UTM Northing") utm_easting = models.IntegerField( blank=True, null=True, verbose_name="UTM Easting") utm_accuracy_code = models.CharField( max_length=10, blank=True, null=True, verbose_name="Location Accuracy Code") bcgs_id = models.ForeignKey(BCGS_Numbers, db_column='bcgs_id', on_delete=models.CASCADE, blank=True, null=True, verbose_name="BCGS Mapsheet Number") decommission_reason = models.CharField( max_length=250, blank=True, null=True, verbose_name="Reason for Decommission") decommission_method = models.ForeignKey( DecommissionMethodCode, db_column='decommission_method_code', blank=True, null="True", verbose_name="Method of Decommission") sealant_material = models.CharField( max_length=100, blank=True, null=True, verbose_name="Sealant Material") backfill_material = models.CharField( max_length=100, blank=True, null=True, verbose_name="Backfill Material") decommission_details = models.CharField( max_length=250, blank=True, null=True, verbose_name="Decommission Details") tracker = FieldTracker() class Meta: db_table = 'well' def __str__(self): if self.well_tag_number: return '%d %s' % (self.well_tag_number, self.street_address) else: return 'No well tag number %s' % (self.street_address) # Custom JSON serialisation for Wells. Expand as needed. 
def as_dict(self): return { "latitude": self.latitude, "longitude": self.longitude, "guid": self.well_guid, "identification_plate_number": self.identification_plate_number, "street_address": self.street_address, "well_tag_number": self.well_tag_number } class Perforation(AuditModel): """ Liner Details """ perforation_guid = models.UUIDField( primary_key=True, default=uuid.uuid4, editable=False) well_tag_number = models.ForeignKey( Well, db_column='well_tag_number', on_delete=models.CASCADE, blank=True, null=True) liner_thickness = models.DecimalField( max_digits=5, decimal_places=3, blank=True, null=True, verbose_name='Liner Thickness') liner_diameter = models.DecimalField( max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Liner Diameter') liner_from = models.DecimalField( max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Liner From') liner_to = models.DecimalField( max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Liner To') liner_perforation_from = models.DecimalField( max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Perforation From') liner_perforation_to = models.DecimalField( max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Perforation To') class Meta: db_table = 'perforation' ordering = ['liner_from', 'liner_to', 'liner_perforation_from', 'liner_perforation_to', 'perforation_guid'] def __str__(self): return self.description class LtsaOwner(AuditModel): """ Well owner information. 
""" lsts_owner_guid = models.UUIDField( primary_key=True, default=uuid.uuid4, editable=False) well = models.ForeignKey(Well, db_column='well_tag_number', on_delete=models.CASCADE, blank=True, null=True) full_name = models.CharField(max_length=200, verbose_name='Owner Name') mailing_address = models.CharField( max_length=100, verbose_name='Mailing Address') city = models.CharField(max_length=100, verbose_name='Town/City') province_state = models.ForeignKey( ProvinceStateCode, db_column='province_state_code', on_delete=models.CASCADE, verbose_name='Province') postal_code = models.CharField( max_length=10, blank=True, verbose_name='Postal Code') tracker = FieldTracker() class Meta: db_table = 'ltsa_owner' def __str__(self): return '%s %s' % (self.full_name, self.mailing_address) class CasingMaterialCode(AuditModel): """ The material used for casing a well, e.g., Cement, Plastic, Steel. """ casing_material_code = models.CharField(primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'casing_material_code' ordering = ['display_order', 'description'] def __str__(self): return self.description class CasingCode(AuditModel): """ Type of Casing used on a well """ casing_code = models.CharField(primary_key=True, max_length=10, editable=False) description = models.CharField(max_length=100) display_order = models.PositiveIntegerField() effective_date = models.DateTimeField(blank=True, null=True) expiry_date = models.DateTimeField(blank=True, null=True) class Meta: db_table = 'casing_code' ordering = ['display_order', 'description'] def __str__(self): return self.description class AquiferWell(AuditModel): """ AquiferWell """ aquifer_well_guid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) aquifer_id = 
models.PositiveIntegerField(verbose_name="Aquifer Number", blank=True, null=True) well_tag_number = models.ForeignKey(Well, db_column='well_tag_number', to_field='well_tag_number', on_delete=models.CASCADE, blank=False, null=False) class Meta: db_table = 'aquifer_well' # TODO: This class needs to be moved to submissions.models (in order to do that, the fk references for a # number of other models needs to be updated) class ActivitySubmission(AuditModel): """ Activity information on a Well submitted by a user. """ filing_number = models.AutoField(primary_key=True) activity_submission_guid = models.UUIDField( primary_key=False, default=uuid.uuid4, editable=False) well = models.ForeignKey( Well, db_column='well_tag_number', on_delete=models.CASCADE, blank=True, null=True) well_activity_type = models.ForeignKey( WellActivityCode, db_column='well_activity_code', on_delete=models.CASCADE, verbose_name='Type of Work') well_class = models.ForeignKey(WellClassCode, null=True, db_column='well_class_code', on_delete=models.CASCADE, verbose_name='Well Class') well_subclass = models.ForeignKey(WellSubclassCode, db_column='well_subclass_guid', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Well Subclass') intended_water_use = models.ForeignKey(IntendedWaterUseCode, db_column='intended_water_use_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Intended Water Use') # Driller responsible should be a required field on all submissions, but for legacy well # information this may not be available, so we can't enforce this on a database level. 
driller_responsible = models.ForeignKey(Person, db_column='driller_responsible_guid', on_delete=models.PROTECT, verbose_name='Person Responsible for Drilling', blank=True, null=True) driller_name = models.CharField( max_length=200, blank=True, verbose_name='Name of Person Who Did the Work') consultant_name = models.CharField( max_length=200, blank=True, verbose_name='Consultant Name') consultant_company = models.CharField( max_length=200, blank=True, verbose_name='Consultant Company') # Work start & end date should be required fields on all submissions, but for legacy well # information this may not be available, so we can't enforce this on a database level. work_start_date = models.DateField( verbose_name='Work Start Date', null=True, blank=True) work_end_date = models.DateField( verbose_name='Work End Date', null=True, blank=True) owner_full_name = models.CharField( max_length=200, verbose_name='Owner Name') owner_mailing_address = models.CharField( max_length=100, verbose_name='Mailing Address') owner_city = models.CharField(max_length=100, verbose_name='Town/City') owner_province_state = models.ForeignKey( ProvinceStateCode, db_column='province_state_code', on_delete=models.CASCADE, verbose_name='Province') owner_postal_code = models.CharField( max_length=10, blank=True, verbose_name='Postal Code') street_address = models.CharField( max_length=100, blank=True, verbose_name='Street Address') city = models.CharField(max_length=50, blank=True, verbose_name='Town/City') legal_lot = models.CharField(max_length=10, blank=True, verbose_name='Lot') legal_plan = models.CharField( max_length=20, blank=True, verbose_name='Plan') legal_district_lot = models.CharField( max_length=20, blank=True, verbose_name='District Lot') legal_block = models.CharField( max_length=10, blank=True, verbose_name='Block') legal_section = models.CharField( max_length=10, blank=True, verbose_name='Section') legal_township = models.CharField( max_length=20, blank=True, verbose_name='Township') 
legal_range = models.CharField( max_length=10, blank=True, verbose_name='Range') land_district = models.ForeignKey(LandDistrictCode, db_column='land_district_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Land District') legal_pid = models.PositiveIntegerField( blank=True, null=True, verbose_name='PID') well_location_description = models.CharField( max_length=500, blank=True, verbose_name='Well Location Description') identification_plate_number = models.PositiveIntegerField( blank=True, null=True, verbose_name='Identification Plate Number') well_plate_attached = models.CharField( max_length=500, blank=True, verbose_name='Well Identification Plate Is Attached') latitude = models.DecimalField( max_digits=8, decimal_places=6, blank=True, null=True) longitude = models.DecimalField( max_digits=9, decimal_places=6, blank=True, null=True) ground_elevation = models.DecimalField( max_digits=10, decimal_places=2, blank=True, null=True, verbose_name='Ground Elevation') ground_elevation_method = models.ForeignKey(GroundElevationMethodCode, db_column='ground_elevation_method_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Elevation Determined By') drilling_method = models.ForeignKey(DrillingMethodCode, db_column='drilling_method_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Drilling Method') other_drilling_method = models.CharField( max_length=50, blank=True, verbose_name='Specify Other Drilling Method') well_orientation = models.BooleanField(default=True, verbose_name='Orientation of Well', choices=( (True, 'vertical'), (False, 'horizontal'))) water_supply_system_name = models.CharField( max_length=50, blank=True, verbose_name='Water Supply System Name') water_supply_system_well_name = models.CharField( max_length=50, blank=True, verbose_name='Water Supply System Well Name') surface_seal_material = models.ForeignKey(SurfaceSealMaterialCode, db_column='surface_seal_material_code', on_delete=models.CASCADE, 
blank=True, null=True, verbose_name='Surface Seal Material') surface_seal_depth = models.DecimalField( max_digits=5, decimal_places=2, blank=True, null=True, verbose_name='Surface Seal Depth') surface_seal_thickness = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Surface Seal Thickness', validators=[MinValueValidator(Decimal('1.00'))]) surface_seal_method = models.ForeignKey(SurfaceSealMethodCode, db_column='surface_seal_method_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Surface Seal Installation Method') backfill_above_surface_seal = models.CharField( max_length=250, blank=True, verbose_name='Backfill Material Above Surface Seal') backfill_above_surface_seal_depth = models.DecimalField( max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Backfill Depth') liner_material = models.ForeignKey(LinerMaterialCode, db_column='liner_material_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Liner Material') liner_diameter = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Liner Diameter', validators=[MinValueValidator(Decimal('0.00'))]) liner_thickness = models.DecimalField(max_digits=5, decimal_places=3, blank=True, null=True, verbose_name='Liner Thickness', validators=[MinValueValidator(Decimal('0.00'))]) liner_from = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Liner From', validators=[MinValueValidator(Decimal('0.00'))]) liner_to = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Liner To', validators=[MinValueValidator(Decimal('0.01'))]) screen_intake_method = models.ForeignKey(ScreenIntakeMethodCode, db_column='screen_intake_method_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Intake') screen_type = models.ForeignKey(ScreenTypeCode, db_column='screen_type_code', on_delete=models.CASCADE, blank=True, null=True, 
verbose_name='Type') screen_material = models.ForeignKey(ScreenMaterialCode, db_column='screen_material_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Material') other_screen_material = models.CharField( max_length=50, blank=True, verbose_name='Specify Other Screen Material') screen_opening = models.ForeignKey(ScreenOpeningCode, db_column='screen_opening_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Opening') screen_bottom = models.ForeignKey(ScreenBottomCode, db_column='screen_bottom_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Bottom') other_screen_bottom = models.CharField( max_length=50, blank=True, verbose_name='Specify Other Screen Bottom') filter_pack_from = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Filter Pack From', validators=[MinValueValidator(Decimal('0.00'))]) filter_pack_to = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Filter Pack To', validators=[MinValueValidator(Decimal('0.01'))]) filter_pack_thickness = models.DecimalField(max_digits=5, decimal_places=3, blank=True, null=True, verbose_name='Filter Pack Thickness', validators=[MinValueValidator(Decimal('0.00'))]) filter_pack_material = models.ForeignKey(FilterPackMaterialCode, db_column='filter_pack_material_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Filter Pack Material') filter_pack_material_size = models.ForeignKey(FilterPackMaterialSizeCode, db_column='filter_pack_material_size_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Filter Pack Material Size') development_method = models.ForeignKey(DevelopmentMethodCode, db_column='development_method_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Development Method') development_hours = models.DecimalField(max_digits=9, decimal_places=2, blank=True, null=True, verbose_name='Development Total Duration', 
validators=[MinValueValidator(Decimal('0.00'))]) development_notes = models.CharField( max_length=255, blank=True, verbose_name='Development Notes') water_quality_characteristics = models.ManyToManyField( WaterQualityCharacteristic, db_table='activity_submission_water_quality', blank=True, verbose_name='Obvious Water Quality Characteristics') water_quality_colour = models.CharField( max_length=60, blank=True, verbose_name='Water Quality Colour') water_quality_odour = models.CharField( max_length=60, blank=True, verbose_name='Water Quality Odour') total_depth_drilled = models.DecimalField( max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Total Depth Drilled') finished_well_depth = models.DecimalField( max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Finished Well Depth') final_casing_stick_up = models.DecimalField( max_digits=5, decimal_places=3, blank=True, null=True, verbose_name='Final Casing Stick Up') bedrock_depth = models.DecimalField( max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Depth to Bedrock') static_water_level = models.DecimalField( max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Static Water Level (BTOC)') well_yield = models.DecimalField( max_digits=8, decimal_places=3, blank=True, null=True, verbose_name='Estimated Well Yield') artesian_flow = models.DecimalField( max_digits=7, decimal_places=2, blank=True, null=True, verbose_name='Artesian Flow') artesian_pressure = models.DecimalField( max_digits=5, decimal_places=2, blank=True, null=True, verbose_name='Artesian Pressure') well_cap_type = models.CharField( max_length=40, blank=True, verbose_name='Well Cap Type') well_disinfected = models.BooleanField( default=False, verbose_name='Well Disinfected?', choices=((False, 'No'), (True, 'Yes'))) comments = models.CharField(max_length=3000, blank=True) alternative_specs_submitted = models.BooleanField( default=False, verbose_name='Alternative specs submitted (if 
required)') well_yield_unit = models.ForeignKey( WellYieldUnitCode, db_column='well_yield_unit_code', on_delete=models.CASCADE, blank=True, null=True) # want to be integer in future diameter = models.CharField(max_length=9, blank=True) tracker = FieldTracker() class Meta: db_table = 'activity_submission' def __str__(self): if self.filing_number: return '%s %d %s %s' % (self.activity_submission_guid, self.filing_number, self.well_activity_type.well_activity_type_code, self.street_address) else: return '%s %s' % (self.activity_submission_guid, self.street_address) class LithologyDescription(AuditModel): """ Lithology information details """ lithology_description_guid = models.UUIDField( primary_key=True, default=uuid.uuid4, editable=False) activity_submission = models.ForeignKey( ActivitySubmission, db_column='filing_number', on_delete=models.CASCADE, blank=True, null=True) well_tag_number = models.ForeignKey( Well, db_column='well_tag_number', on_delete=models.CASCADE, blank=True, null=True) lithology_from = models.DecimalField(max_digits=7, decimal_places=2, verbose_name='From', blank=True, null=True, validators=[MinValueValidator(Decimal('0.00'))]) lithology_to = models.DecimalField(max_digits=7, decimal_places=2, verbose_name='To', blank=True, null=True, validators=[MinValueValidator(Decimal('0.01'))]) lithology_raw_data = models.CharField( max_length=250, blank=True, null=True, verbose_name='Raw Data') lithology_description = models.ForeignKey(LithologyDescriptionCode, db_column='lithology_description_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name="Description") lithology_colour = models.ForeignKey(LithologyColourCode, db_column='lithology_colour_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Colour') lithology_hardness = models.ForeignKey(LithologyHardnessCode, db_column='lithology_hardness_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Hardness') lithology_material = 
models.ForeignKey(LithologyMaterialCode, db_column='lithology_material_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name="Material") water_bearing_estimated_flow = models.DecimalField( max_digits=10, decimal_places=4, blank=True, null=True, verbose_name='Water Bearing Estimated Flow') water_bearing_estimated_flow_units = models.ForeignKey( WellYieldUnitCode, db_column='well_yield_unit_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Units') lithology_observation = models.CharField( max_length=250, blank=True, null=True, verbose_name='Observations') bedrock_material = models.ForeignKey(BedrockMaterialCode, db_column='bedrock_material_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Bedrock Material') bedrock_material_descriptor = models.ForeignKey( BedrockMaterialDescriptorCode, db_column='bedrock_material_descriptor_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Descriptor') lithology_structure = models.ForeignKey(LithologyStructureCode, db_column='lithology_structure_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Bedding') lithology_moisture = models.ForeignKey(LithologyMoistureCode, db_column='lithology_moisture_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Moisture') surficial_material = models.ForeignKey(SurficialMaterialCode, db_column='surficial_material_code', related_name='surficial_material_set', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Surficial Material') secondary_surficial_material = models.ForeignKey(SurficialMaterialCode, db_column='secondary_surficial_material_code', related_name='secondary_surficial_material_set', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Secondary Surficial Material') lithology_sequence_number = models.BigIntegerField(blank=True, null=True) class Meta: db_table = 'lithology_description' ordering = ["lithology_sequence_number"] def __str__(self): if 
self.activity_submission: return 'activity_submission {} {} {}'.format(self.activity_submission, self.lithology_from, self.lithology_to) else: return 'well {} {} {}'.format(self.well, self.lithology_from, self.lithology_to) class ProductionData(AuditModel): """ Water production of a well measured by a driller """ production_data_guid = models.UUIDField( primary_key=True, default=uuid.uuid4, editable=False) activity_submission = models.ForeignKey( ActivitySubmission, db_column='filing_number', on_delete=models.CASCADE, blank=True, null=True) well = models.ForeignKey( Well, db_column='well_tag_number', on_delete=models.CASCADE, blank=True, null=True) yield_estimation_method = models.ForeignKey( YieldEstimationMethodCode, db_column='yield_estimation_method_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Estimation Method') yield_estimation_rate = models.DecimalField( max_digits=7, decimal_places=2, verbose_name='Estimation Rate', blank=True, null=True, validators=[MinValueValidator(Decimal('0.00'))]) yield_estimation_duration = models.DecimalField( max_digits=9, decimal_places=2, verbose_name='Estimation Duration', blank=True, null=True, validators=[MinValueValidator(Decimal('0.01'))]) well_yield_unit = models.ForeignKey( WellYieldUnitCode, db_column='well_yield_unit_code', blank=True, null=True) static_level = models.DecimalField( max_digits=7, decimal_places=2, verbose_name='SWL Before Test', blank=True, null=True, validators=[MinValueValidator(Decimal('0.0'))]) drawdown = models.DecimalField( max_digits=7, decimal_places=2, blank=True, null=True, validators=[MinValueValidator(Decimal('0.00'))]) hydro_fracturing_performed = models.BooleanField( default=False, verbose_name='Hydro-fracturing Performed?', choices=((False, 'No'), (True, 'Yes'))) hydro_fracturing_yield_increase = models.DecimalField( max_digits=7, decimal_places=2, verbose_name='Well Yield Increase Due to Hydro-fracturing', blank=True, null=True, 
validators=[MinValueValidator(Decimal('0.00'))]) class Meta: db_table = 'production_data' def __str__(self): if self.activity_submission: return 'activity_submission {} {} {}'.format( self.activity_submission, self.yield_estimation_method, self.yield_estimation_rate) else: return 'well {} {} {}'.format( self.well, self.yield_estimation_method, self.yield_estimation_rate) class LinerPerforation(AuditModel): """ Perforation in a well liner """ liner_perforation_guid = models.UUIDField( primary_key=True, default=uuid.uuid4, editable=False) activity_submission = models.ForeignKey( ActivitySubmission, db_column='filing_number', on_delete=models.CASCADE, blank=True, null=True) well = models.ForeignKey(Well, db_column='well_tag_number', on_delete=models.CASCADE, blank=True, null=True) liner_perforation_from = models.DecimalField( max_digits=7, decimal_places=2, verbose_name='Perforated From', blank=False, validators=[MinValueValidator(Decimal('0.00'))]) liner_perforation_to = models.DecimalField( max_digits=7, decimal_places=2, verbose_name='Perforated To', blank=False, validators=[MinValueValidator(Decimal('0.01'))]) class Meta: db_table = 'liner_perforation' def __str__(self): if self.activity_submission: return 'activity_submission {} {} {}'.format(self.activity_submission, self.liner_perforation_from, self.liner_perforation_to) else: return 'well {} {} {}'.format(self.well, self.liner_perforation_from, self.liner_perforation_to) class Casing(AuditModel): """ Casing information """ casing_guid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) activity_submission = models.ForeignKey(ActivitySubmission, db_column='filing_number', on_delete=models.CASCADE, blank=True, null=True) well_tag_number = models.ForeignKey(Well, db_column='well_tag_number', on_delete=models.CASCADE, blank=True, null=True) casing_from = models.DecimalField(max_digits=7, decimal_places=2, verbose_name='From', null=True, blank=True, 
validators=[MinValueValidator(Decimal('0.00'))]) casing_to = models.DecimalField(max_digits=7, decimal_places=2, verbose_name='To', null=True, blank=True, validators=[MinValueValidator(Decimal('0.01'))]) diameter = models.DecimalField(max_digits=8, decimal_places=3, verbose_name='Diameter', null=True, blank=True, validators=[MinValueValidator(Decimal('0.5'))]) casing_code = models.ForeignKey(CasingCode, db_column='casing_code', on_delete=models.CASCADE, verbose_name='Casing Code', null=True) casing_material = models.ForeignKey(CasingMaterialCode, db_column='casing_material_code', on_delete=models.CASCADE, blank=True, null=True, verbose_name='Casing Material Code') wall_thickness = models.DecimalField(max_digits=6, decimal_places=3, verbose_name='Wall Thickness', blank=True, null=True, validators=[MinValueValidator(Decimal('0.01'))]) drive_shoe = models.NullBooleanField(default=False, null=True, verbose_name='Drive Shoe', choices=((False, 'No'), (True, 'Yes'))) class Meta: ordering = ["casing_from", "casing_to"] db_table = 'casing' def __str__(self): if self.activity_submission: return 'activity_submission {} {} {}'.format(self.activity_submission, self.casing_from, self.casing_to) else: return 'well {} {} {}'.format(self.well, self.casing_from, self.casing_to) def as_dict(self): return { "casing_from": self.casing_from, "casing_to": self.casing_to, "casing_guid": self.casing_guid, "well_tag_number": self.well_tag_number, "diameter": self.diameter, "wall_thickness": self.wall_thickness, "casing_material": self.casing_material, "drive_shoe": self.drive_shoe } class Screen(AuditModel): """ Screen in a well """ screen_guid = models.UUIDField( primary_key=True, default=uuid.uuid4, editable=False) activity_submission = models.ForeignKey( ActivitySubmission, db_column='filing_number', on_delete=models.CASCADE, blank=True, null=True) well = models.ForeignKey(Well, db_column='well_tag_number', on_delete=models.CASCADE, blank=True, null=True) screen_from = 
models.DecimalField(max_digits=7, decimal_places=2, verbose_name='From', blank=True, null=True, validators=[MinValueValidator(Decimal('0.00'))]) screen_to = models.DecimalField(max_digits=7, decimal_places=2, verbose_name='To', blank=False, null=True, validators=[MinValueValidator(Decimal('0.01'))]) internal_diameter = models.DecimalField(max_digits=7, decimal_places=2, verbose_name='Diameter', blank=True, null=True, validators=[MinValueValidator(Decimal('0.0'))]) assembly_type = models.ForeignKey( ScreenAssemblyTypeCode, db_column='screen_assembly_type_code', on_delete=models.CASCADE, blank=True, null=True) slot_size = models.DecimalField(max_digits=7, decimal_places=2, verbose_name='Slot Size', blank=True, null=True, validators=[MinValueValidator(Decimal('0.00'))]) class Meta: db_table = 'screen' ordering = ['screen_from', 'screen_to'] def __str__(self): if self.activity_submission: return 'activity_submission {} {} {}'.format(self.activity_submission, self.screen_from, self.screen_to) else: return 'well {} {} {}'.format(self.well, self.screen_from, self.screen_to)
49.599082
110
0.657097
7,130
64,826
5.735764
0.070126
0.055238
0.062622
0.081891
0.796582
0.772814
0.750685
0.73056
0.699677
0.680702
0
0.011782
0.24845
64,826
1,306
111
49.63706
0.827627
0.049363
0
0.585714
0
0
0.117652
0.019977
0
0
0
0.001531
0
1
0.036735
false
0
0.010204
0.02449
0.528571
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
6714400875ceb6650b1a474c78573256de7ba6d7
57
py
Python
tools/Sikuli/ScenarioLine17.sikuli/ScenarioLine17.py
1cgh/vanessa-automation
4fa7018dc45e1c739ea316700919c4fbf2f5a8d9
[ "BSD-3-Clause" ]
296
2018-05-27T08:03:14.000Z
2022-03-19T08:36:11.000Z
tools/Sikuli/ScenarioLine17.sikuli/ScenarioLine17.py
1cgh/vanessa-automation
4fa7018dc45e1c739ea316700919c4fbf2f5a8d9
[ "BSD-3-Clause" ]
1,562
2018-05-27T18:36:25.000Z
2022-03-31T07:35:11.000Z
tools/Sikuli/ScenarioLine17.sikuli/ScenarioLine17.py
1cgh/vanessa-automation
4fa7018dc45e1c739ea316700919c4fbf2f5a8d9
[ "BSD-3-Clause" ]
299
2018-06-18T20:00:56.000Z
2022-03-29T12:29:55.000Z
click(Pattern("1595108969152.png").targetOffset(-24,22))
28.5
56
0.77193
7
57
6.285714
1
0
0
0
0
0
0
0
0
0
0
0.303571
0.017544
57
1
57
57
0.482143
0
0
0
0
0
0.298246
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
672aa79eed12307c1b1dcc68e91e9cf9a84e21d2
457
py
Python
mathbox/optimizer/tests/test_utils.py
freedeaths/mathbox-py
e294dc1b916bb634807378883b1ba941a924bec5
[ "MIT" ]
7
2021-12-23T07:03:12.000Z
2021-12-31T06:35:34.000Z
mathbox/optimizer/tests/test_utils.py
freedeaths/mathbox-py
e294dc1b916bb634807378883b1ba941a924bec5
[ "MIT" ]
8
2021-12-23T06:12:19.000Z
2022-01-07T15:01:47.000Z
mathbox/optimizer/tests/test_utils.py
freedeaths/mathbox-py
e294dc1b916bb634807378883b1ba941a924bec5
[ "MIT" ]
null
null
null
from ..utils import * def test_local_minmax(): case_1 = [1, 2, 3, 2, 1] print(local_minmax(case_1,True)) print(local_minmax(case_1)) case_2 = [0,0,1,2,3,9,9.1,9,9,3,3,3,2,-1,0,0,0] print("len_2: ",len(case_2)) print(local_minmax(case_2,True)) print(local_minmax(case_2)) assert local_minmax(case_2) == ([(6, 9.1)], [(13, -1)]) assert local_minmax(case_2,True) == ([(6, 9.1),(14,0),(15,0),(16,0)], [(0,0),(1,0),(13, -1)])
38.083333
97
0.582057
93
457
2.666667
0.236559
0.310484
0.423387
0.322581
0.564516
0
0
0
0
0
0
0.150649
0.157549
457
12
97
38.083333
0.493506
0
0
0
0
0
0.015284
0
0
0
0
0
0.181818
1
0.090909
false
0
0.090909
0
0.181818
0.454545
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
6732314750dad52ce5af4ed70486f5678da461a9
153
py
Python
problemsets/Codeforces/Python/B299.py
juarezpaulino/coderemite
a4649d3f3a89d234457032d14a6646b3af339ac1
[ "Apache-2.0" ]
null
null
null
problemsets/Codeforces/Python/B299.py
juarezpaulino/coderemite
a4649d3f3a89d234457032d14a6646b3af339ac1
[ "Apache-2.0" ]
null
null
null
problemsets/Codeforces/Python/B299.py
juarezpaulino/coderemite
a4649d3f3a89d234457032d14a6646b3af339ac1
[ "Apache-2.0" ]
null
null
null
""" * * Author: Juarez Paulino(coderemite) * Email: juarez.paulino@gmail.com * """ n,k=map(int,input().split()) print('YNEOS'['#'*k in input()::2])
19.125
38
0.601307
21
153
4.380952
0.809524
0.282609
0
0
0
0
0
0
0
0
0
0.007519
0.130719
153
8
39
19.125
0.684211
0.490196
0
0
0
0
0.090909
0
0
0
0
0
0
1
0
true
0
0
0
0
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
67580fab2df4364b30a7536dd4521cec04d915df
117
py
Python
scrapy_redis/__init__.py
roycehaynes/scrapy-redis
ac550c8010c7ec85ddcd6168937c7d8c2fe52df2
[ "BSD-3-Clause" ]
null
null
null
scrapy_redis/__init__.py
roycehaynes/scrapy-redis
ac550c8010c7ec85ddcd6168937c7d8c2fe52df2
[ "BSD-3-Clause" ]
null
null
null
scrapy_redis/__init__.py
roycehaynes/scrapy-redis
ac550c8010c7ec85ddcd6168937c7d8c2fe52df2
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python try: from setuptools import setup except ImportError: from distutils.core import setup
19.5
36
0.752137
16
117
5.5
0.8125
0.25
0
0
0
0
0
0
0
0
0
0
0.179487
117
6
36
19.5
0.916667
0.17094
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
675f363ae5fef4f09f1c1cd1ac0088f2bb43ec02
116
py
Python
fba/engine/hooks/__init__.py
hukkelas/full_body_anonymization
c61745b137c84ffb742ef6ab2f4721db4acf22b7
[ "MIT" ]
27
2022-01-06T20:15:24.000Z
2022-03-29T11:54:49.000Z
fba/engine/hooks/__init__.py
hukkelas/full_body_anonymization
c61745b137c84ffb742ef6ab2f4721db4acf22b7
[ "MIT" ]
2
2022-03-17T06:04:23.000Z
2022-03-25T08:50:57.000Z
fba/engine/hooks/__init__.py
hukkelas/full_body_anonymization
c61745b137c84ffb742ef6ab2f4721db4acf22b7
[ "MIT" ]
2
2022-01-07T13:16:59.000Z
2022-01-16T02:10:50.000Z
from .build import build_hooks, HookBase from .stat_logger import TimeLoggerHook from .helpers import CheckpointHook
38.666667
40
0.862069
15
116
6.533333
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.103448
116
3
41
38.666667
0.942308
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5