hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2bc827b81d318e5b08fd6be908891296ced5a47e | 3,350 | py | Python | src/bpyutils/util/request.py | achillesrasquinha/bpyutils | 84bbbf1dc37629413fbc14b909188a54995e95a1 | [
"MIT"
] | 1 | 2022-02-01T04:50:22.000Z | 2022-02-01T04:50:22.000Z | src/bpyutils/util/request.py | achillesrasquinha/bpyutils | 84bbbf1dc37629413fbc14b909188a54995e95a1 | [
"MIT"
] | 2 | 2021-12-07T10:40:44.000Z | 2021-12-23T13:42:07.000Z | src/bpyutils/util/request.py | achillesrasquinha/bpyutils | 84bbbf1dc37629413fbc14b909188a54995e95a1 | [
"MIT"
] | null | null | null | import re
import os.path as osp
import requests
# from fake_useragent import UserAgent
from bpyutils.db import get_connection
from bpyutils.util.proxy import get_random_requests_proxies
from bpyutils.util._dict import merge_dict
from bpyutils.util.imports import import_or_raise
from bpyutils.util.string import get_random_str
from bpyutils.util.system import makepath
from bpyutils.util.imports import import_handler
from bpyutils import request as req
import bpyutils as bpy
# user_agent = UserAgent(verify_ssl = False)
# https://git.io/JsnSI
_REGEX_URL = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
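# Illustrative matches for the regex above (examples added for clarity, not
# from the original source): _REGEX_URL accepts "https://example.com:8080/path"
# and "http://127.0.0.1", but rejects "example.com" (missing scheme) and
# "https://" (missing host).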
def proxy_request(*args, **kwargs):
fallback = kwargs.pop("fallback", False)
session = requests.Session()
proxies = get_random_requests_proxies()
# session.headers.update({ "User-Agent": user_agent.random })
session.proxies.update(proxies)
try:
kwargs["timeout"] = 5
response = session.request(*args, **kwargs)
except requests.exceptions.ConnectionError as e:
if fallback:
session.headers = kwargs.get("headers", {})
session.proxies = kwargs.get("proxies", {})
response = session.request(*args, **kwargs)
else:
raise e
return response
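# Usage sketch (hedged; "https://example.com" is a placeholder URL):
# response = proxy_request("GET", "https://example.com", fallback = True)
# With fallback = True, a ConnectionError through the proxy triggers a direct
# retry using the caller-supplied headers/proxies; with fallback = False the
# ConnectionError is re-raised.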
def proxy_grequest(*args, **kwargs):
proxies = get_random_requests_proxies()
# kwargs["headers"] = merge_dict(kwargs.get("headers", {}), {
# "User-Agent": user_agent.random })
kwargs["proxies"] = merge_dict(kwargs.get("proxies", {}), proxies)
grequests = import_or_raise("grequests")
return grequests.request(*args, **kwargs)
def check_url(url, raise_err = True):
if not re.match(_REGEX_URL, url):
if raise_err:
raise ValueError("Invalid URL: %s" % url)
return False
return True
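# Behaviour sketch: check_url("https://example.com") -> True;
# check_url("not a url") raises ValueError, while
# check_url("not a url", raise_err = False) -> False.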
def download_file(url, path = None, chunk_size = None, req_kwargs = { }):
chunk_size = chunk_size or bpy.settings.get("max_chunk_download_bytes")
response = req.get(url, stream = True, **req_kwargs)
response.raise_for_status()
headers = response.headers
size_total = int(headers.get('content-length', 0))
tqdm = import_handler("tqdm.tqdm")
progress_bar = None
if tqdm:
progress_bar = tqdm(total = size_total, unit = 'iB', unit_scale = True)
if not path:
header = headers.get("content-disposition")
if header:
name = re.findall("filename=(.+)", header)[0]
path = osp.abspath(name)
else:
path = get_random_str()
makepath(path)
with open(path, "wb") as f:
for content in response.iter_content(chunk_size = chunk_size):
if progress_bar:
progress_bar.update(len(content))
if content:
f.write(content)
if progress_bar:
progress_bar.close()
if size_total != 0 and progress_bar.n != size_total:
raise ValueError("Unable to read downloaded file into path %s." % path)
return path | 30.18018 | 100 | 0.618806 | 429 | 3,350 | 4.682984 | 0.314685 | 0.047785 | 0.047785 | 0.035839 | 0.151319 | 0.040816 | 0.005973 | 0.005973 | 0.005973 | 0 | 0 | 0.010212 | 0.24 | 3,350 | 111 | 101 | 30.18018 | 0.778869 | 0.096716 | 0 | 0.106667 | 0 | 0.026667 | 0.120066 | 0.046434 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053333 | false | 0 | 0.186667 | 0 | 0.306667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bcbff98a3b8f0e8db73d81591bf41ea08b8b323 | 1,845 | py | Python | bclearer_boson_1_2_source/b_code/configurations/getters/boson_1_2_2e_c3_configuration_getter_merge_inspire_bclearer_naming_pattern.py | teapowell/bclearer_boson_1_2 | 571b2e1ca6dee93ccc5cb4e30abe2660f40c2ac0 | [
"MIT"
] | null | null | null | bclearer_boson_1_2_source/b_code/configurations/getters/boson_1_2_2e_c3_configuration_getter_merge_inspire_bclearer_naming_pattern.py | teapowell/bclearer_boson_1_2 | 571b2e1ca6dee93ccc5cb4e30abe2660f40c2ac0 | [
"MIT"
] | null | null | null | bclearer_boson_1_2_source/b_code/configurations/getters/boson_1_2_2e_c3_configuration_getter_merge_inspire_bclearer_naming_pattern.py | teapowell/bclearer_boson_1_2 | 571b2e1ca6dee93ccc5cb4e30abe2660f40c2ac0 | [
"MIT"
] | 1 | 2021-11-19T13:05:53.000Z | 2021-11-19T13:05:53.000Z | from bclearer_source.b_code.common_knowledge.content_operation_types import ContentOperationTypes
from bclearer_source.b_code.common_knowledge.digitialisation_level_stereotype_matched_ea_objects import \
DigitalisationLevelStereotypeMatchedEaObjects
from bclearer_source.b_code.configurations.content_operation_configurations import ContentOperationConfigurations
from bclearer_source.b_code.configurations.load_hdf5_model_configurations import LoadHdf5ModelConfigurations
from bclearer_boson_1_2_source.b_code.configurations.resource_constants.resources_filename_constants import \
CONTENT_UNIVERSE_BCLEARER_NAMING_PATTERN_COMPONENT_FILENAME_HDF5
from bclearer_boson_1_2_source.b_code.configurations.resource_constants.resources_namespace_constants import \
CONTENT_OPERATIONS_RESOURCES_NAMESPACE
def get_boson_1_2_2e_c3_configuration_load_hdf5_bclearer_naming_pattern() \
-> LoadHdf5ModelConfigurations:
load_hdf5_model_configuration = \
LoadHdf5ModelConfigurations(
resource_namespace=CONTENT_OPERATIONS_RESOURCES_NAMESPACE,
resource_file_name=CONTENT_UNIVERSE_BCLEARER_NAMING_PATTERN_COMPONENT_FILENAME_HDF5,
universe_short_name='2e_c3_input_cont_uni_bclearer_naming')
return \
load_hdf5_model_configuration
def get_boson_1_2_2e_c3_configuration_merge_inspire_bclearer_naming_pattern() \
-> ContentOperationConfigurations:
content_operation_configuration = \
ContentOperationConfigurations(
content_operation_type=ContentOperationTypes.MERGE_UNIVERSES,
output_universe_short_name='2e_c3_output_merge_bclearer_naming',
default_digitalisation_level_stereotype=DigitalisationLevelStereotypeMatchedEaObjects.DIGITALISATION_LEVEL_1_CLASS_STEREOTYPE)
return \
content_operation_configuration
| 52.714286 | 138 | 0.852033 | 191 | 1,845 | 7.60733 | 0.303665 | 0.049553 | 0.045423 | 0.052306 | 0.348245 | 0.319339 | 0.26841 | 0.216105 | 0.096352 | 0.096352 | 0 | 0.015922 | 0.114905 | 1,845 | 34 | 139 | 54.264706 | 0.873852 | 0 | 0 | 0.074074 | 0 | 0 | 0.03794 | 0.03794 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.222222 | 0 | 0.37037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bcdc35a04b16095280a01dc75b1da9b3b40cc93 | 1,246 | py | Python | HackerRank/Interview Preparation Kit/Arrays/Dynamic Array/solution.py | ltdangkhoa/Computer-Science-Fundamental | b70ba714e1dd13fcb377125e047c5fc08d3a82b3 | [
"MIT"
] | null | null | null | HackerRank/Interview Preparation Kit/Arrays/Dynamic Array/solution.py | ltdangkhoa/Computer-Science-Fundamental | b70ba714e1dd13fcb377125e047c5fc08d3a82b3 | [
"MIT"
] | null | null | null | HackerRank/Interview Preparation Kit/Arrays/Dynamic Array/solution.py | ltdangkhoa/Computer-Science-Fundamental | b70ba714e1dd13fcb377125e047c5fc08d3a82b3 | [
"MIT"
] | null | null | null | """solution.py"""
import math
import os
import random
import re
import sys
import timeit
class SimpleXOR:
def xor(self, a, b):
return a ^ b
def dynamicArray(n, queries):
last_answer = 0
all_answer = []
arr = [[] for _ in range(n)]
xor = SimpleXOR()
for row in queries:
t = row[0]
x = row[1]
y = row[2]
sn = xor.xor(x, last_answer) % n
if t == 1:
arr[sn].append(y)
elif t == 2:
last_answer = arr[sn][y%len(arr[sn])]
all_answer.append(last_answer)
return all_answer
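# Worked example (the standard HackerRank sample): n = 2,
# queries = [[1, 0, 5], [1, 1, 7], [1, 0, 3], [2, 1, 0], [2, 1, 1]]
# -> dynamicArray(2, queries) returns [7, 3]: the two type-2 queries read
# arr[(1 ^ 0) % 2][0 % 1] = 7 and then arr[(1 ^ 7) % 2][1 % 2] = 3.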
def run_time_it():
"""Trigger timeit"""
dynamicArray(n, queries)
if __name__ == '__main__':
INPUT_PATH = 'input/'
for filename in os.listdir(INPUT_PATH):
print('📂 %s' % (filename))
f = open(INPUT_PATH + filename, 'r')
inputs = f.readlines()
input_line = 0
nq = inputs[input_line].rstrip().split()
input_line += 1
n = int(nq[0])
q = int(nq[1])
queries = []
for _ in range(q):
queries.append(list(map(int, inputs[input_line].rstrip().split())))
input_line += 1
print("⏰ %.12f seconds ⏰" % timeit.timeit(run_time_it, number=1))
| 22.654545 | 79 | 0.539326 | 171 | 1,246 | 3.777778 | 0.403509 | 0.069659 | 0.06192 | 0.065015 | 0.111455 | 0.111455 | 0.111455 | 0.111455 | 0 | 0 | 0 | 0.01649 | 0.31862 | 1,246 | 54 | 80 | 23.074074 | 0.740872 | 0.020867 | 0 | 0.046512 | 0 | 0 | 0.029777 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.139535 | 0.023256 | 0.27907 | 0.046512 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bcdd23129cd748120cb2b5f88558b1bd84fa70e | 9,644 | py | Python | prettifyoutput_fast.py | compbio-iitr/SRFv2 | b7350f15db4ff0f21f268b81d78d77004530a6e8 | [
"MIT"
] | null | null | null | prettifyoutput_fast.py | compbio-iitr/SRFv2 | b7350f15db4ff0f21f268b81d78d77004530a6e8 | [
"MIT"
] | null | null | null | prettifyoutput_fast.py | compbio-iitr/SRFv2 | b7350f15db4ff0f21f268b81d78d77004530a6e8 | [
"MIT"
] | null | null | null | import re
import json
from reading_json import modification_json
from downloadable_file_fast import downloadable
def _get_nucleotide_contributions(line):
patterns = line.split(' ')
contribution = [{'A': 0, 'C': 0, 'T': 0, 'G': 0} for i in range(len(patterns[0]))]
for pattern in patterns:
# print pattern
for idx, nt in enumerate(pattern):
contribution[idx][nt] += 1
return contribution
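# Worked example: _get_nucleotide_contributions("ACG ATG") returns
# [{'A': 2, 'C': 0, 'T': 0, 'G': 0},
#  {'A': 0, 'C': 1, 'T': 1, 'G': 0},
#  {'A': 0, 'C': 0, 'T': 0, 'G': 2}]
# i.e. per-position nucleotide counts across the space-separated patterns.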
def pattern2html(seq_file, locations,region_no, mer, no, ps, cons, noc, score):
    fasta_file = open(seq_file, 'r')
    fasta_file.readline()
    genome = ''
    for line in fasta_file:
        genome += line.strip()
    fasta_file.close()
genome = genome.upper()
genome = re.sub(r'[^ACGT]', '', genome)
if len(locations) >= 2:
p1, p2 = locations[0]-1, locations[-1]+mer-1
fmt = '%%0%dd'%(len(str(locations[-1])))
html_string = '<div class="pattern" id="%s%d">' % (ps.lower(), region_no)
html_string += '<div class="pattern-title primary">'
html_string += ps
html_string += '</div><div class="pattern-info">'
html_string += '<span class="pattern-detail">Pattern Searched: <span class="match dna-sequence">%s</span></span><br />' % ps
html_string += '<span class="pattern-detail">Consensus Pattern: <span class="dna-sequence">%s</span></span><br />' % cons
html_string += '<span class="pattern-detail">Number of Copies: %d</span><br />' % noc
html_string += '<span class="pattern-detail">Score: %s</span><br />' % score
html_string += '</div><div class="dna-sequence">'
idx = p1
loc_idx = 0
count = 0
while idx < p2:
if loc_idx > 0 and idx == locations[loc_idx-1] - 1 + mer:
html_string += '</span>'
if loc_idx < len(locations) and idx == locations[loc_idx] - 1:
html_string += '<span class="match">'
loc_idx += 1
if count%10 == 0 and count > 0:
html_string += " "
if count%60 == 0:
if not count == 0:
html_string += '<span class="droid-sans">' + fmt%(count+locations[0]-1) + '</span>' + " <br />" + '<span class="droid-sans">' + fmt%(count+1+locations[0]-1) + '</span>' + " "
else:
html_string += '<span class="droid-sans">' + fmt%(count+locations[0]) + '</span>' + " "
html_string += genome[idx]
idx += 1
count += 1
pattern_file = seq_file.replace('.fasta', '/') + 'pattern%d%d%d.html'%(region_no, mer, no)
html_string += '<br /><br /></div></div>'
with open(pattern_file, 'w') as pf:
pf.write(html_string)
return pattern_file[pattern_file.rfind('/'):]
return None
def split_and_append(text, split_by, append_text):
html_string = ''
length = len(text)
div = length/split_by
for k in xrange(1, div+1):
html_string = html_string + text[(k-1)*split_by:k*split_by] + append_text
html_string = html_string + text[div*split_by:length]
return html_string
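# Worked example, assuming Python 2 integer division (this module also uses
# xrange): split_and_append("ABCDEFG", 3, "-") -> "ABC-DEF-G". The text is
# chunked into split_by-sized pieces, each followed by append_text, with any
# remainder appended as-is.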
#def pat2json(filename, jsonfilename):
# json_string = {'data' : {}, 'mers': []}
# mers = []
# with open(filename, 'r') as pat:
# idx = 0
# lines = pat.readlines()
# while idx < (len(lines)):
# line = lines[idx].strip()
# if line.startswith('#'):
# result = re.search(r'^#:REGION:(\d+):(\d+):(\d+)$', line)
# frm, upto, mer = result.group(1), result.group(2), result.group(3)
#
#
# region_dict = {}
# region_dict['from'] = int(frm)
# region_dict['upto'] = int(upto)
# region_dict['patterns'] = []
#
# idx += 1
# if idx < len(lines):
#
# line = lines[idx].strip()
# hello = 0
# while idx < len(lines) and not line.startswith('#'):
# hello = 1
# pattern_dict = {}
#
# result = re.search(r'^<:([ACTG]+):(\d+):([ACTGactg/]+)$', line)
#
# pattern_dict['pattern_searched'] = result.group(1)
# pattern_dict['consensus'] = result.group(3)
# pattern_dict['number_of_copies'] = int(result.group(2))
#
# idx += 1
#
# line = lines[idx].strip()
#
# pattern_dict['contribution'] = _get_nucleotide_contributions(line[2:])
#
# idx += 1
#
# scores = [float(score) for score in lines[idx][2:].split(' ')]
# score = sum(scores)/len(scores)
#
# pattern_dict['score'] = '%.4f' % score
#
# region_dict['patterns'].append(pattern_dict)
#
# idx += 2
#
# if idx < len(lines):
# line = lines[idx].strip()
# idx -= 1
# if hello == 1:
# mers.append(mer)
# mer_data = json_string['data'].get(mer)
# if mer_data is None:
# json_string['data'][mer] = []
# json_string['data'][mer].append(region_dict)
#
#
# idx += 1
# mers = sorted(list(set(mers)))
# json_string['mers'] = mers
# with open(jsonfilename, 'w') as jsonfile:
# jsonfile.write(json.dumps(json_string))#, sort_keys=True, indent=4, separators=(',', ': ')))
def pat2json(filename, jsonfilename, modified_name, download_filename, region_no, region_begin, region_end,start,end):
seq_file = filename[:filename.rfind('/')]
seq_file = seq_file +'.fasta'
json_string = {'data' : {}, 'mers': [], 'region_info': {'region_no':region_no, 'region_begin':start, 'region_end':end}}
mers = []
pattern_no = 1
with open(filename, 'r') as pat:
idx = 0
lines = pat.readlines()
while idx < (len(lines)):
line = lines[idx].strip()
if line.startswith('#'):
if idx == len(lines) - 1:
idx += 1
continue
line_next = lines[idx+1].strip()
if line_next.startswith('#'):
idx += 1
continue
result = re.search(r'^#:REGION:(\d+):(\d+):(\d+)$', line)
frm, upto, mer = result.group(1), result.group(2), result.group(3)
region_dict = {}
region_dict['from'] = int(frm) + region_begin - 1
region_dict['upto'] = int(upto) + region_begin - 1
region_dict['patterns'] = []
idx += 1
if idx < len(lines):
line = lines[idx].strip()
hello = 0
while idx < len(lines) and not line.startswith('#'):
hello = 1
pattern_dict = {}
result = re.search(r'^<:([ACTG]+):(\d+):([ACTGWSRYKMBDHVN]+)$', line)
pattern_dict['pattern_searched'] = result.group(1)
pattern_dict['consensus'] = result.group(3)
pattern_dict['number_of_copies'] = int(result.group(2))
idx += 1
line = lines[idx].strip()
pattern_dict['contribution'] = _get_nucleotide_contributions(line[2:])
idx += 1
scores = [float(score) for score in lines[idx][2:].split(' ')]
score = sum(scores)/len(scores)
pattern_dict['score'] = '%.4f' % score
idx += 1
line = lines[idx].strip()[2:]
locations = [int(loc) + region_begin - 1 for loc in line.split(' ')]
pattern_file = pattern2html(seq_file, locations, region_no, int(mer), pattern_no, pattern_dict['pattern_searched'], pattern_dict['consensus'], pattern_dict['number_of_copies'], pattern_dict['score'])
if pattern_file:
pattern_dict['pattern_file'] = pattern_file
else:
pattern_dict['pattern_file'] = ''
pattern_no += 1
region_dict['patterns'].append(pattern_dict)
idx += 1
if idx < len(lines):
line = lines[idx].strip()
idx -= 1
if hello == 1:
mers.append(mer)
mer_data = json_string['data'].get(mer)
if mer_data is None:
json_string['data'][mer] = []
json_string['data'][mer].append(region_dict)
idx += 1
mers = sorted(list(set(mers)))
json_string['mers'] = mers
with open(jsonfilename, 'w') as jsonfile:
jsonfile.write(json.dumps(json_string))#, sort_keys=True, indent=4, separators=(',', ': ')))
modification_json(jsonfilename,modified_name,region_end)
downloadable(modified_name,filename,download_filename)
#if __name__ == '__main__':
# pat2json('pat1.file', 'data.json')
#filename = './greater/pat2.file'
#jsonfilename = './pretty/data2.json'
#modified_name = './pretty/modified1.json'
#region_no = 1
#region_begin = 1
#region_end = 10000
#start = 9500
#end = 19500
#pat2json(filename, jsonfilename, modified_name, region_no, region_begin, region_end,start,end) | 39.52459 | 219 | 0.501659 | 1,068 | 9,644 | 4.367978 | 0.153558 | 0.049303 | 0.021222 | 0.032797 | 0.598714 | 0.527331 | 0.434941 | 0.419293 | 0.403859 | 0.403859 | 0 | 0.018969 | 0.344048 | 9,644 | 244 | 220 | 39.52459 | 0.718463 | 0.282663 | 0 | 0.140741 | 0 | 0.014815 | 0.14177 | 0.044331 | 0.014815 | 0 | 0 | 0 | 0 | 1 | 0.02963 | false | 0 | 0.02963 | 0 | 0.088889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bcf4ebb9b8e16ba99b810270dfd24d5a5b61f81 | 1,235 | py | Python | jsonscribe/filters.py | aweber/json-scribe | cc5ea2ed33afb0ffcee0c4610de77be83200c173 | [
"BSD-3-Clause"
] | null | null | null | jsonscribe/filters.py | aweber/json-scribe | cc5ea2ed33afb0ffcee0c4610de77be83200c173 | [
"BSD-3-Clause"
] | 1 | 2021-05-12T17:54:04.000Z | 2021-05-12T17:54:04.000Z | jsonscribe/filters.py | aweber/json-scribe | cc5ea2ed33afb0ffcee0c4610de77be83200c173 | [
"BSD-3-Clause"
] | 1 | 2021-05-12T12:16:39.000Z | 2021-05-12T12:16:39.000Z | import logging
import uuid
from jsonscribe import utils
class AttributeSetter(logging.Filter):
"""
Ensure that attributes exist on :class:`~logging.LogRecord` s.
:keyword dict add_fields: maps fields to create on
:class:`~logging.LogRecord` instances to their default
values
The values in the `add_fields` mapping can be strings that start
with ``'ext://'`` to invoke custom behaviors. The following values
are recognized:
**UUID**
Generate a new UUIDv4 instance via :func:`uuid.uuid4()`
**now**
Generate a new timezone-aware UTC :class:`datetime.datetime`
instance
"""
def __init__(self, *args, **kwargs):
self.add_fields = kwargs.pop('add_fields', {})
logging.Filter.__init__(self, *args, **kwargs)
self.ext_map = {
'ext://UUID': uuid.uuid4,
'ext://now': utils.utcnow,
}
def filter(self, record):
for name, default in self.add_fields.items():
if not hasattr(record, name):
try:
setattr(record, name, self.ext_map[default]())
except KeyError:
setattr(record, name, default)
return 1
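# Usage sketch (assumed wiring, not part of the original module):
# handler = logging.StreamHandler()
# handler.addFilter(AttributeSetter(add_fields={"correlation_id": "ext://UUID"}))
# Every record passing through the handler then carries a fresh UUIDv4 in
# record.correlation_id unless that attribute is already set.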
| 28.068182 | 71 | 0.595951 | 144 | 1,235 | 5.006944 | 0.527778 | 0.062413 | 0.038835 | 0.0638 | 0.061026 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004592 | 0.294737 | 1,235 | 43 | 72 | 28.72093 | 0.823192 | 0.397571 | 0 | 0 | 0 | 0 | 0.04246 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.157895 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bd109992083f654ea69f6db60e377f6f8a7e5c8 | 831 | py | Python | sounds/.ipynb_checkpoints/preprocess_sound-checkpoint.py | yanisbahroun/NeurIPS_SM_ICA | 07a719564153be5732a9ad556337d0bb55c9cb1c | [
"BSD-2-Clause"
] | null | null | null | sounds/.ipynb_checkpoints/preprocess_sound-checkpoint.py | yanisbahroun/NeurIPS_SM_ICA | 07a719564153be5732a9ad556337d0bb55c9cb1c | [
"BSD-2-Clause"
] | null | null | null | sounds/.ipynb_checkpoints/preprocess_sound-checkpoint.py | yanisbahroun/NeurIPS_SM_ICA | 07a719564153be5732a9ad556337d0bb55c9cb1c | [
"BSD-2-Clause"
] | null | null | null | """This script makes the sources have the same length
as well as the same sampling rate."""
from scipy.io import wavfile
import utilities as utl
# Read the .wav files as numpy arrays
rate1, data1 = wavfile.read("sourceX.wav")
rate2, data2 = wavfile.read("sourceY.wav")
# Plot the sounds as time series data
utl.plotSounds([data1, data2], ["PhoneRing", "StarWars"], rate1, "../plots/sounds/Ring_StarWars_original")
# Make both files have the same length as well as the same sampling rate
minimum = min(data1.shape[0], data2.shape[0])
# Slicing the array for both the sources
data1 = data1[0:minimum]
data2 = data2[0:minimum]
# Write the arrays to the wav files with a sampling rate which is the average of the two
wavfile.write("sourceX.wav", (rate1 + rate2)/2, data1)
wavfile.write("sourceY.wav", (rate1 + rate2)/2, data2) | 37.772727 | 106 | 0.740072 | 135 | 831 | 4.540741 | 0.451852 | 0.058728 | 0.032626 | 0.052202 | 0.078303 | 0.078303 | 0.078303 | 0 | 0 | 0 | 0 | 0.035211 | 0.145608 | 831 | 22 | 107 | 37.772727 | 0.828169 | 0.43201 | 0 | 0 | 0 | 0 | 0.213823 | 0.082073 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bd309d040112a1127680460018a592ca804766a | 1,389 | py | Python | swi_ml/classification/logistic_regression.py | aitikgupta/swi-ml | c7a44c71683a9bfb4adb13c7eb6117e652177807 | [
"MIT"
] | 16 | 2021-01-30T16:03:19.000Z | 2022-03-27T11:13:05.000Z | swi_ml/classification/logistic_regression.py | aitikgupta/swi-ml | c7a44c71683a9bfb4adb13c7eb6117e652177807 | [
"MIT"
] | 1 | 2021-01-30T19:28:05.000Z | 2021-01-30T19:28:05.000Z | swi_ml/classification/logistic_regression.py | aitikgupta/swi-ml | c7a44c71683a9bfb4adb13c7eb6117e652177807 | [
"MIT"
] | null | null | null | from swi_ml import activations
from swi_ml.regression.linear_regression import (
_BaseRegression,
L1_L2Regularisation,
)
class LogisticRegressionGD(_BaseRegression):
def __init__(
self,
num_iterations: int,
learning_rate: float,
multiply_factor=None,
l1_ratio=None,
normalize=False,
initialiser="uniform",
verbose=None,
):
self.activation = activations.Sigmoid()
regularisation = L1_L2Regularisation(
multiply_factor=multiply_factor, l1_ratio=l1_ratio
)
super().__init__(
num_iterations,
learning_rate,
normalize,
regularisation,
initialiser,
verbose,
)
    def predict(self, X, probability=False):
        """
        Given an input array X, it returns the prediction array
        (GPU array if CuPy backend is enabled) after inferencing
        """
        activated_pred = self._predict(self._predict_preprocess(X))
        if probability:
            return activated_pred
        # _predict already applies the sigmoid activation, so threshold its
        # output directly rather than activating a second time (a double
        # sigmoid always exceeds 0.5 and would force every label to 1)
        return self.backend.where(activated_pred > 0.5, 1, 0)
def _predict(self, X):
return self.activation.activate(
self.backend.asarray(X).dot(self.W) + self.b
)
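# Usage sketch (hypothetical data; fit() is assumed to be inherited from
# _BaseRegression, whose exact signature is not shown here):
# model = LogisticRegressionGD(num_iterations=100, learning_rate=0.1)
# model.fit(X_train, y_train)                     # assumed inherited method
# labels = model.predict(X_test)                  # hard 0/1 labels
# probs = model.predict(X_test, probability=True) # sigmoid outputs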
| 28.346939 | 65 | 0.596832 | 137 | 1,389 | 5.80292 | 0.489051 | 0.055346 | 0.022642 | 0.037736 | 0.083019 | 0.083019 | 0 | 0 | 0 | 0 | 0 | 0.011702 | 0.323254 | 1,389 | 48 | 66 | 28.9375 | 0.834043 | 0.080634 | 0 | 0 | 0 | 0 | 0.005622 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0 | 0.05 | 0.025 | 0.225 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bd3ca214bb18b2244342ed2d4e2e70853a6ed4f | 1,721 | py | Python | ee/clickhouse/views/groups.py | csmatar/posthog | 4587cfe18625f302726c531f06a32c18e9749e9d | [
"MIT"
] | null | null | null | ee/clickhouse/views/groups.py | csmatar/posthog | 4587cfe18625f302726c531f06a32c18e9749e9d | [
"MIT"
] | 15 | 2021-11-09T10:49:34.000Z | 2021-11-09T16:11:01.000Z | ee/clickhouse/views/groups.py | csmatar/posthog | 4587cfe18625f302726c531f06a32c18e9749e9d | [
"MIT"
] | null | null | null | import json
from collections import defaultdict
from rest_framework import exceptions, request, response, serializers, viewsets
from rest_framework.decorators import action
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from ee.clickhouse.client import sync_execute
from ee.clickhouse.sql.person import GET_TEAM_PERSON_DISTINCT_IDS
from posthog.api.routing import StructuredViewSetMixin
from posthog.models.group_type_mapping import GroupTypeMapping
class GroupTypeSerializer(serializers.ModelSerializer):
class Meta:
model = GroupTypeMapping
fields = ["group_type", "group_type_index"]
class ClickhouseGroupsView(StructuredViewSetMixin, ListModelMixin, viewsets.GenericViewSet):
serializer_class = GroupTypeSerializer
queryset = GroupTypeMapping.objects.all()
pagination_class = None
@action(methods=["GET"], detail=False)
def property_definitions(self, request: request.Request, **kw):
rows = sync_execute(
f"""
SELECT group_type_index, tupleElement(keysAndValues, 1) as key, count(*) as count
FROM groups
ARRAY JOIN JSONExtractKeysAndValuesRaw(group_properties) as keysAndValues
WHERE team_id = %(team_id)s
GROUP BY group_type_index, tupleElement(keysAndValues, 1)
ORDER BY group_type_index ASC, count DESC, key ASC
""",
{"team_id": self.team.pk},
)
group_type_index_to_properties = defaultdict(list)
for group_type_index, key, count in rows:
group_type_index_to_properties[group_type_index].append({"name": key, "count": count})
return response.Response(group_type_index_to_properties)
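# Response shape (derived from the query above): a JSON object mapping each
# group_type_index to [{"name": <property key>, "count": <row count>}, ...],
# ordered by group_type_index, then count descending, then key.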
| 39.113636 | 98 | 0.733876 | 193 | 1,721 | 6.321244 | 0.46114 | 0.081148 | 0.103279 | 0.039344 | 0.129508 | 0.065574 | 0 | 0 | 0 | 0 | 0 | 0.001443 | 0.194654 | 1,721 | 43 | 99 | 40.023256 | 0.878788 | 0 | 0 | 0 | 0 | 0 | 0.25276 | 0.057525 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.264706 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bd55328213ae4ee50db668e28293c74adbf04e3 | 1,198 | py | Python | api/audit_trail/migrations/0009_control_code_payload.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | null | null | null | api/audit_trail/migrations/0009_control_code_payload.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | null | null | null | api/audit_trail/migrations/0009_control_code_payload.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | null | null | null | from django.db import migrations
from api.audit_trail.enums import AuditType
def update_good_review_payload(apps, schema_editor):
"""
Convert old AuditType.verb with format to new AuditType.verb as enum value.
"""
if schema_editor.connection.alias != "default":
return
Audit = apps.get_model("audit_trail", "Audit")
count = 0
for audit in Audit.objects.filter(verb=AuditType.GOOD_REVIEWED):
if "new_control_code" in audit.payload:
print("UPDAING FOR", audit.id)
new_payload = {
"good_name": audit.payload["good_name"],
"old_control_list_entry": audit.payload["old_control_code"],
"new_control_list_entry": audit.payload["new_control_code"],
}
audit.payload = new_payload
count += 1
audit.save()
if count:
print({"updated": count, "existing": Audit.objects.filter(verb=AuditType.GOOD_REVIEWED).count()})
class Migration(migrations.Migration):
dependencies = [
("audit_trail", "0008_granted_application_backfill"),
]
operations = [
migrations.RunPython(update_good_review_payload),
]
| 30.717949 | 105 | 0.641903 | 137 | 1,198 | 5.372263 | 0.459854 | 0.081522 | 0.043478 | 0.0625 | 0.192935 | 0.116848 | 0.116848 | 0 | 0 | 0 | 0 | 0.006674 | 0.249583 | 1,198 | 38 | 106 | 31.526316 | 0.812013 | 0.062604 | 0 | 0 | 0 | 0 | 0.183379 | 0.069557 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.074074 | 0 | 0.259259 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bd5f8544212e23f65b882d89a3c224ece175d41 | 2,882 | py | Python | src/App/tests/test_setConfiguration.py | tseaver/Zope-RFA | 08634f39b0f8b56403a2a9daaa6ee4479ef0c625 | [
"ZPL-2.1"
] | 2 | 2015-12-21T10:34:56.000Z | 2017-09-24T11:07:58.000Z | src/App/tests/test_setConfiguration.py | MatthewWilkes/Zope | 740f934fc9409ae0062e8f0cd6dcfd8b2df00376 | [
"ZPL-2.1"
] | null | null | null | src/App/tests/test_setConfiguration.py | MatthewWilkes/Zope | 740f934fc9409ae0062e8f0cd6dcfd8b2df00376 | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests for App.config.setConfiguration()
"""
import unittest
from Testing.ZopeTestCase.layer import ZopeLite
class SetConfigTests(unittest.TestCase):
layer = ZopeLite
def setUp(self):
# Save away everything as we need to restore it later on
self.clienthome = self.getconfig('clienthome')
self.instancehome = self.getconfig('instancehome')
self.debug_mode = self.getconfig('debug_mode')
def tearDown(self):
self.setconfig(clienthome=self.clienthome,
instancehome=self.instancehome,
debug_mode=self.debug_mode)
def getconfig(self, key):
import App.config
config = App.config.getConfiguration()
return getattr(config, key, None)
def setconfig(self, **kw):
import App.config
config = App.config.getConfiguration()
for key, value in kw.items():
setattr(config, key, value)
App.config.setConfiguration(config)
def testClientHomeLegacySources(self):
import os
import App.FindHomes
import Globals # for data
import __builtin__
self.setconfig(clienthome='foo')
self.assertEqual(os.environ.get('CLIENT_HOME'), 'foo')
self.assertEqual(App.FindHomes.CLIENT_HOME, 'foo')
self.assertEqual(__builtin__.CLIENT_HOME, 'foo')
self.assertEqual(Globals.data_dir, 'foo')
def testInstanceHomeLegacySources(self):
import os
import App.FindHomes
import Globals # for data
import __builtin__
self.setconfig(instancehome='foo')
self.assertEqual(os.environ.get('INSTANCE_HOME'), 'foo')
self.assertEqual(App.FindHomes.INSTANCE_HOME, 'foo')
self.assertEqual(__builtin__.INSTANCE_HOME, 'foo')
self.assertEqual(Globals.INSTANCE_HOME, 'foo')
def testDebugModeLegacySources(self):
import Globals # for data
self.setconfig(debug_mode=True)
self.assertEqual(Globals.DevelopmentMode, True)
self.setconfig(debug_mode=False)
self.assertEqual(Globals.DevelopmentMode, False)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SetConfigTests))
return suite
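# Assumed invocation via the classic Zope test pattern:
# unittest.main(defaultTest='test_suite')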
| 35.146341 | 78 | 0.644344 | 307 | 2,882 | 5.947883 | 0.381107 | 0.082147 | 0.078861 | 0.072289 | 0.290252 | 0.203724 | 0.133625 | 0.083242 | 0.083242 | 0.083242 | 0 | 0.002651 | 0.214781 | 2,882 | 81 | 79 | 35.580247 | 0.804242 | 0.200555 | 0 | 0.25 | 0 | 0 | 0.040433 | 0 | 0 | 0 | 0 | 0 | 0.192308 | 1 | 0.153846 | false | 0 | 0.25 | 0 | 0.480769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bd9973a07e3156ff4a84bcb99cd3ef64fb4cedb | 2,066 | py | Python | surface/orig8_corpus_stats.py | megodoonch/birdsong | 582e7ddecf6c9c1b75f17418097f7bcbf6784d31 | [
"BSD-3-Clause-Clear"
] | null | null | null | surface/orig8_corpus_stats.py | megodoonch/birdsong | 582e7ddecf6c9c1b75f17418097f7bcbf6784d31 | [
"BSD-3-Clause-Clear"
] | null | null | null | surface/orig8_corpus_stats.py | megodoonch/birdsong | 582e7ddecf6c9c1b75f17418097f7bcbf6784d31 | [
"BSD-3-Clause-Clear"
] | null | null | null |
import random
import pandas as pd
import compare_bigrams
import sys
import os
import as_numeric
import quantify_copying
# The file that contains the base corpus
INPUT_FILE = "../corpus/cath8.txt"
# Each line should be a sentence, with the words separated by spaces
# Read the input file and obtain a list of list of strings, i.e. the list of sentences
f = open(INPUT_FILE,'r')
lines = f.readlines()
f.close()
sentences = [ l.strip().split(" ") for l in lines ]
# Get a list of the bigrams
unigrams_unpermuted = compare_bigrams.unigram_counts( sentences )
bigrams_unpermuted = compare_bigrams.bigram_counts( sentences )
#print "The corpus has %i words, originally in %s sentences"%(len(flat_sentences),len(sentences))
# Data frame that will collect the corpus statistics
corpus_stats = pd.DataFrame()
if True:
corpus = sentences[:] # make a copy of cath8, then compare it to itself
# Count the bigrams of our permuted corpus
bigrams_permuted = compare_bigrams.bigram_counts( corpus )
# Get some statistics on the comparison of bigrams
generated_corpus_bigrams = compare_bigrams.bigram_counts ( corpus )
generated_corpus_unigrams = compare_bigrams.unigram_counts( corpus )
comp = compare_bigrams.compare_Ngrams( bigrams_unpermuted, generated_corpus_bigrams )
comp = dict([ ("bigrams.%s"%k,v) for (k,v) in comp.items() ])
comp["permutation"]=0
unicomp = compare_bigrams.compare_Ngrams( unigrams_unpermuted, generated_corpus_unigrams )
unicomp = dict([ ("unigrams.%s"%k,v) for (k,v) in unicomp.items() ])
comp = {**comp,**unicomp} # merge the dicts
comp["n.unique.bigrams"] = len(generated_corpus_bigrams.keys())
comp["n.unique.unigrams"] = len(generated_corpus_unigrams.keys())
if True:
# Quantify the copying
cop = quantify_copying.corpus(corpus)
corpus_stats = pd.concat([corpus_stats,
pd.DataFrame({**comp,**cop},
index=[1])])
corpus_stats.to_csv('interim/cath8_stats.csv')
| 28.694444 | 97 | 0.690707 | 279 | 2,066 | 4.956989 | 0.376344 | 0.080983 | 0.043384 | 0.056399 | 0.060738 | 0.014461 | 0.014461 | 0 | 0 | 0 | 0 | 0.003056 | 0.208132 | 2,066 | 71 | 98 | 29.098592 | 0.842298 | 0.252662 | 0 | 0.058824 | 0 | 0 | 0.071195 | 0.015023 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.205882 | 0 | 0.205882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bde73b9764fb0136ef3271756f67c6a2f1b95a4 | 2,406 | py | Python | update.py | cznull/dlpkuhole2 | 11f63f908749c6e0e643449c93a33644caa405dc | [
"MIT"
] | 2 | 2019-10-25T12:22:49.000Z | 2019-10-25T13:22:11.000Z | update.py | cznull/dlpkuhole2 | 11f63f908749c6e0e643449c93a33644caa405dc | [
"MIT"
] | null | null | null | update.py | cznull/dlpkuhole2 | 11f63f908749c6e0e643449c93a33644caa405dc | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
from utils import (
get_page,
internet_on,
my_log,
post_dict_to_list,
read_posts_dict,
write_posts,
)
cdname = os.path.dirname(__file__)
filename = os.path.join(cdname, 'pkuhole.txt')
filename_bak = os.path.join(cdname, 'pkuholebak.txt')
if __name__ == '__main__':
if not internet_on():
my_log('No internet')
exit()
if os.path.exists(os.path.join(cdname, 'update.flag')):
my_log('Update is already running')
exit()
with open(os.path.join(cdname, 'update.flag'), 'w', encoding='utf-8') as g:
g.write(str(os.getpid()))
my_log('Begin read posts')
post_dict = read_posts_dict(filename)
my_log('End read posts')
my_log('Begin write bak')
write_posts(filename_bak, post_dict_to_list(post_dict))
my_log('End write bak')
if post_dict:
min_pid = max(post_dict)
else:
# May change
min_pid = 32859
my_log('Min pid: {}'.format(min_pid))
try:
page = 1
while True:
my_log('Page {}'.format(page))
if page % 100 == 0:
my_log('Begin write posts')
write_posts(filename, post_dict_to_list(post_dict))
my_log('End write posts')
finished = get_page(post_dict, page, min_pid)
if finished:
break
page += 1
except Exception as e:
my_log('{}'.format(e))
my_log('Begin write posts at error')
write_posts(filename, post_dict_to_list(post_dict))
my_log('End write posts at error')
os.remove(os.path.join(cdname, 'update.flag'))
exit()
if os.path.exists(os.path.join(cdname, 'split.flag')):
my_log('split.flag found')
with open(
os.path.join(cdname, 'split.flag'), 'r',
encoding='utf-8') as f:
max_timestamp = int(f.read())
my_log('Begin write posts')
write_posts(filename, [
post for post in post_dict_to_list(post_dict)
if post['timestamp'] >= max_timestamp
])
my_log('End write posts')
os.remove(os.path.join(cdname, 'split.flag'))
else:
my_log('Begin write posts')
write_posts(filename, post_dict_to_list(post_dict))
my_log('End write posts')
os.remove(os.path.join(cdname, 'update.flag'))
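# Flag-file protocol implemented above: 'update.flag' holds this process's PID
# and guards against concurrent runs; 'split.flag', when present, holds a
# timestamp below which posts are dropped before the final write.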
| 26.152174 | 79 | 0.582294 | 332 | 2,406 | 3.978916 | 0.253012 | 0.071915 | 0.06813 | 0.109008 | 0.458743 | 0.443603 | 0.355034 | 0.355034 | 0.321726 | 0.238456 | 0 | 0.008182 | 0.288861 | 2,406 | 91 | 80 | 26.43956 | 0.763881 | 0.011638 | 0 | 0.231884 | 0 | 0 | 0.170034 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.028986 | 0 | 0.028986 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2be01fdba7fd4a0fd0bcbe156a75252f384d72ef | 18,436 | py | Python | RECOVERED_FILES/root/ez-segway/simulator/ez_tracer.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | [
"Apache-2.0"
] | null | null | null | RECOVERED_FILES/root/ez-segway/simulator/ez_tracer.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | [
"Apache-2.0"
] | null | null | null | RECOVERED_FILES/root/ez-segway/simulator/ez_tracer.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | [
"Apache-2.0"
] | 1 | 2021-05-08T02:23:00.000Z | 2021-05-08T02:23:00.000Z | import argparse
import os
from misc import logger
from misc import constants
from collections import defaultdict, OrderedDict
import numpy
import re
class ExecutionResult:
def __init__(self):
self.test_number = 0
        self.execution_time = ExecutionTime(None, None)  # ExecutionTime requires topo and method
class ExecutionTime:
def __init__(self, topo, method):
self.test_number = 0
self.total_time = 0
self.method = method
self.topo = topo
self.global_computation = 0
self.local_computation = 0
self.local_update_only = 0
self.finishing_time_from_last_sending = 0
self.time_to_send_all_first_msgs = 0
self.rule_overheads = 0
self.total_rules = 0
self.deadlock = False
self.time_using_new_paths = {}
def __str__(self):
# arg_method,arg_topology,update_time,sw_time,ctr_time,update_only
return "%s\t%s\t%s\t%s\t%s\t%s" % (self.method, self.topo, self.total_time,
self.local_computation, self.global_computation,
self.local_update_only)
def __repr__(self):
return self.__str__()
class MessageOverhead:
def __init__(self):
self.false_positive = False
self.no_msgs_when_deadlock = 0
self.no_msgs_when_split = 0
self.stuck_into_deadlock_when_split = False
self.stuck_into_deadlock_when_skip = False
def __str__(self):
return "%d\t%d\t%s" % (self.no_msgs_when_deadlock, self.no_msgs_when_split, self.false_positive)
def __repr__(self):
return self.__str__()
class NewPathPair:
def __init__(self):
self.p2p = 0
self.cen = 0
def __str__(self):
return "cen:%d\tp2p:%d" % (self.cen, self.p2p)
def __repr__(self):
return self.__str__()
class RuleOverhead:
def __init__(self):
self.no_of_split = 0
self.total_no_of_flow = 0
self.stuck_into_deadlock_when_split = False
self.stuck_into_deadlock_when_skip = False
# self.rules_when_deadlock = 0
# self.rules_when_split = 0
def __str__(self):
return "%d\t%d\t%s" % (self.no_of_split, self.total_no_of_flow,
(self.stuck_into_deadlock_when_split and
not self.stuck_into_deadlock_when_skip))
def __repr__(self):
return self.__str__()
class EzTracer:
def __init__(self):
self.execution_time_by_test_number = defaultdict()
self.rule_overheads = {}
self.execution_results = defaultdict()
self.cdf_time_using_new_path = defaultdict()
self.is_reading_deadlock_exe = False
self.log = logger.getLogger("tracer", constants.LOG_LEVEL)
self.number_of_rules_by_test_number = defaultdict()
self.message_overheads = {}
self.current_method = ''
self.current_topo = ''
self.counters = defaultdict()
def parse_execution_line(self, line):
strs = re.split("[:\t]+", line.strip('\n'))
if len(strs) < 3:
return None, None
labels = strs[0].split('-')
if len(labels) > 2:
return None, None
test_number = int(labels[1])
exe_time = self.get_exe_time(test_number)
exe_time.local_computation = float(strs[2])
exe_time.global_computation = float(strs[3])
exe_time.global_computation += exe_time.local_computation
exe_time.total_time = float(strs[12])# + exe_time.global_computation
exe_time.local_computation = 0
exe_time.local_update_only = float(strs[4])
exe_time.finishing_time_from_last_sending = float(strs[10])
exe_time.method = self.current_method
exe_time.topo = self.current_topo
exe_time.deadlock = (strs[11] == "True")
exe_time.rule_overheads = int(strs[5])
exe_time.total_rules = int(strs[6])
self.update_exe_time(exe_time, test_number)
msg_overhead = None
if not self.message_overheads.has_key(test_number):
if (self.is_reading_deadlock_exe and strs[7] == 'True') \
or (not self.is_reading_deadlock_exe):
self.message_overheads[test_number] = MessageOverhead()
msg_overhead = self.message_overheads[test_number]
else:
msg_overhead = self.message_overheads[test_number]
if msg_overhead is not None:
# self.log.info("Test number %d: %s" % (test_number, strs))
# self.log.info("Test number %d: %s" % (test_number, msg_overhead))
if self.is_reading_deadlock_exe:
msg_overhead.no_msgs_when_deadlock = float(strs[8])
msg_overhead.stuck_into_deadlock_when_skip = True if strs[7] == "True" else False
else:
msg_overhead.no_msgs_when_split = float(strs[8])
msg_overhead.stuck_into_deadlock_when_split = True if strs[7] == "True" else False
return exe_time, msg_overhead
def parse_centralized_execution_line(self, line):
strs = re.split("[:\t]+", line.strip('\n'))
if len(strs) < 3:
return None
# if line.find("deadlock"):
# labels = strs[0].split('-')
# test_number = int(labels[1])
# key = (self.current_topo, self.current_method)
# exe_time = self.execution_time_by_test_number[test_number][key]
# exe_time.deadlock = True
# return None
# else:
# return None
labels = strs[0].split('-')
if len(labels) > 2:
return None
test_number = int(labels[1])
exe_time = self.get_exe_time(test_number)
exe_time.local_computation = float(strs[2])
exe_time.global_computation = float(strs[3])
exe_time.total_time = float(strs[5]) + exe_time.global_computation
#exe_time.local_update_only = float(strs[4])
#exe_time.finishing_time_from_last_sending = float(strs[5])
exe_time.local_update_only = float(strs[5])
exe_time.time_to_send_all_first_msgs = float(strs[6])
exe_time.method = self.current_method
exe_time.topo = self.current_topo
# self.log.info("{0} - str[8]: {1}".format(test_number, strs[8]))
exe_time.deadlock = (strs[8] == "True")
self.update_exe_time(exe_time, test_number)
return exe_time
def update_exe_time(self, exe_time, test_number):
if not self.execution_time_by_test_number.has_key(test_number):
self.execution_time_by_test_number[test_number] = {}
self.execution_time_by_test_number[test_number][(self.current_topo, self.current_method)] = exe_time
def parse_split_overhead_line(self, strs):
i = 5
test_number = int(strs[1])
while i < len(strs):
overhead = RuleOverhead()
overhead.no_of_split = int(strs[i])
overhead.total_no_of_flow = int(strs[i+1])
if not self.rule_overheads.has_key(test_number):
self.rule_overheads[test_number] = overhead
i += 2
def parse_number_of_rules_line(self, strs):
test_number = int(strs[1])
sw_number = int(strs[3])
if not self.number_of_rules_by_test_number.has_key(test_number):
self.number_of_rules_by_test_number[test_number] = {}
self.number_of_rules_by_test_number[test_number][sw_number] = int(strs[5])
def parse_time_using_new_path_line(self, strs, exe_time):
test_number = int(strs[1])
i = 3 if exe_time.method == 'cen' else 5
while i < len(strs):
time_slot = int(float(strs[i + 1]))
if not exe_time.time_using_new_paths.has_key(time_slot):
exe_time.time_using_new_paths[time_slot] = 1
else:
exe_time.time_using_new_paths[time_slot] += 1
i += 2
def get_exe_time(self, test_number):
key = (self.current_topo, self.current_method)
if self.execution_time_by_test_number.has_key(test_number) and \
self.execution_time_by_test_number[test_number].has_key(key):
exe_time = self.execution_time_by_test_number[test_number][key]
else:
exe_time = ExecutionTime(self.current_topo, self.current_method)
exe_time.test_number = test_number
return exe_time
def parse_line_of_every_switch(self, line):
line = line.replace(" ", "")
strs = filter(None, re.split("[:|\-\t\[\]\n]+", line))
if len(strs) < 3 or strs[2] != "sw":
return
test_number = int(strs[1])
exe_time = self.get_exe_time(test_number)
self.update_exe_time(exe_time, test_number)
if strs[4] == "new_path":
self.parse_time_using_new_path_line(strs, exe_time)
elif strs[4] == "split":
strs = filter(None, re.split("[:|\-\t\[\]\,\n]+", line))
self.parse_split_overhead_line(strs)
elif strs[4] == "no_rules":
self.parse_number_of_rules_line(strs)
def parse_cen_line_of_every_switch(self, line):
line = line.replace(" ", "")
strs = filter(None, re.split("[:|\-\t\[\]\n]+", line))
# self.log.info(strs)
if len(strs) < 3:
return
if strs[2] == "new_path":
test_number = int(strs[1])
exe_time = self.get_exe_time(test_number)
self.parse_time_using_new_path_line(strs, exe_time)
def read(self, filename, skip_deadlock):
self.is_reading_deadlock_exe = skip_deadlock
trace_reader = open(filename, 'rb')
line = trace_reader.readline()
while line:
if line.startswith("ez-segway: read") or line.startswith("topology:"):
line = trace_reader.readline()
continue
line = line.replace("ez-segway: ", "")
exe_time, rule_overhead = self.parse_execution_line(line)
if exe_time is None:
self.parse_line_of_every_switch(line)
line = trace_reader.readline()
# for key in self.cdf_time_using_new_path.keys():
# self.cdf_time_using_new_path[key] = \
# OrderedDict(sorted(self.cdf_time_using_new_path[key].items(), key=lambda t: t[0]))
# self.log.info(self.cdf_time_using_new_path)
trace_reader.close()
def read_centralized_trace(self, filename):
self.is_reading_deadlock_exe = True
trace_reader = open(filename, 'rb')
line = trace_reader.readline()
while line:
if line.startswith("cen_result"):
line = line.replace("cen_result: ", "")
exe_time = self.parse_centralized_execution_line(line)
# self.log.info("exe_time=%s" % str(exe_time))
if exe_time is None:
self.parse_cen_line_of_every_switch(line)
line = trace_reader.readline()
# for key in self.cdf_time_using_new_path.keys():
# self.cdf_time_using_new_path[key] = \
# OrderedDict(sorted(self.cdf_time_using_new_path[key].items(), key=lambda t: t[0]))
# self.log.info(self.cdf_time_using_new_path)
trace_reader.close()
def write_rules_overhead(self):
# trace_writer = open(folder + "/overhead_n.log", 'w')
rule_overheads = []
total_rules = []
for key in self.execution_time_by_test_number:
for exe_time in self.execution_time_by_test_number[key].values():
if exe_time.deadlock and exe_time.method == 'split':
rule_overheads.append(exe_time.rule_overheads)
total_rules.append(exe_time.total_rules)
max_rule_overhead = numpy.max(rule_overheads)
average_rule_overhead = numpy.mean(rule_overheads)
std = numpy.std(rule_overheads)
max_total_rules = numpy.max(total_rules)
self.log.info("Max rule overhead: %d in %d cases" % (max_rule_overhead, len(rule_overheads)))
self.log.info("Sum overhead: %d" % sum(rule_overheads))
self.log.info("Average rule overhead: %s +/- %s" % (average_rule_overhead, std))
self.log.info("Max total rules: %d" % max_total_rules)
def write_message_overhead(self, folder):
trace_writer = open(folder + "/overhead_n.log", 'w')
str_overheads = ""
overheads = []
total_message_no_overheads = []
for key in self.message_overheads.keys():
overhead = self.message_overheads[key]
if overhead.stuck_into_deadlock_when_skip or \
overhead.stuck_into_deadlock_when_split:
# self.log.info(overhead)
diff = overhead.no_msgs_when_split - overhead.no_msgs_when_deadlock
str_overheads += "Test number %d:%d\t%d\t%s\t%s\n" \
% (key, diff,
overhead.no_msgs_when_deadlock,
overhead.stuck_into_deadlock_when_split,
overhead.stuck_into_deadlock_when_skip)
overheads.append(diff)
total_message_no_overheads.append(overhead.no_msgs_when_deadlock)
average = numpy.mean(overheads)
max_overhead = numpy.max(overheads)
std = numpy.std(overheads)
average_total = numpy.mean(total_message_no_overheads)
max_total = numpy.max(total_message_no_overheads)
std_total = numpy.std(total_message_no_overheads)
str_overheads += "Overhead average: %s +/- %s\n" % (average, std)
str_overheads += "Max overhead: %d\n" % max_overhead
str_overheads += "Sum overhead: %d of %d cases\n" % (sum(overheads), len(overheads))
str_overheads += "Total number average: %s +/- %s\n" % (average_total, std_total)
str_overheads += "Max message: %d\n" % max_total
self.log.info(str_overheads)
trace_writer.write(str_overheads)
trace_writer.close()
# trace_writer = open(folder + "/false_positive.log", 'w')
# total_split_cases = len(self.rule_overheads)
# false_positive_count = sum(1 for item in self.message_overheads.values() if item.false_positive == True)
# str_false_positive_res = "False positive / Deadlock: %d / %d" % (false_positive_count, total_split_cases)
# trace_writer.write(str_false_positive_res)
# trace_writer.close()
def write_execution_time(self, filename):
self.log.info("output to file: %s" % filename)
trace_writer = open(filename, 'w')
str_output = "arg_method\targ_topology\tupdate_time\tsw_time\tctr_time\tupdate_only\n"
for key in self.execution_time_by_test_number:
for exe_time in self.execution_time_by_test_number[key].values():
if (not exe_time.deadlock and exe_time.method == 'ez') or exe_time.method == 'cen':
str_output += str(exe_time) + '\n'
trace_writer.write(str_output)
trace_writer.close()
def write_cdf_of_a_test_number(self, folder, test_number):
trace_writer = open(folder + ("/time_new_path/time_using_new_path_%d.log" % test_number), 'w')
cdf = self.cdf_time_using_new_path[test_number]
sum = 0
max_key = max(cdf.keys())
cdf_res = "arg_method\tcount\n"
for key in cdf.keys():
if cdf[key].p2p > 0:
for c in xrange(0, cdf[key].p2p):
cdf_res += "ez\t%d\n" % (key)
if cdf[key].cen > 0:
for c in xrange(0, cdf[key].cen):
cdf_res += "cen\t%d\n" % (key)
trace_writer.write(cdf_res)
trace_writer.close()
def write_new_cdf_of_a_test_number(self, folder, test_number, topo):
trace_writer = open(folder + ("/time_new_path/time_using_new_path_%d.log" % test_number), 'w')
cdf_res = "arg_method\tcount\n"
exe_list = self.execution_time_by_test_number[test_number].values()
for exe_time in exe_list:
if ((not exe_time.deadlock and exe_time.method == 'ez')
or exe_time.method == 'cen') and exe_time.topo == topo:
#if exe_time.topo == 'ez':
cdf = exe_time.time_using_new_paths
# self.log.info("%s: %s" % (exe_time.method, cdf.values()))
for key in cdf.keys():
if cdf[key] > 0:
for c in xrange(0, cdf[key]):
cdf_res += "%s\t%d\n" % (exe_time.method,key)
trace_writer.write(cdf_res)
trace_writer.close()
def write_cdf_of_new_path(self, folder):
for key in self.cdf_time_using_new_path.keys():
self.write_cdf_of_a_test_number(folder, key)
def write_new_cdf_of_new_path(self, folder, topo):
for key in self.execution_time_by_test_number.keys():
self.write_new_cdf_of_a_test_number(folder, key, topo)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ez-segway sim.')
parser.add_argument('--logFolder', nargs='?',
type=str, default="logs")
parser.add_argument('--logFile', nargs='?',
type=str, default="stdout")
parser.add_argument('--dataFolder', nargs='?',
type=str, default="data")
args = parser.parse_args()
directory = "../%s" % (args.logFolder)
if not os.path.exists(directory):
os.makedirs(directory)
logger.init("../" + args.logFolder + "/" + args.logFile, constants.LOG_LEVEL)
tracer = EzTracer()
methods = ['cen','ez']
topos = ['b4', 'i2']
for topo in topos:
for method in methods:
tracer.current_method = method
tracer.current_topo = topo
filename = "../{0}/{1}-{2}.log".format(args.dataFolder, method, topo)
if method == 'cen':
tracer.read_centralized_trace(filename)
else:
tracer.read(filename, False)
output_filename = "../{0}/update_time.log".format(args.dataFolder)
tracer.write_execution_time(output_filename)
for topo in topos:
folder = "{0}/{1}-cdf".format(args.dataFolder, topo)
tracer.write_new_cdf_of_new_path("../" + folder, topo)
| 41.804989 | 115 | 0.612118 | 2,419 | 18,436 | 4.319554 | 0.083506 | 0.055603 | 0.024117 | 0.0245 | 0.550579 | 0.469232 | 0.403962 | 0.350943 | 0.297253 | 0.261748 | 0 | 0.007726 | 0.276904 | 18,436 | 440 | 116 | 41.9 | 0.776086 | 0.095845 | 0 | 0.319767 | 0 | 0.002907 | 0.055201 | 0.011846 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0 | 0.020349 | 0.023256 | 0.180233 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2be1a1fa8ec7479aeac3b69430ba8d4a6a93005c | 2,406 | py | Python | tests/test_potential_of_layers.py | calliope-project/solar-and-wind-potentials | a17000028b1c391d37b415859697aa71aea4affe | [
"MIT"
] | 8 | 2020-10-28T14:03:39.000Z | 2022-01-08T16:38:42.000Z | tests/test_potential_of_layers.py | calliope-project/solar-and-wind-potentials | a17000028b1c391d37b415859697aa71aea4affe | [
"MIT"
] | 18 | 2020-10-28T08:58:01.000Z | 2021-05-14T16:33:23.000Z | tests/test_potential_of_layers.py | timtroendle/solar-and-wind-potentials | 8b15f3d20a47ba3631f03026a4263ecd1fcbfd58 | [
"MIT"
] | 1 | 2020-12-07T03:13:28.000Z | 2020-12-07T03:13:28.000Z | """Test whether potential estimations between layers are similar."""
import os
from pathlib import Path
import pytest
import pandas as pd
from renewablepotentialslib.eligibility import Potential
TOLERANCE = 0.005 # 0.5%
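# Relative tolerance handed to pytest.approx in the assertions below.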
BUILD_DIR = Path(os.path.abspath(__file__)).parent.parent / "build"
PATH_TO_CONTINENTAL_POTENTIALS = BUILD_DIR / "continental" / "technical-potential" / "potentials.csv"
PATH_TO_NATIONAL_POTENTIALS = BUILD_DIR / "national" / "technical-potential" / "potentials.csv"
PATH_TO_REGIONAL_POTENTIALS = BUILD_DIR / "regional" / "technical-potential" / "potentials.csv"
PATH_TO_MUNICIPAL_POTENTIALS = BUILD_DIR / "municipal" / "technical-potential" / "potentials.csv"
@pytest.mark.skipif(not PATH_TO_CONTINENTAL_POTENTIALS.exists(), reason="Continental potentials not available.")
@pytest.mark.skipif(not PATH_TO_NATIONAL_POTENTIALS.exists(), reason="National potentials not available.")
@pytest.mark.parametrize(
"potential", Potential
)
def test_continental_to_national(potential):
continental = pd.read_csv(PATH_TO_CONTINENTAL_POTENTIALS, index_col=0).sum()
national = pd.read_csv(PATH_TO_NATIONAL_POTENTIALS, index_col=0).sum()
assert continental[str(potential)] == pytest.approx(national[str(potential)], TOLERANCE)
@pytest.mark.skipif(not PATH_TO_CONTINENTAL_POTENTIALS.exists(), reason="Continental potentials not available.")
@pytest.mark.skipif(not PATH_TO_REGIONAL_POTENTIALS.exists(), reason="Regional potentials not available.")
@pytest.mark.parametrize(
"potential", Potential
)
def test_continental_to_regional(potential):
continental = pd.read_csv(PATH_TO_CONTINENTAL_POTENTIALS, index_col=0).sum()
regional = pd.read_csv(PATH_TO_REGIONAL_POTENTIALS, index_col=0).sum()
assert continental[str(potential)] == pytest.approx(regional[str(potential)], TOLERANCE)
@pytest.mark.skipif(not PATH_TO_CONTINENTAL_POTENTIALS.exists(), reason="Continental potentials not available.")
@pytest.mark.skipif(not PATH_TO_MUNICIPAL_POTENTIALS.exists(), reason="Municipal potentials not available.")
@pytest.mark.parametrize(
"potential", Potential
)
def test_continental_to_municipal(potential):
continental = pd.read_csv(PATH_TO_CONTINENTAL_POTENTIALS, index_col=0).sum()
municipal = pd.read_csv(PATH_TO_MUNICIPAL_POTENTIALS, index_col=0).sum()
assert continental[str(potential)] == pytest.approx(municipal[str(potential)], TOLERANCE)
| 45.396226 | 112 | 0.790524 | 303 | 2,406 | 6.019802 | 0.168317 | 0.052632 | 0.044408 | 0.103618 | 0.691886 | 0.623355 | 0.5625 | 0.5625 | 0.5625 | 0.5625 | 0 | 0.0055 | 0.093101 | 2,406 | 52 | 113 | 46.269231 | 0.830431 | 0.028263 | 0 | 0.315789 | 0 | 0 | 0.17753 | 0 | 0 | 0 | 0 | 0 | 0.078947 | 1 | 0.078947 | false | 0 | 0.131579 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2be7b6a4c3ecbf9fa56f76e517a0e77803b09915 | 7,231 | py | Python | pypeit/tests/tstutils.py | baileyji/PypeIt | eea71304f4a4bcf70148ea686967ed699dc36dfb | [
"BSD-3-Clause"
] | null | null | null | pypeit/tests/tstutils.py | baileyji/PypeIt | eea71304f4a4bcf70148ea686967ed699dc36dfb | [
"BSD-3-Clause"
] | null | null | null | pypeit/tests/tstutils.py | baileyji/PypeIt | eea71304f4a4bcf70148ea686967ed699dc36dfb | [
"BSD-3-Clause"
] | null | null | null | """
Odds and ends in support of tests
"""
import os
import pytest
import numpy as np
import copy
from astropy import time
from pypeit import arcimage
from pypeit import traceslits
from pypeit import wavecalib
from pypeit import wavetilts
from pypeit.masterframe import MasterFrame
from pypeit.core.wavecal import waveio
from pypeit.spectrographs.util import load_spectrograph
from pypeit.metadata import PypeItMetaData
# Create a decorator for tests that require the PypeIt dev suite
dev_suite_required = pytest.mark.skipif(os.getenv('PYPEIT_DEV') is None,
reason='test requires dev suite')
cooked_required = pytest.mark.skipif(os.getenv('PYPEIT_DEV') is None or
not os.path.isdir(os.path.join(os.getenv('PYPEIT_DEV'), 'Cooked')),
reason='no dev-suite cooked directory')
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
def dummy_fitstbl(nfile=10, spectro_name='shane_kast_blue', directory='', notype=False):
"""
Generate a dummy fitstbl for testing
Parameters
----------
nfile : int, optional
Number of files to mimic
spectro_name : str, optional
Name of spectrograph to mimic
notype : bool (optional)
If True, do not add image type info to the fitstbl
Returns
-------
fitstbl : PypeItMetaData
"""
fitsdict = {}
fitsdict['index'] = np.arange(nfile)
fitsdict['directory'] = [directory]*nfile
fitsdict['filename'] = ['b{:03d}.fits.gz'.format(i) for i in range(nfile)]
# TODO: The below will fail at 60
dates = ['2015-01-23T00:{:02d}:11.04'.format(i) for i in range(nfile)]
ttime = time.Time(dates, format='isot')
fitsdict['mjd'] = ttime.mjd
fitsdict['target'] = ['Dummy']*nfile
fitsdict['ra'] = ['00:00:00']*nfile
fitsdict['dec'] = ['+00:00:00']*nfile
fitsdict['exptime'] = [300.] * nfile
fitsdict['dispname'] = ['600/4310'] * nfile
fitsdict['dichroic'] = ['560'] * nfile
fitsdict["binning"] = ['1,1']*nfile
fitsdict["airmass"] = [1.0]*nfile
if spectro_name == 'shane_kast_blue':
fitsdict['numamplifiers'] = [1] * nfile
# Lamps
for i in range(1,17):
fitsdict['lampstat{:02d}'.format(i)] = ['off'] * nfile
fitsdict['exptime'][0] = 0 # Bias
fitsdict['lampstat06'][1] = 'on' # Arc
fitsdict['exptime'][1] = 30 # Arc
fitsdict['lampstat01'][2] = 'on' # Trace, pixel, slit flat
fitsdict['lampstat01'][3] = 'on' # Trace, pixel, slit flat
fitsdict['exptime'][2] = 30 # flat
fitsdict['exptime'][3] = 30 # flat
fitsdict['ra'][4] = '05:06:36.6' # Standard
fitsdict['dec'][4] = '52:52:01.0'
fitsdict['airmass'][4] = 1.2
fitsdict['ra'][5] = '07:06:23.45' # Random object
fitsdict['dec'][5] = '+30:20:50.5'
fitsdict['decker'] = ['0.5 arcsec'] * nfile
# arrays
for k in fitsdict.keys():
fitsdict[k] = np.array(fitsdict[k])
spectrograph = load_spectrograph(spectro_name)
fitstbl = PypeItMetaData(spectrograph, spectrograph.default_pypeit_par(), data=fitsdict)
fitstbl['instrume'] = spectro_name
type_bits = np.zeros(len(fitstbl), dtype=fitstbl.type_bitmask.minimum_dtype())
# Image typing
if not notype:
if spectro_name == 'shane_kast_blue':
#fitstbl['sci_ID'] = 1 # This links all the files to the science object
type_bits[0] = fitstbl.type_bitmask.turn_on(type_bits[0], flag='bias')
type_bits[1] = fitstbl.type_bitmask.turn_on(type_bits[1], flag='arc')
type_bits[2:4] = fitstbl.type_bitmask.turn_on(type_bits[2:4], flag=['pixelflat', 'trace'])
type_bits[4] = fitstbl.type_bitmask.turn_on(type_bits[4], flag='standard')
type_bits[5:] = fitstbl.type_bitmask.turn_on(type_bits[5:], flag='science')
fitstbl.set_frame_types(type_bits)
# Calibration groups
cfgs = fitstbl.unique_configurations(ignore_frames=['bias', 'dark'])
fitstbl.set_configurations(cfgs)
fitstbl.set_calibration_groups(global_frames=['bias', 'dark'])
return fitstbl
# TODO: Need to split this into functions that do and do not require
# cooked. We should remove the get_spectrograph option.
def load_kast_blue_masters(aimg=False, tslits=False, tilts=False, datasec=False, wvcalib=False):
"""
Load up the set of shane_kast_blue master frames
Args:
aimg:
tslits:
tilts:
datasec:
wvcalib:
Returns:
"""
spectrograph = load_spectrograph('shane_kast_blue')
spectrograph.naxis = (2112,350) # Image shape with overscan
master_dir = os.path.join(os.getenv('PYPEIT_DEV'), 'Cooked', 'Shane_Kast_blue')
# master_dir = root_path+'_'+spectrograph.spectrograph
reuse_masters = True
# Load up the Masters
ret = []
# if get_spectrograph:
# ret.append(spectrograph)
master_key = 'A_1_01'
if aimg:
AImg = arcimage.ArcImage(spectrograph, master_key=master_key, master_dir=master_dir,
reuse_masters=reuse_masters)
msarc = AImg.load()
ret.append(msarc)
if tslits:
trace_file = os.path.join(master_dir, MasterFrame.construct_file_name('Trace', master_key))
tslits_dict, mstrace = traceslits.TraceSlits.load_from_file(trace_file)
ret.append(tslits_dict)
ret.append(mstrace)
if tilts:
tilts_file = os.path.join(master_dir, MasterFrame.construct_file_name('Tilts', master_key))
tilts_dict = wavetilts.WaveTilts.load_from_file(tilts_file)
ret.append(tilts_dict)
if datasec:
datasec_img = spectrograph.get_datasec_img(data_path('b1.fits.gz'), 1)
ret.append(datasec_img)
if wvcalib:
calib_file = os.path.join(master_dir,
MasterFrame.construct_file_name('WaveCalib', master_key,
file_format='json'))
wv_calib = waveio.load_wavelength_calibration(calib_file)
ret.append(wv_calib)
# Return
return ret
def instant_traceslits(mstrace_file, det=None):
"""
Instantiate a TraceSlits object from the master file
    The loaded tslits_dict is set as an attribute
Args:
mstrace_file (str):
det (int, optional):
Returns:
Spectrograph, TraceSlits:
"""
# Load
tslits_dict, mstrace = traceslits.TraceSlits.load_from_file(mstrace_file)
# Instantiate
spectrograph = load_spectrograph(tslits_dict['spectrograph'])
par = spectrograph.default_pypeit_par()
msbpm = spectrograph.bpm(shape=mstrace.shape, det=det)
#binning = tslits_dict['binspectral'], tslits_dict['binspatial']
traceSlits = traceslits.TraceSlits(spectrograph, par['calibrations']['slits'],
msbpm=msbpm)
traceSlits.mstrace = copy.deepcopy(mstrace)
traceSlits.tslits_dict = copy.deepcopy(tslits_dict)
return spectrograph, traceSlits
| 35.79703 | 102 | 0.637395 | 898 | 7,231 | 4.973274 | 0.285078 | 0.021496 | 0.015674 | 0.024631 | 0.180699 | 0.162785 | 0.138603 | 0.106807 | 0.055307 | 0.055307 | 0 | 0.025506 | 0.240907 | 7,231 | 201 | 103 | 35.975124 | 0.788122 | 0.185037 | 0 | 0.017699 | 0 | 0 | 0.111208 | 0.004553 | 0 | 0 | 0 | 0.004975 | 0 | 1 | 0.035398 | false | 0 | 0.115044 | 0 | 0.185841 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2be7c4031f35d62bfd049a33b25b2f6481558602 | 1,041 | py | Python | calaccess_processed/admin/tracking.py | dwillis/django-calaccess-processed-data | f228252df1b390967468b41d336839f1bd9ca192 | [
"MIT"
] | 1 | 2021-01-13T12:06:25.000Z | 2021-01-13T12:06:25.000Z | calaccess_processed/admin/tracking.py | anthonyjpesce/django-calaccess-processed-data | d99b461abb7b7f7973f90b49634c9262efcbe7bf | [
"MIT"
] | null | null | null | calaccess_processed/admin/tracking.py | anthonyjpesce/django-calaccess-processed-data | d99b461abb7b7f7973f90b49634c9262efcbe7bf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Custom administration panels for tracking models.
"""
from __future__ import unicode_literals
from django.contrib import admin
from calaccess_processed import models
from calaccess_raw.admin.base import BaseAdmin
@admin.register(models.ProcessedDataVersion)
class ProcessedDataVersionAdmin(BaseAdmin):
"""
Custom admin for the ProcessedDataVersion model.
"""
list_display = (
"id",
"raw_version",
"process_start_datetime",
"process_finish_datetime",
"pretty_zip_size",
)
list_display_links = ('process_start_datetime',)
list_filter = ("process_finish_datetime",)
@admin.register(models.ProcessedDataFile)
class ProcessedDataFileAdmin(BaseAdmin):
"""
Custom admin for the ProcessedDataFile model.
"""
list_display = (
"id",
"version",
"file_name",
"records_count",
)
list_display_links = ('id', 'file_name',)
list_filter = ("version__process_start_datetime",)
| 25.390244 | 54 | 0.683958 | 106 | 1,041 | 6.40566 | 0.481132 | 0.064801 | 0.088365 | 0.067747 | 0.076583 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001211 | 0.206532 | 1,041 | 40 | 55 | 26.025 | 0.820823 | 0.179635 | 0 | 0.16 | 0 | 0 | 0.234356 | 0.148466 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.16 | 0 | 0.48 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2be8db748a16a1d91326b5c85994bfcde6e8d3f2 | 2,735 | py | Python | setup.py | arthurazs/ScreenPDF | 28d879e946cdc68271e9ca2fa7f35a2ab4972151 | [
"MIT"
] | null | null | null | setup.py | arthurazs/ScreenPDF | 28d879e946cdc68271e9ca2fa7f35a2ab4972151 | [
"MIT"
] | 3 | 2016-11-10T18:57:44.000Z | 2017-09-10T16:12:13.000Z | setup.py | arthurazs/ScreenPDF | 28d879e946cdc68271e9ca2fa7f35a2ab4972151 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
from setuptools import setup
import os.path as path
info_name = 'screenpdf'
info_url = 'https://github.com/arthurazs/{}/'.format(info_name)
author_name = 'Arthur Zopellaro'
email = 'arthurazsoares@gmail.com'
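# Read __version__ out of the package's version.py without importing the package.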
try:
with open(path.abspath(path.join(info_name,
'version.py'))) as version:
exec(version.read())
except IOError:
print('ERROR version not found')
__version__ = ''
info_download = '{}archive/v{}.tar.gz'.format(info_url, __version__)
try:
with open('PyPIREADME.rst', 'r') as readme:
info_long_description = readme.read()
except IOError:
try:
print(
'PyPIREADME.rst not found, trying '
'README.md instead')
print(
'WARNING It isn\'t recommended to upload '
'a markdown file as README to PyPI')
with open('README.md', 'r') as readme:
info_long_description = readme.read()
except IOError:
print('ERROR README.md not found either')
info_long_description = ''
setup(
name=info_name,
version=__version__,
author=author_name,
author_email=email,
maintainer=author_name,
maintainer_email=email,
description=(
'{} converts *.spdf to proper screenplay'
' PDF format.'.format(info_name)),
license='MIT',
keywords=(
'script screenplay format formatter'
),
url=info_url,
download_url=info_download,
packages=[info_name],
package_data={
info_name: [
'templates/*', 'data/*']},
long_description=info_long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Code Generators',
'Topic :: Software Development :: Interpreters',
'Topic :: Text Processing',
'Topic :: Text Processing :: General',
'Topic :: Utilities'
],
setup_requires=['setuptools', 'pip', 'nose', 'rednose'],
install_requires=['fpdf'], # try fpdf2 in the future
entry_points={
'console_scripts': [
'{0}={0}.__main__:main'.format(info_name)]
},
tests_require=['nose', 'rednose', 'tox'],
test_suite='nose.collector'
)
| 30.730337 | 68 | 0.603656 | 291 | 2,735 | 5.498282 | 0.491409 | 0.04 | 0.078125 | 0.0275 | 0.086875 | 0.06375 | 0.06375 | 0.06375 | 0.06375 | 0.06375 | 0 | 0.005952 | 0.262888 | 2,735 | 88 | 69 | 31.079545 | 0.787698 | 0.021572 | 0 | 0.126582 | 0 | 0 | 0.391695 | 0.016835 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.025316 | 0 | 0.025316 | 0.050633 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bebba5f36f0fc773a08af1c42a4714df0febac4 | 597 | py | Python | 221/main.py | JanaSabuj/Leetcode-solutions | 78d10926b15252a969df598fbf1f9b69b2760b79 | [
"MIT"
] | 13 | 2019-10-12T14:36:32.000Z | 2021-06-08T04:26:30.000Z | 221/main.py | JanaSabuj/Leetcode-solutions | 78d10926b15252a969df598fbf1f9b69b2760b79 | [
"MIT"
] | 1 | 2020-02-29T14:02:39.000Z | 2020-02-29T14:02:39.000Z | 221/main.py | JanaSabuj/Leetcode-solutions | 78d10926b15252a969df598fbf1f9b69b2760b79 | [
"MIT"
] | 3 | 2020-02-08T12:04:28.000Z | 2020-03-17T11:53:00.000Z | class Solution:
def maximalSquare(self, mat: List[List[str]]) -> int:
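        # Dynamic programming: dp[i][j] is the side length of the largest
        # all-"1" square whose bottom-right corner is at (i, j); the answer
        # is the area of the best such square, mx * mx.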
n = len(mat)
m = len(mat[0])
mx = 0
dp = [[0 for _ in range(m)] for _ in range(n)]
for i in range(n):
for j in range(m):
if i == 0 or j == 0:
dp[i][j] = 0 if mat[i][j] == "0" else 1
elif mat[i][j] == "1":
dp[i][j] = min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1]) + 1
else:
dp[i][j] = 0
mx = max(mx, dp[i][j])
return mx*mx | 29.85 | 77 | 0.350084 | 93 | 597 | 2.225806 | 0.301075 | 0.101449 | 0.096618 | 0.10628 | 0.057971 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047923 | 0.475712 | 597 | 20 | 78 | 29.85 | 0.613419 | 0 | 0 | 0 | 0 | 0 | 0.003454 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bed27e78e266549ed3fd0ab95fec60eddae005c | 3,481 | py | Python | bin/anthology/people.py | KhalilMrini/acl-anthology | ca36605b81b508ee3d70480a41e92f4e11c29032 | [
"Apache-2.0"
] | 1 | 2021-08-04T04:03:35.000Z | 2021-08-04T04:03:35.000Z | bin/anthology/people.py | KhalilMrini/acl-anthology | ca36605b81b508ee3d70480a41e92f4e11c29032 | [
"Apache-2.0"
] | 1 | 2021-04-19T17:14:31.000Z | 2021-04-19T17:14:31.000Z | bin/anthology/people.py | KhalilMrini/acl-anthology | ca36605b81b508ee3d70480a41e92f4e11c29032 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2019 Marcel Bollmann <marcel@bollmann.me>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging as log
import anthology.formatter as my_formatter
class PersonName:
first, last = "", ""
def __init__(self, first, last, script="roman", variant: "PersonName" = None):
self.first = first.strip() if first is not None else ""
self.last = last.strip()
self.script = script
self.variant = variant
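    # The from_* helpers below act as alternative constructors and are invoked
    # on the class itself (e.g. PersonName.from_element(elem)), not on instances.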
def from_element(person_element):
"""
Reads from the XML, which includes an optional first name, a last name,
and an optional variant (itself containing an optional first name, and a
last name).
"""
first, last = "", ""
# The name variant script, defaults to roman
script = person_element.attrib.get("script", "roman")
variant = None
for element in person_element:
tag = element.tag
# These are guaranteed to occur at most once by the schema
if tag == "first":
first = element.text or ""
elif tag == "last":
last = element.text or ""
elif tag == "variant":
variant = PersonName.from_element(element)
return PersonName(first, last, script=script, variant=variant)
def from_repr(repr_):
parts = repr_.split(" || ")
if len(parts) > 1:
first, last = parts[0], parts[1]
else:
first, last = "", parts[0]
return PersonName(first, last)
def from_dict(dict_):
first = dict_.get("first", "")
if first is None:
first = ""
last = dict_["last"]
return PersonName(first, last)
@property
def full(self):
"""
Return the full rendering of the name.
This includes any name variant in parentheses.
Currently handles both Roman and Han scripts.
"""
if self.script.startswith("han"):
form = f"{self.last}{self.first}"
else: # default to "roman"
form = f"{self.first} {self.last}"
if self.variant is not None:
return f"{form} ({self.variant.full})"
else:
return form
@property
def id_(self):
return repr(self)
def as_bibtex(self):
if not self.first:
return "{{{}}}".format(my_formatter.bibtex_encode(self.last))
return my_formatter.bibtex_encode("{}, {}".format(self.last, self.first))
def as_dict(self):
return {"first": self.first, "last": self.last, "full": self.full}
def __eq__(self, other):
return (self.first == other.first) and (self.last == other.last)
def __str__(self):
return self.full
def __repr__(self):
if not self.first:
return self.last
return "{} || {}".format(self.first, self.last)
def __hash__(self):
return hash(repr(self))
| 31.645455 | 82 | 0.595806 | 436 | 3,481 | 4.665138 | 0.318807 | 0.044248 | 0.037365 | 0.036873 | 0.043265 | 0.023599 | 0 | 0 | 0 | 0 | 0 | 0.005289 | 0.293881 | 3,481 | 109 | 83 | 31.93578 | 0.822213 | 0.288423 | 0 | 0.171875 | 0 | 0 | 0.069719 | 0.01848 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.03125 | 0.078125 | 0.453125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bf00f70f8211fba122e48124085551574fc2d0f | 8,150 | py | Python | launch.py | danif93/MLProject | d93fc647f2cc135dc531889ffde3cd54f56a7210 | [
"MIT"
] | null | null | null | launch.py | danif93/MLProject | d93fc647f2cc135dc531889ffde3cd54f56a7210 | [
"MIT"
] | null | null | null | launch.py | danif93/MLProject | d93fc647f2cc135dc531889ffde3cd54f56a7210 | [
"MIT"
] | null | null | null |
# coding: utf-8
# In[ ]:
import mnist_loader as load
import neural_network as neuronet
import activation_functions as af
import optimisation_functions as of
import cost_functions as cf
import learningStep_generators as lg
import numpy as np
import matplotlib.pyplot as plt
try:  # the matplotlib inline magic only exists when running under IPython/Jupyter
    get_ipython().magic('matplotlib inline')
except NameError:
    pass
# In[ ]:
# parameters initialization
maxEpochs = 50 # the neural net will not train over maxEpochs
earlyStopParameter = 5 # number of last error entries the net will take in consideration for early stopping
earlyStopThreshold = 0.14 # errors-vector norm threshold for deciding early stop
batchSize = 10 # stochastic gradient descent batch size
regularParam = 3.0 # lambda
hiddenUnit = 20 # neurons number for the single hidden unit layer
decayStep = lg.harmonicSeries(3) # at each epoch the step will shrink over the harmonic series
constStep = lg.constant(3) # at each epoch the step will remain constant
weightInitErr = 0.3 # if requested, a low delta for weights initialization
tr, va, te = load.loadMnist() # train, validation and test datasets
# In[ ]:
# Simple execution
sigmoidNetCross = [neuronet.NeuralNetwork([784, hiddenUnit, 10], unit=af.Sigmoid, cost=cf.MeanSquareError)]
sgdC = of.StochasticGradientDescent(sigmoidNetCross)
validAccuracy, trainAccuracy, testAccuracy = sgdC.SGD(tr, va, te, maxEpochs, batchSize, decayStep, earlyStopParameter, earlyStopThreshold)
# In[ ]:
# Constant - Decaying step comparison
# considerations about oscillating, saturation, early stopping and learning break
sigmoidNetCross = [neuronet.NeuralNetwork([784, hiddenUnit, 10], unit=af.Sigmoid, cost=cf.CrossEntropy)]
sgdC = of.StochasticGradientDescent(sigmoidNetCross)
validAccuracyC, trainAccuracyC, testAccuracyC = sgdC.SGD(tr, va, te, maxEpochs, batchSize, constStep, earlyStopParameter, earlyStopThreshold)
sigmoidNetCross = [neuronet.NeuralNetwork(net=[784, hiddenUnit, 10], unit=af.Sigmoid, cost=cf.CrossEntropy)]
sgdC = of.StochasticGradientDescent(sigmoidNetCross)
validAccuracyD, trainAccuracyD, testAccuracyD = sgdC.SGD(tr, va, te, maxEpochs, batchSize, decayStep, earlyStopParameter, earlyStopThreshold)
nonZeroConstant = len(np.argwhere(validAccuracyC[0]))
nonZeroDecaying = len(np.argwhere(validAccuracyD[0]))
plt.title('Constant/Decaying learning step')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.ylim([80,100])
plt.plot(np.arange(nonZeroConstant),validAccuracyC[0][:nonZeroConstant], label='Costant')
plt.plot(nonZeroConstant, testAccuracyC[0], 'x', label='Costant - Test')
plt.plot(np.arange(nonZeroDecaying),validAccuracyD[0][:nonZeroDecaying], label='Harmonic Decaying')
plt.plot(nonZeroDecaying, testAccuracyD[0], 'x', label='Harmonic Decaying - Test')
plt.grid()
plt.legend()
# In[ ]:
# Cross Entropy - MSE comparison
sigmoidNetCross = [neuronet.NeuralNetwork([784, hiddenUnit, 10], unit=af.Sigmoid, cost=cf.CrossEntropy, weightError=weightInitErr)]
sigmoidNetMSE = [neuronet.NeuralNetwork(net=[784, hiddenUnit, 10], unit=af.Sigmoid, cost=cf.MeanSquareError, weightError=weightInitErr)]
sgdC = of.StochasticGradientDescent(sigmoidNetCross)
sgdM = of.StochasticGradientDescent(sigmoidNetMSE)
validAccuracyM, trainAccuracyM, testAccuracyM = sgdM.SGD(tr, va, te, maxEpochs, batchSize, decayStep, earlyStopParameter, earlyStopThreshold)
validAccuracyC, trainAccuracyC, testAccuracyC = sgdC.SGD(tr, va, te, maxEpochs, batchSize, decayStep, earlyStopParameter, earlyStopThreshold)
nonZeroCross = len(np.argwhere(validAccuracyC[0]))
nonZeroMSE = len(np.argwhere(validAccuracyM[0]))
plt.title('Cross-Entropy / MSE training result')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.ylim([0,100])
plt.plot(np.arange(nonZeroCross), validAccuracyC[0][:nonZeroCross], label='Cross-Entropy Validation')
plt.plot(nonZeroCross, testAccuracyC[0], 'x', label='Cross-Entropy - Test')
plt.plot(np.arange(nonZeroMSE), validAccuracyM[0][:nonZeroMSE], label='MSE')
plt.plot(nonZeroMSE, testAccuracyM[0], 'x', label='MSE - Test')
plt.grid()
plt.legend()
# In[ ]:
# Training - Validation accuracy comparison
#SGD(trainingSet, validSet, testSet, numEpochs, batchSize, stepGen, earlyStopParam, earlyStopThrshld, _lambda=0.0, l2Regul=True, trainEval=False)
sigmoidNetCross = [neuronet.NeuralNetwork([784, hiddenUnit, 10], unit=af.Sigmoid, cost=cf.CrossEntropy)]
sgdC = of.StochasticGradientDescent(sigmoidNetCross)
validAccuracy, trainAccuracy, testAccuracy = sgdC.SGD(tr, va, te, maxEpochs, batchSize, decayStep, earlyStopParameter, earlyStopThreshold, trainEval=True)
nonZeroValues = len(np.argwhere(validAccuracy[0]))
plt.title('Training/Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.ylim([80,100])
plt.plot(np.arange(nonZeroValues), validAccuracy[0][:nonZeroValues], label='Validation Accuracy')
plt.plot(np.arange(nonZeroValues), trainAccuracy[0][:nonZeroValues], label='Train Accuracy')
plt.plot(nonZeroValues, testAccuracy[0], 'x', label='Test Accuracy')
plt.legend()
plt.grid()
# In[ ]:
# Training - Validation accuracy comparison with l2 regularization
sigmoidNetCross = [neuronet.NeuralNetwork([784, hiddenUnit, 10], unit=af.Sigmoid, cost=cf.CrossEntropy)]
sgdC = of.StochasticGradientDescent(sigmoidNetCross)
validAccuracy, trainAccuracy, testAccuracy = sgdC.SGD(tr, va, te, maxEpochs, batchSize, decayStep, earlyStopParameter, earlyStopThreshold, _lambda=regularParam, trainEval=True)
nonZeroValues = len(np.argwhere(validAccuracy[0]))
plt.title('Training/Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.ylim([80,100])
plt.plot(np.arange(nonZeroValues), validAccuracy[0][:nonZeroValues], label='Validation Accuracy')
plt.plot(np.arange(nonZeroValues), trainAccuracy[0][:nonZeroValues], label='Train Accuracy')
plt.plot(nonZeroValues, testAccuracy[0], 'x', label='Test Accuracy')
plt.legend()
plt.grid()
# In[ ]:
# Training - Test accuracy comparison with l1 regularization
sigmoidNetCross = [neuronet.NeuralNetwork([784, hiddenUnit, 10], unit=af.Sigmoid, cost=cf.CrossEntropy)]
sgdC = of.StochasticGradientDescent(sigmoidNetCross)
validAccuracy, trainAccuracy, testAccuracy = sgdC.SGD(tr, va, te, maxEpochs, batchSize, decayStep, earlyStopParameter, earlyStopThreshold, _lambda=regularParam, l2Regul=False, trainEval=True)
nonZeroValues = len(np.argwhere(validAccuracy[0]))
plt.title('Training/Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.ylim([80,100])
plt.plot(np.arange(nonZeroValues), validAccuracy[0][:nonZeroValues], label='Validation Accuracy')
plt.plot(np.arange(nonZeroValues), trainAccuracy[0][:nonZeroValues], label='Train Accuracy')
plt.plot(nonZeroValues, testAccuracy[0], 'x', label='Test Accuracy')
plt.legend()
plt.grid()
# In[ ]:
# hold-out validation on hidden unit number
nets = [neuronet.NeuralNetwork([784, hiddUnitNumber, 10], unit=af.Sigmoid, cost=cf.CrossEntropy) for hiddUnitNumber in [50,100,150,200]]
sgdC = of.StochasticGradientDescent(nets)
validAccuracy, trainAccuracy, testAccuracy = sgdC.SGD(tr, va, te, maxEpochs, batchSize, decayStep, earlyStopParameter, earlyStopThreshold, _lambda=regularParam)
plt.title('Validation accuracy per hidden unit model')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.ylim([90,100])
for i in np.arange(len(nets)):
nonZeroValues = len(np.argwhere(validAccuracy[i]))
plt.plot(np.arange(nonZeroValues),validAccuracy[i][:nonZeroValues], label='{}'.format(nets[i].net[1:-1]))
plt.plot(nonZeroValues, testAccuracy[i], 'x', label='{} - Test'.format(nets[i].net[1:-1]))
plt.legend()
plt.grid()
# In[ ]:
# AdaGrad algo - not working
sigmoidNetCross = neuronet.NeuralNetwork(net=[784, hiddenUnit, 10], unit=af.Sigmoid, cost=cf.CrossEntropy)
agC = of.AdaGrad(sigmoidNetCross)
testAccuracy, trainAccuracy = agC.AG(tr, va, te, maxEpochs, batchSize, constStep, _lambda=regularParam, trainEval=True)
plt.title('Training/Test accuracy ')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.ylim([80,100])
plt.plot(np.arange(maxEpochs), testAccuracy, label='Test Accuracy')
plt.plot(np.arange(maxEpochs), trainAccuracy, label='Train Accuracy')
plt.legend()
plt.grid()
| 38.084112 | 191 | 0.766503 | 963 | 8,150 | 6.474559 | 0.206646 | 0.038813 | 0.018765 | 0.031275 | 0.610906 | 0.564555 | 0.544186 | 0.525421 | 0.512911 | 0.496552 | 0 | 0.019932 | 0.101227 | 8,150 | 213 | 192 | 38.262911 | 0.831263 | 0.137546 | 0 | 0.487603 | 0 | 0 | 0.090363 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066116 | 0 | 0.066116 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bf019712de59bf5b206a860d7ffb4d44cf73b61 | 8,005 | py | Python | python version/box_model.py | asiddi24/box_model | 011fb0e272c12019fa4c2533661c5eb1f8c0001a | [
"MIT"
] | null | null | null | python version/box_model.py | asiddi24/box_model | 011fb0e272c12019fa4c2533661c5eb1f8c0001a | [
"MIT"
] | null | null | null | python version/box_model.py | asiddi24/box_model | 011fb0e272c12019fa4c2533661c5eb1f8c0001a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 4 16:35:40 2019
@author: asiddi24
"""
'''Four box model'''
import numpy as np
import gsw as gsw
import time
import matplotlib.pyplot as plt
def fourbox(N,Kv,AI,Mek,Aredi,M_s,D0,T0s,T0n,T0l,T0d,S0s,S0n,S0l,S0d,Fws,Fwn,epsilon):
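    # Gnanadesikan-style four-box overturning model (north, south, low-latitude
    # and deep boxes). Reading the code below: Kv is a vertical diffusivity,
    # AI the eddy (GM) coefficient, Aredi the isopycnal mixing coefficient,
    # Mek the Ekman transport, M_s an additional southern deep exchange and
    # epsilon a friction parameter scaling the northern sinking M_n; these
    # interpretations are inferred from usage, not documented by the author.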
Area = 3.6e14
Area_low = 2e14
Area_s = 1e14
Area_n = 0.6e14
Dhigh = 100
dt = 365*86400/4
inorth = 0
isouth = 1
ilow = 2
ideep = 3
T = np.zeros(shape=[N+1,4])
S = np.zeros(shape=[N+1,4])
V = np.zeros(shape=[N+1,4])
M_n = np.zeros(N)
M_upw = np.zeros(N)
M_eddy = np.zeros(N)
D_low = np.zeros(N+1)
    D_low[0] = D0  # initial pycnocline depth of the low-latitude box
    T[0,:] = np.array([T0n, T0s, T0l, T0d])  # box order: north, south, low, deep
    S[0,:] = np.array([S0n, S0s, S0l, S0d])  # same box order as T (matches inorth=0, isouth=1, ...)
sigma0 = np.zeros(shape=[N,4])
sigma2 = np.zeros(shape=[N,4])
M_LS = np.zeros(N)
M_LN = np.zeros(N)
S_initlow = np.zeros(N)
V_low = np.zeros(N)
V_deep = np.zeros(N)
dV_low = np.zeros(N)
dV_deep = np.zeros(N)
dS_low = np.zeros(N)
dS_south = np.zeros(N)
dS_deep = np.zeros(N)
dS_north = np.zeros(N)
dT_low = np.zeros(N)
dT_south = np.zeros(N)
dT_deep = np.zeros(N)
dT_north = np.zeros(N)
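    # Forward-Euler time stepping: each iteration diagnoses the transports from
    # the current state, then advances volumes, salinities and temperatures.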
for j in range(N):
# Computing density using TEOS-10
        sigma0[j,:] = gsw.density.sigma0(S[j,:],T[j,:])+1000  # TEOS-10 potential density anomaly at the surface (T in deg C, SA in g/kg); +1000 gives density
        sigma2[j,:] = gsw.density.sigma2(S[j,:],T[j,:])+1000  # same, referenced to 2000 dbar
        M_LS[j] = Aredi*2.5e7*D_low[j]/1e6  # isopycnal (Redi) exchange with the south: Aredi * zonal extent (2.5e7 m) * depth over an assumed 1e6 m meridional scale
        M_LN[j] = Aredi*5e6*D_low[j]/1e6  # same for the northern boundary, with a 5e6 m zonal extent
if (sigma0[j,inorth] > sigma0[j,ilow]):
            gprime = 9.8*(sigma0[j,inorth] - sigma0[j,ilow])/sigma0[j,inorth]  # reduced gravity between the north and low boxes
M_n[j] = gprime*D_low[j]**2/epsilon
            M_upw[j] = Kv*Area_low/np.minimum(D_low[j],3700.0-D_low[j])  # diffusive upwelling: Kv * area over the thinner of the two layers
            M_eddy[j] = AI*D_low[j]*2.5e7/1e6  # GM eddy return flow: AI * depth * zonal extent (2.5e7 m) over an assumed 1e6 m meridional scale
V_deep[j] = 3700*Area-Area_n*Dhigh - Area_s*Dhigh - Area_low*D_low[j]
V_low[j] = Area_low*D_low[j]
            S_initlow[j] = S[j,ilow]*Area_low*D_low[j]  # salt content of the low box (salinity * volume), kept as a diagnostic
            dV_low[j] = (Mek-M_eddy[j] - M_n[j] + M_upw[j] - Fws - Fwn)*dt  # net volume change of the low box this step
            dV_deep[j] = -dV_low[j]  # the deep box compensates exactly
D_low[j+1] = D_low[j] + dV_low[j]/Area_low
dS_low[j] = (Mek*S[j,isouth] - M_eddy[j]*S[j,ilow] - M_n[j]*S[j,ilow] +\
M_upw[j]*S[j,ideep] + M_LS[j]*(S[j,isouth] - S[j,ilow]) + \
M_LN[j]*(S[j,inorth] - S[j,ilow]))*dt
dS_south[j] = ((M_eddy[j]+M_LS[j])*(S[j,ilow] - S[j,isouth]) + \
(Mek+M_s)*(S[j,ideep] - S[j,isouth]) - Fws*S[j,isouth])*dt
dS_deep[j] = (M_n[j]*S[j,inorth] - (M_upw[j] + Mek + M_s)*S[j,ideep] +\
(M_eddy[j]+M_s)*S[j,isouth] + Fws*S[j,isouth] +\
Fwn*S[j,inorth])*dt
dS_north[j] = ((M_n[j]+M_LN[j])*(S[j,ilow] - S[j,inorth]) - \
Fwn*S[j,inorth])*dt
dT_low[j] = (Mek*T[j,isouth] - M_eddy[j]*T[j,ilow] - M_n[j]*T[j,ilow] +\
M_upw[j]*T[j,ideep] + M_LS[j]*(T[j,isouth] - T[j,ilow]) + \
M_LN[j]*(T[j,inorth] - T[j,ilow]) + Area_low*100*(T0l-T[j,ilow])/365/86400)*dt
dT_south[j] = ((M_eddy[j]+M_LS[j])*(T[j,ilow]-T[j,isouth]) + \
(Mek+M_s)*(T[j,ideep]-T[j,isouth]) + \
Area_s*100*(T0s-T[j,isouth])/365/86400)*dt
dT_deep[j] = ((M_n[j] + Fwn)*T[j,inorth] - (M_upw[j]+Mek+M_s)*T[j,ideep] +\
(M_eddy[j] + M_s + Fws)*T[j,isouth])*dt
dT_north[j] = ((M_n[j] + M_LN[j])*(T[j,ilow] - T[j,inorth]) +\
Area_n*100*(T0n-T[j,inorth])/365/86400)*dt
S[j+1,inorth]=S[j,inorth]+dS_north[j]/(Dhigh*Area_n)
S[j+1,isouth]=S[j,isouth]+dS_south[j]/(Dhigh*Area_s)
S[j+1,ilow]=(S[j,ilow]*V_low[j]+dS_low[j])/(V_low[j]+dV_low[j])
S[j+1,ideep]=(S[j,ideep]*V_deep[j]+dS_deep[j])/(V_deep[j]+dV_deep[j])
T[j+1,inorth]=T[j,inorth]+dT_north[j]/(Dhigh*Area_n)
T[j+1,isouth]=T[j,isouth]+dT_south[j]/(Dhigh*Area_s)
T[j+1,ilow]=(T[j,ilow]*V_low[j]+dT_low[j])/(V_low[j]+dV_low[j])
T[j+1,ideep]=(T[j,ideep]*V_deep[j]+dT_deep[j])/(V_deep[j]+dV_deep[j])
elif (sigma0[j,inorth] <= sigma0[j,ilow]):
gprime = 9.8*(sigma0[j,inorth]-sigma0[j,ilow])/sigma0[j,inorth]
M_n[j] = gprime*Dhigh**2/epsilon
M_upw[j] = Kv*Area_low/np.minimum(D_low[j],3700-D_low[j])
M_eddy[j] = AI*D_low[j]*2.5e7/1e6
V_deep[j] = 3700*Area-Area_n*Dhigh-Area_s*Dhigh-Area_low*D_low[j]
V_low[j] = Area_low*D_low[j]
dV_low[j] = (Mek-M_eddy[j]-M_n[j]+M_upw[j]-Fws-Fwn)*dt
dV_deep[j] = -dV_low[j]
D_low[j+1] = D_low[j] + dV_low[j]/Area_low
dS_low[j] = (Mek*S[j,isouth] - M_eddy[j]*S[j,ilow] - M_n[j]*S[j,inorth] +\
M_upw[j]*S[j,ideep] + M_LS[j]*(S[j,isouth] - S[j,ilow]) +\
M_LN[j]*(S[j,inorth]-S[j,ilow]))*dt
dS_south[j] = ((M_eddy[j] + M_LS[j])*(S[j,ilow] - S[j,isouth]) +\
Mek*(S[j,ideep] - S[j,isouth]) + M_s*(S[j,ideep] - S[j,isouth]) - \
Fws*S[j,isouth])*dt
dS_deep[j] = (-(M_upw[j] + Mek + M_s - M_n[j])*S[j,ideep] +\
(M_eddy[j] + M_s + Fws)*S[j,isouth] + Fwn*S[j,inorth])*dt
dS_north[j] = (M_LN[j]*(S[j,ilow] - S[j,inorth]) - M_n[j]*(S[j,ideep] - S[j,inorth]) - Fwn*S[j,inorth])*dt
dT_low[j] = (Mek*T[j,isouth] - M_eddy[j]*T[j,ilow] - M_n[j]*T[j,inorth] +\
M_upw[j]*T[j,ideep] + M_LS[j]*(T[j,isouth] - T[j,ilow]) +\
M_LN[j]*(T[j,inorth] - T[j,ilow]) + Area_low*100*(T0l-T[j,ilow])/365/86400)*dt
dT_south[j] = ((M_eddy[j] + M_LS[j])*(T[j,ilow] - T[j,isouth]) +\
(Mek+M_s)*(T[j,ideep]-T[j,isouth]) +\
Area_s*100*(T0s-T[j,isouth])/365/86400)*dt
dT_deep[j] = (-(M_upw[j] + Mek + M_s - M_n[j])*T[j,ideep] + \
(M_eddy[j] + M_s)*T[j,isouth])*dt
dT_north[j] = ((-M_n[j] + M_LN[j])*(T[j,ilow] - T[j,inorth]) + Area_n*100*(T0n-T[j,inorth])/365/86400)*dt
S[j+1,inorth] = S[j,inorth] + dS_north[j]/(Area_n*Dhigh)
S[j+1,isouth] = S[j,isouth] + dS_south[j]/(Area_s*Dhigh)
S[j+1,ilow] = (S[j,ilow]*V_low[j]+dS_low[j])/(V_low[j]+dV_low[j])
S[j+1,ideep]=(S[j,ideep]*V_deep[j]+dS_deep[j])/(V_deep[j] + dV_deep[j])
T[j+1,inorth] = T[j,inorth] + dT_north[j]/(Dhigh*Area_n)
T[j+1,isouth] = T[j,isouth] + dT_south[j]/(Dhigh*Area_s)
T[j+1,ilow] = (T[j,ilow]*V_low[j]+dT_low[j])/(V_low[j]+dV_low[j])
T[j+1,ideep]=(T[j,ideep]*V_deep[j]+dT_deep[j])/(V_deep[j]+dV_deep[j])
fig, (ax1,ax2,ax3) = plt.subplots(3,1, sharex=False, sharey=False)
ax1.plot(S)
ax1.legend(['North','South','Low','Deep'])
ax2.plot(sigma0)
ax2.legend(['North','South','Low','Deep'])
ax3.plot(np.linspace(0,N,4000),M_n/1e6,np.linspace(0,N,4000),M_upw/1e6,np.linspace(0,N,4000),(Mek-M_eddy)/1e6 )
ax3.legend(['North','Upw','South'])
time.sleep(0.1)
return (M_n, M_upw, M_eddy, D_low, T, S, sigma0)
| 44.72067 | 145 | 0.484322 | 1,441 | 8,005 | 2.538515 | 0.104788 | 0.034445 | 0.017223 | 0.019136 | 0.693002 | 0.634773 | 0.591307 | 0.58912 | 0.56561 | 0.559869 | 0 | 0.048956 | 0.305934 | 8,005 | 179 | 146 | 44.72067 | 0.609431 | 0.053716 | 0 | 0.335878 | 0 | 0 | 0.006237 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007634 | false | 0 | 0.030534 | 0 | 0.045802 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bf108d95891106d4dc930297e966511ec0c8d34 | 981 | py | Python | api/management/commands/pets_list.py | V-Holodov/pets_accounting | 300cb8748124b6f767e85404ee372b93b097098c | [
"MIT"
] | null | null | null | api/management/commands/pets_list.py | V-Holodov/pets_accounting | 300cb8748124b6f767e85404ee372b93b097098c | [
"MIT"
] | 1 | 2021-12-22T14:08:37.000Z | 2021-12-22T14:08:37.000Z | api/management/commands/pets_list.py | V-Holodov/pets_accounting | 300cb8748124b6f767e85404ee372b93b097098c | [
"MIT"
] | 1 | 2021-12-24T11:50:26.000Z | 2021-12-24T11:50:26.000Z | import io
from django.core.management.base import BaseCommand
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from api.models import Pet
from api.serializers import PetSerializer
class Command(BaseCommand):
"""Uploading pets from the command line to stdout in JSON format."""
help = "Displaying a list of pets in stdout"
def add_arguments(self, parser):
parser.add_argument(
"--has-photos", action="store_true",
help="Returns entries with photos"
)
def handle(self, *args, **kwargs):
has_photos = kwargs["has_photos"]
if has_photos:
pets = Pet.objects.filter(photos__isnull=False)
else:
pets = Pet.objects.all()
serializer = PetSerializer(pets, many=True)
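        # Round-trip through the JSON renderer/parser to turn the serializer
        # output (OrderedDicts) into plain Python structures before printing.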
stream = io.BytesIO(JSONRenderer().render(serializer.data))
data = {"pets": JSONParser().parse(stream)}
self.stdout.write(f"{data}")
| 29.727273 | 72 | 0.666667 | 118 | 981 | 5.457627 | 0.576271 | 0.055901 | 0.052795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.231397 | 981 | 32 | 73 | 30.65625 | 0.854111 | 0.063201 | 0 | 0 | 0 | 0 | 0.11391 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.26087 | 0 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bf122809aba2074e678a428f5c1706ec17b6493 | 626 | py | Python | src/ocr/dataset/helper.py | AlessandroZanatta/ML-Captcha-Solver | 09d4eba3d277203eeb324440eb5641c9ce287963 | [
"MIT"
] | null | null | null | src/ocr/dataset/helper.py | AlessandroZanatta/ML-Captcha-Solver | 09d4eba3d277203eeb324440eb5641c9ce287963 | [
"MIT"
] | null | null | null | src/ocr/dataset/helper.py | AlessandroZanatta/ML-Captcha-Solver | 09d4eba3d277203eeb324440eb5641c9ce287963 | [
"MIT"
] | null | null | null | import numpy as np
def load_dataset(path):
# initialize the list of data and labels
data = []
labels = []
# retrieve data from CSV
for row in open(path):
# parse the label and image from the row
row = row.split(",")
label = ord(row[0]) - ord("A") # scale labels to be numbers in 0-25 (26 letters)
image = np.array([int(x) for x in row[1].split(".")], dtype="uint8")
# Reshape flattened images
image = image.reshape((32, 32))
# update the list of data and labels
data.append(image)
labels.append(label)
return (data, labels)
| 26.083333 | 88 | 0.583067 | 90 | 626 | 4.044444 | 0.544444 | 0.038462 | 0.049451 | 0.071429 | 0.142857 | 0.142857 | 0.142857 | 0 | 0 | 0 | 0 | 0.027335 | 0.298722 | 626 | 23 | 89 | 27.217391 | 0.801822 | 0.332268 | 0 | 0 | 0 | 0 | 0.019512 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bf1fa79fccb8a23dfa75ef82b9bf40360252990 | 3,284 | py | Python | cogs/moderator.py | theobori/bot-template | 3aba0ed127c435e25b29be163f870f5088a611d8 | [
"MIT"
] | null | null | null | cogs/moderator.py | theobori/bot-template | 3aba0ed127c435e25b29be163f870f5088a611d8 | [
"MIT"
] | null | null | null | cogs/moderator.py | theobori/bot-template | 3aba0ed127c435e25b29be163f870f5088a611d8 | [
"MIT"
] | null | null | null | """moderation cog"""
from discord.ext import commands
import discord
from utils.database import CursorDB
from utils.page import Pages, make_groups
from utils.utilities import basic_frame, basic_message
from utils.reactions import Reactions
class Moderator(commands.Cog, CursorDB, Pages):
"""
Commands for the administrators, moderators
"""
def __init__(self, bot):
CursorDB.__init__(self)
Pages.__init__(self)
self.bot = bot
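    # Reaction events drive the Pages pagination: reactions added to the bot's
    # own messages are removed again and forwarded to the Pages handler.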
@commands.Cog.listener()
async def on_reaction_add(self, reaction: object, user: object):
if not reaction.message.author.bot or user.bot:
return
await reaction.remove(user)
await self.handler(reaction, user)
@commands.has_permissions(administrator=True)
@commands.command()
async def clear(self, ctx: commands.Context, n: int = 1):
"""
        Removes the n most recent messages
"""
await ctx.message.delete()
await ctx.send(Reactions.LOADING)
await ctx.channel.purge(limit = n + 1)
@commands.has_permissions(administrator=True)
@commands.command(aliases=["command_stats"])
async def cs(self, ctx: commands.Context):
"""
Show how many times each commands has been used
"""
self.execute(f"""SELECT name, count FROM command_stat
WHERE guild_id={ctx.guild.id}""")
response = self.cursor.fetchall()
if not response:
return (await basic_message(ctx, "❌ Empty"))
response = sorted(response, key=lambda item: int(item["count"]))[::-1]
data = {item["name"]: item["count"] for item in response}
page_content = basic_frame(data).split("\n")
page_content = make_groups(page_content, 10)
await self.create(ctx, page_content)
@commands.has_permissions(administrator=True)
@commands.command()
async def warn(self, ctx: commands.Context, member: discord.Member, reason: str = "No reason given"):
"""
Warn an user on a Discord guild
"""
self.execute(f"""INSERT INTO warn (guild_id, user_id)
VALUES({ctx.guild.id}, {member.id})
ON DUPLICATE KEY
UPDATE count = count + 1""")
await basic_message(
ctx,
f"⚠️ {member.mention} has been warned by `{ctx.author}`",
f"Reason: {reason}"
)
@commands.has_permissions(administrator=True)
@commands.command(aliases=["warn_stats"])
async def ws(self, ctx: commands.Context):
"""
Show every user warned
"""
data = {}
self.execute(f"""SELECT user_id, count FROM warn
WHERE guild_id={ctx.guild.id}""")
response = self.cursor.fetchall()
if not response:
return await basic_message(ctx, "❌ Empty")
for item in response:
user_id = int(item["user_id"])
user = await self.bot.fetch_user(user_id)
key = f"{user} ({user_id})"
data[key] = item["count"]
page_content = basic_frame(data).split("\n")
pages = make_groups(page_content, 10)
await self.create(ctx, pages)
def setup(bot: commands.Bot):
bot.add_cog(Moderator(bot))
| 29.321429 | 105 | 0.601705 | 395 | 3,284 | 4.891139 | 0.296203 | 0.021739 | 0.045549 | 0.072464 | 0.325052 | 0.298137 | 0.298137 | 0.266046 | 0.202899 | 0.096273 | 0 | 0.003386 | 0.280451 | 3,284 | 111 | 106 | 29.585586 | 0.812526 | 0.017661 | 0 | 0.208955 | 0 | 0 | 0.158143 | 0.02288 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0 | 0.089552 | 0 | 0.179104 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bf2f7c097096dee417521bb08ed9caad1bcf85b | 4,340 | py | Python | mpi_migration.py | max-509/distributed-kirchhoff-migration | 43c1dc2706f6ceb4e010ab005c34ff8ae6f51645 | [
"Apache-2.0"
] | null | null | null | mpi_migration.py | max-509/distributed-kirchhoff-migration | 43c1dc2706f6ceb4e010ab005c34ff8ae6f51645 | [
"Apache-2.0"
] | 1 | 2022-02-02T06:40:04.000Z | 2022-02-02T06:40:04.000Z | mpi_migration.py | max-509/distributed-kirchhoff-migration | 43c1dc2706f6ceb4e010ab005c34ff8ae6f51645 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os.path
from mpi4py import MPI
import numpy as np
import pandas as pd
import tensorflow as tf
import configparser
import numba
import multiprocessing as mp
import sys
from concurrent.futures import ThreadPoolExecutor
from _migration import calculate_migration
@numba.njit(parallel=True)
def travel_times_sum(t1, t2):
return t1 + t2
def cartesian_product(*arrays):
la = len(arrays)
dtype = np.result_type(*arrays)
arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)
for i, a in enumerate(np.ix_(*arrays)):
arr[..., i] = a
return arr.reshape(-1, la)
def main():
if len(sys.argv) != 2:
raise AttributeError("Error: need pass path to setting.ini file")
config = configparser.ConfigParser()
config.read(sys.argv[1])
directories = config['Directories']
loaded = tf.keras.models.load_model(directories['neural_network'])
data_set = pd.read_csv(directories['data_source-receiver'])
seism_trace = np.load(directories['seismogramma'])
path_to_result = directories['path_to_result']
data_set_source = data_set["SOUX"]
data_set_receiver = data_set["RECX"]
parametres = config['Settings']
nx = int(parametres["number_of_x_points"])
nz = int(parametres["number_of_z_points"])
    x0 = int(parametres["starting_x_coord"])  # the ini is assumed to define x-range keys
    x1 = int(parametres["ending_x_coord"])    # symmetric with the z-range keys below
z0 = int(parametres["starting_z_coord"])
z1 = int(parametres["ending_z_coord"])
dt = float(parametres["dt"])
dx = (x1 - x0) / (nx - 1)
dz = (z1 - z0) / (nz - 1)
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
UP = 0
DOWN = 1
LEFT = 2
RIGHT = 3
neighbour_processes = [0, 0, 0, 0]
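    # Pick the most nearly square rows x cols factorisation of the world size,
    # then carve the (nz, nx) imaging grid into one tile per MPI rank.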
for grid_cols in range(int(np.floor(np.sqrt(size))), size + 1):
if size % grid_cols == 0:
grid_rows = size // grid_cols
break
cartesian_communicator = comm.Create_cart((grid_rows, grid_cols), periods=(False, False), reorder=True)
my_mpi_row, my_mpi_col = cartesian_communicator.Get_coords(cartesian_communicator.rank)
neighbour_processes[UP], neighbour_processes[DOWN] = cartesian_communicator.Shift(0, 1)
neighbour_processes[LEFT], neighbour_processes[RIGHT] = cartesian_communicator.Shift(1, 1)
x_coor = dx * (nx - 1) / grid_cols
z_coor = dz * (nz - 1) / grid_rows
n = nx // grid_cols
m = nz // grid_rows
if my_mpi_col == grid_cols - 1:
n = n + nx % grid_cols
if my_mpi_row == grid_rows - 1:
m = m + nz % grid_rows
masx = np.linspace(my_mpi_col * x_coor, (my_mpi_col + 1) * x_coor, n)
masz = np.linspace(my_mpi_row * z_coor, (my_mpi_row + 1) * z_coor, m)
if rank == 0:
        sources_coords = data_set_source.values.reshape(-1)
        receivers_coords = data_set_receiver.values.reshape(-1)
seismogramm = seism_trace
else:
sources_coords = None
receivers_coords = None
seismogramm = None
sources_coords = comm.bcast(sources_coords, root=0)
receivers_coords = comm.bcast(receivers_coords, root=0)
seismogramm = comm.bcast(seismogramm, root=0)
d_source = cartesian_product(sources_coords, masz, masx)
d_receiver = cartesian_product(receivers_coords, masz, masx)
def times_calculator(input_coords):
return loaded.predict(input_coords)
n_processes = mp.cpu_count()
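    # Fan the (source, z, x) coordinate blocks out across a thread pool so the
    # network's predict() calls for travel-time evaluation can overlap.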
with ThreadPoolExecutor(n_processes) as executor:
def split(d):
n_sources_points_pairs = d.shape[0]
part = n_sources_points_pairs // n_processes
split_parts = []
for i in range(n_processes - 1):
split_parts.append((i + 1) * part)
split_parts.append(n_processes * part + (n_sources_points_pairs % n_processes))
source_travel_times_splits = np.concatenate(list(executor.map(times_calculator, np.split(d, split_parts)[:-1])))
return source_travel_times_splits
time1 = split(d_source)
time2 = split(d_receiver)
travel_times = travel_times_sum(time1, time2).reshape(-1, len(masz) * len(masx)).T
result = calculate_migration(seismogramm, travel_times, dt).reshape(m, n)
np.save(os.path.join(path_to_result, f'result{my_mpi_row}{my_mpi_col}'), result)
if __name__ == '__main__':
main()
| 32.631579 | 124 | 0.66659 | 608 | 4,340 | 4.498355 | 0.294408 | 0.018282 | 0.014625 | 0.020841 | 0.073857 | 0.035832 | 0.024132 | 0 | 0 | 0 | 0 | 0.015579 | 0.216129 | 4,340 | 132 | 125 | 32.878788 | 0.78836 | 0.004608 | 0 | 0 | 0 | 0 | 0.062978 | 0.006946 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04902 | false | 0.009804 | 0.107843 | 0.019608 | 0.196078 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bf3a2f0d26b50aa2cf1e968ebff3eed788af470 | 13,622 | py | Python | plugins/trakt/test/test_trakt.py | Tigge/platinumshrimp | fc5c8e825a714253b18b46cedafe28820a3a34b7 | [
"MIT"
] | 2 | 2017-12-20T16:56:37.000Z | 2021-01-19T18:41:53.000Z | plugins/trakt/test/test_trakt.py | Tigge/platinumshrimp | fc5c8e825a714253b18b46cedafe28820a3a34b7 | [
"MIT"
] | 18 | 2015-01-03T21:33:09.000Z | 2018-12-04T12:05:58.000Z | plugins/trakt/test/test_trakt.py | Tigge/platinumshrimp | fc5c8e825a714253b18b46cedafe28820a3a34b7 | [
"MIT"
] | 6 | 2015-01-02T01:16:38.000Z | 2021-09-04T01:28:38.000Z | import json
import os
import unittest
from unittest.mock import Mock, patch
import requests_mock
from dateutil import relativedelta
from plugins.trakt.trakt import Trakt
from plugins.trakt import api
""" Presets copied from Trakt's API """
ACTIVITY_PRESET_EPISODE_1 = {
"watched_at": "2014-03-31T09:28:53.000Z",
"action": "watch",
"episode": {
"season": 2,
"number": 3,
"title": "Beauty Pageant",
"ids": {
"trakt": 253,
"tvdb": 1088041,
"imdb": None,
"tmdb": 397642,
"tvrage": None,
},
},
"show": {
"title": "Parks and Recreation",
"year": 2009,
"ids": {
"trakt": 4,
"slug": "parks-and-recreation",
"tvdb": 84912,
"imdb": "tt1266020",
"tmdb": 8592,
"tvrage": 21686,
},
},
}
ACTIVITY_PRESET_SERIES_1 = {
"number": 2,
"ids": {"trakt": 18965, "tvdb": 83141, "tmdb": 18456, "tvrage": None},
"rating": 8.49162,
"votes": 179,
"episode_count": 24,
"aired_episodes": 24,
"overview": "...",
"first_aired": "2009-09-18T00:00:00.000Z",
}
ACTIVITY_PRESET_MOVIE_1 = {
"watched_at": "2014-03-31T09:28:53.000Z",
"action": "scrobble",
"movie": {
"title": "The Dark Knight",
"year": 2008,
"ids": {
"trakt": 4,
"slug": "the-dark-knight-2008",
"imdb": "tt0468569",
"tmdb": 155,
},
},
}
ACTIVITY_TEMPLATE_1 = {
"watched_at": "",
"action": "watch",
"episode": {
"season": -1,
"number": -1,
"title": "Beauty Pageant",
"ids": {
"trakt": 253,
"tvdb": 1088041,
"imdb": None,
"tmdb": 397642,
"tvrage": None,
},
},
"show": {
"title": "Parks and Recreation",
"year": 2009,
"ids": {
"trakt": 4,
"slug": "parks-and-recreation",
"tvdb": 84912,
"imdb": "tt1266020",
"tmdb": 8592,
"tvrage": 21686,
},
},
}
class FormatTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dir = os.path.join("..", os.path.dirname(__file__))
def test_watch_episode(self):
with open(
os.path.join(self.dir, "format", "test_format_watch_episode.json")
) as f:
activity = json.load(f)
message = Trakt.format_activity(activity, "User", activity["action"])
self.assertEqual(
message,
"User watched 'Marvel's Agents of S.H.I.E.L.D.', "
"S01E11 'The Magical Place' https://www.trakt.tv/search/trakt/74015?id_type=episode",
)
def test_scrobble_episode(self):
with open(
os.path.join(self.dir, "format", "test_format_scrobble_episode.json")
) as f:
activity = json.load(f)
message = Trakt.format_activity(activity, "User", activity["action"])
self.assertEqual(
message,
"User scrobbled 'The Simpsons', "
"S26E10 'The Man Who Came to Be Dinner' https://www.trakt.tv/search/trakt/1390653?id_type=episode",
)
def test_watch_movie(self):
with open(
os.path.join(self.dir, "format", "test_format_watch_movie.json")
) as f:
activity = json.load(f)
message = Trakt.format_activity(activity, "User", activity["action"])
self.assertEqual(
message,
"User watched 'Soul Kitchen' (2009) https://www.trakt.tv/search/trakt/19911?id_type=movie",
)
def test_utf8(self):
with open(os.path.join(self.dir, "format", "test_format_unicode.json")) as f:
activity = json.load(f)
message = Trakt.format_activity(activity, "User", activity["action"])
self.assertEqual(
message,
"User watched 'The Walking Dead \u263b', "
"S05E09 'What Happened and What\u2019s Going On \u263b' "
"https://www.trakt.tv/search/trakt/998958?id_type=episode",
)
class StartTestCase(unittest.TestCase):
def setUp(self):
self.trakt = Trakt()
@patch("plugins.trakt.trakt.datetime")
def test_user_setup(self, mock_datetime):
data_users = ["adam", "dave", "sue", "eva"]
user_json = {"users": data_users, "key": "fakekey"}
mock_datetime.datetime.now = lambda **_: "fakedate"
self.trakt.started(json.dumps(user_json))
self.assertEqual(
self.trakt.users,
dict(
map(
lambda u: (
u,
{
"last_sync_episodes": "fakedate",
"last_sync_movies": "fakedate",
},
),
data_users,
)
),
)
class UpdateTestCase(unittest.TestCase):
def setUp(self):
self.trakt = Trakt()
self.trakt.started('{"key": "[FAKEKEY]", "users": ["adam"]}')
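    # Replace the plugin's fetch/echo/summary hooks with Mocks so each test can
    # script what "new activity" looks like and assert on what gets echoed.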
def setupMocks(self, fetch_side_effect, summary_side_effect=None):
fetch = Mock(side_effect=fetch_side_effect)
echo = Mock()
summary = Mock(side_effect=summary_side_effect)
self.trakt.fetch_new_activities = fetch
self.trakt.echo = echo
self.trakt.create_activity_summary = summary
return fetch, echo, summary
@patch("plugins.trakt.trakt.Trakt.format_activity")
def test_single_episode(self, format_):
user_name = "adam"
def mock_fetch_new_activities(url, typ, func):
if typ != "episodes":
return []
else:
return [ACTIVITY_PRESET_EPISODE_1]
summary = {"action": "WOOT", "series": [{"data": "dummy"}]}
summary_return = lambda _: [summary]
mock_fetch, mock_echo, _ = self.setupMocks(
mock_fetch_new_activities, summary_return
)
self.trakt.trakt.users_history = summary_return
self.trakt.users["adam"]["last_sync_episodes"] = api.Trakt.get_date(
ACTIVITY_PRESET_EPISODE_1["watched_at"]
) - relativedelta.relativedelta(days=1)
self.trakt.update_user(user_name)
self.assertTrue(mock_echo.called, "A message should have been sent")
format_.assert_called_once_with(summary, user_name, summary["action"])
self.assertEqual(
self.trakt.users["adam"]["last_sync_episodes"],
api.Trakt.get_date(ACTIVITY_PRESET_EPISODE_1["watched_at"]),
)
def test_no_new_episodes(self):
mock_fetch, mock_echo, _ = self.setupMocks(
lambda url, typ, func: [ACTIVITY_PRESET_EPISODE_1]
if typ == "episodes"
else [],
lambda _: [],
)
self.trakt.users["adam"]["last_sync_episodes"] = api.Trakt.get_date(
"2013-03-31T09:28:53.000Z"
)
self.trakt.update_user("adam")
self.assertFalse(
mock_echo.called,
"No message should be sent if no new activities were found",
)
self.assertEqual(
self.trakt.users["adam"]["last_sync_episodes"],
api.Trakt.get_date(ACTIVITY_PRESET_EPISODE_1["watched_at"]),
)
@requests_mock.mock()
def test_new_activity_both_types(self, mock_requests):
mock_requests.get(
"/users/adam/history/episodes", text=json.dumps([ACTIVITY_PRESET_EPISODE_1])
)
mock_requests.get(
"/users/adam/history/movies", text=json.dumps([ACTIVITY_PRESET_MOVIE_1])
)
mock_requests.get(
"/shows/4/seasons", text=json.dumps([ACTIVITY_PRESET_SERIES_1])
)
mock_requests.get("/users/adam/ratings/movies", text="[]")
self.trakt.echo = Mock()
self.trakt.users["adam"]["last_sync_episodes"] = api.Trakt.get_date(
"2013-03-31T09:28:53.000Z"
)
self.trakt.users["adam"]["last_sync_movies"] = api.Trakt.get_date(
"2013-03-31T09:28:53.000Z"
)
self.trakt.update_user("adam")
self.trakt.echo.assert_any_call(
"adam watched 'Parks and Recreation', S02E03 'Beauty Pageant' https://www.trakt.tv/search/trakt/253?id_type=episode"
)
self.trakt.echo.assert_any_call(
"adam scrobbled 'The Dark Knight' (2008) https://www.trakt.tv/search/trakt/4?id_type=movie"
)
self.assertEqual(self.trakt.echo.call_count, 2)
self.assertEqual(
self.trakt.users["adam"]["last_sync_episodes"],
api.Trakt.get_date(ACTIVITY_PRESET_EPISODE_1["watched_at"]),
)
self.assertEqual(
self.trakt.users["adam"]["last_sync_movies"],
api.Trakt.get_date(ACTIVITY_PRESET_MOVIE_1["watched_at"]),
)
class SummaryTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dir = os.path.join("..", os.path.dirname(__file__))
def setUp(self):
self.trakt = Trakt()
self.trakt.started('{"key": "[FAKEKEY]", "users": ["adam"]}')
def create_activity(self, action, title, year, season, number):
return {
"action": action,
"episode": {"season": season, "number": number},
"show": {"title": title, "year": year},
}
def test_empty_list(self):
result = self.trakt.create_activity_summary([])
self.assertEqual(result, [])
def test_single_episode(self):
with open(
os.path.join(self.dir, "summaries", "single_episode_episodes.json")
) as episodes_json, open(
os.path.join(self.dir, "summaries", "single_episode_show.json")
) as show_json:
self.trakt.trakt.seasons_summary = Mock(return_value=json.load(show_json))
result = self.trakt.create_activity_summary([json.load(episodes_json)[0]])
self.assertTrue(
len(result) == 1,
"Should have gotten one show back. Got: %s" % len(result),
)
res = result[0]
self.assertEqual(res["action"], "scrobble")
self.assertEqual(res["show"]["title"], "CGP Grey")
self.assertEqual(res["show"]["year"], 2011)
self.assertEqual(len(res["seasons"]), 1)
self.assertEqual(len(res["seasons"][2016]["episodes"]), 1)
self.assertEqual(
res["seasons"][2016]["episodes"][8]["title"],
"The Simple Solution to Traffic",
)
self.assertEqual(
Trakt.format_activity(result[0], "user", "watch"),
"user watched 'CGP Grey', S2016E08 'The Simple Solution to Traffic' https://www.trakt.tv/search/trakt/2327792?id_type=episode",
)
def test_single_season(self):
with open(
os.path.join(self.dir, "summaries", "single_season_episodes.json")
) as fe:
with open(
os.path.join(self.dir, "summaries", "single_season_show.json")
) as fs:
self.trakt.trakt.seasons_summary = Mock(return_value=json.load(fs))
result = self.trakt.create_activity_summary(json.load(fe))
self.assertTrue(
len(result) == 1,
"Should have gotten one show back. Got: %s" % len(result),
)
res = result[0]
self.assertEqual(res["action"], "scrobble")
self.assertEqual(res["show"]["title"], "The Cyanide & Happiness Show")
self.assertEqual(res["show"]["year"], 2014)
self.assertEqual(len(res["seasons"]), 1)
self.assertEqual(len(res["seasons"][2]["episodes"]), 3)
self.assertEqual(
Trakt.format_activity(result[0], "user", "watch"),
"user watched 'The Cyanide & Happiness Show' S02E05-E07 https://www.trakt.tv/search/trakt/117827?id_type=season",
)
def test_multiple_seasons(self):
with open(
os.path.join(self.dir, "summaries", "multiple_seasons_episodes.json")
) as episodes_json, open(
os.path.join(self.dir, "summaries", "multiple_seasons_show.json")
) as shows_json:
self.trakt.trakt.seasons_summary = Mock(return_value=json.load(shows_json))
result = self.trakt.create_activity_summary(json.load(episodes_json))
self.assertTrue(
len(result) == 1,
"Should have gotten one show back. Got: %s" % len(result),
)
res = result[0]
# Verify season information
self.assertEqual(res["action"], "scrobble")
self.assertEqual(res["show"]["title"], "Silicon Valley")
self.assertEqual(res["show"]["year"], 2014)
self.assertEqual(len(res["seasons"]), 3)
self.assertEqual(len(res["seasons"][1]["episodes"]), 8)
self.assertEqual(len(res["seasons"][2]["episodes"]), 10)
self.assertEqual(len(res["seasons"][3]["episodes"]), 10)
self.assertEqual(
Trakt.format_activity(result[0], "user", "watch"),
"user watched 'Silicon Valley' S01E01-E08, S02E01-E10, S03E01-E10 https://www.trakt.tv/search/trakt/60157?id_type=show",
)
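# Hedged sketch (assumes the fixture JSON files under format/ and summaries/
# ship with the repository): a conventional direct-run entry point would be
# if __name__ == "__main__":
#     unittest.main()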
| 34.75 | 143 | 0.551681 | 1,490 | 13,622 | 4.881208 | 0.175839 | 0.040836 | 0.016499 | 0.019249 | 0.627389 | 0.576653 | 0.501306 | 0.487282 | 0.471195 | 0.458133 | 0 | 0.043524 | 0.308472 | 13,622 | 391 | 144 | 34.838875 | 0.728556 | 0.001835 | 0 | 0.409496 | 0 | 0.023739 | 0.244264 | 0.041756 | 0 | 0 | 0 | 0 | 0.118694 | 1 | 0.059347 | false | 0 | 0.023739 | 0.002967 | 0.106825 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bf4cf97686269a46eb3ddc822b1a02b29a68eb9 | 5,675 | py | Python | psop.py | robalty/pysynth | aa7abf78079af5b6dee29b1b97af5aa31e7b64e8 | [
"MIT"
] | 1 | 2020-05-04T23:49:38.000Z | 2020-05-04T23:49:38.000Z | psop.py | robalty/pysynth | aa7abf78079af5b6dee29b1b97af5aa31e7b64e8 | [
"MIT"
] | 5 | 2020-05-06T01:22:37.000Z | 2020-05-28T18:13:04.000Z | psop.py | robalty/pysynth | aa7abf78079af5b6dee29b1b97af5aa31e7b64e8 | [
"MIT"
] | 1 | 2020-06-07T22:23:56.000Z | 2020-06-07T22:23:56.000Z | import math
import numpy as np
SAMPLERATE = 48000
class ADSR:
def __init__(self, a=0.1, d=0.2, s=0.6, r=0.5):
self.timings = {
0 : 0,
1 : 1 / ((a * SAMPLERATE) + 1),
2 : -1 / ((d * SAMPLERATE) + 1),
3 : 0,
4 : -1 / ((r * SAMPLERATE) + 1)
}
self.s = s
self.cur = 0
self.stage = 0
def get_vol(self):
temp = self.cur + self.timings.get(self.stage)
if temp > 0.99:
temp = 0.99
self.stage = 2
elif (self.stage == 2) & (temp < self.s):
temp = self.s
self.stage = 3
elif temp < 0:
self.stage = 0
return 0
self.cur = temp
return temp
def get_vols(self, num):
temp = []
for i in range(num):
temp.append(self.get_vol())
return temp
class Operator:
def __init__(self, i=SAMPLERATE, f=220, m=1):
self.mod = 0
self.frequency = f
self.freq_mult = m
self.envelope = ADSR()
self.feedback = 0
self.acc_phase = 0
def sample(self, clock):
return (self.frequency * clock) + self.acc_phase
def sample_with(self, in_op, clock):
return (self.frequency * clock) + (self.mod * in_op) + self.acc_phase
class Synth:
def __init__(self):
self.ops = [Operator(), Operator(), Operator(), Operator()]
self.algorithm = 1
self.clock = 0
self.frequency = 220
self.vol = 0
def set_freq(self, val):
self.frequency = val
for i in self.ops:
t = val * i.freq_mult * 2 * np.pi
i.acc_phase += ((i.frequency - t) * self.clock)
i.frequency = t
def set_mod(self, val, op):
self.ops[op].mod = val
def get_samples(self, num_samples):
func = alg.get(self.algorithm)
temp = func(self.ops, self.clock, num_samples)
self.clock += num_samples / SAMPLERATE
return temp * self.vol
def release(self):
for op in self.ops:
op.envelope.stage = 4
def press(self):
for op in self.ops:
op.feedback = 0
op.envelope.stage = 1
op.envelope.cur = 0
op.acc_phase = 0
self.clock = 0
def algtest(ops, clock, size):
return samples_fb(ops[0], clock, size)
def samples(op, clock, size):
first = np.fromfunction(lambda x: op.sample(clock + (x / SAMPLERATE)), (size,))
vol = op.envelope.get_vols(size)
return np.multiply(vol, np.sin(first))
def samples_with(op, clock, size, in_op):
first = np.fromfunction(lambda x: op.sample(clock + (x / SAMPLERATE)), (size,))
vol = op.envelope.get_vols(size)
s_in = np.multiply(in_op, op.mod)
return np.multiply(vol, np.sin(np.add(first, s_in)))
def samples_fb(op, clock, size):
first = np.fromfunction(lambda x: op.sample(clock + ((x+1) / SAMPLERATE)), (size,))
second = np.sin(np.fromfunction(lambda x: op.sample(clock + (x / SAMPLERATE)), (size,)))
vol = op.envelope.get_vols(size)
s_in = np.multiply(second, op.mod)
output = np.sin(np.add(first, s_in))
temp = ((np.abs(op.mod) - 4) / 4)
if temp > 0:
output -= temp * output
output += temp * np.random.normal(scale=1, size=size)
return np.multiply(vol, output)
def alg1(ops, clock, size):
first = samples_fb(ops[0], clock, size)
second = samples_with(ops[1], clock, size, first)
third = samples_with(ops[2], clock, size, second)
return samples_with(ops[3], clock, size, third)
def alg2(ops, clock, size):
first = (samples_fb(ops[0], clock, size) + samples(ops[1], clock, size)) / 2
second = samples_with(ops[2], clock, size, first)
return samples_with(ops[3], clock, size, second)
def alg3(ops, clock, size):
first = samples_fb(ops[0], clock, size)
second = samples(ops[1], clock, size)
third = (first + samples_with(ops[2], clock, size, second)) / 2
return samples_with(ops[3], clock, size, third)
#algs 3 and 4 only differ in where the feedback op gets added
def alg4(ops, clock, size):
first = samples_fb(ops[0], clock, size)
second = samples(ops[2], clock, size)
third = (second + samples_with(ops[1], clock, size, first)) / 2
return samples_with(ops[3], clock, size, third)
def alg5(ops, clock, size):
first = samples_fb(ops[0], clock, size)
second = samples_with(ops[1], clock, size, first)
third = samples(ops[2], clock, size)
return (second + samples_with(ops[3], clock, size, third)) / 2
def alg6(ops, clock, size):
first = samples_fb(ops[0], clock, size)
second = samples_with(ops[1], clock, size, first)
third = samples_with(ops[2], clock, size, first)
fourth = samples_with(ops[3], clock, size, first)
return (second + third + fourth) / 3
def alg7(ops, clock, size):
first = samples_fb(ops[0], clock, size)
second = samples_with(ops[1], clock, size, first)
return (second + samples(ops[2], clock, size) + samples(ops[3], clock, size)) / 3
def alg8(ops, clock, size):
first = samples_fb(ops[0], clock, size) + samples(ops[1], clock, size)
second = samples(ops[2], clock, size) + samples(ops[3], clock, size)
return (first + second) / 4
alg = {
0 : alg1,
1 : alg2,
2 : alg3,
3 : alg4,
4 : alg5,
5 : alg6,
6 : alg7,
7 : alg8,
8 : algtest
}
def lerp(startval, endval, time, endtime):
if time > endtime:
return endval
else:
temp = (endval - startval) * (time / endtime)
return startval + temp
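# Minimal usage sketch, assuming the buffer is consumed elsewhere (nothing is
# played back here) and that `vol` must be raised above its 0 default:
if __name__ == "__main__":
    synth = Synth()
    synth.vol = 0.8
    synth.set_freq(440)
    synth.press()
    block = synth.get_samples(SAMPLERATE)  # one second of audio at 48 kHz
    print(block.shape, float(block.min()), float(block.max()))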
| 29.868421 | 92 | 0.570396 | 815 | 5,675 | 3.890798 | 0.134969 | 0.12772 | 0.07947 | 0.036897 | 0.504573 | 0.474298 | 0.425418 | 0.360769 | 0.346578 | 0.296752 | 0 | 0.032714 | 0.288987 | 5,675 | 189 | 93 | 30.026455 | 0.75316 | 0.010573 | 0 | 0.157895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.164474 | false | 0 | 0.013158 | 0.019737 | 0.328947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bf8e95176c9e685fb3202612e058ef1486d6f76 | 4,077 | py | Python | src/app.py | shivamshinde123/mushroom_classification | fbeb4be4fcd31625d9412a0cef58622780dddffc | [
"MIT"
] | 1 | 2022-03-30T08:21:12.000Z | 2022-03-30T08:21:12.000Z | src/app.py | shivamshinde123/mushroom_classification | fbeb4be4fcd31625d9412a0cef58622780dddffc | [
"MIT"
] | null | null | null | src/app.py | shivamshinde123/mushroom_classification | fbeb4be4fcd31625d9412a0cef58622780dddffc | [
"MIT"
] | null | null | null | import pathlib
from flask import Flask, render_template, request
import secrets
import json
from Predictions_using_trained_model import predictionsUsingTheTrainedModels
from predictionDatabaseOperations import PredictionDBOperations
from predictionPreprocessing import PredictionPreprocessing
from predictionRawDataTransformation import RawPredictionDataTransformation
from predictionRawDataValidation import PredictionRawDataValidation
from modeltraining import *
from model_methods import *
from performLogging import *
app = Flask(__name__)
secret = secrets.token_urlsafe(32)
app.secret_key = secret
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
return render_template('500.html'), 500
@app.route('/', methods=['GET'])
def index():
return render_template("index.html")
# @app.route('/results')
# def results():
# return render_template('results.html')
@app.route('/predictions', methods=['POST'])
def prediction():
if request.form is not None:
path = request.form['folderpath']
path = pathlib.Path(path)
# _______________________PREDICTION DATA VALIDATION STEP_____________________________
# creating an object of the class
obj = PredictionRawDataValidation()
# getting the pattern of the raw data filename
raw_data_filename_pattern = obj.manualRegexCreation()
# validating the raw data filename
obj.validatePredictionDataFileName(
raw_data_filename_pattern, path)
# validating the number of columns in the raw data file
obj.validateNumberOfColumns(22)
# checking whether any raw data file contains a column with all values missing
obj.validateMissingValuesInWholeColumn()
# ______________________PREDICTION DATA TRANSFORMATION STEP_________________________________
# creating an object of the class
obj1 = RawPredictionDataTransformation()
# adding quotes to the string values in the dataframe
obj1.addingQuotesToStringColumns()
# removing hyphens from the column names of the dataframe
obj1.removeHyphenFromColumnName()
# ______________________PREDICTION DATA DATABASE INSERTION STEP____________________________________
# creating an object of the class
obj2 = PredictionDBOperations()
# deleting the previously created table from the database
obj2.deleteDB()
# getting the dictionary containing the names and datatypes of the columns
with open(os.path.join("config", "params.yaml")) as p:
params = yaml.safe_load(p)
schema_prediction_path = params['pred_data_preparation']['schema_prediction']
with open(schema_prediction_path, "r") as k:
json_file = json.load(k)
name_dtype_dict = json_file['ColumnNames']
# creating a database and then creating a table in it
obj2.createTableIntoDb(name_dtype_dict)
# adding the data from the files in the good data folder into the database
obj2.insertGoodDataIntoTable()
# exporting the data from the database as a csv file
obj2.getDataFromDbTableIntoCSV()
# _____________________PREDICTION DATA PREPROCESSING STEP_____________________________________
# creating an object of the class
obj3 = PredictionPreprocessing()
# replacing question mark with 'b' in the feature column stalk-root
obj3.replaceQuestionMark()
# imputing missing values and encoding categorical columns
obj3.transformPipeline()
# ______________________PREDICTION USING TRAINED MODELS_____________________________________
# creating an object of the class
obj4 = predictionsUsingTheTrainedModels()
# calling the prediction method of the class
predictions = obj4.predictUsingModel()
return render_template('predictions.html', list=predictions)
if __name__ == '__main__':
app.run()
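# Usage sketch, assuming Flask's default host/port and that `folderpath`
# points at the batch prediction folder (the form field read above):
#   curl -X POST -d "folderpath=/path/to/prediction_batch" \
#        http://127.0.0.1:5000/predictions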
| 32.616 | 107 | 0.727005 | 418 | 4,077 | 6.301435 | 0.394737 | 0.031891 | 0.037965 | 0.019362 | 0.029233 | 0.029233 | 0.020501 | 0 | 0 | 0 | 0 | 0.010886 | 0.21143 | 4,077 | 124 | 108 | 32.879032 | 0.808398 | 0.357371 | 0 | 0 | 0 | 0 | 0.056713 | 0.008102 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070175 | false | 0 | 0.210526 | 0.052632 | 0.350877 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bf9743b7839d926775ed3660be8f3a065cd08dd | 2,017 | py | Python | Python/Tweet/read_tweet.py | LilyYC/legendary-train | 214525afeeb2da2409f451bf269e792c6940a1ba | [
"MIT"
] | null | null | null | Python/Tweet/read_tweet.py | LilyYC/legendary-train | 214525afeeb2da2409f451bf269e792c6940a1ba | [
"MIT"
] | null | null | null | Python/Tweet/read_tweet.py | LilyYC/legendary-train | 214525afeeb2da2409f451bf269e792c6940a1ba | [
"MIT"
] | null | null | null | HEADER_LENGTH = 6  # fields per tweet metadata row: id, timestamp, location, source, favorites, retweets (assumption inferred from the docstring example)
def read_tweets(file):
""" (file open for reading) -> dict of {str: list of tweet tuples}
Return a dictionary with the names of the candidates
as keys, and tweet tuple in the form of (candidate, tweet text, date,
source, favorite count, retweet count)as values
"""
dic = {}
key = ""
content = []
for line in file:
if line.endswith(':\n') and 2 <= len(line.split()) <= 3:
key = line.strip()[: -1]
dic[key] = []
else:
if line != "<<<EOT\n":
# then we accumulate the content
content.append(line)
else:
# use a helper function to generate the tuple for the value
help_read_tweet(dic, key, content)
content = []
return dic
def help_read_tweet(dic, key, content):
""" (dic, str, list of str) --> None
Update the dictionary with dic, key as the key, and content as value.
>>> key = 'Donald Trump'
>>> dic = {key: []}
>>> content = ['791651860889427968,1477593886,Queens NY,Twitter for iPhone \
,10775,4475\n', 'JOIN ME! #MAGA\n', 'TODAY:\n', 'Springfield, OH \n', 'Tol \
edo, OH \n', 'Geneva, OH \n', 'FRIDAY:\n', 'Manchester, NH \n', 'Lisbon, \
ME \n', 'Cedar Rapids, IA\n', 'https://t.co/kv624y9UOm\n', '\n']
>>> help_read_tweet(dic, key, content)
>>> dic
{'Donald Trump': [('Donald Trump', 'JOIN ME! #MAGA\n TODAY:\n Springfield,\
OH \n Toledo, OH \n Geneva, OH \n FRIDAY:\n Manchester, NH \n Lisbon, \
ME \n Cedar Rapids, IA\n https://t.co/kv624y9UOm\n \n')]}
"""
for s in content:
if len(s.split(',')) == HEADER_LENGTH and s.split(',')[0].isnumeric():
info = s.split(',')
else:
text = s + ' '
value = (key, text, int(info[1]), info[3], int(info[4]), int(info[5][:-1]))
dic[key].append(value)
if __name__ == '__main__':
import doctest
doctest.testmod()
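# Input layout sketch, inferred from the parser and docstrings above (a
# candidate header line ends with ':' and each tweet block ends with <<<EOT):
#   Donald Trump:
#   791651860889427968,1477593886,Queens NY,Twitter for iPhone,10775,4475
#   JOIN ME! #MAGA
#   <<<EOT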
| 35.385965 | 80 | 0.54239 | 274 | 2,017 | 3.934307 | 0.394161 | 0.044527 | 0.060297 | 0.044527 | 0.274583 | 0.274583 | 0.250464 | 0.19666 | 0.19666 | 0.139147 | 0 | 0.037762 | 0.291026 | 2,017 | 56 | 81 | 36.017857 | 0.716084 | 0.523054 | 0 | 0.185185 | 0 | 0 | 0.025612 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.037037 | 0 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bf9da14a8590cb2e410ccf84e328acb4bba5bef | 607 | py | Python | exe065a.py | Alexmachado81/ExerciciosPython_Resolvidos | 2774ba742788eb7b545f3f85e9438deb68a983d4 | [
"MIT"
] | null | null | null | exe065a.py | Alexmachado81/ExerciciosPython_Resolvidos | 2774ba742788eb7b545f3f85e9438deb68a983d4 | [
"MIT"
] | null | null | null | exe065a.py | Alexmachado81/ExerciciosPython_Resolvidos | 2774ba742788eb7b545f3f85e9438deb68a983d4 | [
"MIT"
] | null | null | null | cond = 'Y'
num = quant = soma = maior = menor = 0
while cond in 'Yy':
num = int(input(' Enter a number:'))
quant += 1
soma += num
cond = str(input('Continue?[Y/N]')).upper().strip()[0]
if quant == 1:
maior = menor = num
else:
if num > maior:
maior = num
if num < menor:
menor = num
media = soma/quant
print('You entered {} number(s) \n'
'The sum of the number(s) is {} \n'
'The average is {:.1f} \n'
'The largest number entered was {} \n'
'The smallest number entered was {}'.format(quant,soma,media,maior,menor))
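# Sample interactive session, assuming the prompts above:
#   Enter a number:4
#   Continue?[Y/N]y
#   Enter a number:10
#   Continue?[Y/N]n
#   -> 2 number(s), sum 14, average 7.0, largest 10, smallest 4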
| 27.590909 | 76 | 0.532125 | 88 | 607 | 3.670455 | 0.431818 | 0.092879 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011905 | 0.308072 | 607 | 21 | 77 | 28.904762 | 0.757143 | 0 | 0 | 0 | 0 | 0 | 0.30313 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bfa633ea90ac984f4075624c6eb6d5ad5a3bbf4 | 6,206 | py | Python | scAnalysis/Segmentation.py | yuzhounaut/SpaceM | 0fed7f14bb7d378ed0a8cf2e075bc562d6eed497 | [
"Apache-2.0"
] | 8 | 2020-05-26T13:32:29.000Z | 2021-12-05T20:32:38.000Z | scAnalysis/Segmentation.py | yuzhounaut/SpaceM | 0fed7f14bb7d378ed0a8cf2e075bc562d6eed497 | [
"Apache-2.0"
] | 2 | 2021-06-08T21:39:05.000Z | 2022-03-12T00:31:50.000Z | scAnalysis/Segmentation.py | LRpz/SpaceM | 03f7b3b871ec70ed38df63adfc4efcd30c3896a5 | [
"Apache-2.0"
] | 3 | 2020-06-18T17:06:16.000Z | 2021-10-08T06:55:07.000Z | from subprocess import call
import matplotlib.pyplot as plt
import numpy as np
import tqdm
from scipy import ndimage
import tifffile as tiff  # required by cellDistribution_MALDI below
def callCP(MFA, cp_p, cppipe_p):
"""Call CellProfiler (http://cellprofiler.org/) to perform cell segmentation. CellProfiler segmentation pipeline
is in the spaceM folder with the '.cppipe' extension.
Args:
MFA (str): path to Main Folder Analysis.
cp_p (str): path to CellProfiler path
cppipe_p (str): path to CellProfiler pipeline file
"""
# CP headless info https://github.com/CellProfiler/CellProfiler/wiki/Adapting-CellProfiler-to-a-LIMS-environment
call([cp_p,
'-r',
'-c',
'-p',
cppipe_p,
'-o',
MFA + 'CellProfilerAnalysis\\', '--file-list',
MFA + 'CellProfilerAnalysis\\input_files.txt'])
def cellOutlines(FluoBrightfield_p, fluo_window, label_p, save_p, clusters=[], cluster_col=[], labels_OI = []):
"""Visualize the cell segmentation results from CellProfiler by drawing a black outline around the estimated cell
boundaries.
Args:
FluoBrightfield_p (str): path to image to draw the cells outlines on.
fluo_window (int): number of pixels surrounding the frame of interest
label_p (str): path to the label image created by CellProfiler
save_p (str): path to the generated image with cells outlines
"""
if fluo_window > 0 :
labelI = plt.imread(label_p)[fluo_window:-fluo_window]
fluoI = plt.imread(FluoBrightfield_p)[fluo_window:-fluo_window]
else:
labelI = plt.imread(label_p)
fluoI = plt.imread(FluoBrightfield_p)[:,:,:3]
values = np.unique(labelI)
perimAll = np.zeros(np.shape(labelI))
struct = ndimage.generate_binary_structure(2, 1)
FIC = fluoI#[fluo_window:-fluo_window]
if np.shape(labels_OI)[0] > 0:
label_list = labels_OI
else:
label_list = np.unique(labelI)
cluster_cell_labels = np.array(clusters[0])
cluster_cell_values = np.array(clusters[1])
for seed in tqdm.tqdm(values):
BW = (labelI==seed)*1
if seed in label_list and seed >0:
perim = BW - ndimage.binary_erosion(BW, structure=struct, iterations=1).astype(BW.dtype)
perim = ndimage.binary_dilation(perim, structure=struct, iterations=1).astype(BW.dtype)
# perimAll = perimAll + (BW-erode)
if np.shape(clusters)[0] > 0:
if seed in cluster_cell_labels:
cluster_ind = np.array([cluster_cell_labels == seed][0])
color = cluster_col[cluster_cell_values[cluster_ind][0]]
else:
color = [0,0,0]
FIC[perim == 1, :] = color
else:
color = [0,0,0]
FIC[perim == 1, :] = color
plt.imshow(FIC)
PAC = perimAll#[fluo_window:-fluo_window]
PAC_d = ndimage.binary_dilation(PAC, structure=struct, iterations=1).astype(BW.dtype)
CC = FIC*np.dstack([np.invert(PAC_d.astype('bool'))] * 3)
plt.imsave(save_p, CC)
def cellOutlines_fast(FluoBrightfield_p, fluo_window, label_p, save_p, clusters=[], cluster_col=[], labels_OI = []):
"""Visualize the cell segmentation results from CellProfiler by drawing a black outline around the estimated cell
boundaries.
Args:
FluoBrightfield_p (str): path to image to draw the cells outlines on.
fluo_window (int): number of pixels surrounding the frame of interest
label_p (str): path to the label image created by CellProfiler
save_p (str): path to the generated image with cells outlines
"""
if fluo_window > 0 :
labelI = plt.imread(label_p)[fluo_window:-fluo_window]
fluoI = plt.imread(FluoBrightfield_p)[fluo_window:-fluo_window]
else:
labelI = plt.imread(label_p)
fluoI = plt.imread(FluoBrightfield_p)
values = np.unique(labelI)
perimAll = np.zeros(np.shape(labelI)).astype('uint8')
struct = ndimage.generate_binary_structure(2, 1)
FIC = fluoI#[fluo_window:-fluo_window]
if np.shape(labels_OI)[0] > 0:
label_list = labels_OI
else:
label_list = np.unique(labelI)
for seed in tqdm.tqdm(values):
BW = (labelI==seed)
BW = BW.astype('uint8')
if seed in label_list and seed > 0:
perim = BW - ndimage.binary_erosion(BW, structure=struct, iterations=1).astype(BW.dtype)
perimAll = perimAll + perim
perimAll_d = ndimage.binary_dilation(perimAll, structure=struct, iterations=1).astype(BW.dtype)
if len(np.shape(FIC)) >2:
CC = FIC*np.dstack([np.invert(perimAll_d.astype('bool'))] * 3)
else:
CC = FIC * np.invert(perimAll_d.astype('bool'))
plt.imsave(save_p, CC, cmap='gray')
def cellDistribution_MALDI(MF):
"""Maps the distribution of the cells over the sampled area by MALDI as a binary matrix. Can also be called an On/Off
sample mask, where pixels with a value of 1 are off sample (the corresponding ablation mark of that MALDI pixel
does not overlap with a cell) and a value of 1 are ON sample (there is overlap).
Args:
MF (str): path to the Main Folder.
"""
MFA = MF + 'Analysis/'
cellMask = tiff.imread(MFA + 'CellProfilerAnalysis/Labelled_cells.tif')
marksMask = np.load(MFA + 'Fiducials/transformedMarksMask.npy')
coordX, coordY = np.load(MFA + 'Fiducials/transformedMarks.npy')
window = 100
cellMask_bw_all = cellMask > 0
pmi = [] # Positive Mark Index
overLaps = []
for i in tqdm.tqdm(range(np.shape(marksMask)[0])):
status = 0
cell_mark_OL = 0
bi = []
for j in range(np.shape(marksMask[i][0])[0]):
if cellMask_bw_all[
int(marksMask[i][0][j] - np.min(coordX) + window), int(
marksMask[i][1][j] - np.min(coordY) + window)]:
status = 1
cell_mark_OL += 1
# if status == 1:
pmi = np.append(pmi, status)
overLaps = np.append(overLaps, cell_mark_OL)
np.save(MFA + 'CellProfilerAnalysis/cellDistribution_MALDI.npy', [pmi, overLaps])
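# Usage sketch; all paths below are placeholder assumptions, not values from
# the original pipeline:
# if __name__ == "__main__":
#     MFA = "experiment1/Analysis/"
#     callCP(MFA, "CellProfiler.exe", "segmentation.cppipe")
#     cellOutlines_fast(MFA + "CellProfilerAnalysis/Fluo.tif", 100,
#                       MFA + "CellProfilerAnalysis/Labelled_cells.tif",
#                       MFA + "CellProfilerAnalysis/Outlines.png")
#     cellDistribution_MALDI("experiment1/")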
| 38.7875 | 121 | 0.633742 | 828 | 6,206 | 4.624396 | 0.233092 | 0.052233 | 0.023505 | 0.020893 | 0.541917 | 0.5158 | 0.492818 | 0.472447 | 0.472447 | 0.422042 | 0 | 0.011688 | 0.255559 | 6,206 | 159 | 122 | 39.031447 | 0.8171 | 0.286175 | 0 | 0.37 | 0 | 0 | 0.061015 | 0.04844 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.05 | 0 | 0.09 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bfb524c2e56d706d022852606cbc81b117c3933 | 1,612 | py | Python | py/camvid.py | StoneWST/Dataset-Tool-for-Segmentation- | 813c55e92a61f915424f2b1f0e6c4d5eb20b7ce5 | [
"MIT"
] | 4 | 2021-06-03T09:02:36.000Z | 2022-02-28T00:58:22.000Z | py/camvid.py | wusitong98/Dataset-Tool-Segmentation | 813c55e92a61f915424f2b1f0e6c4d5eb20b7ce5 | [
"MIT"
] | null | null | null | py/camvid.py | wusitong98/Dataset-Tool-Segmentation | 813c55e92a61f915424f2b1f0e6c4d5eb20b7ce5 | [
"MIT"
] | null | null | null | import os
import fire
def gen_camvid_txt():
camvid_train_list = []
camvid_val_list = []
camvid_test_list = []
camvid_train_img_folder = "data/CamVid/train"
camvid_val_img_folder = "data/CamVid/val"
camvid_test_img_folder = "data/CamVid/test"
camvid_train_img_list = os.listdir(camvid_train_img_folder)
camvid_val_img_list = os.listdir(camvid_val_img_folder)
camvid_test_img_list = os.listdir(camvid_test_img_folder)
for i in camvid_train_img_list:
(former_name, extension) = os.path.splitext(i)
camvid_train_list.append(former_name)
txt_path = "data/CamVid/train.txt"
list2txt(camvid_train_list, txt_path)
for i in camvid_val_img_list:
(former_name, extension) = os.path.splitext(i)
camvid_val_list.append(former_name)
txt_path = "data/CamVid/val.txt"
list2txt(camvid_val_list, txt_path)
camvid_trainval_list = camvid_train_list + camvid_val_list
txt_path = "data/CamVid/trainval.txt"
list2txt(camvid_trainval_list, txt_path)
for i in camvid_test_img_list:
(former_name, extension) = os.path.splitext(i)
camvid_test_list.append(former_name)
txt_path = "data/CamVid/test.txt"
list2txt(camvid_test_list, txt_path)
def list2txt(list, txt_path):
if os.path.exists(txt_path):
os.remove(txt_path)
with open(txt_path, 'a') as f:
for i in list:
f.write(i)
f.write('\n')
print('Save txt file: {} with {} terms'.format(txt_path, len(list)))
if __name__ == '__main__':
fire.Fire()
# python3 camvid.py gen_camvid_txt
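# Assumed directory layout, inferred from the folder constants above:
#   data/CamVid/{train,val,test}/<image>.png
# gen_camvid_txt writes train.txt, val.txt, test.txt and trainval.txt next to
# those folders, one image basename (without extension) per line.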
| 28.280702 | 72 | 0.694789 | 239 | 1,612 | 4.297071 | 0.188285 | 0.088608 | 0.064265 | 0.066212 | 0.435248 | 0.344693 | 0.290166 | 0.245375 | 0.137293 | 0.137293 | 0 | 0.004666 | 0.202233 | 1,612 | 56 | 73 | 28.785714 | 0.793935 | 0.019851 | 0 | 0.075 | 0 | 0 | 0.110266 | 0.028517 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.05 | 0 | 0.1 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bfc4f790355a7817532bd5b978db33d27b1e506 | 2,269 | py | Python | grapaold/layerfiles/gcomgraphcutter.py | psorus/grapa | 6af343bb35c466c971ded1876e7a9d00e77cef00 | [
"MIT"
] | null | null | null | grapaold/layerfiles/gcomgraphcutter.py | psorus/grapa | 6af343bb35c466c971ded1876e7a9d00e77cef00 | [
"MIT"
] | null | null | null | grapaold/layerfiles/gcomgraphcutter.py | psorus/grapa | 6af343bb35c466c971ded1876e7a9d00e77cef00 | [
"MIT"
] | null | null | null | import numpy as np
import math
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer,Dense, Activation
import tensorflow.keras as keras# as k
import tensorflow as t
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam,SGD
from tensorflow.linalg import trace
class gcomgraphcutter(Layer):#takes a reordered gs*gs graph, and cuts it in pieces of c, which get pooled by a given rule
def __init__(self,gs=20,c=2,mode="mean",cut=0.5,c_const=1000,**kwargs):
#c=2
#mode="min"
assert gs % c==0#assume there is an equal splitting
self.gs=gs
self.c=c
self.ogs=int(self.gs/self.c)
self.mode=mode
self.cut=cut
self.c_const=c_const
super(gcomgraphcutter,self).__init__(**kwargs)
def build(self, input_shape):
#self.trafo=self.add_weight(name="trafo",
# shape=(self.param*self.c,self.paramo),
# initializer=self.metrik_init,#ignores metrik_init completely
# trainable=self.learnable,
# regularizer=None)
self.built=True
def call(self,x):
A=x[0]#adjacency matrix
Ar=K.reshape(A,(-1,self.ogs,self.c,self.ogs,self.c))
#print("Ar",Ar.shape)
if self.mode=="mean":
Am=K.mean(Ar,axis=(2,4))
if self.mode=="min":
Am=K.min(Ar,axis=(2,4))
if self.mode=="max":
Am=K.max(Ar,axis=(2,4))
#print("Am",Am.shape)
C=self.c_const
cut=self.cut
Ar=K.relu(1+C*(Am-cut))-K.relu(C*(Am-cut))
#print("Ar",Ar.shape)
#exit()
return Ar
def compute_output_shape(self,input_shape):
grap_shape=input_shape[0]
assert len(grap_shape)==3
assert grap_shape[1]==self.gs
assert grap_shape[2]==self.gs
return tuple([grap_shape[0],self.ogs,self.ogs])
def get_config(self):
mi = {"gs": self.gs, "c": self.c, "mode": self.mode, "cut": self.cut, "c_const": self.c_const}
th = super(gcomgraphcutter, self).get_config()
th.update(mi)
return th
@classmethod
def from_config(cls, config):
return cls(**config)
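# Shape-check sketch, assuming a batch of gs x gs adjacency matrices passed as
# the single element of the input list:
# layer = gcomgraphcutter(gs=20, c=2, mode="mean")
# A = np.random.rand(1, 20, 20).astype("float32")
# print(layer([A]).shape)  # pooled graph: (1, 10, 10)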
| 21.205607 | 185 | 0.633759 | 349 | 2,269 | 4.028653 | 0.318052 | 0.032006 | 0.054054 | 0.01707 | 0.025605 | 0.025605 | 0.025605 | 0 | 0 | 0 | 0 | 0.01418 | 0.223006 | 2,269 | 106 | 186 | 21.40566 | 0.783324 | 0.226972 | 0 | 0 | 0 | 0 | 0.035963 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.125 | false | 0 | 0.1875 | 0.020833 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2bfd2325a2d75def166622b037138785dc2ae620 | 1,778 | py | Python | extract_feature.py | Doarakko/baseball-and-football-face-classification | 53e1ff21918aab1b9b881c2dae4473e6bb1194a8 | [
"MIT"
] | null | null | null | extract_feature.py | Doarakko/baseball-and-football-face-classification | 53e1ff21918aab1b9b881c2dae4473e6bb1194a8 | [
"MIT"
] | null | null | null | extract_feature.py | Doarakko/baseball-and-football-face-classification | 53e1ff21918aab1b9b881c2dae4473e6bb1194a8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import glob
import re
import os
import numpy as np
from keras.engine import Model
from keras.preprocessing.image import img_to_array, load_img
from keras.utils import plot_model
from keras_vggface.vggface import VGGFace
from keras_vggface import utils
layer_list = ['flatten', 'fc6', 'fc6/relu', 'fc7', 'fc7/relu', 'fc8', 'fc8/softmax']
layer = layer_list[5]
# Function that extracts a feature vector from a preprocessed image
def extract_feature(model, image_data):
x = np.expand_dims(image_data, axis=0)
x = utils.preprocess_input(x)
y = model.predict(x)[0]
return y
if __name__ == '__main__':
vgg_model = VGGFace(include_top=True, weights='vggface', input_tensor=None, input_shape=None, pooling=None, classes=2622)
out = vgg_model.get_layer(layer).output
model = Model(vgg_model.input, out)
# Model visualization
# plot_model(model, to_file='./log/model.png')
image_data_path_list = glob.glob('./data/*/*/*/*/*_image.npy')
for (i, image_data_path) in enumerate(image_data_path_list):
dir_path = re.search(r'(.+)/.+npy', image_data_path)
dir_path = dir_path.group(1)
file_name = re.search(r'./data/.+/.+/.+/.+/(.+)_image.npy', image_data_path)
file_name = file_name.group(1)
# Path where the extracted feature data is saved
save_feature_data_path = dir_path + '/' + file_name + '_feature_' + layer + '.npy'
if os.path.exists(save_feature_data_path):
print('[Skip] {0}'.format(save_feature_data_path))
else:
# Load the dataset
image_data = np.load(image_data_path)
# Feature extraction
feature_data = extract_feature(model, image_data)
np.save(save_feature_data_path, feature_data)
print('[Save] {0}\t{1} / {2}'.format(save_feature_data_path, i+1, len(image_data_path_list)))
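# Note, assuming the default layer selection above: 'fc8' yields a
# 2622-dimensional vector per face, matching the classes=2622 head of the
# VGGFace model, while 'fc6'/'fc7' would yield 4096-dimensional features.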
| 35.56 | 125 | 0.662542 | 254 | 1,778 | 4.326772 | 0.358268 | 0.087352 | 0.082803 | 0.086442 | 0.096451 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014706 | 0.19685 | 1,778 | 49 | 126 | 36.285714 | 0.754902 | 0.062992 | 0 | 0 | 0 | 0 | 0.103739 | 0.035585 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.264706 | 0 | 0.323529 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9201694101813be0a08afb3b5067429915a5f7d8 | 9,662 | py | Python | PyRNN/rnn-train.py | theThing92/rub-met21 | 9d3d2e128a458aeb682548f1f207ee7491ca83a3 | [
"MIT"
] | null | null | null | PyRNN/rnn-train.py | theThing92/rub-met21 | 9d3d2e128a458aeb682548f1f207ee7491ca83a3 | [
"MIT"
] | null | null | null | PyRNN/rnn-train.py | theThing92/rub-met21 | 9d3d2e128a458aeb682548f1f207ee7491ca83a3 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
import argparse
import random
import operator
import pickle
import torch
from torch.optim.lr_scheduler import StepLR
sys.path.insert(0,'.')
from PyRNN.Data import Data
from PyRNN.RNNTagger import RNNTagger
from PyRNN.CRFTagger import CRFTagger
def build_optimizer(optim, model, learning_rate):
optimizer = {
'sgd': torch.optim.SGD,
'rmsprop': torch.optim.RMSprop,
'adagrad': torch.optim.Adagrad,
'adadelta': torch.optim.Adadelta,
'adam': torch.optim.Adam
}
return optimizer[optim](model.parameters(), lr=learning_rate)
def run_tagger(sentences, data, model, optimizer=None):
training_mode = optimizer is not None
model.train(training_mode)
loss_function = torch.nn.CrossEntropyLoss(reduction='sum')
### iterate over the data
num_tags, num_correct, loss_sum = 0, 0, 0.0
for iteration, (words, tags) in enumerate(sentences):
# map words and tags to numbers and create Torch variables
fwd_charIDs, bwd_charIDs = data.words2charIDvec(words)
fwd_charIDs = model.long_tensor(fwd_charIDs)
bwd_charIDs = model.long_tensor(bwd_charIDs)
tagIDs = model.long_tensor(data.tags2IDs(tags))
# optional word embeddings
word_embs = None if data.word_emb_size==0 else model.float_tensor(data.words2vecs(words))
# run the model
if type(model) is RNNTagger:
tagscores = model(fwd_charIDs, bwd_charIDs, word_embs)
loss = loss_function(tagscores, tagIDs)
# compute the tag predictions
_, predicted_tagIDs = tagscores.max(dim=-1)
elif type(model) is CRFTagger:
predicted_tagIDs, loss = \
model(fwd_charIDs, bwd_charIDs, word_embs, tagIDs)
else:
sys.exit("Error in function run_tagger")
num_tags += len(tagIDs)
num_correct += sum([1 for t, t2 in zip(tagIDs, predicted_tagIDs) if t == t2])
loss_sum += float(loss)
if training_mode:
# compute gradient and perform weight updates
optimizer.zero_grad()
loss.backward()
if args.grad_threshold > 0.0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_threshold)
optimizer.step()
if iteration%1000 == 0:
if args.verbose:
print("training items:",iteration, file=sys.stderr)
print(' '.join(words), file=sys.stderr)
print(' '.join(tags), file=sys.stderr)
print(' '.join(data.IDs2tags(predicted_tagIDs))+"\n",file=sys.stderr)
else:
print(iteration, end='\r', file=sys.stderr)
sys.stderr.flush()
accuracy = num_correct * 100.0 / num_tags
return loss_sum, accuracy
###########################################################################
# tagger training
###########################################################################
def training(args):
random.seed(args.random_seed)
data = Data(args.path_train, args.path_dev, args.word_trunc_len,
args.min_char_freq, args.word_embeddings, args.max_len)
data.save_parameters(args.path_param+".io")
### creation of the network
hyper_params = data.num_char_types, data.num_tag_types, \
args.char_embedding_size, data.word_emb_size, \
args.char_recurrent_size, args.word_recurrent_size, \
args.char_rnn_depth, args.word_rnn_depth, \
args.dropout_rate, args.crf_beam_size
if args.crf_epochs > 0: # use a CRF?
crf_model = CRFTagger(*hyper_params)
if args.gpu >= 0:
crf_model = crf_model.cuda()
model = crf_model.base_tagger # initial training of the base tagger only
else:
model = RNNTagger(*hyper_params[:-1])
if args.gpu >= 0:
model = model.cuda()
optimizer = build_optimizer(args.optimizer, model, args.learning_rate)
scheduler = StepLR(optimizer, step_size=1, gamma=args.learning_rate_decay)
### training
max_accuracy = -1.
for epoch in range(args.epochs + args.crf_epochs):
if epoch == args.epochs:
# start CRF training
model = crf_model
current_lr = optimizer.param_groups[0]['lr']
optimizer = build_optimizer(args.optimizer, model, current_lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.learning_rate_decay)
max_accuracy = -1.0
args.path_param += "-crf"
random.shuffle(data.train_sentences) # data is shuffled after each epoch
loss, accuracy = run_tagger(data.train_sentences, data, model, optimizer)
print("Epoch:", epoch+1, file=sys.stderr)
print("TrainLoss: %.0f" % loss, "TrainAccuracy: %.2f" % accuracy, file=sys.stderr)
sys.stderr.flush()
if epoch >= args.burn_in_epochs:
scheduler.step()
loss, accuracy = run_tagger(data.dev_sentences, data, model)
print(epoch+1, "DevLoss: %.0f" % loss, "DevAccuracy: %.2f" % accuracy)
sys.stdout.flush()
### keep the model which performs best on dev data
if max_accuracy < accuracy:
max_accuracy = accuracy
with open(args.path_param+".hyper", "wb") as file:
if epoch >= args.epochs:
# model is a CRFTagger
pickle.dump(hyper_params, file)
else:
# model is an RNNTagger
pickle.dump(hyper_params[:-1], file)
if model.on_gpu():
model = model.cpu()
torch.save(model.state_dict(), args.path_param+".rnn")
model = model.cuda()
else:
torch.save(model.state_dict(), args.path_param+".rnn")
###########################################################################
# main function
###########################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Training program of the RNN-Tagger.')
parser.add_argument('path_train', type=str,
help='file containing the training data')
parser.add_argument('path_dev', type=str,
help='file containing the development data')
parser.add_argument('path_param', type=str,
help='file in which the network parameters are stored')
parser.add_argument('--char_embedding_size', type=int, default=100,
help='size of the character embedding vectors')
parser.add_argument('--char_recurrent_size', type=int, default=400,
help='size of the hidden states of the RNN over characters')
parser.add_argument('--word_recurrent_size', type=int, default=400,
help='size of the hidden states of the RNN over words')
parser.add_argument('--char_rnn_depth', type=int, default=1,
help='number of character-based LSTM layers')
parser.add_argument('--word_rnn_depth', type=int, default=1,
help='number of word-based BiLSTM layers')
parser.add_argument('--crf_beam_size', type=int, default=0,
help='CRF beam search size')
parser.add_argument('--dropout_rate', type=float, default=0.5,
help='dropout rate')
parser.add_argument('--optimizer', type=str, default='sgd',
choices=['sgd', 'adagrad', 'adadelta', 'rmsprop', 'adam'],
help='selection of the optimizer')
parser.add_argument('--learning_rate', type=float, default=1.0,
help='initial learning rate')
parser.add_argument('--learning_rate_decay', type=float, default=0.95,
help='learning rate decay after each epoch')
parser.add_argument('--burn_in_epochs', type=int, default=5,
help='number of initial epochs without a learning rate change')
parser.add_argument('--crf_epochs', type=int, default=5,
help='number of final CRF training epochs')
parser.add_argument('--grad_threshold', type=float, default=1.0,
help='gradient clipping threshold')
parser.add_argument('--word_trunc_len', type=int, default=10,
help='words longer than this are truncated')
parser.add_argument('--min_char_freq', type=int, default=2,
help='characters less frequent than this are replaced by <unk>')
parser.add_argument('--epochs', type=int, default=50,
help='number of training epochs')
parser.add_argument('--word_embeddings', type=str, default=None,
help='pretrained word embeddings')
parser.add_argument('--max_len', type=int, default=100,
help='maximal sentence length')
parser.add_argument('--random_seed', type=int, default=32,
help='seed for the random number generators')
parser.add_argument('--gpu', type=int, default=-1,
help='selection of the GPU (default is CPU)')
parser.add_argument("--verbose", action="store_true", default=False,
help="increase output verbosity")
args = parser.parse_args()
if args.gpu >= 0:
if not torch.cuda.is_available():
sys.exit("Sorry, no GPU available. Drop the gpu option.")
elif args.gpu >= torch.cuda.device_count():
sys.exit("Sorry, given GPU index was too large. Choose a different GPU.")
else:
torch.cuda.set_device(args.gpu)
torch.cuda.manual_seed(args.random_seed)
else:
torch.manual_seed(args.random_seed)
training(args)
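# Example invocation, with placeholder file paths (only flags defined by the
# argparse block above are used):
#   python PyRNN/rnn-train.py train.tagged dev.tagged model/param \
#          --epochs 30 --crf_epochs 5 --learning_rate 1.0 --gpu 0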
| 39.92562 | 95 | 0.608466 | 1,174 | 9,662 | 4.841567 | 0.229983 | 0.038001 | 0.07178 | 0.014075 | 0.204609 | 0.135116 | 0.093596 | 0.081985 | 0.070373 | 0.044335 | 0 | 0.010794 | 0.252122 | 9,662 | 241 | 96 | 40.091286 | 0.77581 | 0.04823 | 0 | 0.090909 | 0 | 0 | 0.17159 | 0.00947 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017045 | false | 0 | 0.056818 | 0 | 0.085227 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92021bdc8a0aac4c752b56a0953aac9dec06b2cf | 4,281 | py | Python | tests/integrated/test_advertising_duration.py | teeheee/blatann | f8a75e68cd9d46b83d10482c5349842433dfa490 | [
"BSD-3-Clause"
] | 40 | 2018-03-01T19:49:20.000Z | 2022-03-31T11:35:06.000Z | tests/integrated/test_advertising_duration.py | teeheee/blatann | f8a75e68cd9d46b83d10482c5349842433dfa490 | [
"BSD-3-Clause"
] | 29 | 2019-03-12T18:29:57.000Z | 2022-03-30T04:21:22.000Z | tests/integrated/test_advertising_duration.py | teeheee/blatann | f8a75e68cd9d46b83d10482c5349842433dfa490 | [
"BSD-3-Clause"
] | 17 | 2019-03-27T19:11:12.000Z | 2022-03-16T06:00:08.000Z | import threading
import unittest
from blatann.gap.advertise_data import AdvertisingData, AdvertisingFlags
from blatann.gap.advertising import AdvertisingMode
from blatann.gap.scanning import ScanReport, Scanner
from blatann.utils import Stopwatch
from tests.integrated.base import BlatannTestCase, TestParams, long_running
# TODO: The acceptable duration deltas are generous because the nRF52 dev kits (being UART) are slower
# than the nRF52840 dongles by roughly an order of magnitude (1M baud UART vs. USB-CDC)
class TestAdvertisingDuration(BlatannTestCase):
def setUp(self) -> None:
self.adv_interval_ms = 50
self.adv_duration = 5
self.adv_mode = AdvertisingMode.non_connectable_undirected
self.adv_data = AdvertisingData(flags=0x06, local_name="Blatann Test")
self.dev1.advertiser.set_advertise_data(self.adv_data)
self.dev1.advertiser.set_default_advertise_params(self.adv_interval_ms, self.adv_duration, self.adv_mode)
def tearDown(self) -> None:
self.dev1.advertiser.stop()
self.dev2.scanner.stop()
@TestParams([dict(duration=x) for x in [1, 4, 10]], long_running_params=
[dict(duration=x) for x in [120, 180]])
def test_advertise_duration(self, duration):
acceptable_delta = 0.100
acceptable_delta_scan = 1.000
scan_stopwatch = Stopwatch()
dev1_addr = self.dev1.address
def on_scan_report(scanner: Scanner, report: ScanReport):
if report.peer_address != dev1_addr:
return
if scan_stopwatch.is_running:
scan_stopwatch.mark()
else:
scan_stopwatch.start()
self.dev2.scanner.set_default_scan_params(100, 100, duration+2, False)
with self.dev2.scanner.on_scan_received.register(on_scan_report):
self.dev2.scanner.start_scan()
with Stopwatch() as wait_stopwatch:
self.dev1.advertiser.start(timeout_sec=duration, auto_restart=False).wait(duration + 2)
self.assertFalse(wait_stopwatch.is_running)
self.assertFalse(self.dev1.advertiser.is_advertising)
self.assertDeltaWithin(duration, wait_stopwatch.elapsed, acceptable_delta)
self.assertDeltaWithin(duration, scan_stopwatch.elapsed, acceptable_delta_scan)
@TestParams([dict(duration=x) for x in [1, 2, 4, 10]], long_running_params=
[dict(duration=x) for x in [30, 60]])
def test_advertise_duration_timeout_event(self, duration):
acceptable_delta = 0.100
on_timeout_event = threading.Event()
def on_timeout(*args, **kwargs):
on_timeout_event.set()
with self.dev1.advertiser.on_advertising_timeout.register(on_timeout):
with Stopwatch() as stopwatch:
self.dev1.advertiser.start(timeout_sec=duration, auto_restart=False)
on_timeout_event.wait(duration + 2)
self.assertTrue(on_timeout_event.is_set())
self.assertFalse(self.dev1.advertiser.is_advertising)
self.assertDeltaWithin(duration, stopwatch.elapsed, acceptable_delta)
def test_advertise_auto_restart(self):
# Scan for longer than the advertising duration,
# but with auto-restart it should advertise for the full scan duration
scan_duration = 10
advertise_duration = 2
acceptable_delta = 0.500
dev1_addr = self.dev1.address
self.dev2.scanner.set_default_scan_params(100, 100, scan_duration, False)
w = self.dev2.scanner.start_scan()
self.dev1.advertiser.start(timeout_sec=advertise_duration, auto_restart=True)
w.wait()
self.dev1.advertiser.stop()
self.dev2.scanner.stop()
report_timestamps = [r.timestamp for r in self.dev2.scanner.scan_report.all_scan_reports
if r.peer_address == dev1_addr]
self.assertGreater(len(report_timestamps), 0)
report_seen_duration = report_timestamps[-1] - report_timestamps[0]
self.assertDeltaWithin(scan_duration, report_seen_duration, acceptable_delta)
if __name__ == '__main__':
unittest.main()
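# Hedged note: BlatannTestCase is assumed to attach two physical nRF devices
# as self.dev1/self.dev2; without hardware these integration tests cannot run.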
| 40.386792 | 114 | 0.677412 | 517 | 4,281 | 5.381044 | 0.274662 | 0.034508 | 0.064702 | 0.023005 | 0.277858 | 0.244069 | 0.209921 | 0.209921 | 0.158879 | 0.129403 | 0 | 0.028843 | 0.238729 | 4,281 | 105 | 115 | 40.771429 | 0.824793 | 0.071946 | 0 | 0.136986 | 0 | 0 | 0.005177 | 0 | 0 | 0 | 0.001035 | 0.009524 | 0.123288 | 1 | 0.09589 | false | 0 | 0.09589 | 0 | 0.219178 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9202c623dbd158b2b50431053b72ebb94e3aa74e | 1,892 | py | Python | src/ngsildclient/__init__.py | Orange-OpenSource/python-ngsild-client | 23ff31506aabd23c75befece1fb3d4536903cb2a | [
"Apache-2.0"
] | 7 | 2022-02-25T09:55:28.000Z | 2022-03-25T20:48:01.000Z | src/ngsildclient/__init__.py | Orange-OpenSource/python-ngsild-client | 23ff31506aabd23c75befece1fb3d4536903cb2a | [
"Apache-2.0"
] | null | null | null | src/ngsildclient/__init__.py | Orange-OpenSource/python-ngsild-client | 23ff31506aabd23c75befece1fb3d4536903cb2a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Software Name: ngsildclient
# SPDX-FileCopyrightText: Copyright (c) 2021 Orange
# SPDX-License-Identifier: Apache 2.0
#
# This software is distributed under the Apache 2.0;
# see the NOTICE file for more details.
#
# Author: Fabien BATTELLO <fabien.battello@orange.com> et al.
import http.client
import logging
import sys
__version__ = "0.1.10"
from .utils import iso8601, is_interactive
from .utils.uuid import shortuuid
from .model.entity import Entity
from .model.helper.postal import PostalAddressBuilder
from .model.helper.openinghours import OpeningHoursBuilder
from .model.constants import (
CORE_CONTEXT,
NESTED,
Auto,
SmartDataModels,
Rel,
TZ_UTC,
TZ_WET,
TZ_CET,
TZ_FET,
)
from .model.mock import MockerNgsi
from .api.client import Client
from .api.helper.subscription import SubscriptionBuilder
from .exceptions import NgsiError
from .model.exceptions import NgsiModelError
from .api.exceptions import NgsiApiError, NgsiContextBrokerError, NgsiAlreadyExistsError
__all__ = [
"iso8601",
"shortuuid",
"Entity",
"PostalAddressBuilder",
"OpeningHoursBuilder",
"CORE_CONTEXT",
"NESTED",
"Auto",
"Rel",
"TZ_UTC",
"TZ_WET",
"TZ_CET",
"TZ_FET",
"MockerNgsi",
"Client",
"SubscriptionBuilder",
"SmartDataModels",
"NgsiError",
"NgsiModelError",
"NgsiApiError",
"NgsiContextBrokerError",
"NgsiAlreadyExistsError",
]
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
if is_interactive():
logging.disable(logging.CRITICAL)
sys.tracebacklimit = 0
logger = logging.getLogger(__name__)
http.client.HTTPConnection.debuglevel = 1
def print_to_log(*args):
logger.debug(" ".join(args))
# monkey patch the http.client's print() function
http.client.print = print_to_log
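# Usage sketch, assuming a reachable NGSI-LD broker on the client's default
# host/port (the entity type, id and property below are illustrative only):
# e = Entity("AirQualityObserved", "AirQualityObserved:RTR01")
# e.prop("NO2", 22)
# with Client() as client:
#     client.upsert(e)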
| 22.258824 | 88 | 0.717759 | 217 | 1,892 | 6.129032 | 0.488479 | 0.040602 | 0.01203 | 0.031579 | 0.034586 | 0.034586 | 0.034586 | 0.034586 | 0.034586 | 0 | 0 | 0.014696 | 0.172833 | 1,892 | 84 | 89 | 22.52381 | 0.835144 | 0.175476 | 0 | 0 | 0 | 0 | 0.192258 | 0.028387 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016393 | false | 0 | 0.245902 | 0 | 0.262295 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9205e60845af979d57cfab6fce5c83182fed94a4 | 3,513 | py | Python | tests/test_gosubdag_children.py | flying-sheep/goatools | 1e3a74faa17cbdeef02550c7ddf17b65cf47d34a | [
"BSD-2-Clause"
] | 477 | 2015-02-10T06:54:42.000Z | 2022-03-15T12:36:11.000Z | tests/test_gosubdag_children.py | flying-sheep/goatools | 1e3a74faa17cbdeef02550c7ddf17b65cf47d34a | [
"BSD-2-Clause"
] | 174 | 2015-02-05T18:11:14.000Z | 2022-03-29T10:24:19.000Z | tests/test_gosubdag_children.py | flying-sheep/goatools | 1e3a74faa17cbdeef02550c7ddf17b65cf47d34a | [
"BSD-2-Clause"
] | 202 | 2015-01-21T12:29:23.000Z | 2022-03-01T13:26:05.000Z | #!/usr/bin/env python
"""Test creation of GoSubDag's rcntobj data member."""
from __future__ import print_function
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang. All rights reserved."
__author__ = "DV Klopfenstein"
import os
from goatools.base import get_godag
from goatools.gosubdag.gosubdag import GoSubDag
from goatools.gosubdag.plot.gosubdag_plot import GoSubDagPlot
REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../")
def test_plotgosubdag():
"""Test creation of GoSubDag's rcntobj data member."""
objrun = Run("../goatools/tests/data/mini_obo.obo")
#objrun.prt_goids_all(prt)
in_exp = [
(["GO:0000002"], set(["GO:0000001", "GO:0000002"]), {}),
(["GO:0000002"], set(["GO:0000001", "GO:0000002",
"GO:0000003", "GO:0000005",
"GO:0000006", "GO:0000008",
"GO:0000010", ]), {'children':True}),
(["GO:0000002"], set(["GO:0000001", "GO:0000002"]), {'children':0}),
(["GO:0000002"], set(["GO:0000001", "GO:0000002",
"GO:0000003", "GO:0000005",
"GO:0000006", "GO:0000008",
"GO:0000010", ]), {'children':1}),
(["GO:0000008"], set(["GO:0000001", "GO:0000003",
"GO:0000006", "GO:0000008"]), {}),
(["GO:0000008"], set(["GO:0000001", "GO:0000003",
"GO:0000006", "GO:0000008"]), {'children':False}),
(["GO:0000008"], set(["GO:0000001", "GO:0000003",
"GO:0000006", "GO:0000008"]), {'children':None}),
(["GO:0000008"], set(["GO:0000001", "GO:0000003",
"GO:0000006", "GO:0000008",
"GO:0000002", "GO:0000005",
"GO:0000004", "GO:0000007",
"GO:0000009", "GO:0000010"]), {'children':True}),
]
for in_goids, exp_goids, kws in in_exp:
objrun.run(in_goids, exp_goids, **kws)
# objrun.plt_goids("mini_obo.png", None)
class Run(object):
"""Printing GO IDs and Plotting; GODag from obo using GoSubDag."""
def __init__(self, obo):
self.go2obj_all = get_godag(os.path.join(REPO, obo))
self.gosubdag_all = GoSubDag(None, self.go2obj_all)
self.prtfmt = self.gosubdag_all.prt_attr['fmta']
def prt_goids_all(self, prt):
"""Print all GO IDs, including alternate GO IDs, in GODag."""
self.gosubdag_all.prt_goids(prtfmt=self.prtfmt, prt=prt)
def plt_goids(self, fout_img, go_sources):
"""Plot GO IDs."""
# % src/bin/go_plot.py GOs --obo=../goatools/data/i86.obo --outfile=t00.jpg --mark_alt_id
gosubdag = GoSubDag(go_sources, self.go2obj_all)
objplt = GoSubDagPlot(gosubdag, mark_alt_id=True)
objplt.plt_dag(os.path.join(REPO, fout_img))
def run(self, go_sources, exp_gos, **kws):
"""Create GoSubDag using specified GO sources."""
print("\nSRCS: {GOs}".format(GOs=go_sources))
gosubdag = GoSubDag(go_sources, self.go2obj_all, **kws)
gosubdag.prt_goids(gosubdag.go2nt)
assert set(gosubdag.go2nt) == exp_gos, "ACT({}) != EXP({})\n{} {}".format(
sorted(gosubdag.go2nt), sorted(exp_gos), go_sources, kws)
if __name__ == '__main__':
test_plotgosubdag()
# Copyright (C) 2016-2018, DV Klopfenstein, H Tang. All rights reserved.
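# Hedged note: Run() resolves the mini ontology relative to REPO, so executing
# this module assumes ../goatools/tests/data/mini_obo.obo exists.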
| 43.37037 | 97 | 0.565329 | 419 | 3,513 | 4.560859 | 0.279236 | 0.047096 | 0.050235 | 0.058608 | 0.385139 | 0.3663 | 0.3663 | 0.310832 | 0.253271 | 0.253271 | 0 | 0.141363 | 0.265016 | 3,513 | 80 | 98 | 43.9125 | 0.598761 | 0.146598 | 0 | 0.203704 | 0 | 0 | 0.236585 | 0.011812 | 0 | 0 | 0 | 0 | 0.018519 | 1 | 0.092593 | false | 0 | 0.092593 | 0 | 0.203704 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92071f8e67600ce7e492cbb22dfc9e21581246f3 | 5,078 | py | Python | lib_ddos_simulator/old/api/api.py | jfuruness/lib_ddos_simulator | 2d860fd3f35f4c25262f5269251eed89975f95e8 | [
"BSD-4-Clause"
] | 1 | 2020-04-01T22:42:36.000Z | 2020-04-01T22:42:36.000Z | lib_ddos_simulator/old/api/api.py | jfuruness/lib_ddos_simulator | 2d860fd3f35f4c25262f5269251eed89975f95e8 | [
"BSD-4-Clause"
] | null | null | null | lib_ddos_simulator/old/api/api.py | jfuruness/lib_ddos_simulator | 2d860fd3f35f4c25262f5269251eed89975f95e8 | [
"BSD-4-Clause"
] | 1 | 2020-02-16T17:55:46.000Z | 2020-02-16T17:55:46.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This module creates the flask app to shuffle users
App must be here because flask explodes if you move to subdir"""
__Lisence__ = "BSD"
__maintainer__ = "Justin Furuness"
__email__ = "jfuruness@gmail.com"
__status__ = "Development"
import pkg_resources
from flasgger import Swagger, swag_from
from flask import Flask, request
from .api_utils import format_json
from .api_utils import init_sim
from .api_utils import complete_turn
from .api_utils import connect_disconnect_uids
from ..managers import Manager
def create_app():
"""Create and configure an instance of the Flask application."""
app = Flask(__name__)
app.managers = {}
# https://stackoverflow.com/a/32965521/8903959
version = pkg_resources.get_distribution('lib_ddos_simulator').version
template = {
"swagger": "2.0",
"info": {
"title": "lib_ddos_simulator API",
"description": "Provides access to a number of shuffling algorithms for DDOS mitigation",
"contact": {
"responsibleOrganization": "Justin Furuness",
"responsibleDeveloper": "Justin Furuness",
"email": "jfuruness@gmail.com",
"url": "https://github.com/jfuruness/lib_ddos_simulator#lib_ddos_simulator",
},
"termsOfService": "https://github.com/jfuruness/lib_ddos_simulator/blob/master/LICENSE",
"version": version,
},
# "host": "lib_ddos_simulator_api.com", # overrides localhost:500
# "basePath": "/api", # base bash for blueprint registration
"schemes": [
"http",
"https"
],
"operationId": "getmyData"
}
swagger = Swagger(app, template=template)
@app.route("/")
@app.route("/home")
def home():
return "App is running"
@app.route("/init")
@swag_from("flasgger_docs/init_sim.yml")
@format_json(desc="Initializes simulation",
req_args=["uids", "num_buckets", "manager", "sys_id"])
def init():
"""Initializes app
input user ids, bucket ids, and manager name"""
# http://0.0.0.0:5000/init?uids=1,2,3,4&num_buckets=3&manager=protag_manager_merge
user_ids = [int(x) for x in request.args.get("uids", "").split(",")]
num_buckets = int(request.args.get("num_buckets"))
manager_str = request.args.get("manager", "")
manager_cls = None
for manager in Manager.runnable_managers:
if manager_str.lower() == manager.__name__.lower():
manager_cls = manager
assert manager_cls is not None, "Manager class is not correct"
sys_id = int(request.args.get("sys_id"))
# init here
init_sim(app, user_ids, num_buckets, manager_cls, sys_id)
return app.managers[sys_id].json
@app.route("/round")
@swag_from("flasgger_docs/turn.yml")
@format_json(desc="Cause simulation to take actions",
req_args=["sys_id"])
def round():
"""Takes a turn. Input downed buckets"""
# http://0.0.0.0:5000/round?bids=1,2,3
if len(request.args.get("bids", [])) > 0:
bucket_ids = [int(x) for x in request.args.get("bids").split(",")]
else:
bucket_ids = []
sys_id = int(request.args.get("sys_id"))
complete_turn(app, bucket_ids, sys_id)
return app.managers[sys_id].json
@app.route("/connect_disconnect")
@swag_from("flasgger_docs/connect_disconnect.yml")
@format_json(desc="Connect and disconnect users",
req_args=["sys_id"])
def connect_disconnect():
"""Connects and disconnects users."""
# http://0.0.0.0:5000/connect_disconnect?cuids=1,2,3&duids=4,5,6
if len(request.args.get("cuids", [])) > 0:
connecting_uids = [int(x) for x in
request.args.get("cuids").split(",")]
else:
connecting_uids = []
if len(request.args.get("duids", [])) > 0:
disconnecting_uids = [int(x) for x in
request.args.get("duids").split(",")]
else:
disconnecting_uids = []
sys_id = int(request.args.get("sys_id"))
connect_disconnect_uids(app,
connecting_uids,
disconnecting_uids,
sys_id)
return app.managers[sys_id].json
@app.route("/get_mappings")
@swag_from("flasgger_docs/get_mappings.yml")
@format_json(desc="Gets mappings", req_args=["sys_id"])
def get_mappings():
"""Gets mappings of users"""
# http://0.0.0.0:5000/get_mappings
sys_id = int(request.args.get("sys_id"))
return app.managers[sys_id].json
@app.route("/runnable_managers")
@swag_from("flasgger_docs/runnable_managers.yml")
@format_json(desc="List of runnable managers")
def runnable_managers():
return {"managers": ([x.__name__ for x in
Manager.runnable_managers])}
return app
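# Usage sketch (illustrative; this module uses relative imports, so it is meant
# to be imported from its package rather than run directly):
#   from lib_ddos_simulator.old.api.api import create_app
#   create_app().run(host="0.0.0.0", port=5000)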
| 32.551282 | 97 | 0.60575 | 623 | 5,078 | 4.720706 | 0.274478 | 0.032302 | 0.061884 | 0.034002 | 0.230874 | 0.196192 | 0.16355 | 0.125468 | 0.090445 | 0.053043 | 0 | 0.018858 | 0.258566 | 5,078 | 155 | 98 | 32.76129 | 0.762284 | 0.149665 | 0 | 0.126214 | 0 | 0 | 0.233365 | 0.0403 | 0 | 0 | 0 | 0 | 0.009709 | 1 | 0.067961 | false | 0 | 0.07767 | 0.019417 | 0.213592 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9207e392d5244627558a7eff04b6d9be4add7394 | 2,882 | py | Python | src/controllers/MPCController.py | MatthiasDR96/inverted_pendulum_simulator | c13314b625445c13a4225b88480e2bed772fe466 | [
"MIT"
] | null | null | null | src/controllers/MPCController.py | MatthiasDR96/inverted_pendulum_simulator | c13314b625445c13a4225b88480e2bed772fe466 | [
"MIT"
] | null | null | null | src/controllers/MPCController.py | MatthiasDR96/inverted_pendulum_simulator | c13314b625445c13a4225b88480e2bed772fe466 | [
"MIT"
] | null | null | null | import cvxpy
import numpy as np
class Control:
def __init__(self, model):
# Bind model
self.model = model
# Desired x_pos
self.xd = 0.0
# Control parameters
self.N = 100
# Control limits
self.umax = np.reshape(np.repeat(10, self.N), (self.N, 1))
self.xmax = np.reshape(np.repeat(1, 4 * self.N), (4 * self.N, 1))
# Control parameters
if self.model.name == 'Pendulum':
self.Q = np.mat([[100, 0, 0, 0], [0, 10, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
self.R = np.mat(np.identity(1))
self.P = np.mat([[1000, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
else:
self.Q = np.mat([[1.0, 0.0], [0.0, 1.0]])
self.R = np.mat(np.identity(1))
self.P = np.mat([[1.0, 0.0], [0.0, 1.0]])
# Get dynamics
A = np.mat(self.model.A_disc)
B = np.mat(self.model.B_disc)
# Alternative to calculating Abar, Bbar, Cbar, and Ahat
Abar = np.vstack((np.zeros((len(A), self.N*len(A))), np.hstack((np.kron(np.eye(self.N-1), A),
np.zeros((len(A)*(self.N-1), len(A)))))))
Bbar = np.kron(np.eye(self.N), B)
self.Ahat = (np.identity(np.shape(Abar)[0]) - Abar).I * np.kron(np.identity(self.N), A)[:, 0:len(A)]
self.Cbar = (np.identity(np.shape(Abar)[0]) - Abar).I * Bbar
# Calculate penalty matrices
tm1 = np.eye(self.N)
tm1[self.N - 1, self.N - 1] = 0
tm2 = np.zeros((self.N, self.N))
tm2[self.N - 1, self.N - 1] = 1
self.Qbar = np.kron(tm1, self.Q) + np.kron(tm2, self.P)
self.Rbar = np.kron(np.eye(self.N), self.R)
def set_desired_position(self, x):
self.xd = x
def control(self, state):
# Initial state
x0 = np.reshape(np.mat(state), (len(state), 1))
# Set optimization variables
u = cvxpy.Variable((self.N, 1))
x = cvxpy.Variable((self.N * len(state), 1))
        # Compute cost
cost = 0.5 * cvxpy.quad_form(x, self.Qbar)
cost += 0.5 * cvxpy.quad_form(u, self.Rbar)
# Create state constraints
constr = [x == self.Cbar @ u + self.Ahat @ x0]
# Create control constraints
constr += [np.vstack((np.identity(self.N), -np.identity(self.N))) @ u <= np.vstack((self.umax, self.umax))]
# Create problem
prob = cvxpy.Problem(cvxpy.Minimize(cost), constr)
# Solve problem
prob.solve(verbose=False)
# Get optimal result
if prob.status == cvxpy.OPTIMAL:
ou = np.array(u.value[0, :]).flatten()
else:
ou = [0.0]
# Get only first control signal
u = ou[0]
return u
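# Usage sketch (illustrative; `model` stands for any object exposing the `name`,
# `A_disc`, and `B_disc` attributes assumed by __init__ above):
#   controller = Control(model)
#   controller.set_desired_position(0.0)
#   u = controller.control([0.0, 0.0, 0.1, 0.0])  # state vector, length 4 for the pendulum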
| 32.382022 | 118 | 0.497224 | 441 | 2,882 | 3.22449 | 0.226757 | 0.059072 | 0.067511 | 0.067511 | 0.224332 | 0.223629 | 0.123769 | 0.123769 | 0.085795 | 0.079466 | 0 | 0.058338 | 0.327897 | 2,882 | 88 | 119 | 32.75 | 0.675787 | 0.123525 | 0 | 0.083333 | 0 | 0 | 0.00319 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.041667 | 0 | 0.145833 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
920a0f56b7d98522ca08ddd15424f5561358eed0 | 5,211 | py | Python | aclpwn/pathfinding.py | aas-n/aclpwn.py | b447f20b2b2ea0905e6430f6e6bb85237f13e23f | [
"MIT"
] | 5 | 2021-05-18T21:25:46.000Z | 2022-03-09T05:09:32.000Z | aclpwn/pathfinding.py | aas-n/aclpwn.py | b447f20b2b2ea0905e6430f6e6bb85237f13e23f | [
"MIT"
] | null | null | null | aclpwn/pathfinding.py | aas-n/aclpwn.py | b447f20b2b2ea0905e6430f6e6bb85237f13e23f | [
"MIT"
] | null | null | null | from aclpwn import utils, database
# Cost map for relationships
costmap = {
'MemberOf': 0,
'AddMember': 1,
'GenericAll': 1,
'GenericWrite': 1,
'WriteOwner': 3,
'WriteDacl': 2,
'DCSync': 0,
'Owns': 2,
'GetChangesAll': 0,
'GetChanges': 0,
'AllExtendedRights': 2
}
def dijkstra_find(fromid, toid, dbhost):
# This is "documented" here
# https://github.com/neo4j/neo4j/blob/3.3/community/server/src/main/java/org/neo4j/server/rest/web/DatabaseActions.java
# https://github.com/neo4j/neo4j/blob/3.3/community/server/src/main/java/org/neo4j/server/domain/RelationshipExpanderBuilder.java
rellist = [{"type": rel, "direction": "out"} for rel in costmap.keys()]
data = {
"to" : "http://%s:7474/db/data/node/%s" % (dbhost, toid),
"max_depth" : 100,
"relationships" : rellist,
"algorithm" : "dijkstra",
"cost_property": "aclpwncost",
"default_cost": 1
}
resp = database.restapi.post('http://%s:7474/db/data/node/%s/paths' % (dbhost, fromid), json=data)
data = resp.json()
paths = []
for path in data:
nodes, rels = resolve_rest_path(path)
paths.append((nodes, rels, path))
return paths
def dijkstra_find_cypher(startnode, endnode, starttype='User', endtype='User'):
query = "MATCH (n:%s {name: $startnode}), (m:%s {name: $endnode}) " \
"CALL gds.beta.shortestPath.dijkstra.stream({sourceNode: id(n), targetNode: id(m), " \
"relationshipWeightProperty: 'aclpwncost', nodeProjection: '*', relationshipProjection: {" \
"all: {type: '*', properties: 'aclpwncost', orientation: 'NATURAL'}}}) " \
"YIELD nodeIds, costs " \
"RETURN nodeIds, costs"
path = []
with database.driver.session() as session:
with session.begin_transaction() as tx:
for record in tx.run(query % (starttype, endtype), startnode=startnode, endnode=endnode, property='aclpwncost'):
path.append(record)
paths = []
if path:
nodes, rels = resolve_dijkstra_path(path[0])
paths.append((nodes, rels, path[0]))
return paths
queries = {
# Query all shortest paths
'shortestonly': "MATCH (n:%s {name: $startnode}),"
"(m:%s {name: $endnode}),"
" p=allShortestPaths((n)-[:MemberOf|AddMember|GenericAll|"
"GenericWrite|WriteOwner|WriteDacl|Owns|DCSync|GetChangesAll|AllExtendedRights*1..]->(m))"
" RETURN p",
# Query all simple paths (more expensive query than above)
# credits to https://stackoverflow.com/a/40062243
'allsimple': "MATCH (n:%s {name: $startnode}),"
"(m:%s {name: $endnode}),"
" p=(n)-[:MemberOf|AddMember|GenericAll|"
"GenericWrite|WriteOwner|WriteDacl|Owns|DCSync|GetChangesAll|AllExtendedRights*1..]->(m)"
"WHERE ALL(x IN NODES(p) WHERE SINGLE(y IN NODES(p) WHERE y = x))"
" RETURN p"
}
def get_path(startnode, endnode, starttype='User', endtype='User', querytype='allsimple'):
records = []
with database.driver.session() as session:
with session.begin_transaction() as tx:
for record in tx.run(queries[querytype] % (starttype, endtype), startnode=startnode, endnode=endnode):
records.append(record)
return records
def get_path_cost(record):
nmap = utils.getnodemap(record['p'].nodes)
cost = 0
for el in record['p']:
cost += costmap[el.type]
return cost
def resolve_dijkstra_path(path):
nodes = []
rels = []
nq = "MATCH (n)-[w {aclpwncost: $cost}]->(m) WHERE ID(n) = $source AND ID(m) = $dest RETURN n,w,m"
bnq = "MATCH (n)-[w]->(m) WHERE ID(n) = $source AND ID(m) = $dest RETURN n,w,m"
with database.driver.session() as session:
with session.begin_transaction() as tx:
pv = path.values()
for i in range(1, len(pv[0])):
res = tx.run(nq, source=pv[0][i-1], cost=pv[1][i]-pv[1][i-1], dest=pv[0][i])
data = res.single()
# No result, most likely an invalid path, but query for a relationship with any cost regardless
if not data:
res = tx.run(bnq, source=pv[0][i-1], dest=pv[0][i])
data = res.single()
nodes.append(data['n'])
rels.append(data['w'])
# Append the last node
nodes.append(data['m'])
return (nodes, rels)
def resolve_rest_path(path):
nodes = []
rels = []
nq = "MATCH (n) WHERE id(n) = {id} RETURN n"
rq = "MATCH ()-[n]-() WHERE id(n) = {id} RETURN n LIMIT 1"
with database.driver.session() as session:
with session.begin_transaction() as tx:
for nodeurl in path['nodes']:
nid = nodeurl.split('/')[-1]
res = tx.run(nq, id=int(nid))
nodes.append(res.single()['n'])
for relurl in path['relationships']:
nid = relurl.split('/')[-1]
res = tx.run(rq, id=int(nid))
rels.append(res.single()['n'])
return (nodes, rels)
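# Usage sketch (illustrative node names; assumes `database.driver` was already
# initialized by the package's own connection setup):
#   paths = dijkstra_find_cypher('USER@DOMAIN.LOCAL', 'DOMAIN ADMINS@DOMAIN.LOCAL',
#                                starttype='User', endtype='Group')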
| 39.778626 | 133 | 0.573211 | 631 | 5,211 | 4.700475 | 0.283677 | 0.024275 | 0.024275 | 0.033715 | 0.422792 | 0.390425 | 0.331086 | 0.302765 | 0.287256 | 0.26062 | 0 | 0.01574 | 0.268471 | 5,211 | 130 | 134 | 40.084615 | 0.762329 | 0.104203 | 0 | 0.203704 | 0 | 0.046296 | 0.300988 | 0.079004 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.009259 | 0 | 0.12037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
920a9793f168d206e013f7bbb71c589069f6db3f | 6,343 | py | Python | src/data/load_data_from_text.py | dsridhar91/hstm | af943b3f34e443f6fda8115b300fc828ba2ff6ba | [
"MIT"
] | null | null | null | src/data/load_data_from_text.py | dsridhar91/hstm | af943b3f34e443f6fda8115b300fc828ba2ff6ba | [
"MIT"
] | null | null | null | src/data/load_data_from_text.py | dsridhar91/hstm | af943b3f34e443f6fda8115b300fc828ba2ff6ba | [
"MIT"
] | null | null | null | import os
import json
import numpy as np
import pandas as pd
import gzip
import argparse
from collections import Counter
def load_mixed_corpus():
grocery_file = '../dat/reviews_Grocery_and_Gourmet_Food_5.json'
office_file = '../dat/reviews_Office_Products_5.json'
doc_groc, _ = load_amazon(grocery_file, 5000, 'reviewText', 'overall')
doc_office, _ = load_amazon(office_file, 5000, 'reviewText', 'overall')
responses_groc = np.ones(doc_groc.shape[0])
responses_office = np.zeros(doc_office.shape[0])
return np.hstack([doc_groc, doc_office]), np.hstack([responses_groc, responses_office])
def load_framing_corpus(data_dir, topic, annotation_code_file):
data_file = os.path.join(data_dir, topic, topic + '_labeled.json')
with open(data_file, 'r') as f:
data = json.loads(f.read())
with open(annotation_code_file, 'r') as f:
annotation_dict = json.loads(f.read())
docs = []
responses = []
for key, value in data.items():
if 'tone' not in value['annotations']:
continue
else:
tone_dict = value['annotations']['tone']
if len(tone_dict.keys()) == 0:
continue
annotations = Counter()
for annotator, annot_list in tone_dict.items():
for annot_dict in annot_list:
tone_code = annotation_dict[str(annot_dict['code'])]
annotations.update([tone_code])
majority_label = annotations.most_common()[0][0]
if majority_label == 'Neutral':
continue
else:
label = 1 if 'Pro' in majority_label else 0
responses.append(label)
text = value['text']
docs.append(text)
return np.array(docs), np.array(responses)
def load_yelp(data_file, subsample=None):
df = pd.read_csv(data_file, names=['label', 'text'])
df.loc[df.label==1, 'label'] = 0
df.loc[df.label==2, 'label'] = 1
if subsample is not None:
indices = np.arange(df.shape[0])
np.random.shuffle(indices)
subsample_inds = indices[:subsample]
df = df.iloc[subsample_inds, :]
docs = df['text'].values
responses = df['label'].values
return docs, responses
def load_yelp_full(data_file):
train = data_file + 'train.csv'
test = data_file + 'test.csv'
train_df = pd.read_csv(train, names=['label', 'text'])
test_df = pd.read_csv(test, names=['label', 'text'])
full_df = pd.concat([train_df, test_df])
full_df.loc[full_df.label==1, 'label'] = 0
full_df.loc[full_df.label==2, 'label'] = 1
docs = full_df['text'].values
responses = full_df['label'].values
return docs, responses
def load_peerread(data_file):
df = pd.read_csv(data_file)
docs = df['abstract_text'].values
responses = df['decision'].astype('int64').values
return docs, responses
def load_semantic_scholar(data_file, min_year, max_year):
papers = []
with gzip.open(data_file, 'rb') as f:
for line in f:
paper = json.loads(line)
papers.append(paper)
papers = papers[0]
docs = []
responses = []
for paper in papers:
if 'year' in paper and 'inCitations' in paper and 'paperAbstract' in paper:
year = int(paper['year'])
if year >= min_year and year <= max_year:
if len(paper['paperAbstract']) > 0:
responses.append(len(paper['inCitations']))
docs.append(paper['paperAbstract'])
docs = np.array(docs)
responses = np.array(responses)
return docs, responses
def load_amazon(data_file, subsample, text_attr_key, label_key, make_bool=False):
documents = []
file_handle = open(data_file, 'r')
for line in file_handle.readlines():
doc = json.loads(line)
documents.append(doc)
review_indices = np.arange(len(documents))
np.random.shuffle(review_indices)
iter_indices = review_indices[:subsample]
docs = []
responses = []
for idx in iter_indices:
doc = documents[idx]
if label_key in doc and text_attr_key in doc:
docs.append(doc[text_attr_key])
responses.append(doc[label_key])
docs = np.array(docs)
responses = np.array(responses)
filtered = preprocess_ratings(responses)
docs = docs[filtered]
responses = responses[filtered]
if make_bool:
responses[responses <= 2.0] = 0
responses[responses >= 4.0] = 1
return docs, responses
def preprocess_ratings(ratings):
valid_ratings = (ratings <= 2.0) | (ratings >= 4.0)
return valid_ratings
def main(args):
data_file = args.data_file
out = args.outfile
dataset = args.data
framing_topic = args.framing_topic
seed = 12345
np.random.seed(seed)
if dataset == 'amazon':
if data_file == "":
data_file = '../dat/reviews_Office_Products_5.json'
doc,responses = load_amazon(data_file, 20000, 'reviewText', 'overall')
elif dataset == 'amazon_binary':
if data_file == "":
data_file = '../dat/reviews_Grocery_and_Gourmet_Food_5.json'
doc,responses = load_amazon(data_file, 20000, 'reviewText', 'overall', make_bool=True)
elif dataset == 'yelp':
if data_file == "":
data_file = '../dat/yelp_review_polarity_csv/train.csv'
doc, responses = load_yelp(data_file, 20000)
elif dataset == 'yelp_full':
if data_file == "":
data_file = '../dat/yelp_review_polarity_csv/'
doc, responses = load_yelp_full(data_file)
elif dataset == 'peerread':
if data_file == "":
data_file = '../dat/peerread_abstracts.csv'
doc, responses = load_peerread(data_file)
elif dataset == 'framing_corpus':
if data_file == "":
data_file = '../dat/framing/' #+ framing_topic + '/'
annotation_code_file = '../dat/framing/codes.json'
doc, responses = load_framing_corpus(data_file, framing_topic, annotation_code_file)
else:
if data_file == "":
data_file = '../dat/cs_papers.gz'
doc, responses = load_semantic_scholar(data_file, 2010, 2016)
if dataset == 'semantic_scholar':
responses = np.log(responses + 1)
if dataset == 'amazon' or dataset == 'semantic_scholar':
responses = (responses - responses.mean()) / (responses.std())
df = pd.DataFrame(np.column_stack((doc.T, responses.T)) , columns=['text', 'label'])
os.makedirs('../dat/csv_proc', exist_ok=True)
if dataset == 'framing_corpus':
dataset = framing_topic
if out == "":
out = '../dat/csv_proc/' + dataset + '.csv'
df.to_csv(out)
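# Example invocations (illustrative; they assume the default '../dat' input
# files referenced above exist):
#   python load_data_from_text.py --data yelp
#   python load_data_from_text.py --data framing_corpus --framing-topic immigration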
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_file", action="store", default="")
parser.add_argument("--outfile", action="store", default="")
parser.add_argument("--data", action="store", default="amazon")
parser.add_argument("--framing-topic", action='store', default='immigration')
args = parser.parse_args()
main(args) | 29.640187 | 88 | 0.702034 | 907 | 6,343 | 4.689085 | 0.197354 | 0.069598 | 0.022572 | 0.023043 | 0.238655 | 0.180108 | 0.122972 | 0.116388 | 0.06419 | 0.046555 | 0 | 0.013153 | 0.148983 | 6,343 | 214 | 89 | 29.640187 | 0.774731 | 0.003311 | 0 | 0.16092 | 0 | 0 | 0.138247 | 0.046346 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051724 | false | 0 | 0.04023 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
920dc979bbbe3c3f4d049b066bb2c9d6cdf12abd | 2,396 | py | Python | app/crud/address.py | oscarine/oscarine-api | ed4760724e42ac13aeaa3a566d19bf31113c9b8f | [
"MIT"
] | 7 | 2019-09-18T19:45:46.000Z | 2020-05-18T20:07:07.000Z | app/crud/address.py | oscarine/oscarine-api | ed4760724e42ac13aeaa3a566d19bf31113c9b8f | [
"MIT"
] | 252 | 2019-09-18T20:25:03.000Z | 2022-03-25T11:23:50.000Z | app/crud/address.py | oscarine/oscarine-api | ed4760724e42ac13aeaa3a566d19bf31113c9b8f | [
"MIT"
] | 8 | 2019-09-18T11:02:45.000Z | 2021-05-18T17:08:51.000Z | from datetime import datetime
from typing import List
from fastapi.encoders import jsonable_encoder
from sqlalchemy.orm import Session
from app.api.utils.db import clone_db_model
from app.db_models.address import Address
from app.models.address import EditAddress, UserAddress
def add_user_address(
db_session: Session, *, user_id: int, data: UserAddress
) -> Address:
address = Address(user_id=user_id)
data = jsonable_encoder(data, exclude_none=True)
for field in data:
setattr(address, field, data[field])
address.location = f"POINT({data['longitude']} {data['latitude']})"
db_session.add(address)
db_session.commit()
db_session.refresh(address)
return address
def get_user_addresses(
db_session: Session, *, user_id: int, include_archived: bool = False
) -> List[Address]:
query = db_session.query(Address).filter(Address.user_id == user_id)
if not include_archived:
query = query.filter(Address.archived == False)
query = query.order_by(Address.id.desc())
if addresses := query.all():
return addresses
return None
def get_address_by_id(
db_session: Session, *, id: int, user_id: int = None, include_archived: bool = False
) -> Address:
query = db_session.query(Address).filter(Address.id == id)
if user_id:
query.filter(Address.user_id == user_id)
if not include_archived:
query = query.filter(Address.archived == False)
if address := query.first():
return address
return None
def edit_user_address(
db_session: Session, *, address: Address, data: EditAddress
) -> Address:
address_clone = clone_db_model(model=address)
data = jsonable_encoder(data, exclude_none=True)
for field in data:
setattr(address_clone, field, data[field])
if "longitude" in data and "latitude" in data:
address_clone.location = f"POINT({data['longitude']} {data['latitude']})"
else:
address_clone.location = f"POINT({address.longitude} {address.latitude})"
db_session.add(address_clone)
# Archive the old address after creating the clone.
address.archived = True
db_session.commit()
db_session.refresh(address_clone)
return address_clone
def delete_user_address(db_session: Session, *, address: Address):
address.deleted_at = datetime.utcnow()
address.archived = True
db_session.commit()
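# Usage sketch (illustrative; `session` is assumed to be an open SQLAlchemy
# Session and `payload` a validated UserAddress schema instance):
#   address = add_user_address(session, user_id=1, data=payload)
#   addresses = get_user_addresses(session, user_id=1)
#   delete_user_address(session, address=address)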
| 32.378378 | 88 | 0.707429 | 316 | 2,396 | 5.18038 | 0.221519 | 0.07697 | 0.04887 | 0.036652 | 0.504582 | 0.434942 | 0.372022 | 0.227856 | 0.179597 | 0.179597 | 0 | 0 | 0.186561 | 2,396 | 73 | 89 | 32.821918 | 0.839918 | 0.020451 | 0 | 0.333333 | 0 | 0 | 0.064819 | 0.031983 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.116667 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
920dd0484a64f720a0513aad802820f70a897dcc | 2,493 | py | Python | Python/examples/AdvancedSpecificMultipleInstruments.py | william-richards-idexx/fgt-SDK | 674b572c714302be561b08ba63ff3358dfa13cea | [
"Apache-2.0"
] | 20 | 2019-05-21T17:43:07.000Z | 2022-03-22T16:38:59.000Z | Python/examples/AdvancedSpecificMultipleInstruments.py | william-richards-idexx/fgt-SDK | 674b572c714302be561b08ba63ff3358dfa13cea | [
"Apache-2.0"
] | 28 | 2019-05-21T17:36:24.000Z | 2022-03-21T07:21:51.000Z | Python/examples/AdvancedSpecificMultipleInstruments.py | william-richards-idexx/fgt-SDK | 674b572c714302be561b08ba63ff3358dfa13cea | [
"Apache-2.0"
] | 7 | 2020-09-18T23:47:25.000Z | 2022-03-03T09:36:48.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Advanced Specific Multiple Instruments
This example shows how to use specific channels ID and multiple connected
instruments
Requires at least two Fluigent pressure channels
Copyright (c) Fluigent 2019. All Rights Reserved.
"""
# Print function for Python 2 compatibility
from __future__ import print_function
from Fluigent.SDK import fgt_detect, fgt_init, fgt_close
from Fluigent.SDK import fgt_get_controllersInfo
from Fluigent.SDK import fgt_get_pressureChannelCount, fgt_get_pressureChannelsInfo
from Fluigent.SDK import fgt_get_pressure
# Detect all controllers
SNs, types = fgt_detect()
controllerCount = len(SNs)
print('Number of controllers detected: {}'.format(controllerCount))
# List all found controllers' serial number and type
for i, sn in enumerate(SNs):
print('Detected instrument at index: {}, ControllerSN: {}, type: {}'\
.format(i, sn, str(types[i])))
## Initialize specific instruments
# Initialize only specific instrument controllers here. If you do not want
# a controller in the list, or if you want a specific order (e.g. LineUP
# before MFCS instruments), rearrange the parsed SN table
fgt_init(SNs)
# Get total number of initialized pressure channels
print('Total number of pressure channels: {}'.format(fgt_get_pressureChannelCount()))
## Get detailed information about all controllers
controllerInfoArray = fgt_get_controllersInfo()
for i, controllerInfo in enumerate(controllerInfoArray):
print('Controller info at index: {}'.format(i))
print(controllerInfo)
## Get detailed information about all pressure channels
pressureInfoArray = fgt_get_pressureChannelsInfo()
for i, pressureInfo in enumerate(pressureInfoArray):
print('Pressure channel info at index: {}'.format(i))
print(pressureInfo)
## Read pressure using unique ID
# If you want to address a specific channel, unique ID can be used. However
# if hardware changed channel may not be found
try:
pressure1 = fgt_get_pressure(pressureInfoArray[0].indexID)
print('Read pressure from ID {} : {:.2f}\n'.format(pressureInfoArray[0].indexID, pressure1))
except IndexError as e:
print('WARNING: Cannot read pressure on channel 0')
try:
pressure2 = fgt_get_pressure(pressureInfoArray[1].indexID)
print('Read pressure from ID {} : {:.2f}\n'.format(pressureInfoArray[1].indexID, pressure2))
except IndexError as e:
print('WARNING: Cannot read pressure on channel 1')
## Close the session
fgt_close()
| 35.112676 | 96 | 0.765744 | 331 | 2,493 | 5.679758 | 0.389728 | 0.028723 | 0.031915 | 0.044681 | 0.233511 | 0.18883 | 0.121277 | 0.121277 | 0.121277 | 0.121277 | 0 | 0.008447 | 0.145207 | 2,493 | 70 | 97 | 35.614286 | 0.873768 | 0.372242 | 0 | 0.125 | 0 | 0 | 0.225618 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.15625 | 0 | 0.15625 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9213947a6c459942a56f0c3d98a57e842bc0d8f6 | 875 | py | Python | _scripts/one-time/makeDecosNonCampaign.py | Son-Guhun/Titan-Land-Lands-of-Plenty | edeca1d5437a7397195799ebf4b9585ee4609fed | [
"MIT"
] | 12 | 2019-05-27T16:02:28.000Z | 2021-01-08T09:32:08.000Z | _scripts/one-time/makeDecosNonCampaign.py | Son-Guhun/Titan-Land-Lands-of-Plenty | edeca1d5437a7397195799ebf4b9585ee4609fed | [
"MIT"
] | 209 | 2019-04-06T15:16:52.000Z | 2021-07-03T02:11:53.000Z | _scripts/one-time/makeDecosNonCampaign.py | Son-Guhun/Titan-Land-Lands-of-Plenty | edeca1d5437a7397195799ebf4b9585ee4609fed | [
"MIT"
] | 1 | 2021-05-26T12:13:35.000Z | 2021-05-26T12:13:35.000Z | """This script iterates over all decorations in a .ini database and set their Specialart field
to the format expected by the SpecialEffect system.
"""
from myconfigparser import MyConfigParser, load_unit_data, get_decorations, Section
dataBase = '../development/table/unit.ini'
def configure_decorations(unit_data, decoration_list):
for decoration in decoration_list:
data = Section(unit_data[decoration])
if data['_parent'] != decoration:
if data['campaign'] == '1' and data['isbldg'] == '0' and data['hostilePal'] == '0':
data['special'] = '1'
data['campaign'] = '0'
def do(file_path):
    with open(file_path) as f:
        unit_data = load_unit_data(f)
    decorations = get_decorations(unit_data)
    configure_decorations(unit_data, decorations)
    with open(file_path, 'w') as f:
        unit_data.write(f)
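# Entry-point sketch (an assumption, not part of the original script): wires in
# the otherwise unused `dataBase` path defined above.
if __name__ == '__main__':
    do(dataBase)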
| 33.653846 | 95 | 0.668571 | 110 | 875 | 5.136364 | 0.472727 | 0.113274 | 0.100885 | 0.099115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007299 | 0.217143 | 875 | 25 | 96 | 35 | 0.817518 | 0.163429 | 0 | 0 | 0 | 0 | 0.111724 | 0.04 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.0625 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
92162c36211ae5a295d2c5d9300ea4a55a55b0d5 | 4,135 | py | Python | gradient/cli/tensorboards.py | vishalbelsare/gradient-cli | c0e06252925cad3ad73d47ded1100f6b0cb0989a | [
"0BSD"
] | 52 | 2019-06-10T04:20:00.000Z | 2021-12-06T01:13:26.000Z | gradient/cli/tensorboards.py | vishalbelsare/gradient-cli | c0e06252925cad3ad73d47ded1100f6b0cb0989a | [
"0BSD"
] | 125 | 2019-06-05T16:34:19.000Z | 2022-03-30T18:46:06.000Z | gradient/cli/tensorboards.py | vishalbelsare/gradient-cli | c0e06252925cad3ad73d47ded1100f6b0cb0989a | [
"0BSD"
] | 11 | 2019-07-16T06:48:55.000Z | 2021-12-15T12:41:51.000Z | import click
from gradient.cli import common
from gradient.cli.cli import cli
from gradient.commands import tensorboards as tensorboards_commands
@cli.group("tensorboards", help="Manage tensorboards", cls=common.ClickGroup)
def tensorboards_group():
pass
@tensorboards_group.command("create", help="Create new tensorboard")
@click.option(
"--experiment",
"experiments",
multiple=True,
required=True,
metavar="[<experiment ID>]",
help="One or more experiment IDs [--experiment id1 --experiment id2 ...]",
cls=common.GradientOption,
)
@click.option(
"--image",
"image",
help="Tensorboard Container Image, by default its set to tensorflow/tensorflow:latest",
cls=common.GradientOption,
)
@click.option(
"--username",
"username",
help="Basic Auth Username",
cls=common.GradientOption,
)
@click.option(
"--password",
"password",
help="Basic Auth Password",
cls=common.GradientOption,
)
# @click.option(
# "--instanceType",
# "instance_type",
# help="Instance type",
# cls=common.GradientOption,
# )
# @click.option(
# "--instanceSize",
# "instance_size",
# help="Instance size",
# cls=common.GradientOption,
# )
# @click.option(
# "--instancesCount",
# "instances_count",
# type=int,
# help="Instances count",
# cls=common.GradientOption,
# )
@common.api_key_option
@common.options_file
def create_tensorboard(api_key, options_file, **kwargs):
command = tensorboards_commands.CreateTensorboardCommand(api_key=api_key)
command.execute(**kwargs)
@tensorboards_group.command("details", help="Show details of a tensorboard")
@click.option(
"--id",
"id",
metavar="<tensorboard ID>",
required=True,
help="Tensorboard ID",
cls=common.GradientOption,
)
@common.api_key_option
@common.options_file
def tensorboard_details(id, api_key, options_file):
command = tensorboards_commands.GetTensorboardCommand(api_key=api_key)
command.execute(id)
@tensorboards_group.command("list", help="Show list of tensorboards")
@common.api_key_option
@common.options_file
def list_tensorboards(api_key, options_file):
command = tensorboards_commands.ListTensorboardsCommand(api_key=api_key)
command.execute()
@tensorboards_group.command("add-experiments", help="Update tensorboard experiments")
@click.option(
"--id",
"tensorboard_id",
metavar="<tensorboard ID>",
required=True,
help="Tensorboard ID",
cls=common.GradientOption,
)
@click.option(
"--experiment",
"experiments",
multiple=True,
required=True,
metavar="[<experiment ID>]",
help="One or more experiment IDs [--experiment id1 --experiment id2 ...]",
cls=common.GradientOption,
)
@common.api_key_option
@common.options_file
def add_experiments_to_tensorboard(tensorboard_id, experiments, api_key, options_file):
command = tensorboards_commands.AddExperimentToTensorboard(api_key=api_key)
command.execute(tensorboard_id, experiments)
@tensorboards_group.command("remove-experiments", help="Update tensorboard experiments")
@click.option(
"--id",
"id",
required=True,
metavar="<tensorboard ID>",
help="Tensorboard ID",
cls=common.GradientOption,
)
@click.option(
"--experiment",
"experiments",
multiple=True,
required=True,
metavar="[<experiment ID>]",
help="One or more experiment IDs [--experiment id1 --experiment id2 ...]",
cls=common.GradientOption,
)
@common.api_key_option
@common.options_file
def remove_experiments_to_tensorboard(api_key, options_file, **kwargs):
command = tensorboards_commands.RemoveExperimentToTensorboard(api_key=api_key)
command.execute(**kwargs)
@tensorboards_group.command("delete", help="Delete tensorboard")
@click.option(
"--id",
"id",
metavar="<tensorboard ID>",
required=True,
help="Tensorboard ID",
cls=common.GradientOption,
)
@common.api_key_option
@common.options_file
def delete_tensorboard(api_key, options_file, **kwargs):
command = tensorboards_commands.DeleteTensorboard(api_key=api_key)
command.execute(**kwargs)
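# Example invocations (illustrative IDs; flag names come from the click options
# declared above, and the API key option is omitted here):
#   gradient tensorboards create --experiment <experimentId1> --experiment <experimentId2>
#   gradient tensorboards details --id <tensorboardId>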
| 26.677419 | 91 | 0.712696 | 457 | 4,135 | 6.291028 | 0.170678 | 0.050087 | 0.104 | 0.077913 | 0.640348 | 0.577391 | 0.550261 | 0.493217 | 0.441043 | 0.377391 | 0 | 0.001709 | 0.151149 | 4,135 | 154 | 92 | 26.850649 | 0.817379 | 0.08948 | 0 | 0.6 | 0 | 0 | 0.23498 | 0.007477 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058333 | false | 0.033333 | 0.033333 | 0 | 0.091667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
921710fe148b77c5af4c4630e1aea73b68d40f82 | 5,430 | py | Python | src/selena/services/sla.py | deejay1/selena | 16189ee57c8197ab4375727ef8a905d4f4561eb7 | [
"Apache-2.0"
] | 23 | 2015-01-10T18:17:58.000Z | 2021-12-21T03:01:38.000Z | src/selena/services/sla.py | deejay1/selena | 16189ee57c8197ab4375727ef8a905d4f4561eb7 | [
"Apache-2.0"
] | 20 | 2015-01-10T14:05:42.000Z | 2016-08-09T07:48:50.000Z | src/selena/services/sla.py | deejay1/selena | 16189ee57c8197ab4375727ef8a905d4f4561eb7 | [
"Apache-2.0"
] | 3 | 2015-01-10T18:27:30.000Z | 2020-04-07T16:17:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.db.models.aggregates import Min
from services.models import (Service, SlaDaily, SlaCache, ServiceHistory)
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from django.core.exceptions import ObjectDoesNotExist
import datetime
import logging
OFFSET_PERIOD = 7
WEEK = 7
ONE_MONTH = 1
THREE_MONTHS = 3
ONE_DAY_SECONDS = datetime.timedelta(days=1).total_seconds()
logger = logging.getLogger(__name__)
def get_slacache(service):
"""Generate the SLA cache for a single service.
:param Service service: service, for which we want to generate the cache
:return: SlaCache for a given service
"""
last_day = _find_start_date(service)
diff = datetime.date.today() - last_day
for day in range(diff.days):
if day > 0:
_calculate_SLA(day, service)
sla7d = '%.2f' % _calculate_cache(service, timezone.now() - datetime.timedelta(days=WEEK))
sla1m = '%.2f' % _calculate_cache(service, timezone.now() + relativedelta(months=-ONE_MONTH))
sla3m = '%.2f' % _calculate_cache(service, timezone.now() + relativedelta(months=-THREE_MONTHS))
try:
out = SlaCache(
service_id=service.id,
sla7days=sla7d,
sla1month=sla1m,
sla3months=sla3m
)
return out
except Exception as ex:
        logger.error("Failed to build SLA cache for service %s: %s", service.id, ex)
def calculatesla():
# SlaCache object for bulk create
sla_cache_create = []
services = Service.objects.filter(is_active=True).only('id').order_by('id')
for service in services:
# create SlaCache and append it to list
sla_cache_create.append(get_slacache(service))
# delete all cache objects.
SlaCache.objects.filter(pk__in=[sc.service_id for sc in sla_cache_create]).delete()
# create all SlaCache objects in bulk.
SlaCache.objects.bulk_create(sla_cache_create)
# calculate SLA for particular day and selected service
def _calculate_SLA(offset, service):
UTC_NOW = timezone.now()
LOCAL_NOW = timezone.localtime(UTC_NOW)
TIME_OFFSET = LOCAL_NOW.utcoffset()
utc_start_time = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) - datetime.timedelta(days=offset)
utc_stop_time = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) - datetime.timedelta(days=(offset-1))
local_start_time = utc_start_time - TIME_OFFSET
local_stop_time = utc_stop_time - TIME_OFFSET
    # retrieve history; this takes into account the situation where one agent
    # reports a failure while at least one other agent reports success.
service_history = ServiceHistory.objects.values('created').\
filter(service_id=service.id).\
filter(created__gt=local_start_time).\
filter(created__lt=local_stop_time).\
filter(response_state__lt=5).\
annotate(response_state=Min('response_state')).\
order_by('created', 'response_state')
aggregated_failing_time_sec = 0
break_found = False
number_of_tries = 0
_start_time = local_start_time
for s_history in service_history:
number_of_tries += 1
if s_history['response_state'] != 1:
break_found = True
diff_time = s_history['created'] - _start_time
aggregated_failing_time_sec += diff_time.total_seconds()
else:
# if previous check returned service down or performance issue, count the time to next proper
# return as service break.
if break_found:
diff_time = s_history['created'] - _start_time
aggregated_failing_time_sec += diff_time.total_seconds()
break_found = False
_start_time = s_history['created']
    # if there is at least one record in services_servicehistory for the selected
    # service, calculate the SLA; if the day has no records, let's assume we are
    # not able to calculate the SLA
if number_of_tries > 0:
# include last period: difference between last entry in servicehistory and 00:00:00 UTC next day
if break_found:
diff_time = local_stop_time - _start_time
aggregated_failing_time_sec += diff_time.total_seconds()
sla_value = (100 - (aggregated_failing_time_sec/ONE_DAY_SECONDS*100))
sla_daily = SlaDaily(service_id=service.id, day=utc_start_time, sla=sla_value)
sla_daily.save()
def _find_start_date(service):
try:
last_day = SlaDaily.objects.filter(service_id__exact=service.id).latest('day')
except ObjectDoesNotExist:
return datetime.date.today() - datetime.timedelta(days=OFFSET_PERIOD)
return last_day.day.date()
def _calculate_cache(service, yesterday):
delta_days = timezone.now() - yesterday
service_sla = SlaDaily.objects.filter(service_id__exact=service.id,
day__gte=yesterday.strftime("%Y-%m-%d 00:00:00%z")).values('day', 'sla')
failing_time = 0
counter = 0
for row in service_sla:
failing_time += ((100 - row['sla']) * ONE_DAY_SECONDS) / 100
counter += 1
if counter == 0:
sla = -1
else:
sla = (100 - (failing_time / (ONE_DAY_SECONDS * delta_days.days) * 100))
return sla
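# Usage sketch (illustrative; assumes Django is configured for this project,
# e.g. inside a management command or a scheduled job):
#   calculatesla()  # rebuilds the SlaCache rows for all active services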
| 37.448276 | 123 | 0.688029 | 716 | 5,430 | 4.949721 | 0.290503 | 0.027935 | 0.029628 | 0.03386 | 0.189898 | 0.167325 | 0.157731 | 0.157731 | 0.102991 | 0.102991 | 0 | 0.01627 | 0.218969 | 5,430 | 144 | 124 | 37.708333 | 0.819382 | 0.167035 | 0 | 0.128713 | 0 | 0 | 0.033408 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049505 | false | 0 | 0.108911 | 0 | 0.19802 | 0.009901 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
921793e6129c6314d6f7c5cb0bd9274856e44cb1 | 2,960 | py | Python | examples/fashion_mnist_example.py | saugatkandel/cvnn | f6d7b5c17fd064a7eaa60e7af922914a974eb69a | [
"MIT"
] | 38 | 2020-09-16T14:47:36.000Z | 2022-03-30T13:35:05.000Z | examples/fashion_mnist_example.py | saugatkandel/cvnn | f6d7b5c17fd064a7eaa60e7af922914a974eb69a | [
"MIT"
] | 25 | 2020-10-03T19:30:16.000Z | 2022-03-29T15:24:44.000Z | examples/fashion_mnist_example.py | saugatkandel/cvnn | f6d7b5c17fd064a7eaa60e7af922914a974eb69a | [
"MIT"
] | 9 | 2021-01-18T10:48:57.000Z | 2022-02-11T10:34:52.000Z | # TensorFlow and tf.keras
import tensorflow as tf
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
from cvnn import layers
print(tf.__version__)
def get_fashion_mnist_dataset():
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
return (train_images, train_labels), (test_images, test_labels)
def keras_fit(train_images, train_labels, test_images, test_labels,
init1='glorot_uniform', init2='glorot_uniform', epochs=10):
tf.random.set_seed(1)
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu', kernel_initializer=init1),
tf.keras.layers.Dense(10, kernel_initializer=init2)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=epochs, shuffle=False)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
return history
def own_fit(train_images, train_labels, test_images, test_labels,
init1='glorot_uniform', init2='glorot_uniform', epochs=10):
tf.random.set_seed(1)
model = tf.keras.Sequential([
layers.ComplexFlatten(input_shape=(28, 28)),
layers.ComplexDense(128, activation='cart_relu', dtype=np.float32, kernel_initializer=init1),
layers.ComplexDense(10, dtype=np.float32, kernel_initializer=init2)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=epochs, shuffle=False)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
return history
def test_fashion_mnist():
    assert not tf.test.gpu_device_name(), "Using a GPU is not good for debugging"
seed = 117
epochs = 3
init = tf.keras.initializers.GlorotUniform(seed=seed)
init1 = tf.constant_initializer(init((784, 128)).numpy())
init2 = tf.constant_initializer(init((128, 10)).numpy())
(train_images, train_labels), (test_images, test_labels) = get_fashion_mnist_dataset()
keras = keras_fit(train_images, train_labels, test_images, test_labels, init1=init1, init2=init2, epochs=epochs)
# keras1 = keras_fit(train_images, train_labels, test_images, test_labels, init1=init1, init2=init2, epochs=epochs)
own = own_fit(train_images, train_labels, test_images, test_labels, init1=init1, init2=init2, epochs=epochs)
# if keras.history == keras1.history:
    assert keras.history == own.history, f"{keras.history} != {own.history}"
if __name__ == "__main__":
test_fashion_mnist()
| 43.529412 | 119 | 0.714865 | 384 | 2,960 | 5.260417 | 0.255208 | 0.034653 | 0.079208 | 0.108911 | 0.612376 | 0.590099 | 0.590099 | 0.590099 | 0.527723 | 0.527723 | 0 | 0.027981 | 0.166892 | 2,960 | 67 | 120 | 44.179104 | 0.79116 | 0.064189 | 0 | 0.423077 | 0 | 0 | 0.071971 | 0 | 0 | 0 | 0 | 0 | 0.038462 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.211538 | 0.057692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
921ae232934b3615fdba27bd605f3a627db67ede | 1,064 | py | Python | challenges/2021/06-lanternfish/py/__init__.py | codemicro/adventOfCode | 53574532ece1d19e5f5ba2f39e8e183c4c6225a1 | [
"MIT"
] | 9 | 2020-12-06T23:18:30.000Z | 2021-12-19T22:31:26.000Z | challenges/2021/06-lanternfish/py/__init__.py | codemicro/adventOfCode | 53574532ece1d19e5f5ba2f39e8e183c4c6225a1 | [
"MIT"
] | null | null | null | challenges/2021/06-lanternfish/py/__init__.py | codemicro/adventOfCode | 53574532ece1d19e5f5ba2f39e8e183c4c6225a1 | [
"MIT"
] | 3 | 2020-12-08T09:45:44.000Z | 2020-12-15T19:20:20.000Z | from typing import List, Dict
from aocpy import BaseChallenge
def parse(instr: str) -> List[int]:
return list(map(int, instr.strip().split(",")))
def count_fish_by_timer(all_fish: List[int]) -> Dict[int, int]:
m = {}
for fish in all_fish:
m[fish] = m.get(fish, 0) + 1
return m
def iterate_fish_once(sum_dict: Dict[int, int]):
nm = {}
# shift back
for i in range(9):
nm[i - 1] = sum_dict.get(i, 0)
# apply -1 value
nm[6] = nm.get(6, 0) + nm.get(-1, 0)
nm[8] = nm.get(-1, 0)
del nm[-1]
return nm
def sum_of_fish_after_n(fish: List[int], n: int) -> int:
by_timer = count_fish_by_timer(fish)
for _ in range(n):
by_timer = iterate_fish_once(by_timer)
return sum([by_timer[k] for k in by_timer])
class Challenge(BaseChallenge):
@staticmethod
def one(instr: str) -> int:
fish = parse(instr)
return sum_of_fish_after_n(fish, 80)
@staticmethod
def two(instr: str) -> int:
fish = parse(instr)
return sum_of_fish_after_n(fish, 256)
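# Usage sketch (illustrative; the aocpy harness normally drives Challenge, and
# "3,4,3,1,2" is assumed to be the puzzle's sample input):
#   print(Challenge.one("3,4,3,1,2"))  # 5934 if this matches the sample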
| 23.644444 | 63 | 0.608083 | 175 | 1,064 | 3.514286 | 0.297143 | 0.079675 | 0.043902 | 0.068293 | 0.193496 | 0.193496 | 0.162602 | 0.162602 | 0.162602 | 0.162602 | 0 | 0.025157 | 0.25282 | 1,064 | 44 | 64 | 24.181818 | 0.748428 | 0.023496 | 0 | 0.129032 | 0 | 0 | 0.000965 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.193548 | false | 0 | 0.064516 | 0.032258 | 0.483871 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecf5d69ffbb4ac849872de070db13227e5f7d278 | 1,569 | py | Python | django_dhcp/urls.py | weijia/django-dhcp | 625c031f5b969d48e7ad15bca75c460ddad20a00 | [
"BSD-3-Clause"
] | 1 | 2016-12-24T21:00:03.000Z | 2016-12-24T21:00:03.000Z | django_dhcp/urls.py | weijia/django-dhcp | 625c031f5b969d48e7ad15bca75c460ddad20a00 | [
"BSD-3-Clause"
] | null | null | null | django_dhcp/urls.py | weijia/django-dhcp | 625c031f5b969d48e7ad15bca75c460ddad20a00 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import patterns, include, url
from django.views.generic import ListView, DetailView, UpdateView, CreateView
from django_dhcp.models import NetworkNode
urlpatterns = patterns('',
url(r'^node_list/$', ListView.as_view(
queryset=NetworkNode.objects.all(),
context_object_name='nodes',
template_name='django_dhcp/list.html'), name="node_list"),
url(r'^(?P<pk>[0-9]+)/$',
DetailView.as_view(
model=NetworkNode,
template_name='django_dhcp/detail.html',
context_object_name='node'
), name="detail"),
url(r'^create/$', CreateView.as_view(
model=NetworkNode,
template_name='django_dhcp/create.html',),
name="create"),
url(r'^update/(?P<pk>[0-9]+)/$',
UpdateView.as_view(model=NetworkNode,
template_name='django_dhcp/update.html'),
name="update"),
)
try:
from djangoautoconf import create_tastypie_resource
    urlpatterns += (url(r'^api/node/', include(create_tastypie_resource(NetworkNode))),)
except ImportError:
pass | 43.583333 | 87 | 0.511791 | 142 | 1,569 | 5.478873 | 0.394366 | 0.064267 | 0.092545 | 0.113111 | 0.169666 | 0.169666 | 0.169666 | 0.169666 | 0 | 0 | 0 | 0.004145 | 0.384959 | 1,569 | 36 | 88 | 43.583333 | 0.802073 | 0 | 0 | 0.066667 | 0 | 0 | 0.126115 | 0.072611 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.033333 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecf6c36593681a36f3dd9d09dc12399f3a5c1c70 | 8,604 | py | Python | alipay/aop/api/domain/SearchBrandBoxInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/SearchBrandBoxInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/SearchBrandBoxInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BoxExclusiveBase import BoxExclusiveBase
from alipay.aop.api.domain.BoxOrderStatusInfo import BoxOrderStatusInfo
from alipay.aop.api.domain.BoxExclusiveKeyword import BoxExclusiveKeyword
from alipay.aop.api.domain.BoxExclusiveService import BoxExclusiveService
from alipay.aop.api.domain.BoxExclusiveService import BoxExclusiveService
def __init__(self):
self._base_info = None
self._box_status = None
self._box_type = None
self._brand_id = None
self._channel = None
self._ext_info = None
self._functions_order_info = None
self._keywords = None
self._operator_id = None
self._operator_type = None
self._related_accounts = None
self._related_functions = None
@property
def base_info(self):
return self._base_info
@base_info.setter
def base_info(self, value):
if isinstance(value, BoxExclusiveBase):
self._base_info = value
else:
self._base_info = BoxExclusiveBase.from_alipay_dict(value)
@property
def box_status(self):
return self._box_status
@box_status.setter
def box_status(self, value):
self._box_status = value
@property
def box_type(self):
return self._box_type
@box_type.setter
def box_type(self, value):
self._box_type = value
@property
def brand_id(self):
return self._brand_id
@brand_id.setter
def brand_id(self, value):
self._brand_id = value
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def functions_order_info(self):
return self._functions_order_info
@functions_order_info.setter
def functions_order_info(self, value):
if isinstance(value, BoxOrderStatusInfo):
self._functions_order_info = value
else:
self._functions_order_info = BoxOrderStatusInfo.from_alipay_dict(value)
@property
def keywords(self):
return self._keywords
@keywords.setter
def keywords(self, value):
if isinstance(value, BoxExclusiveKeyword):
self._keywords = value
else:
self._keywords = BoxExclusiveKeyword.from_alipay_dict(value)
@property
def operator_id(self):
return self._operator_id
@operator_id.setter
def operator_id(self, value):
self._operator_id = value
@property
def operator_type(self):
return self._operator_type
@operator_type.setter
def operator_type(self, value):
self._operator_type = value
@property
def related_accounts(self):
return self._related_accounts
@related_accounts.setter
def related_accounts(self, value):
if isinstance(value, list):
self._related_accounts = list()
for i in value:
if isinstance(i, BoxExclusiveService):
self._related_accounts.append(i)
else:
self._related_accounts.append(BoxExclusiveService.from_alipay_dict(i))
@property
def related_functions(self):
return self._related_functions
@related_functions.setter
def related_functions(self, value):
if isinstance(value, list):
self._related_functions = list()
for i in value:
if isinstance(i, BoxExclusiveService):
self._related_functions.append(i)
else:
self._related_functions.append(BoxExclusiveService.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.base_info:
if hasattr(self.base_info, 'to_alipay_dict'):
params['base_info'] = self.base_info.to_alipay_dict()
else:
params['base_info'] = self.base_info
if self.box_status:
if hasattr(self.box_status, 'to_alipay_dict'):
params['box_status'] = self.box_status.to_alipay_dict()
else:
params['box_status'] = self.box_status
if self.box_type:
if hasattr(self.box_type, 'to_alipay_dict'):
params['box_type'] = self.box_type.to_alipay_dict()
else:
params['box_type'] = self.box_type
if self.brand_id:
if hasattr(self.brand_id, 'to_alipay_dict'):
params['brand_id'] = self.brand_id.to_alipay_dict()
else:
params['brand_id'] = self.brand_id
if self.channel:
if hasattr(self.channel, 'to_alipay_dict'):
params['channel'] = self.channel.to_alipay_dict()
else:
params['channel'] = self.channel
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.functions_order_info:
if hasattr(self.functions_order_info, 'to_alipay_dict'):
params['functions_order_info'] = self.functions_order_info.to_alipay_dict()
else:
params['functions_order_info'] = self.functions_order_info
if self.keywords:
if hasattr(self.keywords, 'to_alipay_dict'):
params['keywords'] = self.keywords.to_alipay_dict()
else:
params['keywords'] = self.keywords
if self.operator_id:
if hasattr(self.operator_id, 'to_alipay_dict'):
params['operator_id'] = self.operator_id.to_alipay_dict()
else:
params['operator_id'] = self.operator_id
if self.operator_type:
if hasattr(self.operator_type, 'to_alipay_dict'):
params['operator_type'] = self.operator_type.to_alipay_dict()
else:
params['operator_type'] = self.operator_type
if self.related_accounts:
if isinstance(self.related_accounts, list):
for i in range(0, len(self.related_accounts)):
element = self.related_accounts[i]
if hasattr(element, 'to_alipay_dict'):
self.related_accounts[i] = element.to_alipay_dict()
if hasattr(self.related_accounts, 'to_alipay_dict'):
params['related_accounts'] = self.related_accounts.to_alipay_dict()
else:
params['related_accounts'] = self.related_accounts
if self.related_functions:
if isinstance(self.related_functions, list):
for i in range(0, len(self.related_functions)):
element = self.related_functions[i]
if hasattr(element, 'to_alipay_dict'):
self.related_functions[i] = element.to_alipay_dict()
if hasattr(self.related_functions, 'to_alipay_dict'):
params['related_functions'] = self.related_functions.to_alipay_dict()
else:
params['related_functions'] = self.related_functions
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = SearchBrandBoxInfo()
if 'base_info' in d:
o.base_info = d['base_info']
if 'box_status' in d:
o.box_status = d['box_status']
if 'box_type' in d:
o.box_type = d['box_type']
if 'brand_id' in d:
o.brand_id = d['brand_id']
if 'channel' in d:
o.channel = d['channel']
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'functions_order_info' in d:
o.functions_order_info = d['functions_order_info']
if 'keywords' in d:
o.keywords = d['keywords']
if 'operator_id' in d:
o.operator_id = d['operator_id']
if 'operator_type' in d:
o.operator_type = d['operator_type']
if 'related_accounts' in d:
o.related_accounts = d['related_accounts']
if 'related_functions' in d:
o.related_functions = d['related_functions']
return o
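# Usage sketch (illustrative values; mirrors the dict round-trip defined above):
#   info = SearchBrandBoxInfo.from_alipay_dict({'brand_id': '2088000000000000'})
#   payload = info.to_alipay_dict()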
| 35.553719 | 91 | 0.607508 | 1,000 | 8,604 | 4.93 | 0.066 | 0.070994 | 0.070588 | 0.043813 | 0.445639 | 0.381136 | 0.150913 | 0.130223 | 0.068154 | 0.023529 | 0 | 0.000499 | 0.30172 | 8,604 | 241 | 92 | 35.701245 | 0.820073 | 0.004881 | 0 | 0.179724 | 0 | 0 | 0.086001 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.124424 | false | 0 | 0.032258 | 0.0553 | 0.230415 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecf7e96aed7bb38c108c65e6a088553ea5f5fcea | 4,921 | py | Python | dependencyinjection/internal/descriptors.py | Cologler/dependencyinjection-python | dc05c61571f10652d82929ebec4b255f109b840b | [
"MIT"
] | null | null | null | dependencyinjection/internal/descriptors.py | Cologler/dependencyinjection-python | dc05c61571f10652d82929ebec4b255f109b840b | [
"MIT"
] | null | null | null | dependencyinjection/internal/descriptors.py | Cologler/dependencyinjection-python | dc05c61571f10652d82929ebec4b255f109b840b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from abc import abstractmethod
import inspect
from .common import LifeTime, IServiceProvider, IDescriptor, ICallSiteMaker
from .param_type_resolver import ParameterTypeResolver
from .errors import ParameterTypeResolveError
from .callsites import (
InstanceCallSite,
ServiceProviderCallSite,
CallableCallSite,
ListedCallSite
)
class Descriptor(IDescriptor):
def __init__(self, service_type: type, lifetime: LifeTime):
if not isinstance(service_type, type):
raise TypeError('service_type must be a type')
if not isinstance(lifetime, LifeTime):
raise TypeError('lifetime must be a LifeTime')
self._service_type = service_type
self._lifetime = lifetime
@property
def service_type(self):
return self._service_type
@property
def lifetime(self):
return self._lifetime
class CallableDescriptor(Descriptor):
def __init__(self, service_type: type, func: callable, lifetime: LifeTime, **options):
super().__init__(service_type, lifetime)
if service_type is ParameterTypeResolver:
raise RuntimeError(f'service_type cannot be {ParameterTypeResolver}.')
if not callable(func):
raise TypeError
self._func = func
self._options = options
def make_callsite(self, service_provider, depend_chain):
param_callsites = {}
signature = inspect.signature(self._func)
params = signature.parameters.values()
params = [p for p in params if p.kind is p.POSITIONAL_OR_KEYWORD]
if params:
type_resolver: ParameterTypeResolver = service_provider.get(ParameterTypeResolver)
for param in params:
callsite = None
if param.default is param.empty:
try:
param_type = type_resolver.resolve(param, False)
except ParameterTypeResolveError as err:
if isinstance(self._func, type):
                            msg = f'error creating type {self._func}: {err}'
                        else:
                            msg = f'error invoking factory {self._func}: {err}'
raise ParameterTypeResolveError(msg)
callsite = service_provider.get_callsite(param_type, depend_chain)
else:
param_type = type_resolver.resolve(param, True)
if param_type is not None:
callsite = service_provider.get_callsite(param_type, depend_chain, required=False)
if callsite is None:
callsite = InstanceCallSite(None, param.default)
param_callsites[param.name] = callsite
return CallableCallSite(self, self._func, param_callsites, self._options)
@staticmethod
def try_create(service_type: type, func: callable, lifetime: LifeTime, **options):
try:
inspect.signature(func)
except ValueError:
return None
else:
return CallableDescriptor(service_type, func, lifetime, **options)
class InstanceDescriptor(Descriptor):
def __init__(self, service_type: type, instance):
super().__init__(service_type, LifeTime.singleton)
if not isinstance(instance, service_type):
raise TypeError('obj is not a {}'.format(service_type))
self._instance = instance
def make_callsite(self, service_provider, depend_chain):
return InstanceCallSite(self, self._instance)
class ServiceProviderDescriptor(Descriptor):
def __init__(self):
super().__init__(IServiceProvider, LifeTime.scoped)
def make_callsite(self, service_provider, depend_chain):
return ServiceProviderCallSite(self)
class MapDescriptor(Descriptor):
def __init__(self, service_type: type, target_service_type: type):
super().__init__(service_type, LifeTime.transient)
if not isinstance(target_service_type, type):
raise TypeError('target_service_type must be a type')
self._target = target_service_type
def make_callsite(self, service_provider, depend_chain):
return service_provider.get_callsite(self._target, depend_chain)
class ListedDescriptor(ICallSiteMaker):
def __init__(self, descriptors):
self._descriptors = tuple(descriptors)
def __hash__(self):
return hash(self._descriptors)
    def __eq__(self, other):
        return isinstance(other, ListedDescriptor) and self._descriptors == other._descriptors
def make_callsite(self, service_provider, depend_chain):
callsites = []
for descriptor in self._descriptors:
callsites.append(service_provider.get_callsite(descriptor, depend_chain))
return ListedCallSite(callsites)
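# A minimal usage sketch (hypothetical; it uses only the constructors defined
# above -- the container/registration API lives elsewhere in the package):
#
#   descriptor = InstanceDescriptor(str, 'hello')
#   assert descriptor.lifetime is LifeTime.singleton
#   # descriptor.make_callsite(provider, chain) returns an InstanceCallSite
#   # that always resolves to the registered 'hello' instance.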
| 36.183824 | 106 | 0.655151 | 506 | 4,921 | 6.096838 | 0.231225 | 0.08201 | 0.038898 | 0.030794 | 0.266451 | 0.220421 | 0.176337 | 0.146191 | 0.084603 | 0 | 0 | 0.002481 | 0.262955 | 4,921 | 135 | 107 | 36.451852 | 0.848084 | 0.024182 | 0 | 0.117647 | 0 | 0 | 0.049009 | 0.005005 | 0 | 0 | 0 | 0 | 0 | 1 | 0.156863 | false | 0 | 0.058824 | 0.068627 | 0.382353 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecf8a661406818a230a4076d57178a508b0bd6f5 | 2,666 | py | Python | ilurl/core/ql/define.py | guilhermevarela/ilu | e4db9744c28f9e04ae82c884f131ee8cd9601cc8 | [
"MIT"
] | 2 | 2019-10-18T17:04:50.000Z | 2019-10-18T17:05:04.000Z | ilurl/core/ql/define.py | guilhermevarela/ilurl | e4db9744c28f9e04ae82c884f131ee8cd9601cc8 | [
"MIT"
] | 17 | 2019-11-20T09:33:50.000Z | 2020-01-30T14:57:40.000Z | ilurl/core/ql/define.py | gsavarela/ilurl | e4db9744c28f9e04ae82c884f131ee8cd9601cc8 | [
"MIT"
] | null | null | null | """The module helps define the q learning dictionary"""
__author__ = "Guilherme Varela"
__date__ = "2019-07-25"
from itertools import product as prod
def dpq_tls(state_rank, state_dim,
action_rank, action_dim, initial_value=0):
"""Prepares a dynamic programming Q-learning table
for a traffic light agent
PARAMETERS
----------
    **_rank: int
        See catspace below
    **_dim: int
        See catspace below
    *initial_value: int
        See dpq below
RETURNS
-------
* q: dictionary
Where q is a nested dictionary where the outer keys
are states and the inner keys are the actions
"""
state_space = catspace(state_rank, state_dim)
action_space = catspace(action_rank, action_dim)
return dpq(state_space, action_space,
initial_value=initial_value)
def dpq(states, actions, initial_value=0):
"""dynamic programming definition for Q-learning
This implementation returns a table like, nested
dict of states and actions -- where the inner values
q[s][a] reflect the best current estimates of expected
rewards for taking action a on state s.
See [1] chapter 3 for details.
PARAMETERS
----------
    * states: list, tuple or any other enumerable
        Where each state in the collection is represented by
        an immutable type, e.g. int or tuple.
    * actions: list, tuple or any other enumerable
        Where each action in the collection is represented by
        an immutable type, e.g. int or tuple.
* initial_value: numeric
Initial a priori guess for the Q function
RETURNS
-------
* q: dictionary
Where q is a nested dictionary where the outer keys
are states and the inner keys are the actions
REFERENCE
---------
[1] Sutton et Barto, Reinforcement Learning 2nd Ed 2018
"""
return {
s: {
a: initial_value
for a in actions
}
for s in states
}
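# A quick sanity check of the shape dpq returns (doctest-style illustration):
#
#   >>> dpq([0, 1], ['a', 'b'])
#   {0: {'a': 0, 'b': 0}, 1: {'a': 0, 'b': 0}}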
def catspace(rank, dim):
"""Makes a categorical space of the discrete
combinations of rank each holding dim elements
USAGE
-----
> catspace(3, 2)
    > [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1),
       (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)]
PARAMETERS
----------
* rank: int
The length of the tuples ( width )
* dim: int
The number of elements in each position of
the tuple ( depth )
RETURNS
-------
* space: list of tuples
        Each tuple is of length rank, and each
        tuple element is an integer in 0..dim-1
"""
return [
tuple(e)
for e in prod(range(dim), repeat=rank)]
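# A minimal usage sketch of the two helpers combined (hypothetical sizes):
# a traffic-light agent with a 2-component state over 3 values and a single
# binary action gets a 9-state x 2-action table, all entries starting at 0:
#
#   >>> q = dpq_tls(state_rank=2, state_dim=3, action_rank=1, action_dim=2)
#   >>> len(q), len(q[(0, 0)])
#   (9, 2)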
| 24.685185 | 59 | 0.620405 | 370 | 2,666 | 4.389189 | 0.335135 | 0.051724 | 0.007389 | 0.007389 | 0.289409 | 0.253695 | 0.246305 | 0.246305 | 0.199507 | 0.199507 | 0 | 0.023709 | 0.288072 | 2,666 | 107 | 60 | 24.915888 | 0.831928 | 0.646287 | 0 | 0 | 0 | 0 | 0.037901 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.047619 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecf92f0deadf2f7bda487207ea9032532498f896 | 296 | py | Python | Company_Based_Questions/Geometry/Count_number_of_rectangles_in_a_circle.py | Satyam-Bhalla/Competitive-Coding | 5814f5f60572f1e76495efe751b94bf4d2845198 | [
"MIT"
] | 1 | 2021-12-09T10:36:48.000Z | 2021-12-09T10:36:48.000Z | Company_Based_Questions/Geometry/Count_number_of_rectangles_in_a_circle.py | Satyam-Bhalla/Competitive-Coding | 5814f5f60572f1e76495efe751b94bf4d2845198 | [
"MIT"
] | null | null | null | Company_Based_Questions/Geometry/Count_number_of_rectangles_in_a_circle.py | Satyam-Bhalla/Competitive-Coding | 5814f5f60572f1e76495efe751b94bf4d2845198 | [
"MIT"
] | null | null | null | def countRectangles(r):
d = 2*r
dSq = d*d
rectangles = 0
for i in range(1,d):
for j in range(1,d):
if i**2+j**2 <= dSq:
rectangles += 1
return rectangles
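# Worked example: for r = 2 the diameter is d = 4, and the (width, height)
# pairs in 1..3 with width**2 + height**2 <= 16 are the 8 counted rectangles,
# so countRectangles(2) == 8.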
t = int(input())
for _ in range(t):
r = int(input())
print(countRectangles(r)) | 22.769231 | 32 | 0.510135 | 46 | 296 | 3.26087 | 0.434783 | 0.14 | 0.106667 | 0.12 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036082 | 0.344595 | 296 | 13 | 33 | 22.769231 | 0.737113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.153846 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecf99972906a2664b8eb3161b627de2250acc67f | 546 | py | Python | siteapp/models.py | cbidici/cbsite | 594e327ad0c0bfa5015461eb243176c3aec7b68d | [
"MIT"
] | null | null | null | siteapp/models.py | cbidici/cbsite | 594e327ad0c0bfa5015461eb243176c3aec7b68d | [
"MIT"
] | 1 | 2020-05-10T14:45:22.000Z | 2020-05-10T15:06:50.000Z | siteapp/models.py | cbidici/cbsite | 594e327ad0c0bfa5015461eb243176c3aec7b68d | [
"MIT"
] | null | null | null | from django.db import models
from slugify import slugify
class Tag(models.Model):
id = models.AutoField(primary_key=True)
slug = models.CharField(max_length=128, unique=True)
tag = models.CharField(max_length=128)
def save(self, force_insert=False, force_update=False):
self.slug = slugify(self.tag)
super(Tag, self).save(force_insert, force_update)
def __str__(self):
return self.tag
class TagsModel(models.Model):
tags = models.ManyToManyField(Tag)
class Meta:
abstract = True
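# A minimal usage sketch (assumes a configured Django project; `slugify` is
# the python-slugify function imported above):
#
#   tag = Tag(tag='Hello World')
#   tag.save()                      # save() derives the slug from the tag
#   assert tag.slug == 'hello-world'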
| 23.73913 | 59 | 0.695971 | 73 | 546 | 5.054795 | 0.479452 | 0.04878 | 0.097561 | 0.130081 | 0.146341 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013793 | 0.203297 | 546 | 22 | 60 | 24.818182 | 0.834483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0.066667 | 0.8 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecfb22412fbdd1ca40827fb6bcc73e85e6d1f7d3 | 392 | py | Python | file_rename_voc.py | GitEasonXu/Python_wheel | 52a495b4b90132d6980aeded099ff575f2ed58e4 | [
"MIT"
] | null | null | null | file_rename_voc.py | GitEasonXu/Python_wheel | 52a495b4b90132d6980aeded099ff575f2ed58e4 | [
"MIT"
] | null | null | null | file_rename_voc.py | GitEasonXu/Python_wheel | 52a495b4b90132d6980aeded099ff575f2ed58e4 | [
"MIT"
] | null | null | null | ## Traverse all files under the given path and rename each one to a zero-padded index
import os
path = './Image'
count = 0  # running index for the new file names; must be initialized before the loop
for (root, dirs, files) in os.walk(path):
    for item in files:
        Olddir = os.path.join(root, item)
        filename = os.path.split(Olddir)[0]  # directory part of the path
        filetype = os.path.split(Olddir)[1][-4:]  # file extension (assumes 3-char extensions such as '.jpg')
        Newdir = os.path.join(path, str(count).zfill(6) + filetype)
        os.rename(Olddir, Newdir)
count+=1 | 32.666667 | 62 | 0.609694 | 55 | 392 | 4.345455 | 0.527273 | 0.125523 | 0.083682 | 0.142259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016667 | 0.234694 | 392 | 12 | 63 | 32.666667 | 0.78 | 0.091837 | 0 | 0 | 0 | 0 | 0.01983 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecfe6cf1d1e3eb686af5893cb455fa42f77691d4 | 6,833 | py | Python | src/protocol_analysis/protocol_analysis.py | shelleyshuzhang/mon-iot-traffic-analysis | fb4cead55fed8163af5975ccf2ccea3c63b8c97a | [
"Apache-2.0"
] | 1 | 2020-10-18T07:56:47.000Z | 2020-10-18T07:56:47.000Z | src/protocol_analysis/protocol_analysis.py | shelleyshuzhang/mon-iot-traffic-analysis | fb4cead55fed8163af5975ccf2ccea3c63b8c97a | [
"Apache-2.0"
] | null | null | null | src/protocol_analysis/protocol_analysis.py | shelleyshuzhang/mon-iot-traffic-analysis | fb4cead55fed8163af5975ccf2ccea3c63b8c97a | [
"Apache-2.0"
] | null | null | null | import csv
import os
from multiprocessing import Manager
from multiprocessing import Process
####### must import the packet in scapy in order to see the results #######
from scapy import *
from scapy.layers import *
from scapy.layers.dns import *
from scapy.layers.inet import *
from scapy.utils import PcapReader
from protocol_analysis import Destination, DestinationPro, ProtocolPort
protocol_known_dict = {"1": "well-known", "-1": "unknown",
"0.5": "registered", "0": "not-well-known"}
protocol_readable_dict = {"1": "human-readable", "0": "human-unreadable",
"0.5": "partially human-readable", "-1": "unknown"}
protocol_encrypted_dict = {"1": "encrypted", "0": "unencrypted", "-1": "unknown"}
protocol_importance_dict = {"1": "important", "0": "unimportant", "-1": "unknown"}
dst_info = {}
protocol_info = {}
filenames = []
def run(dir_name, device_mac, script_dir, previous_info, num_proc):
global filenames
print(" Reading the destination info...")
read_dst_csv(result=previous_info)
print(" Reading common protocol and port info...")
read_protocol_csv(script_dir + "/protocol_analysis/protocols_info.csv")
print(" Analyzing the protocol and port of each packet...")
results = Manager().list()
for i in range(num_proc):
filenames.append([])
results.append([])
index = 0
for root, dirs, files in os.walk(dir_name):
for filename in files:
if filename.endswith(".pcap") and not filename.startswith("."):
filenames[index].append(root + "/" + filename)
index += 1
if index >= num_proc:
index = 0
procs = []
pid = 0
for i in range(num_proc):
p = Process(target=dst_protocol_analysis,
args=(pid, device_mac, results))
procs.append(p)
p.start()
pid += 1
for p in procs:
p.join()
combined_results = results[0]
for i in range(num_proc - 1):
dst_pro_arr = results[i + 1]
for dst_pro in dst_pro_arr:
if dst_pro in combined_results:
index = combined_results.index(dst_pro)
combined_results[index].add_all(dst_pro.snd, dst_pro.rcv, dst_pro.p_snd, dst_pro.p_rcv)
else:
combined_results.append(dst_pro)
return combined_results
def dst_protocol_analysis(pid, d_mac, result_list):
result = []
for f in filenames[pid]:
for p in PcapReader(f):
packet_len = len(p)
p_ip, snd_rcv = get_pak_ip(p, d_mac)
if p_ip != 'non-ip' and p_ip in dst_info:
p_protocol = get_pak_protocol(packet=p, d_mac=d_mac)
host = dst_info[p_ip]
if p_protocol in protocol_info:
prot = protocol_info[p_protocol]
else:
prot = ProtocolPort.ProtocolPort(p_protocol, '-1', '-1', '-1', '-1')
index = 0
is_old = False
for dst_pro in result:
if host == dst_pro.host and prot == dst_pro.protocol_port:
is_old = True
break
index += 1
if is_old:
if snd_rcv == 'snd':
result[index].add_snd(packet_len)
result[index].add_ps(1)
else:
result[index].add_rcv(packet_len)
result[index].add_pr(1)
else:
current: DestinationPro.DestinationPro
current = DestinationPro.DestinationPro(host, prot)
if snd_rcv == 'snd':
current.add_snd(packet_len)
current.add_ps(1)
else:
current.add_rcv(packet_len)
current.add_pr(1)
result.append(current)
result_list[pid] = result
# For well-known: 1 (well-known), 0.5 (registered),
# 0 (not-well-known), -1 (unknown)
# For encrypted: 1 (encrypted), 0 (unencrypted),
# 0.5 (partially encrypted), -1 (unknown)
# fills protocol_info: a dict mapping each "Protocol&port" string to a
# ProtocolPort built from the Encrypted, Well-known, Human-readable,
# and Importance columns
def read_protocol_csv(file_name):
global protocol_info
with open(file_name, mode='r', encoding='utf-8-sig') as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
protocol = row["Protocol&port"]
protocol_info[protocol] = ProtocolPort.ProtocolPort(protocol_port=protocol,
encrypted=row["Encrypted"],
expected=row["Well-known"],
readable=row["Human-readable"],
importance=row["Importance"])
csv_file.close()
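# A hypothetical example of what one row of protocols_info.csv might look like,
# given the column names read above (the real file ships with the project):
#
#   Protocol&port,Encrypted,Well-known,Human-readable,Importance
#   TCP port: 443,1,1,0,1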
# read all the destination related info
def read_dst_csv(result: dict):
global dst_info
    total_num = len(result['ip'])
index = 0
while index < total_num:
ip = result['ip'][index]
if ip not in dst_info:
dst_info[ip] = Destination.Destination(host=result['host'][index],
party=result['party'][index],
ip=ip,
host_full=result['host_full'][index],
country=result['country'][index],
org=result['organization'][index])
index += 1
# get the protocol and port info of a packet
def get_pak_protocol(packet, d_mac):
# get the protocol info
protocol = packet.lastlayer().name
if protocol == "Raw":
pros = list(packet.iterpayloads())
        protocol = pros[-2].name
if not protocol.startswith("IGMPv3mr"):
protocol = protocol.split()[0]
else:
protocol = "IGMPv3"
# get port number information
port_number = ""
if packet.src == d_mac:
if packet.haslayer(UDP) or packet.haslayer(TCP):
port_number = " port: " + str(packet.dport)
else:
if packet.haslayer(UDP) or packet.haslayer(TCP):
port_number = " port: " + str(packet.sport)
return protocol + port_number
# get the IP of the packet and whether
# it is sent or received
def get_pak_ip(packet, d_mac):
if packet.haslayer(IP):
if packet.src == d_mac:
return packet[IP].dst, 'rcv'
else:
return packet[IP].src, 'snd'
else:
return 'non-ip', 'none'
| 35.221649 | 103 | 0.536075 | 779 | 6,833 | 4.521181 | 0.213094 | 0.022147 | 0.017036 | 0.017888 | 0.089154 | 0.061897 | 0.056786 | 0.034639 | 0.034639 | 0.034639 | 0 | 0.011397 | 0.357969 | 6,833 | 193 | 104 | 35.404145 | 0.791429 | 0.072443 | 0 | 0.163265 | 0 | 0 | 0.08365 | 0.005862 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0 | 0.081633 | 0 | 0.156463 | 0.020408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ecff03b69669e40823b94d33d2618c575448cad8 | 3,988 | py | Python | BayOptPy/tpot/notebooks/evaluate_results.py | Mind-the-Pineapple/tpot-age | 2969bfa6dc5c652d5b4f00f59e9b0b23869f6bef | [
"MIT"
] | 3 | 2020-04-09T16:53:54.000Z | 2020-04-21T16:49:52.000Z | BayOptPy/tpot/notebooks/evaluate_results.py | Mind-the-Pineapple/tpot-age | 2969bfa6dc5c652d5b4f00f59e9b0b23869f6bef | [
"MIT"
] | null | null | null | BayOptPy/tpot/notebooks/evaluate_results.py | Mind-the-Pineapple/tpot-age | 2969bfa6dc5c652d5b4f00f59e9b0b23869f6bef | [
"MIT"
] | null | null | null | '''
This script defines functions that will be used by the different notebooks to
analyse the results
'''
import pandas as pd
from plotly import tools
import plotly.graph_objs as go
def predecessor_generation(results, curr_generation, verbose):
'''
Print the predecessor's generation for all the models in a specific generation
'''
data_df = pd.DataFrame()
for key in results['evaluated_individuals'][curr_generation].keys():
predecessor = ''.join(results['evaluated_individuals'][curr_generation][key]['predecessor'])
for generation in range(curr_generation-1, -1, -1):
if predecessor in results['evaluated_individuals'][generation].keys():
mae = abs(results['evaluated_individuals'][generation][predecessor]['internal_cv_score'])
data_df = data_df.append({'model': key, 'predecessor': predecessor, 'generation': int(generation),
'mae': mae},
ignore_index=True)
if verbose:
print('Current Generation: %d' %curr_generation)
print('List of repeated models')
repeated_predecessors = set([x for x in list(data_df['predecessor']) if list(data_df['predecessor']).count(x)>1])
print(repeated_predecessors)
        print('There are %d repeated predecessors' % len(repeated_predecessors))
return data_df
def prepare_df_for_plotly(data_df):
'''
Create new columns
'''
# Create a new column in the dataframe that contains only the list of models in the predecessor model.
list_models_predecessor = []
list_models_curr_model = []
for idx, _ in data_df.iterrows():
        # Check if the current row contains an ensemble; if so, split it into its separate models
predecessor = data_df['predecessor'][idx].split('(input_matrix')[0].split('(')
list_models_predecessor.append(predecessor)
# Do the same thing for the current model
model = data_df['model'][idx].split('(input_matrix')[0].split('(')
list_models_curr_model.append(model)
data_df['list_models_predecessor'] = list_models_predecessor
data_df['list_models_curr_model'] = list_models_curr_model
# Create a column to visualise on plotly
data_df['visualisation'] = 'Predecessor Generation: ' + data_df['generation'].astype(str) + '<br>' + \
'Predecessor Models:' + data_df['list_models_predecessor'].astype(str) + '<br>' + \
'Curr Model list: ' + data_df['list_models_curr_model'].astype(str)
return data_df
def plot_plotly(results, n_plots, curr_gen_idx, ex_plot, subplots):
    # Create the same plot for multiple generations and put them next to
    # each other
data = []
rows = 2
columns = 5
fig = tools.make_subplots(rows=rows, cols=columns,
subplot_titles=tuple(['Gen %d'%x for x in
range(1 + n_plots*ex_plot,
n_plots + n_plots * ex_plot + 1)]))
for i in range(rows):
for j in range(columns):
curr_gen = subplots[curr_gen_idx] # Load the data for the current generation
data_df = predecessor_generation(results, curr_gen, verbose=False)
# Add additional columns for visualisation
data_df = prepare_df_for_plotly(data_df)
sp = go.Scatter(y = data_df['mae'],
mode='markers',
text=data_df['visualisation'],
hoverinfo = 'text',
marker=dict(
size=16,
color=data_df['generation'], #set color equal to a variable
colorscale='Viridis',
cmin=0,
cmax = 100),
showlegend=False)
fig.append_trace(sp, i+1, j+1)
curr_gen_idx +=1
fig['layout'].update(height=600, width=1000, title='Multiple Generations')
return fig, curr_gen_idx
| 45.318182 | 121 | 0.614092 | 485 | 3,988 | 4.853608 | 0.323711 | 0.058624 | 0.044605 | 0.040357 | 0.146559 | 0.071368 | 0.029737 | 0.029737 | 0 | 0 | 0 | 0.009434 | 0.282347 | 3,988 | 87 | 122 | 45.83908 | 0.813068 | 0.161234 | 0 | 0.032258 | 0 | 0 | 0.164595 | 0.052743 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048387 | false | 0 | 0.048387 | 0 | 0.145161 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a6018f719d55a7b7e6894dc15be94c1e0f710808 | 817 | py | Python | h/migrations/versions/77c2af032aca_add_document_uri_and_meta_docid_ix.py | y3g0r/h | a057144956fe25e669aeba5d0f0eb38f9dc09566 | [
"BSD-2-Clause"
] | 2 | 2019-08-04T07:22:11.000Z | 2020-07-17T05:01:41.000Z | h/migrations/versions/77c2af032aca_add_document_uri_and_meta_docid_ix.py | fuelpress/i.fuel.press | af7b25895d813af0fef656dcf483afe852a99d76 | [
"BSD-2-Clause"
] | 4 | 2020-03-24T17:38:24.000Z | 2022-03-02T05:45:01.000Z | h/migrations/versions/77c2af032aca_add_document_uri_and_meta_docid_ix.py | fuelpress/i.fuel.press | af7b25895d813af0fef656dcf483afe852a99d76 | [
"BSD-2-Clause"
] | null | null | null | """Add Document URI and Meta document_id index
Revision ID: 77c2af032aca
Revises: f3b8e76ae9f5
Create Date: 2016-05-13 15:06:55.496502
"""
# revision identifiers, used by Alembic.
revision = "77c2af032aca"
down_revision = "f3b8e76ae9f5"
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute("COMMIT")
op.create_index(
op.f("ix__document_uri_document_id"),
"document_uri",
["document_id"],
postgresql_concurrently=True,
)
op.create_index(
op.f("ix__document_meta_document_id"),
"document_meta",
["document_id"],
postgresql_concurrently=True,
)
def downgrade():
op.drop_index(op.f("ix__document_uri_document_id"), "document_uri")
op.drop_index(op.f("ix__document_meta_document_id"), "document_meta")
| 22.694444 | 73 | 0.691554 | 105 | 817 | 5.057143 | 0.4 | 0.131827 | 0.105461 | 0.07533 | 0.512241 | 0.376648 | 0.376648 | 0.323917 | 0.323917 | 0.323917 | 0 | 0.066768 | 0.19339 | 817 | 35 | 74 | 23.342857 | 0.738998 | 0.210526 | 0 | 0.285714 | 0 | 0 | 0.339089 | 0.178964 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.095238 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a60228f4b0493e204cf39ee8d4a76aafe56f57f7 | 4,410 | py | Python | performance.py | PudPawat/protest-detection-violence-estimation | 6469c3ae47a7d99308458174fe16bd2c5c7821aa | [
"MIT"
] | 2 | 2020-12-10T01:22:13.000Z | 2021-03-11T08:05:16.000Z | performance.py | PudPawat/protest-detection-violence-estimation | 6469c3ae47a7d99308458174fe16bd2c5c7821aa | [
"MIT"
] | null | null | null | performance.py | PudPawat/protest-detection-violence-estimation | 6469c3ae47a7d99308458174fe16bd2c5c7821aa | [
"MIT"
] | null | null | null | import os
import subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import argparse
import seaborn as sns # I love this package!
sns.set_style('white')
import torch
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve
import scipy.stats as stats
def plot_roc(attr, target, pred):
"""Plot a ROC curve and show the accuracy score and the AUC"""
fig, ax = plt.subplots()
auc = roc_auc_score(target, pred)
acc = accuracy_score(target, (pred >= 0.5).astype(int))
fpr, tpr, _ = roc_curve(target, pred)
plt.plot(fpr, tpr, lw = 2, label = attr.title())
plt.legend(loc = 4, fontsize = 15)
plt.title(('ROC Curve for {attr} (Accuracy = {acc:.3f}, AUC = {auc:.3f})'
.format(attr = attr.title(), acc= acc, auc = auc)),
fontsize = 15)
plt.xlabel('False Positive Rate', fontsize = 15)
plt.ylabel('True Positive Rate', fontsize = 15)
plt.show()
return fig
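# Example with toy data (hypothetical values, only to show the call shape):
#
#   import numpy as np
#   fig = plot_roc('protest', np.array([0, 1, 1, 0]), np.array([.2, .8, .6, .4]))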
def main():
# load check point
model_path = args.model_dir
checkpoint = torch.load(model_path)
# print("ch",checkpoint)
loss_history_train = checkpoint['loss_history_train']
loss_history_val = checkpoint['loss_history_val']
loss_his_val = [] # change type tensor to numpy for plotting
for loss in loss_history_val:
loxx = []
for los in loss:
loxx.append(los.cpu().numpy())
loss_his_val.append(loxx)
print(type(loss_history_val[0][0]))
print(loss_history_train[0][0])
loss_train = [np.mean(l) for l in loss_history_train]
loss_val = [np.mean(l) for l in loss_his_val]
plt.plot(loss_train, label = 'Train Loss')
plt.plot(loss_val, label = 'Val Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Loss Trend')
plt.legend()
plt.show()
# load prediction
df_pred = pd.read_csv(args.csv_dir)
df_pred['imgpath'] = df_pred['imgpath'].apply(os.path.basename)
# load target
test_label_path = 'UCLA-protest/annot_test.txt'
df_target = pd.read_csv(test_label_path, delimiter= '\t')
# plot ROC curve for protest
attr = "protest"
target = df_target[attr]
pred = df_pred[attr]
fig = plot_roc(attr, target, pred)
fig.savefig(os.path.join('output', attr+'.png'))
# plot ROC curves for visual attributes
for attr in df_pred.columns[3:]:
target = df_target[attr]
pred = df_pred[attr][target != '-']
target = target[target != '-'].astype(int)
fig = plot_roc(attr, target, pred)
fig.savefig(os.path.join(args.save_dir, attr+'.png'))
attr = 'violence'
pred = df_pred[df_target['protest'] == 1][attr].tolist()
target = df_target[df_target['protest'] == 1][attr].astype(float).tolist()
fig, ax = plt.subplots()
plt.scatter(target, pred, label = attr.title())
plt.xlim([-.05,1.05])
plt.ylim([-.05,1.05])
plt.xlabel('Annotation', fontsize = 15)
    plt.ylabel('Prediction', fontsize = 15)
corr, pval = stats.pearsonr(target, pred)
plt.title(('Scatter Plot for {attr} (Correlation = {corr:.3f})'
.format(attr = attr.title(), corr= corr)), fontsize = 15)
plt.show()
fig.savefig(os.path.join('files', attr+'.png'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--csv_dir",
type=str,
default="output/resnet34_40ep/resnet34_best_result.csv",
required = False,
help = "image directory to calculate output"
"(the directory must contain only image files)"
)
parser.add_argument("--model_dir",
type=str,
default="output/resnet34_40ep/checkpoint.pth.tar",
required = False,
help = "image directory to calculate output"
"(the directory must contain only image files)"
)
parser.add_argument("--save_dir",
type=str,
default="output/single_perceptron_40ep/",
required = False,
help = "image directory to calculate output"
"(the directory must contain only image files)"
)
args = parser.parse_args()
main() | 36.446281 | 80 | 0.594558 | 568 | 4,410 | 4.466549 | 0.288732 | 0.031533 | 0.030745 | 0.020102 | 0.292077 | 0.214821 | 0.214821 | 0.173827 | 0.148601 | 0.148601 | 0 | 0.015699 | 0.277778 | 4,410 | 121 | 81 | 36.446281 | 0.780848 | 0.057143 | 0 | 0.207921 | 0 | 0.009901 | 0.176315 | 0.034009 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019802 | false | 0 | 0.09901 | 0 | 0.128713 | 0.019802 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a60260497b4fa5807ca2464a85c90d77b9b013da | 6,071 | py | Python | tangent/anf.py | aayushi94/tangent | 4a8c5f1f0c69adc574a2643a6bc02521c5bdaa2a | [
"Apache-2.0"
] | null | null | null | tangent/anf.py | aayushi94/tangent | 4a8c5f1f0c69adc574a2643a6bc02521c5bdaa2a | [
"Apache-2.0"
] | null | null | null | tangent/anf.py | aayushi94/tangent | 4a8c5f1f0c69adc574a2643a6bc02521c5bdaa2a | [
"Apache-2.0"
] | 1 | 2019-12-06T11:51:41.000Z | 2019-12-06T11:51:41.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transform AST into something similar to A-normal form.
This significantly simplifies certain procedures later on. The ANF
transformations guarantee the following:
All nested expressions on the right hand side of assignments are expanded and
reduced to the following:
y = x
y = f(x1, ..., xn)
z = x + y
y = -x
y.i = x
y = x.i
y[i] = x
y = x[i]
z = x, y
Note that we do not allow tuple unpacking, because statements like `x[i], y =
f(x)` are difficult to process in this case. Hence, unpacking is made explicit.
The value of the return statement is reduced to either a single variable, or a
tuple of variables (nested tuples are expanded).
"""
from __future__ import absolute_import
import gast
from tangent import annotations as anno
from tangent import grammar
from tangent import naming
from tangent import quoting
from tangent import transformers
class ANF(transformers.TreeTransformer):
"""Transform a tree to an ANF-like form."""
def __init__(self):
super(ANF, self).__init__()
# Whether the current statement in question must be trivialized
self.trivializing = False
# The original line that is transformed, which is kept as an annotation
self.src = ''
def mark(self, node):
if not anno.hasanno(node, 'pre_anf') and self.src:
anno.setanno(node, 'pre_anf', self.src)
def trivialize(self, node):
if isinstance(node, (gast.Name, type(None)) + grammar.LITERALS):
return node
name = self.namer.name(node)
stmt = gast.Assign(
targets=[gast.Name(annotation=None, id=name, ctx=gast.Store())],
value=None)
self.mark(stmt)
self.prepend(stmt)
stmt.value = self.visit(node)
return gast.Name(annotation=None, id=name, ctx=gast.Load())
def visit_Call(self, node):
if self.trivializing:
for i, arg in enumerate(node.args):
node.args[i] = self.trivialize(arg)
for keyword in node.keywords:
keyword.value = self.trivialize(keyword.value)
return node
def visit_FunctionDef(self, node):
self.namer = naming.Namer.build(node)
return self.generic_visit(node)
def visit_BinOp(self, node):
if self.trivializing:
node.left = self.trivialize(node.left)
node.right = self.trivialize(node.right)
return node
def visit_UnaryOp(self, node):
if self.trivializing:
node.operand = self.trivialize(node.operand)
return node
def visit_Return(self, node):
self.trivializing = True
self.namer.target = node
node.value = self.trivialize(node.value)
self.trivializing = False
self.namer.target = None
return node
def visit_Subscript(self, node):
if self.trivializing:
node.value = self.trivialize(node.value)
if isinstance(node.slice, gast.Index):
node.slice.value = self.trivialize(node.slice.value)
elif isinstance(node.slice, gast.Slice):
name = self.namer.name(node.slice)
target = gast.Name(id=name, ctx=gast.Store(), annotation=None)
stmt = gast.Assign(targets=[target], value=None)
self.prepend(stmt)
stmt.value = gast.Call(
func=gast.Name(id='slice', ctx=gast.Load(), annotation=None),
args=[
self.trivialize(arg) if arg else
gast.Name(id='None', ctx=gast.Load(), annotation=None)
for arg in [node.slice.lower, node.slice.upper,
node.slice.step]],
keywords=[])
node.slice = gast.Index(value=gast.Name(id=name, ctx=gast.Load(),
annotation=None))
else:
raise ValueError
return node
def visit_Tuple(self, node):
if self.trivializing:
node.elts = [self.trivialize(elt) for elt in node.elts]
return node
def visit_List(self, node):
if self.trivializing:
node.elts = [self.trivialize(elt) for elt in node.elts]
return node
def visit_AugAssign(self, node):
self.trivializing = True
left = self.trivialize(node.target)
right = self.trivialize(node.value)
self.trivializing = False
node = gast.Assign(targets=[node.target],
value=gast.BinOp(left=left, op=node.op, right=right))
return node
def visit_Assign(self, node):
self.src = quoting.unquote(node)
self.mark(node)
self.trivializing = True
self.namer.target = node.targets[0]
if isinstance(node.targets[0], (gast.Subscript, gast.Attribute)):
node.value = self.trivialize(node.value)
node.targets[0] = self.visit(node.targets[0])
elif isinstance(node.targets[0], gast.Tuple):
node.value = self.visit(node.value)
name = self.namer.name(node.targets[0])
target = gast.Name(id=name, ctx=gast.Store(), annotation=None)
for i, elt in enumerate(node.targets[0].elts):
stmt = gast.Assign(
targets=[elt],
value=gast.Subscript(
value=gast.Name(id=name, ctx=gast.Load(),
annotation=None),
slice=gast.Index(value=gast.Num(n=i)),
ctx=gast.Load()))
self.mark(stmt)
self.append(stmt)
node.targets[0] = target
elif not isinstance(node.targets[0], gast.Name):
raise ValueError
node = self.generic_visit(node)
self.namer.target = None
self.trivializing = False
return node
def anf(node):
"""Turn an AST into ANF-like form."""
ANF().visit(node)
return node
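# A minimal usage sketch (hypothetical input; `quoting` is the helper module
# imported above):
#
#   import gast
#   tree = gast.parse('def f(x):\n    return x * x + 1')
#   anf(tree)  # expands the nested return expression in place
#   print(quoting.unquote(tree))  # the return value is now built up through
#                                 # single-operation assignments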
| 32.994565 | 79 | 0.654423 | 834 | 6,071 | 4.731415 | 0.254197 | 0.049671 | 0.02965 | 0.036493 | 0.292448 | 0.193867 | 0.150532 | 0.128231 | 0.088697 | 0.088697 | 0 | 0.003884 | 0.236534 | 6,071 | 183 | 80 | 33.174863 | 0.847465 | 0.236864 | 0 | 0.333333 | 0 | 0 | 0.004999 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113821 | false | 0 | 0.056911 | 0 | 0.284553 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a602c20e6c62e465410e523f5014d66e68793385 | 1,565 | py | Python | tests/tagger_test.py | slouvan/anago | 99a8be0ba2ea42c9c686ff9697ea9e6ef60ca028 | [
"MIT"
] | null | null | null | tests/tagger_test.py | slouvan/anago | 99a8be0ba2ea42c9c686ff9697ea9e6ef60ca028 | [
"MIT"
] | 9 | 2020-01-28T22:26:05.000Z | 2022-02-09T23:47:51.000Z | tests/tagger_test.py | tungnt244/ie_tourism_model | f9d6a234af4ddeb632b63e6dcd05eea23a48b2a7 | [
"MIT"
] | 1 | 2021-06-23T13:35:51.000Z | 2021-06-23T13:35:51.000Z | import os
import unittest
from pprint import pprint
import anago
from anago.config import ModelConfig
from anago.models import SeqLabeling
from anago.preprocess import WordPreprocessor
SAVE_ROOT = os.path.join(os.path.dirname(__file__), 'models')
class TaggerTest(unittest.TestCase):
def setUp(self):
p = WordPreprocessor.load(os.path.join(SAVE_ROOT, 'preprocessor.pkl'))
config = ModelConfig()
config.vocab_size = len(p.vocab_word)
config.char_vocab_size = len(p.vocab_char)
model = SeqLabeling(config, ntags=len(p.vocab_tag))
model.load(filepath=os.path.join(SAVE_ROOT, 'model_weights.h5'))
self.tagger = anago.Tagger(model, preprocessor=p)
self.sent = 'President Obama is speaking at the White House.'
def test_analyze(self):
res = self.tagger.analyze(self.sent)
pprint(res)
def test_tagging(self):
res = self.tagger.tag(self.sent)
self.assertIsInstance(res, list)
self.assertIsInstance(res[0], tuple)
self.assertEqual(len(res[0]), 2)
self.assertIsInstance(res[0][0], str)
self.assertIsInstance(res[0][1], str)
tag_set = {'O', 'LOC', 'PER', 'ORG', 'MISC'}
for _, tag in res:
self.assertIn(tag, tag_set)
def test_get_entities(self):
res = self.tagger.get_entities(self.sent)
print(res)
self.assertIsInstance(list(res.keys())[0], str)
self.assertIsInstance(list(res.values())[0], list)
self.assertIsInstance(list(res.values())[0][0], str)
| 30.686275 | 78 | 0.656869 | 206 | 1,565 | 4.883495 | 0.364078 | 0.139165 | 0.091451 | 0.050696 | 0.139165 | 0.067594 | 0 | 0 | 0 | 0 | 0 | 0.009756 | 0.214058 | 1,565 | 50 | 79 | 31.3 | 0.80813 | 0 | 0 | 0 | 0 | 0 | 0.063259 | 0 | 0 | 0 | 0 | 0 | 0.243243 | 1 | 0.108108 | false | 0 | 0.189189 | 0 | 0.324324 | 0.081081 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a6033320b86e6ca0f7d5044e4b6dd458c4e4c840 | 1,858 | py | Python | unittest/base/JobControllerTest.py | hamatoma/snakeboxx | de4609e0d980c7ce775060e3813e71752e8670aa | [
"CC0-1.0"
] | null | null | null | unittest/base/JobControllerTest.py | hamatoma/snakeboxx | de4609e0d980c7ce775060e3813e71752e8670aa | [
"CC0-1.0"
] | null | null | null | unittest/base/JobControllerTest.py | hamatoma/snakeboxx | de4609e0d980c7ce775060e3813e71752e8670aa | [
"CC0-1.0"
] | null | null | null | '''
Created on 12.04.2018
@author: hm
'''
import os
import time
from unittest.UnitTestCase import UnitTestCase
import base.MemoryLogger
import base.JobController
import base.StringUtils
DEBUG = False
class TestJobController (base.JobController.JobController):
def __init__(self, logger):
base.JobController.JobController.__init__(self, '/tmp/unittest/jobcontrol', 1, logger)
self._done = {}
def process(self, name, args):
self._done[name] = ':' + '|'.join(args)
return True
def result(self, name):
rc = self._done[name] if name in self._done else ''
return rc
class JobControllerTest(UnitTestCase):
def __init__(self):
UnitTestCase.__init__(self)
self._logger = base.MemoryLogger.MemoryLogger(3)
self._controller = TestJobController(self._logger)
self._dummyFile = self._controller.jobDirectory() + os.sep + 'dummy.file'
def debugFlag(self):
base.StringUtils.avoidWarning(self)
return DEBUG
def testBasics(self):
base.JobController.JobController.writeJob('test2args', ['a1', 'a2'], self._controller.jobDirectory(), self._logger)
base.JobController.JobController.writeJob('testNoArgs', [], self._controller.jobDirectory(), self._logger)
self.assertTrue(self._controller.check())
self.assertTrue(self._controller.check())
self.assertIsEqual(':a1|a2', self._controller.result('test2args'))
self.assertIsEqual(':', self._controller.result('testNoArgs'))
def testClean(self):
base.StringUtils.toFile(self._dummyFile, 'Hi')
time.sleep(1)
self.assertFalse(self._controller.check())
self.assertFileNotExists(self._dummyFile)
if __name__ == '__main__':
#import sys;sys.argv = ['', 'Test.testName']
tester = JobControllerTest()
tester.run()
| 32.034483 | 123 | 0.682454 | 197 | 1,858 | 6.213198 | 0.365482 | 0.102941 | 0.098039 | 0.056373 | 0.173203 | 0.05719 | 0 | 0 | 0 | 0 | 0 | 0.011266 | 0.187836 | 1,858 | 57 | 124 | 32.596491 | 0.799867 | 0.041981 | 0 | 0.04878 | 0 | 0 | 0.053612 | 0.013544 | 0.02439 | 0 | 0 | 0 | 0.146341 | 1 | 0.170732 | false | 0 | 0.146341 | 0 | 0.439024 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a605d79f8499b6ae4b9712a96390545d3da40661 | 7,829 | py | Python | online_recommend/first-release-version/online_recommend/movie_recall/cal_movie_sim.py | hfhfn/db_recommend | 3a9f03157bb81e295f8cff30fbc7ad2a8cfdf963 | [
"MIT"
] | null | null | null | online_recommend/first-release-version/online_recommend/movie_recall/cal_movie_sim.py | hfhfn/db_recommend | 3a9f03157bb81e295f8cff30fbc7ad2a8cfdf963 | [
"MIT"
] | null | null | null | online_recommend/first-release-version/online_recommend/movie_recall/cal_movie_sim.py | hfhfn/db_recommend | 3a9f03157bb81e295f8cff30fbc7ad2a8cfdf963 | [
"MIT"
] | null | null | null | from pyspark.ml.clustering import BisectingKMeans, BisectingKMeansModel
from utils import channelInfo
from utils import MovieDataApp
class BkmeansCossim(object):
def __init__(self, spark, database, group=0, recall_topK=50, k=100, minDCS=200, seed=10, channel='电影'):
"""
        :param group: int  index of the cluster group to compute similarities for
        :param recall_topK: int  number of similar movies to recall per movie
        :param k: int  number of cluster centers
"""
self.spark = spark
self.spark.sql("use {}".format(database))
self.movieVector = self.load_movie_vector()
self.group = group
self.recall_num = recall_topK
self.k = k
self.minDCS = minDCS
self.seed = seed
self.channel = channel
self.channel_id = channelInfo[self.channel]
self.similar_model_path = "hdfs://hadoop-master:8020/movie/models/similar/channel_%d_%s_k%dm%d.bkmeans" \
% (self.channel_id, self.channel, self.k, self.minDCS)
def load_movie_vector(self):
"""
        Load the movie vectors from Hive and convert their format
        :return: [dataframe]
"""
movieVector = self.spark.sql("select * from movie_vector")
        # Hive stores the vectors as an array type; convert them to vector type
from pyspark.ml.linalg import Vectors
def array_to_vector(row):
return row.movie_id, row.cate_id, Vectors.dense(row.movieVector)
movieVector = movieVector.rdd.map(array_to_vector).toDF(['movie_id', 'cate_id', 'movieVector'])
# movieVector.show()
return movieVector
def fit_bkmeans(self):
"""
        Train and save the bisecting k-means model
:return:
"""
bkmeans = BisectingKMeans(k=self.k, minDivisibleClusterSize=self.minDCS, featuresCol="movieVector",
predictionCol='group', seed=self.seed)
bkmeans_model = bkmeans.fit(self.movieVector)
# minDivisibleClusterSize=50 => k=100 Errors = 135931.2 k=50 150118.78
# minDivisibleClusterSize=200 => k=100 Errors = 134837.76 k=50 150416.65
# minDivisibleClusterSize=400 => k=50 149343.97
# minDivisibleClusterSize=600 => k=50 148595.19
# minDivisibleClusterSize=800 => k=50 148424.24
bkmeans_model.write().overwrite().save(self.similar_model_path)
return bkmeans_model
def get_bkmeans(self, refit):
"""
        Compute the bisecting k-means clustering result
        :return: [dataframe]
"""
if not refit:
bkmeans_model = BisectingKMeansModel.load(self.similar_model_path)
else:
bkmeans_model = self.fit_bkmeans()
bkmeans_group = bkmeans_model.transform(self.movieVector).select("movie_id", "cate_id", "movieVector",
"group")
def toArray(row):
return row.movie_id, row.cate_id, [float(i) for i in row.movieVector.toArray()], row.group
bkmeans_group = bkmeans_group.rdd.map(toArray).toDF(['movie_id', 'cate_id', 'movieVector', 'group'])
# bkmeans_group.collect()
# bkmeans_group.show()
# bkmeans_group[0].prediction == bkmeans_group[1].prediction => True/False
# Evaluate clustering.
cost = bkmeans_model.computeCost(self.movieVector)
print("Within Set Sum of Squared Errors = " + str(cost))
# # Shows the result.
# print("Cluster Centers: ")
# centers = bkmeans_model.clusterCenters()
# for center in centers:
# print(center)
return bkmeans_group
def load_movie_bkmeans(self):
"""
        Load the clustered movie vectors (movie_bkmeans) and convert their format
        :return: [dataframe]
"""
movieVector = self.spark.sql("select * from movie_bkmeans_{}_k{}m{}"
.format(self.channel_id, self.k, self.minDCS)).filter('group = {}'.format(self.group))
        # Hive stores the vectors as an array type; convert them to vector type
from pyspark.ml.linalg import Vectors
def array_to_vector(row):
return row.movie_id, Vectors.dense(row.movieVector)
movieVector = movieVector.rdd.map(array_to_vector).toDF(['movie_id', 'movieVector'])
# movieVector.show()
return movieVector
def get_cos_sim(self):
"""
        Compute pairwise cosine similarity between movies within the cluster group
        :return: [dataframe]
"""
# from pyspark import SparkConf, SparkContext
# SparkContext()
# SparkConf().set("spark.storage.memoryFraction","0.6")
import numpy as np
# import gc
movieVector = self.load_movie_bkmeans()
temp_df = movieVector.withColumnRenamed("movie_id", "movie_id2").withColumnRenamed("movieVector", "movieVector2")
# print(temp_df.count())
movie_vector_join = movieVector.join(temp_df, movieVector.movie_id != temp_df.movie_id2, how="outer")
# print(movie_vector_join.count())
def cal_cos_similar(row):
vector1 = row.movieVector
vector2 = row.movieVector2
            # cosine similarity: dot(v1, v2) / (||v1|| * ||v2||)
sim = np.dot(vector1, vector2) / (np.linalg.norm(vector1) * (np.linalg.norm(vector2)))
return row.movie_id, row.movie_id2, float('%.4f' % sim)
        # persist with StorageLevel.MEMORY_AND_DISK so data spills to disk when memory runs out; this helps only to a degree
from pyspark import StorageLevel
ret = movie_vector_join.rdd.persist(storageLevel=StorageLevel.MEMORY_AND_DISK).map(cal_cos_similar)\
.toDF(['movie_id', 'movie_id2', 'cos_sim'])
import pyspark.sql.functions as fn
from pyspark.sql import Window
        # group and sort the results, keeping only the top recall_num most similar movies per movie_id
ret = ret.withColumn("sort_num", fn.row_number().over(Window.partitionBy("movie_id")
.orderBy(ret["cos_sim"].desc()))).where("sort_num <= {}".format(self.recall_num))
# gc.collect()
# print(ret.count())
# ret.groupby('movie_id').count().show()
return ret
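# The cosine similarity used in get_cos_sim, as a standalone check with
# hypothetical vectors:
#
#   import numpy as np
#   v1, v2 = np.array([1., 0.]), np.array([1., 1.])
#   sim = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
#   # sim == 1 / sqrt(2), roughly 0.7071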
if __name__ == '__main__':
pass
# def fit_lsh():
# """
#     Train and save the LSH model
# :return:
# """
# from pyspark.ml.feature import BucketedRandomProjectionLSH
# movieVector = load_movie_vector()
#     # defaults are 4 and 10, the officially recommended values
# brp = BucketedRandomProjectionLSH(inputCol='movieVector', outputCol='hashes', numHashTables=4.0,
# bucketLength=10.0)
# brp_model = brp.fit(movieVector)
# # fit_model = brp_model.transform(movieVector).show()
# # fit_model = brp_model.transform(movieVector)
# # print(fit_model.count())
# brp_model.write().overwrite().save(similar_model_path + "channel_%d_%s.lsh" % (channel_id, channel))
#
# def get_lsh_similar():
# """
#     Compute the LSH similarity results
#     :return: [dataframe] the datasetA/datasetB struct columns cannot be stored in Hive
# """
# from pyspark.ml.feature import BucketedRandomProjectionLSHModel
# movieVector = load_movie_vector()
# # print(movieVector.count())
#
# brp_model = BucketedRandomProjectionLSHModel.load(similar_model_path + "channel_%d_%s.lsh" % (channel_id, channel))
#     # threshold is the maximum distance; only pairs with a distance below it are returned
# similar = brp_model.approxSimilarityJoin(movieVector, movieVector, threshold=10, distCol='EucDistance') # EuclideanDistance
# # similar.sort(['EucDistance'], ascending=False).filter("datasetA != datasetB").show()
# print(similar.count())
# import pyspark.sql.functions as fn
# from pyspark.sql import Window
# similar = similar.withColumn("row_number", fn.row_number().over(Window.partitionBy("datasetB.movie_id")
# .orderBy(similar["EucDistance"].asc())))
# # .orderBy(similar["EucDistance"].asc())))
# similar.groupby('datasetB.movie_id').count().show()
# # print(similar.count())
# # similar = similar.where("datasetA.movie_id != datasetB.movie_id")
# # print(similar.count())
# # .where("datasetA.movie_id != datasetB.movie_id and row_number <= 100")
# # print(similar.count())
# return similar | 40.989529 | 131 | 0.62013 | 839 | 7,829 | 5.606675 | 0.263409 | 0.028274 | 0.013818 | 0.013605 | 0.239371 | 0.224277 | 0.181335 | 0.146471 | 0.139031 | 0.116497 | 0 | 0.022505 | 0.256482 | 7,829 | 191 | 132 | 40.989529 | 0.785604 | 0.400817 | 0 | 0.084507 | 0 | 0.014085 | 0.097937 | 0.022217 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140845 | false | 0.014085 | 0.126761 | 0.042254 | 0.408451 | 0.014085 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a605fb5e525f35f836c02ffad077a242234f3c4a | 32,138 | py | Python | src/gui/scroll.py | Epihaius/panda3dstudio | f5c62ca49617cae1aa5aa5b695200027da99e242 | [
"BSD-3-Clause"
] | 63 | 2016-01-02T16:28:47.000Z | 2022-01-19T11:29:51.000Z | src/gui/scroll.py | Epihaius/panda3dstudio | f5c62ca49617cae1aa5aa5b695200027da99e242 | [
"BSD-3-Clause"
] | 12 | 2016-06-12T14:14:15.000Z | 2020-12-18T16:11:45.000Z | src/gui/scroll.py | Epihaius/panda3dstudio | f5c62ca49617cae1aa5aa5b695200027da99e242 | [
"BSD-3-Clause"
] | 17 | 2016-05-23T00:02:27.000Z | 2021-04-25T17:48:27.000Z | from .base import *
MAX_TEX_SIZE = 4000 # TODO: make user-configurable
class ScrollThumb(Widget):
def __init__(self, parent, pane, gfx_ids, cull_bin, scroll_dir, inner_border_id):
Widget.__init__(self, "scrollthumb", parent, gfx_ids, "normal")
self._pane = pane
self.direction = scroll_dir
self._inner_border_id = inner_border_id
sort = parent.sort + 1
self.mouse_region.sort = sort
self.texture = tex = Texture("scrollthumb")
tex.minfilter = SamplerState.FT_nearest
tex.magfilter = SamplerState.FT_nearest
cm = CardMaker("scrollthumb")
cm.set_frame(0., 1., -1., 0.)
self._quad = quad = Mgr.get("gui_root").attach_new_node(cm.generate())
quad.set_bin(cull_bin, sort)
quad.set_texture(self.texture)
quad.set_transparency(TransparencyAttrib.M_alpha)
thickness = Skin.options["scrollthumb_thickness"]
min_size = (0, thickness) if scroll_dir == "horizontal" else (thickness, 0)
self.set_size(min_size, is_min=True)
w, h = self.min_size
l_s, r_s, b_s, t_s = Skin.atlas.inner_borders[inner_border_id]
self.set_pos((l_s, t_s))
size = (0, h + b_s + t_s) if scroll_dir == "horizontal" else (w + l_s + r_s, 0)
parent.set_size(size, is_min=True)
self._start_mouse_crd = self._mouse_crd = 0
self._start_scroll_offset = self._scroll_offset = 0
self._scroll_size = 0
self._scrolling = False
self._listener = DirectObject()
self._listener.accept("gui_mouse1-up", self.on_left_up)
def destroy(self):
Widget.destroy(self)
self._pane = None
self._listener.ignore_all()
self._listener = None
self._quad.detach_node()
self._quad = None
@property
def quad(self):
return self._quad
def update_size(self):
pane = self._pane
d = self.direction
dim = 0 if d == "horizontal" else 1
size = pane.get_size()[dim]
size_virt = pane.virtual_size[dim]
l, r, b, t = self.gfx_inner_borders
if d == "horizontal":
border = l + r
else:
border = b + t
if Skin.options["integrate_scrollbar_in_frame"]:
l_f, r_f, b_f, t_f = pane.frame.gfx_inner_borders
else:
l_f = r_f = b_f = t_f = 0
l_s, r_s, b_s, t_s = Skin.atlas.inner_borders[self._inner_border_id]
if d == "horizontal":
self._scroll_size = size_scroll = size + l_f + r_f - l_s - r_s - border
else:
self._scroll_size = size_scroll = size + b_f + t_f - b_s - t_s - border
size_thumb = border + int(size_scroll * min(1., size / size_virt))
if d == "horizontal":
self.set_size((size_thumb, 0))
else:
self.set_size((0, size_thumb))
def get_offset(self):
return self._scroll_offset
def set_offset(self, offset):
start_scroll_offset = self._scroll_offset
self._scroll_offset = offset
self.update_offset()
self.update_pos()
self.update_mouse_region_frames()
pane = self._pane
pane.update_mouse_watcher_frame()
root_node = pane.widget_root_node
if self.direction == "horizontal":
x = root_node.get_x()
root_node.set_x(x + start_scroll_offset - self._scroll_offset)
else:
z = root_node.get_z()
root_node.set_z(z - start_scroll_offset + self._scroll_offset)
def update_offset(self):
pane = self._pane
d = self.direction
dim = 0 if d == "horizontal" else 1
size = pane.get_size()[dim]
size_virt = pane.virtual_size[dim]
self._scroll_offset = offset = max(0, min(self._scroll_offset, size_virt - min(size_virt, size)))
pane.update_scroll_offset(offset)
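    # Worked example of the clamp above (hypothetical sizes): with a visible
    # size of 200 px and a virtual size of 1000 px, the scroll offset is
    # clamped to [0, 800], so the last page starts 800 px into the content.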
def get_page_size(self):
pane = self._pane
d = self.direction
dim = 0 if d == "horizontal" else 1
return pane.get_size()[dim]
def update_pos(self):
x, y = self.get_pos(net=True)
pane = self._pane
d = self.direction
dim = 0 if d == "horizontal" else 1
size_virt = pane.virtual_size[dim]
incr = int((self._scroll_offset / size_virt) * self._scroll_size)
if d == "horizontal":
x += incr
else:
y += incr
self._quad.set_x(x)
self._quad.set_z(-y)
def set_size(self, size, includes_borders=True, is_min=False):
w, h = Widget.set_size(self, size, includes_borders, is_min)
self._quad.set_scale(w, 1., h)
def update_images(self):
img = Widget.update_images(self)[self.state]
self.texture.load(img)
def update_mouse_region_frames(self, exclude=""):
x, y = self.get_pos()
pane = self._pane
d = self.direction
dim = 0 if d == "horizontal" else 1
size_virt = pane.virtual_size[dim]
offset = int((self._scroll_offset / size_virt) * self._scroll_size)
# temporarily update the thumb position with the scroll offset
self.set_pos((x + offset, y) if d == "horizontal" else (x, y + offset))
Widget.update_mouse_region_frames(self, exclude)
# restore the original thumb position
self.set_pos((x, y))
def update(self):
start_scroll_offset = self._scroll_offset
self.update_size()
self.update_offset()
self.update_pos()
self.update_images()
self.update_mouse_region_frames()
root_node = self._pane.widget_root_node
if self.direction == "horizontal":
x = root_node.get_x()
root_node.set_x(x + start_scroll_offset - self._scroll_offset)
else:
z = root_node.get_z()
root_node.set_z(z - start_scroll_offset + self._scroll_offset)
def __scroll(self, task):
d = self.direction
mouse_pointer = Mgr.get("mouse_pointer", 0)
mouse_crd = mouse_pointer.x if d == "horizontal" else mouse_pointer.y
if mouse_crd == self._mouse_crd:
return task.cont
d_crd = mouse_crd - self._start_mouse_crd
pane = self._pane
dim = 0 if d == "horizontal" else 1
size_virt = pane.virtual_size[dim]
offset = int(size_virt * d_crd / self._scroll_size)
self._scroll_offset = self._start_scroll_offset + offset
self.update_offset()
self.update_pos()
self._mouse_crd = mouse_crd
return task.cont
def on_enter(self):
self.state = "hilited"
self.update_images()
def on_leave(self):
if not self._scrolling:
self.state = "normal"
self.update_images()
def on_left_down(self):
Mgr.add_task(self.__scroll, "scroll")
self._scrolling = True
mouse_pointer = Mgr.get("mouse_pointer", 0)
mouse_crd = mouse_pointer.x if self.direction == "horizontal" else mouse_pointer.y
self._start_mouse_crd = self._mouse_crd = mouse_crd
self._start_scroll_offset = self._scroll_offset
def on_left_up(self):
if not self._scrolling:
return
Mgr.remove_task("scroll")
self._scrolling = False
self.update_mouse_region_frames()
pane = self._pane
pane.update_mouse_watcher_frame()
root_node = pane.widget_root_node
if self.direction == "horizontal":
x = root_node.get_x()
root_node.set_x(x + self._start_scroll_offset - self._scroll_offset)
else:
z = root_node.get_z()
root_node.set_z(z - self._start_scroll_offset + self._scroll_offset)
if self.mouse_watcher.get_over_region() != self.mouse_region:
self.on_leave()
class ScrollBar(Widget):
def __init__(self, parent, pane, gfx_ids, thumb_gfx_ids, cull_bin, scroll_dir, inner_border_id):
Widget.__init__(self, "scrollbar", parent, gfx_ids)
self.direction = scroll_dir
self.sort = sort = parent.sort + 1
self.mouse_region.sort = sort
self._thumb = self._create_thumb(pane, thumb_gfx_ids, cull_bin, scroll_dir, inner_border_id)
self._start_mouse_crd = 0
self._start_scroll_offset = 0
self._scrolling = False
self._clicked = False
self._listener = DirectObject()
self._listener.accept("gui_mouse1-up", self.on_left_up)
def _create_thumb(self, pane, thumb_gfx_ids, cull_bin, scroll_dir, inner_border_id):
""" Override in derived class """
return ScrollThumb(self, pane, thumb_gfx_ids, cull_bin, scroll_dir, inner_border_id)
def destroy(self):
self._listener.ignore_all()
self._listener = None
self._thumb.destroy()
self._thumb = None
Widget.destroy(self)
def get_thumb(self):
return self._thumb
def set_pos(self, pos):
Widget.set_pos(self, pos)
self._thumb.update_pos()
def update_images(self):
if self.has_state(""):
Widget.update_images(self)[""]
self._thumb.update_images()
def update_mouse_region_frames(self, exclude=""):
Widget.update_mouse_region_frames(self, exclude)
self._thumb.update_mouse_region_frames(exclude)
def __scroll(self, task=None):
d = self.direction
thumb = self._thumb
mouse_pointer = Mgr.get("mouse_pointer", 0)
mouse_crd = mouse_pointer.x if d == "horizontal" else mouse_pointer.y
quad = thumb.quad
crd = quad.get_x() if d == "horizontal" else -quad.get_z()
thumb_size = quad.get_sx() if d == "horizontal" else quad.get_sz()
page_size = thumb.get_page_size()
offset = thumb.get_offset()
if crd < mouse_crd < crd + thumb_size:
return task.again if task else None
if mouse_crd < crd:
thumb.set_offset(offset - page_size)
elif mouse_crd > crd + thumb_size:
thumb.set_offset(offset + page_size)
return task.again if task else None
def __start_scrolling(self, task):
Mgr.add_task(.1, self.__scroll, "scroll")
self._scrolling = True
def on_left_down(self):
self.__scroll()
Mgr.add_task(.3, self.__start_scrolling, "start_scrolling")
self._clicked = True
def on_left_up(self):
if self._scrolling:
Mgr.remove_task("scroll")
self._scrolling = False
elif self._clicked:
Mgr.remove_task("start_scrolling")
self._clicked = False
class ScrollPaneFrame(Widget):
def __init__(self, parent, pane, gfx_ids, bar_gfx_ids, thumb_gfx_ids, cull_bin,
scroll_dir, inner_border_id, has_mouse_region=True):
Widget.__init__(self, "scroll_pane_frame", parent, gfx_ids,
has_mouse_region=has_mouse_region)
self._pane = pane
self._scrollbar = self._create_bar(pane, bar_gfx_ids, thumb_gfx_ids, cull_bin,
scroll_dir, inner_border_id)
sizer = Sizer("horizontal" if scroll_dir == "vertical" else "vertical")
self.sizer = sizer
@property
def sort(self):
return self.parent.sort
def _create_bar(self, pane, bar_gfx_ids, thumb_gfx_ids, cull_bin, scroll_dir, inner_border_id):
""" Override in derived class """
return ScrollBar(self, pane, bar_gfx_ids, thumb_gfx_ids, cull_bin, scroll_dir,
inner_border_id)
def setup(self, client_size=(0, 0), append_bar=True):
scrollbar = self._scrollbar
scroll_dir = scrollbar.direction
l, r, b, t = borders = self.gfx_inner_borders
w, h = client_size
w += l + r
h += b + t
if scroll_dir == "vertical":
w += scrollbar.min_size[0]
if Skin.options["integrate_scrollbar_in_frame"]:
w -= r if append_bar else l
else:
h += scrollbar.min_size[1]
if Skin.options["integrate_scrollbar_in_frame"]:
h -= b if append_bar else t
default_size = (w, h)
sizer = self.sizer
sizer.default_size = default_size
if append_bar:
borders = (l, 0, b, t) if scroll_dir == "vertical" else (l, r, 0, t)
else:
borders = (0, r, b, t) if scroll_dir == "vertical" else (l, r, b, 0)
if Skin.options["integrate_scrollbar_in_frame"]:
bar_borders = None
else:
if append_bar:
bar_borders = (0, r, b, t) if scroll_dir == "vertical" else (l, r, b, 0)
else:
bar_borders = (l, 0, b, t) if scroll_dir == "vertical" else (l, r, 0, t)
if append_bar:
sizer.add(self._pane, (1., 1.), ("expand", "expand"), borders)
sizer.add(scrollbar, alignments=("expand", "expand"), borders=bar_borders)
else:
sizer.add(scrollbar, alignments=("expand", "expand"), borders=bar_borders)
sizer.add(self._pane, (1., 1.), ("expand", "expand"), borders)
def destroy(self):
Widget.destroy(self)
self._pane = None
self._scrollbar = None
def get_scrollbar(self):
return self._scrollbar
class ScrollPane(WidgetCard):
def __init__(self, frame_parent, pane_id, scroll_dir, cull_bin, frame_gfx_ids, bar_gfx_ids,
thumb_gfx_ids, bar_inner_border_id, bg_tex_id="", frame_client_size=(0, 0),
frame_has_mouse_region=True, append_scrollbar=True):
frame = self._create_frame(frame_parent, scroll_dir, cull_bin, frame_gfx_ids,
bar_gfx_ids, thumb_gfx_ids, bar_inner_border_id, frame_has_mouse_region)
WidgetCard.__init__(self, pane_id, frame)
sizer = Sizer(scroll_dir)
self.sizer = sizer
sizer.default_size = (1, 1)
frame.setup(frame_client_size, append_scrollbar)
self._background_tex_id = bg_tex_id
self.sort = frame.sort + 1
self.widget_root_node = NodePath("pane_widget_root")
self.display_region = None
self.scrollthumb = frame.get_scrollbar().get_thumb()
self._subimg_index = -1
self._subimg_x = 0
self._subimg_y = 0
self._subimg_w = 0
self._subimg_h = 0
self._mouse_region_mask = mask = MouseWatcherRegion(f"{pane_id}_mask", 0., 0., 0., 0.)
mask.suppress_flags = MouseWatcherRegion.SF_mouse_button
mask.sort = self._get_mask_sort()
Mgr.get("mouse_watcher").add_region(mask)
self._mouse_watcher = MouseWatcher(pane_id)
self._mouse_watcher_np = None
GD["mouse_watchers"].append(self._mouse_watcher)
self._cull_bin = cull_bin
gui_root = Mgr.get("gui_root")
node_0 = gui_root.attach_new_node("scissor_node_0")
node_1 = gui_root.attach_new_node("scissor_node_1")
scissor_effect = ScissorEffect.make_node(True)
scissor_effect = scissor_effect.add_point((0., 0., 0.), node_0)
scissor_effect = scissor_effect.add_point((0., 0., 0.), node_1)
self.scissor_effect = scissor_effect
self._scissor_nodes = (node_0, node_1)
self._listener = listener = DirectObject()
listener.accept(f"{pane_id}_mouse1", self.__on_left_down)
listener.accept(f"{pane_id}_mouse1-up", self.__on_left_up)
listener.accept(f"{pane_id}_mouse3", self.__on_right_down)
listener.accept(f"{pane_id}_mouse3-up", self.__on_right_up)
listener.accept(f"{pane_id}_wheel_up", self.__on_wheel_up)
listener.accept(f"{pane_id}_wheel_down", self.__on_wheel_down)
listener.accept(f"{pane_id}_home", self.__scroll_to_start)
listener.accept(f"{pane_id}_end", self.__scroll_to_end)
listener.accept(f"{pane_id}_page_up", self.__on_page_up)
listener.accept(f"{pane_id}_page_down", self.__on_page_down)
listener.accept(f"{pane_id}_page_up-repeat", self.__on_page_up)
listener.accept(f"{pane_id}_page_down-repeat", self.__on_page_down)
def _create_frame(self, parent, scroll_dir, cull_bin, gfx_ids, bar_gfx_ids,
thumb_gfx_ids, bar_inner_border_id, has_mouse_region=True):
""" Override in derived class """
return ScrollPaneFrame(parent, self, gfx_ids, bar_gfx_ids, thumb_gfx_ids,
cull_bin, scroll_dir, bar_inner_border_id, has_mouse_region)
@property
def frame(self):
return self.parent
def _get_mask_sort(self):
""" Override in derived class """
return self.sort + 8
def _contents_needs_redraw(self):
""" Override in derived class """
return True
def _copy_widget_images(self, pane_image):
""" Override in derived class """
pass
def _can_scroll(self):
""" Override in derived class """
return True
def _finalize_mouse_watcher_frame_update(self):
""" Override in derived class """
pass
def setup(self):
self.display_region = region = GD.window.make_display_region(0., 1., 0., 1.)
GD.window.remove_display_region(region)
self.update_display_region()
self.update_mouse_watcher_frame()
mouse_watcher_node = self._mouse_watcher
mouse_watcher_node.set_display_region(region)
input_ctrl = GD.showbase.mouseWatcher.parent
self._mouse_watcher_np = mw = input_ctrl.attach_new_node(mouse_watcher_node)
mouse_watcher_node.set_enter_pattern("gui_region_enter")
mouse_watcher_node.set_leave_pattern("gui_region_leave")
pane_id = self.widget_type
btn_thrower_node = ButtonThrower(f"btn_thrower_{pane_id}")
btn_thrower_node.prefix = f"{pane_id}_"
btn_thrower_node.modifier_buttons = ModifierButtons()
mw.attach_new_node(btn_thrower_node)
def destroy(self):
self._listener.ignore_all()
self._listener = None
Mgr.get("mouse_watcher").remove_region(self._mouse_region_mask)
WidgetCard.destroy(self)
GD["mouse_watchers"].remove(self._mouse_watcher)
self._mouse_watcher_np.detach_node()
self._mouse_watcher_np = None
self._mouse_watcher = None
self.display_region = None
self._mouse_region_mask = None
self.scissor_effect = None
for node in self._scissor_nodes:
node.detach_node()
self._scissor_nodes = None
self.widget_root_node.detach_node()
self.widget_root_node = None
self.scrollthumb = None
@property
def min_size(self):
min_size = list(self.sizer.min_size)
min_size[self.sizer.prim_dim] = 0
return tuple(min_size)
@property
def virtual_size(self):
w_min, h_min = self.sizer.min_size
return (max(1, w_min), max(1, h_min))
@virtual_size.setter
def virtual_size(self, size):
w_d, h_d = self.sizer.default_size
w, h = size
w = max(w, w_d)
h = max(h, h_d)
self.sizer.min_size = (w, h)
def reset_sub_image_index(self):
self._subimg_index = -1
def update_scroll_offset(self, scroll_offset):
width, height = self.get_size()
w_virt, h_virt = w_subimg, h_subimg = self.virtual_size
scroll_dir = self.sizer.prim_dir
max_size_exceeded = False
# To prevent downscaling of the texture due to its size exceeding the maximum size allowed
# by the graphics device, the PNMImage loaded into it must be a smaller sub-image, small
# enough to avoid lag while scrolling but big enough to accommodate a large scroll pane.
# Two consecutive sub-images overlap by a portion equal to the pane size;
# the scroll offset divided by the size limit minus this overlap yields the index
# of the sub-image that needs to be cut out of the complete image;
# the position of this sub-image equals that index multiplied by the shortened size
# limit, while its size equals the virtual size minus its position, or the size limit
# itself if this is smaller.
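        # Worked example with hypothetical numbers: for MAX_TEX_SIZE = 16384 and a
        # pane width of 400, a horizontal scroll offset of 20000 gives sub-image
        # index 20000 // (16384 - 400) = 1, at x = 1 * (16384 - 400) = 15984, with
        # width min(16384, w_virt - 15984).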
if scroll_dir == "horizontal":
if w_virt > max(width, MAX_TEX_SIZE):
index = scroll_offset // (MAX_TEX_SIZE - width)
x_subimg = index * (MAX_TEX_SIZE - width)
w_subimg = min(MAX_TEX_SIZE, w_virt - x_subimg)
max_size_exceeded = True
else:
if h_virt > max(height, MAX_TEX_SIZE):
index = scroll_offset // (MAX_TEX_SIZE - height)
y_subimg = index * (MAX_TEX_SIZE - height)
h_subimg = min(MAX_TEX_SIZE, h_virt - y_subimg)
max_size_exceeded = True
size = width if scroll_dir == "horizontal" else height
size_subimg = w_subimg if scroll_dir == "horizontal" else h_subimg
tex_scale = min(1., size / size_subimg)
quad = self.quad
if max_size_exceeded:
if self._subimg_index != index:
w = w_subimg if scroll_dir == "horizontal" else width
h = height if scroll_dir == "horizontal" else h_subimg
x = x_subimg if scroll_dir == "horizontal" else 0
y = 0 if scroll_dir == "horizontal" else y_subimg
if self._image:
sub_image = PNMImage(w, h, 4)
sub_image.copy_sub_image(self._image, 0, 0, x, y, w, h)
self.texture.load(sub_image)
sx = tex_scale if scroll_dir == "horizontal" else 1.
sy = 1. if scroll_dir == "horizontal" else tex_scale
quad.set_tex_scale(TextureStage.default, sx, sy)
self._subimg_x = x
self._subimg_y = y
self._subimg_w = w
self._subimg_h = h
self._subimg_index = index
scroll_offset -= x_subimg if scroll_dir == "horizontal" else y_subimg
else:
self._subimg_index = -1
if scroll_dir == "horizontal":
            tex_offset = (scroll_offset / max(1, size)) * tex_scale
quad.set_tex_offset(TextureStage.default, tex_offset, 0.)
else:
tex_offset = 1. - tex_scale - (scroll_offset / max(1, size)) * tex_scale
quad.set_tex_offset(TextureStage.default, 0., tex_offset)
def set_size(self, size, is_min=False):
WidgetCard.set_size(self, size, is_min)
self.scrollthumb.update_size()
self.scrollthumb.update_offset()
def get_size(self):
return self._size
def update_images(self):
width, height = self.get_size()
w_virt, h_virt = w_subimg, h_subimg = self.virtual_size
sizer = self.sizer
scroll_dir = sizer.prim_dir
if self._subimg_index > -1:
x_subimg = self._subimg_x
y_subimg = self._subimg_y
w_subimg = self._subimg_w
h_subimg = self._subimg_h
if scroll_dir == "horizontal":
tex_offset = (self.quad.get_tex_offset(TextureStage.default)[0], 1.)
tex_scale = (min(1., width / w_subimg), 1.)
else:
tex_offset = (1., self.quad.get_tex_offset(TextureStage.default)[1])
tex_scale = (1., min(1., height / h_subimg))
tex = self.texture
if self._contents_needs_redraw():
sizer.update_images()
w = w_virt if scroll_dir == "horizontal" else width
h = height if scroll_dir == "horizontal" else h_virt
img = PNMImage(w, h, 4)
if self._background_tex_id:
x, y, w, h = Skin.atlas.regions[self._background_tex_id]
src_img = PNMImage(w, h, 4)
src_img.copy_sub_image(Skin.atlas.image, 0, 0, x, y, w, h)
if min(w, h) > 1:
painter = PNMPainter(img)
fill = PNMBrush.make_image(src_img, 0, 0)
pen = PNMBrush.make_transparent()
painter.fill = fill
painter.pen = pen
painter.draw_rectangle(0, 0, w_virt, h_virt)
else:
img.unfiltered_stretch_from(src_img)
self._copy_widget_images(img)
if self._subimg_index > -1:
w = w_subimg if scroll_dir == "horizontal" else width
h = height if scroll_dir == "horizontal" else h_subimg
x = x_subimg if scroll_dir == "horizontal" else 0
y = 0 if scroll_dir == "horizontal" else y_subimg
sub_image = PNMImage(w, h, 4)
sub_image.copy_sub_image(img, 0, 0, x, y, w, h)
tex.load(sub_image)
else:
tex.load(img)
self._image = img
l = 0
r = min(width, w_virt) if scroll_dir == "horizontal" else width
b = -height if scroll_dir == "horizontal" else -min(height, h_virt)
t = 0
quad = self.create_quad((l, r, b, t))
x, y = self.get_pos(net=True)
quad.set_pos(x, 0, -y)
quad.set_texture(tex)
quad.set_bin(self._cull_bin, self.sort)
quad.set_tex_offset(TextureStage.default, *tex_offset)
quad.set_tex_scale(TextureStage.default, *tex_scale)
def copy_sub_image(self, widget, sub_image, width, height, offset_x=0, offset_y=0):
img = self._image
if not img:
return
x, y = widget.get_pos(ref_node=self.widget_root_node)
x += offset_x
y += offset_y
img.copy_sub_image(sub_image, x, y, 0, 0, width, height)
if self._subimg_index > -1:
width, height = self.get_size()
scroll_dir = self.sizer.prim_dir
w = self._subimg_w if scroll_dir == "horizontal" else width
h = height if scroll_dir == "horizontal" else self._subimg_h
x = self._subimg_x if scroll_dir == "horizontal" else 0
y = 0 if scroll_dir == "horizontal" else self._subimg_y
sub_image = PNMImage(w, h, 4)
sub_image.copy_sub_image(img, 0, 0, x, y, w, h)
self.texture.load(sub_image)
else:
self.texture.load(img)
def update_mouse_region_frames(self, exclude="", recurse=True):
sizer = self.sizer
scroll_dir = sizer.prim_dir
w_virt, h_virt = self.virtual_size
w, h = self.get_size()
x, y = self.get_pos(net=True)
l = x
r = x + (min(w, w_virt) if scroll_dir == "horizontal" else w)
b = -y - (h if scroll_dir == "horizontal" else min(h, h_virt))
t = -y
self._mouse_region_mask.frame = (l, r, b, t)
if recurse:
sizer.update_mouse_region_frames(exclude)
self.update_mouse_watcher_frame()
if self.display_region:
self.update_display_region()
def update_display_region(self):
scroll_dir = self.sizer.prim_dir
w_virt, h_virt = self.virtual_size
w_ref, h_ref = Mgr.get("window_size")
w, h = self.get_size()
x, y = self.get_pos(net=True)
l = x / w_ref
r = (x + (min(w, w_virt) if scroll_dir == "horizontal" else w)) / w_ref
b = 1. - (y + (h if scroll_dir == "horizontal" else min(h, h_virt))) / h_ref
t = 1. - y / h_ref
self.display_region.dimensions = (l, r, b, t)
# Update the nodes controlling the ScissorEffect so it keeps geometry of input fields
# from being rendered outside of the display region.
scissor_nodes = self._scissor_nodes
scissor_nodes[0].set_pos(x, 0., -y)
scissor_nodes[1].set_pos(x + w, 0., -y - h)
def update_mouse_watcher_frame(self):
scroll_dir = self.sizer.prim_dir
w_virt, h_virt = self.virtual_size
width, height = self.get_size()
if scroll_dir == "horizontal":
l = self.scrollthumb.get_offset()
r = l + min(width, w_virt)
self._mouse_watcher.set_frame(l, r, -height, 0)
else:
t = -self.scrollthumb.get_offset()
b = t - min(height, h_virt)
self._mouse_watcher.set_frame(0, width, b, t)
self._finalize_mouse_watcher_frame_update()
@property
def mouse_watcher(self):
return self._mouse_watcher
def get_mouse_watcher_nodepath(self):
return self._mouse_watcher_np
def update_quad_pos(self):
WidgetCard.update_quad_pos(self)
self.scrollthumb.update_pos()
def update_widget_root_node(self):
scroll_dir = self.sizer.prim_dir
if scroll_dir == "horizontal":
self.widget_root_node.set_x(-self.scrollthumb.get_offset())
else:
self.widget_root_node.set_z(self.scrollthumb.get_offset())
def update_layout(self):
self._subimg_index = -1
sizer = self.parent.sizer
size = sizer.update_min_size()
sizer.set_size(size)
sizer.update_positions()
sizer.update_images()
self.update_quad_pos()
sizer.update_mouse_region_frames()
self.update_widget_root_node()
def __on_left_down(self):
region = self._mouse_watcher.get_over_region()
if not region:
return
name = region.name
if name == "inputfield_mask":
Mgr.do("accept_field_input")
elif name.startswith("widget_"):
widget_id = int(name.replace("widget_", ""))
Widget.registry[widget_id].on_left_down()
def __on_left_up(self):
region = self._mouse_watcher.get_over_region()
if not region:
return
name = region.name
if name.startswith("widget_"):
widget_id = int(name.replace("widget_", ""))
Widget.registry[widget_id].on_left_up()
def __on_right_down(self):
region = self._mouse_watcher.get_over_region()
if not region:
return
name = region.name
if name == "inputfield_mask":
Mgr.do("reject_field_input")
elif name.startswith("widget_"):
widget_id = int(name.replace("widget_", ""))
Widget.registry[widget_id].on_right_down()
def __on_right_up(self):
region = self._mouse_watcher.get_over_region()
if not region:
return
name = region.name
if name.startswith("widget_"):
widget_id = int(name.replace("widget_", ""))
Widget.registry[widget_id].on_right_up()
def __on_wheel_up(self):
if self._can_scroll():
offset = self.scrollthumb.get_offset()
self.scrollthumb.set_offset(offset - Skin.options["scroll_step"])
def __on_wheel_down(self):
if self._can_scroll():
offset = self.scrollthumb.get_offset()
self.scrollthumb.set_offset(offset + Skin.options["scroll_step"])
def __on_page_up(self):
if self._can_scroll():
scrollthumb = self.scrollthumb
offset = scrollthumb.get_offset()
page_size = scrollthumb.get_page_size()
scrollthumb.set_offset(offset - page_size)
def __on_page_down(self):
if self._can_scroll():
scrollthumb = self.scrollthumb
offset = scrollthumb.get_offset()
page_size = scrollthumb.get_page_size()
scrollthumb.set_offset(offset + page_size)
def __scroll_to_start(self):
ctrl_down = self._mouse_watcher.is_button_down("control")
if ctrl_down and self._can_scroll():
self.scrollthumb.set_offset(0)
def __scroll_to_end(self):
ctrl_down = self._mouse_watcher.is_button_down("control")
if ctrl_down and self._can_scroll():
sizer = self.sizer
scroll_dir = sizer.prim_dir
offset = self.virtual_size[0 if scroll_dir == "horizontal" else 1]
self.scrollthumb.set_offset(offset)
| 33.166151 | 105 | 0.609434 | 4,299 | 32,138 | 4.215166 | 0.071645 | 0.031786 | 0.023674 | 0.038243 | 0.563048 | 0.47387 | 0.420838 | 0.359914 | 0.327631 | 0.303736 | 0 | 0.007158 | 0.291462 | 32,138 | 968 | 106 | 33.200413 | 0.788635 | 0.035783 | 0 | 0.399135 | 0 | 0 | 0.049958 | 0.006596 | 0 | 0 | 0 | 0.001033 | 0 | 1 | 0.103746 | false | 0.002882 | 0.001441 | 0.012968 | 0.151297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a6089b93cf4c6a825b8066cca145aee395091e54 | 4,561 | py | Python | extraction_tool/extract_token.py | ohadlevy/homebridge-palgate-opener | 1dc2c4e00a7dc969221db6f571a75d2d4088a7a0 | [
"MIT"
] | 13 | 2020-10-24T22:17:31.000Z | 2022-03-04T22:31:23.000Z | extraction_tool/extract_token.py | ohadlevy/homebridge-palgate-opener | 1dc2c4e00a7dc969221db6f571a75d2d4088a7a0 | [
"MIT"
] | 7 | 2021-01-24T11:23:24.000Z | 2022-02-27T13:19:28.000Z | extraction_tool/extract_token.py | ohadlevy/homebridge-palgate-opener | 1dc2c4e00a7dc969221db6f571a75d2d4088a7a0 | [
"MIT"
] | 4 | 2020-10-24T22:17:42.000Z | 2022-01-20T08:01:31.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
########################################################
# PalGate Token Extraction Tool #
########################################################
# #
# Like my work? please consider buying me a coffee :) #
# https://paypal.me/roeio #
# Written by Roei Ofri #
########################################################
import re
import requests
import constants
__author__ = "Roei Ofri"
class PalGateInfoExtractor:
def __init__(self):
self.client = requests.Session()
self.client.headers = {'Content-Type': 'application/json'}
self.sms_headers = {'x-bt-app-token': constants.SMS_TOKEN}
self.sms_headers.update(self.client.headers)
def initiate_sms(self, phone_number):
# Initiating SMS request authorization
self._validate_phone_prefix(phone_number)
res = self.client.get(url=constants.SMS_ADDR.format(phone_number), headers=self.sms_headers)
self._validate_res(res)
return True
def confirm_sms(self, confirm_code, phone_number):
# Confirm SMS
res = self.client.get(url=constants.VALIDATE_ADDR.format(phone_number, str(confirm_code)),
headers=self.sms_headers)
self._validate_res(res)
return res.json()['user']['token']
def get_device_id(self, token):
# Extract device ID (gate ID)
res = self.client.get(url=constants.DEVICE_ADDR, headers={'x-bt-user-token': token})
self._validate_res(res)
return res.json()['devices']
@staticmethod
def _validate_res(res):
print(res.json()['msg'])
assert res.json()['status'] == 'ok', res.json()
@staticmethod
def _validate_phone_prefix(phone_number):
assert len(phone_number) == 10, 'Phone number is too short/long, please check.'
assert re.findall('^[0-9]*$', phone_number), 'Phone number should contain only digits'
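        # Note: this check only rejects numbers whose digits after the first are
        # all 0s and 1s; the message suggests a stricter prefix check was intended.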
assert not re.findall('^[0-1]*$', phone_number[1:]), 'Phone number cannot start with 0 or 1.'
def return_gate_info(self, res):
ret_info = []
for r in res:
info_dict = {}
for k, v in r.items():
if k == 'address' or k == '_id':
info_dict[k] = v
if info_dict:
ret_info.append(info_dict)
return ret_info
def main():
palgate = PalGateInfoExtractor()
    phone_number = str(input('Please type your phone number, e.g. 0541234567: '))
assert palgate.initiate_sms(phone_number)
sms_code = input('SMS was sent to your phone, please type SMS verification code: ')
token = palgate.confirm_sms(sms_code, phone_number)
devices = palgate.get_device_id(token)
gate_info = palgate.return_gate_info(devices)
print("--------------------------------------------------------------")
print(" This is the extracted information: ")
print(" Please save it for HomeBridge config usage ")
print(" Any issues? please open a ticket on github ")
print(" https://github.com/RoeiOfri/homebridge-palgate-opener/issues ")
print(" ")
print("--------------------------------------------------------------")
print("Disclaimer: this tool and the author are not responsible")
print("for any issues/damage etc that might occur due to usage ")
print("of this plugin. this plugin was written for teaching purposes only.")
print("This tool and plugin are free and will always be free.")
print("If you love this plugin and this tool and want to show your appreciation")
print("please consider buying me a coffee :)")
print("")
print("-----------------------------------------------------------------")
print("Info returns: [{<gate address>: <gate_id>}, <gate_address>: <gate_id>]")
print("Gates ID and Address (location): {}".format(gate_info))
print("-----------------------------------------------------------------------")
print("Your token is (single token is needed for all gates): {}".format(token))
print("-----------------------------------------------------------------------")
print("")
print("Donate -> https://paypal.me/roeio")
print("Donate ->https://www.buymeacoffee.com/roeio ")
if __name__ == '__main__':
main()
| 42.626168 | 101 | 0.534093 | 499 | 4,561 | 4.723447 | 0.344689 | 0.074671 | 0.023759 | 0.020365 | 0.140008 | 0.114552 | 0.054306 | 0.038184 | 0.038184 | 0 | 0 | 0.006739 | 0.251699 | 4,561 | 106 | 102 | 43.028302 | 0.683856 | 0.085508 | 0 | 0.157895 | 0 | 0 | 0.402708 | 0.082999 | 0 | 0 | 0 | 0 | 0.065789 | 1 | 0.105263 | false | 0 | 0.039474 | 0 | 0.210526 | 0.315789 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a608ef057ab4973763dbb8b9d94aa73dd295ec64 | 3,335 | py | Python | main.py | Mnkai/GoogleTakeoutPhotosMigrationPrepare | d43c15dce87c937b2696dde16f62461ab504fdef | [
"WTFPL"
] | 1 | 2020-04-19T06:43:30.000Z | 2020-04-19T06:43:30.000Z | main.py | Mnkai/GoogleTakeoutPhotosMigrationPrepare | d43c15dce87c937b2696dde16f62461ab504fdef | [
"WTFPL"
] | null | null | null | main.py | Mnkai/GoogleTakeoutPhotosMigrationPrepare | d43c15dce87c937b2696dde16f62461ab504fdef | [
"WTFPL"
] | null | null | null | import os
import ArrayUtils
import GoogleMetadataUtils
from objects.ExportedObject import ExportedObject
from objects.Image import Image
counter = 0
def directory_walk(start_path, extension):
to_return = []
for (dirpath, dirnames, filenames) in os.walk(start_path):
for filename in filenames:
if filename.lower().endswith(extension.lower()):
to_return.append(os.sep.join([dirpath, filename]))
return to_return
def exif_process(image):
if image.get_exif_creation_time() is None:
googlemetadata_file = GoogleMetadataUtils.get_googlemetadata_filename(image.filepath)
googlemetadata_timestamp = GoogleMetadataUtils.get_googlemetadata_timestamp(googlemetadata_file)
if googlemetadata_timestamp is not None:
image.set_exif_creation_time(googlemetadata_timestamp)
image.set_modified_time(googlemetadata_timestamp)
global counter
counter += 1
print("Change " + str(image.filepath) + " exif and modification time to " + str(
googlemetadata_timestamp))
else:
# Set image modification date to exif data
        if image.modified_time != image.get_exif_creation_time():
            image.set_modified_time(image.get_exif_creation_time())
            print("Changed " + str(image.filepath) + " modification time to " + str(
                image.get_exif_creation_time()))
return
def other_process(exported_object):
    googlemetadata_file = GoogleMetadataUtils.get_googlemetadata_filename(exported_object.filepath)
    if googlemetadata_file is not None:
        googlemetadata_timestamp = GoogleMetadataUtils.get_googlemetadata_timestamp(googlemetadata_file)
        if googlemetadata_timestamp is not None:
            # No EXIF support for this file type, so only the modification time is set
            if exported_object.modified_time != googlemetadata_timestamp:
                exported_object.set_modified_time(googlemetadata_timestamp)
                global counter
                counter += 1
                print(
                    "Changed " + str(exported_object.filepath) + " modification time to " + str(googlemetadata_timestamp))
def main():
directory_walk_array = []
directory_walk_array = ArrayUtils.concat(directory_walk_array, directory_walk(".", ".jpg"))
directory_walk_array = ArrayUtils.concat(directory_walk_array, directory_walk(".", ".jpeg"))
directory_walk_array = ArrayUtils.concat(directory_walk_array, directory_walk(".", ".tiff"))
directory_walk_array = ArrayUtils.concat(directory_walk_array, directory_walk(".", ".png"))
directory_walk_array = ArrayUtils.concat(directory_walk_array, directory_walk(".", ".avi"))
directory_walk_array = ArrayUtils.concat(directory_walk_array, directory_walk(".", ".mp4"))
directory_walk_array = ArrayUtils.concat(directory_walk_array, directory_walk(".", ".mov"))
file_array = []
for filepath in directory_walk_array:
        try:
            file_array.append(Image(filepath))
        except Exception:
            # Not a supported image; fall back to a generic exported object
            file_array.append(ExportedObject(filepath))
    for exported_file in file_array:
        if isinstance(exported_file, Image):
            exif_process(exported_file)
        else:
            other_process(exported_file)
print("Total " + str(counter) + " files processed")
if __name__ == "__main__":
main()
| 37.055556 | 113 | 0.689055 | 362 | 3,335 | 6.052486 | 0.21547 | 0.142401 | 0.131447 | 0.098585 | 0.560018 | 0.472387 | 0.375628 | 0.375628 | 0.375628 | 0.375628 | 0 | 0.001544 | 0.223388 | 3,335 | 89 | 114 | 37.47191 | 0.844402 | 0.028186 | 0 | 0.15625 | 0 | 0 | 0.050649 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.078125 | 0 | 0.171875 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a60b9d285aa7339e1763df0a9faec446ada591b3 | 2,658 | py | Python | src/models/gan_bert.py | gchhablani/financial-sentiment-analysis | b18e9072f8edb9f09d0fef697892f2462d6d44e9 | [
"MIT"
] | 2 | 2021-10-03T14:24:52.000Z | 2021-11-17T14:55:53.000Z | src/models/gan_bert.py | gchhablani/financial-sentiment-analysis | b18e9072f8edb9f09d0fef697892f2462d6d44e9 | [
"MIT"
] | null | null | null | src/models/gan_bert.py | gchhablani/financial-sentiment-analysis | b18e9072f8edb9f09d0fef697892f2462d6d44e9 | [
"MIT"
] | 1 | 2021-10-03T14:25:36.000Z | 2021-10-03T14:25:36.000Z | # https://raw.githubusercontent.com/crux82/ganbert-pytorch/main/GANBERT_pytorch.ipynb
# [WIP], not finished
import torch
from torch.nn import Dropout, LeakyReLU, Linear, Module, Sequential
from transformers import AutoModel
class Generator(Module):
def __init__(
self, noise_size=100, output_size=768, hidden_sizes=[768], dropout_rate=0.1
):
super(Generator, self).__init__()
layers = []
hidden_sizes = [noise_size] + hidden_sizes
for i in range(len(hidden_sizes) - 1):
layers.extend(
[
Linear(hidden_sizes[i], hidden_sizes[i + 1]),
LeakyReLU(0.2, inplace=True),
Dropout(dropout_rate),
]
)
layers.append(Linear(hidden_sizes[-1], output_size))
self.layers = Sequential(*layers)
def forward(self, noise):
output_rep = self.layers(noise)
return output_rep
# ------------------------------
# The Discriminator
# https://www.aclweb.org/anthology/2020.acl-main.191/
# https://github.com/crux82/ganbert
# ------------------------------
class Discriminator(Module):
def __init__(
self, input_size=768, hidden_sizes=[768], num_labels=3, dropout_rate=0.1
):
super(Discriminator, self).__init__()
self.input_dropout = Dropout(p=dropout_rate)
layers = []
hidden_sizes = [input_size] + hidden_sizes
for i in range(len(hidden_sizes) - 1):
layers.extend(
[
Linear(hidden_sizes[i], hidden_sizes[i + 1]),
LeakyReLU(0.2, inplace=True),
Dropout(dropout_rate),
]
)
self.layers = Sequential(*layers) # per il flatten
self.logit = Linear(
hidden_sizes[-1], num_labels + 1
) # +1 for the probability of this sample being fake/real.
def forward(self, input_rep):
input_rep = self.input_dropout(input_rep)
last_rep = self.layers(input_rep)
logits = self.logit(last_rep)
return logits, last_rep
class GANBert(Module):
    def __init__(self, model_name, noise_size=100):
        super(GANBert, self).__init__()
        self.transformer = AutoModel.from_pretrained(model_name)
        self.noise_size = noise_size
        self.generator = Generator(noise_size=noise_size)
        self.discriminator = Discriminator()
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
model_out = self.transformer(input_ids, attention_mask, token_type_ids)
model_out = model_out[1] # pooler out
        # uniform_ is the in-place sampler; tensors have no plain uniform() method
        noise = torch.zeros(input_ids.shape[0], self.noise_size, device=input_ids.device).uniform_(0, 1)
        gen_out = self.generator(noise)
        # score real (transformer) and fake (generator) representations together
        disc_input = torch.cat([model_out, gen_out], dim=0)
        return self.discriminator(disc_input)
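
# Minimal usage sketch (hypothetical checkpoint name; the adversarial training loop
# from the referenced notebook is not reproduced here):
#
#   model = GANBert("bert-base-uncased")
#   logits, _ = model(input_ids, attention_mask, token_type_ids)
#   # Column `num_labels` of the logits scores the sample as fake/real; the other
#   # columns are the class scores used for the supervised loss.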
| 34.519481 | 85 | 0.598194 | 315 | 2,658 | 4.796825 | 0.32381 | 0.101919 | 0.031767 | 0.033752 | 0.259431 | 0.207809 | 0.207809 | 0.16413 | 0.16413 | 0.16413 | 0 | 0.02487 | 0.27389 | 2,658 | 76 | 86 | 34.973684 | 0.758031 | 0.133935 | 0 | 0.310345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0.017241 | 0.051724 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a60f47844b4e66c2b41b6d88b7e848c7e6e83731 | 1,761 | py | Python | reddit_client.py | spotify-companion/CLI | 29934cc715922263652235d0a9a4f0d8f57ecec5 | [
"MIT"
] | null | null | null | reddit_client.py | spotify-companion/CLI | 29934cc715922263652235d0a9a4f0d8f57ecec5 | [
"MIT"
] | null | null | null | reddit_client.py | spotify-companion/CLI | 29934cc715922263652235d0a9a4f0d8f57ecec5 | [
"MIT"
] | null | null | null | import praw
import pandas as pd
import reddit_constants
class RedditClient(object):
__shared_instance = "RedditClient"
@staticmethod
def get_instance():
""" To implement singleton pattern """
if RedditClient.__shared_instance == "RedditClient":
RedditClient()
return RedditClient.__shared_instance
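
    # Usage sketch: RedditClient.get_instance() always returns the same client,
    # configured from reddit_constants (client_id, secret_key, user_agent).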
def __init__(self):
if RedditClient.__shared_instance != "RedditClient":
raise Exception("Sorry this is a singleton implementation")
else:
RedditClient.__shared_instance =self
self.reddit = praw.Reddit(client_id=reddit_constants.constants['client_id'], client_secret=reddit_constants.constants['secret_key'], user_agent=reddit_constants.constants['user_agent'])
self.client_id = reddit_constants.constants['client_id']
self.user_agent = reddit_constants.constants['user_agent']
self.posts = {}
    def getHot(self, subreddit, limit=20):
        # Materialize the listing so callers can iterate it again
        # (PRAW returns a one-shot generator)
        hot_posts = list(self.reddit.subreddit(subreddit).hot(limit=limit))
        titles = [post.title for post in hot_posts]
        return hot_posts, titles
def get_songs(self):
        songlist, titles = self.getHot('listentothis', 20)
songs = []
for i in titles:
if ' -- ' in i:
i = i.split(' -- ')
elif ' - ' in i:
i = i.split(' - ')
elif ' — ' in i:
i = i.split(' — ')
# print(i[len(i)-1].split(' [')[0])
temp = {'artist': i[0], 'title': i[len(i)-1].split(' [')[0]}
songs.append(temp)
return songs
| 33.226415 | 193 | 0.570131 | 189 | 1,761 | 5.10582 | 0.328042 | 0.093264 | 0.124352 | 0.015544 | 0.325389 | 0.232124 | 0.178238 | 0.095337 | 0 | 0 | 0 | 0.007432 | 0.312323 | 1,761 | 53 | 194 | 33.226415 | 0.787779 | 0.0477 | 0 | 0 | 0 | 0 | 0.101198 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.078947 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a610ecc05f6f6e302f862afbc071148996b7ed46 | 19,020 | py | Python | ii_irods/ii_command.py | UtrechtUniversity/ii | 7d3899b4d6bbbf5a14be1d85296b3ea99ffe135b | [
"MIT"
] | null | null | null | ii_irods/ii_command.py | UtrechtUniversity/ii | 7d3899b4d6bbbf5a14be1d85296b3ea99ffe135b | [
"MIT"
] | null | null | null | ii_irods/ii_command.py | UtrechtUniversity/ii | 7d3899b4d6bbbf5a14be1d85296b3ea99ffe135b | [
"MIT"
] | null | null | null | import argparse
from fnmatch import fnmatch
import os.path
import re
import sys
from ii_irods.coll_utils import resolve_base_path, convert_to_absolute_path, get_dataobjects_in_collection
from ii_irods.coll_utils import get_direct_subcollections, get_subcollections, collection_exists
from ii_irods.do_utils import get_dataobject_info, dataobject_exists
from ii_irods.environment import verify_environment, get_cwd, set_cwd, get_home
from ii_irods.ls_formatters import TextListFormatter, CSVListFormatter
from ii_irods.ls_formatters import JSONListFormatter, YAMLListFormatter
from ii_irods.session import setup_session
# print_warning is assumed to live in ii_irods.utils alongside the other helpers
from ii_irods.utils import exit_with_error, print_error, print_warning, print_debug, debug_dumpdata
def entry():
try:
main()
except KeyboardInterrupt:
print("Script stopped by user.")
def main():
args = parse_args()
if args["command"] == "pwd":
command_pwd(args)
elif args["command"] == "cd":
command_cd(args)
elif args["command"] == "ls":
command_ls(args)
elif args["command"] == "find":
command_find(args)
else:
exit_with_error("Error: unknown command")
def get_version():
"""Returns version number of script"""
return "0.0.1 (prerelease prototype)"
def parse_args():
"""Returns command line arguments of the script.
Exits with error message or help text when user provides
wrong or no arguments."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version',
version="iswitch version " + get_version())
# Can't require a subparser because of need to maintain
# backwards compatibility with Python 3.6
subparsers = parser.add_subparsers(
dest='command', help='command')
pwd_parser = subparsers.add_parser("pwd",
help='Print working directory/collection')
pwd_parser.add_argument('--verbose', '-v', action='store_true', default=False,
help='Print verbose information for troubleshooting')
cd_parser = subparsers.add_parser("cd",
help='Change working directory/collection')
cd_parser.add_argument('--verbose', '-v', action='store_true', default=False,
help='Print verbose information for troubleshooting')
cd_parser.add_argument('directory', default=None, nargs='?',
help='Directory to change to')
ls_parser = subparsers.add_parser("ls",
help='List collections or data objects')
ls_parser.add_argument('--verbose', '-v', action='store_true', default=False,
help='Print verbose information for troubleshooting')
ls_parser.add_argument('queries', default=None, nargs='*',
help='Collection, data object or data object wildcard')
ls_parser.add_argument("-m", "--format", dest='format', default='plain',
help="Output format", choices=['plain', 'json', 'csv', "yaml"])
ls_parser.add_argument("-s", "--sort", dest="sort", default='name',
help="Propery to use for sorting", choices=['name', 'ext', 'size', 'date', "unsorted"])
ls_parser.add_argument("-H", "--hr-size", default='default', dest="hrsize",
help="Whether to print human-readable sizes [yes,no,default]." +
"By default, enable human-readable for text output, disable for other formats.",
choices=['default', 'yes', 'no'])
ls_parser.add_argument('--recursive', '-r', action='store_true', default=False,
help='Include contents of subcollections')
ls_parser.add_argument('-l', action='store_true', default=False,
help='Display replicas with size, resource, owner, date')
ls_parser.add_argument('-L', action='store_true', default=False,
help='like -l, but also display checksum and physical path')
help_hrs = " (you can optionally use human-readable sizes, like \"2g\" for 2 gigabytes)"
find_parser = subparsers.add_parser("find",
help='Find data objects by property')
find_parser.add_argument('--verbose', '-v', action='store_true', default=False,
help='Print verbose information for troubleshooting')
find_parser.add_argument('queries', default=None, nargs='*',
help='Collection, data object or data object wildcard')
find_parser.add_argument('--print0', '-0', action='store_true', default=False,
help='Use 0 byte delimiters between results')
find_parser.add_argument(
"--dname",
help="Wildcard filter for data object name")
find_parser.add_argument(
"--owner-name",
help="Filter for data object owner name (excluding zone)")
find_parser.add_argument("--owner-zone",
help="Filter for data object owner zone")
find_parser.add_argument("--resc-name",
help="Filter for data object resource")
find_parser.add_argument(
"--minsize",
help="Filter for minimum data object size" +
help_hrs)
find_parser.add_argument(
"--maxsize",
help="Filter for maximum data object size" +
help_hrs)
find_parser.add_argument(
"--size",
help="Filter for (exact) data object size" +
help_hrs)
if len(sys.argv) == 1:
parser.print_help()
parser.exit()
return vars(parser.parse_args())
def command_pwd(args):
"""Code for the pwd command"""
_perform_environment_check(False)
print(get_cwd(args["verbose"]))
def command_cd(args):
"""Code for the cd command"""
_perform_environment_check()
if args["directory"] is None:
directory = get_home(args["verbose"])
if args["verbose"]:
print_debug("Defaulting cwd to home directory: " + directory)
else:
directory = args["directory"]
if not directory.startswith("/"):
directory = resolve_base_path(directory, get_cwd())
if args["verbose"]:
print_debug("Resolved relative directory to " + directory)
session = setup_session()
if not collection_exists(session, directory):
exit_with_error("This collection does not exist.")
try:
set_cwd(directory, args["verbose"])
except IOError:
exit_with_error("IO error during reading or writing CWD data.")
def command_ls(args):
"""Code for the ls command"""
_perform_environment_check()
if args["l"] and args["L"]:
exit_with_error(
"The -l and -L switches of the ls command are incompatible.")
session = setup_session()
expanded_queries = _expand_query_list(session, args["queries"],
args["recursive"], args["verbose"])
query_results = retrieve_object_info(
session, expanded_queries, args["sort"])
if args["l"] or args["L"]:
_ls_print_results(query_results, args)
else:
dedup_results = _replica_results_dedup(query_results)
_ls_print_results(dedup_results, args)
def command_find(args):
"""Code for the find command"""
_perform_environment_check()
filter_dict = _get_find_filter_dict(args)
_find_verify_arguments(filter_dict)
session = setup_session()
expanded_queries = _expand_query_list(
session, args["queries"], True, args["verbose"])
query_results = retrieve_object_info(session, expanded_queries, "unsorted")
filtered_results = _find_filter_results(query_results, filter_dict)
dedup_results = _replica_results_dedup(filtered_results)
_find_print_results(dedup_results, args["print0"])
def _find_verify_arguments(filters):
"""This checks filter arguments of the find command. If they are inconsistent, it
exits with an error message"""
if ("minsize" in filters and "maxsize" in filters and
filters["maxsize"] < filters["minsize"]):
exit_with_error("Maximum size cannot be less than minimum size.")
if ("size" in filters and "maxsize" in filters and
filters["maxsize"] < filters["size"]):
exit_with_error("Maximum size cannot be less than (exact) size.")
if ("size" in filters and "minsize" in filters and
filters["minsize"] > filters["size"]):
exit_with_error("Minimum size cannot be more than (exact) size.")
def _parse_human_filesize(m):
"""Parses human readable file sizes, such as "1240", "200k", "30m", and
returns them as int. Raises ValueError if the value cannot be parsed."""
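    # Examples: "1240" -> 1240, "200k" -> 204800, "30m" -> 31457280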
try:
return int(m)
except ValueError as e:
match = re.match("^(\\d+)([kmgtp])$", m)
if match:
digits = match[1]
suffix = match[2]
multiplier = 1
for letter in ["k", "m", "g", "t", "p"]:
multiplier *= 1024
if suffix == letter:
return multiplier * int(digits)
raise e
def _get_find_filter_dict(args):
"""This preprocesses commandline arguments related to filters for the find command and
returns the results in a dictionary."""
filter_dict = {}
# Arguments that don't need any preprocessing can just be copied,
# if they are present.
for arg in ["dname", "owner_name", "owner_zone", "resc_name"]:
if arg in args and args[arg] is not None:
filter_dict[arg] = args[arg]
# Try to parse human-readable file sizes
for arg in ["size", "minsize", "maxsize"]:
if arg in args and args[arg] is not None:
try:
parsed_value = _parse_human_filesize(args[arg])
except ValueError:
exit_with_error(
"Unable to parse size \"{}\"".format(args[arg]))
filter_dict[arg] = parsed_value
return filter_dict
def _find_filter_results(inresults, filters):
    """Filter data object results by the given property filters
    (name wildcard, owner name/zone, resource, size bounds)."""
    filtered_data = []
for query in inresults:
outquery = query.copy()
if "results" in query:
outresults = []
for result in query["results"]:
if result["type"] != "dataobject":
continue
if ("dname" in filters and
not fnmatch(result["name"], filters["dname"])):
continue
if ("owner_name" in filters and
result["owner_name"] != filters["owner_name"]):
continue
if ("owner_zone" in filters and
result["owner_zone"] != filters["owner_zone"]):
continue
if ("resc_name" in filters and
result["resc_name"] != filters["resc_name"]):
continue
if ("size" in filters and
result["size"] != filters["size"]):
continue
if ("minsize" in filters and
result["size"] < filters["minsize"]):
continue
if ("maxsize" in filters and
result["size"] > filters["maxsize"]):
continue
outresults.append(result.copy())
outquery["results"] = outresults
        filtered_data.append(outquery)
    return filtered_data
def _expand_query_list(session, queries, recursive=False, verbose=False):
"""This function expands ls queries by resolving relative paths,
expanding wildcards and expanding recursive queries. If the user provides no
queries, the method defaults to a single nonrecursive query for the current working directory."""
results = []
# If no queries are supplied by the user, default to a query for the
# current working directory
if len(queries) == 0:
queries = [get_cwd()]
# Wildcard expansion is performed first, so it can be combined with other types
# of expansion, such as recursive expansion of subcollections later. Each collection
# or data object is expanded only once.
preprocessed_queries = []
already_expanded = {}
for query in queries:
# Currently only wildcards without a collection path are supported
# e.g. "*.dat", but not "../*.dat" or "*/data.dat".
if "/" not in query and ("?" in query or "*" in query):
for d in get_dataobjects_in_collection(session, get_cwd()):
if fnmatch(d["name"],
query) and d["full_name"] not in already_expanded:
preprocessed_queries.append(d["full_name"])
already_expanded[d["full_name"]] = 1
for c in get_direct_subcollections(session, get_cwd()):
parent, coll = os.path.split(c["name"])
if fnmatch(coll, query) and d["name"] not in already_expanded:
preprocessed_queries.append(c["name"])
already_expanded[d["name"]] = 1
else:
preprocessed_queries.append(query)
for query in preprocessed_queries:
absquery = convert_to_absolute_path(query)
if collection_exists(session, absquery):
results.append({"original_query": query, "expanded_query": absquery,
"expanded_query_type": "collection"})
if verbose:
print_debug("Argument \"{}\" is a collection.".format(query))
if recursive:
for subcollection in get_subcollections(session, absquery):
if verbose:
print_debug("Recursively adding subcollection " +
subcollection + " to queries.")
results.append({"original_query": query,
"expanded_query": subcollection,
"expanded_query_type": "collection"})
elif dataobject_exists(session, absquery):
results.append({"original_query": query, "expanded_query": absquery,
"expanded_query_type": "dataobject"})
if verbose:
print_debug("Argument \"{}\" is a data object.".format(query))
else:
print_error(
"Query \"{}\" could not be resolved. Ignoring ... ".format(query))
return results
def _replica_results_dedup(queries):
"""This method deduplicates data object results within a query, so that ls displays data objects
one time, instead of once for every replica."""
deduplicated_queries = []
for query in queries:
new_query = query.copy()
if "results" in query:
objects_seen = {}
dedup_results = []
results = query["results"]
for result in results:
if result["type"] == "dataobject":
full_name = result["full_name"]
if full_name not in objects_seen:
objects_seen[full_name] = 1
dedup_results.append(result)
else:
dedup_results.append(result)
new_query["results"] = dedup_results
deduplicated_queries.append(new_query)
return deduplicated_queries
def _ls_print_results(results, args):
if args["format"] == "plain":
formatter = TextListFormatter()
elif args["format"] == "json":
formatter = JSONListFormatter()
elif args["format"] == "yaml":
formatter = YAMLListFormatter()
elif args["format"] == "csv":
formatter = CSVListFormatter()
else:
print("Output format {} is not supported.".format(args["format"]))
formatter.print_data(results, args)
def _find_print_results(data, print0):
def _find_print(m):
if print0:
print(m, end="\0")
else:
print(m)
for query in data:
querytype = query["expanded_query_type"]
if querytype == "collection" and "results" in query:
results = query["results"]
for result in results:
if result["type"] == "dataobject":
_find_print(result["full_name"])
elif querytype == "dataobject" and "expanded_query" in query:
_find_print(query["expanded_query"])
else:
print_warning(
"Unexpected query type {} in text formatter".format(querytype))
def retrieve_object_info(session, queries, sortkey):
"""Retrieves information about data objects and collections that match
the expanded query list."""
results = []
for query in queries:
expquery = query["expanded_query"]
qtype = query["expanded_query_type"]
if qtype == "collection":
queryresults = []
queryresults.extend(get_direct_subcollections(session, expquery))
queryresults.extend(
get_dataobjects_in_collection(
session, expquery))
elif qtype == "dataobject":
queryresults = get_dataobject_info(session, expquery)
else:
exit_with_error(
"Internal issue - illegal query type in retrieve_object_info: "
+ qtype)
query["results"] = sort_object_info(queryresults, sortkey)
results.append(query)
return results
def sort_object_info(results, sortkey):
"""Sort result objects by specified key"""
if sortkey == "unsorted":
return results
elif sortkey == "name":
return sorted(results, key=lambda r: r["name"])
elif sortkey == "ext":
def _get_ext(n):
# Get extension for sorting
if n["type"] == "dataobject":
return n["name"].split(".")[-1]
else:
# Use name for sorting collections
return n["name"]
return sorted(results, key=_get_ext)
elif sortkey == "size":
return sorted(results, key=lambda k: k.get("size", 0))
elif sortkey == "date":
return sorted(results, key=lambda k: k.get("modify_time", 0))
else:
exit_with_error("Sort option {} not supported.".format(sortkey))
def _perform_environment_check(check_auth=True):
"""Check if the environment configuration file is present, readable and
has the required fields. By default, also check that a scrambled password
file is present (unless check_auth is set to False). Prints any errors
encountered and exits if there is a problem, otherwise returns."""
correct, errors = verify_environment(check_auth)
if not correct:
print_error(
"Cannot execute command because of problem(s) with environment:")
for error in errors:
print_error(" - " + error)
sys.exit(1)
| 39.055441 | 114 | 0.601472 | 2,143 | 19,020 | 5.166589 | 0.175922 | 0.018696 | 0.033779 | 0.018967 | 0.273573 | 0.216402 | 0.165101 | 0.155708 | 0.140896 | 0.126084 | 0 | 0.002902 | 0.293323 | 19,020 | 486 | 115 | 39.135802 | 0.820847 | 0.109884 | 0 | 0.21547 | 0 | 0 | 0.198702 | 0.001251 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058011 | false | 0 | 0.035912 | 0 | 0.138122 | 0.077348 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a6117f3294c81246ee664c4b1aa72e59144f4cc8 | 6,998 | py | Python | tests/pyarxaas/models/test_Dataset.py | vinayranjan/pyarxaas | f304d360adf1a647d9daff0afda5290ca0fd7ec8 | [
"MIT"
] | 4 | 2020-07-14T16:36:09.000Z | 2020-11-24T14:28:02.000Z | tests/pyarxaas/models/test_Dataset.py | vinayranjan/pyarxaas | f304d360adf1a647d9daff0afda5290ca0fd7ec8 | [
"MIT"
] | 20 | 2020-06-19T04:37:33.000Z | 2021-07-26T04:27:46.000Z | tests/pyarxaas/models/test_Dataset.py | vinayranjan/pyarxaas | f304d360adf1a647d9daff0afda5290ca0fd7ec8 | [
"MIT"
] | 2 | 2020-11-11T11:52:06.000Z | 2020-12-03T10:22:47.000Z | import unittest
import pandas
from pyarxaas.models.attribute_type import AttributeType
from pyarxaas.models.dataset import Dataset
from tests.pyarxaas import data_generator
class DatasetTest(unittest.TestCase):
def setUp(self):
self.test_data = [['id', 'name'],
['0', 'Viktor'],
['1', 'Jerry']]
self.test_attribute_type_mapping = {'id': AttributeType.IDENTIFYING,
'name': AttributeType.QUASIIDENTIFYING}
def test_init(self):
Dataset(self.test_data, self.test_attribute_type_mapping)
def test_equality(self):
dataset_1 = data_generator.id_name_dataset()
dataset_2 = data_generator.id_name_dataset()
self.assertEqual(dataset_1, dataset_2)
self.assertIsNot(dataset_1, dataset_2)
dataset_2._set_attribute_type("id", AttributeType.QUASIIDENTIFYING)
self.assertNotEqual(dataset_1, dataset_2)
def test_hash(self):
dataset_1 = data_generator.id_name_dataset()
dataset_2 = data_generator.id_name_dataset()
test_set = {dataset_1, dataset_2}
self.assertEqual(1, len(test_set))
def test_init__without_attribute_types_param(self):
dataset = Dataset(self.test_data)
self.assertEqual(dataset._DEFAULT_ATTRIBUTE_TYPE.value, dataset._attributes[0].type.value)
self.assertEqual(self.test_data[0][0], dataset._attributes[0].name)
self.assertEqual(self.test_data[0][1], dataset._attributes[1].name)
def test_create_from_pandas_dataframe(self):
dataframe = pandas.DataFrame(self.test_data[1:], columns=self.test_data[0])
dataset = Dataset.from_pandas(dataframe)
pandas_df = dataset.to_dataframe()
        # assert the dataframe survives the Dataset round trip unchanged
self.assertEqual(dataframe.to_dict(), pandas_df.to_dict())
# assert default AttributeType is set
self.assertEqual(Dataset._DEFAULT_ATTRIBUTE_TYPE.value, dataset._attributes[0].type.value)
def test_set_attribute_types_default_value(self):
dataset = Dataset(self.test_data)
self.assertEqual(AttributeType.QUASIIDENTIFYING.value, dataset._attributes[0].type.value)
self.assertEqual(AttributeType.QUASIIDENTIFYING.value, dataset._attributes[1].type.value)
def test_set_attribute_type_with_sequence_of_attributes(self):
dataset = Dataset(self.test_data)
dataset.set_attribute_type(AttributeType.IDENTIFYING, "id", "name")
self.assertEqual(AttributeType.IDENTIFYING.value, dataset._attributes[0].type.value)
self.assertEqual(AttributeType.IDENTIFYING.value, dataset._attributes[1].type.value)
def test_set_attribute_type_with_single_attribute(self):
dataset = Dataset(self.test_data)
dataset.set_attribute_type(AttributeType.IDENTIFYING, "id")
self.assertEqual(AttributeType.IDENTIFYING.value, dataset._attributes[0].type.value)
def test_set_attribute_type__single_attribute(self):
dataset = Dataset(self.test_data)
dataset._set_attribute_type("id", AttributeType.QUASIIDENTIFYING)
self.assertEqual(AttributeType.QUASIIDENTIFYING.value, dataset._attributes[0].type.value)
self.assertEqual(Dataset._DEFAULT_ATTRIBUTE_TYPE.value, dataset._attributes[1].type.value)
def test_set_hierarchy(self):
test_hierarchy = [["0", "*"], ["1", "*"]]
dataset = Dataset(self.test_data)
dataset._set_attribute_type("id", AttributeType.QUASIIDENTIFYING)
dataset.set_hierarchy("id", test_hierarchy)
self.assertEqual(dataset._attributes[0].hierarchy, test_hierarchy)
def test_set_hierarchy__not_valid_attribute_name(self):
test_hierarchy = [["0", "*"], ["1", "*"]]
dataset = Dataset(self.test_data)
dataset._set_attribute_type("id", AttributeType.QUASIIDENTIFYING)
with self.assertRaises(KeyError):
dataset.set_hierarchy("fail", test_hierarchy)
self.assertIsNone(dataset._attributes[0].hierarchy)
def test_set_hierarchy__not_valid_attribute_type(self):
test_hierarchy = [["0", "*"], ["1", "*"]]
dataset = Dataset(self.test_data)
dataset._set_attribute_type("id", AttributeType.INSENSITIVE)
with self.assertRaises(ValueError):
dataset.set_hierarchy("id", test_hierarchy)
self.assertIsNone(dataset._attributes[0].hierarchy)
self.assertIsNot(test_hierarchy, dataset._attributes[0].hierarchy)
def test_set_hierarchies(self):
test_hierarchy_id = [["0", "*"], ["1", "*"]]
test_hierarchy_name = [["Viktor", "*"], ["Jerry", "*"]]
dataset = Dataset(self.test_data)
dataset._set_attribute_type("id", AttributeType.QUASIIDENTIFYING)
dataset._set_attribute_type("name", AttributeType.QUASIIDENTIFYING)
dataset.set_hierarchies({"id": test_hierarchy_id, "name": test_hierarchy_name})
self.assertEqual(dataset._attributes[0].hierarchy, test_hierarchy_id)
self.assertEqual(dataset._attributes[1].hierarchy, test_hierarchy_name)
def test_set_hierarchy_with_pandas(self):
test_hierarchy = [["0", "*"], ["1", "*"]]
hierarchy_df = pandas.DataFrame(test_hierarchy)
dataset = Dataset(self.test_data)
dataset._set_attribute_type("id", AttributeType.QUASIIDENTIFYING)
dataset.set_hierarchy("id", hierarchy_df)
self.assertEqual(dataset._attributes[0].hierarchy, test_hierarchy)
def test__payload(self):
dataset = Dataset(self.test_data)
payload = dataset._payload()
self.assertEqual(AttributeType.QUASIIDENTIFYING.value, payload["attributes"][0]["attributeTypeModel"])
self.assertEqual(None, payload["attributes"][0]["hierarchy"])
def test__payload__with_hierarchies(self):
test_hierarchy_id = [["0", "*"], ["1", "*"]]
test_hierarchy_name = [["Viktor", "NAME"], ["Jerry", "NAME"]]
dataset = Dataset(self.test_data)
dataset._set_attribute_type("id", AttributeType.QUASIIDENTIFYING)
dataset._set_attribute_type("name", AttributeType.QUASIIDENTIFYING)
dataset.set_hierarchies({"id": test_hierarchy_id, "name": test_hierarchy_name})
payload = dataset._payload()
self.assertEqual(test_hierarchy_id, payload["attributes"][0]["hierarchy"])
self.assertEqual(test_hierarchy_name, payload["attributes"][1]["hierarchy"])
def test_to_dataframe(self):
dataset = Dataset(self.test_data, self.test_attribute_type_mapping)
df = dataset.to_dataframe()
self.assertIsInstance(df, pandas.DataFrame)
def test_from_dict(self):
data = {"id": [1, 2], "name": ["Monsen", "Mikkel"]}
expected_df = pandas.DataFrame.from_dict(data)
dataset = Dataset.from_dict(data)
self.assertIsNotNone(dataset)
self.assertIsInstance(dataset, Dataset)
self.assertEqual(expected_df.to_dict(), dataset.to_dataframe().to_dict())
| 43.7375 | 110 | 0.69577 | 789 | 6,998 | 5.858048 | 0.101394 | 0.048464 | 0.049329 | 0.057551 | 0.657075 | 0.609476 | 0.594115 | 0.558633 | 0.492211 | 0.448074 | 0 | 0.010152 | 0.183624 | 6,998 | 159 | 111 | 44.012579 | 0.79888 | 0.010003 | 0 | 0.372881 | 0 | 0 | 0.034378 | 0 | 0 | 0 | 0 | 0 | 0.279661 | 1 | 0.161017 | false | 0 | 0.042373 | 0 | 0.211864 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a612ee91d33761ed97242b786506ebee4d8f2061 | 6,955 | py | Python | perception/utils/visualization.py | jostl/masters-thesis | 211e1f12a07428d37507e2bddc808f6da1149efb | [
"MIT"
] | 3 | 2021-06-19T10:49:26.000Z | 2022-03-26T11:31:28.000Z | perception/utils/visualization.py | jostl/masters-thesis | 211e1f12a07428d37507e2bddc808f6da1149efb | [
"MIT"
] | 1 | 2021-10-12T15:40:55.000Z | 2021-10-12T15:40:55.000Z | perception/utils/visualization.py | jostl/masters-thesis | 211e1f12a07428d37507e2bddc808f6da1149efb | [
"MIT"
] | null | null | null | import random
import matplotlib.pyplot as plt
import numpy as np
import torch
from perception.utils.segmentation_labels import CARLA_CLASSES, DEFAULT_CLASSES
def get_segmentation_colors(n_classes, only_random=False, class_indxs=None, color_seed=73):
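    """Return one (R, G, B) color per class: random colors when only_random is set,
    otherwise the CARLA palette colors for class_indxs plus black as a final entry."""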
assert only_random or class_indxs
random.seed(color_seed)
class_colors = []
if only_random:
for _ in range(n_classes):
class_colors.append((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))
return class_colors
elif class_indxs:
for c in class_indxs:
class_colors.append(CARLA_CLASSES[c][1])
class_colors.append((0, 0, 0))
return class_colors
def get_rgb_segmentation(semantic_image: np.ndarray, class_colors):
"""
Creates a RGB image from a semantic image. Semantic image must have shape: (Height, Width, #Semantic Classes)
"""
height, width, n_classes = semantic_image.shape
semantic_image_rgb = np.zeros((height, width, 3))
semantic_pred_argmax = semantic_image.argmax(axis=2)
for c in range(n_classes):
semantic_image_rgb[:, :, 0] += ((semantic_pred_argmax[:, :] == c) * (class_colors[c][0])).astype('uint8')
semantic_image_rgb[:, :, 1] += ((semantic_pred_argmax[:, :] == c) * (class_colors[c][1])).astype('uint8')
semantic_image_rgb[:, :, 2] += ((semantic_pred_argmax[:, :] == c) * (class_colors[c][2])).astype('uint8')
return semantic_image_rgb
def display_images_horizontally(images, fig_width, fig_height, display=True, title=None, subplot_titles=None):
# Inspired from Hands-On Machine Learning with SciKit-learn, Keras and TensorFlow, page 574
# Displays the list of images horizontally.
def plot_image(image, cmap="binary"):
# todo: https://stackoverflow.com/questions/49643907/clipping-input-data-to-the-valid-range-for-imshow-with-rgb-data-0-1-for-floa
plt.imshow(image, cmap=cmap)
plt.axis("off")
# plt.show()
n_images = len(images)
if subplot_titles is not None:
assert len(subplot_titles) == n_images, "need a subtitle for every image"
if n_images > 0:
fig = plt.figure(figsize=(fig_width, fig_height))
for image_index in range(n_images):
image = images[image_index]
ax = plt.subplot(1, n_images, 1 + image_index)
if subplot_titles is not None:
ax.set_title(subplot_titles[image_index])
cmap = "binary" if len(images[image_index].shape) == 3 else "gray"
plot_image(image, cmap=cmap)
if title is not None:
fig.suptitle(title, fontsize="x-large")
if display:
fig.show()
array = get_np_array_from_figure(fig)
plt.close()
return array
def get_np_array_from_figure(fig):
"""Returns numpy rgb array from matplotlib figure"""
fig.canvas.draw()
    # np.fromstring is deprecated for binary data; frombuffer is its replacement
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
def display_originals_with_decoded(original_images, decoded_images, title=""):
# Inspired by Hands-On Machine Learning with SciKit-learn, Keras and TensorFlow, page 574.
# Meant to be used for visualization of target images and predicted images in multi-task learning.
# Target images displayed in top row, predicted images in row below.
def plot_image(image, cmap="binary"):
# todo: https://stackoverflow.com/questions/49643907/clipping-input-data-to-the-valid-range-for-imshow-with-rgb-data-0-1-for-floa
plt.imshow(image, cmap=cmap)
plt.axis("off")
# plt.show()
n_images = len(original_images)
if n_images > 0:
fig = plt.figure(figsize=(n_images * 1.2, 3))
fig.suptitle(title, fontsize=10)
for image_index in range(n_images):
cmap = "binary" if original_images[image_index].shape[-1] == 3 else "gray"
plt.subplot(2, n_images, 1 + image_index)
plot_image(original_images[image_index], cmap=cmap)
plt.subplot(2, n_images, 1 + n_images + image_index)
plot_image(decoded_images[image_index], cmap=cmap)
fig.show()
def show_predictions(model, inputs, device, semantic_classes, n_displays=1, title=""):
# input_image has size (Height, Width, N-Channels).
# Have to add batch dimension, and transpose it to able to make predictions
rgb_inputs, rgb_targets, semantic_targets, depth_targets = inputs[0].to(device), inputs[1].to(device), inputs[2].to(
device), inputs[3].to(device)
model.eval()
with torch.no_grad():
predictions = model(rgb_inputs)
# Send all predictions and target tensors to cpu
n_displays = min(n_displays, len(rgb_inputs))
rgb_preds, semantic_preds, depth_preds = [pred.cpu().numpy().transpose(0, 2, 3, 1)[:n_displays] for pred in
predictions]
rgb_targets = rgb_targets.cpu().numpy().transpose(0, 2, 3, 1)[:n_displays]
depth_targets = depth_targets.cpu().numpy().transpose(0, 2, 3, 1)[:n_displays]
semantic_targets = semantic_targets.cpu().numpy().transpose(0, 2, 3, 1)[:n_displays]
for i in range(n_displays):
rgb_pred = rgb_preds[i]
semantic_pred = semantic_preds[i]
depth_pred = depth_preds[i]
rgb_target = rgb_targets[i]
semantic_target = semantic_targets[i]
depth_target = depth_targets[i]
class_colors = get_segmentation_colors(n_classes=len(semantic_classes) + 1, class_indxs=semantic_classes)
semantic_pred_rgb = get_rgb_segmentation(semantic_image=semantic_pred, class_colors=class_colors)
semantic_target_rgb = get_rgb_segmentation(semantic_image=semantic_target, class_colors=class_colors)
semantic_pred_rgb = semantic_pred_rgb / 255
semantic_target_rgb = semantic_target_rgb / 255
# Setup original images for display
original_images = [rgb_target, semantic_target_rgb, depth_target]
# Setup decoded images for display
decoded_images = [rgb_pred, semantic_pred_rgb, depth_pred]
# Show rgb, semantic segmentation and depth images with corresponding predictions
display_originals_with_decoded(original_images=original_images, decoded_images=decoded_images, title=title)
def plot_image(image, title="", cmap="binary"):
# todo: https://stackoverflow.com/questions/49643907/clipping-input-data-to-the-valid-range-for-imshow-with-rgb-data-0-1-for-floa
plt.imshow(image, cmap=cmap)
plt.title(title)
plt.show()
def plot_segmentation(image: np.ndarray, title=None):
_, _, n_classes = image.shape
class_colors = get_segmentation_colors(n_classes=n_classes, class_indxs=DEFAULT_CLASSES)
semantic_image_rgb = get_rgb_segmentation(image, class_colors=class_colors) / 255
plot_image(semantic_image_rgb, title=title)
plt.show()
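

# --- Hedged usage sketch (editor's addition, not part of the original file). ---
# Assumes matplotlib and numpy are importable; the random "images" below are
# placeholders for real data.
#
# import numpy as np
# imgs = [np.random.rand(64, 64, 3) for _ in range(3)]
# arr = display_images_horizontally(imgs, fig_width=9, fig_height=3,
#                                   display=False, title="demo",
#                                   subplot_titles=["a", "b", "c"])
# print(arr.shape)  # (H, W, 3) uint8 RGB array rendered from the figure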
| 41.646707 | 137 | 0.68555 | 964 | 6,955 | 4.716805 | 0.18361 | 0.043545 | 0.024632 | 0.015835 | 0.363097 | 0.308115 | 0.261051 | 0.192655 | 0.168683 | 0.168683 | 0 | 0.020736 | 0.202588 | 6,955 | 166 | 138 | 41.89759 | 0.799135 | 0.182027 | 0 | 0.17757 | 0 | 0 | 0.017159 | 0 | 0 | 0 | 0 | 0.006024 | 0.018692 | 1 | 0.093458 | false | 0 | 0.046729 | 0 | 0.186916 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a613b9a8c301525a013a5679b0f23d6447d8344b | 7,214 | py | Python | nova3/engines/nyaapantsu.py | chr0nu5/qBittorrent-Plugins-Easy-Install | 2b905523671f3d75977d1dc399cf6a8c723f463e | [
"MIT"
] | null | null | null | nova3/engines/nyaapantsu.py | chr0nu5/qBittorrent-Plugins-Easy-Install | 2b905523671f3d75977d1dc399cf6a8c723f463e | [
"MIT"
] | null | null | null | nova3/engines/nyaapantsu.py | chr0nu5/qBittorrent-Plugins-Easy-Install | 2b905523671f3d75977d1dc399cf6a8c723f463e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#VERSION: 1.2
#AUTHORS: Joost Bremmer (toost.b@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

from enum import Enum

try:
    from HTMLParser import HTMLParser
except ImportError:
    from html.parser import HTMLParser

# import qBT modules
try:
    from novaprinter import prettyPrinter
    from helpers import retrieve_url
except:
    pass


class nyaapantsu(object):
    """Class used by qBittorrent to search for torrents"""

    url = 'https://nyaa.pantsu.cat'
    name = 'Nyaa.pantsu'

    # defines which search categories are supported by this search engine
    # and their corresponding id. Possible categories are:
    # 'all', 'movies', 'tv', 'music', 'games', 'anime', 'software', 'pictures',
    # 'books'
    supported_categories = {
        'all': '_',
        'anime': '3_',
        'books': '4_',
        'music': '2_',
        'pictures': '6_',
        'software': '1_',
        'tv': '5_',
        'movies': '5_'}

    class NyaaPantsuParser(HTMLParser):
        """Parses the Nyaa.pantsu browse page for search results and prints them"""

        class DataType(Enum):
            """Enumeration to keep track of the TD type to use in handle_data()"""
            NONE = 0
            NAME = 1
            SEEDS = 2
            LEECH = 3
            SIZE = 4

        def __init__(self, res=(), url="https://nyaa.pantsu.cat"):
            try:
                super().__init__()
            except:
                # See: http://stackoverflow.com/questions/9698614/
                HTMLParser.__init__(self)
            self.engine_url = url
            self.results = res
            self.curr = None
            self.td_type = self.DataType.NONE

        def handle_starttag(self, tag, attr):
            """Calls element specific functions based on tag."""
            if tag == 'a':
                self.start_a(attr)
            if tag == 'tr':
                self.start_tr(attr)
            if tag == 'td':
                self.start_td(attr)

        def start_tr(self, attr):
            params = dict(attr)
            if 'class' in params and params['class'].startswith('torrent-info'):
                self.curr = {'engine_url': self.engine_url}

        def start_a(self, attr):
            params = dict(attr)
            # get torrent name
            if 'href' in params and params['href'].startswith('/view/'):
                if self.curr:
                    self.curr['desc_link'] = self.engine_url + params['href']
                    # also get name from handle_data()
                    self.td_type = self.DataType.NAME
            # get torrent magnet link
            elif 'href' in params and params['href'].startswith("magnet:?"):
                if self.curr:
                    self.curr['link'] = params['href']

        def start_td(self, attr):
            """Parses TD elements and sets self.td_type based on its html class.

            If the last TD element for the current hit is reached, it appends
            the hit to the results and cleans up.
            """
            params = dict(attr)
            # get seeds from handle_data()
            if 'class' in params and params['class'].startswith("tr-se"):
                self.td_type = self.DataType.SEEDS
            # get leechers from handle_data()
            elif 'class' in params and params['class'].startswith("tr-le"):
                self.td_type = self.DataType.LEECH
            # get size from handle_data()
            elif 'class' in params and params['class'].startswith("tr-size"):
                self.td_type = self.DataType.SIZE
            # we've reached the end of this result; save it and clean up.
            elif 'class' in params and params['class'].startswith("tr-date"):
                self.results.append(self.curr)
                self.td_type = self.DataType.NONE
                self.curr = None
            # default: current innerContent does not concern us: pass.
            else:
                self.td_type = self.DataType.NONE

        def handle_data(self, data):
            """Strip textContent data for search result based on td type"""
            # Get result name
            if self.td_type == self.DataType.NAME:
                if 'name' not in self.curr:
                    self.curr['name'] = ''
                self.curr['name'] += data.strip()
                self.td_type = self.DataType.NONE
            # Get no. of seeds
            elif self.td_type == self.DataType.SEEDS:
                try:
                    self.curr['seeds'] = int(data.strip())
                except:
                    self.curr['seeds'] = -1
                finally:
                    self.td_type = self.DataType.NONE
            # Get no. of leechers
            elif self.td_type == self.DataType.LEECH:
                try:
                    self.curr['leech'] = int(data.strip())
                except:
                    self.curr['leech'] = -1
                finally:
                    self.td_type = self.DataType.NONE
            # Get size
            elif self.td_type == self.DataType.SIZE:
                self.curr['size'] = data.strip()
                self.td_type = self.DataType.NONE
            # Default: self.td_type is unset, current textContent is not
            # interesting, do nothing.
            else:
                pass

    # DO NOT CHANGE the name and parameters of this function
    # This function will be the one called by nova2.py
    def search(self, what, cat='all'):
        """
        Retrieve and parse engine search results by category and query.

        Parameters:
        :param what: a string with the search tokens, already escaped
                     (e.g. "Ubuntu+Linux")
        :param cat: the name of a search category, see supported_categories.
        """
        page = 1
        hits = []
        parser = self.NyaaPantsuParser(hits, self.url)
        while True:
            url = str(
                "{0}/search/{1}?s=0&sort=5&order=false&max=300&c={2}&q={3}"
                .format(self.url,
                        page,
                        self.supported_categories.get(cat),
                        what))
            # pantsu is very volatile.
            try:
                res = retrieve_url(url)
                parser.feed(res)
            except:
                pass
            for each in hits:
                prettyPrinter(each)
            if len(hits) < 300:
                break
            del hits[:]
            page += 1
        parser.close()
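

# --- Hedged usage sketch (editor's addition). qBittorrent normally drives this
# class through nova2.py; standalone use assumes network access to
# nyaa.pantsu.cat and that novaprinter/helpers are importable.
#
# engine = nyaapantsu()
# engine.search("ubuntu+linux", cat="software")  # prints hits via prettyPrinter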
| 35.712871 | 83 | 0.530358 | 839 | 7,214 | 4.488677 | 0.312277 | 0.030271 | 0.045141 | 0.055762 | 0.275624 | 0.236325 | 0.141264 | 0.122677 | 0.071163 | 0.030271 | 0 | 0.008778 | 0.368312 | 7,214 | 201 | 84 | 35.890547 | 0.817643 | 0.305656 | 0 | 0.264463 | 0 | 0.008264 | 0.074685 | 0.01176 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057851 | false | 0.024793 | 0.049587 | 0 | 0.157025 | 0.008264 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a619383cde46a46920c9996c56eea79c404d6a6c | 8,342 | py | Python | trainer/audio_trainer.py | ryanwongsa/DeepFakeDetectionChallenge | b9902b88e89d5165190ad673d5dfb10cc821d5a1 | [
"Apache-2.0"
] | 5 | 2020-05-07T18:14:17.000Z | 2021-11-18T02:44:55.000Z | trainer/audio_trainer.py | ryanwongsa/DeepFakeDetectionChallenge | b9902b88e89d5165190ad673d5dfb10cc821d5a1 | [
"Apache-2.0"
] | 1 | 2021-08-17T09:40:28.000Z | 2021-09-20T16:57:29.000Z | trainer/audio_trainer.py | ryanwongsa/DeepFakeDetectionChallenge | b9902b88e89d5165190ad673d5dfb10cc821d5a1 | [
"Apache-2.0"
] | 1 | 2020-12-21T08:31:18.000Z | 2020-12-21T08:31:18.000Z | import torch
torch.backends.cudnn.benchmark = True
from trainer.base_audio_trainer import BaseAudioTrainer
from logger.new_callbacks import Callbacks
from torch.utils.data import DataLoader
from dataloader.audio_dataset import AudioDataset
from torch.optim.lr_scheduler import CosineAnnealingLR
from models.audio_models.model_dcase import ConvNet
from models.audio_models.model_m1 import Classifier_M2, Classifier_M3
from models.audio_models.model_m0 import Classifier
from utils_helper.mixup import *
import numpy as np
import cProfile

try:
    from apex import amp
except:
    pass
try:
    import wandb
except:
    pass


class AudioTrainer(BaseAudioTrainer):
    def __init__(self, hparams, train_length=None, valid_length=None):
        self.mixup = hparams.mixup
        self.cutmix = hparams.cutmix
        self.batch_size = hparams.batch_size
        self.num_workers = hparams.num_workers
        self.train_dir = hparams.train_dir
        self.train_meta_file = hparams.train_meta_file
        self.valid_dir = hparams.valid_dir
        self.valid_meta_file = hparams.valid_meta_file
        self.epochs = hparams.epochs
        self.save_dir = hparams.save_dir
        self.checkpoint_dir = hparams.checkpoint_dir
        self.grad_acc_num = hparams.grad_acc_num
        self.lr = hparams.lr
        self.network_name = hparams.network_name
        self.optimizer_name = hparams.optimizer_name
        self.scheduler_name = hparams.scheduler_name
        self.project_name = hparams.project_name
        self.run_name = hparams.run_name
        self.criterion_name = hparams.criterion_name
        self.use_amp = hparams.use_amp
        self.device = hparams.device
        self.load_model_only = hparams.load_model_only
        self.tuning_type = hparams.tuning_type
        self.pos_weight_factor = hparams.pos_weight_factor

        self.cb = Callbacks(log_every=10, save_dir=self.save_dir)

        self.init_train_dataloader(length=train_length)
        self.init_valid_dataloader(length=valid_length)
        self.init_criterion()
        self.init_model()
        self.set_tuning_parameters()
        self.init_optimizer()
        self.init_scheduler()

        if hparams.project_name is not None:
            self.cb.init_wandb(hparams.project_name, hparams, hparams.run_name)
            wandb.watch(self.model)

        if torch.cuda.device_count() > 1 and self.device == 'cuda':
            print("Using Multiple GPUs")
            self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
        self.model.to(self.device)

        if self.use_amp:
            self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level="O1")

        self.load_checkpoint(self.checkpoint_dir, is_model_only=self.load_model_only)

    def init_criterion(self):
        # self.criterion_name
        self.criterion = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor(self.pos_weight_factor))
        self.log_loss_criterion = torch.nn.BCELoss()
        self.valid_criterion = torch.nn.BCELoss()

    def init_model(self):
        # self.network_name
        model_dict = {
            "m0": Classifier,
            "m2": Classifier_M2,
            "m3": Classifier_M3,
            "dcase": ConvNet,
        }
        self.model = model_dict[self.network_name](num_classes=1)

    def set_tuning_parameters(self):
        # self.tuning_type
        if self.tuning_type == "freeze_bn":
            self.model.freeze_bn = True
            self.model.freeze_bn_affine = True

    def init_optimizer(self, lr=None):
        # self.optimizer_name
        if lr is not None:
            self.lr = lr
        self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.lr, amsgrad=False)

    def init_scheduler(self):
        # self.scheduler_name
        if self.scheduler_name == "cosine":
            self.scheduler = CosineAnnealingLR(self.optimizer, T_max=10, eta_min=1e-5)
        else:
            self.scheduler = None

    '''
    1.1.1. batch process
    '''

    def batch_process(self, batch, index=None, isTraining=True):
        self.cb.on_batch_process_start()
        source_filenames, x_batch, y_batch, video_original_filenames = batch
        y_batch = y_batch.float()
        if isTraining:
            r = np.random.rand(1)
            if (self.mixup or self.cutmix) and r < 0.5:
                if self.mixup and (not self.cutmix):
                    x_batch, y_batch_a, y_batch_b, lam = mixup_data(x_batch, y_batch)
                elif self.cutmix and (not self.mixup):
                    x_batch, y_batch_a, y_batch_b, lam = cutmix_data(x_batch, y_batch, device=self.device)
                else:
                    x_batch, y_batch_a, y_batch_b, lam = cutmix_data(x_batch, y_batch, device=self.device) if np.random.rand() > 0.5 else mixup_data(x_batch, y_batch, device=self.device)
                y_batch_b = y_batch_b.unsqueeze(1)
                y_batch_a = y_batch_a.unsqueeze(1)
                self.cb.on_batch_process_end()
                return x_batch, y_batch_a, y_batch_b, lam
            else:
                y_batch = y_batch.unsqueeze(1)
                self.cb.on_batch_process_end()
                return x_batch, y_batch
        else:
            y_batch = y_batch.unsqueeze(1)
            self.cb.on_batch_process_end()
            return x_batch, y_batch

    '''
    1.1.2. batch train
    '''

    def batch_train_step(self, batch, index):
        self.cb.on_batch_train_step_start()
        r = np.random.rand(1)
        if len(batch) == 4:
            x_batch, y_batch_a, y_batch_b, lam = batch
            preds = self.model(x_batch.to(self.device))
            loss = mixup_criterion(self.criterion, preds, y_batch_a.to(self.device), y_batch_b.to(self.device), lam)
        else:
            x_batch, y_batch = batch
            preds = self.model(x_batch.to(self.device))
            loss = self.criterion(preds, y_batch.to(self.device))
        dict_metrics = {"train_batch_loss": loss.item()}
        if self.scheduler is not None:
            dict_metrics["lr"] = self.optimizer.param_groups[0]['lr']
        self.cb.on_batch_train_step_end(dict_metrics)
        return loss

    '''
    2.1.2. batch valid
    '''

    def batch_valid_step(self, batch, index):
        self.cb.on_batch_valid_step_start()
        with torch.no_grad():
            for idx, (x_batch, y_batch) in enumerate(zip(*batch)):
                x_batch = x_batch
                y_batch = y_batch.unsqueeze(0)
                predicted = self.model(x_batch.to(self.device))
                loss_original = self.criterion(predicted.mean(axis=0).unsqueeze(0), y_batch.to(self.device))
                predicted2 = torch.sigmoid(predicted).mean(axis=0)
                predicted2[predicted2 < 0.5] = 0.5
                log_loss = self.log_loss_criterion(predicted2, y_batch.to(self.device))
                self.cb.on_batch_valid_step_end({"predicted": torch.sigmoid(predicted).mean(axis=0).item(), "actual": y_batch[0].item(), "num_above": (predicted2 > 0.5).sum().item(), "valid_batch_loss": loss_original.item(), "valid_log_loss": log_loss.item(), "valid_original_loss": loss_original.item()})

    def init_train_dataloader(self, length=None):
        train_dataset = AudioDataset(self.train_dir, self.train_meta_file, spec_aug=False, isBalanced=True, isValid=False)
        if length is not None:
            train_dataset.length = length
        self.trainloader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, collate_fn=train_dataset.collate_fn, pin_memory=True, drop_last=True, worker_init_fn=train_dataset.init_workers_fn)

    def init_valid_dataloader(self, length=None):
        valid_dataset = AudioDataset(self.valid_dir, self.valid_meta_file, spec_aug=False, isBalanced=False, isValid=True)
        if length is not None:
            valid_dataset.length = length
        self.validloader = DataLoader(valid_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True, collate_fn=valid_dataset.collate_fn, drop_last=False, worker_init_fn=valid_dataset.init_workers_fn)
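

# --- Hedged usage sketch (editor's addition). `hparams` is assumed to be any
# namespace object (e.g. argparse.Namespace) carrying every attribute read in
# __init__ above; the values below are illustrative only.
#
# from argparse import Namespace
# hparams = Namespace(mixup=True, cutmix=False, batch_size=32, num_workers=4,
#                     network_name="m2", scheduler_name="cosine", lr=1e-4,
#                     ...)  # plus the remaining attributes used in __init__
# trainer = AudioTrainer(hparams, train_length=1000, valid_length=200)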
| 42.131313 | 296 | 0.649245 | 1,116 | 8,342 | 4.576165 | 0.171147 | 0.04347 | 0.043078 | 0.035246 | 0.262972 | 0.189348 | 0.125906 | 0.111807 | 0.087331 | 0.07617 | 0 | 0.009009 | 0.254855 | 8,342 | 198 | 297 | 42.131313 | 0.81258 | 0.011268 | 0 | 0.156863 | 0 | 0 | 0.017693 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071895 | false | 0.013072 | 0.091503 | 0 | 0.196078 | 0.006536 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a61e50a3bc6d9d83dac4e1176ba6bdfe1909c870 | 1,881 | py | Python | bin/taxid2lineage.py | twylie/viromatch | 44edca07c44308b17b9f19174c08175736fff53f | [
"MIT"
] | 5 | 2021-02-13T09:02:21.000Z | 2021-10-06T19:20:41.000Z | bin/taxid2lineage.py | twylie/viromatch | 44edca07c44308b17b9f19174c08175736fff53f | [
"MIT"
] | 4 | 2021-05-14T09:02:32.000Z | 2022-03-25T05:06:40.000Z | bin/taxid2lineage.py | twylie/viromatch | 44edca07c44308b17b9f19174c08175736fff53f | [
"MIT"
] | 1 | 2021-04-05T22:30:44.000Z | 2021-04-05T22:30:44.000Z | #! /usr/bin/python3.7
import argparse
from viromatch.lib.taxonomy import Taxonomy
import os

version = '1.0'


def eval_cli_arguments():
    parser = argparse.ArgumentParser(
        description='Resolve lineage given taxid.',
        prog='taxid2lineage.py',
        add_help=False
    )

    # Optional arguments.
    parser.add_argument(
        '-h',
        '--help',
        action='help',
        help='Display the extended usage statement.'
    )
    parser.add_argument(
        '--version',
        action='version',
        version=version,
        help='Display the software version number.'
    )

    # Required arguments.
    required_group = parser.add_argument_group('required')
    required_group.add_argument(
        '--taxonomy',
        metavar='FILE',
        action='store',
        help='Path to taxonomy file.',
        required=True,
    )
    required_group.add_argument(
        '--taxid',
        metavar='INT',
        action='store',
        help='Tax ids.',
        required=True,
        nargs='+'
    )

    arguments = parser.parse_args()

    for arg in arguments.taxid:
        if arg.isnumeric() is not True:
            msg = 'Taxid must be numeric.'
            parser.error(msg)

    if os.path.isfile(arguments.taxonomy) is not True:
        msg = 'You must supply a taxonomy file [--taxonomy].'
        parser.error(msg)

    return arguments


if __name__ == '__main__':

    # Given a list of NCBI tax ids, we will look up and return the
    # corresponding taxonomic lineages. By default, we return the YAML
    # representation and the flattened lineage views.

    arguments = eval_cli_arguments()
    tax = Taxonomy(arguments.taxonomy)
    for id_ in arguments.taxid:
        tax.lookup_lineage(id_)
        print('# [{}] {}'.format(id_, tax.flatten_lineage()))
        tax.lineage_yaml()
        print('---')

# __END__
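
# --- Hedged usage sketch (editor's addition). Example command line; the
# taxonomy path is a hypothetical placeholder:
#
#   taxid2lineage.py --taxonomy /path/to/taxonomy.dat --taxid 9606 10090
#
# Each id is printed as "# [<taxid>] <flattened lineage>" followed by the YAML
# lineage and a '---' separator.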
| 21.62069 | 70 | 0.600744 | 209 | 1,881 | 5.248804 | 0.4689 | 0.050137 | 0.04649 | 0.043756 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003715 | 0.284423 | 1,881 | 86 | 71 | 21.872093 | 0.811293 | 0.128655 | 0 | 0.181818 | 0 | 0 | 0.188725 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018182 | false | 0 | 0.054545 | 0 | 0.090909 | 0.036364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a61f07aff13fd36fc9ecc85e1e4a70ff58848de8 | 2,779 | py | Python | database.py | Konako1/osu-tg-bot | fc569b0a0fe2bc25947eb2fca443f809c84ef980 | [
"MIT"
] | 1 | 2022-03-26T16:55:33.000Z | 2022-03-26T16:55:33.000Z | database.py | Konako1/osu-tg-bot | fc569b0a0fe2bc25947eb2fca443f809c84ef980 | [
"MIT"
] | null | null | null | database.py | Konako1/osu-tg-bot | fc569b0a0fe2bc25947eb2fca443f809c84ef980 | [
"MIT"
] | null | null | null | from typing import Optional
import aiosqlite
from aiogram.dispatcher.middlewares import BaseMiddleware

import config


class OsuDb:
    def __init__(self, path: str = config.ASSET_PATH / 'osu.db'):
        self._conn = aiosqlite.connect(path)

    async def connect(self):
        self._conn = await self._conn
        await self._conn.execute('CREATE TABLE IF NOT EXISTS users(telegram_id INTEGER PRIMARY KEY,'
                                 ' osu_id INTEGER NOT NULL)')
        await self._conn.execute('CREATE TABLE IF NOT EXISTS osu_cache(username TEXT PRIMARY KEY,'
                                 ' user_id INTEGER NOT NULL)')
        await self._conn.commit()

    async def close(self):
        await self._conn.commit()
        await self._conn.close()

    async def set_user(self, tg_user_id: int, osu_user_id: int) -> None:
        user = await self.get_user(tg_user_id)
        if user is None:
            await self._conn.execute('INSERT INTO users(telegram_id, osu_id) VALUES (?, ?)',
                                     (tg_user_id, osu_user_id))
        else:
            await self._conn.execute('UPDATE users SET osu_id=? WHERE telegram_id=?',
                                     (osu_user_id, tg_user_id))
        await self._conn.commit()

    async def cache_user(self, osu_username: str, osu_user_id: int) -> None:
        user = await self.get_cached_user(osu_username)
        if user is None:
            await self._conn.execute('INSERT INTO osu_cache(username, user_id) VALUES (?, ?)',
                                     (osu_username, osu_user_id))
        else:
            await self._conn.execute('UPDATE osu_cache SET user_id=? WHERE username=?',
                                     (osu_user_id, osu_username))
        await self._conn.commit()

    async def get_user(self, tg_user_id: int) -> Optional[int]:
        cur = await self._conn.execute('SELECT osu_id FROM users WHERE telegram_id=?',
                                       (tg_user_id,))
        row = await cur.fetchone()
        if row is not None:
            return row[0]
        return None

    async def get_cached_user(self, osu_username: str) -> Optional[int]:
        cur = await self._conn.execute('SELECT user_id FROM osu_cache WHERE username=?',
                                       (osu_username,))
        row = await cur.fetchone()
        if row is not None:
            return row[0]
        return None


class OsuDbMiddleware(BaseMiddleware):
    @staticmethod
    async def on_process_message(_, data: dict):
        db = OsuDb()
        await db.connect()
        data['db'] = db

    @staticmethod
    async def on_post_process_message(_, __, data: dict):
        db = data.pop('db', None)
        if db is not None:
            await db.close()
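

# --- Hedged usage sketch (editor's addition). Plain asyncio use outside the
# aiogram middleware; the telegram/osu ids below are made up.
#
# import asyncio
#
# async def demo():
#     db = OsuDb()
#     await db.connect()
#     await db.set_user(tg_user_id=123, osu_user_id=456)
#     print(await db.get_user(123))  # -> 456
#     await db.close()
#
# asyncio.run(demo())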
| 38.068493 | 100 | 0.582944 | 349 | 2,779 | 4.401146 | 0.203438 | 0.083333 | 0.11849 | 0.104167 | 0.494792 | 0.421224 | 0.352214 | 0.322917 | 0.270833 | 0.123698 | 0 | 0.001064 | 0.323498 | 2,779 | 72 | 101 | 38.597222 | 0.815957 | 0 | 0 | 0.3 | 0 | 0 | 0.171644 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016667 | false | 0 | 0.066667 | 0 | 0.183333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a6238a7cdde1b28e0b8b179e20a5211a8bc243b8 | 8,423 | py | Python | module_utils/rsd_common.py | intel/ansible-rsd-provisioning | 757f4d5ca9447a22efdf56fda337dc19579c6380 | [
"Apache-2.0"
] | 2 | 2019-07-17T09:56:41.000Z | 2020-03-14T21:32:32.000Z | module_utils/rsd_common.py | intel/ansible-rsd-provisioning | 757f4d5ca9447a22efdf56fda337dc19579c6380 | [
"Apache-2.0"
] | null | null | null | module_utils/rsd_common.py | intel/ansible-rsd-provisioning | 757f4d5ca9447a22efdf56fda337dc19579c6380 | [
"Apache-2.0"
] | 4 | 2019-11-02T00:31:07.000Z | 2021-02-17T11:11:46.000Z | # Copyright (c) 2019 Intel Corporation. All rights reserved.
#
# GNU General Public License v3.0+
# (see LICENSE.GPL or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Authors:
#   - Igor D.C.       - <igor.duarte.cardoso@intel.com>
#   - Marco Chiappero - <marco.chiappero@intel.com>
#######################################################

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.module_utils.basic import env_fallback
from ansible.module_utils.basic import AnsibleModule

try:
    import rsd_lib
    import sushy
    HAS_RSDLIB = True
except ImportError:
    HAS_RSDLIB = False


class RSD(object):

    class PodmInfo(object):
        def __init__(self):
            self._host = None
            self._port = 8443
            self._protocol = 'http'
            self._verify_cert = False

        @property
        def host(self):
            return self._host

        @host.setter
        def host(self, value):
            if value and isinstance(value, str):
                self._host = value
            else:
                raise ValueError("Invalid hostname")

        @property
        def port(self):
            return self._port

        @port.setter
        def port(self, value):
            if 1 < value and value <= 65535:
                self._port = value
            else:
                raise ValueError("Invalid port number")

        @property
        def protocol(self):
            return self._protocol

        @protocol.setter
        def protocol(self, value):
            if value.lower() in ['https', 'http']:
                self._protocol = value
            else:
                raise ValueError("Must be http or https")

        @property
        def verify_cert(self):
            return self._verify_cert

        @verify_cert.setter
        def verify_cert(self, value):
            self._verify_cert = value

        def is_valid(self):
            if self._host and self._port:
                return True
            return False

        def to_url(self):
            endpoint = "/redfish/v1"  # might be influenced by <version>
            return "{0}://{1}:{2}{3}".format(
                self._protocol, self._host, self._port, endpoint)

    class AuthInfo():
        def __init__(self):
            self.username = None
            self.password = None

    RSD_BACKEND_ARGS = dict(
        id=dict(
            type='dict',
            required=True,
            options=dict(
                type=dict(
                    type='str',
                    required=False,
                    choices=['name', 'identity', 'uuid'],
                    default='identity'
                ),
                value=dict(
                    type='str',
                    required=True
                )
            )
        ),
        podm=dict(
            type='dict',
            apply_defaults=True,
            options=dict(
                host=dict(
                    type='str',
                    fallback=(env_fallback, ['PODM_HOST']),
                    required=True,
                    aliases=['hostname']
                ),
                port=dict(
                    type='int',
                    fallback=(env_fallback, ['PODM_PORT']),
                    default=443,
                ),
                protocol=dict(
                    type='str',
                    default='https',
                    choices=['https', 'http']
                ),
                validate_cert=dict(
                    type='bool',
                    default=False,
                    aliases=['verify_cert']
                ),
            ),
        ),
        auth=dict(
            type='dict',
            apply_defaults=True,
            options=dict(
                username=dict(
                    type='str',
                    fallback=(env_fallback, ['PODM_USERNAME']),
                    required=True,
                    no_log=True,
                    aliases=['user']
                ),
                password=dict(
                    type='str',
                    fallback=(env_fallback, ['PODM_PASSWORD']),
                    required=True,
                    no_log=True,
                    aliases=['pass']
                ),
            ),
        )
    )

    def __init__(self, argument_spec, bypass_checks=False, no_log=False,
                 check_invalid_arguments=None, mutually_exclusive=None,
                 required_together=None, required_one_of=None,
                 add_file_common_args=False, supports_check_mode=False,
                 required_if=None):

        full_arg_spec = dict()
        full_arg_spec.update(RSD.RSD_BACKEND_ARGS)  # args spec to this class
        full_arg_spec.update(argument_spec)         # args from derived class

        self.module = AnsibleModule(
            argument_spec=full_arg_spec,
            supports_check_mode=supports_check_mode,
            bypass_checks=bypass_checks,
            no_log=no_log,
            add_file_common_args=add_file_common_args,
            check_invalid_arguments=check_invalid_arguments,
            mutually_exclusive=mutually_exclusive,
            required_together=required_together,
            required_if=required_if,
            required_one_of=required_one_of
        )

        if not HAS_RSDLIB:
            self.module.fail_json(msg='The rsd-lib Python module is required')

        podm, credentials = self._parse_connection_info()
        self._connect(podm, credentials)
        self.module.debug("rsd-lib setup completed")

    def _parse_connection_info(self):
        endpoint = RSD.PodmInfo()
        podm_info = self.module.params['podm']
        endpoint.host = podm_info['host']
        endpoint.port = podm_info['port']
        endpoint.protocol = podm_info['protocol']
        endpoint.verify_cert = podm_info['validate_cert']
        if not endpoint.host:
            self.module.fail_json(msg='Missing PODM connection info')

        credentials = RSD.AuthInfo()
        auth_info = self.module.params['auth']
        credentials.username = auth_info['username']
        credentials.password = auth_info['password']
        if not credentials.username or not credentials.password:
            self.module.fail_json(msg='Missing endpoint credentials')

        return endpoint, credentials

    def _connect(self, podm_info, auth_info):
        if not podm_info.is_valid():
            raise ValueError("Invalid PODM info")
        try:
            self.rsd = rsd_lib.RSDLib(
                base_url=podm_info.to_url(),
                verify=podm_info.verify_cert,
                username=auth_info.username,
                password=auth_info.password
            ).factory()
        except (sushy.exceptions.ResourceNotFoundError,
                sushy.exceptions.ConnectionError,
                sushy.exceptions.HTTPError) as e:
            self.module.fail_json(
                msg="Failed to set up an endpoint connection: {0}".format(
                    str(e)))
        self.module.debug("Connection with PODM established")

    def _get_nodes_collection(self):
        return self.rsd.get_node_collection().get_members()

    def _get_node(self):
        params = self.module.params
        type = params['id']['type']
        value = params['id']['value']

        if type == 'identity':
            node_uri = "v1/Nodes/" + str(value)
            try:
                return self.rsd.get_node(node_uri)
            except sushy.exceptions.ResourceNotFoundError:
                self.module.fail_json(
                    msg="There is no node with such ID: {0}".format(value))
        else:
            nodes = self._get_nodes_collection()
            if type == 'name':
                node_match = [n for n in nodes if n.name == value]
                if len(node_match) == 1:
                    return node_match[0]
                elif len(node_match) > 1:
                    self.module.fail_json(msg="Multiple nodes found with "
                                              "given name: {0}".format(value))
            elif type == 'uuid':
                for n in nodes:
                    if n.uuid == value:
                        return n

        # not found
        self.module.fail_json(msg="There is no node with id type '{0}' "
                                  "and value: '{1}'".format(type, value))
| 31.429104 | 78 | 0.515256 | 846 | 8,423 | 4.917258 | 0.231678 | 0.03125 | 0.023558 | 0.030288 | 0.151202 | 0.111538 | 0.062019 | 0.0375 | 0.018269 | 0.018269 | 0 | 0.00675 | 0.384424 | 8,423 | 267 | 79 | 31.546816 | 0.795564 | 0.041672 | 0 | 0.215962 | 0 | 0 | 0.083833 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079812 | false | 0.042254 | 0.028169 | 0.023474 | 0.183099 | 0.004695 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a62415e6af752f35b91e91cf93818faa54736ef9 | 5,309 | py | Python | src/dockerblade/daemon.py | ChrisTimperley/dockerblade | 2be99bb9b2919ac87831879e04d6739d6967a8f3 | [
"Apache-2.0"
] | 1 | 2020-06-27T23:21:00.000Z | 2020-06-27T23:21:00.000Z | src/dockerblade/daemon.py | ChrisTimperley/dockerblade | 2be99bb9b2919ac87831879e04d6739d6967a8f3 | [
"Apache-2.0"
] | 66 | 2019-10-12T22:20:49.000Z | 2021-12-08T20:15:28.000Z | src/dockerblade/daemon.py | ChrisTimperley/dockerblade | 2be99bb9b2919ac87831879e04d6739d6967a8f3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
__all__ = ('DockerDaemon',)

from types import TracebackType
from typing import Any, Mapping, Optional, Type

from loguru import logger
import attr
import docker

from .container import Container


@attr.s(frozen=True)
class DockerDaemon:
    """Maintains a connection to a Docker daemon."""
    url: Optional[str] = attr.ib(default=None)
    client: docker.DockerClient = \
        attr.ib(init=False, eq=False, hash=False, repr=False)
    api: docker.APIClient = \
        attr.ib(init=False, eq=False, hash=False, repr=False)

    def __attrs_post_init__(self) -> None:
        client = docker.DockerClient(self.url)
        api = client.api
        object.__setattr__(self, 'client', client)
        object.__setattr__(self, 'api', api)
        logger.debug(f"created daemon connection: {self}")

    def __enter__(self) -> 'DockerDaemon':
        return self

    def __exit__(self,
                 ex_type: Optional[Type[BaseException]],
                 ex_val: Optional[BaseException],
                 ex_tb: Optional[TracebackType]
                 ) -> None:
        self.close()

    def close(self) -> None:
        logger.debug(f"closing daemon connection: {self}")
        self.api.close()
        self.client.close()
        logger.debug(f"closed daemon connection: {self}")

    def attach(self, id_or_name: str) -> Container:
        """Attaches to a running Docker container with a given ID or name."""
        logger.debug(f"attaching to container with ID or name [{id_or_name}]")
        docker_container = self.client.containers.get(id_or_name)
        container = Container(daemon=self, docker=docker_container)
        logger.debug(f"attached to container [{container}]")
        return container

    def provision(self,
                  image: str,
                  command: Optional[str] = None,
                  *,
                  entrypoint: Optional[str] = None,
                  environment: Optional[Mapping[str, str]] = None,
                  network_mode: str = 'bridge',
                  name: Optional[str] = None,
                  ports: Optional[Mapping[int, int]] = None,
                  user: Optional[str] = None,
                  volumes: Optional[Mapping[str, Any]] = None,
                  ) -> Container:
        """Creates a Docker container from a given image.

        Arguments
        ---------
        image: str
            The name of the Docker image that should be used.
        command: str
            The command that should be run inside the container. If no
            command is given, the default command for the Docker image will
            be used instead.
        name: str, optional
            The name that should be given to the Docker container. If no name
            is given, Docker will automatically generate one instead.
        user: str, optional
            The user that should be used by the container. If none is given,
            the default user for that container image will be used.
        entrypoint: str, optional
            The entrypoint that should be used by the container. If none is
            given, the default entrypoint for the image will be used.
        environment: Mapping[str, str], optional
            An optional set of additional environment variables, indexed by
            name, that should be used by the system.
        volumes: Mapping[str, str], optional
            An optional set of volumes that should be mounted inside the
            container, specified as a dictionary where keys represent a host
            path or volume name, and values are a dictionary containing
            the following keys: :code:`bind`, the path to mount the volume
            inside the container, and :code:`mode`, specifies whether the
            mount should be read-write :code:`rw` or read-only :code:`ro`.
        ports: Mapping[int, int], optional
            An optional dictionary specifying port mappings between the host
            and container, where keys represent container ports and values
            represent host ports.
        network_mode: str
            Specifies the networking mode that should be used by the
            container. Can be either :code:`bridge`, :code:`none`,
            :code:`container:<name|id>`, or :code:`host`.

        Returns
        -------
        Container
            An interface to the newly launched container.
        """
        logger.debug(f"provisioning container for image [{image}]")
        docker_container = \
            self.client.containers.run(image,
                                       command=command,
                                       stdin_open=True,
                                       detach=True,
                                       name=name,
                                       entrypoint=entrypoint,
                                       environment=environment,
                                       ports=ports,
                                       user=user,
                                       volumes=volumes,
                                       network_mode=network_mode)
        container = self.attach(docker_container.id)
        logger.debug(f"provisioned container [{container}]"
                     f" for image [{image}]")
        return container
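

# --- Hedged usage sketch (editor's addition). Requires a reachable Docker
# daemon; the image name and container name are examples only.
#
# with DockerDaemon() as daemon:
#     container = daemon.provision('alpine:3.12', command='sleep 60',
#                                  name='dockerblade-demo')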
| 42.134921 | 78 | 0.569787 | 587 | 5,309 | 5.078365 | 0.258944 | 0.024153 | 0.032204 | 0.026837 | 0.127139 | 0.103656 | 0.096612 | 0.086548 | 0.062395 | 0.062395 | 0 | 0.000289 | 0.348465 | 5,309 | 125 | 79 | 42.472 | 0.861521 | 0.362403 | 0 | 0.058824 | 0 | 0 | 0.105643 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.088235 | 0.014706 | 0.279412 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a6241a7c60cc25d87aeba3bf7a47f6e1a3cb5132 | 1,606 | py | Python | AreaFinder.py | karmatek/MinnoProto | cb231b44c055b910ce0e890f48fff0e0c7525c6e | [
"MIT"
] | null | null | null | AreaFinder.py | karmatek/MinnoProto | cb231b44c055b910ce0e890f48fff0e0c7525c6e | [
"MIT"
] | null | null | null | AreaFinder.py | karmatek/MinnoProto | cb231b44c055b910ce0e890f48fff0e0c7525c6e | [
"MIT"
] | null | null | null | """this module finds areas of image, it first finds edges in picture's pixel area.
then it makes contours of those edges, and draws the biggest contour."""
import cv2
import numpy as np
from matplotlib import pyplot as plt


def smartSelectFunc(filepath):
    # load image to analyze
    img = cv2.imread(filepath, 0)
    # find edges; takes the sample picture and min, max values for the threshold
    edges = cv2.Canny(img, 200, 210)
    # find contours from image edges; note findContours destroys the original sample variable, so use a copy of it
    (_, contours, _) = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]

    # find biggest contour
    # loop through all contours and find which has the most points
    biggestContourLen = 0
    biggestContour = 0
    for i in range(len(contours)):
        if len(contours[i]) > biggestContourLen:
            biggestContourLen = len(contours[i])
            biggestContour = i

    print(biggestContour)
    print(biggestContourLen)
    print(contours[biggestContour].ndim)
    print(contours[biggestContour].size)
    print(contours[biggestContour])

    # draw original picture and biggest contour on it
    img = cv2.imread(filepath, -1)
    cv2.drawContours(img, contours, biggestContour, (0, 255, 0), 3)
    return img

    # cv2.imshow('image', img)
    # cv2.waitKey(0)
    # plt.subplot(121), plt.imshow(img, cmap='gray')
    # plt.title('Original Image'), plt.xticks([]), plt.yticks([])
    # plt.subplot(122), plt.imshow(edges, cmap='gray')
    # plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
    # plt.show()
#plt.show() | 37.348837 | 107 | 0.702366 | 218 | 1,606 | 5.151376 | 0.481651 | 0.021371 | 0.072128 | 0.035619 | 0.046305 | 0.046305 | 0 | 0 | 0 | 0 | 0 | 0.027314 | 0.179328 | 1,606 | 43 | 108 | 37.348837 | 0.824734 | 0.439601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.136364 | 0 | 0.227273 | 0.227273 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a62b1c307fdb97895ca854309c5d81de1710a8df | 1,438 | py | Python | back/babar_twitter/serializers.py | dryvenn/babar3 | 6f193ddbc1170739d8b1bf39033ad64d9bc85747 | [
"MIT"
] | null | null | null | back/babar_twitter/serializers.py | dryvenn/babar3 | 6f193ddbc1170739d8b1bf39033ad64d9bc85747 | [
"MIT"
] | null | null | null | back/babar_twitter/serializers.py | dryvenn/babar3 | 6f193ddbc1170739d8b1bf39033ad64d9bc85747 | [
"MIT"
] | null | null | null | import json
from datetime import timedelta

from django.utils import timezone
from rest_framework import serializers
import tweepy

from .models import *

# Get secrets and create the API instance
secrets = open("./babar_twitter/SECRETS.json", 'r')
keychain = json.load(secrets)
secrets.close()
twitter_auth = tweepy.OAuthHandler(keychain['consumer']['key'], keychain['consumer']['secret'])
twitter_auth.set_access_token(keychain['access']['key'], keychain['access']['secret'])
del keychain
twitter_api = tweepy.API(twitter_auth)


class TweetSerializer(serializers.ModelSerializer):
    class Meta:
        model = Tweet

    def validate(self, data):
        """
        Throttle the tweets by not allowing to post twice
        in a 12-hour window.
        """
        now = timezone.localtime(timezone.now())
        try:
            last = Tweet.objects.order_by('-timestamp')[0].timestamp
            delta = timedelta(hours=12)
            if last + delta > now:
                raise serializers.ValidationError("Wait 12h between two tweets!")
        except IndexError:
            # No previous tweet
            pass
        return data

    def create(self, validated_data):
        """
        If the creation was successful (no error thrown),
        do the actual tweeting
        """
        tweet = super(TweetSerializer, self).create(validated_data)
        twitter_api.update_status(tweet.message)
        return tweet
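

# --- Hedged usage sketch (editor's addition), e.g. from a DRF view or shell;
# assumes Django is configured and the Tweet model has a `message` field.
#
# serializer = TweetSerializer(data={"message": "hello from babar"})
# serializer.is_valid(raise_exception=True)  # enforces the 12h throttle
# serializer.save()                          # persists and posts via tweepy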
| 29.958333 | 95 | 0.656467 | 166 | 1,438 | 5.608434 | 0.572289 | 0.035446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006434 | 0.243394 | 1,438 | 47 | 96 | 30.595745 | 0.849265 | 0.140473 | 0 | 0 | 0 | 0 | 0.096416 | 0.023891 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0.033333 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a62c74b285e912ac651e99cb611624923fcd7697 | 1,105 | py | Python | examples/canopen/canopen_example.py | RobertoRoos/ingenialink-python | c5e82dfbff17898bb316f5dc3f91a7f3c049ba20 | [
"MIT"
] | null | null | null | examples/canopen/canopen_example.py | RobertoRoos/ingenialink-python | c5e82dfbff17898bb316f5dc3f91a7f3c049ba20 | [
"MIT"
] | null | null | null | examples/canopen/canopen_example.py | RobertoRoos/ingenialink-python | c5e82dfbff17898bb316f5dc3f91a7f3c049ba20 | [
"MIT"
] | null | null | null | import sys
from ingenialink.canopen.net import Network, CAN_DEVICE


def run_example():
    net = None
    try:
        net = Network(device=CAN_DEVICE.PCAN)
        nodes = net.detect_nodes()
        net.scan('canopen_0.2.1.eds', 'registers_dictionary_canopen.xdf')
        drives_connected = net.servos
        if len(drives_connected) > 0:
            drive = drives_connected[0]
            try:
                print(drive.raw_read("MOT_PAIR_POLES", subnode=2))
            except:
                pass
            print("END")
            while 1:
                try:
                    txt = input("Type the id of the register you want to read: ")
                    if txt == "exit":
                        break
                    print(txt + ": " + str(drive.raw_read(txt)))
                except Exception as e:
                    print(e)
        else:
            print("No drives found! Disconnecting from the network...")
            net.disconnect()
    except Exception as e:
        print(e)
        net.disconnect()


if __name__ == '__main__':
    test = run_example()
    sys.exit()
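
# --- Editor's note (addition): running this script assumes a PCAN adapter is
# attached and that the EDS/XDF files named above are present in the working
# directory:
#
#   python canopen_example.py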
| 30.694444 | 81 | 0.514932 | 123 | 1,105 | 4.439024 | 0.536585 | 0.082418 | 0.058608 | 0.065934 | 0.087912 | 0.087912 | 0 | 0 | 0 | 0 | 0 | 0.010309 | 0.38552 | 1,105 | 35 | 82 | 31.571429 | 0.793814 | 0 | 0 | 0.272727 | 0 | 0 | 0.159276 | 0.028959 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0.030303 | 0.060606 | 0 | 0.090909 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a62e5d77c05596c38b887d3ac6ee4a9230f19780 | 5,051 | py | Python | rnmu/test/test_acontrario_point.py | marianotepper/nmu_rfit | c726be892b928b884f81452697b9211cf273e03c | [
"BSD-3-Clause"
] | 8 | 2017-06-13T13:07:34.000Z | 2020-02-13T06:30:42.000Z | rnmu/test/test_acontrario_point.py | marianotepper/nmu_rfit | c726be892b928b884f81452697b9211cf273e03c | [
"BSD-3-Clause"
] | null | null | null | rnmu/test/test_acontrario_point.py | marianotepper/nmu_rfit | c726be892b928b884f81452697b9211cf273e03c | [
"BSD-3-Clause"
] | 3 | 2017-06-10T18:30:57.000Z | 2019-03-19T07:28:25.000Z | from __future__ import print_function
import matplotlib.pyplot as plt
import matplotlib.colors as plt_colors
import numpy as np
import scipy.io
import scipy.stats

from rnmu.pme.point import Point
from rnmu.pme.line import Line
import rnmu.pme.stats as stats


def plot_soft_point(ax, point, sigma, box, n_levels=64, color='r', alpha=0.8):
    x_min, x_max = box
    xi, yi = np.mgrid[slice(x_min[0], x_max[0], .001),
                      slice(x_min[1], x_max[1], .001)]
    pos = np.vstack((xi.flatten(), yi.flatten())).T
    dists = point.distances(pos)
    alphas = scipy.stats.norm.pdf(dists, loc=0, scale=sigma)
    alphas /= alphas.max()
    alphas = alphas.reshape(xi.shape)
    levels = np.linspace(1e-2, 1, num=n_levels, endpoint=True)
    c = plt_colors.ColorConverter().to_rgba(color)
    colors = np.tile(c, (n_levels, 1))
    colors[:, 3] = levels
    ax.contourf(xi, yi, alphas, levels=levels, colors=colors, antialiased=True)


def plot_orthogonal_projection(data, mss, sigma, cutoff, axes=None):
    point = Point(data[mss, :])
    dists = point.distances(data) / sigma
    membership = np.exp(-(dists ** 2))
    idx = membership > np.exp(-(cutoff ** 2))
    membership = membership[idx]
    nfa = stats.concentration_nfa(membership, len(mss))

    if axes is not None:
        x_lim = (data[:, 0].min() - 0.1, data[:, 0].max() + 0.1)
        y_lim = (data[:, 1].min() - 0.1, data[:, 1].max() + 0.1)
        bbox = np.vstack((x_lim, y_lim)).T

        ax = axes[0]
        ax.set_xlim(x_lim)
        ax.set_ylim(y_lim)
        ax.scatter(data[:, 0], data[:, 1], c='w', s=10)
        ax.scatter(data[mss, 0], data[mss, 1], c='r', s=10)
        plot_soft_point(ax, point, sigma, bbox, n_levels=10, color='r', alpha=0.8)
        ax.set_aspect('equal', adjustable='box')

        membership.sort()
        membership = np.insert(membership, 0, 0)
        n = len(membership)
        acc = np.arange(n, dtype=float)  # np.float was removed in NumPy 1.24; plain float is equivalent
        acc /= n
        below = membership > acc
        idx = np.where(below)[0]
        starts = np.setdiff1d(idx - 1, idx)
        ends = np.setdiff1d(idx + 1, idx)

        def get_crossing(i):
            if i < 0 or i + 2 >= n:
                return None
            sl = slice(i, i + 2)
            x1 = membership[sl]
            y2 = acc[sl]
            pts2 = np.vstack((x1, y2)).T
            crossing = np.cross([-1, 1, 0], Line(pts2).eq)
            crossing /= crossing[2]
            return crossing

        membership_all_runs = []
        acc_all_runs = []
        for s, e in zip(starts, ends):
            run = slice(s + 1, e)
            membership_run = membership[run]
            acc_run = acc[run]
            crossing = get_crossing(s)
            if crossing is not None:
                membership_run = np.insert(membership_run, 0, crossing[0])
                acc_run = np.insert(acc_run, 0, crossing[1])
            crossing = get_crossing(e - 1)
            if crossing is not None:
                membership_run = np.append(membership_run, crossing[0])
                acc_run = np.append(acc_run, crossing[1])
            membership_all_runs = np.hstack(
                (membership_all_runs, membership_run))
            acc_all_runs = np.hstack((acc_all_runs, acc_run))

        ax = axes[1]
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 1)
        ax.fill_between(membership_all_runs, membership_all_runs, acc_all_runs,
                        color='g', alpha=0.3)
        arg_Dmin = np.argmax(membership - acc)
        pt = membership[arg_Dmin]
        ax.plot([pt, pt], [pt, acc[arg_Dmin]], 'r-', linewidth=2)
        ax.plot(membership, acc, 'k-', linewidth=2)
        ax.plot([0, 1], [0, 1], 'g-', linewidth=2)
        ax.set_aspect('equal', adjustable='box')
        ax.set_title('NFA: {:.2e}'.format(nfa))

    return nfa


def main(sigma=0.07, cutoff=1.5):
    seed = 0
    # seed = np.random.randint(0, np.iinfo(np.uint32).max)
    print('seed:', seed)
    np.random.seed(seed)

    # data = mat['Stairs4_S00075_O60'].T
    # data = mat['Star5_S00075_O50'].T
    noise = np.random.rand(950, 2)
    cl = np.random.rand(50, 2) * 0.1 + 0.5
    data = np.append(noise, cl, axis=0)
    n = len(data)
    print(n)

    # random_sample = np.random.randint(n, size=1)
    # print(random_sample)

    fig, axes = plt.subplots(nrows=2, ncols=2)
    plot_orthogonal_projection(data, [300], sigma, cutoff, axes[:, 0])
    plot_orthogonal_projection(data, [976], sigma, cutoff, axes[:, 1])

    fig, ax = plt.subplots(nrows=1, ncols=1)
    x_lim = (data[:, 0].min() - 0.1, data[:, 0].max() + 0.1)
    y_lim = (data[:, 1].min() - 0.1, data[:, 1].max() + 0.1)
    ax.set_xlim(x_lim)
    ax.set_ylim(y_lim)
    ax.scatter(data[:, 0], data[:, 1], c='w', s=10)
    for i in range(len(data)):
        nfa = plot_orthogonal_projection(data, [i], sigma, cutoff)
        if nfa < 0:
            print(i, nfa)
            ax.scatter(data[i, 0], data[i, 1], c='r', s=20)
    ax.set_aspect('equal', adjustable='box')


if __name__ == '__main__':
    main()
    plt.show()
| 31.372671 | 82 | 0.567808 | 752 | 5,051 | 3.678191 | 0.230053 | 0.0094 | 0.03073 | 0.040492 | 0.199205 | 0.164497 | 0.095445 | 0.095445 | 0.07086 | 0.07086 | 0 | 0.043893 | 0.273807 | 5,051 | 160 | 83 | 31.56875 | 0.710196 | 0.036824 | 0 | 0.128205 | 0 | 0 | 0.012554 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034188 | false | 0 | 0.076923 | 0 | 0.136752 | 0.034188 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a62ea8307891d611a45992bc024911fc8e8e0da9 | 17,456 | py | Python | oldbabylonian/cookbook/poslib.py | sethbam9/tutorials | c259636682304cb516e9048ca8df5a3ab92c62cc | [
"MIT"
] | 2 | 2019-07-17T18:51:26.000Z | 2019-07-24T19:45:23.000Z | oldbabylonian/cookbook/poslib.py | sethbam9/tutorials | c259636682304cb516e9048ca8df5a3ab92c62cc | [
"MIT"
] | 3 | 2019-01-16T10:56:50.000Z | 2020-11-16T16:30:48.000Z | oldbabylonian/cookbook/poslib.py | sethbam9/tutorials | c259636682304cb516e9048ca8df5a3ab92c62cc | [
"MIT"
] | 2 | 2020-12-17T15:41:33.000Z | 2021-11-03T18:23:07.000Z | import os
import collections
from functools import reduce
import yaml
from tf.lib import writeSets
HERE_BASE = "."
DROPBOX_BASE = "~/Dropbox/obb"
SET_NAME = "sets.tfx"
MODULE = "pos"
TF_LOC = f"{MODULE}/tf"
def getCases(caseStr):
caseLines = caseStr.strip().split("\n")
result = {}
for caseLine in caseLines:
(wordStr, categoryStr) = [x.strip() for x in caseLine.split("=", maxsplit=1)]
categories = [x.strip() for x in categoryStr.strip().split(",")]
words = [x.strip() for x in wordStr.strip().split("+")]
for word in words:
if word in result:
print(f"WARNING: word {word} also occurs in another case")
result[word] = categories
return result
def getNoccs(data):
return sum(len(x) for x in data.values())
def getOccs(data):
return reduce(set.union, data.values(), set(),)
class PosTag(object):
def __init__(self, A):
self.A = A
self.api = A.api
self.sets = collections.defaultdict(set)
self.nodeFeatures = dict(pos={}, subpos={}, cs={}, ps={}, gn={}, nu={},)
self.done = set()
def getWoccs(self, data):
wordsOccs = self.wordsOccs
return sum(len(wordsOccs[x]) for x in data)
def prepare(self):
api = self.api
A = self.A
F = api.F
L = api.L
wordFromSigns = {}
wordsOccs = collections.defaultdict(set)
wordsWithoutDet = set()
wordsWithDet = collections.defaultdict(set)
wordsStrippedDet = collections.defaultdict(set)
wordsNumeral = set()
wordsNumeralUnknown = set()
wordsUnknown = set()
def usable(s):
return F.reading.v(s) or F.grapheme.v(s)
for w in F.otype.s("word"):
isNum = False
noDet = False
signs = [s for s in L.d(w, otype="sign") if usable(s)]
if not signs:
continue
word = "-".join(usable(s) for s in signs)
augWord = f"-{word}-"
if "-n-" in augWord:
wordsNumeralUnknown.add(word)
wordsUnknown.add(word)
else:
if "x" in word or "..." in word:
wordsUnknown.add(word)
wordsOccs[word].add(w)
wordFromSigns[w] = word
if any(F.reading.v(s) == "n" or F.type.v(s) == "numeral" for s in signs):
wordsNumeral.add(word)
isNum = True
signsNonDet = [s for s in signs if not F.det.v(s)]
if len(signsNonDet) == 0:
continue
if len(signsNonDet) == len(signs):
wordsWithoutDet.add(word)
noDet = True
if isNum or noDet:
continue
wordStripped = "-".join(usable(s) for s in signsNonDet)
wordsStrippedDet[wordStripped].add(word)
wordsWithDet[word].add(w)
self.wordsOccs = wordsOccs
self.wordsWithDet = wordsWithDet
self.wordsWithoutDet = wordsWithoutDet
self.wordsStrippedDet = wordsStrippedDet
self.wordsNumeral = wordsNumeral
self.wordsNumeralUnknown = wordsNumeralUnknown
self.wordsUnknown = wordsUnknown
self.wordFromSigns = wordFromSigns
md = f"""
kind of word | distinct forms | number of occurrences
--- | ---:| ---:
all | {len(wordsOccs)} | {getNoccs(wordsOccs)}
with unknown sign | {len(wordsUnknown)} | {self.getWoccs(wordsUnknown)}
unknown numeral | {len(wordsNumeralUnknown)} | {self.getWoccs(wordsNumeralUnknown)}
numeral | {len(wordsNumeral)} | {self.getWoccs(wordsNumeral)}
without dets | {len(wordsWithoutDet)} | {self.getWoccs(wordsWithoutDet)}
with det | {len(wordsWithDet)} | {getNoccs(wordsWithDet)}
with det cut away | {len(wordsStrippedDet)} | {getNoccs(wordsStrippedDet)}
"""
A.dm(md)
def initFeatures(self, *feats):
return {feat: {} for feat in feats}
def addFeatures(self, theseFeats):
nodeFeatures = self.nodeFeatures
for (feat, data) in theseFeats.items():
for (n, v) in data.items():
nodeFeatures[feat][n] = v
def doKnownCases(self, caseStr):
wordsOccs = self.wordsOccs
cases = getCases(caseStr)
done = self.done
sets = self.sets
theseFeats = self.initFeatures("pos", "subpos")
pos = theseFeats["pos"]
subpos = theseFeats["subpos"]
for (word, occs) in wordsOccs.items():
cats = cases.get(word, None)
if cats:
cat = "".join(cats)
done.add(word)
for w in occs:
pos[w] = cats[0]
if len(cats) > 1:
subpos[w] = cats[1]
sets[cat].add(w)
print(f" distinct words: {len(done):>6}")
for (f, data) in sorted(theseFeats.items()):
print(f"{f:>6} assignments: {len(data):>6}")
self.addFeatures(theseFeats)
def doPrnPrs(self, prnPrsStr):
wordFromSigns = self.wordFromSigns
sets = self.sets
done = self.done
api = self.api
F = api.F
L = api.L
T = api.T
error = self.api.TF.error
setSilent = self.api.TF.setSilent
setSilent(False)
# collect the forms in handy dicts
prnPrsForms = yaml.load(prnPrsStr, Loader=yaml.FullLoader)
exceptions = collections.defaultdict(dict)
regular = {}
for (case, tags) in prnPrsForms.items():
for (tag, forms) in tags.items():
for frm in forms:
if type(frm) is dict:
for (f, occs) in frm.items():
if f == "/":
continue
for occ in occs:
(doc, faceLine) = occ.split(maxsplit=1)
(face, line) = faceLine.split(":", maxsplit=1)
ln = T.nodeFromSection((doc, face, line))
if ln is None:
error(f"Unknown section: {occ}")
exceptions[ln][f] = (case, tag)
else:
if frm == "/":
continue
regular[frm] = (case, tag)
# collect the occurrences
featsFromTag = {}
def parseTag(t):
info = dict(ps=t[0], gn=t[1], nu=t[2:4],)
featsFromTag[t] = info
return info
theseFeats = self.initFeatures("pos", "subpos", "cs", "ps", "gn", "nu")
pos = theseFeats["pos"]
subpos = theseFeats["subpos"]
cs = theseFeats["cs"]
pronouns = set()
exceptionHits = collections.defaultdict(collections.Counter)
regularHits = collections.Counter()
for ln in F.otype.s("line"):
lineExceptions = exceptions[ln] if ln in exceptions else None
for w in L.d(ln, otype="word"):
word = wordFromSigns.get(w, None)
if word is None or word in done:
continue
if lineExceptions and word in lineExceptions:
exceptionHits[ln][word] += 1
elif word in regular:
regularHits[word] += 1
result = (
lineExceptions.get(word, regular.get(word, None))
if lineExceptions
else regular.get(word, None)
)
if result:
pronouns.add(word)
(case, tag) = result
pos[w] = "prn"
subpos[w] = "prs"
cs[w] = case
for (k, v) in featsFromTag.get(result[1], parseTag(tag)).items():
theseFeats[k][w] = v
sets["prnprs"].add(w)
nRegDeclared = len(regular)
nRegHit = len(regularHits)
eDiff = nRegDeclared - nRegHit
if eDiff:
error(f"{nRegDeclared} forms declared, but {nRegHit} ones encountered:")
for (word, info) in regular.items():
hits = regularHits[word]
if not hits:
infoRep = ", ".join(info)
error(f"missed {word:<20} => {infoRep}")
nExcDeclared = sum(len(y) for y in exceptions.values())
nExcHit = sum(len(y) for y in exceptionHits.values())
eDiff = nExcDeclared - nExcHit
if eDiff:
error(
f"{nExcDeclared} exceptions declared, but {nExcHit} ones encountered:"
)
for (ln, words) in sorted(exceptions.items()):
for (word, info) in words.items():
hits = exceptionHits.get(ln, {}).get(word, 0)
if not hits:
infoRep = ", ".join(info)
line = "{:<7} {:>5}:{:<3}".format(*T.sectionFromNode(ln))
error(f"missed exception {word:<20} in line {line} => {infoRep}")
for word in pronouns:
done.add(word)
print(f" distinct words: {len(pronouns):>6}")
for (f, data) in sorted(theseFeats.items()):
print(f"{f:>6} assignments: {len(data):>6}")
self.addFeatures(theseFeats)
def doPreps(self, prepStr):
api = self.api
F = api.F
wordsOccs = self.wordsOccs
theseFeats = self.initFeatures("pos", "subpos")
pos = theseFeats["pos"]
done = self.done
sets = self.sets
preps = set(prepStr.strip().split())
cat = "prep"
nPreps = 0
nOccs = 0
for (word, occs) in wordsOccs.items():
if word in done:
continue
if word in preps:
nPreps += 1
nOccs += len(occs)
for w in occs:
pos[w] = cat
sets[cat].add(w)
done.add(word)
sets["nonprep"] = set(F.otype.s("word")) - sets[cat]
self.preps = preps
print(f" distinct words: {nPreps:>6}")
for (f, data) in sorted(theseFeats.items()):
print(f"{f:>6} assignments: {len(data):>6}")
print(f' non-prep occs: {len(sets["nonprep"]):>6}')
self.addFeatures(theseFeats)
def doNouns(self):
api = self.api
S = api.S
nouns = {}
nouns[""] = {}
markedData = None
unmarkedData = None
label = None
wordsOccs = self.wordsOccs
wordsWithDet = self.wordsWithDet
wordsWithoutDet = self.wordsWithoutDet
wordsStrippedDet = self.wordsStrippedDet
wordsNumeral = self.wordsNumeral
wordsNumeralUnknown = self.wordsNumeralUnknown
wordsUnknown = self.wordsUnknown
wordFromSigns = self.wordFromSigns
done = self.done
sets = self.sets
def gather():
prefix = f"Before step {label}"
print(
f'{prefix:<35}: {len(nouns[""]):>5} words in {getNoccs(nouns[""]):>6} occurrences'
)
allData = collections.defaultdict(set)
for (word, occs) in markedData.items():
allData[word] = set(occs)
for (word, occs) in unmarkedData.items():
allData[word] |= occs
prefix = f"Due to step {label} marked"
print(
f"{prefix:35}: {len(markedData):>5} words in {getNoccs(markedData):>6} occurrences"
)
nouns[f"M{label}"] = markedData
prefix = f"Due to step {label} unmarked"
print(
f"{prefix:35}: {len(unmarkedData):>5} words in {getNoccs(unmarkedData):>6} occurrences"
)
nouns[f"U{label}"] = unmarkedData
prefix = f"Due to step {label} all"
print(
f"{prefix:35}: {len(allData):>5} words in {getNoccs(allData):>6} occurrences"
)
nouns[label] = allData
nouns[""].update(allData)
prefix = f"After step {label}"
print(
f'{prefix:<35}: {len(nouns[""]):>5} words in {getNoccs(nouns[""]):>6} occurrences'
)
print("-" * 40)
# based on determinatives
label = "det"
markedData = {
word: wordsOccs[word]
for word in wordsWithDet
if (word not in wordsNumeralUnknown and word not in done)
}
unmarkedData = {
word: wordsOccs[word]
for word in wordsWithoutDet & set(wordsStrippedDet)
if word not in wordsUnknown and word not in markedData
}
gather()
# based on prepositions
label = "prep"
query = """
prep
<: nonprep
"""
results = list(S.search(query, sets=sets))
markedData = collections.defaultdict(set)
for (p, w) in results:
word = wordFromSigns[w]
if word in done:
continue
markedData[word].add(w)
unmarkedData = collections.defaultdict(set)
for (word, markedOccs) in markedData.items():
if word in wordsUnknown:
continue
unmarkedData[word] = wordsOccs[word] - markedOccs
gather()
# based on Sumerian logograms
label = "logo"
query = """
word
/with/
sign langalt
/-/
"""
results = list(S.search(query))
markedData = collections.defaultdict(set)
for (w,) in results:
word = wordFromSigns[w]
if word in done:
continue
markedData[word].add(w)
unmarkedData = collections.defaultdict(set)
for (word, markedOccs) in markedData.items():
if word in wordsUnknown:
continue
unmarkedData[word] = wordsOccs[word] - markedOccs
gather()
# based on numerals
label = "num"
markedData = {
word: wordsOccs[word] for word in wordsNumeral if word not in done
}
unmarkedData = {}
gather()
# mark as done
for word in nouns[""]:
done.add(word)
# deliver to sets
for (name, data) in sorted(nouns.items()):
print(
f"noun{name:<9} with {len(data):>5} words and {getNoccs(data):>6} occurrences"
)
sets[f"noun{name}"] = getOccs(data)
# deliver to pos and subpos
theseFeats = self.initFeatures("pos", "subpos")
pos = theseFeats["pos"]
subpos = theseFeats["subpos"]
for (nkind, occs) in sets.items():
if nkind.startswith("noun"):
for n in occs:
pos[n] = "noun"
if nkind == "nounnum":
for n in occs:
subpos[n] = "numeral"
for (f, data) in sorted(theseFeats.items()):
print(f"{f:>6} assignments: {len(data):>6}")
self.addFeatures(theseFeats)
def export(self, metaData):
sets = self.sets
nodeFeatures = self.nodeFeatures
wordsOccs = self.wordsOccs
total = sum(len(x) for x in wordsOccs.values())
A = self.A
api = self.api
F = api.F
TF = api.TF
version = A.version
TF.save(
metaData=metaData,
nodeFeatures=nodeFeatures,
location=HERE_BASE,
module=f"{TF_LOC}/{version}",
silent=True,
)
nFeats = len(nodeFeatures)
featRep = ", ".join(sorted(nodeFeatures))
cats = collections.Counter()
pos = nodeFeatures["pos"]
subpos = nodeFeatures["subpos"]
for w in F.otype.s("word"):
ps = pos.get(w, "")
sp = subpos.get(w, "")
cat = f"{ps}-{sp}"
cats[cat] += 1
uncategorized = cats["-"]
categorized = total - uncategorized
catPerc = int(round(100 * categorized / total))
uncatPerc = int(round(100 * uncategorized / total))
nCats = len(cats) - 1
stst = "**"
md = f"""
---
## Features
{stst}{nFeats} TF features saved: {featRep}**.
### Categories (pos, subpos)
{nCats} categories.
category | % | number of nodes
--- | ---:| ---:
none | {uncatPerc} | {uncategorized}
all | {catPerc} | {categorized}
"""
for (cat, n) in sorted(cats.items(), key=lambda x: (-x[1], x[0])):
if cat == "-":
continue
perc = int(round(100 * n / total))
md += f"{cat} | {perc} | {n}\n"
A.dm(md)
md = f"""
### All features
feature | % | number of nodes
--- | ---:| ---:
"""
for feat in sorted(nodeFeatures):
n = len(nodeFeatures[feat])
perc = int(round(100 * n / total))
md += f"{feat} | {perc} | {n}\n"
A.dm(md)
for loc in [HERE_BASE, DROPBOX_BASE]:
path = os.path.expanduser(f"{loc}/{SET_NAME}")
writeSets(sets, path)
md = f"""
---
## sets
{stst}{len(sets)} sets written to disk (GitHub repo and Dropbox)**.
set | number of nodes
--- | ---:
"""
for (name, nodes) in sorted(sets.items()):
md += f"{name} | {len(nodes)}\n"
A.dm(md)
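# Hedged standalone sketch (illustrative data, not part of the class above): the
# merge pattern gather() uses to fold marked and unmarked occurrences together.
import collections
marked = {"sheep": {10, 11}, "grain": {20}}
unmarked = {"sheep": {12}}
merged = collections.defaultdict(set)
for word, occs in marked.items():
    merged[word] = set(occs)
for word, occs in unmarked.items():
    merged[word] |= occs
assert merged["sheep"] == {10, 11, 12}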
| 29.737649 | 103 | 0.507734 | 1,832 | 17,456 | 4.830786 | 0.158843 | 0.010847 | 0.025424 | 0.00791 | 0.256497 | 0.199548 | 0.139887 | 0.12791 | 0.116723 | 0.116723 | 0 | 0.00707 | 0.368011 | 17,456 | 586 | 104 | 29.788396 | 0.795141 | 0.011629 | 0 | 0.318681 | 0 | 0.013187 | 0.149336 | 0.02546 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035165 | false | 0 | 0.010989 | 0.008791 | 0.063736 | 0.035165 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a630e19b17abd22ec5eb9f8bfa6cd41dd44b3348 | 1,613 | py | Python | tcfcli/cmds/native/startapi/cli.py | tencentyun/scfcli | ef15508ad34a851cf0d2750dfaa5202f6a600887 | [
"Apache-2.0"
] | 103 | 2019-06-11T06:09:56.000Z | 2021-12-18T22:48:59.000Z | tcfcli/cmds/native/startapi/cli.py | TencentCloud/Serverless-cli | 57f98b24cfd10712770a4806212cfb69d981a11a | [
"Apache-2.0"
] | 8 | 2019-07-12T12:08:40.000Z | 2020-10-20T07:18:17.000Z | tcfcli/cmds/native/startapi/cli.py | TencentCloud/Serverless-cli | 57f98b24cfd10712770a4806212cfb69d981a11a | [
"Apache-2.0"
] | 49 | 2019-06-11T06:26:05.000Z | 2020-02-19T08:13:36.000Z | # -*- coding: utf-8 -*-
import click
from tcfcli.cmds.native.common.start_api_context import StartApiContext
from tcfcli.help.message import NativeHelp as help
DEF_TMP_FILENAME = "template.yaml"
@click.command(name='start-api', short_help=help.START_API_SHORT_HElP)
@click.option('--env-vars', '-n', help='JSON file containing function environment variables.',
type=click.Path(exists=True))
@click.option('--template', '-t', default=DEF_TMP_FILENAME, type=click.Path(exists=True),
envvar="TCF_TEMPLATE_FILE", show_default=True)
@click.option('--debug-port', '-d', help=help.START_API_DEBUG_PORT, default=None)
@click.option('--debug-args', help=help.START_API_DEBUG_ARGS, default="")
@click.argument('namespace_identifier', required=False)
@click.argument('function_identifier', required=False)
def startapi(template, namespace_identifier, function_identifier, env_vars, debug_port, debug_args):
'''
\b
Execute your scf natively in a local environment.
\b
Common usage:
\b
$ scf native start-api -t template.yaml
'''
start(template, namespace_identifier, function_identifier, env_vars, debug_port, debug_args)
def start(template, namespace, function, env_vars, debug_port, debug_args):
try:
with StartApiContext(
template_file=template,
namespace=namespace,
debug_port=debug_port,
debug_args=debug_args,
function=function,
env_file=env_vars,
) as context:
context.start()
except Exception:
raise # re-raise unchanged; a bare raise preserves the original traceback
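# Hedged usage sketch (assumes a template.yaml exists in the working directory):
# driving the command in-process with click's test runner instead of the scf CLI.
from click.testing import CliRunner
result = CliRunner().invoke(startapi, ['-t', 'template.yaml'])
print(result.exit_code, result.output)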
| 36.659091 | 100 | 0.681959 | 200 | 1,613 | 5.295 | 0.35 | 0.05949 | 0.0661 | 0.067989 | 0.238905 | 0.155807 | 0.1322 | 0.1322 | 0.1322 | 0.1322 | 0 | 0.000776 | 0.201488 | 1,613 | 43 | 101 | 37.511628 | 0.821429 | 0.083075 | 0 | 0 | 0 | 0 | 0.123611 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.107143 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a6329ec460cd0fd9eb39a536eca18ce065e12138 | 712 | py | Python | core/models/bert/classifier.py | readerbench/PASTEL | bef88d05c37b68f62a76983e4c31d9591b9a679a | [
"Apache-2.0"
] | null | null | null | core/models/bert/classifier.py | readerbench/PASTEL | bef88d05c37b68f62a76983e4c31d9591b9a679a | [
"Apache-2.0"
] | null | null | null | core/models/bert/classifier.py | readerbench/PASTEL | bef88d05c37b68f62a76983e4c31d9591b9a679a | [
"Apache-2.0"
] | null | null | null | from torch.nn.functional import relu, softmax, sigmoid  # note: unused in this module
from transformers import BertModel
import torch.nn as nn
class BERTClassifier(nn.Module):
def __init__(self, n_classes, pretrained_bert_model):
super(BERTClassifier, self).__init__()
self.bert = BertModel.from_pretrained(pretrained_bert_model)
self.drop = nn.Dropout(p=0.2)
self.tmp = nn.Linear(self.bert.config.hidden_size, self.bert.config.hidden_size)
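# note: self.tmp is constructed but never used in forward(); it appears to be leftover from an earlier variant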
self.out = nn.Linear(self.bert.config.hidden_size, n_classes)
def forward(self, input_ids, attention_mask):
_, pooled_output = self.bert(
input_ids=input_ids,
attention_mask=attention_mask
)
output = self.drop(pooled_output)
return self.out(output) | 37.473684 | 84 | 0.75 | 101 | 712 | 5.019802 | 0.415842 | 0.078895 | 0.08284 | 0.118343 | 0.18146 | 0.18146 | 0.126233 | 0 | 0 | 0 | 0 | 0.003295 | 0.147472 | 712 | 19 | 85 | 37.473684 | 0.83196 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.176471 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
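# Hedged usage sketch (assumes transformers 3.x, where BertModel returns a tuple,
# and that the "bert-base-uncased" weights are available): a dummy forward pass.
import torch
model = BERTClassifier(n_classes=2, pretrained_bert_model="bert-base-uncased")
input_ids = torch.randint(0, 30522, (2, 16))   # fake token ids, batch of 2, length 16
attention_mask = torch.ones_like(input_ids)
logits = model(input_ids, attention_mask)      # -> tensor of shape [2, 2]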
a6333b4280980727cc41b230009b8b7911647e4a | 3,095 | py | Python | django/apps/audit/operations.py | wykys/project-thesaurus | f700396b30ed44e6b001c15397a25450ac068af4 | [
"MIT"
] | null | null | null | django/apps/audit/operations.py | wykys/project-thesaurus | f700396b30ed44e6b001c15397a25450ac068af4 | [
"MIT"
] | 93 | 2020-05-19T18:14:12.000Z | 2022-03-29T00:26:39.000Z | django/apps/audit/operations.py | wykys/project-thesaurus | f700396b30ed44e6b001c15397a25450ac068af4 | [
"MIT"
] | 1 | 2020-11-21T20:24:35.000Z | 2020-11-21T20:24:35.000Z | from typing import Type
from django.db.migrations.operations.base import Operation
from django.db.models import Model
class AddAuditOperation(Operation):
reduces_to_sql = True
reversible = True
enabled = True
def __init__(self, model_name, audit_rows=True, audit_text=False, excluded=('created', 'modified')):
self._model_name = model_name
self._audit_text = audit_text
self._audit_rows = audit_rows
self._excluded = excluded
def state_forwards(self, app_label, state):
pass # no visible changes for Django schema
def database_forwards(
self, app_label, schema_editor, from_state, to_state,
):
model: Type[Model] = to_state.apps.get_model(app_label, self._model_name)
table = model._meta.db_table
with schema_editor.connection.cursor() as cursor:
cursor.execute('SELECT to_regclass(\'audit.logged_actions\')')
has_audit = cursor.fetchone()[0]
BOOLEANS = ("BOOLEAN 'f'", "BOOLEAN 't'")
if has_audit:
schema_editor.execute(
'SELECT audit.audit_table({})'.format(
', '.join(( # join parameters
f"'public.{table}'",
BOOLEANS[self._audit_rows],
BOOLEANS[self._audit_text],
"'{{ {} }}'".format(
','.join(map( # join as postgres array
lambda col: f'"{col}"',
map( # extract column names from field names
lambda f: model._meta.get_field(f).get_attname_column()[1],
self._excluded,
)
))
)
))
),
)
def database_backwards(
self, app_label, schema_editor, from_state, to_state,
):
model = to_state.apps.get_model(app_label, self._model_name)
table = model._meta.db_table
schema_editor.execute(
'DROP TRIGGER IF EXISTS audit_trigger_row ON {}'.format(table),
)
schema_editor.execute(
'DROP TRIGGER IF EXISTS audit_trigger_stm ON {}'.format(table),
)
def describe(self):
return 'Add audit triggers on model {}'.format(self._model_name)
class RemoveAuditOperation(AddAuditOperation):
enabled = False
def database_forwards(
self, app_label, schema_editor, from_state, to_state,
):
super().database_backwards(
app_label, schema_editor, from_state, to_state,
)
def database_backwards(
self, app_label, schema_editor, from_state, to_state,
):
super().database_forwards(
app_label, schema_editor, from_state, to_state,
)
def describe(self):
return 'Remove audit triggers on model {}'.format(self._model_name)
EnableAuditOperation = AddAuditOperation
DisableAuditOperation = RemoveAuditOperation
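# Hedged usage sketch (hypothetical app label and dependency): in a real migration
# file you would import AddAuditOperation from apps.audit.operations and list it in
# the migration's operations, so `manage.py migrate` installs the audit triggers.
from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [('thesis', '0001_initial')]
    operations = [
        AddAuditOperation('thesis', audit_rows=True, audit_text=False),
    ]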
| 34.388889 | 104 | 0.57189 | 321 | 3,095 | 5.23053 | 0.29595 | 0.071471 | 0.046456 | 0.071471 | 0.392496 | 0.392496 | 0.392496 | 0.392496 | 0.346039 | 0.30137 | 0 | 0.000972 | 0.33538 | 3,095 | 89 | 105 | 34.775281 | 0.815265 | 0.036511 | 0 | 0.315068 | 0 | 0 | 0.093047 | 0.007054 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109589 | false | 0.013699 | 0.041096 | 0.027397 | 0.260274 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a635f25e0d832aa8c213627d281f0fbe8a76415f | 9,791 | py | Python | train_due.py | BlackHC/DUE | e6370a89f9aab8bfbbafe0d9a544dd637867751d | [
"MIT"
] | 49 | 2021-02-23T14:34:04.000Z | 2022-03-24T22:50:02.000Z | train_due.py | BlackHC/DUE | e6370a89f9aab8bfbbafe0d9a544dd637867751d | [
"MIT"
] | 2 | 2021-12-06T22:25:41.000Z | 2022-02-02T12:38:08.000Z | train_due.py | BlackHC/DUE | e6370a89f9aab8bfbbafe0d9a544dd637867751d | [
"MIT"
] | 13 | 2021-03-05T01:35:12.000Z | 2022-02-22T11:24:14.000Z | import argparse
import json
import torch
import torch.nn.functional as F
from torch.utils.tensorboard.writer import SummaryWriter
from ignite.engine import Events, Engine
from ignite.metrics import Accuracy, Average, Loss
from ignite.contrib.handlers import ProgressBar
from gpytorch.mlls import VariationalELBO
from gpytorch.likelihoods import SoftmaxLikelihood
from due import dkl
from due.wide_resnet import WideResNet
from due.sngp import Laplace
from lib.datasets import get_dataset
from lib.evaluate_ood import get_ood_metrics
from lib.utils import get_results_directory, Hyperparameters, set_seed
def main(hparams):
results_dir = get_results_directory(hparams.output_dir)
writer = SummaryWriter(log_dir=str(results_dir))
ds = get_dataset(hparams.dataset, root=hparams.data_root)
input_size, num_classes, train_dataset, test_dataset = ds
hparams.seed = set_seed(hparams.seed)
if hparams.n_inducing_points is None:
hparams.n_inducing_points = num_classes
print(f"Training with {hparams}")
hparams.save(results_dir / "hparams.json")
feature_extractor = WideResNet(
input_size,
hparams.spectral_conv,
hparams.spectral_bn,
dropout_rate=hparams.dropout_rate,
coeff=hparams.coeff,
n_power_iterations=hparams.n_power_iterations,
)
if hparams.sngp:
# Defaults from SNGP in uncertainty-baselines
num_deep_features = 640
num_gp_features = 128
normalize_gp_features = True
num_random_features = 1024
num_data = len(train_dataset)
mean_field_factor = 25
ridge_penalty = 1
lengthscale = 2
model = Laplace(
feature_extractor,
num_deep_features,
num_gp_features,
normalize_gp_features,
num_random_features,
num_classes,
num_data,
hparams.batch_size,
mean_field_factor,
ridge_penalty,
lengthscale,
)
loss_fn = F.cross_entropy
likelihood = None
else:
initial_inducing_points, initial_lengthscale = dkl.initial_values(
train_dataset, feature_extractor, hparams.n_inducing_points
)
gp = dkl.GP(
num_outputs=num_classes,
initial_lengthscale=initial_lengthscale,
initial_inducing_points=initial_inducing_points,
kernel=hparams.kernel,
)
model = dkl.DKL(feature_extractor, gp)
likelihood = SoftmaxLikelihood(num_classes=num_classes, mixing_weights=False)
likelihood = likelihood.cuda()
elbo_fn = VariationalELBO(likelihood, gp, num_data=len(train_dataset))
loss_fn = lambda x, y: -elbo_fn(x, y)
model = model.cuda()
optimizer = torch.optim.SGD(
model.parameters(),
lr=hparams.learning_rate,
momentum=0.9,
weight_decay=hparams.weight_decay,
)
milestones = [60, 120, 160]
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=milestones, gamma=0.2
)
def step(engine, batch):
model.train()
if not hparams.sngp:
likelihood.train()
optimizer.zero_grad()
x, y = batch
x, y = x.cuda(), y.cuda()
y_pred = model(x)
loss = loss_fn(y_pred, y)
loss.backward()
optimizer.step()
return loss.item()
def eval_step(engine, batch):
model.eval()
if not hparams.sngp:
likelihood.eval()
x, y = batch
x, y = x.cuda(), y.cuda()
with torch.no_grad():
y_pred = model(x)
return y_pred, y
trainer = Engine(step)
evaluator = Engine(eval_step)
metric = Average()
metric.attach(trainer, "loss")
def output_transform(output):
y_pred, y = output
# Sample softmax values independently for classification at test time
y_pred = y_pred.to_data_independent_dist()
# The mean here is over likelihood samples
y_pred = likelihood(y_pred).probs.mean(0)
return y_pred, y
if hparams.sngp:
output_transform = lambda x: x # noqa
metric = Accuracy(output_transform=output_transform)
metric.attach(evaluator, "accuracy")
if hparams.sngp:
metric = Loss(F.cross_entropy)
else:
metric = Loss(lambda y_pred, y: -likelihood.expected_log_prob(y, y_pred).mean())
metric.attach(evaluator, "loss")
kwargs = {"num_workers": 4, "pin_memory": True}
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=hparams.batch_size,
shuffle=True,
drop_last=True,
**kwargs,
)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=512, shuffle=False, **kwargs
)
if hparams.sngp:
@trainer.on(Events.EPOCH_STARTED)
def reset_precision_matrix(trainer):
model.reset_precision_matrix()
@trainer.on(Events.EPOCH_COMPLETED)
def log_results(trainer):
metrics = trainer.state.metrics
train_loss = metrics["loss"]
result = f"Train - Epoch: {trainer.state.epoch} "
if hparams.sngp:
result += f"Loss: {train_loss:.2f} "
else:
result += f"ELBO: {train_loss:.2f} "
print(result)
writer.add_scalar("Loss/train", train_loss, trainer.state.epoch)
if hparams.spectral_conv:
for name, layer in model.feature_extractor.named_modules():
if isinstance(layer, torch.nn.Conv2d):
writer.add_scalar(
f"sigma/{name}", layer.weight_sigma, trainer.state.epoch
)
if trainer.state.epoch > 150 and trainer.state.epoch % 5 == 0:
_, auroc, aupr = get_ood_metrics(
hparams.dataset, "SVHN", model, likelihood, hparams.data_root
)
print(f"OoD Metrics - AUROC: {auroc}, AUPR: {aupr}")
writer.add_scalar("OoD/auroc", auroc, trainer.state.epoch)
writer.add_scalar("OoD/auprc", aupr, trainer.state.epoch)
evaluator.run(test_loader)
metrics = evaluator.state.metrics
acc = metrics["accuracy"]
test_loss = metrics["loss"]
result = f"Test - Epoch: {trainer.state.epoch} "
if hparams.sngp:
result += f"Loss: {test_loss:.2f} "
else:
result += f"NLL: {test_loss:.2f} "
result += f"Acc: {acc:.4f} "
print(result)
writer.add_scalar("Loss/test", test_loss, trainer.state.epoch)
writer.add_scalar("Accuracy/test", acc, trainer.state.epoch)
scheduler.step()
pbar = ProgressBar(dynamic_ncols=True)
pbar.attach(trainer)
trainer.run(train_loader, max_epochs=200)
# Done training - time to evaluate
results = {}
evaluator.run(test_loader)
test_acc = evaluator.state.metrics["accuracy"]
test_loss = evaluator.state.metrics["loss"]
results["test_accuracy"] = test_acc
results["test_loss"] = test_loss
_, auroc, aupr = get_ood_metrics(
hparams.dataset, "SVHN", model, likelihood, hparams.data_root
)
results["auroc_ood_svhn"] = auroc
results["aupr_ood_svhn"] = aupr
print(f"Final accuracy {results['test_accuracy']:.4f}")
results_json = json.dumps(results, indent=4, sort_keys=True)
(results_dir / "results.json").write_text(results_json)
torch.save(model.state_dict(), results_dir / "model.pt")
if likelihood is not None:
torch.save(likelihood.state_dict(), results_dir / "likelihood.pt")
writer.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--batch_size", type=int, default=128, help="Batch size to use for training"
)
parser.add_argument(
"--learning_rate",
type=float,
default=0.1,
help="Learning rate",
)
parser.add_argument("--weight_decay", type=float, default=5e-4, help="Weight decay")
parser.add_argument("--dropout_rate", type=float, default=0.3, help="Dropout rate")
parser.add_argument(
"--dataset",
default="CIFAR10",
choices=["CIFAR10", "CIFAR100"],
help="Pick a dataset",
)
parser.add_argument(
"--kernel",
default="RBF",
choices=["RBF", "RQ", "Matern12", "Matern32", "Matern52"],
help="Pick a kernel",
)
parser.add_argument(
"--no_spectral_conv",
action="store_false",
dest="spectral_conv",
help="Don't use spectral normalization on the convolutions",
)
parser.add_argument(
"--no_spectral_bn",
action="store_false",
dest="spectral_bn",
help="Don't use spectral normalization on the batch normalization layers",
)
parser.add_argument(
"--sngp",
action="store_true",
help="Use SNGP (RFF and Laplace) instead of a DUE (sparse GP)",
)
parser.add_argument(
"--n_inducing_points", type=int, help="Number of inducing points"
)
parser.add_argument("--seed", type=int, help="Seed to use for training")
parser.add_argument(
"--coeff", type=float, default=3, help="Spectral normalization coefficient"
)
parser.add_argument(
"--n_power_iterations", default=1, type=int, help="Number of power iterations"
)
parser.add_argument(
"--output_dir", default="./default", type=str, help="Specify output directory"
)
parser.add_argument(
"--data_root", default="./data", type=str, help="Specify data directory"
)
args = parser.parse_args()
hparams = Hyperparameters(**vars(args))
main(hparams)
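# Hedged standalone sketch (not part of the training script): the MultiStepLR
# schedule configured above multiplies the learning rate by 0.2 at epochs 60,
# 120 and 160, so lr=0.1 ends at 0.1 * 0.2**3 = 8e-4 after 200 epochs.
import torch
p = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD(p, lr=0.1)
sched = torch.optim.lr_scheduler.MultiStepLR(opt, milestones=[60, 120, 160], gamma=0.2)
for _ in range(200):
    opt.step()
    sched.step()
print(opt.param_groups[0]["lr"])   # ~8e-4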
| 28.297688 | 88 | 0.629456 | 1,161 | 9,791 | 5.106804 | 0.234281 | 0.022769 | 0.043009 | 0.012818 | 0.165964 | 0.090066 | 0.069152 | 0.05802 | 0.045539 | 0.03913 | 0 | 0.009845 | 0.263405 | 9,791 | 345 | 89 | 28.37971 | 0.812257 | 0.019406 | 0 | 0.162791 | 0 | 0 | 0.13111 | 0.007504 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.062016 | 0 | 0.096899 | 0.01938 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a6377793c412ebc024d19ecb586fc91e462e185d | 2,032 | py | Python | dataset/cifar10_c.py | xuanqing94/NeuralSDE | f3511799cfc9c3d6b95ff9bcb07563df88715e0c | [
"MIT"
] | 5 | 2020-06-28T07:15:35.000Z | 2022-01-20T01:52:31.000Z | dataset/cifar10_c.py | xuanqing94/NeuralSDE | f3511799cfc9c3d6b95ff9bcb07563df88715e0c | [
"MIT"
] | null | null | null | dataset/cifar10_c.py | xuanqing94/NeuralSDE | f3511799cfc9c3d6b95ff9bcb07563df88715e0c | [
"MIT"
] | null | null | null | from PIL import Image
import os
import numpy as np
import torch
import torch.utils.data as data
class CIFAR10_C(object):
def __init__(self, path, levels=(1, 2, 3, 4, 5)):  # immutable default avoids the shared mutable-default pitfall
path = os.path.expanduser(path)
self.path = path
self.levels = levels
files = sorted(os.listdir(path))  # sort for a deterministic dataset order across runs
datasets = []
label = np.load(os.path.join(path, "labels.npy"))
selected = []
for l in levels:
selected.append(label[(l-1)*10000:l*10000])
label = np.concatenate(selected, axis=0)
for f in files:
if f == "labels.npy":
continue
result = np.load(os.path.join(path, f))
selected = []
for l in levels:
selected.append(result[(l-1)*10000:l*10000])
selected = np.concatenate(selected, axis=0)
datasets.append(selected)
self.datasets = datasets
self.label = label
def num_datasets(self):
return len(self.datasets)
def get_ith_data(self, i, transform=None, target_transform=None):
return CIFAR10_C_data(self.datasets[i], self.label, transform, target_transform)
class CIFAR10_C_data(data.Dataset):
def __init__(self, dataset, label, transform=None, target_transform=None):
assert dataset.shape[0] == len(label)
self.dataset = dataset
self.label = np.asarray(label, dtype=int)
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img, target = self.dataset[index], self.label[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.label)
if __name__ == "__main__":
demo = CIFAR10_C("~/data/CIFAR-10-C", [1])  # avoid shadowing the torch.utils.data import above
d = demo.get_ith_data(0)
print(d[0])
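# Hedged usage sketch (assumes the CIFAR-10-C .npy archives sit under
# ~/data/CIFAR-10-C): wrap one corruption dataset in a DataLoader for evaluation.
import torchvision.transforms as T
corrupted = CIFAR10_C("~/data/CIFAR-10-C", levels=[3])
loader = torch.utils.data.DataLoader(
    corrupted.get_ith_data(0, transform=T.ToTensor()),
    batch_size=128, shuffle=False)
images, targets = next(iter(loader))   # images: FloatTensor [128, 3, 32, 32]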
| 30.328358 | 88 | 0.596457 | 260 | 2,032 | 4.5 | 0.284615 | 0.089744 | 0.030769 | 0.020513 | 0.213675 | 0.092308 | 0.05812 | 0 | 0 | 0 | 0 | 0.029861 | 0.291339 | 2,032 | 66 | 89 | 30.787879 | 0.782639 | 0 | 0 | 0.075472 | 0 | 0 | 0.022146 | 0 | 0 | 0 | 0 | 0 | 0.018868 | 1 | 0.113208 | false | 0 | 0.09434 | 0.056604 | 0.320755 | 0.018868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a637a96d54f58cda8435da2b415a24a67ca1842e | 3,544 | py | Python | models/word2vec/cbow_model.py | ktodorov/eval-historical-texts | e2994d594525d1d92056a6398935376a96659abb | [
"MIT"
] | 9 | 2020-08-27T15:03:46.000Z | 2022-01-02T10:48:35.000Z | models/word2vec/cbow_model.py | ktodorov/eval-historical-texts | e2994d594525d1d92056a6398935376a96659abb | [
"MIT"
] | 16 | 2020-09-12T17:37:59.000Z | 2020-11-18T10:36:32.000Z | models/word2vec/cbow_model.py | ktodorov/eval-historical-texts | e2994d594525d1d92056a6398935376a96659abb | [
"MIT"
] | 1 | 2022-03-08T16:16:52.000Z | 2022-03-08T16:16:52.000Z | import os
from typing import Callable
import torch
from torch import nn
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from overrides import overrides
from entities.metric import Metric
from entities.batch_representation import BatchRepresentation
from entities.options.embedding_layer_options import EmbeddingLayerOptions
from entities.options.pretrained_representations_options import PretrainedRepresentationsOptions
from enums.embedding_type import EmbeddingType
from models.model_base import ModelBase
from models.embedding.embedding_layer import EmbeddingLayer
from services.arguments.semantic_arguments_service import SemanticArgumentsService
from services.vocabulary_service import VocabularyService
from services.data_service import DataService
from services.file_service import FileService
from services.process.cbow_process_service import CBOWProcessService
class CBOWModel(ModelBase):
def __init__(
self,
arguments_service: SemanticArgumentsService,
vocabulary_service: VocabularyService,
data_service: DataService,
process_service: CBOWProcessService,
file_service: FileService,
use_only_embeddings: bool = False):
super().__init__(data_service, arguments_service)
self._arguments_service = arguments_service
self._vocabulary_service = vocabulary_service
self._mask_token_idx = process_service._mask_idx
pretrained_word_weights = None
if not arguments_service.evaluate and not arguments_service.resume_training and not arguments_service.run_experiments:
pretrained_word_weights = process_service.get_pretrained_embedding_weights()
pretrained_weight_matrix_dim = 300
if pretrained_word_weights is not None:
pretrained_weight_matrix_dim = pretrained_word_weights.shape[1]
embedding_layer_options = EmbeddingLayerOptions(
device=arguments_service.device,
pretrained_representations_options=PretrainedRepresentationsOptions(
include_pretrained_model=False),
vocabulary_size=vocabulary_service.vocabulary_size(),
learn_word_embeddings=True,
word_embeddings_size=pretrained_weight_matrix_dim,
pretrained_word_weights=pretrained_word_weights,
output_embedding_type=EmbeddingType.Word)
self._embedding_layer = EmbeddingLayer(
file_service, embedding_layer_options)
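# assumption: the 6x input width below corresponds to six concatenated context-word embeddings (the window size is fixed by the process service)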
self._mapping_layer = nn.Linear(
in_features=6 * pretrained_weight_matrix_dim,
out_features=128)
self._output_layer = nn.Linear(
in_features=128,
out_features=vocabulary_service.vocabulary_size())
self._use_only_embeddings = use_only_embeddings
@overrides
def forward(self, input_batch: BatchRepresentation, **kwargs):
embeddings = self._embedding_layer.forward(input_batch)
if self._use_only_embeddings:
return embeddings
embeddings = embeddings.view(input_batch.batch_size, -1)
mapped_embeddings = self._mapping_layer.forward(embeddings)
output_result = self._output_layer.forward(mapped_embeddings)
return output_result, input_batch.targets
@overrides
def compare_metric(self, best_metric: Metric, new_metrics: Metric) -> bool:
if best_metric.is_new or best_metric.get_current_loss() >= new_metrics.get_current_loss():
return True
return False | 38.945055 | 126 | 0.752822 | 381 | 3,544 | 6.619423 | 0.288714 | 0.057098 | 0.04996 | 0.039651 | 0.054718 | 0.036479 | 0.036479 | 0 | 0 | 0 | 0 | 0.004222 | 0.198081 | 3,544 | 91 | 127 | 38.945055 | 0.883181 | 0 | 0 | 0.028986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.26087 | 0 | 0.376812 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a637ff1c369797ef1abc4107c3a9bce38ee9b23d | 990 | py | Python | mri_works/NodeEditor/modules/File_IO/Deleting_files_folders.py | montigno/mri_works | 8ec6ff1500aa34d3540e44e4b0148023cf821f61 | [
"CECILL-B"
] | 2 | 2020-08-20T21:00:53.000Z | 2021-08-16T15:28:51.000Z | mri_works/NodeEditor/modules/File_IO/Deleting_files_folders.py | montigno/mri_works | 8ec6ff1500aa34d3540e44e4b0148023cf821f61 | [
"CECILL-B"
] | 3 | 2020-09-24T06:50:43.000Z | 2020-12-15T11:02:04.000Z | mri_works/NodeEditor/modules/File_IO/Deleting_files_folders.py | montigno/mri_works | 8ec6ff1500aa34d3540e44e4b0148023cf821f61 | [
"CECILL-B"
] | 1 | 2020-08-20T21:00:59.000Z | 2020-08-20T21:00:59.000Z | class deleting_folder:
def __init__(self, input_folder='path', ignore_errors=False):
import shutil
shutil.rmtree(input_folder, ignore_errors=ignore_errors)
##############################################################################
class deleting_file:
def __init__(self, input_file='path'):
import os
try:
os.remove(input_file)
except OSError as e:
print("Error : %s : %s" % (input_file, e.strerror))
##############################################################################
class deleting_files_model:
def __init__(self, input_dir='path', model='*.txt'):
import os
import glob
files = glob.glob(os.path.join(input_dir, model))
for f in files:
try:
os.remove(f)
except OSError as e:
print('Error : %s : %s' % (f, e.strerror))
##############################################################################
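# Hedged usage sketch (hypothetical paths): these node classes do their work in
# __init__, so merely instantiating one performs the deletion.
deleting_files_model(input_dir='/tmp/scratch', model='*.log')
deleting_folder(input_folder='/tmp/scratch', ignore_errors=True)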
| 26.756757 | 78 | 0.435354 | 94 | 990 | 4.308511 | 0.37234 | 0.096296 | 0.081481 | 0.118519 | 0.138272 | 0.138272 | 0.138272 | 0.138272 | 0 | 0 | 0 | 0 | 0.244444 | 990 | 36 | 79 | 27.5 | 0.541444 | 0 | 0 | 0.285714 | 0 | 0 | 0.062169 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.190476 | 0 | 0.47619 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a63828f981b1214d9b5676be8d17b2e2e623a148 | 834 | py | Python | scripts/opts.py | RipZ/beerbox | b396c5f14ed088ac82c2ebcea9746456748d763d | [
"Apache-2.0"
] | null | null | null | scripts/opts.py | RipZ/beerbox | b396c5f14ed088ac82c2ebcea9746456748d763d | [
"Apache-2.0"
] | null | null | null | scripts/opts.py | RipZ/beerbox | b396c5f14ed088ac82c2ebcea9746456748d763d | [
"Apache-2.0"
] | null | null | null | import argparse
parser = argparse.ArgumentParser(description='oled arguments')
parser.add_argument(
'--port', '-p',
type=int,
default=0,
help='i2c bus number',
)
parser.add_argument(
'--parameter', '-t',
type=str,
default='',
help='parameter to display',
)
parser.add_argument(
'--value', '-v',
type=str,
default='',
help='value of parameter to display',
)
parser.add_argument(
'--address', '-a',
type=str,
default='0x3c',
help='i2c display address',
)
parser.add_argument(
'--display', '-d',
type=str,
default='ssd1306',
help='display type, one of ssd1306 or sh1106',
)
args = parser.parse_args()
args.address = int(args.address, 0)
import oled.device
Device = getattr(oled.device, args.display)
device = Device(port=args.port, address=args.address)
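# Hedged usage sketch (hypothetical values): a typical invocation of this script.
#   python opts.py --port 1 --address 0x3d --display sh1106 -t temperature -v 21.5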
| 20.341463 | 62 | 0.642686 | 104 | 834 | 5.096154 | 0.375 | 0.084906 | 0.160377 | 0.067925 | 0.132075 | 0.132075 | 0 | 0 | 0 | 0 | 0 | 0.026588 | 0.188249 | 834 | 40 | 63 | 20.85 | 0.756278 | 0 | 0 | 0.297297 | 0 | 0 | 0.236211 | 0 | 0 | 0 | 0.004796 | 0 | 0 | 1 | 0 | false | 0 | 0.054054 | 0 | 0.054054 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a639c17d6fd2fadfce0ccee6caacea6630ec1b9f | 1,931 | py | Python | deploy/app/pipeline.py | PBenavides/credit-fraud-API | 154b7ab2247f89649f8244b35d115cf352be23a5 | [
"MIT"
] | null | null | null | deploy/app/pipeline.py | PBenavides/credit-fraud-API | 154b7ab2247f89649f8244b35d115cf352be23a5 | [
"MIT"
] | null | null | null | deploy/app/pipeline.py | PBenavides/credit-fraud-API | 154b7ab2247f89649f8244b35d115cf352be23a5 | [
"MIT"
] | null | null | null | import pandas as pd
import datetime
import numpy as np
import json
from app import artifacts_dict as artifacts
from app.utils import get_time_now, get_time_sin_cos
class InferencePipeline():
"""Pipeline to receive data_dict and transform for inference.
---------
Parameters:
data_dict: from the request
---------
Methods:
.predict(model_name): Get the prediction of a chosen model.
"""
def __init__(self, data_dict):
"""Initialize the data pipeline
"""
self.data = data_dict #Raw Data
self.time = datetime.datetime.utcnow() #Requested time (note the call; without it the function itself would be stored)
self.X = np.float32(self.__transform_to_infer()) #Transform data to infer
def create_features(self):
"""Adds features to data
"""
seconds = get_time_now()
sin_time, cos_time = get_time_sin_cos(seconds)
self.data['sin_time'] = sin_time
self.data['cos_time'] = cos_time
def normalize_columns(self):
"""Normalize data
--------
output: pd.DataFrame object
"""
self.normalizer = artifacts['normalizer']
to_norm = pd.DataFrame(data={0: self.data}).T.infer_objects()
self.transformed_data = self.normalizer.transform(to_norm.values)
X_norm = pd.DataFrame(self.transformed_data, index=[0], # index=[0] builds a single-row frame for this one inference
columns= to_norm.columns
)
return X_norm
def __transform_to_infer(self):
"""Transform data. Ready to predict
"""
self.create_features()
X = self.normalize_columns()
return X
def predict(self, model_name):
"""Predict from data request
"""
self.model = artifacts[model_name] #Get model
prediction = self.model.predict(self.X) #Predict
pred_json = json.dumps(prediction.tolist()) #Send
return pred_json | 25.407895 | 102 | 0.616261 | 235 | 1,931 | 4.851064 | 0.323404 | 0.035088 | 0.017544 | 0.022807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003595 | 0.279648 | 1,931 | 76 | 103 | 25.407895 | 0.81596 | 0.254272 | 0 | 0 | 0 | 0 | 0.019259 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151515 | false | 0 | 0.181818 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a639e39129cf3e132136165ec24e35e67033099f | 1,054 | py | Python | setup.py | bfontaine/wpydumps | f01ff6fc7dfae75a5b1526d26e33d92410898971 | [
"MIT"
] | 1 | 2021-01-06T17:49:01.000Z | 2021-01-06T17:49:01.000Z | setup.py | bfontaine/wpydumps | f01ff6fc7dfae75a5b1526d26e33d92410898971 | [
"MIT"
] | null | null | null | setup.py | bfontaine/wpydumps | f01ff6fc7dfae75a5b1526d26e33d92410898971 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
from setuptools import setup
# http://stackoverflow.com/a/7071358/735926
import re
VERSIONFILE = 'wpydumps/__init__.py'
verstrline = open(VERSIONFILE, 'rt').read()
VSRE = r'^__version__\s+=\s+[\'"]([^\'"]+)[\'"]'
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % VERSIONFILE)
setup(
name='wpydumps',
version=verstr,
author='Baptiste Fontaine',
author_email='b@ptistefontaine.fr',
packages=['wpydumps'],
url='https://github.com/bfontaine/wpydumps',
license='MIT License',
description='Read Wikipedia dumps',
long_description=open('README.md', 'r', encoding='utf-8').read(),
long_description_content_type='text/markdown',
install_requires=[
"libarchive",
],
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
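# Hedged standalone sketch (illustrative string): the VSRE pattern above extracts
# the version from a line such as "__version__ = '0.1.2'".
import re
m = re.search(r'^__version__\s+=\s+[\'"]([^\'"]+)[\'"]', "__version__ = '0.1.2'", re.M)
assert m and m.group(1) == '0.1.2'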
| 28.486486 | 76 | 0.634725 | 121 | 1,054 | 5.413223 | 0.652893 | 0.087023 | 0.114504 | 0.119084 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025731 | 0.188805 | 1,054 | 36 | 77 | 29.277778 | 0.740351 | 0.059772 | 0 | 0.064516 | 0 | 0 | 0.398785 | 0.021255 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.064516 | 0 | 0.064516 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a63bcfa9188011d5f54844e668d23332501081e1 | 36,617 | py | Python | xData/multiD_XYs.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 14 | 2019-08-29T23:46:24.000Z | 2022-03-21T10:16:25.000Z | xData/multiD_XYs.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 1 | 2020-08-04T16:14:45.000Z | 2021-12-01T01:54:34.000Z | xData/multiD_XYs.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 2 | 2022-03-03T22:41:41.000Z | 2022-03-03T22:54:43.000Z | # <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""
This module contains the XYsnd classes for n > 1.
"""
__metaclass__ = type
"""
Missing methods
copyDataToGridWsAndXsAndYs
def getValue( self, *values ) :
setFromList
setFromW_XYs
thin
toPointwise_withLinearXYs
toString
plot
"""
import abc
from pqu import PQU as PQUModule
from . import formatVersion as formatVersionModule
from . import standards as standardsModule
from . import base as baseModule
from . import axes as axesModule
from . import XYs as XYsModule
from . import regions as regionsModule
from . import series1d as series1dModule
from . import xs_pdf_cdf as xs_pdf_cdfMoudle
from . import uncertainties as uncertaintiesModule
def flatInterpolationToLinearPoint( lowerDomain, upperDomain, domain, epsilon ) :
if( domain < 0 ) :
domain *= ( 1.0 - epsilon )
elif( domain > 0 ) :
domain *= ( 1.0 + epsilon )
else :
domain = epsilon
if( epsilon != 0.0 ) :
if( ( domain <= lowerDomain ) or ( domain >= upperDomain ) ) : domain = 0.5 * ( lowerDomain + upperDomain )
return( domain )
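# Hedged standalone sketch (illustrative values, wrapped in a function so nothing
# runs at import time): the helper nudges a flat-step boundary point slightly past
# the step, and falls back to the interval midpoint when the nudged point escapes
# (lowerDomain, upperDomain).
def _flatInterpolationToLinearPointExample():
    assert flatInterpolationToLinearPoint(1.0, 3.0, 2.0, 1e-6) == 2.0 * (1.0 + 1e-6)
    assert flatInterpolationToLinearPoint(1.0, 3.0, -2.0, 1e-6) == 2.0  # clipped to midpoint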
class XYsnd( baseModule.xDataFunctional ) :
ancestryMembers = ( '[functionals', )
def __init__( self, interpolation = standardsModule.interpolation.linlinToken, axes = None,
index = None, valueType = standardsModule.types.float64Token, outerDomainValue = None, label = None,
interpolationQualifier = standardsModule.interpolation.noneQualifierToken ) :
"""
Abstract base class constructor for XYsnd class.
"""
baseModule.xDataFunctional.__init__( self, self.moniker, axes, index = index, valueType = valueType,
outerDomainValue = outerDomainValue, label = label )
if( not( isinstance( interpolation, str ) ) ) : raise TypeError( 'interpolation must be a string' )
self.interpolation = interpolation
if( not( isinstance( interpolationQualifier, str ) ) ) : raise TypeError( 'interpolation qualifier must be a string' )
self.interpolationQualifier = interpolationQualifier
self.__functionals = []
def __len__( self ) :
return( len( self.__functionals ) )
def __getitem__( self, index ) :
return( self.__functionals[index] )
def __setitem__( self, index, functional ) :
index1, functional1 = self._set_insertCommon( index, functional.outerDomainValue, functional )
if( index1 is not None ) :
if( ( index1 > 0 ) and ( functional1.outerDomainValue <= self.__functionals[index1-1].outerDomainValue ) ) :
raise ValueError( 'functional.outerDomainValue = %s is <= prior functional.outerDomainValue = %s' % ( functional1.outerDomainValue , self.__functionals[index1-1].outerDomainValue ) )
if( ( index1 < ( len( self ) - 1 ) ) and ( functional1.outerDomainValue >= self.__functionals[index1+1].outerDomainValue ) ) :
raise ValueError( 'functional.outerDomainValue = %s is >= next functional.outerDomainValue = %s' % ( functional1.outerDomainValue, self.__functionals[index1+1].outerDomainValue ) )
self.__functionals[index1] = functional1
def __mul__( self, other ) :
try :
value = float( other )
except :
raise ValueError( "Other must be a number." )
functionNd = self.__class__( interpolation = self.interpolation, axes = self.axes.copy( ), index = self.index, outerDomainValue = self.outerDomainValue,
label = self.label, interpolationQualifier = self.interpolationQualifier )
for function in self.__functionals : functionNd.append( function * value )
return( functionNd )
__rmul__ = __mul__
@property
def domainMin( self ) :
return( self.__functionals[0].outerDomainValue )
@property
def domainMax( self ) :
return( self.__functionals[-1].outerDomainValue )
@property
def domainUnit( self ) :
return( self.getAxisUnitSafely( self.dimension ) )
@property
def domainGrid( self ) :
return( [ functional.outerDomainValue for functional in self ] )
@property
def rangeMin( self ) :
return( min( [ func.rangeMin for func in self ] ) )
@property
def rangeMax( self ) :
return( max( [ func.rangeMax for func in self ] ) )
@property
def rangeUnit( self ) :
return( self.getAxisUnitSafely( 0 ) )
@property
def functionals( self ) :
"""Returns self's __functionals."""
return( self.__functionals )
@property
def functionNdsName( self ) :
"""Returns the node name for the child "function#ds"."""
return( "function%dds" % ( self.dimension - 1 ) )
def append( self, functional ) :
self.insert( len( self ), functional )
def insert( self, index, functional, outerDomainValue = None ) :
"""
Inserts functional at index. If outerDomainValue is None, outerDomainValue is taken from functional.outerDomainValue.
"""
if( outerDomainValue is None ) : outerDomainValue = functional.outerDomainValue
index1, functional1 = self._set_insertCommon( index, outerDomainValue, functional )
if( index1 is not None ) :
if( ( index1 > 0 ) and ( outerDomainValue <= self.__functionals[index1-1].outerDomainValue ) ) :
raise Exception( 'outerDomainValue = %s is <= prior functionals.outerDomainValue = %s' % ( outerDomainValue, self.__functionals[index1-1].outerDomainValue ) )
if( outerDomainValue >= self.__functionals[index1].outerDomainValue ) :
raise Exception( 'outerDomainValue = %s is >= next functionals.outerDomainValue = %s. index = %d' % ( outerDomainValue, self.__functionals[index1].outerDomainValue, index1 ) )
self.__functionals.insert( index1, functional1 )
def insertAtValue( self, functional, outerDomainValue = None ) :
"""
Inserts functional at the appropriate index for outerDomainValue. The inserted functional instance will have its
outerDomainValue set to outerDomainValue, even if functional already has its own.
"""
if( outerDomainValue is None ) : outerDomainValue = functional.outerDomainValue
outerDomainValue = float( outerDomainValue )
index = -1 # Set in case self is empty and next line does not set index or functional.
for index, functional1 in enumerate( self ) :
if( functional1.outerDomainValue >= outerDomainValue ) : break
if( index == -1 ) :
index = 0
else :
if( functional1.outerDomainValue == outerDomainValue ) :
del self.__functionals[index]
elif( functional1.outerDomainValue < outerDomainValue ) : # Happens when outerDomainValue is greater than last items outerDomainValue.
index += 1
self.insert( index, functional, outerDomainValue = outerDomainValue )
def pop( self, index ):
return self.__functionals.pop( index )
def _set_insertCommon( self, index, outerDomainValue, functional ) :
"""For internal use only."""
if( not( isinstance( functional, self.allowedSubElements( ) ) ) ) :
raise TypeError( 'Invalid class "%s" for insertion into "%s".' % ( functional.__class__, self.__class__ ) )
outerDomainValue = float( outerDomainValue )
if( not( isinstance( functional, baseModule.xDataFunctional ) ) ) :
raise TypeError( 'right-hand-side must be instance of xDataFunctional' )
if( functional.dimension != ( self.dimension - 1 ) ) :
raise Exception( 'functional dimension = %d not one less than self dimension = %d'
% ( functional.dimension, self.dimension ) )
n1 = len( self )
if( n1 < index ) : raise IndexError( 'index = %s while length is %s' % ( index, n1 ) )
index1 = index
if( index1 < 0 ) : index1 += n1
if( index1 < 0 ) : raise IndexError( 'index = %s' % index )
functional.setAncestor( self )
if( n1 == 0 ) :
self.__functionals.append( functional )
return( None, None )
elif( n1 == index1 ) :
if( outerDomainValue <= self.__functionals[-1].outerDomainValue ) :
raise Exception( 'outerDomainValue = %s is <= prior functional.outerDomainValue = %s' % ( outerDomainValue, self.__functionals[-1].outerDomainValue ) )
self.__functionals.append( functional )
return( None, None )
return( ( index1, functional ) )
def convertUnits( self, unitMap ) :
"""
unitMap is a dictionary of the form { 'eV' : 'MeV', 'b' : 'mb' }.
"""
for functional in self : functional.convertUnits( unitMap )
factors = self.axes.convertUnits( unitMap )
self.fixValuePerUnitChange( factors )
def copy( self ) :
axes = self.axes
if( axes is not None ) : axes = axes.copy( )
multid_xys = self.__class__( interpolation = self.interpolation, index = self.index,
outerDomainValue = self.outerDomainValue, axes = axes, interpolationQualifier = self.interpolationQualifier )
for i1, functional in enumerate( self ) : multid_xys[i1] = functional.copy( )
return( multid_xys )
__copy__ = copy
def copyDataToNestedLists( self ) :
return( [ [ subData.outerDomainValue, subData.copyDataToNestedLists( ) ] for subData in self ] )
def evaluate( self, domainValue, extrapolation = standardsModule.noExtrapolationToken, epsilon = 0, interpolationQualifier = None ) :
"""
Evaluates the function at the domain point domainValue.
Interpolation is used if domainValue is between two sub-functions. However, if one of the
sub-functions is within domainValue * epsilon of domainValue then that sub-function is returned.
If both sub-functions are within domainValue * epsilon of domainValue, the closest is returned.
"""
if( interpolationQualifier is None ) : interpolationQualifier = self.interpolationQualifier
outerDomainValue = baseModule.getDomainValue2( domainValue )
if( extrapolation not in standardsModule.validExtrapolations ) :
raise ValueError( 'Invalid extrapolation value = "%s"' % extrapolation )
position, function1, function2, frac, interpolation, interpolationQualifier2 = self.getBoundingSubFunctions( domainValue )
if( position is None ) : raise Exception( "No data to interpolate" )
if( frac <= epsilon ) : # If close to first point pick it.
function = function1.copy( )
elif( abs( 1 - frac ) <= epsilon ) : # If close to second point pick it.
function = function2.copy( )
else :
if( position in ( '=', '<', '>' ) ) :
if( position != '=' ) :
if( extrapolation != standardsModule.flatExtrapolationToken ) :
index = { '<' : 0, '>' : -1 }[position]
raise Exception( "evaluation point = %s %s than %s" %
( outerDomainValue, { '<' : 'less', '>' : 'greater' }[position], self[index].outerDomainValue ) )
function = function1.copy( )
else :
if( not( isinstance( function1, XYsModule.XYs1d ) ) ) : # FIXME, accuracy, lowerEps and upperEps should not be hardwired.
if( hasattr( function1, 'toPointwiseLinear' ) ) :
function1 = function1.toPointwiseLinear( accuracy = 1e-4, lowerEps = 1e-6, upperEps = 1e-6 )
else :
function1 = function1.toPointwise_withLinearXYs( accuracy = 1e-4, lowerEps = 1e-6, upperEps = 1e-6 )
if( not( isinstance( function2, XYsModule.XYs1d ) ) ) :
if( hasattr( function1, 'toPointwiseLinear' ) ) :
function2 = function2.toPointwiseLinear( accuracy = 1e-4, lowerEps = 1e-6, upperEps = 1e-6 )
else :
function2 = function2.toPointwise_withLinearXYs( accuracy = 1e-4, lowerEps = 1e-6, upperEps = 1e-6 )
if( interpolationQualifier == standardsModule.interpolation.unitBaseToken ) :
if( function1.dimension == 1 ) :
xy = XYsModule.pointwiseXY_C.unitbaseInterpolate( outerDomainValue, function1.outerDomainValue, function1.nf_pointwiseXY,
function2.outerDomainValue, function2.nf_pointwiseXY, 1 )
elif( function1.dimension == 2 ) :
frac1 = 1.0 - frac
frac2 = frac
function1 = function1.copy( )
function2 = function2.copy( )
EPrime1_1 = function1.domainMin
EPrime2_1 = function1.domainMax
EPrime1_2 = function2.domainMin
EPrime2_2 = function2.domainMax
EPrime1 = frac1 * EPrime1_1 + frac2 * EPrime1_2
EPrime2 = frac1 * EPrime2_1 + frac2 * EPrime2_2
energyPrimes = set( )
for function1d in function1 :
frac = ( EPrime2_1 - function1d.outerDomainValue ) / ( EPrime2_1 - EPrime1_1 )
function1d.outerDomainValue = frac * EPrime1 + ( 1.0 - frac ) * EPrime2
energyPrimes.add( function1d.outerDomainValue )
for function1d in function2 :
frac = ( EPrime2_2 - function1d.outerDomainValue ) / ( EPrime2_2 - EPrime1_2 )
function1d.outerDomainValue = frac * EPrime1 + ( 1.0 - frac ) * EPrime2
energyPrimes.add( function1d.outerDomainValue )
energyPrimes = sorted( list( energyPrimes ) )
function = function1.__class__( outerDomainValue = outerDomainValue )
scale1 = ( EPrime2_1 - EPrime1_1 ) / ( EPrime2 - EPrime1 )
scale2 = ( EPrime2_2 - EPrime1_2 ) / ( EPrime2 - EPrime1 )
for energyPrime in energyPrimes :
function1d1 = function1.evaluate( energyPrime )
function1d2 = function2.evaluate( energyPrime )
function1d = scale1 * frac1 * function1d1 + scale2 * frac2 * function1d2
function.append( function1d )
return( function )
else :
raise ValueError( "Unitbase interpolate of %d dimensional function not supported." % function1.dimension )
elif( interpolationQualifier == standardsModule.interpolation.unitBaseUnscaledToken ) :
xy = XYsModule.pointwiseXY_C.unitbaseInterpolate( outerDomainValue, function1.outerDomainValue, function1.nf_pointwiseXY,
function2.outerDomainValue, function2.nf_pointwiseXY, 0 )
else :
xy = ( 1.0 - frac ) * function1 + frac * function2
try :
interpolation = xy.interpolation
except :
interpolation = xy.getInterpolation( )
function = function1.returnAsClass( function1, xy, outerDomainValue = outerDomainValue, interpolation = interpolation )
function.outerDomainValue = outerDomainValue
return( function )
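# Hedged usage sketch (illustrative values): for a 2d function f with 1d slices at
# outerDomainValue 1.0 and 3.0, f.evaluate(2.0) returns the slice interpolated
# halfway between them; with interpolationQualifier 'unitBase' the slice domains
# are first unit-base mapped before the fractional weights are applied.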
def findInstancesOfClassInChildren( self, cls, level = 9999 ) :
"""
Finds all instances of class *cls* in self's children, grand-children, etc.
"""
foundInstances = []
level -= 1
if( level < 0 ) : return( foundInstances )
for functional in self :
if( isinstance( functional, cls ) ) : foundInstances.append( functional )
foundInstances += functional.findInstancesOfClassInChildren( cls, level = level )
return( foundInstances )
def integrate( self, **limits ):
"""
Integrate a XYsnd function. Supports limits for each axis.
Example:
>>> XYsnd.integrate( energy_in = ('1e-5 eV', '10 eV'), energy_out = ('1 keV', '10 keV') )
:param limits: dictionary containing limits for each independent axis (keyed by axis label or index).
If an independent axis is missing from the dictionary, integrate over the entire domain of that axis.
:return: float or PQU
"""
domainMin, domainMax = None, None
if( len( limits ) > 0 ) :
if self.axes[-1].label in limits :
domainMin, domainMax = limits.pop( self.axes[-1].label )
elif self.axes[-1].index in limits :
domainMin, domainMax = limits.pop( self.axes[-1].index )
xys_ = []
for functional in self :
if isinstance( functional, ( XYsModule.XYs1d, series1dModule.series ) ) :
xys_.append( [ functional.outerDomainValue, functional.integrate( domainMin = domainMin, domainMax = domainMax ) ] )
elif isinstance( functional, ( XYsnd, regionsModule.regions ) ) :
xys_.append( [ functional.outerDomainValue, functional.integrate( **limits ) ] )
else :
raise TypeError( "Unsupported class for integration: %s" % type( functional ) )
if( type( xys_[0][1] ) == float ) :
yUnit = "1"
else :
yUnit = xys_[0][1].getUnitSymbol( )
xys = [ [ x, float( y ) ] for x, y in xys_ ]
unit = self.getAxisUnitSafely( self.dimension )
domainMin, domainMax = baseModule.getDomainLimits( self, domainMin, domainMax, unit )
value = float( XYsModule.XYs1d( xys, interpolation = self.interpolation ).integrate( domainMin, domainMax ) )
if( yUnit == "1" ) :
return( PQUModule.PQU( value, "" ) )
else :
return( PQUModule.PQU( value, baseModule.processUnits( unit, yUnit, '*' ) ) )
def interpolateAtValue( self, value, unitBase = False, extrapolation = standardsModule.noExtrapolationToken ) :
"""
Returns a functional with dimension one less than self that is the interpolation of self at value.
If value is outside the domain of self and extrapolation is 'noExtrapolationToken', an exception is raised. Otherwise,
a flat-interpolated functional is returned. If unitBase is True, then unit-base interpolation is performed on
the lowest dimension and the dependent data. This method is deprecated (see evaluate).
"""
if( extrapolation not in standardsModule.validExtrapolations ) : raise ValueError( 'Invalid extrapolation value = "%s"' % extrapolation )
if( len( self ) == 0 ) : raise Exception( "No data to interpolate" )
if( value < self[0].outerDomainValue ) :
if( extrapolation == standardsModule.flatExtrapolationToken ) :
function = self[0].copy( )
function.outerDomainValue = value
return( function )
else :
raise Exception( "Interpolation point = %s less than %s" % ( value, self[0].outerDomainValue ) )
if( value > self[-1].outerDomainValue ) :
if( extrapolation == standardsModule.flatExtrapolationToken ) :
function = self[-1].copy( )
function.outerDomainValue = value
return( function )
else :
raise Exception( "Interpolation point = %s greater than %s" % ( value, self[-1].outerDomainValue ) )
for index, functional2 in enumerate( self ) :
if( functional2.outerDomainValue >= value ) : break
if( value == functional2.outerDomainValue ) :
function = functional2.copy( )
function.outerDomainValue = value
return( function )
functional1 = self[index-1]
# FIXME: following logic only works if functional1 and functional2 are both XYs1d:
if( unitBase ) :
xy = XYsModule.pointwiseXY_C.unitbaseInterpolate( value, functional1.outerDomainValue, functional1.nf_pointwiseXY,
functional2.outerDomainValue, functional2.nf_pointwiseXY, 1 )
else :
f = ( functional2.outerDomainValue - value ) / ( functional2.outerDomainValue - functional1.outerDomainValue )
xy = f * functional1 + ( 1. - f ) * functional2
xyp = functional1.returnAsClass( functional1, xy, outerDomainValue = value )
return( xyp )
def getBoundingSubFunctions( self, value ) :
"""
Returns the tuple flag, functional1, functional2, frac, interpolation and interpolationQualifier.
Flag is one of
+-------+---------------------------------------------------------------------------+
| None | no data, |
+-------+---------------------------------------------------------------------------+
| '<' | value below domainMin, |
+-------+---------------------------------------------------------------------------+
| '>' | value above domainMax, |
+-------+---------------------------------------------------------------------------+
| '=' | value at functional1 or |
+-------+---------------------------------------------------------------------------+
| '' | functional1.outerDomainValue <= value < functional2.outerDomainValue. |
+-------+---------------------------------------------------------------------------+
If flag is None then functional1, functional2 and frac are also None. If flag is not '' then functional2 is None.
"""
interpolation = self.interpolation
interpolationQualifier = self.interpolationQualifier
if( len( self ) == 0 ) :
return( None, None, None, None, interpolation, interpolationQualifier )
elif( len( self ) == 1 ) :
if( value == self[0].outerDomainValue ) :
return( '=', self[0], None, 0.0, interpolation, interpolationQualifier )
symbol = '<'
if( value > self[0].outerDomainValue ) : symbol = '>'
return( symbol, self[0], None, 0.0, interpolation, interpolationQualifier )
elif( value < self[0].outerDomainValue ) :
frac = ( self[0].outerDomainValue - value ) / max( abs( value ), abs( self[0].outerDomainValue ) )
return( '<', self[0], None, frac, interpolation, interpolationQualifier )
elif( value > self[-1].outerDomainValue ) :
frac = ( value - self[-1].outerDomainValue ) / max( abs( value ), abs( self[-1].outerDomainValue ) )
return( '>', self[-1], None, frac, interpolation, interpolationQualifier )
for index, functional2 in enumerate( self ) :
if( functional2.outerDomainValue >= value ) : break
functional1 = functional2
if( value == functional2.outerDomainValue ) : return( '=', functional2, None, 0, interpolation, interpolationQualifier )
frac = ( value - functional1.outerDomainValue ) / ( functional2.outerDomainValue - functional1.outerDomainValue )
return( '', functional1, functional2, frac, interpolation, interpolationQualifier )
def normalize( self, insitu = True, dimension = None ) :
selfsDimension = self.dimension
if( dimension is None ) : dimension = selfsDimension
if( dimension < 0 ) : dimension += selfsDimension
if( dimension < 1 ) : raise Exception( 'Dimension %d out of range, must be 1 or greater' % dimension )
multid_xys = self
if( not( insitu ) ) : multid_xys = self.copy( )
if( dimension == 0 ) : return( multid_xys )
if( dimension >= selfsDimension ) :
multid_xys.scaleDependent( 1. / multid_xys.integrate( ), insitu = True )
else :
for functional in multid_xys.__functionals : functional.normalize( insitu = True, dimension = dimension )
return( multid_xys )
def domainUnitConversionFactor( self, unitTo ) :
if( unitTo is None ) : return( 1. )
return( PQUModule.PQU( '1 ' + self.domainUnit ).getValueAs( unitTo ) )
def domainSlice( self, domainMin = None, domainMax = None, fill = 1, dullEps = 0. ) :
"""
Returns a new instance with self sliced between ``domainMin`` and ``domainMax``.
:param domainMin: [optional] the lower x-value of the slice, default is domain minimum of self,
:param domainMax: [optional] the upper x-value of the slice, default is domain maximum of self,
:param fill: [optional] if True, points are added at domainMin and domainMax if they are not in self,
else only existing points in the range [domainMin, domainMax] are included.
:param dullEps: [optional] (Currently not implemented) the lower and upper points are dulled, default is 0.
"""
if( domainMin is None ) : domainMin = self.domainMin
domainMin = max( domainMin, self.domainMin )
if( domainMax is None ) : domainMax = self.domainMax
domainMax = min( domainMax, self.domainMax )
newMultiD = self.__class__( interpolation = self.interpolation, axes = self.axes.copy( ), index = self.index, valueType = self.valueType,
outerDomainValue = self.outerDomainValue, label = self.label, interpolationQualifier = self.interpolationQualifier )
domainGrid = [ tmp.outerDomainValue for tmp in self ]
for idx1, val in enumerate( domainGrid ) :
if( val >= domainMin ) : break
for idx2, val in enumerate( domainGrid ) :
if( val >= domainMax ) : break
if( domainGrid[idx1] == domainMin ) :
newMultiD.append( self[idx1].copy( ) )
idx1 += 1
else :
newMultiD.append( self.evaluate( domainMin ) )
for idx in range( idx1, idx2 ) : newMultiD.append( self[idx].copy( ) )
if( domainGrid[idx2] == domainMax ) :
newMultiD.append( self[idx2].copy( ) )
else:
newMultiD.append( self.evaluate( domainMax ) )
return( newMultiD )
def rangeUnitConversionFactor( self, unitTo ) :
if( unitTo is None ) : return( 1. )
return( PQUModule.PQU( '1 ' + self.rangeUnit ).getValueAs( unitTo ) )
def scaleDependent( self, value, insitu = False ) :
multid_xys = self
if( not( insitu ) ) : multid_xys = self.copy( )
for functional in multid_xys : functional.scaleDependent( value, insitu = True )
def toPointwiseLinear( self, **kwargs ) :
if( self.interpolation not in [ standardsModule.interpolation.flatToken, standardsModule.interpolation.linlinToken ] ) :
raise TypeError( 'Unsupported interpolation = "%s".' % self.interpolation )
flatInterpolation = self.interpolation == standardsModule.interpolation.flatToken
if( flatInterpolation ) :
lowerEps = kwargs.get( 'lowerEps', 0.0 )
upperEps = kwargs.get( 'upperEps', 0.0 )
lowerEps = abs( lowerEps )
upperEps = abs( upperEps )
if( ( lowerEps <= 0.0 ) and ( upperEps <= 0.0 ) ) :
raise ValueError( 'For "%s" interpolation, one or both of lowerEps and/or upperEps must be greater than zero.' % self.interpolation )
lowerFlatInterpolation = flatInterpolation and ( lowerEps > 0 )
upperFlatInterpolation = flatInterpolation and ( upperEps > 0 )
pointwiseLinear = self.toLinearXYsClass( )
pointwiseLinear = pointwiseLinear( interpolation = standardsModule.interpolation.linlinToken, axes = self.axes, index = self.index,
valueType = self.valueType, outerDomainValue = self.outerDomainValue, interpolationQualifier = self.interpolationQualifier )
lastIndex = len( self.__functionals ) - 1
for index, subFunction in enumerate( self ) :
            if( hasattr( subFunction, 'toPointwiseLinear' ) ) :
                subFunctionPointwiseLinear = subFunction.toPointwiseLinear( **kwargs )
            else :      # fall back for sub-functions that only provide toPointwise_withLinearXYs (the original else branch repeated the call above)
                subFunctionPointwiseLinear = subFunction.toPointwise_withLinearXYs( **kwargs )
if( flatInterpolation and ( index > 0 ) ) :
lowerDomainValue = priorSumPointwiseLinear.outerDomainValue
upperDomainValue = subFunction.outerDomainValue
if( index == lastIndex ) : lowerEps = 0.0
priorSumPointwiseLinear.outerDomainValue = flatInterpolationToLinearPoint( lowerDomainValue, upperDomainValue, upperDomainValue, -lowerEps )
pointwiseLinear.append( priorSumPointwiseLinear )
if( index < lastIndex ) :
nextDomainValue = self[index+1].outerDomainValue
subFunctionPointwiseLinear.outerDomainValue = flatInterpolationToLinearPoint( upperDomainValue, nextDomainValue, upperDomainValue, upperEps )
pointwiseLinear.append( subFunctionPointwiseLinear )
priorSumPointwiseLinear = subFunctionPointwiseLinear.copy( )
else :
pointwiseLinear.append( subFunctionPointwiseLinear )
if( flatInterpolation ) : priorSumPointwiseLinear = subFunctionPointwiseLinear.copy( )
return( pointwiseLinear )
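    # A minimal usage sketch (hypothetical flat-interpolated instance `flat2d`; the
    # eps values are illustrative, not prescribed defaults):
    #
    #     lin2d = flat2d.toPointwiseLinear( lowerEps = 1e-8, upperEps = 1e-8 )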
def toPointwise_withLinearXYs( self, **kwargs ) :
arguments = self.getArguments( kwargs, { 'cls' : None } )
cls = arguments['cls']
kwargs.pop( 'cls', None )
if( cls is None ) : cls = self.__class__
newMultiD = cls( interpolation = self.interpolation, axes = self.axes, index = self.index, valueType = self.valueType,
outerDomainValue = self.outerDomainValue, label = self.label, interpolationQualifier = self.interpolationQualifier )
for subsec in self:
newPW = subsec.toPointwise_withLinearXYs( cls = subsec.toLinearXYsClass( ), **kwargs )
newPW.outerDomainValue = subsec.outerDomainValue
newMultiD.append( newPW )
return newMultiD
def toXMLList( self, indent = '', **kwargs ) :
formatVersion = kwargs.get( 'formatVersion', formatVersionModule.default )
indent2 = indent + kwargs.get( 'incrementalIndent', ' ' )
indent3 = indent2 + kwargs.get( 'incrementalIndent', ' ' )
if( formatVersion == formatVersionModule.version_1_10 ) : indent3 = indent2
outline = kwargs.get( 'outline', False )
if( len( self ) < 6 ) : outline = False
attributeStr = baseModule.xDataFunctional.attributesToXMLAttributeStr( self )
if( self.interpolation != standardsModule.interpolation.linlinToken ) :
attributeStr += ' interpolation="%s"' % self.interpolation
if( self.interpolationQualifier != standardsModule.interpolation.noneQualifierToken ) :
attributeStr += ' interpolationQualifier="%s"' % self.interpolationQualifier
XMLList = [ '%s<%s%s>' % ( indent, self.moniker, attributeStr ) ]
if( self.isPrimaryXData( ) ) :
if( self.axes is not None ) : XMLList += self.axes.toXMLList( indent2 )
if( 'oneLine' not in kwargs ) :
if( self.dimension == 2 ) : kwargs['oneLine'] = True
if( formatVersion != formatVersionModule.version_1_10 ) : XMLList.append( '%s<%s>' % ( indent2, self.functionNdsName ) )
if( outline ) :
XMLList += self.__functionals[0].toXMLList( indent3, **kwargs )
XMLList += self.__functionals[1].toXMLList( indent3, **kwargs )
XMLList += [ '%s ... ' % indent3 ]
XMLList += self.__functionals[-2].toXMLList( indent3, **kwargs )
XMLList += self.__functionals[-1].toXMLList( indent3, **kwargs )
else :
for functional in self.__functionals : XMLList += functional.toXMLList( indent3, **kwargs )
if( formatVersion != formatVersionModule.version_1_10 ) : XMLList[-1] += "</%s>" % ( self.functionNdsName )
if( self.uncertainty ) : XMLList += self.uncertainty.toXMLList( indent2, **kwargs )
XMLList[-1] += '</%s>' % self.moniker
return( XMLList )
@classmethod
def parseXMLNode( cls, xDataElement, xPath, linkData, axes = None ) :
"""
Translates XYsnd XML into the python XYsnd xData class.
"""
xmlAttr = False
for attrName in ( 'outerDomainValue', 'label' ) :
if xDataElement.get(attrName) is not None:
xmlAttr = True
xPath.append( '%s[@%s="%s"]' % (xDataElement.tag, attrName, xDataElement.get(attrName) ) )
if( not xmlAttr ) : xPath.append( xDataElement.tag )
allowedSubElements = cls.allowedSubElements( )
attrs = { 'index' : None, 'valueType' : None, 'outerDomainValue' : None, 'label' : None,
'interpolation' : standardsModule.interpolation.linlinToken, 'interpolationQualifier' : standardsModule.interpolation.noneQualifierToken }
attributes = { 'index' : int, 'valueType' : str, 'outerDomainValue' : float, 'label' : str,
'interpolation' : str, 'interpolationQualifier' : str }
for key, item in list( xDataElement.items( ) ) :
if( key not in attributes ) : raise TypeError( 'Invalid attribute "%s"' % key )
attrs[key] = attributes[key]( item )
multid_xys = cls( axes = axes, **attrs )
        functionElements = []  # This supports both GNDS 1.10 and 2.0
functions = xDataElement.find( multid_xys.functionNdsName )
if( functions is not None ) :
for child in functions : functionElements.append( child )
for child in xDataElement :
if( child.tag == axesModule.axes.moniker ) :
multid_xys.axes = axesModule.axes.parseXMLNode( child, xPath, linkData )
elif( child.tag == uncertaintiesModule.uncertainty.moniker ) :
multid_xys.uncertainty = uncertaintiesModule.uncertainty.parseXMLNode( child, xPath, linkData )
elif( child.tag == multid_xys.functionNdsName ) :
continue
else :
if( functions is not None ) : raise Exception( 'Unsupported child name = "%s".' % child.tag )
functionElements.append( child )
for child in functionElements :
subElementClass = None
for subElement in allowedSubElements :
if( subElement.moniker == child.tag ) :
subElementClass = subElement
break
if( subElementClass is None ) : raise TypeError( 'unknown sub-element "%s" in element "%s"' % ( child.tag, cls.moniker ) )
xdata = subElementClass.parseXMLNode( child, xPath = xPath, linkData = linkData, axes = multid_xys.axes )
multid_xys.append( xdata )
xPath.pop( )
return( multid_xys )
@classmethod
def defaultAxes( cls, labelsUnits ) :
"""
:param labelsUnits: dictionary of form {
0:('dependent label','dependent unit'),
1:('1st independent label','1st independent unit'),
2:('2nd independent label','2nd independent unit'), ... }
:return: new axes instance
"""
return( axesModule.axes( rank = cls.dimension + 1, labelsUnits = labelsUnits ) )
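    # A minimal usage sketch with hypothetical labels/units (keys follow the
    # docstring above; the resulting rank is cls.dimension + 1):
    #
    #     axes = XYs2d.defaultAxes( { 0 : ( 'P(mu)', '' ), 1 : ( 'mu', '' ), 2 : ( 'energy_in', 'eV' ) } )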
class XYs2d( XYsnd ) :
moniker = 'XYs2d'
dimension = 2
@staticmethod
def allowedSubElements( ) :
return( ( XYsModule.XYs1d, series1dModule.series, regionsModule.regions1d, xs_pdf_cdfMoudle.xs_pdf_cdf1d ) )
class XYs3d( XYsnd ) :
moniker = 'XYs3d'
dimension = 3
@staticmethod
def allowedSubElements( ) :
return( ( XYs2d, regionsModule.regions2d ) )
| 49.482432 | 198 | 0.598984 | 3,247 | 36,617 | 6.68956 | 0.144133 | 0.020027 | 0.017126 | 0.015331 | 0.259104 | 0.213112 | 0.179412 | 0.133281 | 0.125363 | 0.121311 | 0 | 0.016971 | 0.293579 | 36,617 | 739 | 199 | 49.549391 | 0.822747 | 0.122757 | 0 | 0.182731 | 0 | 0.002008 | 0.054762 | 0.008356 | 0 | 0 | 0 | 0.002706 | 0 | 1 | 0.080321 | false | 0 | 0.022088 | 0.026104 | 0.126506 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a6430c564af2fe8b504ab18c9fbc24d3c163cb63 | 41,339 | py | Python | src/api/datahub/access/deploy_plan_views.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 84 | 2021-06-30T06:20:23.000Z | 2022-03-22T03:05:49.000Z | src/api/datahub/access/deploy_plan_views.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 7 | 2021-06-30T06:21:16.000Z | 2022-03-29T07:36:13.000Z | src/api/datahub/access/deploy_plan_views.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 40 | 2021-06-30T06:21:26.000Z | 2022-03-29T12:42:26.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from common.auth import perm_check
from common.decorators import detail_route
from common.exceptions import ValidationError
from common.log import logger
from common.views import APIViewSet
from datahub.access import settings
from datahub.access.models import AccessOperationLog, AccessRawData, DatabusChannel
from datahub.access.raw_data import rawdata
from datahub.access.serializers import (
BaseSerializer,
CollectDeleteSerializer,
DeployPlanListByParamSerializer,
RetriveSerializer,
UpdateBaseSerializer,
)
from datahub.access.utils import kafka_tool
from django.utils.translation import ugettext as _
from rest_framework.response import Response
from ..common.const import BK_BIZ_ID, DATA_SCENARIO, ID, OFFLINEFILE, RAW_DATA_IDS
from .collectors.factory import CollectorFactory
class CollectorDeployPlanViewSet(APIViewSet):
lookup_field = "raw_data_id"
def list(self, request):
"""
        @api {get} v3/access/deploy_plan/?bk_biz_id=&data_scenario=log&raw_data_ids=XX&raw_data_ids=XX get the raw data list
        @apiGroup RawData
        @apiDescription Get the list of deploy plan details.
        @apiParam {list} [raw_data_ids] list of raw data IDs
        @apiParam {int{valid CMDB business ID}} [bk_biz_id] business ID. Raw data belongs to the given business; the business ID can later be used to list its raw data.
        @apiParam {string{access scenario}} [data_scenario] access scenario
        @apiError (error code) 1500001 <code>Parameter</code> validation failed.
        @apiError (error code) 1500500 <code>Service error</code>
        @apiParamExample {json} Request example:
http://x.x.x.x:xxxx/v3/access/deploy_plan/?bk_biz_id=&data_scenario=log&raw_data_ids=XX&raw_data_ids=XX
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": [
{
"bk_biz_id": 591,
"created_at": "2018-11-07T10:04:57",
"data_source": "svr",
"maintainer": "admin",
"updated_by": null,
"data_category_alias": "",
"raw_data_name": "http_test",
"topic": "http_test591",
"storage_partitions": 1,
"sensitivity": "private",
"storage_channel_id": 11,
"data_encoding": "UTF-8",
"raw_data_alias": "中文名",
"updated_at": "2018-11-07T14:21:28",
"bk_app_code": "bk_dataweb",
"data_scenario": "http",
"created_by": "admin",
"data_category": "",
"data_scenario_alias": null,
"data_source_alias": "svr",
"id": 287,
"description": "这里填写描述"
}
],
"result": true
}
"""
collector_factory = CollectorFactory.get_collector_factory()
param = self.params_valid(serializer=DeployPlanListByParamSerializer)
raw_data_ids = param.get(RAW_DATA_IDS)
data_scenario = param.get(DATA_SCENARIO, None)
bk_biz_id = param.get(BK_BIZ_ID, None)
res_list = []
if bk_biz_id and data_scenario:
filterd_raw_data_ids = AccessRawData.objects.filter(
id__in=raw_data_ids, data_scenario=data_scenario, bk_biz_id=bk_biz_id
).values_list(ID, flat=True)
elif bk_biz_id:
filterd_raw_data_ids = AccessRawData.objects.filter(id__in=raw_data_ids, bk_biz_id=bk_biz_id).values_list(
ID, flat=True
)
elif data_scenario:
filterd_raw_data_ids = AccessRawData.objects.filter(
id__in=raw_data_ids, data_scenario=data_scenario
).values_list(ID, flat=True)
else:
filterd_raw_data_ids = raw_data_ids
        # optimize response speed for the file scenario
if data_scenario and data_scenario == OFFLINEFILE:
collector = collector_factory.get_collector_by_data_scenario(data_scenario)
deploy_plan_list = collector(raw_data_id=filterd_raw_data_ids[0], show_display=1).get_by_list(
filterd_raw_data_ids
)
return Response(deploy_plan_list)
for raw_data_id in filterd_raw_data_ids:
try:
if data_scenario:
collector = collector_factory.get_collector_by_data_scenario(data_scenario)
else:
collector = collector_factory.get_collector_by_data_id(int(raw_data_id))
deploy_plan = collector(raw_data_id=raw_data_id, show_display=1).get()
res_list.append(deploy_plan)
except Exception as e:
logger.warning("raw_data_id {} get deploy_plan failed, {}".format(raw_data_id, e))
return Response(res_list)
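    # A minimal client-side sketch of the list endpoint above (host/port are
    # placeholders and `requests` is assumed in the caller's environment):
    #
    #     import requests
    #     resp = requests.get(
    #         "http://x.x.x.x:xxxx/v3/access/deploy_plan/",
    #         params={"bk_biz_id": 591, "data_scenario": "log", "raw_data_ids": [1, 2]},
    #     )
    #     deploy_plans = resp.json()["data"]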
def create(self, request):
"""
        @api {post} v3/access/deploy_plan/ submit a log access deploy plan
        @apiName create_deploy_plan_log
        @apiGroup CollectorDeployPlan
        @apiDescription Submit an access deploy plan for the log scenario.
        @apiParam {string{valid BlueKing app code}} bk_app_code BlueKing app code
        @apiParam {string{valid BlueKing username}} bk_username username. The user must have permission on the business <code>bk_biz_id</code>.
        @apiParam {int{valid CMDB business ID}} bk_biz_id business ID. Raw data belongs to the given business; the business ID can later be used to list its raw data.
        @apiParam {string{valid access scenario}} data_scenario access scenario
        @apiParam {string} [description] remarks on the accessed data
        @apiParam {dict} access_raw_data raw data information
        @apiParam {string{unique, fewer than 15 characters, matching regex '^[a-zA-Z][a-zA-Z0-9_]*$'}} raw_data_name English identifier of the data. It must be unique within the business;
            creating a duplicate raises an error. The field is used to create the corresponding channel in the message queue.
        @apiParam {string{fewer than 15 characters}} raw_data_alias data alias (display name). The alias is used when presenting the data, so choose a readable name.
        @apiParam {string{valid data source}} data_source data source
        @apiParam {string{valid character encoding}} data_encoding character encoding
        @apiParam {string{valid sensitivity flag}} sensitivity sensitivity; used as the basis for data access permission review
        @apiParam {string{valid data maintainer}} maintainer data maintainer
        @apiParam {string{fewer than 100 characters}} [description] raw data description
        @apiParam {dict} access_conf_info access configuration
        @apiParam {dict} collection_model collection model
        @apiParam {string{"incr": incremental, "all": full (not supported yet)}} collection_type access type
        @apiParam {int} start_at start position; for realtime collection, 0 means existing (stock) data is accessed as well
        @apiParam {int{-1: realtime, 0: one-off, n: periodic}} period collection period in seconds
        @apiParam {dict} filters access filter conditions
        @apiParam {string{valid delimiter}} delimiter delimiter
        @apiParam {dict{must be a valid dict, see the request example}} fields filter conditions
        @apiParam {dict} resource access target resources
        @apiParam {array{must be a valid array; multiple access targets are supported, see the request example}} scope access targets
        @apiError (error code) 1500001 <code>Parameter</code> validation failed.
        @apiError (error code) 1500500 <code>Service error</code> .
        @apiError (error code) 1500405 <code>Invalid request method</code> .
        @apiParamExample {json} Log access request example
{
"bk_app_secret": "xxx",
"bk_app_code": "bk_dataweb",
"bk_username": "xxxx",
"data_scenario": "log",
"bk_biz_id": 591,
"description": "xx",
"access_raw_data": {
"raw_data_name": "log_new_00011",
"maintainer": "xxxx",
"raw_data_alias": "asdfsaf",
"data_source": "svr",
"data_encoding": "UTF-8",
"sensitivity": "private",
"description": "xx"
},
"access_conf_info": {
"collection_model": {
"collection_type": "incr",
"start_at": 1,
"period": 0
},
"filters": {
"delimiter": "|",
"fields": [{
"index": 1,
"op": "=",
"logic_op": "and",
"value": "111"
}]
},
"resource": {
"scope": [
{
"module_scope": [{
"bk_obj_id": "set",
"bk_inst_id": 123
}],
"host_scope": [{
"bk_cloud_id": 1,
"ip": "x.x.x.x"
}],
"scope_config": {
"paths": [
{
"system":"linux",
"path":[
"/tmp/*.aaaz",
"/tmp/*.l"
]
},
{
"system":"windows",
"path":[
"c:/tmp/*.aaaz",
"c:/tmp/*.l"
]
}
]
}
}]
}
}
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"raw_data_id": 263
},
"result": true
}
"""
"""
        @api {post} v3/access/deploy_plan/ submit an http access deploy plan
        @apiName create_deploy_plan_http
        @apiGroup CollectorDeployPlan
        @apiDescription Submit an access deploy plan for the http scenario.
        @apiParam {string{valid BlueKing app code}} bk_app_code BlueKing app code
        @apiParam {string{valid BlueKing username}} bk_username username. The user must have permission on the business <code>bk_biz_id</code>.
        @apiParam {int{valid CMDB business ID}} bk_biz_id business ID. Raw data belongs to the given business; the business ID can later be used to list its raw data.
        @apiParam {string{valid access scenario}} data_scenario access scenario
        @apiParam {string} [description] remarks on the accessed data
        @apiParam {dict} access_raw_data raw data information
        @apiParam {string{unique, fewer than 15 characters, matching regex '^[a-zA-Z][a-zA-Z0-9_]*$'}} raw_data_name English identifier of the data. It must be unique within the business;
            creating a duplicate raises an error. The field is used to create the corresponding channel in the message queue.
        @apiParam {string{fewer than 15 characters}} raw_data_alias data alias (display name). The alias is used when presenting the data, so choose a readable name.
        @apiParam {string{valid data source}} data_source data source
        @apiParam {string{valid character encoding}} data_encoding character encoding
        @apiParam {string{valid sensitivity flag}} sensitivity sensitivity; used as the basis for data access permission review
        @apiParam {string{valid data maintainer}} maintainer data maintainer
        @apiParam {string{fewer than 100 characters}} [description] raw data description
        @apiParam {dict} access_conf_info access configuration
        @apiParam {dict} collection_model collection model
        @apiParam {string{"pull": pull, "push": push}} collection_type collection method
        @apiParam {string} time_format time format
        @apiParam {string} increment_field time field(s); separate multiple fields with ','
        @apiParam {int{-1: realtime, 0: one-off, n: periodic}} period collection period in seconds
        @apiParam {dict} [filters] access filter conditions
        @apiParam {string{valid delimiter}} [delimiter] delimiter
        @apiParam {dict{must be a valid dict, see the request example}} [fields] filter conditions
        @apiParam {dict} resource access target resources
        @apiParam {array{must be a valid array; multiple access targets are supported, see the request example}} scope access targets
        @apiParam {string} url access URL
        @apiParam {string{"post": POST request, "get": GET request}} method request method
        @apiParam {string{JSON; required when method is post}} [body] request body
        @apiError (error code) 1500001 <code>Parameter</code> validation failed.
        @apiError (error code) 1500500 <code>Service error</code> .
        @apiError (error code) 1500405 <code>Invalid request method</code> .
        @apiParamExample {json} HTTP access request example
{
"bk_app_code": "bk_dataweb",
"bk_username": ",admin",
"data_scenario": "http",
"bk_biz_id": 591,
"description": "xx",
"access_raw_data": {
"raw_data_name": "http_new_0x03",
"maintainer": "xxxx",
"raw_data_alias": "asdfsaf",
"data_source": "svr",
"data_encoding": "UTF-8",
"sensitivity": "private",
"description": "xx"
},
"access_conf_info": {
"collection_model": {
"collection_type": "pull",
"period": 0,
"time_format": "yyyy-MM-dd HH:mm:ss",
"increment_field":"created_at"
},
"filters": {
"delimiter": "|",
"fields": [{
"index": 1,
"op": "=",
"logic_op": "and",
"value": "111"
}]
},
"resource": {
"scope":[{
"url":"http://x.x.x.x/v3/access/rawdata/?created_at=<begin>&page_size=1&page=1",
"method":"get"
}]
}
}
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"raw_data_id": 263
},
"result": true
}
"""
"""
        @api {post} v3/access/deploy_plan/ submit a db access deploy plan
        @apiName create_deploy_plan_db
        @apiGroup CollectorDeployPlan
        @apiDescription Submit an access deploy plan for the db scenario.
        @apiParam {string{valid BlueKing app code}} bk_app_code BlueKing app code
        @apiParam {string{valid BlueKing username}} bk_username username. The user must have permission on the business <code>bk_biz_id</code>.
        @apiParam {int{valid CMDB business ID}} bk_biz_id business ID. Raw data belongs to the given business; the business ID can later be used to list its raw data.
        @apiParam {string{valid access scenario}} data_scenario access scenario
        @apiParam {string} [description] remarks on the accessed data
        @apiParam {dict} access_raw_data raw data information
        @apiParam {string{unique, fewer than 15 characters, matching regex '^[a-zA-Z][a-zA-Z0-9_]*$'}} raw_data_name English identifier of the data. It must be unique within the business;
            creating a duplicate raises an error. The field is used to create the corresponding channel in the message queue.
        @apiParam {string{fewer than 15 characters}} raw_data_alias data alias (display name). The alias is used when presenting the data, so choose a readable name.
        @apiParam {string{valid data source}} data_source data source
        @apiParam {string{valid character encoding}} data_encoding character encoding
        @apiParam {string{valid sensitivity flag}} sensitivity sensitivity; used as the basis for data access permission review
        @apiParam {string{valid data maintainer}} maintainer data maintainer
        @apiParam {string{fewer than 100 characters}} [description] raw data description
        @apiParam {dict} access_conf_info access configuration
        @apiParam {dict} collection_model collection model
        @apiParam {string{"pri": primary key, "all": full, "time": time range}} collection_type access mode
        @apiParam {string} time_format time format
        @apiParam {string} increment_field incremental field
        @apiParam {int{-1: realtime, 0: one-off, n: periodic}} period collection period in seconds
        @apiParam {int} start_at start position; for realtime collection, 0 means existing (stock) data is accessed as well
        @apiParam {int} before_time data delay time
        @apiParam {dict} [filters] access filter conditions
        @apiParam {string{valid delimiter}} [delimiter] delimiter
        @apiParam {dict{must be a valid dict, see the request example}} [fields] filter conditions
        @apiParam {dict} resource access target resources
        @apiParam {array{must be a valid array, see the request example}} scope access targets
        @apiParam {string{IP or domain}} db_host DB host
        @apiParam {int} db_port DB port
        @apiParam {string} db_user DB username
        @apiParam {string} db_pass DB password
        @apiParam {string} db_name DB database name
        @apiParam {string} table_name DB table name
        @apiParam {int} db_type_id DB type ID
        @apiError (error code) 1500001 <code>Parameter</code> validation failed.
        @apiError (error code) 1500500 <code>Service error</code> .
        @apiError (error code) 1500405 <code>Invalid request method</code> .
        @apiParamExample {json} DB access request example
{
"bk_app_code": "bk_dataweb",
"bk_username": "admin",
"data_scenario": "db",
"bk_biz_id": 591,
"description": "xx",
"access_raw_data": {
"raw_data_name": "db_new_04",
"maintainer": "xxxx",
"raw_data_alias": "asdfsaf",
"data_source": "svr",
"data_encoding": "UTF-8",
"sensitivity": "private",
"description": "xx"
},
"access_conf_info": {
"collection_model": {
"collection_type": "time",
"start_at": 0,
"period": 0,
"time_format":"yyyy-MM-dd HH:mm:ss",
"increment_field":"create_at",
"before_time":100
},
"filters": {
"delimiter": "|",
"fields": [{
"index": 1,
"op": "=",
"logic_op": "and",
"value": "111"
}]
},
"resource": {
"scope":[{
"db_host": "x.x.x.x",
"db_port": 10000,
"db_user": "user",
"db_pass": "pwd",
"db_name": "bkdata_basic",
"table_name": "access_db_info",
"db_type_id": 1
}]
}
}
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"raw_data_id": 263
},
"result": true
}
"""
"""
        @api {post} v3/access/deploy_plan/ submit a script access deploy plan
        @apiName create_deploy_plan_script
        @apiGroup CollectorDeployPlan
        @apiDescription Submit an access deploy plan for the script scenario.
        @apiParam {string{valid BlueKing app code}} bk_app_code BlueKing app code
        @apiParam {string{valid BlueKing username}} bk_username username. The user must have permission on the business <code>bk_biz_id</code>.
        @apiParam {int{valid CMDB business ID}} bk_biz_id business ID. Raw data belongs to the given business; the business ID can later be used to list its raw data.
        @apiParam {string{valid access scenario}} data_scenario access scenario
        @apiParam {string} [description] remarks on the accessed data
        @apiParam {dict} access_raw_data raw data information
        @apiParam {string{unique, fewer than 15 characters, matching regex '^[a-zA-Z][a-zA-Z0-9_]*$'}} raw_data_name English identifier of the data. It must be unique within the business;
            creating a duplicate raises an error. The field is used to create the corresponding channel in the message queue.
        @apiParam {string{fewer than 15 characters}} raw_data_alias data alias (display name). The alias is used when presenting the data, so choose a readable name.
        @apiParam {string{valid data source}} data_source data source
        @apiParam {string{valid character encoding}} data_encoding character encoding
        @apiParam {string{valid sensitivity flag}} sensitivity sensitivity; used as the basis for data access permission review
        @apiParam {string{valid data maintainer}} maintainer data maintainer
        @apiParam {string{fewer than 100 characters}} [description] raw data description
        @apiParam {dict} access_conf_info access configuration
        @apiParam {int{-1: realtime, 0: one-off, n: periodic}} period collection period in seconds
        @apiParam {dict} [filters] access filter conditions
        @apiParam {string{valid delimiter}} delimiter delimiter
        @apiParam {dict{must be a valid dict, see the request example}} [fields] filter conditions
        @apiParam {dict} resource access target resources
        @apiParam {array{must be a valid array; multiple access targets are supported, see the request example}} scope access targets
        @apiParam {array} module_scope modules
        @apiParam {array} host_scope IPs
        @apiParam {dict} scope_config script configuration
        @apiError (error code) 1500001 <code>Parameter</code> validation failed.
        @apiError (error code) 1500500 <code>Service error</code> .
        @apiError (error code) 1500405 <code>Invalid request method</code> .
        @apiParamExample {json} Script access request example
{
"bk_app_code": "bk_dataweb",
"bk_username": "xxxx",
"data_scenario": "script",
"bk_biz_id": 2,
"description": "xx",
"access_raw_data": {
"raw_data_name": "script_07",
"maintainer": "xxxx",
"raw_data_alias": "asdfsaf",
"data_source": "svr",
"data_encoding": "UTF-8",
"sensitivity": "private",
"description": "xx"
},
"access_conf_info": {
"collection_model": {
"period": 10
},
"filters": {
"delimiter": "|",
"fields": []
},
"resource": {
"scope": [
{
"module_scope": [{
"bk_obj_id": "set",
"bk_inst_id": 123
}],
"host_scope": [{
"bk_cloud_id": 1,
"ip": "x.x.x.x"
}],
"scope_config": {
"content":"xxxx"
}
}]
}
}
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"raw_data_id": 263
},
"result": true
}
"""
"""
        @api {post} v3/access/deploy_plan/ submit a file-upload access deploy plan
        @apiName create_deploy_plan_file
        @apiGroup CollectorDeployPlan
        @apiDescription Submit an access deploy plan for file upload.
        @apiParam {string{valid BlueKing app code}} bk_app_code BlueKing app code
        @apiParam {string{valid BlueKing username}} bk_username username. The user must have permission on the business <code>bk_biz_id</code>.
        @apiParam {int{valid CMDB business ID}} bk_biz_id business ID. Raw data belongs to the given business; the business ID can later be used to list its raw data.
        @apiParam {string{valid access scenario}} data_scenario access scenario
        @apiParam {string} [description] remarks on the accessed data
        @apiParam {dict} access_raw_data raw data information
        @apiParam {string{unique, fewer than 15 characters, matching regex '^[a-zA-Z][a-zA-Z0-9_]*$'}} raw_data_name English identifier of the data. It must be unique within the business;
            creating a duplicate raises an error. The field is used to create the corresponding channel in the message queue.
        @apiParam {string{fewer than 15 characters}} raw_data_alias data alias (display name). The alias is used when presenting the data, so choose a readable name.
        @apiParam {string{valid data source}} data_source data source
        @apiParam {string{valid character encoding}} data_encoding character encoding
        @apiParam {string{valid sensitivity flag}} sensitivity sensitivity; used as the basis for data access permission review
        @apiParam {string{valid data maintainer}} maintainer data maintainer
        @apiParam {string{fewer than 100 characters}} [description] raw data description
        @apiParam {dict} access_conf_info access configuration
        @apiParam {dict} collection_model collection model
        @apiParam {string{"incr": incremental, "all": full (not supported yet)}} collection_type access type
        @apiParam {int{-1: realtime, 0: one-off, n: periodic}} period collection period in seconds
        @apiParam {dict} [filters] access filter conditions
        @apiParam {string{valid delimiter}} [delimiter] delimiter
        @apiParam {dict{must be a valid dict, see the request example}} [fields] filter conditions
        @apiParam {dict} resource access target resources
        @apiParam {array{must be a valid array; multiple access targets are supported, see the request example}} scope access targets
        @apiParam {string} file_name file name
        @apiError (error code) 1500001 <code>Parameter</code> validation failed.
        @apiError (error code) 1500500 <code>Service error</code> .
        @apiError (error code) 1500405 <code>Invalid request method</code> .
        @apiParamExample {json} File upload access request example
{
"bk_app_code": "bk_dataweb",
"bk_username": "admin",
"data_scenario": "file",
"bk_biz_id": 2,
"description": "xx",
"access_raw_data": {
"raw_data_name": "file_223",
"maintainer": "xxxx",
"raw_data_alias": "asdfsaf",
"data_source": "svr",
"data_encoding": "UTF-8",
"sensitivity": "private",
"description": "xx"
},
"access_conf_info": {
"collection_model": {
"collection_type": "incr",
"period": 0
},
"filters": {
"delimiter": "|",
"fields": []
},
"resource": {
"scope": [{
"file_name":"file_591_test_xls测试2.csv"
}]
}
}
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"raw_data_id": 263
},
"result": true
}
"""
"""
        @api {post} v3/access/deploy_plan/ submit an offline file-upload access deploy plan
        @apiName create_deploy_plan_offlinefile
        @apiGroup CollectorDeployPlan
        @apiDescription Submit an access deploy plan for offline file upload.
        @apiParam {string{valid BlueKing app code}} bk_app_code BlueKing app code
        @apiParam {string{valid BlueKing username}} bk_username username. The user must have permission on the business <code>bk_biz_id</code>.
        @apiParam {int{valid CMDB business ID}} bk_biz_id business ID. Raw data belongs to the given business; the business ID can later be used to list its raw data.
        @apiParam {string{valid access scenario}} data_scenario access scenario
        @apiParam {string} [description] remarks on the accessed data
        @apiParam {dict} access_raw_data raw data information
        @apiParam {string{unique, fewer than 15 characters, matching regex '^[a-zA-Z][a-zA-Z0-9_]*$'}} raw_data_name English identifier of the data. It must be unique within the business;
            creating a duplicate raises an error. The field is used to create the corresponding channel in the message queue.
        @apiParam {string{fewer than 15 characters}} raw_data_alias data alias (display name). The alias is used when presenting the data, so choose a readable name.
        @apiParam {string{valid data source}} data_source data source
        @apiParam {string{valid character encoding}} data_encoding character encoding
        @apiParam {string{valid sensitivity flag}} sensitivity sensitivity; used as the basis for data access permission review
        @apiParam {string{valid data maintainer}} maintainer data maintainer
        @apiParam {string{fewer than 100 characters}} [description] raw data description
        @apiParam {dict} access_conf_info access configuration
        @apiParam {dict} collection_model collection model
        @apiParam {string{"incr": incremental, "all": full (not supported yet)}} collection_type access type
        @apiParam {int{-1: realtime, 0: one-off, n: periodic}} period collection period in seconds
        @apiParam {dict} [filters] access filter conditions
        @apiParam {string{valid delimiter}} [delimiter] delimiter
        @apiParam {dict{must be a valid dict, see the request example}} [fields] filter conditions
        @apiParam {dict} resource access target resources
        @apiParam {array{must be a valid array; multiple access targets are supported, see the request example}} scope access targets
        @apiParam {string} file_name file name
        @apiError (error code) 1500001 <code>Parameter</code> validation failed.
        @apiError (error code) 1500500 <code>Service error</code> .
        @apiError (error code) 1500405 <code>Invalid request method</code> .
        @apiParamExample {json} Offline file upload access request example
{
"bk_app_code": "bk_dataweb",
"bk_username": "admin",
"data_scenario": "offlinefile",
"bk_biz_id": 2,
"description": "xx",
"access_raw_data": {
"raw_data_name": "file_223",
"maintainer": "xxxx",
"raw_data_alias": "asdfsaf",
"data_source": "svr",
"data_encoding": "UTF-8",
"sensitivity": "private",
"description": "xx"
},
"access_conf_info": {
"collection_model": {
"collection_type": "incr",
"period": 0
},
"filters": {
"delimiter": "|",
"fields": []
},
"resource": {
"scope": [{
"file_name":"file_591_test_xls测试2.csv"
}]
}
}
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"raw_data_id": 263
},
"result": true
}
"""
logger.info("deploy_plan: basic parameter verification starts")
params = self.params_valid(serializer=BaseSerializer)
collector_factory = CollectorFactory.get_collector_factory()
collector = collector_factory.get_collector_by_data_scenario(params["data_scenario"])(access_param=request.data)
logger.info("deploy_plan: basic parameter verification starts")
collector.valid_access_param()
raw_data_id = collector.update_or_create()
return Response({"raw_data_id": raw_data_id})
@perm_check("raw_data.retrieve")
def retrieve(self, request, raw_data_id):
"""
        @api {get} v3/access/deploy_plan/:raw_data_id/ query a deploy plan
        @apiName retrieve_deploy_plan
        @apiGroup CollectorDeployPlan
        @apiParam {int} raw_data_id raw data ID.
        @apiParam {boolean} show_display whether to return details
        @apiSuccessExample {json} Success response:
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"bk_biz_id": 2,
"description": "xx",
"data_scenario": "log",
"bk_app_code": "bk_dataweb",
"access_raw_data": {
"bk_biz_id": 2,
"data_source": "svr",
"maintainer": ",admin",
"updated_by": null,
"raw_data_name": "log_new_00010",
"storage_partitions": 1,
"created_at": "2018-11-22T16:38:07",
"storage_channel_id": 1000,
"data_encoding": "UTF-8",
"raw_data_alias": "asdfsaf",
"updated_at": null,
"bk_app_code": "bk_dataweb",
"data_scenario": "log",
"created_by": ",admin",
"data_category": "",
"id": 302,
"sensitivity": "private",
"description": "xx"
},
"access_conf_info": {
"collection_model": {
"start_at": 1,
"increment_field": null,
"period": 0,
"time_format": null,
"collection_type": "incr",
"before_time": null
},
"resource": {
"scope": [
{
"deploy_plan_id": 18,
"scope_config": {
"paths": [
"/tmp/*.log",
"/tmp/*.l",
"/tmp/*.aaaz"
]
},
"module_scope": [
{
"bk_obj_id": "set",
"bk_inst_id": 123
}
],
"host_scope": [
{
"ip": "x.x.x.x",
"bk_cloud_id": 1
}
]
}
]
},
"filters": {
"fields": [
{
"index": 1,
"logic_op": "and",
"value": "111",
"op": "="
}
],
"delimiter": "|"
}
}
},
"result": true
}
"""
param = {
"raw_data_id": int(raw_data_id),
"bk_username": self.request.query_params.get("bk_username"),
"show_display": 0
if not self.request.query_params.get("show_display")
else self.request.query_params.get("show_display"),
}
serializer = RetriveSerializer(data=param)
if not serializer.is_valid():
            raise ValidationError(message=_(u"Parameter validation failed: raw_data_id, show_display, bk_username"))
collector_factory = CollectorFactory.get_collector_factory()
data = collector_factory.get_collector_by_data_id(raw_data_id)(
raw_data_id=raw_data_id, show_display=int(param["show_display"])
).get()
return Response(data)
@perm_check("raw_data.update")
def update(self, request, raw_data_id):
"""
        @api {put} /v3/access/deploy_plan/:raw_data_id/ update an access deploy plan
        @apiName update_deploy_plan
        @apiGroup CollectorDeployPlan
        @apiParam {int} raw_data_id raw data ID.
        @apiParamExample {json} Log access request example
{
"bk_app_code": "bk_dataweb",
"bk_username": ",admin",
"data_scenario": "db",
"bk_biz_id": 2,
"description": "xx",
"access_raw_data": {
"raw_data_name": "log_new_00007",
"maintainer": "xxxx",
"data_scenario": "log",
"raw_data_alias": "asdfsaf",
"data_source": "svr",
"data_encoding": "UTF-8",
"sensitivity": "private",
"description": "xx"
},
"access_conf_info": {
"collection_model": {
"collection_type": "incr",
"start_at": 11111,
"period": 0
},
"filters": {
"delimiter": "|",
"fields": [{
"index": 1,
"op": "=11111",
"logic_op": "and",
"value": "111"
}]
},
"resource": {
"scope": [
{
"deploy_plan_id":2,
"module_scope": [{
"bk_obj_id": "xxxx",
"bk_inst_id": 123
}],
"host_scope": [{
"bk_cloud_id": 222,
"ip": "x.x.x.x"
}],
"scope_config": {
"paths": [
"/tmp/*.log",
"/tmp/*.l",
"/tmp/*.aaaz"
]
}
},
{
"module_scope": [{
"bk_obj_id": "xxxx",
"bk_inst_id": 123
}],
"host_scope": [{
"bk_cloud_id": 222,
"ip": "x.x.x.x"
}],
"scope_config": {
"paths": [
"/tmp/*.log",
"/tmp/*.l",
"/tmp/*.aaaz"
]
}
}]
}
}
}
        @apiSuccessExample {json} Success response:
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"raw_data_id": 263
},
"result": true
}
"""
data = request.data
data["raw_data_id"] = int(raw_data_id)
params = self.params_valid(serializer=UpdateBaseSerializer)
access_raw_data = params["access_raw_data"]
access_raw_data["updated_by"] = params["bk_username"]
collector_factory = CollectorFactory.get_collector_factory()
collector = collector_factory.get_collector_by_data_scenario(data["data_scenario"])(
access_param=params, raw_data_id=raw_data_id
)
collector.valid_access_param()
raw_data_id = collector.update_or_create()
return Response({"raw_data_id": raw_data_id})
@perm_check("raw_data.delete")
def delete(self, request, raw_data_id):
"""
        @api {delete} /v3/access/deploy_plan/:raw_data_id/ delete a deploy plan
        @apiName delete_deploy_plan
        @apiGroup CollectorDeployPlan
        @apiParam {int} raw_data_id raw data ID.
        @apiParamExample {json} Request example
{
"bk_app_code": "bk_dataweb",
"bk_username": ",admin",
"bk_biz_id": 2,
"force": true
}
        @apiSuccessExample {json} Success response:
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": "ok",
"result": true
}
"""
raw_data_id = int(raw_data_id)
logger.info("delete collector raw_data %d" % raw_data_id)
param = self.params_valid(serializer=CollectDeleteSerializer)
# query data_scenario
try:
raw_data = AccessRawData.objects.get(id=raw_data_id)
except AccessRawData.DoesNotExist:
return Response("ok")
data_scenario = raw_data.data_scenario
collector_factory = CollectorFactory.get_collector_factory()
collector = collector_factory.get_collector_by_data_scenario(data_scenario)(access_param=request.data)
# stop collector
collector.delete(raw_data_id, param["force"])
# delete raw_data
rawdata.delete_raw_data(raw_data_id)
# delete topic
topic = "%s%d" % (raw_data.raw_data_name, raw_data.bk_biz_id)
databus_channel = DatabusChannel.objects.get(id=raw_data.storage_channel_id)
bootstrap_server = "{}:{}".format(
databus_channel.cluster_domain,
databus_channel.cluster_port,
)
kafka_tool.delete_topic(bootstrap_server, topic)
return Response("ok")
@detail_route(methods=["get"])
def history(self, request, raw_data_id):
"""
        @api {get} v3/access/deploy_plan/:raw_data_id/history/ access change history
        @apiName deploy_plan_history
        @apiGroup CollectorDeployPlan
        @apiParam {int} raw_data_id raw data ID.
        @apiParamExample {json} Request example:
        v3/access/deploy_plan/1/history/
        @apiSuccessExample {json} Success response:
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": [
{
"updated_by": null,
"created_at": "2018-10-30T14:58:23",
"args": {
"log": "xxxx",
"scope_config": ""
},
"updated_at": null,
"created_by": "admin",
"raw_data_id": 1,
"status": "success",
"id": 2,
"description": "xxxx"
}
],
"result": true
}
"""
        # parameter validation
result = AccessOperationLog.objects.filter(raw_data_id=raw_data_id).order_by("-created_at")
log_list = result.values()
for log in log_list:
            # if a single history record is corrupt, ignore the exception so the overall response is unaffected
try:
log["args"] = json.loads(log["args"])
except Exception as e:
logger.error("raw_data_id {} AccessOperationLog args parse json failed, {}".format(raw_data_id, e))
continue
log["args"]["log"] = _(log["args"].get("log", ""))
if log.get("created_at"):
log["created_at"] = log.get("created_at", "").strftime(settings.DATA_TIME_FORMAT)
if log.get("updated_at"):
log["updated_at"] = log.get("updated_at", "").strftime(settings.DATA_TIME_FORMAT)
return Response(log_list)
@detail_route(methods=["get"], url_path="status")
def status_check(self, request, raw_data_id):
"""
        @api {get} v3/access/deploy_plan/:op_log_id/status/?status="xx" update the access status
        @apiName deploy_plan_status
        @apiGroup CollectorDeployPlan
        @apiParam {int} raw_data_id operation log ID (passed in the raw_data_id URL slot).
        @apiParamExample {json} Request example:
        v3/access/deploy_plan/1/status/
        @apiSuccessExample {json} Success response:
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": null,
"result": true
}
"""
        # parameter validation
AccessOperationLog.objects.filter(id=raw_data_id).update(status=self.request.query_params["status"])
return Response()
| 38.419145 | 120 | 0.497037 | 3,813 | 41,339 | 5.158668 | 0.137162 | 0.048754 | 0.02608 | 0.013726 | 0.700305 | 0.668378 | 0.647789 | 0.626894 | 0.604321 | 0.591917 | 0 | 0.02363 | 0.389874 | 41,339 | 1,075 | 121 | 38.454884 | 0.756245 | 0.3189 | 0 | 0.195804 | 0 | 0 | 0.085745 | 0.006596 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048951 | false | 0 | 0.104895 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a649eb74dba0e4238520088fa3858aec2514206f | 2,264 | py | Python | j.py | thatch/cloudmesh-iu | 903b897e8234904ea4637f6783e6c53fb9a3c7a8 | [
"Apache-2.0"
] | null | null | null | j.py | thatch/cloudmesh-iu | 903b897e8234904ea4637f6783e6c53fb9a3c7a8 | [
"Apache-2.0"
] | null | null | null | j.py | thatch/cloudmesh-iu | 903b897e8234904ea4637f6783e6c53fb9a3c7a8 | [
"Apache-2.0"
] | null | null | null | from cloudmesh.common.Shell import Shell
import subprocess
import asyncio
import sys
from subprocess import PIPE, Popen
import threading
from queue import Queue, Empty
import time
import os
import shlex
host = "r-003"
port = 9010
command = f'ssh juliet "ssh {host} ./ENV3/bin/jupyter-lab --ip localhost --port {port}"'
# command = f'ssh juliet "ssh {host} ./ENV3/bin/jupyter-lab --ip 0.0.0.0 --port {port}"'
# command = f'jupyter-lab --ip localhost --port {port} --no-browser'
localcommand = "ssh -L 9000:{host}:9000 -i {file} juliet" # juliet = <username>@juliet.futuresystems.org
def live(command):
file = None
localhost = None
process = subprocess.Popen(shlex.split(command),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
printed = False
while True:
output = process.stderr.readline()
if output == b'' and process.poll() is not None:
break
if "file://" in str(output):
file = output.strip().decode(encoding="utf-8")
if "localhost" in str(output):
localhost = output.strip().decode(encoding="utf-8")
if file is not None and localhost is not None and not printed:
print('File:', file)
localhost = "http://" + localhost.split("http://")[1]
print('Localhost:', localhost)
printed = True
            # start the ssh tunnel so the printed localhost URL is reachable locally
            jupyter = localcommand.format(host=host, port=port)
            print (jupyter)
            os.system(jupyter)
rc = process.poll()
live(command)
"""
print (' '.join(c))
import subprocess
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while proc.poll() is None:
output = proc.stdout.readline()
e = proc.stderr.readline()
print (output)
print (e)
"""
"""
# os.system(f'ssh juliet "ssh {host} ./ENV3/bin/jupyter-lab --ip localhost --port {port}"')
p = await asyncio.create_subprocess_exec("ssh", "juliet",
f"ssh {host} ./ENV3/bin/jupyter-lab --ip localhost --port {port}",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
output, errors = p.communicate()
print (output)
print (errors)
""" | 29.789474 | 104 | 0.618816 | 282 | 2,264 | 4.957447 | 0.312057 | 0.060086 | 0.042918 | 0.040057 | 0.293276 | 0.293276 | 0.186695 | 0.142346 | 0.142346 | 0.142346 | 0 | 0.015187 | 0.243816 | 2,264 | 76 | 105 | 29.789474 | 0.801402 | 0.09364 | 0 | 0 | 0 | 0.025641 | 0.127088 | 0.015977 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0.25641 | 0 | 0.282051 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a64bbe192a36abeb0640fa965fed32d5ef6bad50 | 297 | py | Python | lib/data_coll/source_file.py | KentWangYQ/server-migration | 82b0eaac42db3eb697fd53b79d64bd0d39024842 | [
"MIT"
] | null | null | null | lib/data_coll/source_file.py | KentWangYQ/server-migration | 82b0eaac42db3eb697fd53b79d64bd0d39024842 | [
"MIT"
] | null | null | null | lib/data_coll/source_file.py | KentWangYQ/server-migration | 82b0eaac42db3eb697fd53b79d64bd0d39024842 | [
"MIT"
] | null | null | null | import os
import config
def walk():
"""
遍历源文件目录
:return: Generator
"""
for root in config.Source.include:
for path, _, files in os.walk(root):
if path not in config.Source.exclude:
for file in files:
yield (path, file)
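# A minimal usage sketch; config.Source.include/exclude come from the project's
# config module, exactly as consumed above.
if __name__ == '__main__':
    for path, file in walk():
        print(os.path.join(path, file))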
| 19.8 | 49 | 0.535354 | 36 | 297 | 4.388889 | 0.555556 | 0.101266 | 0.177215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.373737 | 297 | 14 | 50 | 21.214286 | 0.849462 | 0.087542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a6544ccff7f22122adea74703d79ead395520358 | 5,371 | py | Python | qa/L0_custom_legacy/custom_legacy_test.py | wiggin66/server | d32e253244be8539a087ba59fee5ab63f9f6a040 | [
"BSD-3-Clause"
] | 4 | 2021-06-02T02:37:53.000Z | 2022-01-20T19:32:57.000Z | qa/L0_custom_legacy/custom_legacy_test.py | wiggin66/server | d32e253244be8539a087ba59fee5ab63f9f6a040 | [
"BSD-3-Clause"
] | null | null | null | qa/L0_custom_legacy/custom_legacy_test.py | wiggin66/server | d32e253244be8539a087ba59fee5ab63f9f6a040 | [
"BSD-3-Clause"
] | 1 | 2021-12-17T03:07:54.000Z | 2021-12-17T03:07:54.000Z | #!/bin/bash
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
from functools import partial
import numpy as np
import queue
import unittest
import test_util as tu
import tritonclient.grpc as grpcclient
import tritonclient.http as httpclient
from tritonclient.utils import InferenceServerException
class UserData:
def __init__(self):
self._completed_requests = queue.Queue()
def callback(user_data, result, error):
if error:
user_data._completed_requests.put(error)
else:
user_data._completed_requests.put(result)
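# UserData and callback implement the usual Triton async/streaming pattern: every
# result or error is funnelled into the queue and pulled back out with
# user_data._completed_requests.get(), as the streaming test below does.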
#
# The simple inference tests on leagacy custom backend.
#
class CustomLegacyTest(tu.TestResultCollector):
def setUp(self):
self.model_name_ = "custom_identity_int32"
self.input0_data_ = np.array([[10]], dtype=np.int32)
def _prepare_request(self, protocol):
if (protocol == "grpc"):
self.inputs_ = []
self.inputs_.append(grpcclient.InferInput('INPUT0', [1, 1],
"INT32"))
self.outputs_ = []
self.outputs_.append(grpcclient.InferRequestedOutput('OUTPUT0'))
else:
self.inputs_ = []
self.inputs_.append(httpclient.InferInput('INPUT0', [1, 1],
"INT32"))
self.outputs_ = []
self.outputs_.append(httpclient.InferRequestedOutput('OUTPUT0'))
self.inputs_[0].set_data_from_numpy(self.input0_data_)
def _test_no_outputs_helper(self,
use_grpc=True,
use_http=True,
use_streaming=True):
if use_grpc:
triton_client = grpcclient.InferenceServerClient(
url="localhost:8001", verbose=True)
self._prepare_request("grpc")
result = triton_client.infer(model_name=self.model_name_,
inputs=self.inputs_,
outputs=self.outputs_,
client_timeout=1)
# The response should not contain any outputs
self.assertEqual(result.as_numpy('OUTPUT0'), None)
if use_http:
triton_client = httpclient.InferenceServerClient(
url="localhost:8000", verbose=True, network_timeout=2.0)
self._prepare_request("http")
result = triton_client.infer(model_name=self.model_name_,
inputs=self.inputs_,
outputs=self.outputs_)
# The response should not contain any outputs
self.assertEqual(result.as_numpy('OUTPUT0'), None)
if use_streaming:
triton_client = grpcclient.InferenceServerClient(
url="localhost:8001", verbose=True)
self._prepare_request("grpc")
user_data = UserData()
triton_client.stop_stream()
triton_client.start_stream(callback=partial(callback, user_data),
stream_timeout=1)
triton_client.async_stream_infer(model_name=self.model_name_,
inputs=self.inputs_,
outputs=self.outputs_)
result = user_data._completed_requests.get()
if type(result) == InferenceServerException:
raise result
# The response should not contain any outputs
self.assertEqual(result.as_numpy('OUTPUT0'), None)
# The tests needs the identity backend to be configured with "suppress_outputs"
# with TRUE.
def test_no_outputs(self):
self._test_no_outputs_helper()
if __name__ == '__main__':
unittest.main()
| 40.383459 | 83 | 0.633402 | 595 | 5,371 | 5.541176 | 0.364706 | 0.030027 | 0.024264 | 0.022748 | 0.313922 | 0.266606 | 0.266606 | 0.266606 | 0.266606 | 0.266606 | 0 | 0.011594 | 0.293428 | 5,371 | 132 | 84 | 40.689394 | 0.857181 | 0.329361 | 0 | 0.315789 | 0 | 0 | 0.042881 | 0.005886 | 0 | 0 | 0 | 0 | 0.039474 | 1 | 0.078947 | false | 0 | 0.118421 | 0 | 0.223684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a6551b4d98c72a4734c14321b779357ef66e0ea1 | 811 | py | Python | testsuite/sesstion_test.py | olegvg/telebot | e37bce862518a1addaf1db4624a62eaf9dfc4afa | [
"MIT"
] | null | null | null | testsuite/sesstion_test.py | olegvg/telebot | e37bce862518a1addaf1db4624a62eaf9dfc4afa | [
"MIT"
] | null | null | null | testsuite/sesstion_test.py | olegvg/telebot | e37bce862518a1addaf1db4624a62eaf9dfc4afa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from telebot.storage import EphemeralStorage
class TestEphemeralSession(unittest.TestCase):
def setUp(self):
from telebot.logger import init_root_logger
init_root_logger()
def test_session_storage_singleton(self):
session1 = EphemeralStorage()
session1.set_by_key(1, 1)
session2 = EphemeralStorage()
session2.set_by_key(2, 2)
self.assertEqual(session1, session2)
def test_session_storage_cleanup(self):
sess = EphemeralStorage()
sess.set_by_key(1, 1)
sess.set_by_key(2, 2)
sess.set_by_key(3, 3)
sess.clear()
self.assertFalse(sess.is_key_exists(1))
self.assertFalse(sess.is_key_exists(2))
self.assertFalse(sess.is_key_exists(3))
| 23.852941 | 51 | 0.664612 | 102 | 811 | 5.029412 | 0.352941 | 0.048733 | 0.077973 | 0.070175 | 0.253411 | 0.175439 | 0 | 0 | 0 | 0 | 0 | 0.03231 | 0.236745 | 811 | 33 | 52 | 24.575758 | 0.796446 | 0.025894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.190476 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a65643a5cfe26a75ee90de2f3f498ab27ada61a1 | 1,856 | py | Python | logger.py | SoPudge/lib.py | 7e8e6cb76a1838c2af969bd1f3b7cf5ce6cb0441 | [
"MIT"
] | null | null | null | logger.py | SoPudge/lib.py | 7e8e6cb76a1838c2af969bd1f3b7cf5ce6cb0441 | [
"MIT"
] | null | null | null | logger.py | SoPudge/lib.py | 7e8e6cb76a1838c2af969bd1f3b7cf5ce6cb0441 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import logging
class Logger(object):
def __init__(self,logger,**kw):
        '''
        create a logger class, ported from the default logging module
        Description:
            Calls without **kw arguments are legacy-style, kept for older programs.
            Calls that pass **kw support the following:
            1. configuring where the log file is stored
            2. naming the log file after the program or after the machine
        '''
        # **kw argument handling
        if len(kw) == 0:
            # default to the directory containing the running script
            file_path = sys.path[0]
            file_name = 'log.log'
        else:
            file_path = kw['file_path']
            file_name = kw['file_name']
        # end of **kw argument handling
        # create a logger
self.logger = logging.getLogger(logger)
self.logger.setLevel(logging.DEBUG)
        # create a handler that writes the log to a file
self.fh = logging.FileHandler(file_path + '/' + file_name)
self.fh.setLevel(logging.INFO)
        # create a handler that writes the log to the console stream
self.ch = logging.StreamHandler()
self.ch.setLevel(logging.INFO)
# formatter of log
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self.fh.setFormatter(formatter)
self.ch.setFormatter(formatter)
# add handler to logger
self.logger.addHandler(self.fh)
self.logger.addHandler(self.ch)
def info(self,msg):
self.logger.info(msg)
def warning(self,msg):
self.logger.warning(msg)
def error(self,msg):
self.logger.error(msg)
def debug(self,msg):
self.logger.debug(msg)
def close(self):
self.logger.removeHandler(self.fh)
self.logger.removeHandler(self.ch)
if __name__ == '__main__':
#kw = {'file_path':'N:\\005_Software','file_name':'abc.log'}
logger = Logger('mylog')
logger.info('this is in log')
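    # A minimal sketch of the **kw-style call; the file name here is a
    # placeholder, not a project default.
    kw_logger = Logger('mylog2', file_path=sys.path[0], file_name='kw.log')
    kw_logger.warning('this is in the kw-style log')
    kw_logger.close()
    logger.close()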
| 26.898551 | 93 | 0.579741 | 225 | 1,856 | 4.68 | 0.377778 | 0.104463 | 0.041785 | 0.064577 | 0.049383 | 0.049383 | 0.049383 | 0 | 0 | 0 | 0 | 0.006116 | 0.295259 | 1,856 | 68 | 94 | 27.294118 | 0.79893 | 0.231681 | 0 | 0 | 0 | 0 | 0.077663 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.083333 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |