| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | int64 |
| qsc_code_cate_encoded_data | null |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
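Most qsc_* columns are per-file quality heuristics computed over the raw `content` string. The reference implementations are not part of this dump, so the sketch below is an assumed reading of three of the simpler column names (`frac_words_unique`, `frac_chars_whitespace`, `num_chars_line_mean`), for orientation only:

```python
import re

# Assumed re-implementations of a few of the quality signals above; the
# dataset's exact definitions are not documented here, so these formulas are
# illustrative readings of the column names, not the reference code.

def frac_words_unique(code: str) -> float:
    """Fraction of distinct word tokens among all word tokens."""
    words = re.findall(r"\w+", code)
    return len(set(words)) / len(words) if words else 0.0

def frac_chars_whitespace(code: str) -> float:
    """Fraction of characters that are whitespace."""
    return sum(ch.isspace() for ch in code) / len(code) if code else 0.0

def num_chars_line_mean(code: str) -> float:
    """Mean line length in characters."""
    lines = code.splitlines()
    return sum(len(line) for line in lines) / len(lines) if lines else 0.0

sample = "def add(a, b):\n    return a + b\n"
print(frac_words_unique(sample), frac_chars_whitespace(sample), num_chars_line_mean(sample))
```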
hexsha: 843f97dd8ec994e4357ed02f96f7842db3d9a402 | size: 5,867 | ext: py | lang: Python
max_stars_repo_path: cloudflare-deploy.py | max_stars_repo_name: antonini/certbot-hooks | max_stars_repo_head_hexsha: 61e200b7a038952f2f559953f47be62e1f992e39
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: cloudflare-deploy.py | max_issues_repo_name: antonini/certbot-hooks | max_issues_repo_head_hexsha: 61e200b7a038952f2f559953f47be62e1f992e39
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: cloudflare-deploy.py | max_forks_repo_name: antonini/certbot-hooks | max_forks_repo_head_hexsha: 61e200b7a038952f2f559953f47be62e1f992e39
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python
import logging
import sys
import CloudFlare
import os
import re
from os import path
from certbot.plugins import dns_common
__author__ = "Endrigo Antonini"
__copyright__ = "Copyright 2020, Endrigo Antonini"
__license__ = "Apache License 2.0"
__version__ = "1.0"
__maintainer__ = "Endrigo Antonini"
__email__ = "eantonini@eidoscode.com"
__status__ = "Production"
logger = logging.getLogger(__name__)
DEFAULT_CERT_FOLDER = "/etc/letsencrypt/live"
CERTBOT_CONF_DIR = "/etc/letsencrypt/renewal"
PROPERTIES = {}
def read_file(filename):
"""
Read a file from disk and return all the content
    :param str filename: Name of the file to be read.
    :raises Exception: if the file doesn't exist
    """
    if not path.isfile(filename):
        raise Exception("File {} doesn't exist!".format(filename))
with open(filename) as f:
return f.read()
def read_certificate(filename):
return re.sub('\r?\n', '\\n', read_file(filename))
def read_properties_file(file):
myvars = {}
if not path.isfile(file):
raise Exception("Config file {} doesn't exists!".format(file))
with open(file) as myfile:
for line in myfile:
name, var = line.partition("=")[::2]
myvars[name.strip()] = var.strip()
return myvars
def read_domain_properties(domain):
global PROPERTIES
if domain in PROPERTIES:
return PROPERTIES[domain]
config_file="{}/{}.conf".format(CERTBOT_CONF_DIR, domain)
myvars = read_properties_file(config_file)
PROPERTIES[domain] = myvars
return myvars
def connect_cloudflare(domain):
print("Connection to Cloudflare of domain {}".format(domain))
properties = read_domain_properties(domain)
    if "dns_cloudflare_credentials" not in properties:
        raise Exception("Renewal config of domain {} doesn't have property dns_cloudflare_credentials on it.".format(domain))
    cred_file = properties["dns_cloudflare_credentials"]
props = read_properties_file(cred_file)
if not "dns_cloudflare_api_token" in props:
raise Exception("File {} doesn't have property dns_cloudflare_api_token on it.".format(cred_file))
api_key = props["dns_cloudflare_api_token"]
return CloudFlare.CloudFlare(token=api_key)
def find_zone_id(cf, domain):
zone_name_guesses = dns_common.base_domain_name_guesses(domain)
zones = [] # type: List[Dict[str, Any]]
code = msg = None
for zone_name in zone_name_guesses:
params = {'name': zone_name,
'per_page': 1}
try:
zones = cf.zones.get(params=params) # zones | pylint: disable=no-member
except CloudFlare.exceptions.CloudFlareAPIError as e:
code = int(e)
msg = str(e)
hint = None
if code == 6003:
hint = ('Did you copy your entire API token/key? To use Cloudflare tokens, '
'you\'ll need the python package cloudflare>=2.3.1.{}'
.format(' This certbot is running cloudflare ' + str(CloudFlare.__version__)
if hasattr(CloudFlare, '__version__') else ''))
elif code == 9103:
hint = 'Did you enter the correct email address and Global key?'
elif code == 9109:
hint = 'Did you enter a valid Cloudflare Token?'
if hint:
raise Exception('Error determining zone_id: {0} {1}. Please confirm '
'that you have supplied valid Cloudflare API credentials. ({2})'
.format(code, msg, hint))
else:
logger.debug('Unrecognised CloudFlareAPIError while finding zone_id: %d %s. '
'Continuing with next zone guess...', e, e)
if zones:
zone_id = zones[0]['id']
logger.debug('Found zone_id of %s for %s using name %s', zone_id, domain, zone_name)
return zone_id
raise Exception('Unable to determine zone_id for {0} using zone names: {1}. '
'Please confirm that the domain name has been entered correctly '
'and is already associated with the supplied Cloudflare account.{2}'
.format(domain, domain, ' The error from Cloudflare was:'
' {0} {1}'.format(code, msg) if code is not None else ''))
def upload_certificate(domain):
cf = connect_cloudflare(domain)
private_key = read_certificate("{}/{}/privkey.pem".format(DEFAULT_CERT_FOLDER, domain))
fullchain = read_certificate("{}/{}/fullchain.pem".format(DEFAULT_CERT_FOLDER, domain))
zone_id = find_zone_id(cf, domain)
logger.debug("Cloudflare Zone id {} of domain {} ".format(zone_id, domain))
data = {'certificate': fullchain,
'private_key': private_key,
'bundle_method': 'ubiquitous'}
print("Going to deploy certificate.")
try:
cf.zones.custom_certificates.post(zone_id, data=data)
print("Depoyed.")
except CloudFlare.exceptions.CloudFlareAPIError as e:
code = int(e)
msg = str(e)
hint = None
if code == 1228:
print("Cert already deployed.")
else:
logger.error(code)
logger.error(msg)
raise e
return
def main():
domains_str = os.environ['RENEWED_DOMAINS']
domains_lst = domains_str.split()
for domain in domains_lst:
print("")
print("Start domain {} checking".format(domain))
zone_name_guesses = dns_common.base_domain_name_guesses(domain)
zone_domain = None
for temp_zone_domain in zone_name_guesses:
temp_config_file = "{}/{}.conf".format(CERTBOT_CONF_DIR, temp_zone_domain)
logger.debug("Checking zone {} -- {}".format(temp_zone_domain, temp_config_file))
if path.isfile(temp_config_file):
zone_domain = temp_zone_domain
break
if zone_domain is None:
raise Exception("It wasn't possible to continue. There is no config file for domain {}.".format(domain))
upload_certificate(zone_domain)
if __name__ == '__main__':
main()
avg_line_length: 32.236264 | max_line_length: 110 | alphanum_fraction: 0.670701
qsc_code_num_words_quality_signal: 768 | qsc_code_num_chars_quality_signal: 5,867 | qsc_code_mean_word_length_quality_signal: 4.90625
qsc_code_frac_words_unique_quality_signal: 0.28125 | qsc_code_frac_chars_top_2grams_quality_signal: 0.020701 | qsc_code_frac_chars_top_3grams_quality_signal: 0.01327
qsc_code_frac_chars_top_4grams_quality_signal: 0.022293 | qsc_code_frac_chars_dupe_5grams_quality_signal: 0.16879 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0.145435
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.12845 | qsc_code_frac_chars_dupe_8grams_quality_signal: 0.110403 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0.110403
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.110403 | qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.008304
qsc_code_frac_chars_whitespace_quality_signal: 0.220044 | qsc_code_size_file_byte_quality_signal: 5,867 | qsc_code_num_lines_quality_signal: 182
qsc_code_num_chars_line_max_quality_signal: 111 | qsc_code_num_chars_line_mean_quality_signal: 32.236264 | qsc_code_frac_chars_alphabet_quality_signal: 0.815122
qsc_code_frac_chars_comments_quality_signal: 0.041418 | qsc_code_cate_xml_start_quality_signal: 0 | qsc_code_frac_lines_dupe_lines_quality_signal: 0.133333
qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0 | qsc_code_frac_chars_string_length_quality_signal: 0.270733
qsc_code_frac_chars_long_word_length_quality_signal: 0.038523 | qsc_code_frac_lines_string_concat_quality_signal: 0 | qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0.059259 | qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.051852 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0.007407
qsc_codepython_score_lines_no_logic_quality_signal: 0.17037 | qsc_codepython_frac_lines_print_quality_signal: 0.044444
qsc_code_* / qsc_codepython_* raw counterparts (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data, which are null
effective: 1 | hits: 0
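The file above is a certbot deploy hook: certbot exports the renewed domains via the `RENEWED_DOMAINS` environment variable, and the hook resolves `dns_cloudflare_credentials` from the matching renewal config. A stand-alone rerun of the `name = value` parsing used by `read_properties_file` (the config line is hypothetical; certbot writes such files under /etc/letsencrypt/renewal/):

```python
# Hypothetical renewal-config line; mirrors read_properties_file() above.
conf_text = "dns_cloudflare_credentials = /root/.secrets/cloudflare.ini\n"

myvars = {}
for line in conf_text.splitlines():
    # partition("=")[::2] keeps the key (index 0) and the value (index 2)
    name, var = line.partition("=")[::2]
    myvars[name.strip()] = var.strip()

print(myvars)  # {'dns_cloudflare_credentials': '/root/.secrets/cloudflare.ini'}
```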
hexsha: 8441be7fed412cc2b0c06a54eaceebee4908fef7 | size: 272 | ext: py | lang: Python
max_stars_repo_path: incremental/settings.py | max_stars_repo_name: Nana0606/IUAD | max_stars_repo_head_hexsha: c52439eb5bbbef6bd50533b5d9e142e18091d85e
max_stars_repo_licenses: ["BSD-2-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-07-05T02:20:32.000Z | max_stars_repo_stars_event_max_datetime: 2021-07-05T02:20:32.000Z
max_issues_repo_path: incremental/settings.py | max_issues_repo_name: Nana0606/IUAD | max_issues_repo_head_hexsha: c52439eb5bbbef6bd50533b5d9e142e18091d85e
max_issues_repo_licenses: ["BSD-2-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: incremental/settings.py | max_forks_repo_name: Nana0606/IUAD | max_forks_repo_head_hexsha: c52439eb5bbbef6bd50533b5d9e142e18091d85e
max_forks_repo_licenses: ["BSD-2-Clause"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-08-22T08:45:18.000Z | max_forks_repo_forks_event_max_datetime: 2021-08-22T08:45:18.000Z
content:
# python3
# -*- coding: utf-8 -*-
# @Author : lina
# @Time : 2018/4/22 21:17
"""
code function: define all parameters.
"""
matched_file_name = "../data/gcn_res.txt"
wordvec_path = '../data/word2vec.model'
incremental_path = "../data/incremental_res.txt"
avg_line_length: 20.923077 | max_line_length: 49 | alphanum_fraction: 0.628676
qsc_code_num_words_quality_signal: 35 | qsc_code_num_chars_quality_signal: 272 | qsc_code_mean_word_length_quality_signal: 4.714286
qsc_code_frac_words_unique_quality_signal: 0.828571 | qsc_code_frac_chars_top_2grams_quality_signal: 0.072727 | qsc_code_frac_chars_top_3grams_quality_signal: 0
qsc_code_frac_chars_top_4grams_quality_signal: 0 | qsc_code_frac_chars_dupe_5grams_quality_signal: 0 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0
qsc_code_frac_chars_dupe_7grams_quality_signal: 0 | qsc_code_frac_chars_dupe_8grams_quality_signal: 0 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0
qsc_code_frac_chars_dupe_10grams_quality_signal: 0 | qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.063063
qsc_code_frac_chars_whitespace_quality_signal: 0.183824 | qsc_code_size_file_byte_quality_signal: 272 | qsc_code_num_lines_quality_signal: 12
qsc_code_num_chars_line_max_quality_signal: 50 | qsc_code_num_chars_line_mean_quality_signal: 22.666667 | qsc_code_frac_chars_alphabet_quality_signal: 0.68018
qsc_code_frac_chars_comments_quality_signal: 0.408088 | qsc_code_cate_xml_start_quality_signal: 0 | qsc_code_frac_lines_dupe_lines_quality_signal: 0
qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0 | qsc_code_frac_chars_string_length_quality_signal: 0.49635
qsc_code_frac_chars_long_word_length_quality_signal: 0.357664 | qsc_code_frac_lines_string_concat_quality_signal: 0 | qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0 | qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0 | qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_* / qsc_codepython_* raw counterparts (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data, which are null
effective: 1 | hits: 0
hexsha: 84464ba3de7de8074ab4f3a72392eb3da290f401 | size: 16,826 | ext: py | lang: Python
max_stars_repo_path: transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenet_block.py | max_stars_repo_name: wwhio/awesome-DeepLearning | max_stars_repo_head_hexsha: 2cc92edcf0c22bdfc670c537cc819c8fadf33fac
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1,150 | max_stars_repo_stars_event_min_datetime: 2021-06-01T03:44:21.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-31T13:43:42.000Z
max_issues_repo_path: transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenet_block.py | max_issues_repo_name: wwhio/awesome-DeepLearning | max_issues_repo_head_hexsha: 2cc92edcf0c22bdfc670c537cc819c8fadf33fac
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 358 | max_issues_repo_issues_event_min_datetime: 2021-06-01T03:58:47.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-28T02:55:00.000Z
max_forks_repo_path: transformer_courses/BERT_distillation/PaddleSlim-develop/paddleslim/nas/search_space/mobilenet_block.py | max_forks_repo_name: wwhio/awesome-DeepLearning | max_forks_repo_head_hexsha: 2cc92edcf0c22bdfc670c537cc819c8fadf33fac
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 502 | max_forks_repo_forks_event_min_datetime: 2021-05-31T12:52:14.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-31T02:51:41.000Z
content:
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from .search_space_base import SearchSpaceBase
from .base_layer import conv_bn_layer
from .search_space_registry import SEARCHSPACE
from .utils import compute_downsample_num, check_points, get_random_tokens
__all__ = ["MobileNetV1BlockSpace", "MobileNetV2BlockSpace"]
@SEARCHSPACE.register
class MobileNetV2BlockSpace(SearchSpaceBase):
def __init__(self,
input_size,
output_size,
block_num,
block_mask=None,
scale=1.0):
super(MobileNetV2BlockSpace, self).__init__(input_size, output_size,
block_num, block_mask)
if self.block_mask == None:
# use input_size and output_size to compute self.downsample_num
self.downsample_num = compute_downsample_num(self.input_size,
self.output_size)
if self.block_num != None:
                assert self.downsample_num <= self.block_num, 'downsample number must be LESS THAN OR EQUAL TO block_num, but NOW: downsample number is {}, block_num is {}'.format(
self.downsample_num, self.block_num)
# self.filter_num means channel number
self.filter_num = np.array([
3, 4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 144, 160, 192, 224,
256, 320, 384, 512
]) # 20
# self.k_size means kernel size
self.k_size = np.array([3, 5]) #2
# self.multiply means expansion_factor of each _inverted_residual_unit
self.multiply = np.array([1, 2, 3, 4, 5, 6]) #6
# self.repeat means repeat_num _inverted_residual_unit in each _invresi_blocks
self.repeat = np.array([1, 2, 3, 4, 5, 6]) #6
self.scale = scale
def init_tokens(self):
return get_random_tokens(self.range_table())
def range_table(self):
range_table_base = []
if self.block_mask != None:
range_table_length = len(self.block_mask)
else:
range_table_length = self.block_num
for i in range(range_table_length):
range_table_base.append(len(self.multiply))
range_table_base.append(len(self.filter_num))
range_table_base.append(len(self.repeat))
range_table_base.append(len(self.k_size))
return range_table_base
def token2arch(self, tokens=None):
"""
return mobilenetv2 net_arch function
"""
if tokens == None:
tokens = self.init_tokens()
self.bottleneck_params_list = []
if self.block_mask != None:
for i in range(len(self.block_mask)):
self.bottleneck_params_list.append(
(self.multiply[tokens[i * 4]],
self.filter_num[tokens[i * 4 + 1]],
self.repeat[tokens[i * 4 + 2]], 2
if self.block_mask[i] == 1 else 1,
self.k_size[tokens[i * 4 + 3]]))
else:
repeat_num = int(self.block_num / self.downsample_num)
num_minus = self.block_num % self.downsample_num
### if block_num > downsample_num, add stride=1 block at last (block_num-downsample_num) layers
for i in range(self.downsample_num):
self.bottleneck_params_list.append(
(self.multiply[tokens[i * 4]],
self.filter_num[tokens[i * 4 + 1]],
self.repeat[tokens[i * 4 + 2]], 2,
self.k_size[tokens[i * 4 + 3]]))
### if block_num / downsample_num > 1, add (block_num / downsample_num) times stride=1 block
for k in range(repeat_num - 1):
kk = k * self.downsample_num + i
self.bottleneck_params_list.append(
(self.multiply[tokens[kk * 4]],
self.filter_num[tokens[kk * 4 + 1]],
self.repeat[tokens[kk * 4 + 2]], 1,
self.k_size[tokens[kk * 4 + 3]]))
if self.downsample_num - i <= num_minus:
j = self.downsample_num * (repeat_num - 1) + i
self.bottleneck_params_list.append(
(self.multiply[tokens[j * 4]],
self.filter_num[tokens[j * 4 + 1]],
self.repeat[tokens[j * 4 + 2]], 1,
self.k_size[tokens[j * 4 + 3]]))
if self.downsample_num == 0 and self.block_num != 0:
            for i in range(self.block_num):
self.bottleneck_params_list.append(
(self.multiply[tokens[i * 4]],
self.filter_num[tokens[i * 4 + 1]],
self.repeat[tokens[i * 4 + 2]], 1,
self.k_size[tokens[i * 4 + 3]]))
def net_arch(input, return_mid_layer=False, return_block=None):
# all padding is 'SAME' in the conv2d, can compute the actual padding automatic.
# bottleneck sequences
in_c = int(32 * self.scale)
mid_layer = dict()
layer_count = 0
depthwise_conv = None
for i, layer_setting in enumerate(self.bottleneck_params_list):
t, c, n, s, k = layer_setting
if s == 2:
layer_count += 1
if check_points((layer_count - 1), return_block):
mid_layer[layer_count - 1] = depthwise_conv
input, depthwise_conv = self._invresi_blocks(
input=input,
in_c=in_c,
t=t,
c=int(c * self.scale),
n=n,
s=s,
k=int(k),
name='mobilenetv2_' + str(i + 1))
in_c = int(c * self.scale)
if check_points(layer_count, return_block):
mid_layer[layer_count] = depthwise_conv
if return_mid_layer:
return input, mid_layer
else:
return input,
return net_arch
def _shortcut(self, input, data_residual):
"""Build shortcut layer.
Args:
input(Variable): input.
data_residual(Variable): residual layer.
Returns:
Variable, layer output.
"""
return fluid.layers.elementwise_add(input, data_residual)
def _inverted_residual_unit(self,
input,
num_in_filter,
num_filters,
ifshortcut,
stride,
filter_size,
expansion_factor,
reduction_ratio=4,
name=None):
"""Build inverted residual unit.
Args:
input(Variable), input.
num_in_filter(int), number of in filters.
num_filters(int), number of filters.
ifshortcut(bool), whether using shortcut.
stride(int), stride.
filter_size(int), filter size.
padding(str|int|list), padding.
expansion_factor(float), expansion factor.
name(str), name.
Returns:
Variable, layers output.
"""
num_expfilter = int(round(num_in_filter * expansion_factor))
channel_expand = conv_bn_layer(
input=input,
num_filters=num_expfilter,
filter_size=1,
stride=1,
padding='SAME',
num_groups=1,
act='relu6',
name=name + '_expand')
bottleneck_conv = conv_bn_layer(
input=channel_expand,
num_filters=num_expfilter,
filter_size=filter_size,
stride=stride,
padding='SAME',
num_groups=num_expfilter,
act='relu6',
name=name + '_dwise',
use_cudnn=False)
depthwise_output = bottleneck_conv
linear_out = conv_bn_layer(
input=bottleneck_conv,
num_filters=num_filters,
filter_size=1,
stride=1,
padding='SAME',
num_groups=1,
act=None,
name=name + '_linear')
out = linear_out
if ifshortcut:
out = self._shortcut(input=input, data_residual=out)
return out, depthwise_output
def _invresi_blocks(self, input, in_c, t, c, n, s, k, name=None):
"""Build inverted residual blocks.
Args:
input: Variable, input.
in_c: int, number of in filters.
t: float, expansion factor.
c: int, number of filters.
n: int, number of layers.
s: int, stride.
k: int, filter size.
name: str, name.
Returns:
Variable, layers output.
"""
first_block, depthwise_output = self._inverted_residual_unit(
input=input,
num_in_filter=in_c,
num_filters=c,
ifshortcut=False,
stride=s,
filter_size=k,
expansion_factor=t,
name=name + '_1')
last_residual_block = first_block
last_c = c
for i in range(1, n):
last_residual_block, depthwise_output = self._inverted_residual_unit(
input=last_residual_block,
num_in_filter=last_c,
num_filters=c,
ifshortcut=True,
stride=1,
filter_size=k,
expansion_factor=t,
name=name + '_' + str(i + 1))
return last_residual_block, depthwise_output
@SEARCHSPACE.register
class MobileNetV1BlockSpace(SearchSpaceBase):
def __init__(self,
input_size,
output_size,
block_num,
block_mask=None,
scale=1.0):
super(MobileNetV1BlockSpace, self).__init__(input_size, output_size,
block_num, block_mask)
if self.block_mask == None:
# use input_size and output_size to compute self.downsample_num
self.downsample_num = compute_downsample_num(self.input_size,
self.output_size)
if self.block_num != None:
                assert self.downsample_num <= self.block_num, 'downsample number must be LESS THAN OR EQUAL TO block_num, but NOW: downsample number is {}, block_num is {}'.format(
self.downsample_num, self.block_num)
# self.filter_num means channel number
self.filter_num = np.array([
3, 4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 144, 160, 192, 224,
256, 320, 384, 512, 576, 640, 768, 1024, 1048
])
self.k_size = np.array([3, 5])
self.scale = scale
def init_tokens(self):
return get_random_tokens(self.range_table())
def range_table(self):
range_table_base = []
if self.block_mask != None:
for i in range(len(self.block_mask)):
range_table_base.append(len(self.filter_num))
range_table_base.append(len(self.filter_num))
range_table_base.append(len(self.k_size))
else:
for i in range(self.block_num):
range_table_base.append(len(self.filter_num))
range_table_base.append(len(self.filter_num))
range_table_base.append(len(self.k_size))
return range_table_base
def token2arch(self, tokens=None):
if tokens == None:
tokens = self.init_tokens()
self.bottleneck_params_list = []
if self.block_mask != None:
for i in range(len(self.block_mask)):
self.bottleneck_params_list.append(
(self.filter_num[tokens[i * 3]],
self.filter_num[tokens[i * 3 + 1]], 2
if self.block_mask[i] == 1 else 1,
self.k_size[tokens[i * 3 + 2]]))
else:
repeat_num = int(self.block_num / self.downsample_num)
num_minus = self.block_num % self.downsample_num
for i in range(self.downsample_num):
### if block_num > downsample_num, add stride=1 block at last (block_num-downsample_num) layers
self.bottleneck_params_list.append(
(self.filter_num[tokens[i * 3]],
self.filter_num[tokens[i * 3 + 1]], 2,
self.k_size[tokens[i * 3 + 2]]))
### if block_num / downsample_num > 1, add (block_num / downsample_num) times stride=1 block
for k in range(repeat_num - 1):
kk = k * self.downsample_num + i
self.bottleneck_params_list.append(
(self.filter_num[tokens[kk * 3]],
self.filter_num[tokens[kk * 3 + 1]], 1,
self.k_size[tokens[kk * 3 + 2]]))
if self.downsample_num - i <= num_minus:
j = self.downsample_num * (repeat_num - 1) + i
self.bottleneck_params_list.append(
(self.filter_num[tokens[j * 3]],
self.filter_num[tokens[j * 3 + 1]], 1,
self.k_size[tokens[j * 3 + 2]]))
if self.downsample_num == 0 and self.block_num != 0:
            for i in range(self.block_num):
self.bottleneck_params_list.append(
(self.filter_num[tokens[i * 3]],
self.filter_num[tokens[i * 3 + 1]], 1,
self.k_size[tokens[i * 3 + 2]]))
def net_arch(input, return_mid_layer=False, return_block=None):
mid_layer = dict()
layer_count = 0
for i, layer_setting in enumerate(self.bottleneck_params_list):
filter_num1, filter_num2, stride, kernel_size = layer_setting
if stride == 2:
layer_count += 1
if check_points((layer_count - 1), return_block):
mid_layer[layer_count - 1] = input
input = self._depthwise_separable(
input=input,
num_filters1=filter_num1,
num_filters2=filter_num2,
stride=stride,
scale=self.scale,
kernel_size=int(kernel_size),
name='mobilenetv1_{}'.format(str(i + 1)))
if return_mid_layer:
return input, mid_layer
else:
return input,
return net_arch
def _depthwise_separable(self,
input,
num_filters1,
num_filters2,
stride,
scale,
kernel_size,
name=None):
num_groups = input.shape[1]
s_oc = int(num_filters1 * scale)
if s_oc > num_groups:
output_channel = s_oc - (s_oc % num_groups)
else:
output_channel = num_groups
depthwise_conv = conv_bn_layer(
input=input,
filter_size=kernel_size,
num_filters=output_channel,
stride=stride,
num_groups=num_groups,
use_cudnn=False,
name=name + '_dw')
pointwise_conv = conv_bn_layer(
input=depthwise_conv,
filter_size=1,
num_filters=int(num_filters2 * scale),
stride=1,
name=name + '_sep')
return pointwise_conv
avg_line_length: 39.130233 | max_line_length: 178 | alphanum_fraction: 0.530013
qsc_code_num_words_quality_signal: 1,931 | qsc_code_num_chars_quality_signal: 16,826 | qsc_code_mean_word_length_quality_signal: 4.371828
qsc_code_frac_words_unique_quality_signal: 0.130502 | qsc_code_frac_chars_top_2grams_quality_signal: 0.050817 | qsc_code_frac_chars_top_3grams_quality_signal: 0.036958
qsc_code_frac_chars_top_4grams_quality_signal: 0.03376 | qsc_code_frac_chars_dupe_5grams_quality_signal: 0.610993 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0.558754
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.533168 | qsc_code_frac_chars_dupe_8grams_quality_signal: 0.502843 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0.477967
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.474651 | qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.027162
qsc_code_frac_chars_whitespace_quality_signal: 0.382979 | qsc_code_size_file_byte_quality_signal: 16,826 | qsc_code_num_lines_quality_signal: 429
qsc_code_num_chars_line_max_quality_signal: 179 | qsc_code_num_chars_line_mean_quality_signal: 39.221445 | qsc_code_frac_chars_alphabet_quality_signal: 0.785976
qsc_code_frac_chars_comments_quality_signal: 0.136753 | qsc_code_cate_xml_start_quality_signal: 0 | qsc_code_frac_lines_dupe_lines_quality_signal: 0.571885
qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0.00639 | qsc_code_frac_chars_string_length_quality_signal: 0.023949
qsc_code_frac_chars_long_word_length_quality_signal: 0.002958 | qsc_code_frac_lines_string_concat_quality_signal: 0 | qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0.00639
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0.044728 | qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.031949 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0.00639
qsc_codepython_score_lines_no_logic_quality_signal: 0.127796 | qsc_codepython_frac_lines_print_quality_signal: 0.003195
qsc_code_* / qsc_codepython_* raw counterparts (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data, which are null
effective: 1 | hits: 0
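In `token2arch` above, every searched block consumes four tokens: indices into `multiply`, `filter_num`, `repeat`, and `k_size`, with stride 2 wherever `block_mask[i] == 1`. A stand-alone sketch of that decoding step with toy token values (tables copied from `MobileNetV2BlockSpace`):

```python
import numpy as np

# Decoding tables as defined in MobileNetV2BlockSpace above.
multiply = np.array([1, 2, 3, 4, 5, 6])
filter_num = np.array([3, 4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 144,
                       160, 192, 224, 256, 320, 384, 512])
repeat = np.array([1, 2, 3, 4, 5, 6])
k_size = np.array([3, 5])

def decode(tokens, block_mask):
    """Mirror the block_mask branch of token2arch: four tokens per block."""
    params = []
    for i, mask in enumerate(block_mask):
        params.append((int(multiply[tokens[i * 4]]),
                       int(filter_num[tokens[i * 4 + 1]]),
                       int(repeat[tokens[i * 4 + 2]]),
                       2 if mask == 1 else 1,  # stride
                       int(k_size[tokens[i * 4 + 3]])))
    return params

# Toy tokens for two blocks, the first one downsampling.
print(decode([0, 5, 1, 0, 2, 7, 0, 1], block_mask=[1, 0]))
# [(1, 24, 2, 2, 3), (3, 48, 1, 1, 5)]
```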
hexsha: 844826018788435b356bf6f9c896357ffb15fd09 | size: 11,680 | ext: py | lang: Python
max_stars_repo_path: baiduspider/core/parser.py | max_stars_repo_name: samzhangjy/GSSpider | max_stars_repo_head_hexsha: 344d9c9053a5d5bf08692e0c817d30763dbd8ab7
max_stars_repo_licenses: ["MIT"] | max_stars_count: 31 | max_stars_repo_stars_event_min_datetime: 2020-07-17T08:26:37.000Z | max_stars_repo_stars_event_max_datetime: 2021-08-24T02:28:50.000Z
max_issues_repo_path: baiduspider/core/parser.py | max_issues_repo_name: samzhangjy/GSSpider | max_issues_repo_head_hexsha: 344d9c9053a5d5bf08692e0c817d30763dbd8ab7
max_issues_repo_licenses: ["MIT"] | max_issues_count: 6 | max_issues_repo_issues_event_min_datetime: 2020-07-14T17:13:17.000Z | max_issues_repo_issues_event_max_datetime: 2020-09-12T06:02:01.000Z
max_forks_repo_path: baiduspider/core/parser.py | max_forks_repo_name: samzhangjy/GSSpider | max_forks_repo_head_hexsha: 344d9c9053a5d5bf08692e0c817d30763dbd8ab7
max_forks_repo_licenses: ["MIT"] | max_forks_count: 12 | max_forks_repo_forks_event_min_datetime: 2020-07-27T08:38:26.000Z | max_forks_repo_forks_event_max_datetime: 2021-07-28T16:05:58.000Z
content:
import json
from html import unescape
from bs4 import BeautifulSoup
from baiduspider.core._spider import BaseSpider
from baiduspider.errors import ParseError
class Parser(BaseSpider):
def __init__(self) -> None:
super().__init__()
def parse_web(self, content: str) -> dict:
"""解析百度网页搜索的页面源代码
Args:
            content (str): HTML source of the Baidu web search page, already decoded as UTF-8
Returns:
            dict: the parsed results
"""
soup = BeautifulSoup(content, 'html.parser')
if soup.find('div', id='content_left') is None:
raise ParseError('Invalid HTML content.')
        # Try to get the total number of search results
try:
num = int(str(soup.find('span', class_='nums_text').text).strip(
'百度为您找到相关结果约').strip('个').replace(',', ''))
except:
num = 0
        # Look for the calculator widget
calc = soup.find('div', class_='op_new_cal_screen')
        # Pre-results (calculator and related searches)
pre_results = []
        # Pre-process related searches
try:
_related = soup.find('div', id='rs').find('table').find_all('th')
except:
_related = []
related = []
        # Pre-process news results
news = soup.find('div', class_='result-op',
tpl='sp_realtime_bigpic5', srcid='19')
        # Check whether a news block is present
try:
news_title = self._format(
news.find('h3', class_='t').find('a').text)
except:
news_title = None
news_detail = []
else:
news_rows = news.findAll('div', class_='c-row')
news_detail = []
prev_row = None
for row in news_rows:
try:
row_title = self._format(row.find('a').text)
except AttributeError:
prev_row['des'] = self._format(row.text)
continue
row_time = self._format(
row.find('span', class_='c-color-gray2').text)
row_author = self._format(
row.find('span', class_='c-color-gray').text)
row_url = self._format(row.find('a')['href'])
news_detail.append({
'title': row_title,
'time': row_time,
'author': row_author,
'url': row_url,
'des': None
})
prev_row = news_detail[-1]
        # Pre-process short videos
video = soup.find('div', class_='op-short-video-pc')
if video:
video_rows = video.findAll('div', class_='c-row')
video_results = []
for row in video_rows:
row_res = []
videos = row.findAll('div', class_='c-span6')
for v in videos:
v_link = v.find('a')
v_title = v_link['title']
v_url = self._format(v_link['href'])
v_img = v_link.find('img')['src']
v_len = self._format(
v.find('div', class_='op-short-video-pc-duration-wrap').text)
v_from = self._format(
v.find('div', class_='op-short-video-pc-clamp1').text)
row_res.append({
'title': v_title,
'url': v_url,
'cover': v_img,
'length': v_len,
'origin': v_from
})
video_results += row_res
else:
video_results = []
        # Append related searches one by one
for _ in _related:
if _.text:
related.append(_.text)
        # Pre-process Baike (encyclopedia) results
baike = soup.find('div', class_='c-container', tpl='bk_polysemy')
if baike:
b_title = self._format(baike.find('h3').text)
b_url = baike.find('a')['href']
b_des = self._format(baike.find(
'div', class_='c-span-last').find('p').text)
try:
b_cover = baike.find(
'div', class_='c-span6').find('img')['src']
b_cover_type = 'image'
except (TypeError, AttributeError):
try:
b_cover = baike.find(
'video', class_='op-bk-polysemy-video')['data-src']
b_cover_type = 'video'
except TypeError:
b_cover = None
b_cover_type = None
baike = {
'title': b_title,
'url': b_url,
'des': b_des,
'cover': b_cover,
'cover-type': b_cover_type
}
        # Load the total number of results
if num != 0:
pre_results.append(dict(type='total', result=num))
        # Load the calculator result
if calc:
pre_results.append(dict(type='calc', process=str(calc.find('p', class_='op_new_val_screen_process').find(
'span').text), result=str(calc.find('p', class_='op_new_val_screen_result').find('span').text)))
        # Load related searches
if related:
pre_results.append(dict(type='related', results=related))
        # Load news
if news_detail:
pre_results.append(dict(type='news', results=news_detail))
        # Load short videos
if video_results:
pre_results.append(dict(type='video', results=video_results))
        # Load Baike results
if baike:
pre_results.append(dict(type='baike', result=baike))
        # Pre-process the source code
error = False
try:
soup = BeautifulSoup(content, 'html.parser')
        # Error handling
except IndexError:
error = True
finally:
if error:
raise ParseError(
'Failed to generate BeautifulSoup object for the given source code content.')
results = soup.findAll('div', class_='result')
res = []
for result in results:
soup = BeautifulSoup(self._minify(str(result)), 'html.parser')
            # Link
href = soup.find('a').get('href').strip()
            # Title
title = self._format(str(soup.find('a').text))
            # Time
try:
time = self._format(soup.findAll(
'div', class_='c-abstract')[0].find('span', class_='newTimeFactor_before_abs').text)
except (AttributeError, IndexError):
time = None
try:
                # Description
des = soup.find_all('div', class_='c-abstract')[0].text
soup = BeautifulSoup(str(result), 'html.parser')
des = self._format(des).lstrip(str(time)).strip()
except IndexError:
try:
des = des.replace('mn', '')
except (UnboundLocalError, AttributeError):
des = None
if time:
time = time.split('-')[0].strip()
            # Baidu result links are encrypted, so each one would have to be visited to resolve it.
            # For performance reasons, link analysis is skipped for now.
# if href is not None:
# try:
            # # For performance reasons, a 1-second timeout is set here
# r = requests.get(href, timeout=1)
# href = r.url
# except:
            # # Failed to fetch the page; fall back to the original encrypted link
# href = href
            # # Analyze the link
# if href:
# parse = urlparse(href)
# domain = parse.netloc
# prepath = parse.path.split('/')
# path = []
# for loc in prepath:
# if loc != '':
# path.append(loc)
# else:
# domain = None
# path = None
try:
is_not_special = result['tpl'] not in [
'short_video_pc', 'sp_realtime_bigpic5', 'bk_polysemy']
except KeyError:
is_not_special = False
            if is_not_special:  # make sure this is not a special result type
                # Get the visible domain name
try:
domain = result.find('div', class_='c-row').find('div', class_='c-span-last').find(
'div', class_='se_st_footer').find('a', class_='c-showurl').text
except Exception as error:
try:
domain = result.find(
'div', class_='c-row').find('div', class_='c-span-last').find('p', class_='op-bk-polysemy-move').find('span', class_='c-showurl').text
except Exception as error:
try:
domain = result.find(
'div', class_='se_st_footer').find('a', class_='c-showurl').text
except:
domain = None
if domain:
domain = domain.replace(' ', '')
else:
domain = None
            # Append to the results
if title and href and is_not_special:
res.append({
'title': title,
'des': des,
'origin': domain,
'url': href,
'time': time,
'type': 'result'})
soup = BeautifulSoup(content, 'html.parser')
try:
soup = BeautifulSoup(str(soup.findAll('div', id='page')
[0]), 'html.parser')
            # Pagination
pages_ = soup.findAll('span', class_='pc')
except IndexError:
pages_ = []
pages = []
for _ in pages_:
pages.append(int(_.text))
        # When there is only one page of results, Baidu hides the bottom pagination bar,
        # so default to 1 here; without this, a `TypeError` is raised later
if not pages:
pages = [1]
        # Assemble the final results
result = pre_results
result.extend(res)
return {
'results': result,
            # maximum page number
'pages': max(pages)
}
def parse_pic(self, content: str) -> dict:
"""解析百度图片搜索的页面源代码
Args:
            content (str): HTML source of the Baidu image search page, already decoded as UTF-8
Returns:
            dict: the parsed results
"""
        # Load the data from the embedded JavaScript.
        # The JavaScript literal is close to JSON (JavaScript Object Notation), so json.loads can parse it
        # after some pre-processing that filters out the functions and useless brackets.
error = None
try:
data = json.loads(content.split('flip.setData(\'imgData\', ')[1].split(
'flip.setData(')[0].split(']);')[0].replace(');', '').replace('<\\/strong>', '</strong>').replace('\\\'', '\''))
except Exception as err:
error = err
if type(err) in [IndexError, AttributeError]:
raise ParseError('Invalid HTML content.')
finally:
if error: raise ParseError(str(error))
results = []
for _ in data['data'][:-1]:
if _:
                # Title
title = str(_['fromPageTitle']).encode('utf-8').decode('utf-8')
                # Strip HTML from the title
title = unescape(self._remove_html(title))
                # Link
url = _['objURL']
                # Source domain
host = _['fromURLHost']
                # Build the result
result = {
'title': title,
'url': url,
'host': host
}
                results.append(result)  # append to the results
        # Get pagination
bs = BeautifulSoup(content, 'html.parser')
pages_ = bs.find('div', id='page').findAll('span', class_='pc')
pages = []
for _ in pages_:
pages.append(int(_.text))
return {
'results': results,
            # take the maximum page number
'pages': max(pages)
}
avg_line_length: 36.72956 | max_line_length: 162 | alphanum_fraction: 0.447603
qsc_code_num_words_quality_signal: 1,099 | qsc_code_num_chars_quality_signal: 11,680 | qsc_code_mean_word_length_quality_signal: 4.584167
qsc_code_frac_words_unique_quality_signal: 0.22566 | qsc_code_frac_chars_top_2grams_quality_signal: 0.031759 | qsc_code_frac_chars_top_3grams_quality_signal: 0.033347
qsc_code_frac_chars_top_4grams_quality_signal: 0.018063 | qsc_code_frac_chars_dupe_5grams_quality_signal: 0.228067 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0.118499
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.118499 | qsc_code_frac_chars_dupe_8grams_quality_signal: 0.113339 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0.082175
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.069869 | qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.004499
qsc_code_frac_chars_whitespace_quality_signal: 0.42911 | qsc_code_size_file_byte_quality_signal: 11,680 | qsc_code_num_lines_quality_signal: 317
qsc_code_num_chars_line_max_quality_signal: 163 | qsc_code_num_chars_line_mean_quality_signal: 36.845426 | qsc_code_frac_chars_alphabet_quality_signal: 0.75105
qsc_code_frac_chars_comments_quality_signal: 0.091524 | qsc_code_cate_xml_start_quality_signal: 0 | qsc_code_frac_lines_dupe_lines_quality_signal: 0.268085
qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0 | qsc_code_frac_chars_string_length_quality_signal: 0.112684
qsc_code_frac_chars_long_word_length_quality_signal: 0.012244 | qsc_code_frac_lines_string_concat_quality_signal: 0 | qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0.012766 | qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.021277 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.046809 | qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_* / qsc_codepython_* raw counterparts (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data, which are null
effective: 1 | hits: 0
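A minimal usage sketch for the parser above; it assumes network access, that `BaseSpider` needs no constructor arguments (the `__init__` above just calls `super().__init__()`), and a placeholder User-Agent and query:

```python
import requests
from baiduspider.core.parser import Parser  # module path as recorded above

# Fetch a Baidu results page ourselves, then hand the HTML to the parser.
headers = {'User-Agent': 'Mozilla/5.0'}  # placeholder UA string
html = requests.get('https://www.baidu.com/s',
                    params={'wd': 'python'}, headers=headers).text

parsed = Parser().parse_web(html)
print(parsed['pages'], len(parsed['results']))
```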
hexsha: 8449b868c5c55bebc3c70da12ca1d458ad2a711a | size: 2,142 | ext: py | lang: Python
max_stars_repo_path: virtual/lib/python3.6/site-packages/requests_unixsocket/adapters.py | max_stars_repo_name: marknesh/pitches | max_stars_repo_head_hexsha: 0a480d9bc2beafaefa0121393b1502cc05edab89
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: virtual/lib/python3.6/site-packages/requests_unixsocket/adapters.py | max_issues_repo_name: marknesh/pitches | max_issues_repo_head_hexsha: 0a480d9bc2beafaefa0121393b1502cc05edab89
max_issues_repo_licenses: ["MIT"] | max_issues_count: 10 | max_issues_repo_issues_event_min_datetime: 2020-03-08T21:13:29.000Z | max_issues_repo_issues_event_max_datetime: 2021-04-08T19:41:14.000Z
max_forks_repo_path: flask/lib/python3.6/site-packages/requests_unixsocket/adapters.py | max_forks_repo_name: JOFLIX/grapevines | max_forks_repo_head_hexsha: 34576e01184570d79cc140b42ffb71d322132da6
max_forks_repo_licenses: ["MIT", "Unlicense"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-11-04T06:48:34.000Z | max_forks_repo_forks_event_max_datetime: 2020-11-04T06:48:34.000Z
content:
import socket
from requests.adapters import HTTPAdapter
from requests.compat import urlparse, unquote
try:
from requests.packages.urllib3.connection import HTTPConnection
from requests.packages.urllib3.connectionpool import HTTPConnectionPool
except ImportError:
from urllib3.connection import HTTPConnection
from urllib3.connectionpool import HTTPConnectionPool
# The following was adapted from some code from docker-py
# https://github.com/docker/docker-py/blob/master/docker/unixconn/unixconn.py
class UnixHTTPConnection(HTTPConnection):
def __init__(self, unix_socket_url, timeout=60):
"""Create an HTTP connection to a unix domain socket
:param unix_socket_url: A URL with a scheme of 'http+unix' and the
netloc is a percent-encoded path to a unix domain socket. E.g.:
'http+unix://%2Ftmp%2Fprofilesvc.sock/status/pid'
"""
HTTPConnection.__init__(self, 'localhost', timeout=timeout)
self.unix_socket_url = unix_socket_url
self.timeout = timeout
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
socket_path = unquote(urlparse(self.unix_socket_url).netloc)
sock.connect(socket_path)
self.sock = sock
class UnixHTTPConnectionPool(HTTPConnectionPool):
def __init__(self, socket_path, timeout=60):
HTTPConnectionPool.__init__(self, 'localhost', timeout=timeout)
self.socket_path = socket_path
self.timeout = timeout
def _new_conn(self):
return UnixHTTPConnection(self.socket_path, self.timeout)
class UnixAdapter(HTTPAdapter):
def __init__(self, timeout=60):
super(UnixAdapter, self).__init__()
self.timeout = timeout
def get_connection(self, socket_path, proxies=None):
proxies = proxies or {}
proxy = proxies.get(urlparse(socket_path.lower()).scheme)
if proxy:
raise ValueError('%s does not support specifying proxies'
% self.__class__.__name__)
return UnixHTTPConnectionPool(socket_path, self.timeout)
avg_line_length: 35.114754 | max_line_length: 77 | alphanum_fraction: 0.710551
qsc_code_num_words_quality_signal: 252 | qsc_code_num_chars_quality_signal: 2,142 | qsc_code_mean_word_length_quality_signal: 5.81746
qsc_code_frac_words_unique_quality_signal: 0.357143 | qsc_code_frac_chars_top_2grams_quality_signal: 0.061392 | qsc_code_frac_chars_top_3grams_quality_signal: 0.044338
qsc_code_frac_chars_top_4grams_quality_signal: 0.034789 | qsc_code_frac_chars_dupe_5grams_quality_signal: 0.129604 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0.047749
qsc_code_frac_chars_dupe_7grams_quality_signal: 0 | qsc_code_frac_chars_dupe_8grams_quality_signal: 0 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0
qsc_code_frac_chars_dupe_10grams_quality_signal: 0 | qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.007051
qsc_code_frac_chars_whitespace_quality_signal: 0.205416 | qsc_code_size_file_byte_quality_signal: 2,142 | qsc_code_num_lines_quality_signal: 60
qsc_code_num_chars_line_max_quality_signal: 78 | qsc_code_num_chars_line_mean_quality_signal: 35.7 | qsc_code_frac_chars_alphabet_quality_signal: 0.854289
qsc_code_frac_chars_comments_quality_signal: 0.169935 | qsc_code_cate_xml_start_quality_signal: 0 | qsc_code_frac_lines_dupe_lines_quality_signal: 0.078947
qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0 | qsc_code_frac_chars_string_length_quality_signal: 0.032221
qsc_code_frac_chars_long_word_length_quality_signal: 0 | qsc_code_frac_lines_string_concat_quality_signal: 0 | qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0.157895 | qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.210526 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0.026316
qsc_codepython_score_lines_no_logic_quality_signal: 0.5 | qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_* / qsc_codepython_* raw counterparts (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data, which are null
effective: 1 | hits: 0
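This is the adapter that the requests-unixsocket package mounts on a `requests` session; a short usage sketch against the percent-encoded socket path from the `UnixHTTPConnection` docstring (the socket itself is hypothetical and must exist for the request to succeed):

```python
import requests

# UnixAdapter as defined in requests_unixsocket/adapters.py above.
from requests_unixsocket.adapters import UnixAdapter

session = requests.Session()
session.mount('http+unix://', UnixAdapter(timeout=60))

# The netloc is the percent-encoded path to the unix domain socket.
resp = session.get('http+unix://%2Ftmp%2Fprofilesvc.sock/status/pid')
print(resp.status_code)
```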
hexsha: 844aff8b757e567eab04101d17c08cb3e245797f | size: 8,032 | ext: py | lang: Python
max_stars_repo_path: profiles_weak.py | max_stars_repo_name: andreuvall/HybridPlaylistContinuation | max_stars_repo_head_hexsha: 6e31e50050c61a2c3ae55183e18b665fd54c7250
max_stars_repo_licenses: ["BSD-2-Clause"] | max_stars_count: 8 | max_stars_repo_stars_event_min_datetime: 2017-06-04T11:42:49.000Z | max_stars_repo_stars_event_max_datetime: 2021-10-19T12:16:01.000Z
max_issues_repo_path: profiles_weak.py | max_issues_repo_name: andreuvall/HybridPlaylistContinuation | max_issues_repo_head_hexsha: 6e31e50050c61a2c3ae55183e18b665fd54c7250
max_issues_repo_licenses: ["BSD-2-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: profiles_weak.py | max_forks_repo_name: andreuvall/HybridPlaylistContinuation | max_forks_repo_head_hexsha: 6e31e50050c61a2c3ae55183e18b665fd54c7250
max_forks_repo_licenses: ["BSD-2-Clause"] | max_forks_count: 5 | max_forks_repo_forks_event_min_datetime: 2017-08-27T17:02:14.000Z | max_forks_repo_forks_event_max_datetime: 2020-06-09T01:21:09.000Z
content:
from __future__ import print_function
from __future__ import division
from sklearn.utils import check_random_state
from sklearn import preprocessing as prep
from utils.data import load_data, show_data_splits, shape_data
from utils.evaluation import evaluate
from utils.profiles import select_model, show_design, train, fit, compute_scores
import theano
import lasagne as lg
import numpy as np
import argparse
import os
'''
Hybrid music playlist continuation based on a song-to-playlist classifier.
We learn a classifier that takes song features as inputs and predicts the
playlists songs belong to. Once it is learned, such classifier can be
used to populate a matrix of song-playlist scores describing how well a song
and a playlist fit together. Thus, a playlist can be extended by selecting
the songs with highest score. This approach is "hybrid" in the usual sense in
the recommender systems literature, i.e., it combines content (given by the
song features) and cf information (given by playlists examples).
As it is, this approach only works on the so-called weak generalization setting.
That is, the model is trained on the same playlists that will be extended.
'''
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hybrid music playlist continuation based on a song-to-playlist classifier.')
parser.add_argument('--model', type=str, help='path to the model specification file', metavar='')
parser.add_argument('--dataset', type=str, help='path to the playlists dataset directory', metavar='')
parser.add_argument('--msd', type=str, help='path to the MSD directory', metavar='')
    parser.add_argument('--train', action='store_true', help='train the song-to-playlist classifier with monitoring')
parser.add_argument('--fit', action='store_true', help='fit the song-to-playlist classifier')
parser.add_argument('--test', action='store_true', help='evaluate the playlist continuations')
parser.add_argument('--ci', action='store_true', help='compute confidence intervals if True')
parser.add_argument('--song_occ', type=int, help='test on songs observed song_occ times during training', nargs='+', metavar='')
parser.add_argument('--metrics_file', type=str, help='file name to save metrics', metavar='')
parser.add_argument('--seed', type=int, help='set random behavior', metavar='')
args = parser.parse_args()
# set random behavior
rng = check_random_state(args.seed)
lg.random.set_rng(rng)
# set model configuration
model = select_model(args.model)
# prepare output directory
data_name = os.path.basename(os.path.normpath(args.dataset))
out_dir = os.path.join('params', 'profiles', model.name + '_' + data_name + '_weak')
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# load data: playlists, splits, features and artist info
data = load_data(args.dataset, args.msd, model)
playlists_coo, split_weak, _, features, song2artist = data
# playlists_coo are the playlists stored in coordinate format
playlists_idx, songs_idx, _, idx2song = playlists_coo
# each playlist is split into a "query" of ~80% of the songs (train_idx +
# valid_idx) and a "continuation" of ~20% of the songs (test_idx)
train_idx, valid_idx, test_idx = split_weak
# define splits for this experiment
# train model on the training queries
# validate model on the validation queries
# fit the model on the full queries
# extend all the playlists, using all queries and continuations
train_idx = train_idx
valid_idx = valid_idx
fit_idx = np.hstack((train_idx, valid_idx))
query_idx = fit_idx
cont_idx = test_idx
# provide data information
show_data_splits(playlists_idx, songs_idx, idx2song, song2artist,
train_idx, valid_idx, fit_idx, query_idx, cont_idx)
# provide model information
print('\nNetwork:')
show_design(model)
if args.train:
#
# train the hybrid model while validating on withheld playlists
#
# prepare input song features and playlist targets at training
X_train, Y_train = shape_data(
playlists_idx, songs_idx, idx2song, features,
mode='train', subset=train_idx
)
# prepare input song features and playlist targets at validation
X_valid, Y_valid = shape_data(
playlists_idx, songs_idx, idx2song, features,
mode='test', subset=valid_idx
)
# preprocess input features if required
# use the training song features to standardize the validation data
if model.standardize:
scaler = prep.RobustScaler()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
if model.normalize:
X_train = prep.normalize(X_train, norm=model.normalize)
X_valid = prep.normalize(X_valid, norm=model.normalize)
# train the classifier
train(
model=model,
train_input=X_train.astype(theano.config.floatX),
train_target=Y_train.astype(np.int8),
valid_input=X_valid.astype(theano.config.floatX),
valid_target=Y_valid.astype(np.int8),
out_dir=out_dir,
random_state=rng
)
if args.fit:
#
# fit the hybrid model
#
# prepare input song features and playlist targets at training
X_fit, Y_fit = shape_data(
playlists_idx, songs_idx, idx2song, features,
mode='train', subset=fit_idx
)
# preprocess input features if required
if model.standardize:
X_fit = prep.robust_scale(X_fit)
if model.normalize:
X_fit = prep.normalize(X_fit, norm=model.normalize)
# fit the classifier
fit(
model=model,
fit_input=X_fit.astype(theano.config.floatX),
fit_target=Y_fit.astype(np.int8),
out_dir=out_dir,
random_state=rng
)
if args.test:
#
# extend the playlists in the query split and evaluate the
# continuations by comparing them to actual withheld continuations
#
# prepare input song features and playlist targets at test
X_cont, Y_cont = shape_data(
playlists_idx, songs_idx, idx2song, features,
mode='test', subset=cont_idx
)
# preprocess input features if required
# use the training song features to standardize the test data
if model.standardize:
X_fit, _ = shape_data(
playlists_idx, songs_idx, idx2song, features,
mode='train', subset=fit_idx
)
scaler = prep.RobustScaler()
scaler.fit(X_fit)
X_cont = scaler.transform(X_cont)
if model.normalize:
X_cont = prep.normalize(X_cont, norm=model.normalize)
# songs in the "query" playlists need to be masked to make sure that
# they are not recommended as continuations
_, Y_query = shape_data(
playlists_idx, songs_idx, idx2song, features,
mode='test', subset=query_idx
)
# get number of song occurrences when fitting for cold-start analysis
# Y_fit = Y_query
train_occ = np.asarray(Y_query.sum(axis=1)).flatten()
# compute the song-playlist scores
cont_output = compute_scores(
model=model,
params_dir=out_dir,
cont_input=X_cont.astype(theano.config.floatX),
cont_target=Y_cont.astype(np.int8)
)
# evaluate the continuations
evaluate(
scores=[cont_output.T],
targets=[Y_cont.T.tocsr()],
queries=[Y_query.T.tocsr()],
train_occ=[train_occ],
k_list=[10, 30, 100],
ci=args.ci,
song_occ=args.song_occ,
metrics_file=args.metrics_file
)
avg_line_length: 37.886792 | max_line_length: 132 | alphanum_fraction: 0.662475
qsc_code_num_words_quality_signal: 1,054 | qsc_code_num_chars_quality_signal: 8,032 | qsc_code_mean_word_length_quality_signal: 4.870968
qsc_code_frac_words_unique_quality_signal: 0.234345 | qsc_code_frac_chars_top_2grams_quality_signal: 0.01753 | qsc_code_frac_chars_top_3grams_quality_signal: 0.033113
qsc_code_frac_chars_top_4grams_quality_signal: 0.031165 | qsc_code_frac_chars_dupe_5grams_quality_signal: 0.257499 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0.21231
qsc_code_frac_chars_dupe_7grams_quality_signal: 0.194196 | qsc_code_frac_chars_dupe_8grams_quality_signal: 0.182898 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0.165758
qsc_code_frac_chars_dupe_10grams_quality_signal: 0.165758 | qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.004343
qsc_code_frac_chars_whitespace_quality_signal: 0.254607 | qsc_code_size_file_byte_quality_signal: 8,032 | qsc_code_num_lines_quality_signal: 211
qsc_code_num_chars_line_max_quality_signal: 133 | qsc_code_num_chars_line_mean_quality_signal: 38.066351 | qsc_code_frac_chars_alphabet_quality_signal: 0.853182
qsc_code_frac_chars_comments_quality_signal: 0.19385 | qsc_code_cate_xml_start_quality_signal: 0 | qsc_code_frac_lines_dupe_lines_quality_signal: 0.188525
qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0 | qsc_code_frac_chars_string_length_quality_signal: 0.10708
qsc_code_frac_chars_long_word_length_quality_signal: 0 | qsc_code_frac_lines_string_concat_quality_signal: 0.008197 | qsc_code_cate_encoded_data_quality_signal: 0
qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0 | qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.098361 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.098361 | qsc_codepython_frac_lines_print_quality_signal: 0.016393
qsc_code_* / qsc_codepython_* raw counterparts (no _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data, which are null
effective: 1 | hits: 0
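The module docstring above describes filling a song-playlist score matrix and extending each playlist with its highest-scoring unseen songs. A toy illustration of that final ranking step (random scores and hypothetical shapes, not the project's actual pipeline):

```python
import numpy as np

# songs x playlists matrix of classifier scores, as in the docstring above.
rng = np.random.RandomState(0)
scores = rng.rand(6, 2)

# Boolean mask of songs already in each playlist's query; those songs must
# not be recommended again, so their scores are suppressed before ranking.
query = np.zeros((6, 2), dtype=bool)
query[[0, 2], 0] = True

masked = np.where(query, -np.inf, scores)
top_k = np.argsort(-masked[:, 0])[:3]  # three best continuations for playlist 0
print(top_k)
```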
hexsha: 844c48d7274f542cdb76ae374555eb9e43a3cc30 | size: 21,999 | ext: py | lang: Python
max_stars_repo_path: deliverable1/analyzer/clientGUI.py | max_stars_repo_name: tonellotto/pira-project | max_stars_repo_head_hexsha: 13f1f40fd3339d60067c09396822af8f3c83239c
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: deliverable1/analyzer/clientGUI.py | max_issues_repo_name: tonellotto/pira-project | max_issues_repo_head_hexsha: 13f1f40fd3339d60067c09396822af8f3c83239c
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: deliverable1/analyzer/clientGUI.py | max_forks_repo_name: tonellotto/pira-project | max_forks_repo_head_hexsha: 13f1f40fd3339d60067c09396822af8f3c83239c
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import analyzer_client as analyzer
from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
from tkinter import ttk
import json
import os
from pathlib import Path
IP_ADDRESS = "localhost"
PORT = "8061"
ENGINE_CURR_OPTIONS = {}
ANALYZE_CURR_OPTIONS = {'language':'en', 'entities': None, 'correlation_id': None, 'score_threshold': "0.1", 'return_decision_process': "0" }
DENY_LIST = {'supported_entities': [], 'valuesList': [], 'length': 0 }
REGEX_LIST = {'entities': [], 'names_pattern': [], 'patterns': [], 'scores': [], 'context_words': [], 'length': 0 }
class Frames(object):
def __init__(self, root):
self.root = root
self.root.title('Presidio Analyzer gRPC Client')
self.root.geometry('650x260')
self.root.configure(bg="#0B0C10")
self.root.resizable(0, 0)
# Title
frameTitle = Frame(self.root, width = 650, height = 60, bg="#0B0C10")
frameTitle.grid(row = 0, columnspan = 2)
Label(frameTitle, text="Microsoft Presidio Analyzer", font=("Helvetica", 17, "bold"), bg="#0B0C10", fg="#C5C6C7", anchor = CENTER).pack(ipady = 20)
# Settings
frameBtnSettings = Frame(self.root, bg="#0B0C10")
frameBtnSettings.grid(row = 2, columnspan = 2)
settingsButton = Button(frameBtnSettings, text="Settings", font=("Helvetica", 14), bg="#0B0C10", fg="#C5C6C7", command = self.settings).pack(pady = 10, ipadx= 33, ipady = 3)
# Start analyzer
frameBtnAnalyze = Frame(self.root, width = 650, height = 1, bg="#0B0C10")
frameBtnAnalyze.grid(row = 1, columnspan = 2)
analyzeBtn = Button(frameTitle, text="Start analyzer", font=("Helvetica", 14), bg="#0B0C10", fg="#C5C6C7", command = self.startAnalyzer).pack(pady = 22, ipadx= 10, ipady = 3)
def startAnalyzer(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
path = Path(dir_path)
self.root.filenames = filedialog.askopenfilenames(initialdir= str(path.parent.absolute()) + "/files", title="Select A File", filetypes=(("txt files", "*.txt"),("all files", "*.*")))
if self.root.filenames:
clientAnalyzer = analyzer.ClientEntity(IP_ADDRESS, PORT)
            # send options if set
for elem in ANALYZE_CURR_OPTIONS:
clientAnalyzer.setupOptions(elem, ANALYZE_CURR_OPTIONS[elem], "ANALYZE_OPTIONS")
if DENY_LIST['length'] > 0:
clientAnalyzer.setupDenyList(DENY_LIST['supported_entities'], DENY_LIST['valuesList'])
if REGEX_LIST['length'] > 0:
patterns = analyzer.createPatternInfo(1, REGEX_LIST['names_pattern'], REGEX_LIST['patterns'], REGEX_LIST['scores'])
clientAnalyzer.setupRegex(REGEX_LIST['entities'][0], patterns, REGEX_LIST['context_words'][0])
progressWindow = Toplevel()
progressWindow.title("Analyzer Status")
progressWindow.geometry("330x80")
progressWindow.configure(bg="white")
self.root.update_idletasks()
Label(progressWindow, text="Analyzer process is starting..it may take a while!", font=("Helvetica", 10), bg="white", fg="black").pack(side=TOP, padx = 15, pady = 7)
progressBar = ttk.Progressbar(progressWindow, orient=HORIZONTAL, length=200, mode="determinate")
progressBar.pack(side=TOP, pady = 14)
self.root.update_idletasks()
filenameList = []
for path in self.root.filenames:
filename, ext = os.path.basename(path).split(".")
filenameList.append(filename)
res = clientAnalyzer.sendRequestAnalyze(os.path.basename(filename))
if res == -2:
progressWindow.destroy()
messagebox.showerror("gRPC Server Error", "Cannot connect to the server! Check your server settings")
break
if progressBar['value'] < 100:
progressBar['value'] += (100/len(self.root.filenames))
self.root.update_idletasks()
if int(progressBar['value']) == 100:
messagebox.showinfo(parent=progressWindow, message='Analyzer process completed!')
progressWindow.destroy()
if res != -2:
clientAnalyzer.closeConnection()
self.readResults(filenameList)
def readResults(self, filenameList):
self.result = Toplevel()
self.result.title("Presidio Analyzer gRPC - RESULTS")
self.result.geometry("850x450")
self.result.configure(bg="#0B0C10")
self.result.resizable(0, 0)
## List filename-results.txt
frameList = Frame(self.result, width = 150, height = 30)
frameList.pack(side=LEFT, padx=13)
# Scrollbar
resultsScrollbar = Scrollbar(frameList, orient=VERTICAL)
listbox_widget = Listbox(frameList, yscrollcommand=resultsScrollbar.set, height = 20, font=("Courier", 12), bg="#1F2833", fg="#C5C6C7")
# configure scrollbar
resultsScrollbar.config(command=listbox_widget.yview)
resultsScrollbar.pack(side=RIGHT, fill=Y)
## END LIST
## Frame that will contain results
frameResults = Frame(self.result, width = 680, bg="#0B0C10")
frameResults.pack(side=RIGHT, pady = 15, padx = 10)
self.text_widget = Text(frameResults, font=("Courier", 13), spacing1=3, bg="#1F2833", fg="#C5C6C7")
self.text_widget.pack(pady = 10, padx= 15)
## END FRAME
for filename in filenameList:
listbox_widget.insert(END, filename)
listbox_widget.bind('<<ListboxSelect>>', self.clickEvent)
listbox_widget.pack()
def clickEvent(self, e):
dir_path = os.path.dirname(os.path.realpath(__file__))
path = Path(dir_path)
currSelection = e.widget.curselection()
filename = e.widget.get(currSelection)
#print(filename)
with open(str(path.parent.absolute()) + "/files/" + filename + ".txt", "r") as originalFile:
originalText = originalFile.read()
with open(str(path.parent.absolute()) + "/analyzer-results/" + filename + "-results.txt", "r") as resultsFile:
self.text_widget.configure(state='normal')
self.text_widget.delete("1.0", END)
for line in resultsFile:
resultStr = json.loads(line)
#print(resultStr)
start = resultStr['start']
end = resultStr['end']
self.text_widget.insert(END, f"FOUND WORD: {originalText[start:end]}\n\n")
self.text_widget.insert(END, f"ENTITY TYPE: {resultStr['entity_type']}\nSTART: {resultStr['start']}\nEND: {resultStr['end']}\nSCORE: {resultStr['score']}")
self.text_widget.insert(END, "\n-------------------------------------------------\n")
self.text_widget.configure(state='disabled')
def settings(self):
self.settings = Toplevel()
self.settings.title("Presidio Analyzer gRPC - Settings")
self.settings.geometry("790x430")
self.settings.configure(bg="#0B0C10")
self.settings.resizable(0, 0)
## List of options
frameList = Frame(self.settings, width = 100, height = 30)
frameList.pack(side=LEFT, padx=8, pady=10)
listbox_widget = Listbox(frameList, height = 20, font=("Courier", 12), bg="#1F2833", fg="#C5C6C7")
## Container options
self.frameOptions = Frame(self.settings, bg="#0B0C10")
self.frameOptions.pack(side=RIGHT, pady = 15, padx = 10, expand = True)
listbox_widget.insert(0, "Server settings")
listbox_widget.insert(1, "PII Recognition")
listbox_widget.insert(2, "Analyzer Options")
listbox_widget.bind('<<ListboxSelect>>', self.clickEventOption)
listbox_widget.pack()
def clickEventOption(self, e):
currSelection = e.widget.curselection()
optionName = e.widget.get(currSelection)
for widget in self.frameOptions.winfo_children():
widget.destroy()
if optionName == "Server settings":
Label(self.frameOptions, text = "SERVER IP: " + IP_ADDRESS + " | SERVER PORT: " + str(PORT), font=("courier", 10), bg="#0B0C10", fg="#C5C6C7").pack(side=TOP)
Label(self.frameOptions, text = "Server IP", font=("helvetica", 15), bg="#0B0C10", fg="#C5C6C7").pack(side=TOP, pady = 10)
self.server_ip = Entry(self.frameOptions, font=("helvetica", 13), justify=CENTER, bd=3)
self.server_ip.pack(anchor=S, pady = 5, padx = 20, ipady = 2)
Label(self.frameOptions, text = "Server Port", font=("helvetica", 15), bg="#0B0C10", fg="#C5C6C7").pack(side=TOP, pady = 10)
self.server_port = Entry(self.frameOptions, font=("helvetica", 13), justify=CENTER, bd=3)
self.server_port.pack(anchor=S, pady = 5, padx = 20, ipady = 2)
Button(self.frameOptions, text = "Save", font=("helvetica", 12), bg="#0B0C10", fg="#C5C6C7", command=self.setupServer).pack(side=TOP, ipadx = 10, pady = 10)
if IP_ADDRESS != "null" and PORT != "null":
self.server_ip.insert(0, IP_ADDRESS)
self.server_port.insert(0, PORT)
elif optionName == "Analyzer Options":
frameNameOptions = Frame(self.frameOptions, width = 650, height = 60, bg="#0B0C10")
frameNameOptions.grid(row = 0, column = 0, padx = 12)
frameValues = Frame(self.frameOptions, width = 650, height = 60, bg="#0B0C10")
frameValues.grid(row = 0, column = 1)
Label(frameNameOptions, text = "LANGUAGE", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 0, column = 0, pady = 5)
self.language = Entry(frameValues, font=("helvetica", 13), bd=3)
self.language.grid(row = 0, column = 0, pady = 5)
Label(frameNameOptions, text = "ENTITIES", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 1, column = 0, pady = 5)
self.entities = Entry(frameValues, font=("helvetica", 13), bd=3)
self.entities.grid(row = 1, column = 0, pady = 5)
Label(frameNameOptions, text = "CORRELATION ID", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 2, column = 0, pady = 5)
self.corr_id = Entry(frameValues, font=("helvetica", 13), bd=3)
self.corr_id.grid(row = 2, column = 0, pady = 5)
Label(frameNameOptions, text = "SCORE THRESHOLD", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 3, column = 0, pady = 5)
self.score = Entry(frameValues, font=("helvetica", 13), bd=3)
self.score.grid(row = 3, column = 0, pady = 5)
self.decision_process = IntVar(None, int(ANALYZE_CURR_OPTIONS['return_decision_process']))
Label(frameNameOptions, text = "RETURN DECISION PROCESS", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 4, column = 0, pady = 5)
Radiobutton(frameValues, text="YES", font=("helvetica", 10), variable=self.decision_process, value=1).grid(row=4, sticky=W, pady = 5)
Radiobutton(frameValues, text="NO", font=("helvetica", 10), variable=self.decision_process, value=0).grid(row=4, sticky=E, pady = 5)
Button(self.frameOptions, text = "Save", font=("helvetica", 12), bg="#0B0C10", fg="#C5C6C7", command=self.saveAnalyzeConfig).grid(row = 5, columnspan = 2, ipadx = 10, pady = 20)
# load the current config
self.language.insert(0, ANALYZE_CURR_OPTIONS['language'])
if ANALYZE_CURR_OPTIONS['entities'] != None:
self.entities.insert(0, ANALYZE_CURR_OPTIONS['entities'])
if ANALYZE_CURR_OPTIONS['correlation_id'] != None:
self.corr_id.insert(0, ANALYZE_CURR_OPTIONS['correlation_id'])
self.score.insert(0, ANALYZE_CURR_OPTIONS['score_threshold'])
elif optionName == "PII Recognition":
frameMenu = Frame(self.frameOptions, bg="#0B0C10")
frameMenu.grid(row = 0, column = 0, padx = 12)
self.frameInsertOption = Frame(self.frameOptions, width = 300, height = 150, bg="#0B0C10")
self.frameInsertOption.grid(row = 0, column = 1, padx = 12)
# menu options
self.value_inside = StringVar()
# Set the default value of the variable
self.value_inside.set("Select an option")
recognition_menu = OptionMenu(frameMenu, self.value_inside, "Select an option", *("Regex", "Deny List"), command=self.optionChanged)
recognition_menu.pack()
self.frameCurr = Frame(self.frameOptions, width = 520, height = 100, bg="#0B0C10")
self.frameCurr.grid(row = 1, columnspan = 2, pady = 7)
def setupServer(self):
global IP_ADDRESS, PORT
IP_ADDRESS = self.server_ip.get()
PORT = self.server_port.get()
messagebox.showinfo(parent=self.settings, title = "Save", message=f"Server options saved succefully!")
def saveAnalyzeConfig(self):
if self.language.get() != "en":
messagebox.showerror("Setup Error", "Only English language is supported!")
else:
ANALYZE_CURR_OPTIONS['language'] = self.language.get()
if self.entities.get() == "" or str(self.entities.get()).lower() == "none":
ANALYZE_CURR_OPTIONS['entities'] = None
else:
ANALYZE_CURR_OPTIONS['entities'] = self.entities.get()
if self.corr_id.get() == "":
ANALYZE_CURR_OPTIONS['correlation_id'] = None
else:
ANALYZE_CURR_OPTIONS['correlation_id'] = self.corr_id.get()
ANALYZE_CURR_OPTIONS['score_threshold'] = self.score.get()
ANALYZE_CURR_OPTIONS['return_decision_process'] = str(self.decision_process.get())
print(ANALYZE_CURR_OPTIONS)
messagebox.showinfo(parent=self.settings, title = "Save", message="Options saved successfully!")
def optionChanged(self, e):
for widget in self.frameInsertOption.winfo_children():
widget.destroy()
for widget in self.frameCurr.winfo_children():
widget.destroy()
if self.value_inside.get() == "Deny List":
Label(self.frameInsertOption, text = "ENTITY", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 0, column = 0, pady = 5, padx = 5)
self.entity = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
self.entity.grid(row = 0, column = 1, pady = 5)
Label(self.frameInsertOption, text = "VALUES LIST", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 1, column = 0, pady = 5, padx = 5)
self.values = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
self.values.grid(row = 1, column = 1, pady = 5)
Button(self.frameInsertOption, text = "Save", font=("helvetica", 12), bg="#0B0C10", fg="#C5C6C7", command=self.setupDenyList).grid(row=3, column = 0, ipadx = 10, pady = 20)
Button(self.frameInsertOption, text = "Reset", font=("helvetica", 12), bg="#0B0C10", fg="#C5C6C7", command=self.clearDenyConfig).grid(row=3, column = 1, ipadx = 10, pady = 20)
# Print current deny lists
self.deny_widget = Text(self.frameCurr, font=("helvetica", 13), width = 60, height = 10, spacing1=3, bg="#1F2833", fg="#C5C6C7")
self.deny_widget.grid(row = 0, column = 0)
for i in range(DENY_LIST['length']):
self.deny_widget.insert(END, f"{DENY_LIST['supported_entities'][i]} - {DENY_LIST['valuesList'][i]}\n")
self.deny_widget.configure(state='disabled')
elif self.value_inside.get() == "Regex":
Label(self.frameInsertOption, text = "ENTITY", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 0, column = 0, pady = 5, padx = 5)
self.entity_regex = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
self.entity_regex.grid(row = 0, column = 1, pady = 5)
Label(self.frameInsertOption, text = "NAME PATTERN", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 1, column = 0, pady = 5, padx = 5)
self.name_pattern = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
self.name_pattern.grid(row = 1, column = 1, pady = 5)
Label(self.frameInsertOption, text = "REGEX", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 2, column = 0, pady = 5, padx = 5)
self.regex = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
self.regex.grid(row = 2, column = 1, pady = 5)
Label(self.frameInsertOption, text = "SCORE", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 3, column = 0, pady = 5, padx = 5)
self.score_regex = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
self.score_regex.grid(row = 3, column = 1, pady = 5)
Label(self.frameInsertOption, text = "CONTEXT WORD", font=("helvetica", 13), bg="#0B0C10", fg="#C5C6C7").grid(row = 4, column = 0, pady = 5, padx = 5)
self.context = Entry(self.frameInsertOption, font=("helvetica", 13), bd=3)
self.context.grid(row = 4, column = 1, pady = 5)
Button(self.frameInsertOption, text = "Save", font=("helvetica", 12), bg="#0B0C10", fg="#C5C6C7", command=self.setupRegexList).grid(row=5, column = 0, ipadx = 10, pady = 10)
Button(self.frameInsertOption, text = "Reset", font=("helvetica", 12), bg="#0B0C10", fg="#C5C6C7", command=self.clearRegexConfig).grid(row=5, column = 1, ipadx = 10, pady = 10)
self.regex_widget = Text(self.frameCurr, font=("helvetica", 13), width = 60, height = 6, spacing1=3, bg="#1F2833", fg="#C5C6C7")
self.regex_widget.grid(row = 0, column = 0)
# print current regex patterns
for i in range(REGEX_LIST['length']):
self.regex_widget.insert(END, f"{REGEX_LIST['entities'][i]} - {REGEX_LIST['names_pattern'][i]} - {REGEX_LIST['patterns'][i]} - {REGEX_LIST['scores'][i]} - {REGEX_LIST['context_words'][i]}\n")
self.regex_widget.configure(state='disabled')
def setupDenyList(self):
if len(self.entity.get()) > 2 and len(self.values.get()) > 2:
DENY_LIST['supported_entities'].append(self.entity.get())
DENY_LIST['valuesList'].append(self.values.get())
DENY_LIST['length'] += 1
self.deny_widget.configure(state='normal')
self.deny_widget.insert(END, f"{self.entity.get()} - {self.values.get()}\n")
self.deny_widget.configure(state='disabled')
messagebox.showinfo(parent=self.settings, title = "Save", message=f"Deny list for {self.entity.get()} saved!")
else:
messagebox.showerror(parent=self.settings, title ="Error", message="Fill in all the fields!")
#print(DENY_LIST)
def clearDenyConfig(self):
answer = messagebox.askyesno(parent=self.settings, title = None, message="Do you want to reset deny list configuration?")
if answer:
DENY_LIST['supported_entities'] = []
DENY_LIST['valuesList'] = []
DENY_LIST['length'] = 0
self.deny_widget.configure(state='normal')
self.deny_widget.delete("1.0", END)
self.deny_widget.configure(state='disabled')
def setupRegexList(self):
if len(self.entity_regex.get()) > 2:
REGEX_LIST['entities'].append(self.entity_regex.get())
REGEX_LIST['names_pattern'].append(self.name_pattern.get())
REGEX_LIST['patterns'].append(self.regex.get())
REGEX_LIST['scores'].append(self.score_regex.get())
REGEX_LIST['context_words'].append(self.context.get())
REGEX_LIST['length'] += 1
self.regex_widget.configure(state='normal')
self.regex_widget.insert(END, f"{self.entity_regex.get()} - {self.name_pattern.get()} - {self.regex.get()} - {self.score_regex.get()} - {self.context.get()}\n")
self.regex_widget.configure(state='disabled')
messagebox.showinfo(parent=self.settings, title = "Save", message=f"Regex for {self.entity_regex.get()} saved!")
else:
messagebox.showerror(parent=self.settings, title ="Error", message="Fill in all the fields!")
#print(REGEX_LIST)
def clearRegexConfig(self):
answer = messagebox.askyesno(parent=self.settings, title = None, message="Do you want to reset regex configuration?")
if answer:
REGEX_LIST['entities'] = []
REGEX_LIST['names_pattern'] = []
REGEX_LIST['patterns'] = []
REGEX_LIST['scores'] = []
REGEX_LIST['context_words'] = []
REGEX_LIST['length'] = 0
self.regex_widget.configure(state='normal')
self.regex_widget.delete("1.0", END)
self.regex_widget.configure(state='disabled')
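# For reference, the recognizer-configuration globals manipulated above are
# expected to look like this (a sketch inferred from the accesses in this
# class, not a verbatim copy of their definitions earlier in the file):
# DENY_LIST = {'length': 0, 'supported_entities': [], 'valuesList': []}
# REGEX_LIST = {'length': 0, 'entities': [], 'names_pattern': [],
#               'patterns': [], 'scores': [], 'context_words': []}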
root = Tk()
app = Frames(root)
root.mainloop()
| 50.456422
| 208
| 0.589027
| 2,486
| 21,999
| 5.125503
| 0.131537
| 0.04183
| 0.031785
| 0.030137
| 0.460995
| 0.401428
| 0.347905
| 0.292811
| 0.261419
| 0.202558
| 0
| 0.042692
| 0.263194
| 21,999
| 436
| 209
| 50.456422
| 0.743414
| 0.017592
| 0
| 0.12628
| 0
| 0.010239
| 0.164019
| 0.027896
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044369
| false
| 0
| 0.027304
| 0
| 0.075085
| 0.003413
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
844ee290c97366006e042d8ac5ba0899c883ac56
| 1,903
|
py
|
Python
|
kge/core/component.py
|
Fredkiss3/kge
|
389d5ab21ecb6dc1a25dd9f98245ba5938a5d253
|
[
"CC0-1.0"
] | 4
|
2020-03-17T02:15:10.000Z
|
2021-06-29T13:34:40.000Z
|
kge/core/component.py
|
Fredkiss3/kge
|
389d5ab21ecb6dc1a25dd9f98245ba5938a5d253
|
[
"CC0-1.0"
] | 4
|
2020-05-23T05:47:30.000Z
|
2022-01-13T02:15:35.000Z
|
kge/core/component.py
|
Fredkiss3/kge
|
389d5ab21ecb6dc1a25dd9f98245ba5938a5d253
|
[
"CC0-1.0"
] | null | null | null |
from typing import Callable
import kge
from kge.core import events
from kge.core.eventlib import EventMixin
from kge.core.events import Event
class BaseComponent(EventMixin):
"""
A component represents an element that can be added to an entity
to add a functionality
"""
def __fire_event__(self, event: Event, dispatch: Callable[[Event], None]):
"""
Initialize the component before everything
"""
if event.scene is not None:
if event.scene.engine.running:
if not self._initialized and not isinstance(event, events.SceneStopped) and \
not isinstance(event, events.Init):
# Initialize the component
super(BaseComponent, self).__fire_event__(events.Init(scene=event.scene), dispatch)
self._initialized = True
# fire event
super(BaseComponent, self).__fire_event__(event, dispatch)
if isinstance(event, events.Init) and not self._initialized:
self._initialized = True
def on_scene_stopped(self, ev, dispatch):
self._initialized = False
nbItems = 0
def __init__(self, entity=None):
if entity is not None:
if not isinstance(entity, kge.Entity):
raise TypeError("entity should be of type 'kge.Entity' or a subclass of 'kge.Entity'")
self.entity = entity # type: kge.Entity
type(self).nbItems += 1
self.name = f"new {type(self).__name__} {type(self).nbItems}"
# Used to Initialize component
self._initialized = False
# Used to tell if the component is active
self.is_active = True
def __repr__(self):
return f"component {type(self).__name__} of entity '{self.entity}'"
Component = BaseComponent
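# Minimal usage sketch: subclassing is the intended entry point. The entity
# passed in is an assumption for illustration, not an object defined here.
#
#   class Health(Component):
#       def on_scene_stopped(self, ev, dispatch):
#           super().on_scene_stopped(ev, dispatch)  # keep the reset behaviour
#
#   comp = Health(entity=some_kge_entity)  # raises TypeError for non-Entity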
| 33.982143
| 104
| 0.603783
| 218
| 1,903
| 5.105505
| 0.316514
| 0.080863
| 0.02965
| 0.019766
| 0.104223
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001535
| 0.315292
| 1,903
| 55
| 105
| 34.6
| 0.852648
| 0.132948
| 0
| 0.129032
| 0
| 0
| 0.104005
| 0.013566
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.16129
| 0.032258
| 0.387097
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
844f9857dd2ca03aee9ac58b1348e52e4bc8e0ee
| 766
|
py
|
Python
|
src/870. Advantage Shuffle.py
|
rajshrivastava/LeetCode
|
dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0
|
[
"MIT"
] | 1
|
2019-12-16T08:18:25.000Z
|
2019-12-16T08:18:25.000Z
|
src/870. Advantage Shuffle.py
|
rajshrivastava/LeetCode
|
dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0
|
[
"MIT"
] | null | null | null |
src/870. Advantage Shuffle.py
|
rajshrivastava/LeetCode
|
dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0
|
[
"MIT"
] | null | null | null |
from typing import List

class Solution:
def advantageCount(self, A: List[int], B: List[int]) -> List[int]:
n=len(A)
A.sort()
B_sorted_idxs = sorted(list(range(0,n)), key = lambda x: B[x])
permuted_A = [-1]*n
j = 0  # index into sorted A
remainingA = []
for idx in B_sorted_idxs:
while(j<n and A[j] <= B[idx]):
remainingA.append(A[j])
j += 1
if j == n:
break
else:
permuted_A[idx] = A[j]
A[j] = None
j += 1
j = 0
for val in remainingA:
while permuted_A[j] != -1:
j+=1
permuted_A[j] = val
j += 1
return permuted_A
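# Hedged sanity check using the problem's sample input (relies on the
# `from typing import List` import added above):
if __name__ == "__main__":
    # each output element beats the corresponding element of B = [1, 10, 4, 11]
    print(Solution().advantageCount([2, 7, 11, 15], [1, 10, 4, 11]))  # [2, 11, 7, 15]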
| 27.357143
| 70
| 0.399478
| 98
| 766
| 3.030612
| 0.367347
| 0.040404
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022556
| 0.479112
| 766
| 27
| 71
| 28.37037
| 0.721805
| 0.015666
| 0
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
84508cc0743106693c25a4c91852516182d10958
| 11,162
|
py
|
Python
|
generate_population_dataset.py
|
p-enel/stable-and-dynamic-value
|
3f78e24f5bef9b12b8cc43d075d2e66b8a603325
|
[
"CC0-1.0"
] | 1
|
2020-07-29T09:18:00.000Z
|
2020-07-29T09:18:00.000Z
|
generate_population_dataset.py
|
p-enel/stable-and-dynamic-value
|
3f78e24f5bef9b12b8cc43d075d2e66b8a603325
|
[
"CC0-1.0"
] | null | null | null |
generate_population_dataset.py
|
p-enel/stable-and-dynamic-value
|
3f78e24f5bef9b12b8cc43d075d2e66b8a603325
|
[
"CC0-1.0"
] | 3
|
2020-07-27T03:12:19.000Z
|
2021-11-02T20:03:00.000Z
|
from pathlib import Path
import numpy as np
import pickle as pk
from itertools import chain, product
from collections import OrderedDict
from structure import Struct
MONKEYS = ['M', 'N']
REGIONS = ['OFC', 'ACC']
TASKVARS = ['value', 'type']
SUBSPACES = [True, False]
EVT_WINS = OrderedDict((('cues ON', (-500, 1500)),
('response cue', (-500, 500)),
('rwd', (-400, 400))))
def pp_from_filename(filename):
'''Get the preprocessing parameters from a unit data set filename
Arguments:
filename - str or Path: name or full path of unit data set file
'''
fnamestr = filename if isinstance(filename, str) else filename.name
params = [paramstr.split('.') for paramstr in fnamestr.split('_')[2:]]
preproc_params = {'align': params[0][1],
'binsize': int(params[1][1]),
'smooth': params[2][1],
'smoothsize': int(params[3][1]),
'step': int(params[4][1])}
return preproc_params
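# Worked example, using the unit data set file named in __main__ below:
# pp_from_filename("unit_dataset_align.center_binsize.100_smooth.gaussian_smoothsize.100_step.25.pk")
# returns {'align': 'center', 'binsize': 100, 'smooth': 'gaussian',
#          'smoothsize': 100, 'step': 25}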
def get_dataset_fname(dataseed, pp):
'''Generate the file name of a population data set given data seed and preprocessing parameters
Arguments:
dataseed - int: the seed of the data set that will be included in the file name
pp - dict: the pre-processing parameters of the data set'''
fname = "population_dataset_align.{align}_binsize.{binsize}_smooth.{smooth}"
fname += "_smoothsize.{smoothsize}_step.{step}_seed.%d.pk" % dataseed
fname = fname.format(**pp)
return fname
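# Correspondingly, with dataseed=634564236 and the parameters above, this yields
# "population_dataset_align.center_binsize.100_smooth.gaussian_smoothsize.100_step.25_seed.634564236.pk"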
def generate_dataset(dataseed, unit_folder, unit_file, save_folder=None):
'''Generate a pseudo-population by combining data from monkeys and sessions
Arguments:
dataseed - int: the seed for pseudo-random selection of the trials to be
part of the data set
unit_file - str: the path to the file containing the unit data set
save_folder - str or Path: optional, a folder to save the generated data
set. After being saved once, if the same folder is specified, it will be
loaded instead of being generated.
Returns:
X - Structure: A structure that contains the pseudo-population firing rate
data. The structure contains 3 levels:
- monkey: which can take values 'M' or 'N' for individual monkey data,
or 'both' for the data of both monkeys combined
- region: which can take value 'OFC' or 'ACC'
- task variable: which can take value 'value' or 'type' for data sets
targeted to decoding these variables
The elements of the structure are numpy arrays of the shape:
trials x bins x neurons
Example:
X['N', 'ACC', 'value'] contains a matrix of the pseudo-population
firing rate of monkey N for region ACC meant to decode value
y - Structure: A structure of numpy vectors with the same map as 'X' that
contains the ground truth of the related variable for each trial.
Example:
y['N', 'ACC', 'value'] contains the value of each trials of monkey N
for ACC population.
delaymask - numpy vector of booleans: A boolean mask for the time bin
dimension to select time bins that are part of the delay activity
bins - numpy vector of ints: The time of each bin of the firing rate data
in the structure X, with events ordered this way:
'cues ON' -> 'response cue' -> 'rwd'
'''
events = list(EVT_WINS.keys())
pp = pp_from_filename(unit_file)
if save_folder is not None:
dataset_fname = get_dataset_fname(dataseed, pp)
dataset_fullpath = Path(save_folder)/dataset_fname
if dataset_fullpath.exists():
print("Data set already generated, loading...")
with open(dataset_fullpath, 'rb') as f:
X, y, delaymask, bins = pk.load(f)
return X, y, delaymask, bins
with open(Path(unit_folder)/unit_file, 'rb') as f:
data = pk.load(f)
evtxs = data['M']['OFC'][0]['bins']
#### Format the data for decoding
#################################
keymap = [MONKEYS, REGIONS, TASKVARS]
act = Struct.new_empty(keymap)
minntrials = Struct.new_empty(keymap)
for monkey, region in product(MONKEYS, REGIONS):
act[monkey, region, 'value'] = [[] for _ in range(4)]
act[monkey, region, 'type'] = [[], []]
minntrials[monkey, region, 'value'] = [[] for _ in range(4)]
minntrials[monkey, region, 'type'] = [[], []]
datamr = data[monkey][region]
## Select bins that are within the window of interest for each event
## then concatenate the activity of the different events in a single tensor
catepochs = []
for sessdata in datamr:
if sessdata['fr'] is not None:
cattmp = []
for evt in events:
included_bins = (evtxs[evt] >= EVT_WINS[evt][0]) & (evtxs[evt] <= EVT_WINS[evt][1])
cattmp.append(sessdata['fr'][evt][included_bins])
catepochs.append(np.concatenate(cattmp))
else:
catepochs.append(None)
## Separate trials by value and type
for sessfr, sessdata in zip(catepochs, datamr):
if sessfr is not None:
if sessdata['fr'] is not None:
sessvars = sessdata['vars']
for val in range(1, 5):
trialbool = (sessvars.value == val)
act[monkey, region, 'value'][val-1].append(sessfr[:, :, trialbool])
for itype, type_ in enumerate(['juice', 'bar']):
trialbool = (sessvars.type == type_)
act[monkey, region, 'type'][itype].append(sessfr[:, :, trialbool])
## Get the minimum number of trials across all sessions for each value/type
minntrials[monkey, region, 'value'] = [np.nanmin([sessfr.shape[2] for sessfr in valdata])
for valdata in act[monkey, region, 'value']]
minntrials[monkey, region, 'type'] = [np.nanmin([sessfr.shape[2] for sessfr in typedata])
for typedata in act[monkey, region, 'type']]
## Get the minimum number of trials for pooled data across monkeys
minntrials.move_level_(0, 2)
mintogether = minntrials.apply(lambda x: [min(valmin) for valmin in zip(*x.values())], depth=2)
mintogether = Struct.from_nested_dict({'both': mintogether.ndict}, n_layers=3)
minntrials.move_level_(2, 0)
minntrials = minntrials.combine(mintogether)
# extra trials are discarded after trials are shuffled
np.random.seed(dataseed)
catactboth = Struct.empty_like(act, values=list)
# taskvar, monkey, region = next(product(TASKVARS, MONKEYS, REGIONS))
for taskvar, monkey, region in product(TASKVARS, MONKEYS, REGIONS):
keymap = [monkey, region, taskvar]
minns = minntrials['both', region, taskvar]
# minn, acttmp = next(zip(minns, act[keymap]))
for minn, acttmp in zip(minns, act[keymap]):
tocat = []
for sessdata in acttmp:
ntrials = sessdata.shape[2]
trialind = np.arange(ntrials)
np.random.shuffle(trialind)
tmp = sessdata[:, :, trialind]
tocat.append(tmp[:, :, :minn])
catactboth[keymap].append(np.concatenate(tocat, 1))
catact = Struct.empty_like(act, values=list)
for taskvar, monkey, region in product(TASKVARS, MONKEYS, REGIONS):
keymap = [monkey, region, taskvar]
minns = minntrials[keymap]
for minn, acttmp in zip(minns, act[keymap]):
tocat = []
for sessdata in acttmp:
ntrials = sessdata.shape[2]
trialind = np.arange(ntrials)
np.random.shuffle(trialind)
tmp = sessdata[:, :, trialind]
tocat.append(tmp[:, :, :minn])
catact[keymap].append(np.concatenate(tocat, 1))
catactboth.move_level_(0, 2)
def cat_monkeys(x):
'''x: {monkey}[4 (values)] np.array<nbins*nneurons*ntrials>'''
return [np.concatenate([x['M'][ival], x['N'][ival]], axis=1) for ival in range(len(x['M']))]
catactboth.apply_agg_(cat_monkeys, depth=2)
catactboth = Struct.from_nested_dict({'both': catactboth.ndict}, n_layers=3)
catact = catact.combine(catactboth)
#### Moving data from arrays to a list ####
def get_actvallist(x):
tmp = [[(trial, ival) for trial in np.moveaxis(x[ival], 2, 0)] for ival in range(len(x))]
return list(zip(*chain(*zip(*tmp))))
actvallist = catact.apply(get_actvallist)
X, y = actvallist.apply(lambda x: x[0]), actvallist.apply(lambda x: x[1])
X.apply_(np.stack)
y.apply_(np.array)
del catact, act
#### Defining a boolean mask to get only the bins between cue ON and rwd
########################################################################
cuesON_bins_mask = (evtxs['cues ON'] >= EVT_WINS['cues ON'][0]) & (evtxs['cues ON'] <= EVT_WINS['cues ON'][1])
cuesON_bins = evtxs['cues ON'][cuesON_bins_mask]
resp_bins_mask = (evtxs['response cue'] >= EVT_WINS['response cue'][0]) &\
(evtxs['response cue'] <= EVT_WINS['response cue'][1])
resp_bins = evtxs['response cue'][resp_bins_mask]
rwd_bins_mask = (evtxs['rwd'] >= EVT_WINS['rwd'][0]) & (evtxs['rwd'] <= EVT_WINS['rwd'][1])
rwd_bins = evtxs['rwd'][rwd_bins_mask]
delaymask = np.concatenate((cuesON_bins >= 0, np.ones(resp_bins.shape, dtype=bool), rwd_bins <= 0))
bins = {}
for evt, (start, end) in EVT_WINS.items():
xs = evtxs[evt]
bins[evt] = xs[(xs >= start) & (xs <= end)]
if save_folder is not None:
with open(dataset_fullpath, 'wb') as f:
pk.dump((X, y, delaymask, bins), f)
print(f'data set created and saved in {unit_folder}')
return X, y, delaymask, bins
# The following is an example. Replace the right hand side of the first three
# statements to get a specific data set
if __name__ == '__main__':
# Data seeds used to generate the pseudo population data for decoding are
# listed below:
# dataseeds = [634564236, 9453241, 70010207, 43661999, 60410205]
dataseed = 634564236
# The following folder path must contain the unit data set file specified
# below
unit_folder = Path("/home/john/datasets")
# The following statement specifies which unit data set (with which
# preprocessing parameters) is to be used to generate the population data
# set
unit_file = "unit_dataset_align.center_binsize.100_smooth.gaussian_smoothsize.100_step.25.pk"
# The last argument of the function allows you to save the data set in a
# specified folder, or to load an already generated population data set if
# it already exists in this folder. In this example the population data set
# is saved in the same folder as the unit data set.
X, y, delaymask, bins = generate_dataset(dataseed, unit_folder, unit_file,
save_folder=unit_folder)
| 43.601563
| 114
| 0.614854
| 1,460
| 11,162
| 4.620548
| 0.222603
| 0.018678
| 0.009784
| 0.011118
| 0.231396
| 0.169878
| 0.126594
| 0.101097
| 0.091906
| 0.076786
| 0
| 0.015343
| 0.26429
| 11,162
| 255
| 115
| 43.772549
| 0.806137
| 0.308726
| 0
| 0.181818
| 0
| 0
| 0.077734
| 0.025956
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034965
| false
| 0
| 0.041958
| 0
| 0.118881
| 0.013986
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8450d07e5cec286e40f858637377c3e87f1ab9e5
| 634
|
py
|
Python
|
setup.py
|
joepatmckenna/ohmlr
|
2f3e63243758b995596f37897814634fc432f337
|
[
"MIT"
] | null | null | null |
setup.py
|
joepatmckenna/ohmlr
|
2f3e63243758b995596f37897814634fc432f337
|
[
"MIT"
] | null | null | null |
setup.py
|
joepatmckenna/ohmlr
|
2f3e63243758b995596f37897814634fc432f337
|
[
"MIT"
] | null | null | null |
import setuptools
with open('README.rst', 'r') as f:
readme = f.read()
with open('version', 'r') as f:
version = f.read()
if __name__ == '__main__':
setuptools.setup(
name='ohmlr',
version=version,
description='One-hot multinomial logistic regression',
long_description=readme,
author='Joseph P. McKenna',
author_email='joepatmckenna@gmail.com',
url='http://joepatmckenna.github.io/ohmlr',
download_url='https://pypi.org/project/ohmlr',
packages=['ohmlr'],
license='MIT',
keywords=['inference', 'statistics', 'machine learning'])
| 27.565217
| 65
| 0.615142
| 70
| 634
| 5.414286
| 0.685714
| 0.042216
| 0.021108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230284
| 634
| 22
| 66
| 28.818182
| 0.776639
| 0
| 0
| 0
| 0
| 0
| 0.347003
| 0.036278
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8450ee0e08874b8a26468c905f5abfbc7260c448
| 1,301
|
py
|
Python
|
commands/climber/holdcimbersposition.py
|
1757WestwoodRobotics/2022-RapidReact
|
b6d9cf203fd35e93dc5d26ba2d6889e2a9edb137
|
[
"MIT"
] | 1
|
2022-01-21T22:00:24.000Z
|
2022-01-21T22:00:24.000Z
|
commands/climber/holdcimbersposition.py
|
1757WestwoodRobotics/2022-RapidReact
|
b6d9cf203fd35e93dc5d26ba2d6889e2a9edb137
|
[
"MIT"
] | 40
|
2022-01-18T21:20:54.000Z
|
2022-03-31T20:56:44.000Z
|
commands/climber/holdcimbersposition.py
|
1757WestwoodRobotics/2022-RapidReact
|
b6d9cf203fd35e93dc5d26ba2d6889e2a9edb137
|
[
"MIT"
] | 1
|
2022-01-28T02:46:38.000Z
|
2022-01-28T02:46:38.000Z
|
from commands2 import CommandBase, ParallelCommandGroup
from subsystems.climbers.leftclimbersubsystem import LeftClimber
from subsystems.climbers.rightclimbersubsystem import RightClimber
class HoldLeftClimberPosition(CommandBase):
def __init__(self, climber: LeftClimber) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.climber = climber
self.addRequirements([self.climber])
def initialize(self) -> None:
self.climber.leftClimber.climberMotor.neutralOutput()
self.climber.leftClimber.activateBrake()
class HoldRightClimberPosition(CommandBase):
def __init__(self, climber: RightClimber) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.climber = climber
self.addRequirements([self.climber])
def initialize(self) -> None:
self.climber.rightClimber.climberMotor.neutralOutput()
self.climber.rightClimber.activateBrake()
class HoldBothClimbersPosition(ParallelCommandGroup):
def __init__(self, leftClimber: LeftClimber, rightClimber: RightClimber):
super().__init__(
HoldLeftClimberPosition(leftClimber),
HoldRightClimberPosition(rightClimber),
)
self.setName(__class__.__name__)
| 35.162162
| 77
| 0.730976
| 109
| 1,301
| 8.284404
| 0.256881
| 0.121816
| 0.036545
| 0.066445
| 0.336656
| 0.272425
| 0.272425
| 0.272425
| 0.272425
| 0.272425
| 0
| 0.000941
| 0.182936
| 1,301
| 36
| 78
| 36.138889
| 0.848542
| 0
| 0
| 0.392857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178571
| false
| 0
| 0.107143
| 0
| 0.392857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
84533ec2f7f2ad9597755a4499563c795ed9f246
| 737
|
py
|
Python
|
algo/visualizations/temporalchart.py
|
alexeyev/visartm
|
d19e193b3c084d7f355a45b966c8bb2ebb6fa366
|
[
"BSD-3-Clause"
] | 1
|
2020-10-01T10:11:21.000Z
|
2020-10-01T10:11:21.000Z
|
algo/visualizations/temporalchart.py
|
alexeyev/visartm
|
d19e193b3c084d7f355a45b966c8bb2ebb6fa366
|
[
"BSD-3-Clause"
] | null | null | null |
algo/visualizations/temporalchart.py
|
alexeyev/visartm
|
d19e193b3c084d7f355a45b966c8bb2ebb6fa366
|
[
"BSD-3-Clause"
] | null | null | null |
from models.models import Topic, TopicInTopic
import json
def visual(vis, params):
model = vis.model
group_by = params[1] # year,month,week,day
topics = Topic.objects.filter(
model=model,
layer=model.layers_count).order_by("spectrum_index")
topics = [topic.title for topic in topics]
cells, dates = model.group_matrix(group_by=group_by, named_groups=False)
topics_count = len(topics)
dates_count = len(dates)
charts = [[topics[y]] + [len(cells[x][y]) for x in range(dates_count)]
for y in range(topics_count)]
dates = [str(date.date()) for date in dates]
return "charts=" + json.dumps(charts) + ";\n" + \
"dates=" + json.dumps(['date'] + dates) + ";\n"
| 32.043478
| 76
| 0.639077
| 103
| 737
| 4.456311
| 0.446602
| 0.045752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001733
| 0.217096
| 737
| 22
| 77
| 33.5
| 0.793761
| 0.02578
| 0
| 0
| 0
| 0
| 0.051676
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
84580bc22605d3bb58c5f232f6e1f847342e88fa
| 3,596
|
py
|
Python
|
submissions-api/app/main/model/submissions_manifest.py
|
sanger-tol/tol-submissions
|
8dbbfaa98b1dfa09a09cb54cf1b2eb9d1dca5331
|
[
"MIT"
] | null | null | null |
submissions-api/app/main/model/submissions_manifest.py
|
sanger-tol/tol-submissions
|
8dbbfaa98b1dfa09a09cb54cf1b2eb9d1dca5331
|
[
"MIT"
] | null | null | null |
submissions-api/app/main/model/submissions_manifest.py
|
sanger-tol/tol-submissions
|
8dbbfaa98b1dfa09a09cb54cf1b2eb9d1dca5331
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2021 Genome Research Ltd.
#
# SPDX-License-Identifier: MIT
from .base import Base, db
class SubmissionsManifest(Base):
__tablename__ = "manifest"
manifest_id = db.Column(db.Integer, primary_key=True)
samples = db.relationship('SubmissionsSample', back_populates="manifest",
lazy=False, order_by='SubmissionsSample.row')
created_at = db.Column(db.DateTime, nullable=False, default=db.func.now())
created_by = db.Column(db.Integer, db.ForeignKey('user.user_id'))
user = db.relationship("SubmissionsUser", uselist=False, foreign_keys=[created_by])
submission_status = db.Column(db.Boolean, nullable=True)
project_name = db.Column(db.String(), nullable=False, default="ToL")
sts_manifest_id = db.Column(db.String(), nullable=True)
excel_file = db.Column(db.String(), nullable=True)
target_rack_plate_tube_wells = set()
duplicate_rack_plate_tube_wells = []
target_specimen_taxons = {}
whole_organisms = set()
duplicate_whole_organisms = []
def reset_trackers(self):
# Target rack/plate and tube/well ids
all = []
for sample in self.samples:
if not sample.is_symbiont() and sample.rack_or_plate_id is not None \
and sample.tube_or_well_id is not None:
concatenated = sample.rack_or_plate_id + '/' + sample.tube_or_well_id
all.append(concatenated)
self.target_rack_plate_tube_wells = set()
seen_add = self.target_rack_plate_tube_wells.add
# elements not yet seen are added to `seen`; repeat values fall through into the duplicate set
self.duplicate_rack_plate_tube_wells = set(x for x in all if x in
self.target_rack_plate_tube_wells
or seen_add(x))
# Target specimen/taxons
self.target_specimen_taxons = {}
for sample in self.samples:
if not sample.is_symbiont() and sample.specimen_id is not None \
and sample.taxonomy_id is not None:
# Only add the first one
if sample.specimen_id not in self.target_specimen_taxons:
self.target_specimen_taxons[sample.specimen_id] = sample.taxonomy_id
# Whole organisms
all = []
for sample in self.samples:
if sample.organism_part == "WHOLE_ORGANISM":
all.append(sample.specimen_id)
self.whole_organisms = set()
seen_add = self.whole_organisms.add
# elements not yet seen are added to `seen`; repeat values fall through into the duplicate set
self.duplicate_whole_organisms = set(x for x in all if x in
self.whole_organisms
or seen_add(x))
def unique_taxonomy_ids(self):
return set([x.taxonomy_id for x in self.samples])
def to_dict(self):
return {'manifestId': self.manifest_id,
'projectName': self.project_name,
'stsManifestId': self.sts_manifest_id,
'samples': self.samples,
'submissionStatus': self.submission_status}
def to_dict_short(self):
return {'manifestId': self.manifest_id,
'projectName': self.project_name,
'stsManifestId': self.sts_manifest_id,
'submissionStatus': self.submission_status,
'createdAt': self.created_at,
'numberOfSamples': len(self.samples),
'user': self.user}
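# A standalone sketch of the duplicate-detection idiom used twice in
# reset_trackers() above (function and names here are illustrative):
def _find_duplicates(items):
    seen = set()
    seen_add = seen.add  # bound method; avoids repeated attribute lookups
    # seen_add(x) returns None (falsy), so an element passes the filter
    # only on its second and later occurrences
    return set(x for x in items if x in seen or seen_add(x))
# _find_duplicates(["a", "b", "a"]) == {"a"}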
| 44.95
| 88
| 0.614294
| 436
| 3,596
| 4.827982
| 0.270642
| 0.026603
| 0.033254
| 0.051306
| 0.451781
| 0.376247
| 0.278385
| 0.222328
| 0.222328
| 0.222328
| 0
| 0.001587
| 0.298943
| 3,596
| 79
| 89
| 45.518987
| 0.833399
| 0.0901
| 0
| 0.213115
| 0
| 0
| 0.071735
| 0.006438
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065574
| false
| 0
| 0.016393
| 0.04918
| 0.393443
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8458ddef5330c4ed60d249ea5883464e063cf5ba
| 6,411
|
py
|
Python
|
eden/integration/hg/histedit_test.py
|
jmswen/eden
|
5e0b051703fa946cc77fc43004435ae6b20599a1
|
[
"BSD-3-Clause"
] | null | null | null |
eden/integration/hg/histedit_test.py
|
jmswen/eden
|
5e0b051703fa946cc77fc43004435ae6b20599a1
|
[
"BSD-3-Clause"
] | null | null | null |
eden/integration/hg/histedit_test.py
|
jmswen/eden
|
5e0b051703fa946cc77fc43004435ae6b20599a1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import os
from eden.integration.lib import hgrepo
from .lib.hg_extension_test_base import EdenHgTestCase, hg_test
from .lib.histedit_command import HisteditCommand
@hg_test
class HisteditTest(EdenHgTestCase):
_commit1: str
_commit2: str
_commit3: str
def populate_backing_repo(self, repo: hgrepo.HgRepository) -> None:
repo.write_file("first", "")
self._commit1 = repo.commit("first commit")
repo.write_file("second", "")
self._commit2 = repo.commit("second commit")
repo.write_file("third", "")
self._commit3 = repo.commit("third commit")
def test_stop_at_earlier_commit_in_the_stack_without_reordering(self) -> None:
commits = self.repo.log()
self.assertEqual([self._commit1, self._commit2, self._commit3], commits)
# histedit, stopping in the middle of the stack.
histedit = HisteditCommand()
histedit.pick(self._commit1)
histedit.stop(self._commit2)
histedit.pick(self._commit3)
# We expect histedit to terminate with a nonzero exit code in this case.
with self.assertRaises(hgrepo.HgError) as context:
histedit.run(self)
head = self.repo.log(revset=".")[0]
expected_msg = (
"Changes committed as %s. " "You may amend the changeset now." % head[:12]
)
self.assertIn(expected_msg, str(context.exception))
# Verify the new commit stack and the histedit termination state.
# Note that the hash of commit[0] is unpredictable because Hg gives it a
# new hash in anticipation of the user amending it.
parent = self.repo.log(revset=".^")[0]
self.assertEqual(self._commit1, parent)
self.assertEqual(["first commit", "second commit"], self.repo.log("{desc}"))
# Make sure the working copy is in the expected state.
self.assert_status_empty(op="histedit")
self.assertSetEqual(
{".eden", ".hg", "first", "second"},
set(os.listdir(self.repo.get_canonical_root())),
)
self.hg("histedit", "--continue")
self.assertEqual(
["first commit", "second commit", "third commit"], self.repo.log("{desc}")
)
self.assert_status_empty()
self.assertSetEqual(
{".eden", ".hg", "first", "second", "third"},
set(os.listdir(self.repo.get_canonical_root())),
)
def test_reordering_commits_without_merge_conflicts(self) -> None:
self.assertEqual(
["first commit", "second commit", "third commit"], self.repo.log("{desc}")
)
# histedit, reordering the stack in a conflict-free way.
histedit = HisteditCommand()
histedit.pick(self._commit2)
histedit.pick(self._commit3)
histedit.pick(self._commit1)
histedit.run(self)
self.assertEqual(
["second commit", "third commit", "first commit"], self.repo.log("{desc}")
)
self.assert_status_empty()
self.assertSetEqual(
{".eden", ".hg", "first", "second", "third"},
set(os.listdir(self.repo.get_canonical_root())),
)
def test_drop_commit_without_merge_conflicts(self) -> None:
self.assertEqual(
["first commit", "second commit", "third commit"], self.repo.log("{desc}")
)
# histedit, reordering the stack in a conflict-free way.
histedit = HisteditCommand()
histedit.pick(self._commit1)
histedit.drop(self._commit2)
histedit.pick(self._commit3)
histedit.run(self)
self.assertEqual(["first commit", "third commit"], self.repo.log("{desc}"))
self.assert_status_empty()
self.assertSetEqual(
{".eden", ".hg", "first", "third"},
set(os.listdir(self.repo.get_canonical_root())),
)
def test_roll_two_commits_into_parent(self) -> None:
self.assertEqual(
["first commit", "second commit", "third commit"], self.repo.log("{desc}")
)
# histedit, reordering the stack in a conflict-free way.
histedit = HisteditCommand()
histedit.pick(self._commit1)
histedit.roll(self._commit2)
histedit.roll(self._commit3)
histedit.run(self)
self.assertEqual(["first commit"], self.repo.log("{desc}"))
self.assert_status_empty()
self.assertSetEqual(
{".eden", ".hg", "first", "second", "third"},
set(os.listdir(self.repo.get_canonical_root())),
)
def test_abort_after_merge_conflict(self) -> None:
self.write_file("will_have_confict.txt", "original\n")
self.hg("add", "will_have_confict.txt")
commit4 = self.repo.commit("commit4")
self.write_file("will_have_confict.txt", "1\n")
commit5 = self.repo.commit("commit5")
self.write_file("will_have_confict.txt", "2\n")
commit6 = self.repo.commit("commit6")
histedit = HisteditCommand()
histedit.pick(commit4)
histedit.pick(commit6)
histedit.pick(commit5)
original_commits = self.repo.log()
with self.assertRaises(hgrepo.HgError) as context:
histedit.run(self, ancestor=commit4)
expected_msg = (
"Fix up the change (pick %s)\n" % commit6[:12]
) + " (hg histedit --continue to resume)"
self.assertIn(expected_msg, str(context.exception))
self.assert_status({"will_have_confict.txt": "M"}, op="histedit")
self.assert_file_regex(
"will_have_confict.txt",
"""\
<<<<<<< local: .*
original
=======
2
>>>>>>> histedit: .*
""",
)
self.hg("histedit", "--abort")
self.assertEqual("2\n", self.read_file("will_have_confict.txt"))
self.assertListEqual(
original_commits,
self.repo.log(),
msg="The original commit hashes should be restored by the abort.",
)
self.assert_status_empty()
| 36.220339
| 86
| 0.608641
| 738
| 6,411
| 5.138211
| 0.245257
| 0.046414
| 0.037711
| 0.035865
| 0.501319
| 0.456487
| 0.42827
| 0.351266
| 0.341772
| 0.318829
| 0
| 0.010335
| 0.26049
| 6,411
| 176
| 87
| 36.426136
| 0.789496
| 0.128685
| 0
| 0.388889
| 0
| 0
| 0.162033
| 0.027067
| 0
| 0
| 0
| 0
| 0.230159
| 1
| 0.047619
| false
| 0
| 0.031746
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
845a911380b7475214d4489c0d02b5872a85aa00
| 310
|
py
|
Python
|
Leetcode/0713. Subarray Product Less Than K/0713.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
Leetcode/0713. Subarray Product Less Than K/0713.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
Leetcode/0713. Subarray Product Less Than K/0713.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
from typing import List

class Solution:
def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:
if k <= 1:
return 0
ans = 0
prod = 1
j = 0
for i, num in enumerate(nums):
prod *= num
while prod >= k:
prod /= nums[j]
j += 1
ans += i - j + 1
return ans
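# Hedged sanity check using the problem's sample input (relies on the
# `from typing import List` import added above):
if __name__ == "__main__":
    # subarrays of [10, 5, 2, 6] with product strictly less than 100: 8 of them
    print(Solution().numSubarrayProductLessThanK([10, 5, 2, 6], 100))  # 8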
| 17.222222
| 72
| 0.490323
| 44
| 310
| 3.454545
| 0.5
| 0.092105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.390323
| 310
| 17
| 73
| 18.235294
| 0.767196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
845d9d3e1de64db798d6f4d7e46d76bf4c2959c6
| 3,965
|
py
|
Python
|
UI/python/runtext.py
|
maxxscholten/nyc-train-sign
|
7da32c413270f3bf4629969bcf16f7def4ddb372
|
[
"MIT"
] | 8
|
2020-02-19T21:17:04.000Z
|
2022-01-04T03:52:56.000Z
|
UI/python/runtext.py
|
maxxscholten/nyc-train-sign
|
7da32c413270f3bf4629969bcf16f7def4ddb372
|
[
"MIT"
] | 1
|
2021-09-20T02:13:41.000Z
|
2021-09-21T07:01:14.000Z
|
UI/python/runtext.py
|
maxxscholten/nyc-train-sign
|
7da32c413270f3bf4629969bcf16f7def4ddb372
|
[
"MIT"
] | 4
|
2021-03-11T17:11:40.000Z
|
2021-11-10T01:20:33.000Z
|
#!/usr/bin/env python
# Display a runtext with double-buffering.
from samplebase import SampleBase
from rgbmatrix import graphics
import time
import requests
import transitfeed
import datetime
import arrow
import schedule
today = datetime.date.today()
starttime = time.time()
schedule = transitfeed.Schedule()
url = "http://localhost:5000/by-id/077e"
font = graphics.Font()
font.LoadFont("../fonts/tom-thumb.bdf")
textColor = graphics.Color(0, 110, 0)
circleColor = graphics.Color(110, 0, 0)
circleNumberColor = graphics.Color(0, 0, 0)
class RunText(SampleBase):
def __init__(self, *args, **kwargs):
super(RunText, self).__init__(*args, **kwargs)
self.parser.add_argument("-t", "--text", help="The text to scroll on the RGB LED panel", default="6 Wall Street")
def getData(self):
r = requests.get(url=url)
time1 = r.json()['data'][0]['N'][0]['time']
time2 = r.json()['data'][0]['N'][1]['time']
print(r.json()['data'][0]['N'])
nowTime = arrow.utcnow().datetime
time1Formatted = arrow.get(time1).to('utc').datetime
time2Formatted = arrow.get(time2).to('utc').datetime
deltaTime1 = time1Formatted - nowTime
deltaTime2 = time2Formatted - nowTime
deltaMod1 = divmod(deltaTime1.total_seconds(), 60)
deltaMod2 = divmod(deltaTime2.total_seconds(), 60)
deltaMins1 = deltaMod1[0] + deltaMod1[1]/60
deltaMins2 = deltaMod2[0] + deltaMod2[1]/60
minsUntilTrain1 = int(round(deltaMins1))
minsUntilTrain2 = int(round(deltaMins2))
minsUntilTrain1Str = str(minsUntilTrain1)
minsUntilTrain2Str = str(minsUntilTrain2)
if minsUntilTrain1 < 10 and minsUntilTrain1 >= 0:
minsUntilTrain1Str = " " + str(minsUntilTrain1)
if minsUntilTrain2 < 10 and minsUntilTrain2 >= 0:
minsUntilTrain2Str = " " + str(minsUntilTrain2)
return [minsUntilTrain1Str, minsUntilTrain2Str]
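# Worked example of the conversion above: a train 350 s away gives
# divmod(350.0, 60) == (5.0, 50.0), so deltaMins = 5 + 50/60 ≈ 5.83,
# which rounds to a displayed "6" minutes.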
def drawCircle(self, canvas, x, y, color):
# Draw circle with lines
graphics.DrawLine(canvas, x+2, y+0, x+6, y+0, color)
graphics.DrawLine(canvas, x+1, y+1, x+7, y+1, color)
graphics.DrawLine(canvas, x+0, y+2, x+8, y+2, color)
graphics.DrawLine(canvas, x+0, y+3, x+8, y+3, color)
graphics.DrawLine(canvas, x+0, y+4, x+8, y+4, color)
graphics.DrawLine(canvas, x+0, y+5, x+8, y+5, color)
graphics.DrawLine(canvas, x+0, y+6, x+8, y+6, color)
graphics.DrawLine(canvas, x+1, y+7, x+7, y+7, color)
graphics.DrawLine(canvas, x+2, y+8, x+6, y+8, color)
def drawRows(self, canvas, minsTrain1, minsTrain2):
canvas.Clear()
# Top line
self.drawCircle(canvas, 2, 4, circleColor)
graphics.DrawText(canvas, font, 5, 11, circleNumberColor, "3")
graphics.DrawText(canvas, font, 14, 11, textColor, "Kingston")
graphics.DrawText(canvas, font, 47, 11, textColor, minsTrain1)
graphics.DrawText(canvas, font, 54, 11, textColor, "min")
# Bottom line
self.drawCircle(canvas, 2, 20, circleColor)
graphics.DrawText(canvas, font, 5, 27, circleNumberColor, "3")
graphics.DrawText(canvas, font, 14, 27, textColor, "Kingston")
graphics.DrawText(canvas, font, 47, 27, textColor, minsTrain2)
graphics.DrawText(canvas, font, 54, 27, textColor, "min")
def timeDrawing(self):
minsArr = self.getData()
print(minsArr)
minsTrain1 = minsArr[0]
minsTrain2 = minsArr[1]
canvas = self.matrix.CreateFrameCanvas()
self.drawRows(canvas, minsTrain1, minsTrain2)
# draw to the canvas
canvas = self.matrix.SwapOnVSync(canvas)
def run(self):
self.timeDrawing()
i = 0
while True:
time.sleep(60 - time.time() % 60)
print(i)
self.timeDrawing()
i = i + 1
# Main function
if __name__ == "__main__":
run_text = RunText()
if (not run_text.process()):
run_text.print_help()
| 37.056075
| 121
| 0.640858
| 499
| 3,965
| 5.046092
| 0.296593
| 0.0278
| 0.078634
| 0.082208
| 0.262907
| 0.205719
| 0.155679
| 0
| 0
| 0
| 0
| 0.055304
| 0.220177
| 3,965
| 106
| 122
| 37.40566
| 0.759056
| 0.034805
| 0
| 0.023256
| 0
| 0
| 0.046335
| 0.005759
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.093023
| 0
| 0.186047
| 0.046512
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8462591fa4b3c8c3275d239bf45765f52bee1b94
| 1,188
|
py
|
Python
|
model/board_generator.py
|
myrmarachne/minesweeper
|
777170b7a31f1feed0bdf7aca31aaa9916c9b915
|
[
"AFL-1.1"
] | null | null | null |
model/board_generator.py
|
myrmarachne/minesweeper
|
777170b7a31f1feed0bdf7aca31aaa9916c9b915
|
[
"AFL-1.1"
] | null | null | null |
model/board_generator.py
|
myrmarachne/minesweeper
|
777170b7a31f1feed0bdf7aca31aaa9916c9b915
|
[
"AFL-1.1"
] | null | null | null |
from random import sample
from tile import Tile
from utils import neighbours
class BoardGenerator:
def __init__(self, size, numMines):
self.numMines = numMines
self.size = size
self.board = []
self.generate_board()
def generate_board(self):
# Generate a board for Minesweeper
self.board = [[Tile(j, i) for i in range(0, self.size)] for j in range(0, self.size)]
# select self.numMines random fields from 0 to self.size*self.size - 1
fields_with_mines_ids = sample(range(0, self.size * self.size), self.numMines)
# for a given field n select the field with coordinates (i,j) such that i*self.size + j = n
fields_with_mines = map(lambda n, size=self.size: ((n - n % size) // size, n % size), fields_with_mines_ids)
for field in fields_with_mines:
i, j = field
self.board[i][j].mine = True
# add 1 to all neighbours of that field, except of the fields that already contain a bomb
for (x, y) in neighbours(i, j, self.size):
if not self.board[x][y].mine:
self.board[x][y].neighbours_with_mines += 1
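# Note: with the integer division above, the coordinate mapping is equivalent
# to divmod, e.g. with size=4: divmod(9, 4) == (2, 1) == ((9 - 9 % 4) // 4, 9 % 4),
# i.e. field 9 lands at row 2, column 1.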
| 38.322581
| 115
| 0.616162
| 178
| 1,188
| 4.011236
| 0.297753
| 0.123249
| 0.084034
| 0.058824
| 0.044818
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008255
| 0.286195
| 1,188
| 30
| 116
| 39.6
| 0.833726
| 0.234848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.157895
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
846536aeea05536d64f4f59f9d2196f85d857b4d
| 19,035
|
py
|
Python
|
forever/Database.py
|
dss285/4ever
|
bd6f70f92d76d43342da401562f2c504adaf3867
|
[
"MIT"
] | null | null | null |
forever/Database.py
|
dss285/4ever
|
bd6f70f92d76d43342da401562f2c504adaf3867
|
[
"MIT"
] | null | null | null |
forever/Database.py
|
dss285/4ever
|
bd6f70f92d76d43342da401562f2c504adaf3867
|
[
"MIT"
] | null | null | null |
import psycopg2
import psycopg2.extras
import discord
from models.BotMention import BotMention
from models.UpdatedMessage import UpdatedMessage
from forever.Steam import Steam_API, Dota_Match, Dota_Match_Player
from forever.Utilities import run_in_executor, log
from forever.Warframe import CetusMessage, FissureMessage, SortieMessage, NightwaveMessage, InvasionMessage, SolSystem
from forever.Newswire import NewswireMessage
from models.Server import Server
from forever.Arknights import Formula, Item, Stage
from forever.GFL import Doll, Fairy
class Database:
def __init__(self, host : str, user : str, password : str, database : str, client : discord.Client=None) -> None:
self.host = host
self.user = user
self.password = password
self.database = database
self.shared = "shared"
self.forever = "forever"
self.tables = {
"forever" : {
'discord_images',
'discord_servers',
'discord_notifications',
'discord_joinable_roles',
'discord_role_messages',
'discord_updated_messages',
},
"shared" : {
"arknights_items",
"arknights_stages",
"arknights_formulas",
"dota_heroes",
"dota_matches",
"dota_matches_players",
'gfl_dolls',
'gfl_equipment',
'wf_builds',
'wf_builds_images',
'wf_items',
'wf_missions',
'wf_nightwave',
'wf_solsystem_nodes',
'wf_solsystem_planets',
'wf_sorties'
}
}
self.query_formats = {
"delete_where" : 'DELETE FROM \"{schema}\".{table} WHERE {column}={value}',
"delete_where_and" : 'DELETE FROM \"{schema}\".{table} WHERE {column_1}={value_1} AND {column_2}={value_2}',
"delete_where_custom" : 'DELETE FROM \"{schema}\".{table} WHERE {custom}',
"insert_into" : "INSERT INTO \"{schema}\".{table} ({columns}) VALUES ({values})"
}
self.connection = psycopg2.connect(host=self.host,
user=self.user,
password=self.password,
database=self.database,
port=5432)
def query(self, sql : str) -> None:
try:
data = None
with self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
cursor.execute(sql)
if "SELECT" in sql:
data = cursor.fetchall()
self.connection.commit()
if data:
return data
except Exception as e:
print(e)
self.connection.rollback()
def get_data(self,) -> dict[str, dict]:
results = {}
for i, j in self.tables.items():
for x in j:
results[x] = self.get_table_rows(f'\"{i}\".{x}')
return results
def get_table_rows(self, tabletype : str) -> dict:
results = None
with self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
cursor.execute(f"SELECT * FROM {tabletype}")
results = cursor.fetchall()
self.connection.commit()
return results
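# Aside: the templates in query_formats splice values directly into the SQL
# text. A hedged alternative sketch using psycopg2's own parameter binding
# (shown for illustration only; not a call made elsewhere in this module):
#
#   with self.connection.cursor() as cursor:
#       cursor.execute(
#           'DELETE FROM "forever".discord_joinable_roles WHERE role_id = %s',
#           (role_id,),
#       )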
class DB_API(Database):
def __init__(self, host :str, user:str, password:str, database:str, client) -> None:
super().__init__(host, user, password, database)
self.client = client
self.runtime = {}
self.saved_messages = set()
self.mentions = []
self.init_done = False
def __getitem__(self, item):
return self.runtime[item]
def structure(self,) -> None:
self.runtime["warframe"] = {}
self.runtime["warframe"]["nightwave"] = []
self.runtime["warframe"]["invasions"] = []
self.runtime["warframe"]["sorties"] = None
self.runtime["warframe"]["translate"] = {}
self.runtime["warframe"]["translate"]["missions"] = {}
self.runtime["warframe"]["translate"]["nightwave"] = {}
self.runtime["warframe"]["translate"]["sorties"] = {}
self.runtime["warframe"]["translate"]["items"] = {}
self.runtime["warframe"]["translate"]["solsystem"] = {}
self.runtime["warframe"]["translate"]["solsystem"]["planets"] = []
self.runtime["warframe"]["translate"]["solsystem"]["nodes"] = []
self.runtime["arknights"] = {}
self.runtime["arknights"]["formulas"] = {}
self.runtime["arknights"]["items"] = {}
self.runtime["arknights"]["stages"] = {}
self.runtime["arknights"]["items"]["ids"] = {}
self.runtime["arknights"]["items"]["names"] = {}
self.runtime["arknights"]["stages"]["ids"] = {}
self.runtime["arknights"]["stages"]["codes"] = {}
self.runtime["gfl"] = {}
self.runtime["gfl"]["dolls"] = {}
self.runtime["gfl"]["dolls"]["aliases"] = {}
self.runtime["gfl"]["dolls"]["names"] = {}
self.runtime["gfl"]["equipment"] = {}
self.runtime["dota"] = {}
self.runtime["droptables"] = {}
self.runtime["servers"] = {}
@run_in_executor
def query(self, sql : str) -> None:
return super().query(sql)
@run_in_executor
def get_data(self,) -> dict[str, dict]:
return super().get_data()
async def get_server(self, server_id, data : dict[str, dict]) -> None:
log_id = next((i["logchannel_id"] for i in data["discord_servers"] if i["server_id"] == server_id), None)
discord_server = self.client.get_guild(server_id)
logchannel = self.client.get_channel(log_id) if log_id else None
updated_messages = {}
joinable_roles = set()
role_messages = {}
notifications = []
for x in data["discord_role_messages"]:
if x["server_id"] == server_id:
channel = self.client.get_channel(x["channel_id"])
message = None
try:
message = await channel.fetch_message(x["message_id"])
except discord.NotFound:
await self.delete_role_message(x["message_id"])
await self.delete_updated_message(x["message_id"])
continue
if message:
role_messages[message.id] = {
"message" : message,
"emoji" : x["emoji"],
"role_id" : x["role_id"]
}
for x in data["discord_joinable_roles"]:
if x["server_id"] == server_id:
role = discord_server.get_role(x["role_id"])
if role:
joinable_roles.add(role)
else:
await self.delete_joinable_role(x["role_id"])
for x in data["discord_notifications"]:
if x["server_id"] == server_id:
role = discord_server.get_role(x["role_id"])
if role:
bot_mention = BotMention(x["notification_name"], role)
notifications.append(bot_mention)
else:
await self.delete_notification(x["notification_name"], x["server_id"])
for x in data["discord_updated_messages"]:
if x["server_id"] == server_id:
channel = self.client.get_channel(x["channel_id"])
if channel:
message = None
try:
message = await channel.fetch_message(x["message_id"])
except discord.NotFound:
await self.delete_role_message(x["message_id"])
await self.delete_updated_message(x["message_id"])
message = None
if message:
message_type = x["message_type"]
if message_type == "nightwave":
updated_messages[message_type] = NightwaveMessage(message)
elif message_type == "invasions":
updated_messages[message_type] = InvasionMessage(message, [])
elif message_type == "fissures":
updated_messages[message_type] = FissureMessage(message, [])
elif message_type == "sorties":
updated_messages[message_type] = SortieMessage(message)
elif message_type == "poe":
mention = next((i for i in notifications if i.name == "poe_night"), None)
updated_messages[message_type] = CetusMessage(message, mention, self.client)
elif message_type == "gtanw":
updated_messages[message_type] = NewswireMessage(message)
server = Server(server_id, discord_server, logchannel, updated_messages, notifications, joinable_roles, role_messages)
self.runtime["servers"][server_id] = server
async def update_runtime(self,) -> None:
data = self.get_data()
if "gfl" in self.runtime:
self.gfl(data)
if "warframe" in self.runtime:
self.warframe(data)
if "droptables" in self.runtime:
self.droptables(data)
def arknights(self, data : dict[str, dict]) -> None:
formulas = data.get("arknights_formulas")
stages = data.get("arknights_stages")
items = data.get("arknights_items")
for i in items:
tmp = Item(i["id"], i["name"], i["description"], i["rarity"], i["icon_id"], i["usage"])
tmp._stage_drop_list_str = i["stage_drop_list"]
self.runtime["arknights"]["items"]["ids"][i["id"]] = tmp
self.runtime["arknights"]["items"]["names"][tmp.name] = tmp
for f in formulas:
costs = []
if f["costs"] != "":
tmp = f["costs"].split(" ")
for c in tmp:
splitted = c.split("|")
item_id = splitted[0]
amount = splitted[1]
costs.append({
"item" : self.runtime["arknights"]["items"]["ids"][item_id],
"amount" : amount
})
tmp = Formula(f["id"], self.runtime["arknights"]["items"]["ids"][f["item_id"]], f["count"], costs, f["room"])
self.runtime["arknights"]["items"]["ids"][f["item_id"]].set_formula(tmp)
self.runtime["arknights"]["formulas"][f"{f['id']}_{f['room']}"] = tmp
for s in stages:
drops = []
if s["drops"] != "":
tmp = s["drops"].split(" ")
for x in tmp:
splitted = x.split("|")
itemid = splitted[0]
droptype = splitted[1]
occurence = splitted[2]
item = self.runtime["arknights"]["items"].get(itemid)
if item is None:
item = itemid
drops.append({
"item" : item,
"drop_type" : droptype,
"occurence" : occurence
})
sta = Stage(s["id"], s["code"], s["name"], s["description"], s["sanity_cost"], drops)
self.runtime["arknights"]["stages"]["ids"][s["id"]] = sta
self.runtime["arknights"]["stages"]["codes"][sta.code] = sta
for itemid, item in self.runtime["arknights"]["items"]["ids"].items():
stage_drop_list = []
if item._stage_drop_list_str not in ["", "-"]:
tmp = item._stage_drop_list_str.split(" ")
for i in tmp:
splitted = i.split("|")
stageid = splitted[0]
occurence = splitted[1]
stage = self.runtime["arknights"]["stages"]["ids"][stageid]
stage_drop_list.append({
"stage" : stage,
"occurence" : occurence
})
item.set_stage_drop_list(stage_drop_list)
def gfl(self, data : dict[str, dict]) -> None:
for d in data["gfl_dolls"]:
aliases = d["aliases"].split("|") if d["aliases"] else []
doll = Doll(d["id"], d["name"],
d["type"],
d["rarity"],
d["formation_bonus"],
d["formation_tiles"],
d["skill"],
aliases,
d["production_timer"])
self.runtime["gfl"]["dolls"]["names"][d["name"].lower()] = doll
for x in aliases:
self.runtime["gfl"]["dolls"]["aliases"][x.lower()] = doll
def warframe(self, data : dict[str, dict]) -> None:
self.runtime["warframe"]["translate"]["solsystem"]["planets"].clear()
self.runtime["warframe"]["translate"]["solsystem"]["nodes"].clear()
for item in data["wf_missions"]:
self.runtime["warframe"]["translate"]["missions"][item["code_name"]] = item["name"]
for item in data["wf_nightwave"]:
self.runtime["warframe"]["translate"]["nightwave"][item["code_name"]] = item["name"]
for item in data["wf_sorties"]:
self.runtime["warframe"]["translate"]["sorties"][item["code_name"]] = item["name"]
for item in data["wf_items"]:
self.runtime["warframe"]["translate"]["items"][item["code_name"]] = item["name"]
for item in data["wf_solsystem_planets"]:
self.runtime["warframe"]["translate"]["solsystem"]["planets"].append(SolSystem.SolPlanet(item["planet_id"], item["name"]))
for item in data["wf_solsystem_nodes"]:
self.runtime["warframe"]["translate"]["solsystem"]["nodes"].append(SolSystem.SolNode(item["node_id"], item["name"],
next(planet for planet in self.runtime["warframe"]["translate"]["solsystem"]["planets"] if planet.id == item["planet_id"])))
def dota(self, data : dict[str, dict]) -> None:
match_players = {}
dota_heroes = {"id" : {}, "name" : {}}
for i in data["dota_heroes"]:
dota_heroes["id"][i["id"]] = i["name"]
dota_heroes["name"][i["name"]] = i["id"]
for i in data["dota_matches_players"]:
if i["match_id"] not in match_players:
match_players[i["match_id"]] = {"players" : {"dire" : {}, "radiant" : {}}, "radiant_team_ids" : set(), "dire_team_ids" : set()}
player_slot = i["player_slot"]
if i["team"] == "dire":
player_slot -= 128
match_players[i["match_id"]]["dire_team_ids"].add(i["id"])
elif i["team"] == "radiant":
match_players[i["match_id"]]["radiant_team_ids"].add(i["id"])
match_players[i["match_id"]]["players"][i["team"]][player_slot] = Dota_Match_Player(
i["id"],
i["player_slot"],
i["hero_id"],
i["kills"],
i["deaths"],
i["assists"],
i["last_hits"],
i["denies"],
i["gpm"],
i["xpm"],
i["level"],
i["hero_dmg"],
i["building_dmg"],
i["healing"],
i["networth"]
)
for i in data["dota_matches"]:
dire_team_ids = match_players[i["id"]]["dire_team_ids"]
radiant_team_ids = match_players[i["id"]]["radiant_team_ids"]
players = match_players[i["id"]]["players"]
dota_match = Dota_Match(
i["id"],
players,
i["game_mode"],
i["duration"],
i["start_time"],
i["radiant_win"],
i["radiant_kills"],
i["dire_kills"],
radiant_team_ids,
dire_team_ids
)
Steam_API.cache.add(f"match_details_{dota_match.id}", dota_match)
self.runtime["dota"]["heroes"] = dota_heroes
def droptables(self, data : dict[str, dict]) -> None:
return
# for i in data['droptables']:
# if i['droptable_name'] not in self.runtime["droptables"]:
# self.runtime["droptables"][i["droptable_name"]] = DropTable()
# self.runtime["droptables"][i["droptable_name"]].add(i["weight"], i["item_name"])
async def init_runtime(self) -> None:
self.structure()
data = await self.get_data()
# Server translation
for i in data["discord_servers"]:
await self.get_server(i["server_id"], data)
# GFL translation
self.gfl(data)
# WF translation
self.warframe(data)
# Dota matches
self.dota(data)
# AK translation
self.arknights(data)
self.init_done = True
async def delete_joinable_role(self, role_id : int) -> None:
await self.query(self.query_formats["delete_where"].format(
schema=self.forever,
table="discord_joinable_roles",
column="role_id",
value=role_id
))
async def delete_updated_message(self, message_id : int) -> None:
await self.query(self.query_formats["delete_where"].format(
schema=self.forever,
table="discord_updated_messages",
column="message_id",
value=message_id
))
async def delete_role_message(self, message_id : int = None, role_id : int = None) -> None:
query = None
if message_id and role_id:
query = self.query_formats["delete_where_and"].format(
schema=self.forever,
table="discord_role_messages",
column_1="message_id",
value_1=message_id,
column_2="role_id",
value_2=role_id
)
elif message_id:
query = self.query_formats["delete_where"].format(
schema=self.forever,
table="discord_role_messages",
column="message_id",
value=message_id
)
elif role_id:
query = self.query_formats["delete_where"].format(
schema=self.forever,
table="discord_role_messages",
column="role_id",
value=role_id
)
if query:
await self.query(query)
async def delete_notification(self, notification_name : str, server_id : int) -> None:
await self.query(self.query_formats["delete_where_and"].format(
schema=self.forever,
table="discord_notifications",
column_1="name",
value_1=f"\"{notification_name}\"",
column_2="server_id",
value_2=server_id
))
async def delete_server(self, server_id : int) -> None:
await self.query(self.query_formats["delete_where"].format(
schema=self.forever,
table="discord_servers",
column="server_id",
value=server_id
))
async def create_joinable_role(self, role_id : int, server_id : int) -> None:
await self.query(self.query_formats["insert_into"].format(
schema=self.forever,
table="discord_joinable_roles",
columns="role_id, server_id",
values=f"{role_id}, {server_id}"
))
async def create_updated_message(self, server_id : int, message_type : str, channel_id : int, message_id : int) -> None:
await self.query(self.query_formats["insert_into"].format(
schema=self.forever,
table="discord_updated_messages",
columns="server_id, message_type, channel_id, message_id",
values=f"{server_id}, \"{message_type}\", {channel_id}, {message_id}"
))
async def create_role_message(self, role_id : int, message_id : int, channel_id : int, emoji, server_id : int) -> None:
await self.query(self.query_formats["insert_into"].format(
schema=self.forever,
table="discord_role_messages",
columns="role_id, message_id, channel_id, emoji, server_id",
values=f"{role_id}, {message_id}, {channel_id}, \"{emoji}\", {server_id}"
))
async def create_notification(self, notification_name : str, role_id : int, server_id : int) -> None:
await self.query(self.query_formats["insert_into"].format(
schema=self.forever,
table="discord_notifications",
columns="notification_name, role_id, server_id",
values=f"\"{notification_name}\", {role_id}, {server_id}"
))
async def create_server(self, server_id : int) -> None:
await self.query(self.query_formats["insert_into"].format(
schema=self.forever,
table="discord_servers",
columns="server_id",
values=f"{server_id}"
))
async def create_dota_match(self, dota_match : Dota_Match) -> None:
query_match = self.query_formats["insert_into"]
query_player = self.query_formats["insert_into"]
query_match = query_match.format(
schema=self.shared,
table="dota_matches",
columns="id, game_mode, start_time, radiant_win, radiant_kills, dire_kills, duration",
values=f"{dota_match.id}, {dota_match.game_mode}, {dota_match.start_time}, {dota_match.radiant_win}, {dota_match.radiant_kills}, {dota_match.dire_kills}, {dota_match.duration}"
)
await self.query(query_match)
for team, players in dota_match.players.items():
for player_slot, player in players.items():
await self.query(
query_player.format(
schema=self.shared,
table="dota_matches_players",
columns="id, match_id, player_slot, hero_id, kills, deaths, assists, last_hits, denies, gpm, xpm, level, hero_dmg, building_dmg, healing, networth, team",
values="{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, '{}'".format(
player.id or "null",
dota_match.id,
player.player_slot,
player.hero_id,
player.kills,
player.deaths,
player.assists,
player.last_hits,
player.denies,
player.gpm,
player.xpm,
player.level,
player.hero_dmg or "null",
player.building_dmg or "null",
player.healing or "null",
player.networth or "null",
team
)
)
)
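# Hedged sketch of the query_formats templates the methods above rely on.
# They are defined elsewhere in this class (not shown in this excerpt) and
# are assumed to be plain str.format templates along these lines:
#
#     query_formats = {
#         "insert_into": "INSERT INTO {schema}.{table} ({columns}) VALUES ({values});",
#         "delete_where": "DELETE FROM {schema}.{table} WHERE {column} = {value};",
#         "delete_where_and": ("DELETE FROM {schema}.{table} "
#                              "WHERE {column_1} = {value_1} AND {column_2} = {value_2};"),
#     }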
--- 8465f309612202475ac3cb61d22a9dcf1509182e | Week06/q_cifar10_cnn.py | HowardNTUST/HackNTU_Data_2017 | MIT | 822 bytes | Python ---
import keras
from keras.models import Sequential  # needed for Sequential() below
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Reshape
model = Sequential()
model.add(Reshape((3, 32, 32), input_shape=(3*32*32,) ))
model.add(Conv2D(filters=32, kernel_size=(3,3), padding='same', activation="relu", data_format='channels_first'))
model.add(MaxPool2D())
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', activation="relu", data_format='channels_first'))
model.add(MaxPool2D())
model.add(Reshape((-1,)))
model.add(Dense(units=1024, activation="relu"))
model.add(Dense(units=10, activation="softmax"))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(train_X, train_Y, validation_split=0.02, batch_size=128, epochs=30)
rtn = model.evaluate(test_X, test_Y)
print("\ntest accuracy=", rtn[1])
--- 84664082e1511f1729add08f835b69444a8edf67 | polyanalyst6api/api.py | Megaputer/polyanalyst6api-python | MIT | 9,697 bytes | Python ---
"""
polyanalyst6api.api
~~~~~~~~~~~~~~~~~~~
This module contains functionality for access to PolyAnalyst API.
"""
import configparser
import contextlib
import pathlib
import warnings
from typing import Any, Dict, List, Tuple, Union, Optional
from urllib.parse import urljoin, urlparse
import requests
import urllib3
from . import __version__
from .drive import Drive
from .project import Parameters, Project
from .exceptions import APIException, ClientException, _WrapperNotFound
__all__ = ['API']
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
warnings.simplefilter(
'always', UserWarning
) # without this set_parameters will show warnings only once
NodeTypes = [
"CSV Exporter/",
"DataSource/CSV",
"DataSource/EXCEL",
"DataSource/FILES",
"DataSource/INET",
"DataSource/ODBC",
"DataSource/RSS",
"DataSource/XML",
"Dataset/Biased",
"Dataset/ExtractTerms",
"Dataset/Python",
"Dataset/R",
"Dataset/ReplaceTerms",
"ODBC Exporter/",
"PA6TaxonomyResult/TaxonomyResult",
"SRLRuleSet/Filter Rows",
"SRLRuleSet/SRL Rule",
"TmlEntityExtractor/FEX",
"Sentiment Analysis",
"TmlLinkTerms/",
]
class API:
"""PolyAnalyst API
:param url: (optional) The scheme, host and port(if exists) of a PolyAnalyst server \
(e.g. ``https://localhost:5043/``, ``http://example.polyanalyst.com``)
:param username: (optional) The username to login with
:param password: (optional) The password for specified username
:param ldap_server: (optional) LDAP Server address
:param version: (optional) Choose which PolyAnalyst API version to use. Default: ``1.0``
If ldap_server is provided, then login will be performed via LDAP Server.
Usage::
>>> with API(POLYANALYST_URL, YOUR_USERNAME, YOUR_PASSWORD) as api:
... print(api.get_server_info())
or if you're using configuration file (New in version 0.23.0):
>>> with API() as api:
... print(api.get_server_info())
"""
_api_path = '/polyanalyst/api/'
_valid_api_versions = ['1.0']
user_agent = f'PolyAnalyst6API python client v{__version__}'
def __enter__(self) -> 'API':
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
self._s.__exit__()
def __init__(
self,
url: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
ldap_server: Optional[str] = None,
version: str = '1.0',
) -> None:
if version not in self._valid_api_versions:
raise ClientException('Valid api versions are ' + ', '.join(self._valid_api_versions))
if url is None or username is None:
try:
cfg_path = pathlib.Path.home() / '.polyanalyst6api' / 'config'
parser = configparser.ConfigParser(allow_no_value=True)
with open(cfg_path, encoding='utf8') as f:
parser.read_file(f)
default = dict(parser['DEFAULT'])
url = default['url']
username = default['username']
password = default['password']
ldap_server = default.get('ldap_server')
except FileNotFoundError:
raise ClientException("The credentials file doesn't exist, nor were credentials passed as arguments")
except KeyError as exc:
raise ClientException(f"The credentials file doesn't contain required key: {exc}")
if not url:
raise ClientException(f'Invalid url: "{url}".')
self.base_url = urljoin(url, self._api_path)
self.url = urljoin(self.base_url, f'v{version}/')
self.username = username
self.password = password or ''
self.ldap_server = ldap_server
self._s = requests.Session()
self._s.headers.update({'User-Agent': self.user_agent})
self.sid = None # session identity
# path to certificate file. by default ignore insecure connection warnings
self.certfile = False
self.drive = Drive(self)
@property
def fs(self):
warnings.warn('"fs" attribute has been renamed "drive"', DeprecationWarning, 2)
return self.drive
def get_versions(self) -> List[str]:
"""Returns api versions supported by PolyAnalyst server."""
# the 'versions' endpoint was added in the 2191 polyanalyst's version
try:
return self.request(urljoin(self.base_url, 'versions'), method='get')[1]
except APIException:
return ['1.0']
def get_server_info(self) -> Optional[Dict[str, Union[int, str, Dict[str, str]]]]:
"""Returns general server information including build number, version and commit hashes."""
_, data = self.request(urljoin(self.url, 'server/info'), method='get')
return data
def get_parameters(self) -> List[Dict[str, Union[str, List]]]:
"""
Returns list of nodes with parameters supported by ``Parameters`` node.
.. deprecated:: 0.18.0
Use :meth:`Parameters.get` instead.
"""
warnings.warn(
'API.get_parameters() is deprecated, use Parameters.get() instead.',
DeprecationWarning,
stacklevel=2,
)
class ProjectStub:
api = self
return Parameters(ProjectStub(), None).get()
def login(self) -> None:
"""Logs in to PolyAnalyst Server with user credentials."""
credentials = {'uname': self.username, 'pwd': self.password}
if self.ldap_server:
credentials['useLDAP'] = '1'
credentials['svr'] = self.ldap_server
resp, _ = self.request('login', method='post', params=credentials)
try:
self.sid = resp.cookies['sid']
except KeyError:
self._s.headers['Authorization'] = f"Bearer {resp.headers['x-session-id']}"
def logout(self) -> None:
"""Logs out current user from PolyAnalyst server."""
self.get('logout')
def run_task(self, id: int) -> None:
"""Initiates scheduler task execution.
:param id: the task ID
"""
self.post('scheduler/run-task', json={'taskId': id})
def project(self, uuid: str) -> Project:
"""Returns :class:`Project <Project>` instance with given uuid.
:param uuid: The project uuid
"""
prj = Project(self, uuid)
prj._update_node_list() # check that the project with given uuid exists
return prj
def get(self, endpoint: str, **kwargs) -> Any:
"""Shortcut for GET requests via :meth:`request <API.request>`
:param endpoint: PolyAnalyst API endpoint
:param kwargs: :func:`requests.request` keyword arguments
"""
return self.request(endpoint, method='get', **kwargs)[1]
def post(self, endpoint: str, **kwargs) -> Any:
"""Shortcut for POST requests via :meth:`request <API.request>`
:param endpoint: PolyAnalyst API endpoint
:param kwargs: :func:`requests.request` keyword arguments
"""
return self.request(endpoint, method='post', **kwargs)[1]
def request(self, url: str, method: str, **kwargs) -> Tuple[requests.Response, Any]:
"""Sends ``method`` request to ``endpoint`` and returns tuple of
:class:`requests.Response` and json-encoded content of a response.
:param url: url or PolyAnalyst API endpoint
:param method: request method (e.g. GET, POST)
:param kwargs: :func:`requests.request` keyword arguments
"""
if not urlparse(url).netloc:
url = urljoin(self.url, url)
kwargs['verify'] = self.certfile
try:
resp = self._s.request(method, url, **kwargs)
except requests.RequestException as exc:
raise ClientException(exc)
else:
return self._handle_response(resp)
@staticmethod
def _handle_response(response: requests.Response) -> Tuple[requests.Response, Any]:
try:
json = response.json()
except ValueError:
json = None
if response.status_code in (200, 202):
return response, json
if isinstance(json, dict) and json.get('error'):
with contextlib.suppress(KeyError):
error = json['error']
if 'The wrapper with the given GUID is not found on the server' == error['message']:
raise _WrapperNotFound
if error['title']:
error_msg = f"{error['title']}. Message: '{error['message']}'"
else:
error_msg = error['message']
# the old error response format handling
elif response.status_code == 403:
if 'are not logged in' in response.text:
error_msg = 'You are not logged in to PolyAnalyst Server'
elif 'operation is limited ' in response.text:
error_msg = (
'Access to this operation is limited to project owners and administrator'
)
elif response.status_code == 500:
with contextlib.suppress(IndexError, TypeError):
if json[0] == 'Error':
error_msg = json[1]
else:
try:
response.raise_for_status()
except requests.HTTPError as exc:
error_msg = str(exc)
with contextlib.suppress(NameError):
raise APIException(error_msg, response.url, response.status_code)
return response, None
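# Hedged sketch of the ~/.polyanalyst6api/config credentials file read by the
# API constructor above. The key names come from the dict lookups in
# __init__; the values are placeholders:
#
#   [DEFAULT]
#   url = https://localhost:5043/
#   username = administrator
#   password = secret
#   ldap_server =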
--- ffbf148e7df59ebdd237d38695723231b7824b44 | src/abc/106/106_b.py | ryuichi1208/atcoder_stack | MIT | 462 bytes | Python ---
n = int(input())
# @return [0]: number of divisors, [1]: list of divisors
def divisor(num):
ret=[]
L=[]
for i in range(1,num+1):
if (num%i==0):
L.append(i)
ret.append(len(L))
ret.append(L)
return ret
L=[]
ans=0
for i in range(1,n+1):
if(i%2==0):
continue
else:
for j in range(1,n+1):
if(i%j==0):
L.append(j)
if (len(L)==8):
ans+=1
L.clear()
print(ans)
print(divisor(15))  # sanity check: prints [4, [1, 3, 5, 15]]
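# Hedged equivalence check: the loop above counts odd i <= n with exactly
# eight divisors, which the divisor() helper can express directly.
alt_ans = sum(1 for i in range(1, n + 1, 2) if divisor(i)[0] == 8)
assert alt_ans == ans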
--- ffc1536722c6684539bdbe4eaba7de45c07a8edb | dataPipelines/gc_crawler/nato_stanag/models.py | ekmixon/gamechanger-crawlers | MIT | 6,296 bytes | Python ---
import bs4
import os
import re
from typing import Iterable
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait # for implicit and explict waits
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
from dataPipelines.gc_crawler.requestors import MapBasedPseudoRequestor
from dataPipelines.gc_crawler.exec_model import Crawler, Parser, Pager
from dataPipelines.gc_crawler.data_model import Document, DownloadableItem
from dataPipelines.gc_crawler.utils import abs_url, close_driver_windows_and_quit
from . import SOURCE_SAMPLE_DIR, BASE_SOURCE_URL
class STANAGPager(Pager):
"""Pager for Nato Stanag crawler"""
def iter_page_links(self) -> Iterable[str]:
"""Iterator for page links"""
base_url = 'https://nso.nato.int/nso/nsdd/'
starting_url = base_url + 'ListPromulg.html'
global driver
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument("--no-sandbox")
options.add_argument("--disable-gpu")
options.add_argument("--start-maximized")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--disable-setuid-sandbox")
driver = webdriver.Chrome(options=options)
yield starting_url
class STANAGParser(Parser):
"""Parser for Nato Stanag crawler"""
def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:
"""Parse document objects from page of text"""
# parse html response
pdf_prefix = 'https://nso.nato.int/nso/'
driver.get(page_url)
WebDriverWait(driver, 10).until(ec.presence_of_element_located((By.XPATH, "//*[@id='headerSO']")))
html = driver.execute_script("return document.documentElement.outerHTML")
soup = bs4.BeautifulSoup(html, features="html.parser")
parsed_docs = []
table = soup.find('table', attrs={'id': 'dataSearchResult'})
rows = table.find_all('tr')
for row in rows[1:]:
data = row.find_all('td')
if "No" not in data[1].text:
doc_title = data[4].text.splitlines()[1].strip()
doc_helper = data[2].text.split("Ed:")[0].strip()
if "STANAG" in doc_helper or"STANREC" in doc_helper:
doc_num = doc_helper.split("\n")[1].strip().replace(" ","_")
doc_type = doc_helper.split("\n")[0].strip().replace(" ","_")
else:
doc_ = doc_helper.split("\n")[0].strip()
doc_num = doc_.split('-',1)[1].strip().replace(" ","_")
doc_type = doc_.split('-',1)[0].strip().replace(" ","_")
if len(doc_helper.split())>1:
if re.match("^M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$", doc_helper.split()[1].strip()):
doc_num = doc_num + "_VOL" + doc_helper.split()[1].strip()
if re.match("^\d$",doc_helper.split()[1].strip()):
doc_num = doc_num + "_PART" + doc_helper.split()[1].strip()
if len(data[2].text.split("VOL")) > 1:
volume = data[2].text.split("VOL")[1].split()[0].strip()
doc_num = doc_num + "_VOL" + volume
if len(data[2].text.split("PART")) > 1:
volume = data[2].text.split("PART")[1].split()[0].strip()
doc_num = doc_num + "_PART" + volume
doc_name = doc_type + " " + doc_num
if doc_name in (o.doc_name for o in parsed_docs) and doc_title in (t.doc_title for t in parsed_docs):
#getting rid of duplicates
continue
if len(data[2].text.split("Ed:")) > 1:
edition = data[2].text.split("Ed:")[1].strip()
else:
edition = ""
publication_date = data[5].text.splitlines()[1].strip()
pdf_suffix = data[4].find('a')
if pdf_suffix is None:
continue
if "../classDoc.htm" in pdf_suffix['href']:
cac_login_required = True
else:
cac_login_required = False
di = DownloadableItem(
doc_type='pdf',
web_url=pdf_prefix + pdf_suffix['href'].replace('../', '').replace(" ", "%20")
)
crawler_used = "nato_stanag"
version_hash_fields = {
"editions_and_volume": edition,
"type": data[1].text
}
doc = Document(
doc_name=doc_name,
doc_title=doc_title,
doc_num=doc_num,
doc_type=doc_type,
publication_date=publication_date,
cac_login_required=cac_login_required,
crawler_used=crawler_used,
source_page_url=page_url.strip(),
version_hash_raw_data=version_hash_fields,
downloadable_items=[di]
)
parsed_docs.append(doc)
close_driver_windows_and_quit(driver)
return parsed_docs
class STANAGCrawler(Crawler):
"""Crawler for the example web scraper"""
def __init__(self, *args, **kwargs):
super().__init__(
*args,
**kwargs,
pager=STANAGPager(
starting_url=BASE_SOURCE_URL
),
parser=STANAGParser()
)
class FakeSTANAGCrawler(Crawler):
"""Nato Stanag crawler that just uses stubs and local source files"""
def __init__(self, *args, **kwargs):
with open(os.path.join(SOURCE_SAMPLE_DIR, 'dod_issuances.html')) as f:
default_text = f.read()
super().__init__(
*args,
**kwargs,
pager=STANAGPager(
requestor=MapBasedPseudoRequestor(
default_text=default_text
),
starting_url=BASE_SOURCE_URL
),
parser=STANAGParser()
)
--- ffc168320dcc3879d9935e0c48e2582d2d304fa1 | app/signals.py | MakuZo/bloggy | MIT | 3,938 bytes | Python ---
import re
from django.db.models.signals import m2m_changed, post_save, pre_delete
from django.dispatch import receiver
from django.urls import reverse
from .models import Entry, Notification, User
@receiver(post_save, sender=Entry)
def entry_notification(sender, instance, created, **kwargs):
"""
Signal used to create notification(s) when an entry is created
This function notifies an user if this entry is a reply to him.
This function notifies an user if he's mentioned (by @username) in one's entry
"""
if created:
# First find usernames mentioned (by @ tag)
p = re.compile(r"^(@)(\w+)$")
usernames = set(
[
p.match(c).group(2).lower()
for c in instance.content.split()
if p.match(c)
]
)
# Remove the author of an entry from users to notify
if instance.user.username in usernames:
usernames.remove(instance.user.username)
# If entry has a parent and it's parent is not the same author then notify about a reply
# and delete from usernames if being notified
if instance.parent and instance.parent.user.username != instance.user.username:
if instance.parent.user.username in usernames:
usernames.remove(instance.parent.user.username)
Notification.objects.create(
type="user_replied",
sender=instance.user,
target=instance.parent.user,
object=instance,
)
# Notify mentioned users without the author of an entry
for name in usernames:
if name == instance.user.username:
continue
try:
target = User.objects.get(username=name)
except Exception:
continue
Notification.objects.create(
type="user_mentioned",
sender=instance.user,
target=target,
object=instance,
)
@receiver(m2m_changed, sender=Entry.tags.through)
def entry_tag_notification(instance, action, **kwargs):
"""
Notifies users if one of the tags in entry is observed by them.
"""
if not instance.modified_date and "post" in action:
already_notified = set()
reversed_user = reverse(
"user-detail-view", kwargs={"username": instance.user.username}
)
reversed_entry = reverse("entry-detail-view", kwargs={"pk": instance.pk})
all_tags = instance.tags.all().prefetch_related("observers", "blacklisters")
all_blacklisters = [
blacklister for tag in all_tags for blacklister in tag.blacklisters.all()
]
to_create = []
for tag in all_tags:
for observer in tag.observers.all():
# If user blacklisted one of the tags in an entry, don't notify him.
if observer in all_blacklisters:
continue
if (
observer.username == instance.user.username
or observer in already_notified
):
continue
reversed_tag = reverse("tag", kwargs={"tag": tag.name})
content = (
f'<a href="{reversed_user}">{instance.user.username}</a> used tag <a href="{reversed_tag}">#{tag.name}</a>'
f' in <a href="{reversed_entry}">"{instance.content:.25}..."</a>'
)
to_create.append(
Notification(
type="tag_used",
sender=instance.user,
target=observer,
object=instance,
content=content,
)
)
already_notified.add(observer)
Notification.objects.bulk_create(to_create)
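# Hedged illustration of the mention-parsing rule in entry_notification
# above: only whitespace-separated tokens of the exact form @word count as
# mentions (the example content below is made up).
import re

p = re.compile(r"^(@)(\w+)$")
content = "Thanks @Alice and @bob for the review (cc @carol,)"
mentions = {p.match(tok).group(2).lower() for tok in content.split() if p.match(tok)}
print(sorted(mentions))  # ['alice', 'bob'] -- '@carol,)' fails the $ anchor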
--- ffc35164c1764ae381a92d8e3682d0250a4793ea | utils/jwt_custom_decorator.py | w0rm1995/face-comparison-backend | MIT | 912 bytes | Python ---
from functools import wraps
from flask_jwt_extended import verify_jwt_in_request, get_jwt_claims, exceptions
from jwt import exceptions as jwt_exception
from utils.custom_response import bad_request
def admin_required(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
try:
verify_jwt_in_request()
claims = get_jwt_claims()
if claims['roles'] != 'admin':
return bad_request('Admins only', 403)
else:
return fn(*args, **kwargs)
except jwt_exception.DecodeError as e:
return bad_request(str(e), 401)
except jwt_exception.PyJWTError as e:
return bad_request(str(e), 401)
except exceptions.JWTExtendedException as e:
return bad_request(str(e), 403)
return wrapper
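# Hedged usage sketch: the app object and route are illustrative, not part
# of the original module, and flask_jwt_extended is assumed to be
# initialised on the app elsewhere.
from flask import Flask

app = Flask(__name__)

@app.route('/admin/users')
@admin_required
def list_users():
    return 'admins only content'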
--- ffc4351a518b97d5c4916014accd51d41d76de87 | skybright/skybright.py | ehneilsen/skybright | MIT | 14,867 bytes | Python ---
#!/usr/bin/env python
"""A model for the sky brightness
"""
from functools import partial
from math import pi, cos, acos, sin, sqrt, log10
from datetime import datetime, tzinfo, timedelta
from time import strptime
from calendar import timegm
from copy import deepcopy
from sys import argv
from collections import namedtuple, OrderedDict
from argparse import ArgumentParser
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
import numexpr
from numexpr import NumExpr
import warnings
from warnings import warn
import numpy as np
try:
from palpy import rdplan as rdplan_not_vectorized
from palpy import gmst as gmst_not_vectorized
from palpy import dmoon
from palpy import evp
except ImportError:
from pyslalib.slalib import sla_rdplan as rdplan_not_vectorized
from pyslalib.slalib import sla_gmst as gmst_not_vectorized
from pyslalib.slalib import sla_dmoon as dmoon
from pyslalib.slalib import sla_evp as evp
palpy_body = {'sun': 0,
'moon': 3}
MAG0 = 23.9
# warnings.simplefilter("always")
rdplan = np.vectorize(rdplan_not_vectorized)
def gmst(mjd):
# Follow Meeus chapter 12
big_t = numexpr.evaluate("(mjd - 51544.5)/36525")
st = np.radians(np.mod(numexpr.evaluate("280.46061837 + 360.98564736629*(mjd-51544.5) + 0.000387933*big_t*big_t - big_t*big_t*big_t/38710000"), 360))
return st
def ang_sep(ra1, decl1, ra2, decl2):
# haversine formula
return numexpr.evaluate("2*arcsin(sqrt(cos(decl1)*cos(decl2)*(sin(((ra1-ra2)/2))**2) + (sin((decl1-decl2)/2))**2))")
## Works and is trivially faster, but less flexible w.r.t. data types
#
# ang_sep = NumExpr("2*arcsin(sqrt(cos(decl1)*cos(decl2)*(sin(((ra1-ra2)/2))**2) + (sin((decl1-decl2)/2))**2))",
# (('ra1', np.float64), ('decl1', np.float64), ('ra2', np.float64), ('decl2', np.float64)))
def calc_zd(latitude, ha, decl):
# zenith is always at ha=0, dec=latitude, by defn.
return ang_sep(ha, decl, 0, latitude)
def calc_airmass(cos_zd):
a = numexpr.evaluate("462.46 + 2.8121/(cos_zd**2 + 0.22*cos_zd + 0.01)")
airmass = numexpr.evaluate("sqrt((a*cos_zd)**2 + 2*a + 1) - a * cos_zd")
airmass[cos_zd < 0] = np.nan
return airmass
def calc_airglow(r0, h, m_zen, k, sin_zd, airmass):
airglow = numexpr.evaluate("10**(-0.4*(m_zen + 1.25*log10(1.0 - (r0/(h+r0))*(sin_zd**2)) + k*(airmass-1) - MAG0))")
return airglow
def calc_scat_extinction(k, x0, x):
if len(np.shape(x0)) == 0:
x0p = calc_airmass(0) if np.isnan(x0) else x0
else:
x0p = np.where(np.isnan(x0), calc_airmass(0), x0)
extinct = numexpr.evaluate("(10**(-0.4*k*x) - 10**(-0.4*k*x0p))/(-0.4*k*(x-x0p))")
return extinct
def elongation_not_vectorized(mjd):
"Calculate the elongation of the moon in radians"
pv = dmoon(mjd)
moon_distance = (sum([x**2 for x in pv[:3]]))**0.5
dvb, dpb, dvh, dph = evp(mjd,-1)
sun_distance = (sum([x**2 for x in dph[:3]]))**0.5
a = np.degrees(np.arccos(
(-pv[0]*dph[0] - pv[1]*dph[1] - pv[2]*dph[2])/
(moon_distance*sun_distance)))
return a
elongation = np.vectorize(elongation_not_vectorized)
def calc_moon_brightness(mjd, moon_elongation=None):
"""The brightness of the moon (relative to full)
The value here matches about what I expect from the value in
Astrophysical Quantities corresponding to the elongation calculated by
http://ssd.jpl.nasa.gov/horizons.cgi
>>> mjd = 51778.47
>>> print "%3.2f" % moon_brightness(mjd)
0.10
"""
if moon_elongation is None:
moon_elongation = elongation(mjd)
alpha = 180.0-moon_elongation
# Allen's _Astrophysical Quantities_, 3rd ed., p. 144
return 10**(-0.4*(0.026*abs(alpha) + 4E-9*(alpha**4)))
def one_calc_twilight_fract(z, twi1=-2.52333, twi2=0.01111):
if z<90:
return 1.0
if z>108:
return 0.0
if z>100:
twi0 = -1*(twi1*90+ twi2*90*90)
logfrac = twi0 + twi1*z + twi2*z*z
else:
logfrac = 137.11-2.52333*z+0.01111*z*z
frac = 10**logfrac
frac = 1.0 if frac>1.0 else frac
frac = 0.0 if frac<0.0 else frac
return frac
def calc_twilight_fract(zd, twi1=-2.52333, twi2=0.01111):
z = zd if len(np.shape(zd)) > 0 else np.array(zd)
logfrac = numexpr.evaluate("137.11-2.52333*z+0.01111*z*z")
logfrac[z>100] = numexpr.evaluate("twi1*z + twi2*z*z - (twi1*90 + twi2*90*90)")[z>100]
frac = 10**logfrac
frac = np.where(z<90, 1.0, frac)
frac = np.where(z>108, 0.0, frac)
frac = np.where(frac>1.0, 1.0, frac)
frac = np.where(frac<0.0, 0.0, frac)
return frac
def calc_body_scattering(brightness, body_zd_deg, cos_zd, body_ra, body_decl, ra, decl,
twi1, twi2, k, airmass, body_airmass, rayl_m, mie_m, g,
rayleigh=True, mie=True):
if len(np.shape(brightness)) == 0:
brightness = np.array(brightness)
brightness = np.where(body_zd_deg > 107.8, 0, brightness)
body_twi = body_zd_deg > 90
brightness[body_twi] = brightness[body_twi]*calc_twilight_fract(body_zd_deg[body_twi], twi1, twi2)
extinct = calc_scat_extinction(k, body_airmass, airmass)
cos_rho = numexpr.evaluate("cos(2*arcsin(sqrt(cos(decl)*cos(body_decl)*(sin(((ra-body_ra)/2))**2) + (sin((decl-body_decl)/2))**2)))")
rayleigh_frho = numexpr.evaluate("0.75*(1.0+cos_rho**2)") if rayleigh else np.zeros_like(cos_rho)
mie_frho = numexpr.evaluate("1.5*((1.0-g**2)/(2.0+g**2)) * (1.0 + cos_rho) * (1.0 + g**2 - 2.0*g*cos_rho*cos_rho)**(-1.5)") if mie else np.zeros_like(cos_rho)
mie_frho = np.where(mie_frho<0, 0.0, mie_frho)
# Fitter sometimes explores values of g resulting mie_frho being negative.
# Force a physical result.
mie_frho = np.where(mie_frho<0, 0.0, mie_frho)
rayl_c = 10**(-0.4*(rayl_m-MAG0))
mie_c = 10**(-0.4*(mie_m-MAG0))
flux = brightness*extinct*(rayl_c*rayleigh_frho + mie_c*mie_frho)
return flux
class MoonSkyModel(object):
def __init__(self, model_config):
self.longitude = model_config.getfloat("Observatory Position",
"longitude")
self.latitude = model_config.getfloat("Observatory Position",
"latitude")
self.k = OrderedDict()
self.m_inf = OrderedDict()
self.m_zen = OrderedDict()
self.h = OrderedDict()
self.rayl_m = OrderedDict()
self.g = OrderedDict()
self.mie_m = OrderedDict()
self.offset = OrderedDict()
self.sun_dm = OrderedDict()
self.twi1 = OrderedDict()
self.twi2 = OrderedDict()
for i, band in enumerate(model_config.get("sky","filters").split()):
i = model_config.get("sky","filters").split().index(band)
self.k[band] = float(model_config.get("sky","k").split()[i])
self.m_inf[band] = float(model_config.get("sky","m_inf").split()[i])
self.m_zen[band] = float(model_config.get("sky","m_zen").split()[i])
self.h[band] = float(model_config.get("sky","h").split()[i])
self.rayl_m[band] = float(model_config.get("sky","rayl_m").split()[i])
self.g[band] = float(model_config.get("sky","g").split()[i])
self.mie_m[band] = float(model_config.get("sky","mie_m").split()[i])
self.offset[band] = 0.0
self.sun_dm[band] = float(model_config.get("sky","sun_dm").split()[i])
self.twi1[band] = float(model_config.get("sky","twi1").split()[i])
self.twi2[band] = float(model_config.get("sky","twi2").split()[i])
self.calc_zd = partial(calc_zd, np.radians(self.latitude))
self.r0 = 6375.0
self.twilight_nan = True
def __call__(self, mjd, ra_deg, decl_deg, band, sun=True, moon=True,
moon_crds=None, moon_elongation=None, sun_crds=None, lst=None):
if len(np.shape(band)) < 1:
return self.single_band_call(
mjd, ra_deg, decl_deg, band, sun=sun, moon=moon,
moon_crds=moon_crds, moon_elongation=moon_elongation, sun_crds=sun_crds,
lst=lst)
mags = np.empty_like(ra_deg, dtype=np.float64)
mags.fill(np.nan)
for this_band in np.unique(band):
these = band == this_band
mjd_arg = mjd if len(np.shape(mjd))==0 else mjd[these]
mags[these] = self.single_band_call(
mjd_arg, ra_deg[these], decl_deg[these], this_band, sun=sun, moon=moon,
moon_crds=moon_crds, moon_elongation=moon_elongation, sun_crds=sun_crds,
lst=lst
)
return mags
def single_band_call(self, mjd, ra_deg, decl_deg, band, sun=True, moon=True,
moon_crds=None, moon_elongation=None, sun_crds=None, lst=None):
longitude = np.radians(self.longitude)
latitude = np.radians(self.latitude)
ra = np.radians(ra_deg)
decl = np.radians(decl_deg)
k = self.k[band]
twi1 = self.twi1[band]
twi2 = self.twi2[band]
m_inf = self.m_inf[band]
lst = gmst(mjd) + longitude if lst is None else np.radians(lst)
ha = lst - ra
if sun_crds is None:
sun_ra, sun_decl, diam = rdplan(mjd, 0, longitude, latitude)
else:
sun_ra = sun_crds.ra.rad
sun_decl = sun_crds.dec.rad
sun_ha = lst - sun_ra
sun_zd = self.calc_zd(sun_ha, sun_decl)
sun_zd_deg = np.degrees(sun_zd)
if len(np.shape(sun_zd_deg)) == 0 and self.twilight_nan:
if sun_zd_deg < 98:
m = np.empty_like(ra)
m.fill(np.nan)
return m
sun_cos_zd = np.cos(sun_zd)
sun_airmass = calc_airmass(sun_cos_zd)
if moon_crds is None:
moon_ra, moon_decl, diam = rdplan(mjd, 3, longitude, latitude)
else:
moon_ra = moon_crds.ra.rad
moon_decl = moon_crds.dec.rad
moon_ha = lst - moon_ra
moon_zd = self.calc_zd(moon_ha, moon_decl)
moon_cos_zd = np.cos(moon_zd)
moon_airmass = calc_airmass(moon_cos_zd)
moon_zd_deg = np.degrees(moon_zd)
# Flux from infinity
sky_flux = np.empty_like(ra)
sky_flux.fill(10**(-0.4*(m_inf-MAG0)))
# Airglow
zd = self.calc_zd(ha, decl)
sin_zd = np.sin(zd)
cos_zd = np.cos(zd)
airmass = calc_airmass(cos_zd)
airglow_flux = calc_airglow(self.r0, self.h[band], self.m_zen[band], k, sin_zd, airmass)
sky_flux += airglow_flux
# Needed for both scattering calculations
zd_deg = np.degrees(zd)
# Add scattering of moonlight
if moon:
moon_flux = calc_body_scattering(
calc_moon_brightness(mjd, moon_elongation),
moon_zd_deg, cos_zd, moon_ra, moon_decl, ra, decl, twi1, twi2, k, airmass, moon_airmass,
self.rayl_m[band], self.mie_m[band], self.g[band])
sky_flux += moon_flux
# Add scattering of sunlight
if sun:
sun_flux = calc_body_scattering(
10**(-0.4*(self.sun_dm[band])),
sun_zd_deg, cos_zd, sun_ra, sun_decl, ra, decl, twi1, twi2, k, airmass, sun_airmass,
self.rayl_m[band], self.mie_m[band], self.g[band])
sky_flux += sun_flux
m = MAG0 - 2.5*np.log10(sky_flux)
if len(np.shape(m)) > 0 and self.twilight_nan:
m[sun_zd_deg < 98] = np.nan
return m
#
# Included for backword compatibility with previous implementation
#
def skymag(m_inf, m_zen, h, g, mie_m, rayl_m, ra, decl, mjd, k, latitude, longitude, offset=0.0,
sun_dm=-14.0, twi1=-2.52333, twi2=0.01111):
config = ConfigParser()
sect = "Observatory Position"
config.add_section(sect)
# ConfigParser.set requires string values on Python 3, so cast explicitly.
config.set(sect, 'longitude', str(longitude))
config.set(sect, 'latitude', str(latitude))
sect = "sky"
config.add_section(sect)
config.set(sect, 'filters', 'x')
config.set(sect, 'k', str(k))
config.set(sect, 'm_inf', str(m_inf))
config.set(sect, 'm_zen', str(m_zen))
config.set(sect, 'h', str(h))
config.set(sect, 'rayl_m', str(rayl_m))
config.set(sect, 'g', str(g))
config.set(sect, 'mie_m', str(mie_m))
config.set(sect, 'sun_dm', str(sun_dm))
config.set(sect, 'twi1', str(twi1))
config.set(sect, 'twi2', str(twi2))
calc_sky = MoonSkyModel(config)
sky = calc_sky(mjd, ra, decl, 'x')
return sky
if __name__=='__main__':
parser = ArgumentParser('Estimate the sky brightness')
parser.add_argument("-m", "--mjd", type=float,
help="Modified Julian Date (float) (UTC)")
parser.add_argument("-r", "--ra", type=float,
help="the RA (decimal degrees)")
parser.add_argument("-d", "--dec", type=float,
help="the declination (decimal degrees)")
parser.add_argument("-f", "--filter",
help="the filter")
parser.add_argument("-c", "--config",
help="the configuration file")
args = parser.parse_args()
model_config = ConfigParser()
model_config.read(args.config)
longitude = model_config.getfloat("Observatory Position",
"longitude")
latitude = model_config.getfloat("Observatory Position",
"latitude")
lst = gmst(args.mjd) + np.radians(longitude)
print("GMST: %f" % np.degrees(gmst(args.mjd)))
print("LST: %f" % np.degrees(lst))
sun_ra, sun_decl, diam = rdplan(args.mjd, 0, np.radians(longitude), np.radians(latitude))
sun_ha = lst - sun_ra
sun_zd = np.degrees(calc_zd(np.radians(latitude), sun_ha, sun_decl))
print("Sun zenith distance: %f" % sun_zd)
moon_ra, moon_decl, diam = rdplan(args.mjd, 3, np.radians(longitude), np.radians(latitude))
moon_ha = lst - moon_ra
moon_zd = np.degrees(calc_zd(np.radians(latitude), moon_ha, moon_decl))
print("Moon zenith distance: %f" % moon_zd)
print("Elongation of the moon: %f" % elongation(args.mjd))
print("Moon brightness: %f" % calc_moon_brightness(args.mjd))
sep = ang_sep(moon_ra, moon_decl, np.radians(args.ra), np.radians(args.dec))
print("Pointing angle with moon: %f" % sep)
ha = lst - np.radians(args.ra)
print("Hour angle: %f" % np.degrees(ha))
z = calc_zd(np.radians(latitude), ha, np.radians(args.dec))
print("Pointing zenith distance: %f" % np.degrees(z))
print("Airmass: %f" % calc_airmass(np.cos(z)))
sky_model = MoonSkyModel(model_config)
print("Sky brightness at pointing: %f" % sky_model(args.mjd, args.ra, args.dec, args.filter))
--- ffc7043d4112113fd11d3bba2367bfc4002daece | pynetstation_send_tags/pynetstation_send_tags.py | mattmoo/Pynetstation-Plug-In | MIT | 8,004 bytes | Python ---
# -*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.item import item
from libqtopensesame.items.qtautoplugin import qtautoplugin
from openexp.canvas import canvas
blankText = u'Enter Variable Name Here'
blankID = u'****'
def make_fit(k):
n = len(k)
d = n - 4
if d > 0:
return k[0:4]
else:
return k + ' ' * abs(d)
class pynetstation_send_tags(item):
"""
This class (the class with the same name as the module) handles the basic
functionality of the item. It does not deal with GUI stuff.
"""
# Provide an informative description for your plug-in.
description = u'Send event tags to Netstation'
def reset(self):
"""
desc:
Resets plug-in to initial values.
"""
# Here we provide default values for the variables that are specified
# in info.json. If you do not provide default values, the plug-in will
# work, but the variables will be undefined when they are not explicitly
# set in the GUI.
self.eventTag = u'evt-'
self.labelCheck = u'yes'
self.labelText = u'Description of events or somesuch'
self.descriptionCheck = u'yes'
self.descriptionText = u'Description of events or somesuch'
self.tag1check = u'yes'
self.tagText1 = blankText
self.tagID1 = blankID
self.tag2check = u'no'
self.tagText2 = blankText
self.tagID2 = blankID
self.tag3check = u'no'
self.tagText3 = blankText
self.tagID3 = blankID
self.tag4check = u'no'
self.tagText4 = blankText
self.tagID4 = blankID
self.tag5check = u'no'
self.tagText5 = blankText
self.tagID5 = blankID
def prepare(self):
"""The preparation phase of the plug-in goes here."""
# Call the parent constructor.
item.prepare(self)
def run(self):
"""The run phase of the plug-in goes here."""
# self.set_item_onset() sets the time_[item name] variable. Optionally,
# you can pass a timestamp, such as returned by canvas.show().
self.set_item_onset(self.time())
if self.get(u'nsOnOff') == u'yes':
tagTable = {}
if self.get(u'labelCheck') != u'yes':
self.labelText = ''
if self.get(u'descriptionCheck') != u'yes':
self.descriptionText = ''
for i in range(1, 6):
if self.get(u'tag%dcheck' % i) == u'yes':
#
# Force all keys to become a utf-8 string, regardless of whether they're an int or string.
# keyI = ('%s' % self.get(u'tagID%d' % i)).encode('utf-8')
keyI = str(self.get(u'tagID%d' % i))
#
# check if variable exists. If not, use the literal.
try:
valueI = self.get(self.get(u'tagText%d' % i))
except:
valueI = self.get(u'tagText%d' % i)
#
# Differentiate between integers and strings while encoding strings in utf-8 for pynetstation.
if type(valueI) == int or type(valueI) == long or type(valueI) == float:
tagTable[keyI] = (valueI)
else:
tagTable[keyI] = str(valueI)
'''
for i in tagTable:
print "\nKey %s is type: %s" % (i, type(i))
print "\nValue %s is type: %s" % (tagTable[i], type(tagTable[i]))
print tagTable
'''
#
# Encode everything to 'utf-8' before sending the message to NetStation.
# event = ('%s' % self.experiment.get(u'eventTag')).encode('utf-8')
# event = ('%s' % self.get(u'eventTag')).encode('utf-8')
# label = ('%s' % self.get(u'labelText')).encode('utf-8')
# description = ('%s' % self.get(u'descriptionText')).encode('utf-8')
event = str(self.get(u'eventTag'))
label = str(self.get(u'labelText'))
description = str(self.get(u'descriptionText'))
timestamp = self.experiment.egi.ms_localtime()
table = tagTable
self.experiment.window.callOnFlip(self.experiment.ns.send_timestamped_event, event, label, description,
table, pad=True)
self.experiment.ns.send_event('evtT', timestamp, label, description, table, pad=True)
class qtpynetstation_send_tags(pynetstation_send_tags, qtautoplugin):
"""
This class handles the GUI aspect of the plug-in. By using qtautoplugin, we
usually need to do hardly anything, because the GUI is defined in info.json.
"""
def __init__(self, name, experiment, script=None):
"""
Constructor.
Arguments:
name -- The name of the plug-in.
experiment -- The experiment object.
Keyword arguments:
script -- A definition script. (default=None)
"""
# We don't need to do anything here, except call the parent
# constructors.
pynetstation_send_tags.__init__(self, name, experiment, script)
qtautoplugin.__init__(self, __file__)
def apply_edit_changes(self):
"""
desc:
Applies the controls.
"""
if not qtautoplugin.apply_edit_changes(self) or self.lock:
return False
self.custom_interactions()
return True
def edit_widget(self):
"""
Refreshes the controls.
Returns:
The QWidget containing the controls
"""
if self.lock:
return
self.lock = True
w = qtautoplugin.edit_widget(self)
self.custom_interactions()
self.lock = False
return w
def custom_interactions(self):
"""
desc:
Activates the relevant controls for each tracker.
"""
self.eventTag = make_fit(str(self.eventTag))
self.event_line_edit_widget.setEnabled(True)
for i in range(1, 6):
self.set(u'tagID%d' % i, make_fit(str(self.get(u'tagID%d' % i))))
onOffLabel = self.get(u'labelCheck') == u'yes'
self.label_line_edit_widget.setEnabled(onOffLabel)
onOffDesc = self.get(u'descriptionCheck') == u'yes'
self.description_line_edit_widget.setEnabled(onOffDesc)
onOffTag1 = self.get(u'tag1check') == u'yes'
self.tag1_line_edit_widget.setEnabled(onOffTag1)
self.tagid1_line_edit_widget.setEnabled(onOffTag1)
onOffTag2 = self.get(u'tag2check') == u'yes'
self.tag2_line_edit_widget.setEnabled(onOffTag2)
self.tagid2_line_edit_widget.setEnabled(onOffTag2)
onOffTag3 = self.get(u'tag3check') == u'yes'
self.tag3_line_edit_widget.setEnabled(onOffTag3)
self.tagid3_line_edit_widget.setEnabled(onOffTag3)
onOffTag4 = self.get(u'tag4check') == u'yes'
self.tag4_line_edit_widget.setEnabled(onOffTag4)
self.tagid4_line_edit_widget.setEnabled(onOffTag4)
onOffTag5 = self.get(u'tag5check') == u'yes'
self.tag5_line_edit_widget.setEnabled(onOffTag5)
self.tagid5_line_edit_widget.setEnabled(onOffTag5)
--- ffc857a75ba7aa5ef44304f6675fe0e78e0727a5 | experiments/centralisation/centralisation.py | MichaelAllen1966/2105_london_acute_stroke_unit | MIT | 976 bytes | Python ---
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('results.csv')
labels = [1,2,3,4]
width = 0.75
x = np.arange(len(labels)) # the label locations
fig = plt.figure(figsize=(9,6))
# Number people waiting
ax1 = fig.add_subplot(121)
y1 = data['av_waiting'].values.flatten()
waiting = ax1.bar(x, y1, width, color='b')
ax1.set_ylabel('Average number of patients waiting for ASU bed')
ax1.set_xlabel('ASUs per region')
ax1.set_title('Average number of patients waiting\nfor ASU bed')
ax1.set_xticks(x)
ax1.set_xticklabels(labels)
ax2 = fig.add_subplot(122)
y2 = data['av_waiting_days'].values.flatten()
days = ax2.bar(x, y2, width, color='r')
ax2.set_ylabel('Average waiting time (days)')
ax2.set_xlabel('ASUs per region')
ax2.set_title('Average waiting time\n(days, for patients who have to wait)')
ax2.set_xticks(x)
ax2.set_xticklabels(labels)
plt.tight_layout(pad=2)
plt.savefig('centralisation.png', dpi=300)
plt.show()
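# Hedged sketch of the results.csv layout the script above expects, inferred
# from the two column reads; the numbers are illustrative only (one row per
# ASU count, 1 to 4).
import pandas as pd

dummy = pd.DataFrame({
    'av_waiting': [12.0, 5.1, 2.3, 1.0],
    'av_waiting_days': [3.5, 1.8, 0.9, 0.4],
})
dummy.to_csv('results.csv', index=False)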
--- ffd1926ccd96f4b70e990d54bad23c4b64c849e9 | cloudianapi/tools/statistics.py | romerojunior/cloudian-api | Apache-2.0 | 2,531 bytes | Python ---
#!/usr/bin/env python
# -*- coding:utf8 -*-
# Copyright 2017, Schuberg Philis BV
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Romero Galiza Jr. - rgaliza@schubergphilis.com
""" This is not part of the Admin API, but it incorporates additional tooling
to support statistical analysis of monitored data within a cluster, data center
or node """
def get_hs_used_kb(node):
""" Receives a node monitor JSON string and returns a list containing the
used disk space in KB for each hyperstore disk.
:param node: an iterable object
:type node: dict
:rtype: list
"""
if 'disksInfo' not in node:
raise TypeError('Unsupported input.')
# filter function to select only HyperStore disks:
f = (lambda n: 'HS' in n['storageUse'])
hs_disks = filter(
f, (d for d in node['disksInfo']['disks'])
)
return [abs(int(disk['diskUsedKb'])) for disk in hs_disks]
def disk_avg_abs_deviation(node):
""" Returns the average absolute deviation for a given set of disks of a
given node based entirely on used capacity (expressed in KB).
Particularly useful if you want to visualize the average difference
between all disks in a given node. The closer the result is to zero the
better (less deviation = balanced usage).
:param node: an iterable object
:type node: dict
:rtype: int
"""
try:
disk_usage = get_hs_used_kb(node)
except TypeError:
return 0
if not disk_usage:  # avoid division by zero when no HyperStore disks exist
return 0
mean = (sum(disk_usage) / len(disk_usage))
deviation = [abs(kb_used - mean) for kb_used in disk_usage]
return sum(deviation)/len(deviation)
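# Hedged usage sketch: the node dict shape is inferred from the key accesses
# above, not taken from Cloudian documentation.
sample_node = {
    'disksInfo': {
        'disks': [
            {'storageUse': 'HS', 'diskUsedKb': '1000'},
            {'storageUse': 'HS', 'diskUsedKb': '3000'},
            {'storageUse': 'OS', 'diskUsedKb': '500'},  # ignored: not HyperStore
        ]
    }
}
print(get_hs_used_kb(sample_node))          # [1000, 3000]
print(disk_avg_abs_deviation(sample_node))  # 1000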
--- ffd544a103259a41233ed3e0af2e2d453a43568d | E_ledproject.py | randomstring/raspberrypi | MIT | 1,446 bytes | Python ---
#!/usr/bin/env python
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
led_color_gpio = {
'yellow': 0,
'orange': 2,
'red': 3,
'green': 4,
'blue': 5,
'white': 6
}
buttons_gpio = {
'red': 28,
'blue': 29,
}
gpio_to_bcm = {
0: 17,
1: 18,
2: 27,
3: 22,
4: 23,
5: 24,
6: 25,
21: 5,
22: 6,
23: 13,
24: 19,
25: 26,
26: 12,
27: 16,
28: 20,
29: 21,
}
def led_color(color, on):
if color not in led_color_gpio:
print('No LEDs of color {0}'.format(color))
return
bcm_pin = gpio_to_bcm[led_color_gpio[color]]
# The LEDs are wired active-low: driving the pin low turns the LED on.
GPIO.output(bcm_pin, not on)
GPIO.setmode(GPIO.BCM)
for gpio in led_color_gpio.values():
bcm_pin = gpio_to_bcm[gpio]
GPIO.setup(bcm_pin, GPIO.OUT)
GPIO.output(bcm_pin, True)
print("Type 'quit' to quit")
while True:
user_input = raw_input("Enter Color and on/off: ")  # Python 2; use input() on Python 3
tokens = user_input.split()
if len(tokens) < 1:
continue
color = tokens[0]
if color == "quit":
break
    onoff = tokens[1] if len(tokens) > 1 else "on"
    if onoff == "on":
        onoff = 1
    elif onoff == "off":
        onoff = 0
    else:
        try:
            onoff = int(onoff)
        except ValueError:
            print('Expected "on", "off" or a number, got {0}'.format(onoff))
            continue
led_color(color, onoff)
for gpio in led_color_gpio.values():
bcm_pin = gpio_to_bcm[gpio]
GPIO.output(bcm_pin, True)
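# --- Sketch (assumption): a Python 3 port of the input loop above. ---
# raw_input() is Python 2 only; input() replaces it in Python 3, and the
# port releases the pins on exit, which the original script never does:
#
#   try:
#       while True:
#           tokens = input("Enter Color and on/off: ").split()
#           if not tokens:
#               continue
#           if tokens[0] == "quit":
#               break
#           state = tokens[1] if len(tokens) > 1 else "on"
#           led_color(tokens[0], state != "off")
#   finally:
#       GPIO.cleanup()  # return all channels to a safe input state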
| 18.075
| 54
| 0.538728
| 216
| 1,446
| 3.458333
| 0.375
| 0.064257
| 0.080321
| 0.085676
| 0.228916
| 0.133869
| 0.133869
| 0.133869
| 0.133869
| 0.133869
| 0
| 0.074719
| 0.324343
| 1,446
| 79
| 55
| 18.303797
| 0.689867
| 0.013831
| 0
| 0.161765
| 0
| 0
| 0.075789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0
| 0.014706
| 0
| 0.044118
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ffd92d23d660d2a840a6dec51a3209da982b029c
| 1,172
|
py
|
Python
|
word_vectorizer/tests/unittest/model_downloading/test_gensimModelDownloader.py
|
RodSernaPerez/WordVectorizer
|
097b2ccfc284b39ad43f56047ee25e393b7525ec
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
word_vectorizer/tests/unittest/model_downloading/test_gensimModelDownloader.py
|
RodSernaPerez/WordVectorizer
|
097b2ccfc284b39ad43f56047ee25e393b7525ec
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
word_vectorizer/tests/unittest/model_downloading/test_gensimModelDownloader.py
|
RodSernaPerez/WordVectorizer
|
097b2ccfc284b39ad43f56047ee25e393b7525ec
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from unittest.mock import patch
from word_vectorizer.constants import Constants
from word_vectorizer.model_downloading.gensim_model_downloader import \
GensimModelDownloader
class TestGensimModelDownloader(TestCase):
NAME_MODEL = "name_model"
URL = "gensim"
PATH_WHERE_GENSIM_DOWNLOADS_MODEL = "this/is/a/path/to/the/" + NAME_MODEL
PATH_TO_FOLDER_WHERE_GENSIM_DOWNLOADS = "this/is/a/path"
@patch(GensimModelDownloader.__module__ + ".shutil", spec=True)
@patch(GensimModelDownloader.__module__ + ".api")
def test_download_from_url(self, mock_api, mock_shutil):
mock_api.load.return_value = self.PATH_WHERE_GENSIM_DOWNLOADS_MODEL
path = GensimModelDownloader.download_from_url(self.URL,
self.NAME_MODEL)
mock_shutil.move.assert_called_once_with(
self.PATH_WHERE_GENSIM_DOWNLOADS_MODEL,
Constants.DESTINATION_FOLDER + "/" + self.NAME_MODEL)
mock_shutil.rmtree.assert_called_once_with(
self.PATH_TO_FOLDER_WHERE_GENSIM_DOWNLOADS)
self.assertTrue(path.endswith(self.NAME_MODEL))
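# --- Note on the patch targets (a sketch, not part of the original test). ---
# `GensimModelDownloader.__module__ + ".api"` builds the patch target from the
# module where the name is looked up, so the test keeps working if the package
# is moved. The same trick with a stdlib module, runnable as-is:
import json
from unittest.mock import patch

with patch(json.__name__ + '.dumps', return_value='stub') as mock_dumps:
    assert json.dumps({'a': 1}) == 'stub'
    mock_dumps.assert_called_once_with({'a': 1})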
| 41.857143
| 77
| 0.728669
| 138
| 1,172
| 5.76087
| 0.347826
| 0.067925
| 0.125786
| 0.090566
| 0.313208
| 0.218868
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195392
| 1,172
| 27
| 78
| 43.407407
| 0.843054
| 0
| 0
| 0
| 0
| 0
| 0.054608
| 0.018771
| 0
| 0
| 0
| 0
| 0.136364
| 1
| 0.045455
| false
| 0
| 0.181818
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ffd92f6660bddf66dfe789ef939a022a436eddba
| 26,840
|
py
|
Python
|
results/generate_result.py
|
riscv-android-src/platform-test-mlts-benchmark
|
fc22878823896b81eb8b7e63e952a13f9675edcb
|
[
"Apache-2.0"
] | null | null | null |
results/generate_result.py
|
riscv-android-src/platform-test-mlts-benchmark
|
fc22878823896b81eb8b7e63e952a13f9675edcb
|
[
"Apache-2.0"
] | null | null | null |
results/generate_result.py
|
riscv-android-src/platform-test-mlts-benchmark
|
fc22878823896b81eb8b7e63e952a13f9675edcb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
#
# Copyright 2018, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MLTS benchmark result generator.
Reads a CSV produced by the MLTS benchmark and generates
an HTML page with a results summary.
Usage:
generate_result [csv input file] [html output file]
"""
import argparse
import collections
import csv
import os
import re
import math
class ScoreException(Exception):
"""Generator base exception type. """
pass
LatencyResult = collections.namedtuple(
'LatencyResult',
['iterations', 'total_time_sec', 'time_freq_start_sec', 'time_freq_step_sec', 'time_freq_sec'])
COMPILATION_TYPES = ['compile_without_cache', 'save_to_cache', 'prepare_from_cache']
BASELINE_COMPILATION_TYPE = COMPILATION_TYPES[0]
CompilationResult = collections.namedtuple(
'CompilationResult',
['cache_size_bytes'] + COMPILATION_TYPES)
BenchmarkResult = collections.namedtuple(
'BenchmarkResult',
['name', 'backend_type', 'inference_latency', 'max_single_error',
'testset_size', 'evaluator_keys', 'evaluator_values', 'validation_errors',
'compilation_results'])
ResultsWithBaseline = collections.namedtuple(
'ResultsWithBaseline',
['baseline', 'other'])
BASELINE_BACKEND = 'TFLite_CPU'
KNOWN_GROUPS = [
(re.compile('mobilenet_v1.*quant.*'), 'MobileNet v1 Quantized'),
(re.compile('mobilenet_v1.*'), 'MobileNet v1 Float'),
(re.compile('mobilenet_v2.*quant.*'), 'MobileNet v2 Quantized'),
(re.compile('mobilenet_v2.*'), 'MobileNet v2 Float'),
(re.compile('mobilenet_v3.*uint8.*'), 'MobileNet v3 Quantized'),
(re.compile('mobilenet_v3.*'), 'MobileNet v3 Float'),
(re.compile('tts.*'), 'LSTM Text-to-speech'),
(re.compile('asr.*'), 'LSTM Automatic Speech Recognition'),
]
class BenchmarkResultParser:
"""A helper class to parse the input CSV file."""
def __init__(self, csvfile):
self.csv_reader = csv.reader(filter(lambda row: row[0] != '#', csvfile))
self.row = None
self.index = 0
def next(self):
"""Advance to the next row, returns the current row or None if reaches the end."""
try:
self.row = next(self.csv_reader)
except StopIteration:
self.row = None
finally:
self.index = 0
return self.row
def read_boolean(self):
"""Read the next CSV cell as a boolean."""
s = self.read_typed(str).lower()
if s == 'true':
return True
elif s == 'false':
return False
else:
raise ValueError('Cannot convert \'%s\' to a boolean' % s)
def read_typed(self, Type):
"""Read the next CSV cell as the given type."""
if Type is bool:
return self.read_boolean()
entry = self.row[self.index]
self.index += 1
return Type(entry)
def read_typed_array(self, Type, length):
"""Read the next CSV cells as a typed array."""
return [self.read_typed(Type) for _ in range(length)]
def read_latency_result(self):
"""Read the next CSV cells as a LatencyResult."""
result = {}
result['iterations'] = self.read_typed(int)
result['total_time_sec'] = self.read_typed(float)
result['time_freq_start_sec'] = self.read_typed(float)
result['time_freq_step_sec'] = self.read_typed(float)
time_freq_sec_count = self.read_typed(int)
result['time_freq_sec'] = self.read_typed_array(float, time_freq_sec_count)
return LatencyResult(**result)
def read_compilation_result(self):
"""Read the next CSV cells as a CompilationResult."""
result = {}
for compilation_type in COMPILATION_TYPES:
has_results = self.read_typed(bool)
result[compilation_type] = self.read_latency_result() if has_results else None
result['cache_size_bytes'] = self.read_typed(int)
return CompilationResult(**result)
def read_benchmark_result(self):
"""Read the next CSV cells as a BenchmarkResult."""
result = {}
result['name'] = self.read_typed(str)
result['backend_type'] = self.read_typed(str)
result['inference_latency'] = self.read_latency_result()
result['max_single_error'] = self.read_typed(float)
result['testset_size'] = self.read_typed(int)
evaluator_keys_count = self.read_typed(int)
validation_error_count = self.read_typed(int)
result['evaluator_keys'] = self.read_typed_array(str, evaluator_keys_count)
result['evaluator_values'] = self.read_typed_array(float, evaluator_keys_count)
result['validation_errors'] = self.read_typed_array(str, validation_error_count)
result['compilation_results'] = self.read_compilation_result()
return BenchmarkResult(**result)
def parse_csv_input(input_filename):
"""Parse input CSV file, returns: (benchmarkInfo, list of BenchmarkResult)."""
with open(input_filename, 'r') as csvfile:
parser = BenchmarkResultParser(csvfile)
        # The first line contains device info
benchmark_info = parser.next()
results = []
while parser.next():
results.append(parser.read_benchmark_result())
return (benchmark_info, results)
def group_results(results):
"""Group list of results by their name/backend, returns list of lists."""
# Group by name
groupings = collections.defaultdict(list)
for result in results:
groupings[result.name].append(result)
# Find baseline for each group, make ResultsWithBaseline for each name
groupings_baseline = {}
for name, results in groupings.items():
baseline = next(filter(lambda x: x.backend_type == BASELINE_BACKEND,
results))
other = sorted(filter(lambda x: x is not baseline, results),
key=lambda x: x.backend_type)
groupings_baseline[name] = ResultsWithBaseline(
baseline=baseline,
other=other)
# Merge ResultsWithBaseline for known groups
known_groupings_baseline = collections.defaultdict(list)
for name, results_with_bl in sorted(groupings_baseline.items()):
group_name = name
for known_group in KNOWN_GROUPS:
if known_group[0].match(results_with_bl.baseline.name):
group_name = known_group[1]
break
known_groupings_baseline[group_name].append(results_with_bl)
# Turn into a list sorted by name
groupings_list = []
for name, results_wbl in sorted(known_groupings_baseline.items()):
groupings_list.append((name, results_wbl))
return groupings_list
def get_frequency_graph_min_max(latencies):
"""Get min and max times of latencies frequency."""
mins = []
maxs = []
for latency in latencies:
mins.append(latency.time_freq_start_sec)
to_add = len(latency.time_freq_sec) * latency.time_freq_step_sec
maxs.append(latency.time_freq_start_sec + to_add)
return min(mins), max(maxs)
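# Worked example (illustrative values): two histograms with different
# origins and bin widths share one x-axis spanning their union.
#   a = LatencyResult(10, 1.0, time_freq_start_sec=0.010,
#                     time_freq_step_sec=0.001, time_freq_sec=[1, 4, 5])
#       covers 0.010 .. 0.010 + 3 * 0.001 = 0.013
#   b = LatencyResult(10, 2.0, time_freq_start_sec=0.008,
#                     time_freq_step_sec=0.002, time_freq_sec=[2, 8])
#       covers 0.008 .. 0.008 + 2 * 0.002 = 0.012
#   get_frequency_graph_min_max([a, b]) -> approximately (0.008, 0.013)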
def get_frequency_graph(time_freq_start_sec, time_freq_step_sec, time_freq_sec,
start_sec, end_sec):
"""Generate input x/y data for latency frequency graph."""
left_to_pad = (int((time_freq_start_sec - start_sec) / time_freq_step_sec)
if time_freq_step_sec != 0
else math.inf)
end_time = time_freq_start_sec + len(time_freq_sec) * time_freq_step_sec
right_to_pad = (int((end_sec - end_time) / time_freq_step_sec)
if time_freq_step_sec != 0
else math.inf)
    # After padding more than 64 values, graphs start to look messy,
    # so bail out in that case.
if (left_to_pad + right_to_pad) < 64:
left_pad = (['{:.2f}ms'.format(
(start_sec + x * time_freq_step_sec) * 1000.0)
for x in range(left_to_pad)], [0] * left_to_pad)
right_pad = (['{:.2f}ms'.format(
(end_time + x * time_freq_step_sec) * 1000.0)
for x in range(right_to_pad)], [0] * right_to_pad)
else:
left_pad = [[], []]
right_pad = [[], []]
data = (['{:.2f}ms'.format(
(time_freq_start_sec + x * time_freq_step_sec) * 1000.0)
for x in range(len(time_freq_sec))], time_freq_sec)
return (left_pad[0] + data[0] + right_pad[0],
left_pad[1] + data[1] + right_pad[1])
def is_topk_evaluator(evaluator_keys):
"""Are these evaluator keys from TopK evaluator?"""
return (len(evaluator_keys) == 5 and
evaluator_keys[0] == 'top_1' and
evaluator_keys[1] == 'top_2' and
evaluator_keys[2] == 'top_3' and
evaluator_keys[3] == 'top_4' and
evaluator_keys[4] == 'top_5')
def is_melceplogf0_evaluator(evaluator_keys):
"""Are these evaluator keys from MelCepLogF0 evaluator?"""
return (len(evaluator_keys) == 2 and
evaluator_keys[0] == 'max_mel_cep_distortion' and
evaluator_keys[1] == 'max_log_f0_error')
def is_phone_error_rate_evaluator(evaluator_keys):
"""Are these evaluator keys from PhoneErrorRate evaluator?"""
return (len(evaluator_keys) == 1 and
evaluator_keys[0] == 'max_phone_error_rate')
def generate_accuracy_headers(result):
"""Accuracy-related headers for result table."""
if is_topk_evaluator(result.evaluator_keys):
return ACCURACY_HEADERS_TOPK_TEMPLATE
elif is_melceplogf0_evaluator(result.evaluator_keys):
return ACCURACY_HEADERS_MELCEPLOGF0_TEMPLATE
elif is_phone_error_rate_evaluator(result.evaluator_keys):
return ACCURACY_HEADERS_PHONE_ERROR_RATE_TEMPLATE
    else:
        # Fallback for evaluators we do not recognize.
        return ACCURACY_HEADERS_BASIC_TEMPLATE
def get_diff_span(value, same_delta, positive_is_better):
if abs(value) < same_delta:
return 'same'
if positive_is_better and value > 0 or not positive_is_better and value < 0:
return 'better'
return 'worse'
def generate_accuracy_values(baseline, result):
"""Accuracy-related data for result table."""
if is_topk_evaluator(result.evaluator_keys):
val = [float(x) * 100.0 for x in result.evaluator_values]
if result is baseline:
topk = [TOPK_BASELINE_TEMPLATE.format(val=x) for x in val]
return ACCURACY_VALUES_TOPK_TEMPLATE.format(
top1=topk[0], top2=topk[1], top3=topk[2], top4=topk[3],
top5=topk[4]
)
else:
base = [float(x) * 100.0 for x in baseline.evaluator_values]
diff = [a - b for a, b in zip(val, base)]
topk = [TOPK_DIFF_TEMPLATE.format(
val=v, diff=d, span=get_diff_span(d, 1.0, positive_is_better=True))
for v, d in zip(val, diff)]
return ACCURACY_VALUES_TOPK_TEMPLATE.format(
top1=topk[0], top2=topk[1], top3=topk[2], top4=topk[3],
top5=topk[4]
)
elif is_melceplogf0_evaluator(result.evaluator_keys):
val = [float(x) for x in
result.evaluator_values + [result.max_single_error]]
if result is baseline:
return ACCURACY_VALUES_MELCEPLOGF0_TEMPLATE.format(
max_log_f0=MELCEPLOGF0_BASELINE_TEMPLATE.format(
val=val[0]),
max_mel_cep_distortion=MELCEPLOGF0_BASELINE_TEMPLATE.format(
val=val[1]),
max_single_error=MELCEPLOGF0_BASELINE_TEMPLATE.format(
val=val[2]),
)
else:
base = [float(x) for x in
baseline.evaluator_values + [baseline.max_single_error]]
diff = [a - b for a, b in zip(val, base)]
v = [MELCEPLOGF0_DIFF_TEMPLATE.format(
val=v, diff=d, span=get_diff_span(d, 1.0, positive_is_better=False))
for v, d in zip(val, diff)]
return ACCURACY_VALUES_MELCEPLOGF0_TEMPLATE.format(
max_log_f0=v[0],
max_mel_cep_distortion=v[1],
max_single_error=v[2],
)
elif is_phone_error_rate_evaluator(result.evaluator_keys):
val = [float(x) for x in
result.evaluator_values + [result.max_single_error]]
if result is baseline:
return ACCURACY_VALUES_PHONE_ERROR_RATE_TEMPLATE.format(
max_phone_error_rate=PHONE_ERROR_RATE_BASELINE_TEMPLATE.format(
val=val[0]),
max_single_error=PHONE_ERROR_RATE_BASELINE_TEMPLATE.format(
val=val[1]),
)
else:
base = [float(x) for x in
baseline.evaluator_values + [baseline.max_single_error]]
diff = [a - b for a, b in zip(val, base)]
v = [PHONE_ERROR_RATE_DIFF_TEMPLATE.format(
val=v, diff=d, span=get_diff_span(d, 1.0, positive_is_better=False))
for v, d in zip(val, diff)]
return ACCURACY_VALUES_PHONE_ERROR_RATE_TEMPLATE.format(
max_phone_error_rate=v[0],
max_single_error=v[1],
)
    else:
        # Fallback for evaluators we do not recognize.
        return ACCURACY_VALUES_BASIC_TEMPLATE.format(
            max_single_error=result.max_single_error,
        )
def getchartjs_source():
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), CHART_JS_FILE)
    with open(path) as f:
        return f.read()
def generate_avg_ms(baseline, latency):
"""Generate average latency value."""
if latency is None:
latency = baseline
result_avg_ms = (latency.total_time_sec / latency.iterations)*1000.0
if latency is baseline:
return LATENCY_BASELINE_TEMPLATE.format(val=result_avg_ms)
baseline_avg_ms = (baseline.total_time_sec / baseline.iterations)*1000.0
diff = (result_avg_ms/baseline_avg_ms - 1.0) * 100.0
diff_val = result_avg_ms - baseline_avg_ms
return LATENCY_DIFF_TEMPLATE.format(
val=result_avg_ms,
diff=diff,
diff_val=diff_val,
span=get_diff_span(diff, same_delta=1.0, positive_is_better=False))
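# Worked example (illustrative values): the baseline does 50 iterations in
# 1.0 s -> 20.00 ms/iter; the result does 40 iterations in 1.0 s ->
# 25.00 ms/iter. Then diff_val = 5.00 ms, diff = (25/20 - 1) * 100 = +25.0%,
# and with positive_is_better=False the span class is 'worse':
#   generate_avg_ms(LatencyResult(50, 1.0, 0, 0, []),
#                   LatencyResult(40, 1.0, 0, 0, []))
#   -> "25.00ms <span class='worse'>\n(5.00ms, 25.0%)</span>"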
def generate_result_entry(baseline, result):
if result is None:
result = baseline
return RESULT_ENTRY_TEMPLATE.format(
row_class='failed' if result.validation_errors else 'normal',
name=result.name,
backend=result.backend_type,
iterations=result.inference_latency.iterations,
testset_size=result.testset_size,
accuracy_values=generate_accuracy_values(baseline, result),
avg_ms=generate_avg_ms(baseline.inference_latency, result.inference_latency))
def generate_latency_graph_entry(tag, latency, tmin, tmax):
"""Generate a single latency graph."""
return LATENCY_GRAPH_ENTRY_TEMPLATE.format(
tag=tag,
i=id(latency),
freq_data=get_frequency_graph(latency.time_freq_start_sec,
latency.time_freq_step_sec,
latency.time_freq_sec,
tmin, tmax))
def generate_latency_graphs_group(tags, latencies):
"""Generate a group of latency graphs with the same tmin and tmax."""
tmin, tmax = get_frequency_graph_min_max(latencies)
return ''.join(
generate_latency_graph_entry(tag, latency, tmin, tmax)
for tag, latency in zip(tags, latencies))
def snake_case_to_title(string):
return string.replace('_', ' ').title()
def generate_inference_latency_graph_entry(results_with_bl):
"""Generate a group of latency graphs for inference latencies."""
results = [results_with_bl.baseline] + results_with_bl.other
tags = [result.backend_type for result in results]
latencies = [result.inference_latency for result in results]
return generate_latency_graphs_group(tags, latencies)
def generate_compilation_latency_graph_entry(results_with_bl):
"""Generate a group of latency graphs for compilation latencies."""
tags = [
result.backend_type + ', ' + snake_case_to_title(type)
for result in results_with_bl.other
for type in COMPILATION_TYPES
if getattr(result.compilation_results, type)
]
latencies = [
getattr(result.compilation_results, type)
for result in results_with_bl.other
for type in COMPILATION_TYPES
if getattr(result.compilation_results, type)
]
return generate_latency_graphs_group(tags, latencies)
def generate_validation_errors(entries_group):
"""Generate validation errors table."""
errors = []
for result_and_bl in entries_group:
for result in [result_and_bl.baseline] + result_and_bl.other:
for error in result.validation_errors:
errors.append((result.name, result.backend_type, error))
if errors:
return VALIDATION_ERRORS_TEMPLATE.format(
results=''.join(
VALIDATION_ERRORS_ENTRY_TEMPLATE.format(
name=name,
backend=backend,
error=error) for name, backend, error in errors))
return ''
def generate_compilation_result_entry(result):
format_args = {
'row_class':
'failed' if result.validation_errors else 'normal',
'name':
result.name,
'backend':
result.backend_type,
'cache_size':
f'{result.compilation_results.cache_size_bytes:,}'
if result.compilation_results.cache_size_bytes > 0 else '-'
}
for compilation_type in COMPILATION_TYPES:
latency = getattr(result.compilation_results, compilation_type)
if latency:
format_args[compilation_type + '_iterations'] = f'{latency.iterations}'
format_args[compilation_type + '_avg_ms'] = generate_avg_ms(
result.compilation_results.compile_without_cache, latency)
else:
format_args[compilation_type + '_iterations'] = '-'
format_args[compilation_type + '_avg_ms'] = '-'
return COMPILATION_RESULT_ENTRY_TEMPLATE.format(**format_args)
def generate_result(benchmark_info, data):
"""Turn list of results into HTML."""
return MAIN_TEMPLATE.format(
jsdeps=getchartjs_source(),
device_info=DEVICE_INFO_TEMPLATE.format(
benchmark_time=benchmark_info[0],
device_info=benchmark_info[1],
),
results_list=''.join((
RESULT_GROUP_TEMPLATE.format(
group_name=entries_name,
accuracy_headers=generate_accuracy_headers(
entries_group[0].baseline),
results=''.join(
RESULT_ENTRY_WITH_BASELINE_TEMPLATE.format(
baseline=generate_result_entry(
result_and_bl.baseline, None),
other=''.join(
generate_result_entry(
result_and_bl.baseline, x)
for x in result_and_bl.other)
) for result_and_bl in entries_group),
validation_errors=generate_validation_errors(entries_group),
latency_graphs=LATENCY_GRAPHS_TEMPLATE.format(
results=''.join(
LATENCY_GRAPH_ENTRY_GROUP_TEMPLATE.format(
name=result_and_bl.baseline.name,
results=generate_inference_latency_graph_entry(result_and_bl)
) for result_and_bl in entries_group)
),
compilation_results=''.join(
COMPILATION_RESULT_ENTRIES_TEMPLATE.format(
entries=''.join(
generate_compilation_result_entry(x) for x in result_and_bl.other)
) for result_and_bl in entries_group),
compilation_latency_graphs=LATENCY_GRAPHS_TEMPLATE.format(
results=''.join(
LATENCY_GRAPH_ENTRY_GROUP_TEMPLATE.format(
name=result_and_bl.baseline.name,
results=generate_compilation_latency_graph_entry(result_and_bl)
) for result_and_bl in entries_group)
),
) for entries_name, entries_group in group_results(data))
))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input', help='input csv filename')
parser.add_argument('output', help='output html filename')
args = parser.parse_args()
benchmark_info, data = parse_csv_input(args.input)
with open(args.output, 'w') as htmlfile:
htmlfile.write(generate_result(benchmark_info, data))
# -----------------
# Templates below
MAIN_TEMPLATE = """<!doctype html>
<html lang='en-US'>
<head>
<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
<script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script>
<script>{jsdeps}</script>
<title>MLTS results</title>
<style>
.results {{
border-collapse: collapse;
width: 100%;
}}
.results td, .results th {{
border: 1px solid #ddd;
padding: 6px;
}}
.results tbody.values {{
border-bottom: 8px solid #333;
}}
span.better {{
color: #070;
}}
span.worse {{
color: #700;
}}
span.same {{
color: #000;
}}
.results tr:nth-child(even) {{background-color: #eee;}}
.results tr:hover {{background-color: #ddd;}}
.results th {{
padding: 10px;
font-weight: bold;
text-align: left;
background-color: #333;
color: white;
}}
.results tr.failed {{
background-color: #ffc4ca;
}}
.group {{
padding-top: 25px;
}}
.group_name {{
padding-left: 10px;
font-size: 140%;
font-weight: bold;
}}
.section_name {{
padding: 10px;
font-size: 120%;
font-weight: bold;
}}
.latency_results {{
padding: 10px;
border: 1px solid #ddd;
overflow: hidden;
}}
.latency_with_baseline {{
padding: 10px;
border: 1px solid #ddd;
overflow: hidden;
}}
</style>
</head>
<body>
{device_info}
{results_list}
</body>
</html>"""
DEVICE_INFO_TEMPLATE = """<div id='device_info'>
Benchmark for {device_info}, started at {benchmark_time}
</div>"""
RESULT_GROUP_TEMPLATE = """<div class="group">
<div class="group_name">{group_name}</div>
<div class="section_name">Inference results</div>
<table class="results">
<tr>
<th>Name</th>
<th>Backend</th>
<th>Iterations</th>
<th>Test set size</th>
<th>Average latency ms</th>
{accuracy_headers}
</tr>
{results}
</table>
{validation_errors}
{latency_graphs}
<div class="section_name">Compilation results</div>
<table class="results">
<tr>
<th rowspan="2">Name</th>
<th rowspan="2">Backend</th>
<th colspan="2">Compile Without Cache</th>
<th colspan="2">Save To Cache</th>
<th colspan="2">Prepare From Cache</th>
<th rowspan="2">Cache size bytes</th>
</tr>
<tr>
<th>Iterations</th>
<th>Average latency ms</th>
<th>Iterations</th>
<th>Average latency ms</th>
<th>Iterations</th>
<th>Average latency ms</th>
</tr>
{compilation_results}
</table>
{compilation_latency_graphs}
</div>"""
VALIDATION_ERRORS_TEMPLATE = """
<table class="results">
<tr>
<th>Name</th>
<th>Backend</th>
<th>Error</th>
</tr>
{results}
</table>"""
VALIDATION_ERRORS_ENTRY_TEMPLATE = """
<tr class="failed">
<td>{name}</td>
<td>{backend}</td>
<td>{error}</td>
</tr>
"""
LATENCY_GRAPHS_TEMPLATE = """
<div class="latency_results">
{results}
</div>
<div style="clear: left;"></div>
"""
LATENCY_GRAPH_ENTRY_GROUP_TEMPLATE = """
<div class="latency_with_baseline" style="float: left;">
<b>{name}</b>
{results}
</div>
"""
LATENCY_GRAPH_ENTRY_TEMPLATE = """
<div class="latency_result" style='width: 350px;'>
{tag}
<canvas id='latency_chart{i}' class='latency_chart'></canvas>
<script>
$(function() {{
var freqData = {{
labels: {freq_data[0]},
datasets: [{{
data: {freq_data[1]},
backgroundColor: 'rgba(255, 99, 132, 0.6)',
borderColor: 'rgba(255, 0, 0, 0.6)',
borderWidth: 1,
}}]
}};
var ctx = $('#latency_chart{i}')[0].getContext('2d');
window.latency_chart{i} = new Chart(ctx,
{{
type: 'bar',
data: freqData,
options: {{
responsive: true,
title: {{
display: false,
text: 'Latency frequency'
}},
legend: {{
display: false
}},
scales: {{
xAxes: [ {{
barPercentage: 1.0,
categoryPercentage: 0.9,
}}],
yAxes: [{{
scaleLabel: {{
display: false,
labelString: 'Iterations Count'
}}
}}]
}}
}}
}});
}});
</script>
</div>
"""
RESULT_ENTRY_WITH_BASELINE_TEMPLATE = """
<tbody class="values">
{baseline}
{other}
</tbody>
"""
RESULT_ENTRY_TEMPLATE = """
<tr class={row_class}>
<td>{name}</td>
<td>{backend}</td>
<td>{iterations:d}</td>
<td>{testset_size:d}</td>
<td>{avg_ms}</td>
{accuracy_values}
</tr>"""
COMPILATION_RESULT_ENTRIES_TEMPLATE = """
<tbody class="values">
{entries}
</tbody>
"""
COMPILATION_RESULT_ENTRY_TEMPLATE = """
<tr class={row_class}>
<td>{name}</td>
<td>{backend}</td>
<td>{compile_without_cache_iterations}</td>
<td>{compile_without_cache_avg_ms}</td>
<td>{save_to_cache_iterations}</td>
<td>{save_to_cache_avg_ms}</td>
<td>{prepare_from_cache_iterations}</td>
<td>{prepare_from_cache_avg_ms}</td>
<td>{cache_size}</td>
</tr>"""
LATENCY_BASELINE_TEMPLATE = """{val:.2f}ms"""
LATENCY_DIFF_TEMPLATE = """{val:.2f}ms <span class='{span}'>
({diff_val:.2f}ms, {diff:.1f}%)</span>"""
ACCURACY_HEADERS_TOPK_TEMPLATE = """
<th>Top 1</th>
<th>Top 2</th>
<th>Top 3</th>
<th>Top 4</th>
<th>Top 5</th>
"""
ACCURACY_VALUES_TOPK_TEMPLATE = """
<td>{top1}</td>
<td>{top2}</td>
<td>{top3}</td>
<td>{top4}</td>
<td>{top5}</td>
"""
TOPK_BASELINE_TEMPLATE = """{val:.3f}%"""
TOPK_DIFF_TEMPLATE = """{val:.3f}% <span class='{span}'>({diff:.1f}%)</span>"""
ACCURACY_HEADERS_MELCEPLOGF0_TEMPLATE = """
<th>Max log(F0) error</th>
<th>Max Mel Cep distortion</th>
<th>Max scalar error</th>
"""
ACCURACY_VALUES_MELCEPLOGF0_TEMPLATE = """
<td>{max_log_f0}</td>
<td>{max_mel_cep_distortion}</td>
<td>{max_single_error}</td>
"""
MELCEPLOGF0_BASELINE_TEMPLATE = """{val:.2E}"""
MELCEPLOGF0_DIFF_TEMPLATE = \
"""{val:.2E} <span class='{span}'>({diff:.1f}%)</span>"""
ACCURACY_HEADERS_PHONE_ERROR_RATE_TEMPLATE = """
<th>Max phone error rate</th>
<th>Max scalar error</th>
"""
ACCURACY_VALUES_PHONE_ERROR_RATE_TEMPLATE = """
<td>{max_phone_error_rate}</td>
<td>{max_single_error}</td>
"""
PHONE_ERROR_RATE_BASELINE_TEMPLATE = """{val:.3f}"""
PHONE_ERROR_RATE_DIFF_TEMPLATE = \
"""{val:.3f} <span class='{span}'>({diff:.1f}%)</span>"""
ACCURACY_HEADERS_BASIC_TEMPLATE = """
<th>Max single scalar error</th>
"""
ACCURACY_VALUES_BASIC_TEMPLATE = """
<td>{max_single_error:.2f}</td>
"""
CHART_JS_FILE = 'Chart.bundle.min.js'
if __name__ == '__main__':
main()
| 31.613663
| 99
| 0.656222
| 3,436
| 26,840
| 4.862922
| 0.129511
| 0.015321
| 0.014782
| 0.01167
| 0.389072
| 0.30726
| 0.254713
| 0.225388
| 0.183793
| 0.163325
| 0
| 0.01286
| 0.217735
| 26,840
| 848
| 100
| 31.650943
| 0.782959
| 0.085171
| 0
| 0.275556
| 0
| 0.002963
| 0.274696
| 0.045666
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0.001481
| 0.008889
| 0.002963
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ffda91245aed33f9125784b3f0d5a73c6224af00
| 6,975
|
py
|
Python
|
ampel/ztf/dev/DevSkyPortalClient.py
|
AmpelProject/Ampel-ZTF
|
7f9736a7be3aa526571004716160cae2a800e410
|
[
"BSD-3-Clause"
] | 1
|
2021-03-11T15:39:28.000Z
|
2021-03-11T15:39:28.000Z
|
ampel/ztf/dev/DevSkyPortalClient.py
|
AmpelProject/Ampel-ZTF
|
7f9736a7be3aa526571004716160cae2a800e410
|
[
"BSD-3-Clause"
] | 18
|
2021-08-02T17:11:25.000Z
|
2022-01-11T16:20:04.000Z
|
ampel/ztf/dev/DevSkyPortalClient.py
|
AmpelProject/Ampel-ZTF
|
7f9736a7be3aa526571004716160cae2a800e410
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-ZTF/ampel/ztf/dev/DevSkyPortalClient.py
# Author: Jakob van Santen <jakob.van.santen@desy.de>
# Date: 16.09.2020
# Last Modified Date: 16.09.2020
# Last Modified By: Jakob van Santen <jakob.van.santen@desy.de>
import gzip
import io
from collections import defaultdict
from datetime import datetime
from typing import Any
from collections.abc import Sequence, Generator
import numpy as np
import requests
from ampel.protocol.AmpelAlertProtocol import AmpelAlertProtocol
from astropy.io import fits
from astropy.time import Time
from matplotlib.colors import Normalize
from matplotlib.figure import Figure
def render_thumbnail(cutout_data: bytes) -> bytes:
"""
Render gzipped FITS as PNG
"""
    with gzip.open(io.BytesIO(cutout_data), "rb") as f:
        with fits.open(f) as hdu:
            img = np.flipud(hdu[0].data)  # FITS data is bottom-up; flip for display
    mask = np.isfinite(img)
fig = Figure(figsize=(1, 1))
ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
ax.set_axis_off()
ax.imshow(
img,
# clip pixel values below the median
norm=Normalize(*np.percentile(img[mask], [0.5, 99.5])),
aspect="auto",
origin="lower",
)
with io.BytesIO() as buf:
fig.savefig(buf, dpi=img.shape[0])
return buf.getvalue()
class DevSkyPortalClient:
"""
Post PhotoAlerts to [a local, test instance of] SkyPortal
"""
def __init__(self, root_token, base_url="http://localhost:9000/api"):
"""
:param root_token: INITIAL_ADMIN from .tokens.yaml in the SkyPortal container
"""
self.base_url = base_url
self.kwargs = {"headers": {"Authorization": f"token {root_token}"}}
self.session = requests.Session()
# Set up seed data ourselves
p48 = self.get_id(
"/telescope",
{"name": "P48"},
{
"diameter": 1.2,
"elevation": 1870.0,
"lat": 33.3633675,
"lon": -116.8361345,
"nickname": "Palomar 1.2m Oschin",
"name": "P48",
"skycam_link": "http://bianca.palomar.caltech.edu/images/allsky/AllSkyCurrentImage.JPG",
"robotic": True,
},
)
source = {
"instrument": self.get_id(
"/instrument",
{"name": "ZTF"},
{
"filters": ["ztfg", "ztfr", "ztfi"],
"type": "imager",
"band": "optical",
"telescope_id": p48,
"name": "ZTF",
},
),
"stream": self.get_id("/streams", {"name": "ztf_partnership"}),
"group": 1, # root group
}
self.post(
f"/groups/{source['group']}/streams", json={"stream_id": source["stream"]}
)
source["filter"] = self.get_id(
"/filters",
{"name": "highlander"},
{
"name": "highlander",
"stream_id": source["stream"],
"group_id": source["group"],
},
)
self.source = source
# ensure that all users are in the root group
for user in self.get("/user")["data"]:
self.post(
f"/groups/{self.source['group']}/users",
json={"username": user["username"]},
)
def get_id(self, endpoint, params, default=None):
"""Query for an object by id, inserting it if not found"""
if not (response := self.get(endpoint, params=params))["data"]:
response = self.post(endpoint, json=default or params, raise_exc=True)
if isinstance(response["data"], list):
return response["data"][0]["id"]
else:
return response["data"]["id"]
def request(self, verb, endpoint, raise_exc=False, **kwargs):
response = self.session.request(
verb, self.base_url + endpoint, **{**self.kwargs, **kwargs}
).json()
if raise_exc and response["status"] != "success":
raise RuntimeError(response["message"])
return response
def get(self, endpoint, **kwargs):
return self.request("GET", endpoint, **kwargs)
def post(self, endpoint, **kwargs):
return self.request("POST", endpoint, **kwargs)
def make_photometry(self, alert: AmpelAlertProtocol, after=-float("inf")):
base = {
"obj_id": alert.id,
"alert_id": alert.datapoints[0]["candid"],
"group_ids": [self.source["group"]],
"instrument_id": self.source["instrument"],
"magsys": "ab",
}
content = defaultdict(list)
for doc in self._transform_datapoints(alert.datapoints, after):
for k, v in doc.items():
content[k].append(v)
return {**base, **content}
def _transform_datapoints(self, dps: Sequence[dict[str,Any]], after=-float("inf")) -> Generator[dict[str,Any],None,None]:
ztf_filters = {1: "ztfg", 2: "ztfr", 3: "ztfi"}
for dp in dps:
if dp["jd"] <= after:
continue
base = {
"filter": ztf_filters[dp["fid"]],
"mjd": dp["jd"] - 2400000.5,
"limiting_mag": dp["diffmaglim"],
}
if dp["magpsf"] is not None:
content = {
"mag": dp["magpsf"],
"magerr": dp["sigmapsf"],
"ra": dp["ra"],
"dec": dp["dec"],
}
else:
content = {k: None for k in ("mag", "magerr", "ra", "dec")}
yield {**base, **content}
def post_alert(self, alert: AmpelAlertProtocol):
# cribbed from https://github.com/dmitryduev/kowalski-dev/blob/882a7fa7e292676dd4864212efa696fb99668b4c/kowalski/alert_watcher_ztf.py#L801-L937
after = -float("inf")
if (candidate := self.get(f"/candidates/{alert.id}"))["status"] != "success":
candidate = alert.datapoints[0]
alert_thin = {
"id": alert.id,
"ra": candidate.get("ra"),
"dec": candidate.get("dec"),
"score": candidate.get("drb", candidate.get("rb")),
"passing_alert_id": candidate["candid"],
"filter_ids": [self.source["filter"]],
}
self.post("/candidates", json=alert_thin, raise_exc=True)
elif candidate["data"]["last_detected"]:
after = Time(datetime.fromisoformat(candidate["data"]["last_detected"])).jd
        # post only if there are new photopoints
        if "mjd" in (photometry := self.make_photometry(alert, after=after)):
            self.post("/photometry", json=photometry, raise_exc=True)
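# --- Usage sketch (assumptions: a local SkyPortal test instance is running,
# `alert` satisfies AmpelAlertProtocol with an `id` and ZTF-style datapoints,
# and the token value is a placeholder) ---
#
#   client = DevSkyPortalClient(root_token="INITIAL_ADMIN",
#                               base_url="http://localhost:9000/api")
#   client.post_alert(alert)  # creates the candidate if missing, then posts
#                             # only the photopoints newer than last_detected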
| 36.139896
| 151
| 0.531326
| 759
| 6,975
| 4.807642
| 0.370224
| 0.013428
| 0.015347
| 0.010414
| 0.050973
| 0.050973
| 0.018635
| 0.018635
| 0
| 0
| 0
| 0.025988
| 0.321434
| 6,975
| 192
| 152
| 36.328125
| 0.744982
| 0.116989
| 0
| 0.039735
| 0
| 0
| 0.150813
| 0.01495
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059603
| false
| 0.006623
| 0.086093
| 0.013245
| 0.198676
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ffddb9df1f192b673556f7659d2310d13ba94e89
| 3,806
|
py
|
Python
|
tools/test_detection_features_converter.py
|
jialinwu17/caption_vqa
|
9bbbb580d031a20ba4f18ef14fcd3599b62a482a
|
[
"MIT"
] | 139
|
2018-03-21T09:39:39.000Z
|
2021-07-07T14:19:26.000Z
|
tools/test_detection_features_converter.py
|
VincentYing/Attention-on-Attention-for-VQA
|
cbc767541667e9bb32760ac7cd2e822eff232ff5
|
[
"MIT"
] | 4
|
2018-05-25T05:15:20.000Z
|
2018-10-11T00:52:14.000Z
|
tools/test_detection_features_converter.py
|
VincentYing/Attention-on-Attention-for-VQA
|
cbc767541667e9bb32760ac7cd2e822eff232ff5
|
[
"MIT"
] | 23
|
2018-03-22T10:12:35.000Z
|
2021-02-20T06:18:00.000Z
|
"""
Reads in a tsv file with pre-trained bottom up attention features and
stores it in HDF5 format. Also store {image_id: feature_idx}
as a pickle file.
Hierarchy of HDF5 file:
{ 'image_features': num_images x num_boxes x 2048 array of features
'image_bb': num_images x num_boxes x 4 array of bounding boxes }
"""
from __future__ import print_function
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import base64
import csv
import h5py
import cPickle
import numpy as np
import utils
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']
infile = 'data/test2015_36/test2015_resnet101_faster_rcnn_genome_36.tsv'
test_data_file = 'data/test36.hdf5'
test_indices_file = 'data/test36_imgid2idx.pkl'
test_ids_file = 'data/test_ids.pkl'
feature_length = 2048
num_fixed_boxes = 36
if __name__ == '__main__':
h_test = h5py.File(test_data_file, "w")
    if os.path.exists(test_ids_file):
        test_imgids = cPickle.load(open(test_ids_file, 'rb'))
    else:
        test_imgids = utils.load_imageid('data/test2015')
        cPickle.dump(test_imgids, open(test_ids_file, 'wb'))
test_indices = {}
test_img_features = h_test.create_dataset(
'image_features', (len(test_imgids), num_fixed_boxes, feature_length), 'f')
test_img_bb = h_test.create_dataset(
'image_bb', (len(test_imgids), num_fixed_boxes, 4), 'f')
test_spatial_img_features = h_test.create_dataset(
'spatial_features', (len(test_imgids), num_fixed_boxes, 6), 'f')
test_counter = 0
print("reading tsv...")
with open(infile, "r+b") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
item['num_boxes'] = int(item['num_boxes'])
image_id = int(item['image_id'])
image_w = float(item['image_w'])
image_h = float(item['image_h'])
bboxes = np.frombuffer(
base64.decodestring(item['boxes']),
dtype=np.float32).reshape((item['num_boxes'], -1))
box_width = bboxes[:, 2] - bboxes[:, 0]
box_height = bboxes[:, 3] - bboxes[:, 1]
scaled_width = box_width / image_w
scaled_height = box_height / image_h
scaled_x = bboxes[:, 0] / image_w
scaled_y = bboxes[:, 1] / image_h
box_width = box_width[..., np.newaxis]
box_height = box_height[..., np.newaxis]
scaled_width = scaled_width[..., np.newaxis]
scaled_height = scaled_height[..., np.newaxis]
scaled_x = scaled_x[..., np.newaxis]
scaled_y = scaled_y[..., np.newaxis]
spatial_features = np.concatenate(
(scaled_x,
scaled_y,
scaled_x + scaled_width,
scaled_y + scaled_height,
scaled_width,
scaled_height),
axis=1)
if image_id in test_imgids:
test_imgids.remove(image_id)
test_indices[image_id] = test_counter
test_img_bb[test_counter, :, :] = bboxes
test_img_features[test_counter, :, :] = np.frombuffer(
base64.decodestring(item['features']),
dtype=np.float32).reshape((item['num_boxes'], -1))
test_spatial_img_features[test_counter, :, :] = spatial_features
test_counter += 1
else:
assert False, 'Unknown image id: %d' % image_id
if len(test_imgids) != 0:
print('Warning: test_image_ids is not empty')
cPickle.dump(test_indices, open(test_indices_file, 'wb'))
h_test.close()
print("done!")
| 34.6
| 83
| 0.618497
| 498
| 3,806
| 4.399598
| 0.273092
| 0.028754
| 0.020082
| 0.024646
| 0.176175
| 0.117754
| 0.062072
| 0.031036
| 0
| 0
| 0
| 0.022857
| 0.26432
| 3,806
| 109
| 84
| 34.917431
| 0.759643
| 0.081713
| 0
| 0.05
| 0
| 0
| 0.109263
| 0.024663
| 0
| 0
| 0
| 0
| 0.0125
| 1
| 0
| false
| 0
| 0.1125
| 0
| 0.1125
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ffdf3cdd0117fb616bc6eff58d4c3d502c8bf807
| 6,301
|
py
|
Python
|
aydin/it/classic_denoisers/bilateral.py
|
AhmetCanSolak/aydin
|
e8bc81ee88c96e0f34986df30a63c96468a45f70
|
[
"BSD-3-Clause"
] | 78
|
2021-11-08T16:11:23.000Z
|
2022-03-27T17:51:04.000Z
|
aydin/it/classic_denoisers/bilateral.py
|
AhmetCanSolak/aydin
|
e8bc81ee88c96e0f34986df30a63c96468a45f70
|
[
"BSD-3-Clause"
] | 19
|
2021-11-08T17:15:40.000Z
|
2022-03-30T17:46:55.000Z
|
aydin/it/classic_denoisers/bilateral.py
|
AhmetCanSolak/aydin
|
e8bc81ee88c96e0f34986df30a63c96468a45f70
|
[
"BSD-3-Clause"
] | 7
|
2021-11-09T17:42:32.000Z
|
2022-03-09T00:37:57.000Z
|
from functools import partial
from typing import Optional, List, Tuple
import numpy
from numpy.typing import ArrayLike
from skimage.restoration import denoise_bilateral as skimage_denoise_bilateral
from aydin.it.classic_denoisers import _defaults
from aydin.util.crop.rep_crop import representative_crop
from aydin.util.denoise_nd.denoise_nd import extend_nd
from aydin.util.j_invariance.j_invariance import calibrate_denoiser
def calibrate_denoise_bilateral(
image: ArrayLike,
bins: int = 10000,
crop_size_in_voxels: Optional[int] = _defaults.default_crop_size_normal.value,
optimiser: str = _defaults.default_optimiser.value,
max_num_evaluations: int = _defaults.default_max_evals_normal.value,
blind_spots: Optional[List[Tuple[int]]] = _defaults.default_blind_spots.value,
jinv_interpolation_mode: str = _defaults.default_jinv_interpolation_mode.value,
display_images: bool = False,
display_crop: bool = False,
**other_fixed_parameters,
):
"""
Calibrates the bilateral denoiser for the given image and returns the optimal
parameters obtained using the N2S loss.
Note: it seems that the bilateral filter of scikit-image
is broken!
Parameters
----------
image: ArrayLike
Image to calibrate denoiser for.
bins: int
Number of discrete values for Gaussian weights of
color filtering. A larger value results in improved
accuracy.
(advanced)
crop_size_in_voxels: int or None for default
Number of voxels for crop used to calibrate denoiser.
Increase this number by factors of two if denoising quality is
unsatisfactory -- this can be important for very noisy images.
Values to try are: 65000, 128000, 256000, 320000.
We do not recommend values higher than 512000.
optimiser: str
Optimiser to use for finding the best denoising
parameters. Can be: 'smart' (default), or 'fast' for a mix of SHGO
followed by L-BFGS-B.
(advanced)
max_num_evaluations: int
Maximum number of evaluations for finding the optimal parameters.
Increase this number by factors of two if denoising quality is
unsatisfactory.
    blind_spots: list of tuples, optional
List of voxel coordinates (relative to receptive field center) to
be included in the blind-spot. For example, you can give a list of
3 tuples: [(0,0,0), (0,1,0), (0,-1,0)] to extend the blind spot
to cover voxels of relative coordinates: (0,0,0),(0,1,0), and (0,-1,0)
(advanced) (hidden)
jinv_interpolation_mode: str
J-invariance interpolation mode for masking. Can be: 'median' or
'gaussian'.
(advanced)
display_images: bool
When True the denoised images encountered during
optimisation are shown
(advanced) (hidden)
display_crop: bool
Displays crop, for debugging purposes...
(advanced) (hidden)
other_fixed_parameters: dict
Any other fixed parameters
Returns
-------
Denoising function, dictionary containing optimal parameters,
and free memory needed in bytes for computation.
"""
# Convert image to float if needed:
image = image.astype(dtype=numpy.float32, copy=False)
# obtain representative crop, to speed things up...
crop = representative_crop(
image, crop_size=crop_size_in_voxels, display_crop=display_crop
)
# Parameters to test when calibrating the denoising algorithm
parameter_ranges = {'sigma_spatial': (0.01, 1), 'sigma_color': (0.01, 1)}
# Combine fixed parameters:
other_fixed_parameters = other_fixed_parameters | {'bins': bins}
# Partial function:
_denoise_bilateral = partial(denoise_bilateral, **other_fixed_parameters)
# Calibrate denoiser
best_parameters = (
calibrate_denoiser(
crop,
_denoise_bilateral,
mode=optimiser,
denoise_parameters=parameter_ranges,
interpolation_mode=jinv_interpolation_mode,
max_num_evaluations=max_num_evaluations,
blind_spots=blind_spots,
display_images=display_images,
)
| other_fixed_parameters
)
# Memory needed:
memory_needed = 2 * image.nbytes
return denoise_bilateral, best_parameters, memory_needed
def denoise_bilateral(
image: ArrayLike,
sigma_color: Optional[float] = None,
sigma_spatial: float = 1,
bins: int = 10000,
**kwargs,
):
"""
Denoises the given image using a <a
href="https://en.wikipedia.org/wiki/Bilateral_filter">bilateral
filter</a>.
    The bilateral filter is an edge-preserving smoothing filter that can
    be used for image denoising. Each pixel value is replaced by a
    weighted average of intensity values from nearby pixels, where the
    weighting falls off both with spatial distance between pixels and
    with the difference in their values.
Parameters
----------
image : ArrayLike
Image to denoise
sigma_color : float
Standard deviation for grayvalue/color distance (radiometric
similarity). A larger value results in averaging of pixels with larger
radiometric differences. Note, that the image will be converted using
the `img_as_float` function and thus the standard deviation is in
respect to the range ``[0, 1]``. If the value is ``None`` the standard
deviation of the ``image`` will be used.
sigma_spatial : float
Standard deviation for range distance. A larger value results in
averaging of pixels with larger spatial differences.
bins : int
Number of discrete values for Gaussian weights of color filtering.
A larger value results in improved accuracy.
kwargs: dict
Other parameters
Returns
-------
Denoised image
"""
# Convert image to float if needed:
image = image.astype(dtype=numpy.float32, copy=False)
_skimage_denoise_bilateral = extend_nd(available_dims=[2])(
skimage_denoise_bilateral
)
return _skimage_denoise_bilateral(
image,
sigma_color=sigma_color,
sigma_spatial=sigma_spatial,
bins=bins,
mode='reflect',
**kwargs,
)
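# --- Usage sketch (assumptions: aydin is installed; the image is synthetic). ---
if __name__ == '__main__':
    rng = numpy.random.default_rng(0)
    noisy = rng.normal(0.5, 0.1, size=(128, 128)).astype(numpy.float32)

    # Calibrate on the noisy image, then denoise with the tuned parameters.
    denoise, best_parameters, memory_needed = calibrate_denoise_bilateral(noisy)
    denoised = denoise(noisy, **best_parameters)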
| 32.989529
| 83
| 0.690525
| 792
| 6,301
| 5.342172
| 0.300505
| 0.041598
| 0.033089
| 0.017963
| 0.164264
| 0.136611
| 0.133775
| 0.133775
| 0.133775
| 0.133775
| 0
| 0.01594
| 0.243295
| 6,301
| 190
| 84
| 33.163158
| 0.871435
| 0.552293
| 0
| 0.16129
| 0
| 0
| 0.014193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.145161
| 0
| 0.209677
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ffe13b312ebb3748c1aadfdca895d3557dc9d9a9
| 1,889
|
py
|
Python
|
pymon/pymon.py
|
crest42/PyMon
|
96494cc37f906e6a07388af29b04c559ec72f116
|
[
"MIT"
] | null | null | null |
pymon/pymon.py
|
crest42/PyMon
|
96494cc37f906e6a07388af29b04c559ec72f116
|
[
"MIT"
] | null | null | null |
pymon/pymon.py
|
crest42/PyMon
|
96494cc37f906e6a07388af29b04c559ec72f116
|
[
"MIT"
] | null | null | null |
import logging
import time
from .exceptions import HostEntryNotValid
from .check import CheckFactory
from .alert import AlertFactory
from .host import Host
from .logging import logger
class PyMon:
def __init__(self, host_list, check_list, alert_list, daemonize=False):
self.hosts = {}
self.checks = []
self.alerts = []
self.logger = logger
for host in host_list:
if 'name' not in host:
raise HostEntryNotValid(host)
name = host['name']
self.hosts[name] = Host(host['name'], host)
for check in check_list:
self.checks.append(CheckFactory(check).create())
self.add_check(self.checks[-1])
for alert in alert_list:
self.alerts.append(AlertFactory(alert).create())
if daemonize:
self.runloop()
def runloop(self):
run = 0
while True:
self.logger.info(f"Start Run {run}")
self.run()
run += 1
time.sleep(1)
def add_check(self, check):
for host in check.hosts:
try:
self.add_check_to_host(host, check)
            except HostEntryNotValid:
                self.logger.warning(f"Host entry {host} unknown")
def add_check_to_host(self, check_host, check):
if check_host not in self.hosts:
raise HostEntryNotValid(check_host)
self.hosts[check_host].add(check)
def print_hosts(self):
print("Hostlist:")
for k in self.hosts:
print(self.hosts[k])
print()
def run(self):
for k in self.hosts:
result = self.hosts[k].run()
if result is not None and len(result['RESULTS'].list) > 0:
for alert in self.alerts:
alert.send(result)
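# --- Usage sketch: the host dict only requires a 'name' key; the check and
# alert config shapes are assumptions about what CheckFactory/AlertFactory
# accept in this codebase. ---
if __name__ == '__main__':
    hosts = [{'name': 'web01'}]
    checks = []   # e.g. ping/HTTP check configs consumed by CheckFactory
    alerts = []   # e.g. mail/webhook configs consumed by AlertFactory
    mon = PyMon(hosts, checks, alerts, daemonize=False)
    mon.print_hosts()
    mon.run()     # one pass over all hosts; runloop() repeats forever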
| 28.19403
| 75
| 0.564849
| 227
| 1,889
| 4.599119
| 0.259912
| 0.068966
| 0.031609
| 0.02682
| 0.028736
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004006
| 0.339333
| 1,889
| 66
| 76
| 28.621212
| 0.832532
| 0
| 0
| 0.036364
| 0
| 0
| 0.035998
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109091
| false
| 0
| 0.127273
| 0
| 0.254545
| 0.072727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ffe7a09ec4555bf2573c09777fdb5c2946647fc9
| 3,914
|
py
|
Python
|
submissions_comments.py
|
jbell1991/reddit-scraping
|
73d88501ed0205e78000b9c30780a33186154fda
|
[
"MIT"
] | null | null | null |
submissions_comments.py
|
jbell1991/reddit-scraping
|
73d88501ed0205e78000b9c30780a33186154fda
|
[
"MIT"
] | null | null | null |
submissions_comments.py
|
jbell1991/reddit-scraping
|
73d88501ed0205e78000b9c30780a33186154fda
|
[
"MIT"
] | null | null | null |
# imports
from decouple import config
import pandas as pd
import praw
import psycopg2
import schedule
from sqlalchemy import create_engine
import time
def job():
current_day = time.strftime("%m/%d/%Y")
print(f"Performing job on {current_day}")
startTime = time.time()
# connecting to reddit API
reddit = praw.Reddit(
client_id=config("CLIENT_ID"),
client_secret=config("SECRET"),
user_agent=config("USER"),
username=config("USERNAME"),
password=config("PASSWORD")
)
subreddit = reddit.subreddit("wallstreetbets")
hot_wsb = subreddit.hot(limit=150)
# storing submission data in a dictionary
submissions = {
"title": [],
"subreddit": [],
"submission_author": [],
"submission_score": [],
"submission_id": [],
"url": [],
"num_comments": [],
"submission_created": [],
"submission_body": []
}
# iterate over each submission and store data in the submissions dictionary
for submission in hot_wsb:
submissions["title"].append(submission.title)
submissions["subreddit"].append(submission.subreddit)
submissions["submission_author"].append(submission.author)
submissions["submission_score"].append(submission.score)
submissions["submission_id"].append(submission.id)
submissions["url"].append(submission.url)
submissions["num_comments"].append(submission.num_comments)
submissions["submission_created"].append(submission.created)
submissions["submission_body"].append(submission.selftext)
# transform the submissions dictionary into a pandas dataframe
df = pd.DataFrame(submissions)
# convert created to date
df['submission_created'] = pd.to_datetime(df['submission_created'], unit='s')
# convert subreddit column to string
df['subreddit'] = df['subreddit'].astype(str)
# convert author column to string
df['submission_author'] = df['submission_author'].astype(str)
# connect to postgresql database
db_pass = config("PASSWORD")
engine = create_engine(
f'postgresql://postgres:{db_pass}@localhost:5432/postgres')
# store pandas dataframe in sql database
df.to_sql('submissions', engine, if_exists='append')
# create dictionary to store comments
comments = {
"submission_id": [],
"comment_id": [],
"comment_score": [],
"comment_author": [],
"comment_created": [],
"comment_body": []
}
    # iterating over each submission and collecting relevant comment data
for id in df['submission_id']:
submission = reddit.submission(id=id)
submission.comments.replace_more(limit=None)
for comment in submission.comments.list():
comments["submission_id"].append(id)
comments["comment_id"].append(comment.id)
comments["comment_score"].append(comment.score)
comments["comment_author"].append(comment.author)
comments["comment_created"].append(comment.created)
comments["comment_body"].append(comment.body)
# converting comments dictionary to a pandas dataframe
comments_df = pd.DataFrame(comments)
# convert created to date
comments_df["comment_created"] = pd.to_datetime(comments_df["comment_created"], unit='s')
# convert author to string
comments_df["comment_author"] = comments_df["comment_author"].astype(str)
# store comments_df in sql table
comments_df.to_sql('comments', engine, if_exists='append', index=False)
# calculate time it takes for script to run
executionTime = (time.time() - startTime)
print('Execution time in minutes: ' + str(executionTime/60))
# automate the script to run at the same time every day
schedule.every().day.at("09:07").do(job)
while True:
schedule.run_pending()
time.sleep(1)
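# --- Configuration note (sketch): python-decouple resolves config(...) from a
# .env file or the environment, so this script assumes five keys: CLIENT_ID
# and SECRET (reddit app), USER (user agent), USERNAME, and PASSWORD (reused
# here for both reddit and postgres). A quick smoke test:
#
#   from decouple import config
#   for key in ("CLIENT_ID", "SECRET", "USER", "USERNAME", "PASSWORD"):
#       config(key)  # raises UndefinedValueError if the key is missing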
| 32.890756
| 93
| 0.667092
| 441
| 3,914
| 5.782313
| 0.287982
| 0.056471
| 0.026667
| 0.016471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004864
| 0.212059
| 3,914
| 118
| 94
| 33.169492
| 0.821984
| 0.178334
| 0
| 0.025974
| 0
| 0
| 0.233031
| 0.017204
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012987
| false
| 0.038961
| 0.090909
| 0
| 0.103896
| 0.025974
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ffeb87db7651191ea5cf19f49a0c7c9aa356f87d
| 8,539
|
py
|
Python
|
site-packages/playhouse/sqliteq.py
|
lego-cloud/MDMPy
|
dc676a5d2245a14b9b98a2ac2dba64ff0bf61800
|
[
"Python-2.0",
"OLDAP-2.7"
] | 674
|
2015-11-06T04:22:47.000Z
|
2022-02-26T17:31:43.000Z
|
site-packages/playhouse/sqliteq.py
|
lego-cloud/MDMPy
|
dc676a5d2245a14b9b98a2ac2dba64ff0bf61800
|
[
"Python-2.0",
"OLDAP-2.7"
] | 713
|
2015-11-06T10:48:58.000Z
|
2018-11-27T16:32:18.000Z
|
site-packages/playhouse/sqliteq.py
|
lego-cloud/MDMPy
|
dc676a5d2245a14b9b98a2ac2dba64ff0bf61800
|
[
"Python-2.0",
"OLDAP-2.7"
] | 106
|
2015-12-07T11:21:06.000Z
|
2022-03-11T10:58:41.000Z
|
import logging
import weakref
from threading import Event
from threading import Thread
try:
from Queue import Queue
except ImportError:
from queue import Queue
try:
import gevent
from gevent import Greenlet as GThread
from gevent.event import Event as GEvent
from gevent.queue import Queue as GQueue
except ImportError:
GThread = GQueue = GEvent = None
from playhouse.sqlite_ext import SqliteExtDatabase
logger = logging.getLogger('peewee.sqliteq')
class ResultTimeout(Exception):
pass
class AsyncCursor(object):
__slots__ = ('sql', 'params', 'commit', 'timeout',
'_event', '_cursor', '_exc', '_idx', '_rows')
def __init__(self, event, sql, params, commit, timeout):
self._event = event
self.sql = sql
self.params = params
self.commit = commit
self.timeout = timeout
self._cursor = self._exc = self._idx = self._rows = None
def set_result(self, cursor, exc=None):
self._cursor = cursor
self._exc = exc
self._idx = 0
self._rows = cursor.fetchall() if exc is None else []
self._event.set()
return self
def _wait(self, timeout=None):
timeout = timeout if timeout is not None else self.timeout
if not self._event.wait(timeout=timeout) and timeout:
raise ResultTimeout('results not ready, timed out.')
if self._exc is not None:
raise self._exc
    def __iter__(self):
        self._wait()
        if self._exc is not None:
            raise self._exc  # fixed: was `self._exec`, a nonexistent attribute
        return self
def next(self):
try:
obj = self._rows[self._idx]
except IndexError:
raise StopIteration
else:
self._idx += 1
return obj
__next__ = next
@property
def lastrowid(self):
self._wait()
return self._cursor.lastrowid
@property
def rowcount(self):
self._wait()
return self._cursor.rowcount
@property
def description(self):
return self._cursor.description
def close(self):
self._cursor.close()
def fetchall(self):
return list(self) # Iterating implies waiting until populated.
def fetchone(self):
self._wait()
try:
return next(self)
except StopIteration:
return None
THREADLOCAL_ERROR_MESSAGE = ('threadlocals cannot be set to True when using '
'the Sqlite thread / queue database. All queries '
'are serialized through a single connection, so '
'allowing multiple threads to connect defeats '
'the purpose of this database.')
WAL_MODE_ERROR_MESSAGE = ('SQLite must be configured to use the WAL journal '
'mode when using this feature. WAL mode allows '
'one or more readers to continue reading while '
'another connection writes to the database.')
class SqliteQueueDatabase(SqliteExtDatabase):
def __init__(self, database, use_gevent=False, autostart=False, readers=1,
queue_max_size=None, results_timeout=None, *args, **kwargs):
if kwargs.get('threadlocals'):
raise ValueError(THREADLOCAL_ERROR_MESSAGE)
kwargs['threadlocals'] = False
kwargs['check_same_thread'] = False
# Ensure that journal_mode is WAL. This value is passed to the parent
# class constructor below.
pragmas = self._validate_journal_mode(
kwargs.pop('journal_mode', None),
kwargs.pop('pragmas', None))
# Reference to execute_sql on the parent class. Since we've overridden
# execute_sql(), this is just a handy way to reference the real
# implementation.
Parent = super(SqliteQueueDatabase, self)
self.__execute_sql = Parent.execute_sql
# Call the parent class constructor with our modified pragmas.
Parent.__init__(database, pragmas=pragmas, *args, **kwargs)
self._autostart = autostart
self._results_timeout = results_timeout
self._num_readers = readers
self._is_stopped = True
self._thread_helper = self.get_thread_impl(use_gevent)(queue_max_size)
self._create_queues_and_workers()
if self._autostart:
self.start()
def get_thread_impl(self, use_gevent):
return GreenletHelper if use_gevent else ThreadHelper
def _validate_journal_mode(self, journal_mode=None, pragmas=None):
if journal_mode and journal_mode.lower() != 'wal':
raise ValueError(WAL_MODE_ERROR_MESSAGE)
if pragmas:
pdict = dict((k.lower(), v) for (k, v) in pragmas)
if pdict.get('journal_mode', 'wal').lower() != 'wal':
raise ValueError(WAL_MODE_ERROR_MESSAGE)
return [(k, v) for (k, v) in pragmas
if k != 'journal_mode'] + [('journal_mode', 'wal')]
else:
return [('journal_mode', 'wal')]
def _create_queues_and_workers(self):
self._write_queue = self._thread_helper.queue()
self._read_queue = self._thread_helper.queue()
target = self._run_worker_loop
self._writer = self._thread_helper.thread(target, self._write_queue)
self._readers = [self._thread_helper.thread(target, self._read_queue)
for _ in range(self._num_readers)]
def _run_worker_loop(self, queue):
while True:
async_cursor = queue.get()
if async_cursor is StopIteration:
logger.info('worker shutting down.')
return
logger.debug('received query %s', async_cursor.sql)
self._process_execution(async_cursor)
def _process_execution(self, async_cursor):
try:
cursor = self.__execute_sql(async_cursor.sql, async_cursor.params,
async_cursor.commit)
except Exception as exc:
cursor = None
else:
exc = None
return async_cursor.set_result(cursor, exc)
def queue_size(self):
return (self._write_queue.qsize(), self._read_queue.qsize())
def execute_sql(self, sql, params=None, require_commit=True, timeout=None):
cursor = AsyncCursor(
event=self._thread_helper.event(),
sql=sql,
params=params,
commit=require_commit,
timeout=self._results_timeout if timeout is None else timeout)
queue = self._write_queue if require_commit else self._read_queue
queue.put(cursor)
return cursor
def start(self):
with self._conn_lock:
if not self._is_stopped:
return False
self._writer.start()
for reader in self._readers:
reader.start()
logger.info('workers started.')
self._is_stopped = False
return True
def stop(self):
logger.debug('environment stop requested.')
with self._conn_lock:
if self._is_stopped:
return False
self._write_queue.put(StopIteration)
for _ in self._readers:
self._read_queue.put(StopIteration)
self._writer.join()
for reader in self._readers:
reader.join()
return True
def is_stopped(self):
with self._conn_lock:
return self._is_stopped
class ThreadHelper(object):
__slots__ = ('queue_max_size',)
def __init__(self, queue_max_size=None):
self.queue_max_size = queue_max_size
def event(self): return Event()
def queue(self, max_size=None):
max_size = max_size if max_size is not None else self.queue_max_size
return Queue(maxsize=max_size or 0)
def thread(self, fn, *args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs)
thread.daemon = True
return thread
class GreenletHelper(ThreadHelper):
__slots__ = ('queue_max_size',)
def event(self): return GEvent()
def queue(self, max_size=None):
max_size = max_size if max_size is not None else self.queue_max_size
return GQueue(maxsize=max_size or 0)
def thread(self, fn, *args, **kwargs):
def wrap(*a, **k):
gevent.sleep()
return fn(*a, **k)
return GThread(wrap, *args, **kwargs)
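# A minimal usage sketch (hypothetical database path; assumes the peewee-style
# parent class manages the underlying connection). Writes funnel through the
# single writer thread, reads through the reader pool, and fetch calls block
# until a worker populates the AsyncCursor:
def _queue_db_demo():
    db = SqliteQueueDatabase('app.db', autostart=True, readers=2)
    db.execute_sql('CREATE TABLE IF NOT EXISTS kv (k TEXT, v TEXT)')
    curs = db.execute_sql('SELECT 1', require_commit=False)
    print(curs.fetchone())  # waits on the result event, then yields (1,)
    db.stop()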
| 32.222642
| 79
| 0.610025
| 1,003
| 8,539
| 4.946162
| 0.212363
| 0.026809
| 0.02177
| 0.012901
| 0.174158
| 0.143116
| 0.096352
| 0.077404
| 0.049587
| 0.049587
| 0
| 0.000843
| 0.305305
| 8,539
| 264
| 80
| 32.344697
| 0.835469
| 0.040169
| 0
| 0.196078
| 0
| 0
| 0.088056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0.004902
| 0.063725
| 0.029412
| 0.382353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ffed6941b3c99947e3e5d93c80fbd2e963b7ad51
| 9,056
|
py
|
Python
|
Common/Db.py
|
StrawberryTeam/pi_robot
|
c1b8ce2ad49c64173673df0eb59e0941624556e7
|
[
"MIT"
] | 2
|
2018-08-30T14:38:53.000Z
|
2019-12-12T09:33:42.000Z
|
Common/Db.py
|
StrawberryTeam/pi_robot
|
c1b8ce2ad49c64173673df0eb59e0941624556e7
|
[
"MIT"
] | 1
|
2018-12-10T05:15:48.000Z
|
2018-12-10T05:15:48.000Z
|
Common/Db.py
|
StrawberryTeam/pi_robot
|
c1b8ce2ad49c64173673df0eb59e0941624556e7
|
[
"MIT"
] | 2
|
2019-06-28T06:05:17.000Z
|
2019-10-28T08:34:50.000Z
|
#!/usr/bin/python3
from Common.Straw import Straw
import pymongo
from pymongo import MongoClient
from bson.objectid import ObjectId
import os
# NOTE: `common` (removeUnsafeFields, conv2) and the `_videoSetFields` /
# `_videoListFields` schemas are referenced below but never defined or imported
# in this file; they are assumed to come from the surrounding project.
class Db(Straw):
    # Task
    _taskFields = {
        'videoIds': 'string', # videos to operate on
        'setId': 'objectid', # id of the video set to operate on
        'fromDevice': 'string',
        'toDevice': 'string', # target device uid
        'type': 'string', # copy: duplicate; zip: package; addset: add a video set; addvideo: add a video; transfer
        'link': 'string', # link for add-video / add-video-set tasks
        'platform': 'int', # platform of the video set / video
        'created_at': 'int',
        'sort': 'int', # sort method
        'status': 'int', # status
        'transfer_status': 'int', # transfer status
        'file_md5': 'string', # md5 of the transferred file
        'file_path': 'string', # path of the transferred file
    }
    # Connected collections
    _collection = {
        # video sets
        'video_set': {},
        # video list
        'video_list': {},
        # task
        'task': {},
        # setting
        'setting': {},
    }
    # Connected db
    _db = {}
def __init__(self):
pass
    # Connect to a collection
    def connect(self, table):
        # collection already connected
        if self._collection[table]:
            return self._collection[table]
        config = self.getConfig('DB')
        client = MongoClient(config['mongoClient'])
        if not self._db:
            self._db = client[config['dbName']] # connect to the database
        self._collection[table] = self._db[table] # select the collection
        return self._collection[table]
    # Get all sets that do not yet have pinyin fields
def getNonpySetList(self, count = 10):
_collection = self.connect('video_set')
dataList = _collection.find({"title_py": {"$exists": False}, 'non_py': {"$ne": True}}).sort("play_num", pymongo.DESCENDING).limit(count)
return dataList if dataList.count() > 0 else False
    # Update a set's pinyin fields
    def saveSetPy(self, data, _id):
        _collection = self.connect('video_set')
        availableFields = ['title_py', 'title_pyshow', 'title_sp', 'tags']
        saveData = common.removeUnsafeFields(data, availableFields, self._videoSetFields)
        # saveData = dict(filter(lambda k: k[0] in availableFields, data.items()))
        return _collection.update_one({"_id": _id}, {"$set": saveData})
    # Get all videos that do not yet have pinyin fields
def getNonpyVideoList(self, count = 10):
_collection = self.connect('video_list')
dataList = _collection.find({"name_py": {"$exists": False}, 'non_py': {"$ne": True}}).sort("plays", pymongo.DESCENDING).limit(count)
return dataList if dataList.count() > 0 else False
    # Update a video's pinyin fields
    def saveVideoPy(self, data, _id):
        _collection = self.connect('video_list')
        availableFields = ['name_py', 'name_pyshow', 'name_sp', 'tags']
        saveData = common.removeUnsafeFields(data, availableFields, self._videoListFields)
        # saveData = dict(filter(lambda k: k[0] in availableFields, data.items()))
        return _collection.update_one({"_id": _id}, {"$set": saveData})
    # Get video set info
def getSetInfo(self, setId):
_collection = self.connect('video_set')
item = _collection.find_one({"_id": ObjectId(setId)})
return item if item else False
    # Get all videos in this video set
def getVideoListBySetId(self, setId):
_collection = self.connect('video_list')
dataList = _collection.find({"setId": common.conv2(setId, self._videoListFields['setId'])}).sort("_id", pymongo.ASCENDING)
return dataList if dataList.count() > 0 else False
    # Get all videos in this video set whose image has not been downloaded for this uid
def getVideoListByDlImg(self, uid, setId):
_collection = self.connect('video_list')
dataList = _collection.find({"setId": common.conv2(setId, self._videoListFields['setId']), "img." + str(uid): {'$exists': False}}).sort("_id", pymongo.ASCENDING)
return dataList if dataList.count() > 0 else False
    # Get one video set whose cover image still needs downloading
    def getVideoSetByDlImg(self, uid, platforms = [1]):
        '''
        platform 1: iQIYI
        '''
_collection = self.connect('video_set')
dataList = _collection.find_one({"platform": {'$in': platforms}, "imgs." + str(uid): {'$exists': False}, "play_num." + str(uid): {'$exists': True}})
return dataList if dataList else False
    # Update the video set image to the locally downloaded copy
def modifySetImg(self, setId, data, uid):
if not data['img']:
return False
_collection = self.connect('video_set')
modify = _collection.update_one({"_id": setId}, {"$set": {"imgs." + str(uid): data['img']}})
return True if modify else False
    # Update a video's image
def modifyVideoImg(self, _id, data, uid):
if not data['img']:
return False
_collection = self.connect('video_list')
modify = _collection.update_one({"_id": _id}, {"$set": {"imgs." + str(uid): data['img']}})
return True if modify else False
    # # repair helpers: start
# def fixGetSet(_id):
# table = 'video_set'
# _collection = connect(table)
# return _collection.find_one({"_id": _id})
# def fixGetVideo(_id):
# table = 'video_list'
# _collection = connect(table)
# return _collection.find_one({"_id": _id})
    # # update video set image
# def fixModifySetImg(setId, data):
# if not data['img']:
# return False
# table = 'video_set'
# _collection = connect(table)
# modify = _collection.update_one({"_id": setId}, {"$set": {"img": data['img']}})
# modify2 = _collection.update_one({"_id": setId}, {"$unset": {"img_status": ""}})
# return True if modify and modify2 else False
    # # update video image
# def fixModifyVideoImg(_id, data):
# if not data['img']:
# return False
# table = 'video_list'
# _collection = connect(table)
# modify = _collection.update_one({"_id": _id}, {"$set": {"img": data['img']}})
# modify2 = _collection.update_one({"_id": _id}, {"$unset": {"img_status": ""}})
# return True if modify and modify2 else False
    # # repair helpers: end
    _TASK_READY = 1 # not yet executed
    _TASK_FAILED = 2 # finished but not successful
    _TASK_SUCCESS = 3 # explicitly successful
    # Get the next task to execute
def getTask(self, taskTypes, deviceId):
_collection = self.connect('task')
deviceId = str(deviceId)
taskInfo = _collection.find_one({"toDevice": deviceId, "type": {'$in':taskTypes}, 'status': self._TASK_READY})
return taskInfo if taskInfo else False
    _TRANSFER_FAILED = -1 # aborted or failed; do not retry
    _TRANSFER_READY = 1 # waiting for the file to be packaged
    # _TRANSFER_PACK = 2 # packaging finished, waiting for transfer
    _TRANSFER_COMPLETE = 2 # transfer finished, waiting to be received
    _TRANSFER_SUCCESS = 3 # task finished, waiting to delete the original file
    _TRANSFER_CLEAR = 4 # task finished, original file cleaned up
    # Download a video set
def set2Dl(self, setId, deviceId):
# print("set {} to dl".format(setId))
_collection = self.connect('video_set')
modify = _collection.update_one({"_id": setId}, {"$push": {"dl": str(deviceId)}})
return True if modify else False
    # Transfer complete
def taskTransferComplete(self, taskId, fileMd5, filePath):
self.taskDoing(taskId)
_collection = self.connect('task')
saveData = dict()
saveData['transfer_status'] = self._TRANSFER_COMPLETE
saveData['file_md5'] = fileMd5
saveData['file_path'] = filePath
saveData = common.removeUnsafeFields(saveData, self._taskFields.keys(), self._taskFields)
modify = _collection.update_one({"_id": ObjectId(taskId)}, {"$set": saveData})
return True if modify else False
    # Transfer failed
    def taskTransferFailed(self, taskId):
        self.taskDoing(taskId)
        _collection = self.connect('task')
        modify = _collection.update_one({"_id": ObjectId(taskId)}, {"$set": {"transfer_status": self._TRANSFER_FAILED}})
        return True if modify else False
    # Mark the task as failed by default while it is in progress
    def taskDoing(self, _id):
        _collection = self.connect('task')
        modify = _collection.update_one({"_id": ObjectId(_id)}, {"$set": {"status": self._TASK_FAILED}})
        return True if modify else False
    # Task succeeded
def taskSuccess(self, _id):
_collection = self.connect('task')
modify = _collection.update_one({"_id": ObjectId(_id)}, {"$set": {"status": self._TASK_SUCCESS}})
return True if modify else False
    # Query sets that have finished downloading
    def getDledRes(self, uid):
        # query the video list
        _collection = self.connect('video_list')
        # find episodes recorded for this device
        listItem = _collection.find({"plays." + str(uid): {'$exists': True}})
        return listItem if listItem else False
    # Mark as downloading
def setVSetOnDl(self, setId, uid):
_collection = self.connect('video_set')
uid = str(uid)
upMap = {"_id": ObjectId(setId)}
        # all downloads finished:
        # remove from the downloaded list
        _collection.update(upMap, {"$pull": {"dled": uid}})
        # add to the download set
        _collection.update(upMap, {"$addToSet": {"dl": uid}})
        # play_num needs to be decremented by 1 again
_collection.update_one(upMap, {"$inc": {"play_num." + uid : -1}})
return True
    # Remove a video that has finished downloading
def rmVideo(self, _id, uid):
_collection = self.connect('video_list')
_collection.update({"_id": _id}, {"$unset": {"plays." + str(uid): ""}})
return True
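# A minimal usage sketch (hypothetical: assumes a reachable MongoDB instance and
# a getConfig('DB') entry supplying 'mongoClient' and 'dbName'):
def _db_demo():
    db = Db()
    task = db.getTask(['copy', 'zip'], deviceId=1)
    if task:
        db.taskSuccess(task['_id'])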
if __name__ == "__main__":
db()
| 35.100775
| 169
| 0.597946
| 989
| 9,056
| 5.264914
| 0.226491
| 0.051085
| 0.076628
| 0.069906
| 0.497599
| 0.467448
| 0.436336
| 0.386979
| 0.294603
| 0.245823
| 0
| 0.005349
| 0.256846
| 9,056
| 257
| 170
| 35.237354
| 0.768351
| 0.194898
| 0
| 0.293706
| 0
| 0
| 0.112334
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146853
| false
| 0.006993
| 0.034965
| 0
| 0.426573
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ffed95a551ec4c75f989589df7d781a9f4387728
| 1,251
|
py
|
Python
|
baya/tests/test_templatetags.py
|
kreneskyp/baya
|
5cf04b6873927124b4a3f24c113c08699dd61315
|
[
"MIT"
] | 4
|
2016-05-24T13:57:37.000Z
|
2020-02-27T05:22:56.000Z
|
baya/tests/test_templatetags.py
|
kreneskyp/baya
|
5cf04b6873927124b4a3f24c113c08699dd61315
|
[
"MIT"
] | 29
|
2016-02-05T01:31:51.000Z
|
2022-02-23T18:50:58.000Z
|
baya/tests/test_templatetags.py
|
hrichards/baya
|
f319cef5e95cd6a166265d51ae0ea236b6f65be3
|
[
"MIT"
] | 6
|
2016-05-20T22:22:45.000Z
|
2019-09-03T17:57:59.000Z
|
from django.template import Context
from django.template import Template
from .test_base import LDAPGroupAuthTestBase
from django.contrib.auth.models import AnonymousUser
class CanUserPerformActionTagTest(LDAPGroupAuthTestBase):
BASIC_TEMPLATE = Template(
"{% load baya_tags %}"
"{% can_user_perform_action action as can_perform_action %}"
"{% if can_perform_action %}"
"True"
"{% else %}"
"False"
"{% endif %}"
)
def test_anonymous_user_has_permission_false(self):
context = Context({
'action': 'index',
'user': AnonymousUser(),
})
rendered = self.BASIC_TEMPLATE.render(context)
self.assertIn('False', rendered)
def test_has_permission_false(self):
context = Context({
'action': 'index',
'user': self.login('has_nothing'),
})
rendered = self.BASIC_TEMPLATE.render(context)
self.assertIn('False', rendered)
def test_has_permission_true(self):
context = Context({
'action': 'index',
'user': self.login('has_all'),
})
rendered = self.BASIC_TEMPLATE.render(context)
self.assertIn('True', rendered)
| 29.785714
| 69
| 0.608313
| 122
| 1,251
| 6.02459
| 0.327869
| 0.070748
| 0.073469
| 0.097959
| 0.492517
| 0.492517
| 0.492517
| 0.492517
| 0.42449
| 0.22585
| 0
| 0
| 0.275779
| 1,251
| 41
| 70
| 30.512195
| 0.811258
| 0
| 0
| 0.4
| 0
| 0
| 0.174261
| 0.018385
| 0
| 0
| 0
| 0
| 0.085714
| 1
| 0.085714
| false
| 0
| 0.114286
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fff185192df2e58db961f6b323cfb8259a7a9f46
| 2,611
|
py
|
Python
|
egg/zoo/sum_game/architectures.py
|
CorentinKervadec/EGG
|
5ccd49c4a493514b1194699954d41940f5e2a5c6
|
[
"MIT"
] | null | null | null |
egg/zoo/sum_game/architectures.py
|
CorentinKervadec/EGG
|
5ccd49c4a493514b1194699954d41940f5e2a5c6
|
[
"MIT"
] | null | null | null |
egg/zoo/sum_game/architectures.py
|
CorentinKervadec/EGG
|
5ccd49c4a493514b1194699954d41940f5e2a5c6
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch.nn import functional as F
# In EGG, the game designer must implement the core functionality of the Sender and Receiver agents. These are then
# embedded in wrappers that are used to train them to play Gumbel-Softmax- or Reinforce-optimized games. The core
# Sender must take the input and produce a hidden representation that is then used by the wrapper to initialize
# the RNN or other module that will generate the message. The core Receiver expects a hidden representation
# generated by the message-processing wrapper, plus possibly other game-specific input, and it must generate the
# game-specific output.
# The RecoReceiver class implements the core Receiver agent for the reconstruction game. This is simply a linear layer
# that takes as input the vector generated by the message-decoding RNN in the wrapper (x in the forward method) and
# produces an output of n_features dimensionality, to be interpreted as a one-hot representation of the reconstructed
# attribute-value vector
class RecoReceiver(nn.Module):
def __init__(self, n_features, n_hidden):
super(RecoReceiver, self).__init__()
self.output = nn.Linear(n_hidden, n_features)
def forward(self, x, _input, _aux_input):
return self.output(x)
# The Sender class implements the core Sender agent common to both games: it gets the input target vector and produces a hidden layer
# that will initialize the message producing RNN
class Sender(nn.Module):
def __init__(self, n_hidden, n_features, log_sftmx=False):
super(Sender, self).__init__()
self.fc1 = nn.Linear(n_features, n_hidden)
self.log_sftmx = log_sftmx
if log_sftmx:
self.logsoft = nn.LogSoftmax(dim=1)
def forward(self, x, _aux_input):
out = self.fc1(x)
if self.log_sftmx:
out = self.logsoft(out)
return out
class SenderOracle(nn.Module):
def __init__(self, n_hidden, n_features):
super(SenderOracle, self).__init__()
def forward(self, x, _aux_input):
n = x.size(-1)/2
ar = torch.arange(n).to(x.device)
ar = torch.cat([ar, ar])
ar = torch.stack([ar]*x.size(0), dim=0)
decoded = (x*ar).sum(-1).long().unsqueeze(-1)
out = torch.zeros_like(x)
out.scatter_(1, decoded, 1e6)
return out
# here, it might make sense to add a non-linearity, such as tanh
| 44.254237
| 133
| 0.711222
| 401
| 2,611
| 4.508728
| 0.38404
| 0.029867
| 0.018252
| 0.024889
| 0.075221
| 0.075221
| 0.038717
| 0.038717
| 0.038717
| 0
| 0
| 0.005828
| 0.211413
| 2,611
| 59
| 134
| 44.254237
| 0.872268
| 0.518958
| 0
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0.030303
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fff18656fd42956b8ef43e1d1fc5a06b2aa15f66
| 2,757
|
py
|
Python
|
utils/random_training_splits.py
|
suvarnak/GenerativeFSLCovid
|
0bdeb4ed444c5c9d59697c71d0733fc3a100944c
|
[
"MIT"
] | null | null | null |
utils/random_training_splits.py
|
suvarnak/GenerativeFSLCovid
|
0bdeb4ed444c5c9d59697c71d0733fc3a100944c
|
[
"MIT"
] | null | null | null |
utils/random_training_splits.py
|
suvarnak/GenerativeFSLCovid
|
0bdeb4ed444c5c9d59697c71d0733fc3a100944c
|
[
"MIT"
] | null | null | null |
import os
import shutil
import random
def copy_random_k_files(src_dir, k, dst_dir):
file_list = os.listdir(src_dir)
    if k == -1:
        k = len(file_list)
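    # NOTE: random.choice samples with replacement, so the same file may be
    # copied more than once; random.sample(file_list, k) would give distinct picks.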
for i in range(k):
random_file=random.choice(file_list)
print(random_file)
src1 = os.path.join(src_dir, random_file)
dst1 = os.path.join(dst_dir, random_file)
shutil.copyfile(src1, dst1)
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def main():
shots_per_run = 84
    no_of_runs = 10
image_dir = "./data/DeepCovid"
split_names = os.listdir(image_dir)
target_splits_dir = "./data"
print("createing directory structure")
for i in range(no_of_runs):
random_run_path = os.path.join(target_splits_dir, "DeepCovid_"+str(shots_per_run) + "_" + str(i))
print(random_run_path)
os.mkdir(random_run_path)
train_split = "train" #split_names[1]
test_split = "test" #split_names[0]
class_names = ['0_non','1_covid']
base_path_split = os.path.join(random_run_path,train_split)
os.makedirs(os.path.join(base_path_split,class_names[0]))
os.makedirs(os.path.join(base_path_split,class_names[1]))
base_path_split = os.path.join(random_run_path,test_split)
os.makedirs(os.path.join(base_path_split,class_names[0]))
os.makedirs(os.path.join(base_path_split,class_names[1]))
print("Directory '% s' created" % random_run_path)
src_train_dir = os.path.join(image_dir,"train")
src_train_dir_non = os.path.join(src_train_dir,"0_non")
src_train_dir_covid = os.path.join(src_train_dir,"1_covid")
dst_train_dir = os.path.join(random_run_path,"train")
dst_train_dir_non = os.path.join(dst_train_dir,"0_non")
dst_train_dir_covid = os.path.join(dst_train_dir,"1_covid")
copy_random_k_files(src_train_dir_non, shots_per_run, dst_train_dir_non)
copy_random_k_files(src_train_dir_covid, shots_per_run, dst_train_dir_covid)
src_test_dir = os.path.join(image_dir,"test")
src_test_dir_non = os.path.join(src_test_dir,"0_non")
src_test_dir_covid = os.path.join(src_test_dir,"1_covid")
dst_test_dir = os.path.join(random_run_path,"test")
dst_test_dir_non = os.path.join(dst_test_dir,"0_non")
dst_test_dir_covid = os.path.join(dst_test_dir,"1_covid")
copytree(src_test_dir_non, dst_test_dir_non)
copytree(src_test_dir_covid, dst_test_dir_covid)
if __name__ == '__main__':
main()
| 39.385714
| 105
| 0.673921
| 442
| 2,757
| 3.812217
| 0.158371
| 0.08546
| 0.136499
| 0.046291
| 0.465282
| 0.429674
| 0.226706
| 0.153116
| 0.153116
| 0.110386
| 0
| 0.011899
| 0.207472
| 2,757
| 69
| 106
| 39.956522
| 0.759268
| 0.010519
| 0
| 0.066667
| 0
| 0
| 0.066079
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.1
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fff2144edf1dc7c96f337289635ef5af44b23625
| 8,510
|
py
|
Python
|
testscript/imputation_algorithms.py
|
zshufan/Tattle-Tale
|
f9d93051efb523f1bda0cead023c2f001e18cc85
|
[
"BSD-3-Clause"
] | null | null | null |
testscript/imputation_algorithms.py
|
zshufan/Tattle-Tale
|
f9d93051efb523f1bda0cead023c2f001e18cc85
|
[
"BSD-3-Clause"
] | null | null | null |
testscript/imputation_algorithms.py
|
zshufan/Tattle-Tale
|
f9d93051efb523f1bda0cead023c2f001e18cc85
|
[
"BSD-3-Clause"
] | null | null | null |
# some codes refer to Holoclean evaluation function
# https://github.com/HoloClean/holoclean
import pandas as pd
import numpy as np
import logging
import random
import argparse
parser = argparse.ArgumentParser(description='Predict on many examples')
parser.add_argument("--dataset", type=str, help="dataset path")
parser.add_argument("--ground_truth", type=str, help="ground truth path")
parser.add_argument("--ground_truth_2", type=str, help="ground truth path")
args = parser.parse_args()
NULL_REPR = '_nan_'
exclude_attr = ['_tid_', 'FName', 'LName']
class DataCleaningAsAdv:
def __init__(self, csv_fpath) -> None:
# load dataset with missing values
self.load_dataset(csv_fpath)
# associate with domain
self.get_domain_knowledge()
def load_dataset(self, fpath, na_values=None) -> None:
try:
# Do not include TID and source column as trainable attributes
exclude_attr_cols = ['_tid_']
self.df = pd.read_csv(fpath, dtype=str, na_values=na_values, encoding='utf-8')
# Normalize the dataframe: drop null columns, convert to lowercase strings, and strip whitespaces.
for attr in self.df.columns.values:
if self.df[attr].isnull().all():
logging.warning("Dropping the following null column from the dataset: '%s'", attr)
self.df.drop(labels=[attr], axis=1, inplace=True)
continue
if attr not in exclude_attr_cols:
self.df[attr] = self.df[attr].str.strip().str.lower()
# Add _tid_ column to dataset that uniquely identifies an entity.
self.df.insert(0, '_tid_', range(0,len(self.df)))
# Use NULL_REPR to represent NULL values
self.df.fillna(NULL_REPR, inplace=True)
# print(self.df.head())
logging.info("Loaded %d rows with %d cells", self.df.shape[0], self.df.shape[0] * self.df.shape[1])
except Exception:
logging.error('loading data for missing data table %s', fpath)
raise
def load_ground_truth(self, fpath, tid_col, attr_col, val_col, na_values=None) -> None:
try:
self.gt_data = pd.read_csv(fpath, na_values=na_values, encoding='utf-8')
# We drop any ground truth values that are NULLs since we follow
# the closed-world assumption (if it's not there it's wrong).
# TODO: revisit this once we allow users to specify which
# attributes may be NULL.
self.gt_data.dropna(subset=[val_col], inplace=True)
self.gt_data.fillna(NULL_REPR, inplace=True)
self.gt_data.rename({tid_col: '_tid_',
attr_col: '_attribute_',
val_col: '_value_'},
axis='columns',
inplace=True)
self.gt_data = self.gt_data[['_tid_', '_attribute_', '_value_']]
# Normalize string to whitespaces.
self.gt_data['_value_'] = self.gt_data['_value_'].str.strip().str.lower()
except Exception:
logging.error('load_data for ground truth table %s', fpath)
raise
def get_domain_knowledge(self) -> None:
# get the domain of each column
# and the frequency of each value in the domain
self.domain = {}
self.weight = {}
for attr in self.df.columns.values:
if attr in exclude_attr:
continue
domain = self.df[attr].unique()
if NULL_REPR in domain:
domain = domain[domain != NULL_REPR]
self.domain[attr] = domain
attr_gb_count_df = self.df.groupby([attr])[attr].count()
# print(attr_gb_count_df)
self.weight[attr] = [attr_gb_count_df[val] for val in domain]
# print(self.weight[attr])
def fill_in_random_value(self) -> None:
self.random_repair = self.df.copy()
for attr in self.df.columns.values:
if attr in exclude_attr:
continue
# fill in the missing values
indices = self.random_repair[self.random_repair[attr]==NULL_REPR].index.tolist()
# print(indices)
for index in indices:
if self.random_repair.loc[index][attr] is not NULL_REPR:
logging.error("index not match")
raise
self.random_repair.at[index, attr] = np.random.choice(self.domain[attr])
# print(self.random_repair.loc[index][attr], self.df.loc[index][attr])
def fill_in_popular_value(self) -> None:
self.popular_repair = self.df.copy()
for attr in self.df.columns.values:
if attr in exclude_attr:
continue
# sort the zipped list to get the most popular item
# in each column in the ascending order
zipped = zip(self.domain[attr], self.weight[attr])
sorted_zip = sorted(zipped, key=lambda x: x[1])
# print(sorted_zip[-1])
# fill in the missing values
indices = self.popular_repair[self.popular_repair[attr]==NULL_REPR].index.tolist()
for index in indices:
if self.popular_repair.loc[index][attr] is not NULL_REPR:
                    logging.error("index mismatch")
                    raise RuntimeError("index mismatch")
self.popular_repair.at[index, attr] = sorted_zip[-1][0]
# print(self.popular_repair.loc[index][attr], self.df.loc[index][attr])
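    # random.choices draws with replacement, weighting each candidate by its
    # empirical frequency, so imputed values mirror the observed distribution.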
def fill_in_by_weighted_sampling(self) -> None:
self.weighted_repair = self.df.copy()
for attr in self.df.columns.values:
if attr in exclude_attr:
continue
# fill in the missing values
indices = self.weighted_repair[self.weighted_repair[attr]==NULL_REPR].index.tolist()
# print(indices)
for index in indices:
if self.weighted_repair.loc[index][attr] is not NULL_REPR:
                    logging.error("index mismatch")
                    raise RuntimeError("index mismatch")
self.weighted_repair.at[index, attr] = random.choices(self.domain[attr], weights=self.weight[attr], k=1)[0]
# print(self.weighted_repair.loc[index][attr], self.df.loc[index][attr])
def evaluate(self, gt_fpath, tid_col, attr_col, val_col, file) -> None:
self.load_ground_truth(gt_fpath, tid_col, attr_col, val_col)
total_repairs = self.gt_data.shape[0]
def _evaluate(df) -> int:
correct_repair = 0
for _, row in self.gt_data.iterrows():
if df.loc[row['_tid_']][row['_attribute_']] == row['_value_']:
if self.df.loc[row['_tid_']][row['_attribute_']] is not NULL_REPR:
logging.error("index not match when evaluating")
raise
correct_repair += 1
return correct_repair
# evaluate random filling
self.fill_in_random_value()
correct_repair = _evaluate(self.random_repair)
print("Precision of random filling: {}, correct_repairs: {}, total_repairs: {}".format(correct_repair/total_repairs, correct_repair, total_repairs), file=file)
# evaluate popular filling
self.fill_in_popular_value()
correct_repair = _evaluate(self.popular_repair)
print("Precision of popular filling: {}, correct_repairs: {}, total_repairs: {}".format(correct_repair/total_repairs, correct_repair, total_repairs), file=file)
# evaluate weighted filling
self.fill_in_by_weighted_sampling()
correct_repair = _evaluate(self.weighted_repair)
print("Precision of weighted filling: {}, correct_repairs: {}, total_repairs: {}".format(correct_repair/total_repairs, correct_repair, total_repairs), file=file)
if __name__ == "__main__":
# load dataset
adv = DataCleaningAsAdv(args.dataset)
f = open("baseline_cleaning_report_1", "a")
print(args.dataset, file=f)
# evaluate
adv.evaluate(gt_fpath=args.ground_truth,
tid_col='tid',
attr_col='attribute',
val_col='correct_val', file=f)
if args.ground_truth_2 is not None:
adv.evaluate(gt_fpath=args.ground_truth_2,
tid_col='tid',
attr_col='attribute',
val_col='correct_val', file=f)
| 42.338308
| 169
| 0.602115
| 1,071
| 8,510
| 4.575163
| 0.210084
| 0.031837
| 0.020408
| 0.022041
| 0.434286
| 0.370204
| 0.316327
| 0.272653
| 0.249592
| 0.241837
| 0
| 0.003485
| 0.291892
| 8,510
| 200
| 170
| 42.55
| 0.809658
| 0.154642
| 0
| 0.27907
| 0
| 0
| 0.109807
| 0.003632
| 0
| 0
| 0
| 0.005
| 0
| 1
| 0.069767
| false
| 0
| 0.03876
| 0
| 0.124031
| 0.031008
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fff3dd07c2f6cdec73bcd25788a20c7594c2652d
| 959
|
py
|
Python
|
streamlit/main.py
|
prakhar134/clean-or-messy
|
0b9080363c48ca9cff0449875dfcbd169ef64321
|
[
"MIT"
] | 13
|
2020-10-08T13:52:21.000Z
|
2022-03-11T07:02:35.000Z
|
streamlit/main.py
|
architsharmaa/clean-or-messy
|
b40028cb4c4c8bbefb91a4b016096953b445c146
|
[
"MIT"
] | null | null | null |
streamlit/main.py
|
architsharmaa/clean-or-messy
|
b40028cb4c4c8bbefb91a4b016096953b445c146
|
[
"MIT"
] | 9
|
2020-10-08T12:02:50.000Z
|
2022-01-25T23:38:46.000Z
|
from fastai.vision.all import *
from PIL import Image
import streamlit as st
import numpy as np
from io import BytesIO
from config import imgWidth, imgHeight  # bare import: `streamlit run` executes this file as a script, not a package
st.title("CleanvsMessy")
st.markdown('''
## Upload the image''',True)
st.set_option('deprecation.showfileUploaderEncoding', False)
file = st.file_uploader(" ")
model = load_learner('model/model_v0.pkl')
st.markdown('''
## Preview of the Image''',True)
if file is not None:
    # st.image has no height parameter; width alone scales the preview
    st.image(file, width=imgWidth)
if file is not None:
def upload(file):
image = Image.open(file)
image_np = np.array(image)
image_without_alpha = image_np[:, :, :3]
is_clean, _, probs = model.predict(image_without_alpha)
prob = float(list(probs.numpy())[1])
return {"is_image_clean": is_clean, "predictedVal": prob}
result = upload(file)
st.write("Is Image Clean? "+result["is_image_clean"])
st.write("Confidence "+str(result["predictedVal"]))
| 30.935484
| 65
| 0.683003
| 130
| 959
| 4.907692
| 0.469231
| 0.032915
| 0.056426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003788
| 0.17414
| 959
| 31
| 66
| 30.935484
| 0.801768
| 0
| 0
| 0.148148
| 0
| 0
| 0.197917
| 0.0375
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.222222
| 0
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fff5f55a4eee57bae636a577f32adbde97ba453e
| 3,151
|
py
|
Python
|
e3/provisioning/AtlassianAwsSecurity.py
|
sguillory6/e3
|
1505e6ea389157b9645155b9da13d6d316235f1a
|
[
"Apache-2.0"
] | null | null | null |
e3/provisioning/AtlassianAwsSecurity.py
|
sguillory6/e3
|
1505e6ea389157b9645155b9da13d6d316235f1a
|
[
"Apache-2.0"
] | null | null | null |
e3/provisioning/AtlassianAwsSecurity.py
|
sguillory6/e3
|
1505e6ea389157b9645155b9da13d6d316235f1a
|
[
"Apache-2.0"
] | null | null | null |
import logging
import logging.config
import os
import subprocess
from datetime import datetime, timedelta
from botocore.credentials import CredentialProvider, RefreshableCredentials
from dateutil.tz import tzlocal
from common.E3 import e3
class AtlassianAwsSecurity(CredentialProvider):
"""
This class is only used internally by Atlassian to make use of our SAML implementation for AWS authentication.
It is included in the E3 distribution to serve as an example of how to integrate 3rd party authentication
tools with E3
"""
METHOD = "awstoken"
AWS_ACCESS_KEY_ID_KEY = 'AWS_ACCESS_KEY_ID'
AWS_SECRET_ACCESS_KEY_KEY = 'AWS_SECRET_ACCESS_KEY'
AWS_SECURITY_TOKEN_KEY = 'AWS_SECURITY_TOKEN'
def __init__(self, environ=None, mapping=None):
super(AtlassianAwsSecurity, self).__init__()
conf = e3.get_auth_config()
logging.debug("Atlassian AWS config: %s" % conf)
self._script = os.path.expanduser(conf.get('script', None))
self._token_file = os.path.expanduser(conf.get('tokens', None))
        self._token_valid_for = int(conf.get('valid_for', 3600))  # int(): Python 3 has no long()
def load(self):
return RefreshableCredentials.create_from_metadata(
metadata=self.refresh(),
refresh_using=self.refresh,
method=self.METHOD)
def refresh(self):
if not (self._script and self._token_file):
logging.error("Unable to refresh tokens because configuration is missing")
return None
self._run_script()
return self._parse_tokens()
def _parse_tokens(self):
if not os.path.exists(self._token_file):
logging.error("Unable to locate '%s' unable to load AWS credentials, trying to proceed without them.",
self._token_file)
else:
with open(self._token_file) as tokens:
expiry = datetime.now(tzlocal()) + timedelta(minutes=55)
metadata = {
"expiry_time": str(expiry)
}
lines = tokens.readlines()
for line in lines:
line_tokens = line[7:-1]
eq_pos = line_tokens.find("=")
token_key = line_tokens[0:eq_pos]
token_value = line_tokens[eq_pos + 1:]
if token_key == self.AWS_ACCESS_KEY_ID_KEY:
metadata["access_key"] = token_value
if token_key == self.AWS_SECRET_ACCESS_KEY_KEY:
metadata["secret_key"] = token_value
self._aws_secret_access_key = token_value
if token_key == self.AWS_SECURITY_TOKEN_KEY:
metadata["token"] = token_value
self._aws_security_token = token_value
return metadata
return None
def _run_script(self):
        # dict.update() returns None, so copy first and update in place
        environ = os.environ.copy()
        environ.update({
            'PATH': '/usr/local/bin:/usr/local/sbin:/usr/bin:/bin:/usr/sbin:/sbin',
            'SHELL': '/bin/bash'
        })
subprocess.call(self._script, shell=True, env=environ)
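# A hypothetical registration sketch: botocore resolves credentials through an
# ordered provider chain, so a custom provider like this is typically inserted
# into that chain ('env' is the name of botocore's environment-variable provider):
def _register(session):
    resolver = session.get_component('credential_provider')
    resolver.insert_before('env', AtlassianAwsSecurity())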
| 39.886076
| 114
| 0.614408
| 372
| 3,151
| 4.948925
| 0.349462
| 0.039109
| 0.035307
| 0.039109
| 0.154264
| 0.074959
| 0.074959
| 0.039109
| 0.039109
| 0
| 0
| 0.00724
| 0.298635
| 3,151
| 78
| 115
| 40.397436
| 0.825792
| 0.072993
| 0
| 0.031746
| 0
| 0.015873
| 0.126381
| 0.02797
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079365
| false
| 0
| 0.126984
| 0.015873
| 0.365079
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fff91c879216ac70a7559f58214c7d1b3892a9ea
| 3,264
|
py
|
Python
|
django_input_collection/api/restframework/collection.py
|
pivotal-energy-solutions/django-input-collection
|
cc2ce3e0a7104ba9c524eaba5706da94ddb04a5f
|
[
"Apache-2.0"
] | null | null | null |
django_input_collection/api/restframework/collection.py
|
pivotal-energy-solutions/django-input-collection
|
cc2ce3e0a7104ba9c524eaba5706da94ddb04a5f
|
[
"Apache-2.0"
] | 4
|
2019-08-25T15:47:24.000Z
|
2022-03-24T19:35:09.000Z
|
django_input_collection/api/restframework/collection.py
|
pivotal-energy-solutions/django-input-collection
|
cc2ce3e0a7104ba9c524eaba5706da94ddb04a5f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.urls import reverse
from rest_framework.response import Response
from rest_framework import status
from ...collection import BaseAPICollector, BaseAPISpecification
from ... import models
from . import serializers
class RestFrameworkSpecification(BaseAPISpecification):
content_type = "application/json"
def get_api_info(self):
info = super(RestFrameworkSpecification, self).get_api_info()
input_list = reverse("collection-api:input-list")
input_detail = reverse("collection-api:input-detail", kwargs={"pk": "__id__"})
instrument_list = reverse("collection-api:instrument-list")
instrument_detail = reverse("collection-api:instrument-detail", kwargs={"pk": "__id__"})
info["endpoints"] = {
"input": {
"list": {"url": input_list, "method": "GET"},
"add": {"url": input_list, "method": "POST"},
"get": {"url": input_detail, "method": "GET"},
"delete": {"url": input_detail, "method": "DELETE"},
},
"instrument": {
"list": {"url": instrument_list, "method": "GET"},
"get": {"url": instrument_detail, "method": "GET"},
},
}
return info
class RestFrameworkCollector(BaseAPICollector):
specification_class = RestFrameworkSpecification
model_codenames = {
models.Measure: "measure",
models.CollectionRequest: "request",
models.CollectionGroup: "segment",
models.CollectionGroup: "group",
models.CollectionInstrument: "instrument",
models.get_input_model(): "input",
}
# dynamic rest_framework overrides per model (use codename strings)
serializer_classes = {}
pagination_classes = {}
default_serializer_classes = {
"measure": serializers.MeasureSerializer,
"request": serializers.CollectionRequestSerializer,
"segment": serializers.CollectionGroupSerializer,
"group": serializers.CollectionGroupSerializer,
"instrument": serializers.CollectionInstrumentSerializer,
"input": serializers.CollectedInputSerializer,
}
def get_pagination_class(self, model):
"""
Returns a rest_framework pagination class for the model's viewset. Returning ``None`` will
be taken directly (disabling pagination), and ``False`` will ensure rest_framework still
applies whatever default pagination policy is in effect.
"""
codename = self.model_codenames.get(model, model)
return self.pagination_classes.get(codename, False)
def get_serializer_class(self, model):
"""Returns a rest_framework serializer class for the model's viewset."""
codename = self.model_codenames.get(model, model)
return self.serializer_classes.get(codename, self.default_serializer_classes[codename])
def get_destroy_response(self, instrument):
"""Returns a rest_framework Response when an input is deleted from this instrument."""
return Response(status=status.HTTP_204_NO_CONTENT)
def validate(self, instrument, data):
"""Raises any validation errors in the serializer's ``data``."""
return data
| 38.857143
| 99
| 0.662377
| 319
| 3,264
| 6.611285
| 0.332288
| 0.043148
| 0.037933
| 0.029872
| 0.102418
| 0.102418
| 0.079659
| 0.046468
| 0.046468
| 0
| 0
| 0.00158
| 0.224571
| 3,264
| 83
| 100
| 39.325301
| 0.831687
| 0.163297
| 0
| 0.035088
| 0
| 0
| 0.131215
| 0.042617
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087719
| false
| 0
| 0.105263
| 0
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fffc90bcd5aabe8c07f5b2517e1c835715addf0e
| 770
|
py
|
Python
|
DFS/depth_first_search.py
|
Quanta-Algorithm-Design/graphs
|
3a5b6362bf60a1e2fb06d2fadab46e72124d637d
|
[
"MIT"
] | null | null | null |
DFS/depth_first_search.py
|
Quanta-Algorithm-Design/graphs
|
3a5b6362bf60a1e2fb06d2fadab46e72124d637d
|
[
"MIT"
] | null | null | null |
DFS/depth_first_search.py
|
Quanta-Algorithm-Design/graphs
|
3a5b6362bf60a1e2fb06d2fadab46e72124d637d
|
[
"MIT"
] | 1
|
2020-10-05T06:46:13.000Z
|
2020-10-05T06:46:13.000Z
|
#!/usr/bin/env python3
"""
This module defines functions for depth-first-search in a graph with a given adjacency list
"""
def dfs_visit(node_list, adj_list, root_node, parent):
"""
Takes the graph node list, its adj list, and a node s,
and visits all the nodes reachable from s recursively.
"""
for node in adj_list[root_node]:
if node not in parent:
parent[node] = root_node
dfs_visit(node_list, adj_list, node, parent)
def dfs(node_list, adj_list):
"""
Iterate over possible root_nodes to explore the whole graph
"""
parent = {}
for root_node in node_list:
if root_node not in parent:
parent[root_node] = None
dfs_visit(node_list, adj_list, root_node, parent)
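# A minimal usage sketch (hypothetical 4-node graph). `parent` records the DFS
# forest, with None marking the roots:
if __name__ == "__main__":
    node_list = ["a", "b", "c", "d"]
    adj_list = {"a": ["b", "c"], "b": ["d"], "c": [], "d": []}
    parent = {}
    for root_node in node_list:
        if root_node not in parent:
            parent[root_node] = None
            dfs_visit(node_list, adj_list, root_node, parent)
    print(parent)  # {'a': None, 'b': 'a', 'd': 'b', 'c': 'a'}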
| 29.615385
| 91
| 0.654545
| 117
| 770
| 4.128205
| 0.393162
| 0.115942
| 0.091097
| 0.124224
| 0.287785
| 0.200828
| 0.153209
| 0.153209
| 0.153209
| 0
| 0
| 0.001761
| 0.262338
| 770
| 25
| 92
| 30.8
| 0.848592
| 0.367532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0803020bd1e3c35bd9b149aea49e7ac12f9623a3
| 933
|
py
|
Python
|
setup.py
|
yihong0618/-nbnhhsh-cli
|
3c8241dbc772b4b693e06b350c4351e75572596a
|
[
"Apache-2.0"
] | 33
|
2021-07-09T05:40:00.000Z
|
2022-02-07T12:49:34.000Z
|
setup.py
|
yihong0618/-nbnhhsh-cli
|
3c8241dbc772b4b693e06b350c4351e75572596a
|
[
"Apache-2.0"
] | 1
|
2021-07-09T05:37:02.000Z
|
2021-07-09T05:37:02.000Z
|
setup.py
|
yihong0618/-nbnhhsh-cli
|
3c8241dbc772b4b693e06b350c4351e75572596a
|
[
"Apache-2.0"
] | 2
|
2021-07-10T10:25:08.000Z
|
2021-07-11T03:16:38.000Z
|
from setuptools import setup, find_packages
VERSION = "0.1.1"
setup(
name="hhsh",
version=VERSION,
description="能不能好好说话? cli",
long_description="能不能好好说话? cli",
keywords="python hhsh cli terminal",
author="itorr,yihong0618",
author_email="zouzou0208@gmail.com",
url="https://github.com/yihong0618/hhsh",
packages=find_packages(),
include_package_data=True,
zip_safe=True,
install_requires=["requests", "rich"],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries",
],
entry_points={
"console_scripts": ["hhsh = hhsh.hhsh:main"],
},
)
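# "能不能好好说话?" roughly translates to "Can't you speak properly?", the tagline
# of the nbnhhsh abbreviation-expansion project. A hypothetical smoke test:
#   $ pip install .
#   $ hhsh yyds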
| 29.15625
| 53
| 0.621651
| 96
| 933
| 5.9375
| 0.677083
| 0.1
| 0.131579
| 0.136842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030683
| 0.231511
| 933
| 31
| 54
| 30.096774
| 0.764296
| 0
| 0
| 0
| 0
| 0
| 0.476956
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034483
| 0
| 0.034483
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08075a784b23b26531f0e2fcf4a1653e8cbbe078
| 1,118
|
py
|
Python
|
tests/test_blender.py
|
dumpmemory/lassl
|
dfe56f09cc2ade6c777ad8561b24f23d83a34188
|
[
"Apache-2.0"
] | null | null | null |
tests/test_blender.py
|
dumpmemory/lassl
|
dfe56f09cc2ade6c777ad8561b24f23d83a34188
|
[
"Apache-2.0"
] | null | null | null |
tests/test_blender.py
|
dumpmemory/lassl
|
dfe56f09cc2ade6c777ad8561b24f23d83a34188
|
[
"Apache-2.0"
] | null | null | null |
from collections import Counter
import pytest
from datasets import load_dataset
from lassl.blender import DatasetBlender
def test_blending():
try:
from langid import classify
except ImportError as _:
raise ImportError(
"To test dataset blending, you need to install langid. "
"Please install langid using `pip install langid`."
)
en = load_dataset("squad").data["train"]["context"]
ko = load_dataset("oscar", "unshuffled_deduplicated_ko").data["train"]["text"]
ja = load_dataset("amazon_reviews_multi", "ja").data["train"]["review_body"]
weights = {"en": 0.2, "ko": 0.5, "ja": 0.3}
datasets = {"en": en, "ko": ko, "ja": ja}
blend = DatasetBlender(
datasets=list(datasets.values()),
weights=list(weights.values()),
)
langs = [classify(str(blend[i]))[0] for i in range(10)]
counts = Counter(langs)
assert int(counts["ko"]) == int(weights["ko"] * 10)
assert int(counts["en"]) == int(weights["en"] * 10)
assert int(counts["ja"]) == int(weights["ja"] * 10)
print("All tests are passed ;)")
| 30.216216
| 82
| 0.620751
| 143
| 1,118
| 4.776224
| 0.468531
| 0.064422
| 0.065886
| 0.04978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017162
| 0.218247
| 1,118
| 36
| 83
| 31.055556
| 0.764302
| 0
| 0
| 0
| 0
| 0
| 0.219141
| 0.023256
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.037037
| false
| 0.037037
| 0.259259
| 0
| 0.296296
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
080796109f90dd5533115b48ae3a4657f5fb4224
| 4,542
|
py
|
Python
|
wisps/data_analysis/path_parser.py
|
caganze/WISPS
|
81b91f8b49c7345ab68b7c4eb480716985e8905c
|
[
"MIT"
] | null | null | null |
wisps/data_analysis/path_parser.py
|
caganze/WISPS
|
81b91f8b49c7345ab68b7c4eb480716985e8905c
|
[
"MIT"
] | 7
|
2021-02-02T21:51:56.000Z
|
2022-01-13T00:57:45.000Z
|
wisps/data_analysis/path_parser.py
|
caganze/wisps
|
6572201f94a6af6d1c0a306f2f447215d4330bd7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
After the introduction of version 6.2, all wisp data and hst-3d are now on MAST
3D-HST has not added any new data nor changed their directory structure,
but that's not the case for WISP
Aim: parse new directories to make them compatible with v5.0
"""
import os
import glob
from ..utils import memoize_func
REMOTE_FOLDER=os.environ['WISP_SURVEY_DATA']
@memoize_func
def get_image_path(name, spectrum_path):
#print (name)
##returns the image path without going through the whole thing again
if name.lower().startswith('par') or name.startswith('hlsp'):
survey='wisps'
elif name.startswith('goo') or name.startswith('ud') or name.startswith('aeg') or name.startswith('cos'):
survey='hst3d'
if survey=='wisps':
folder=name.split('wfc3_')[-1].split('wfc3_')[-1].split('-')[0]
if '_wfc3' in name:
name=(name.split('wfc3_')[-1]).split('_g141')[0]
#print (name)
#print (REMOTE_FOLDER+'/wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'*/2dstamp/hlsp_wisp_hst_wfc3*'+name+'*stamp2d.fits')
stamp_image_path=glob.glob(REMOTE_FOLDER+'/wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'*/2dstamp/hlsp_wisp_hst_wfc3*'+name+'*stamp2d.fits')[0]
if survey=='hst3d':
#print (spectrum_path.split('/1D/ASCII/')[0]+'/2D/'+'FITS/'+name.split('1D')[0]+'*2D.fits')
stamp_image_path=glob.glob(spectrum_path.split('/1D/ASCII/')[0]+'/2D/'+'FITS/'+name.split('1D')[0]+'*2D.fits')[0]
#print ('stamp image',stamp_image_path )
#print (survey, spectrum_path, stamp_image_path)
return survey, stamp_image_path
@memoize_func
def parse_path(name, version):
"""
Parse a filename and retrieve all the survey info at once
"""
survey=None
spectrum_path=None
stamp_image_path=None
if name.startswith('Par') or name.startswith('par') or name.startswith('hlsp'):
survey='wisps'
elif name.startswith('goo') or name.startswith('ud') or name.startswith('aeg') or name.startswith('cos'):
survey='hst3d'
else:
survey=None
if survey=='wisps':
spectrum_path=_run_search(name)
folder=name.split('wfc3_')[-1].split('wfc3_')[-1].split('-')[0]
name=name.split('_wfc3_')[-1].split('a_g102')[0]
stamp_image_path=glob.glob(REMOTE_FOLDER+'/wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'*/2dstamp/hlsp_wisp_hst_wfc3*'+name+'*a_g141_v6.2_stamp2d.fits')[0]
if survey=='hst3d':
spectrum_path=_run_search(name)
s= spectrum_path.split('/1D/ASCII/')[0]+'/2D/'+'FITS/'+name.split('1D')[0]+'*2D.fits'
stamp_image_path=glob.glob(s.replace('g141', 'G141') )[0]
#print ('stamp image',stamp_image_path )
#print (survey, spectrum_path, stamp_image_path)
#blah
return survey, spectrum_path, stamp_image_path
@memoize_func
def _run_search(name):
#internal function used to search path given spectrum name
path=''
prefix= name[:3]
if name.startswith('Par') or name.startswith('par') or name.startswith('hlsp'):
#search version 6
if name.endswith('.dat'):
n=name.split('.dat')[0]
folder=name.split('wfc3_')[-1].split('wfc3_')[-1].split('-')[0]
else:
folder=name.split('-')[0]
n=name
path1=REMOTE_FOLDER+'wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'/1dspectra/*'+n+'*a_g141_*'
path2=REMOTE_FOLDER+'wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'/1dspectra/*'+n+'*a_g102-g141_*'
path=glob.glob(path1)[0]
if len(glob.glob(path2)) > 0:
path=glob.glob(path2)[0]
#except:
# #search version 5
# folder=name.split('_')[0]
# path=REMOTE_FOLDER+'wisps/'+folder+'*/Spectra/*'+name+'.dat'
# #print (path)
# path=glob.glob(path)[0]
if prefix in ['aeg', 'cos', 'uds', 'goo']:
syls= (name.split('-'))
str_= REMOTE_FOLDER+'*'+prefix+'*'+'/*'+prefix+ '*'+syls[1]+'*'+'/1D/ASCII/'+prefix+'*'+ syls[1]+ '*'+syls[2]+'*'
#print (str_)
path=glob.glob(str_.replace('g141', 'G141'))[0]
return path
@memoize_func
def return_path(name):
    # print(name)
if type(name) is list:
paths=[]
for p in name:
paths.append( _run_search(p))
return paths
if type(name) is str:
return _run_search(name)
@memoize_func
def return_spectrum_name(path):
""" returns name given path in the wisp folder"""
name=''
if path.endswith('.dat'):
name= path.split('.dat')[0].split('/')[-1]
else:
name=path.split('.ascii')[0].split('/')[-1].split('.')[0]
return name
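# A hypothetical round-trip sketch (requires WISP_SURVEY_DATA to point at a
# local mirror of the survey data; the object name here is made up):
#   survey, spec_path, stamp_path = parse_path('Par302-example-name', version=6.2)
#   name = return_spectrum_name(spec_path)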
| 34.409091
| 170
| 0.647952
| 666
| 4,542
| 4.274775
| 0.208709
| 0.073762
| 0.061819
| 0.04215
| 0.505796
| 0.488233
| 0.432385
| 0.432385
| 0.432385
| 0.432385
| 0
| 0.034817
| 0.158961
| 4,542
| 132
| 171
| 34.409091
| 0.710471
| 0.247688
| 0
| 0.350649
| 0
| 0
| 0.188075
| 0.082171
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064935
| false
| 0
| 0.038961
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0811dfdcb7e741d544fe728950a10ae174c04263
| 3,284
|
py
|
Python
|
fileForRepair/src/parking.py
|
ChangSeonKim/5G_UWC_project
|
0504a1b1ed30787f30e18a178897978de55660ef
|
[
"Apache-2.0"
] | null | null | null |
fileForRepair/src/parking.py
|
ChangSeonKim/5G_UWC_project
|
0504a1b1ed30787f30e18a178897978de55660ef
|
[
"Apache-2.0"
] | null | null | null |
fileForRepair/src/parking.py
|
ChangSeonKim/5G_UWC_project
|
0504a1b1ed30787f30e18a178897978de55660ef
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python3
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
import numpy as np
import math
from std_msgs.msg import String
def callback(data):
laser_arr_f = np.array(data.ranges[0:10])
laser_arr_l= np.array(data.ranges[85:95])
laser_arr_r = np.array(data.ranges[265:275])
block_f = laser_arr_f.mean()
block_r = laser_arr_r.mean()
block_l = laser_arr_l.mean()
print(block_f, block_r, block_l)
msg = Twist()
if block_f > 0.225: # and block_f < 0.3:
#go straight
msg.linear.x = 1
pub.publish(msg)
# elif block_f > 0.45:
# #go straight
# msg.linear.x = 1
# if ( block_l - block_r) > 0.05:
# msg.linear.x = 1
# msg.angular.z = -0.5
# elif ( block_l - block_r) < -0.05:
# msg.linear.x = 1
# msg.angular.z = -0.5
# else:
# msg.linear.x = 1
# msg.angular.z = 0.0
# pub.publish(msg)
else:
#stop
msg.linear.x = 0
pub.publish(msg)
if block_f < 0.225 and block_r > 0.30:
# right-turn
relative_angle = math.radians(95)
angular_speed = -1.0
duration = relative_angle/abs(angular_speed)
msg.angular.z = angular_speed
time2end = rospy.Time.now() + rospy.Duration(duration)
while rospy.Time.now() < time2end:
pub.publish(msg)
# new = 0
msg.linear.x = 0
msg.angular.z = 0
pub.publish(msg)
# rospy.sleep(.2)
elif block_f < 0.225 and block_l > 0.30:
# left-turn
relative_angle = math.radians(95)
angular_speed = 1.0
duration = relative_angle/abs(angular_speed)
msg.angular.z = angular_speed
time2end = rospy.Time.now() + rospy.Duration(duration)
while rospy.Time.now() < time2end:
pub.publish(msg)
# new = 0
msg.linear.x = 0
msg.angular.z = 0
pub.publish(msg)
# rospy.sleep(.2)
# elif block_f < 0.225 and block_l < 0.3 and block_r < 0.3:
# # U-turn
# relative_angle = math.radians(190)
# angular_speed = 1.0
# duration = relative_angle/abs(angular_speed)
# msg.angular.z = angular_speed
# time2end = rospy.Time.now() + rospy.Duration(duration)
# while rospy.Time.now() < time2end:
# pub.publish(msg)
# msg.linear.x = 0
# msg.angular.z = 0
# pub.publish(msg)
# rospy.sleep(.2)
# elif block_f < 0.225 and block_l > 0.3 and block_r > 0.3:
# # stop
# msg.linear.x = 0
# msg.angular.z = 0
# pub.publish(msg)
# # rospy.sleep(.2)
else:
pass
return
def stop(msg):
if(msg.data == 'stop here'):
msg = Twist()
#stop
msg.linear.x = 0
msg.angular.z = 0
pub.publish(msg)
if __name__ =='__main__':
rospy.init_node('parking')
pub = rospy.Publisher('/cmd_vel',Twist, queue_size=10)
rospy.Subscriber('/scan',LaserScan, queue_size = 1, callback = callback)
rospy.Subscriber('helloworld03', String, callback=stop)
rospy.spin()
pass
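# Typical launch (hypothetical package name), once roscore and the laser
# driver are publishing /scan:
#   $ rosrun <your_package> parking.py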
| 29.061947
| 76
| 0.546894
| 452
| 3,284
| 3.829646
| 0.205752
| 0.057192
| 0.063547
| 0.055459
| 0.631427
| 0.59792
| 0.573657
| 0.550549
| 0.537262
| 0.537262
| 0
| 0.050409
| 0.329476
| 3,284
| 113
| 77
| 29.061947
| 0.735695
| 0.350183
| 0
| 0.508475
| 0
| 0
| 0.02358
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0.033898
| 0.101695
| 0
| 0.152542
| 0.016949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08138545899e44b68cb9f2c6902d9d5be0b380f7
| 2,622
|
py
|
Python
|
opennsa/provreg.py
|
jmacauley/opennsa
|
853c0fc8e065e74815cbc3f769939f64ac6aadeb
|
[
"BSD-3-Clause"
] | null | null | null |
opennsa/provreg.py
|
jmacauley/opennsa
|
853c0fc8e065e74815cbc3f769939f64ac6aadeb
|
[
"BSD-3-Clause"
] | null | null | null |
opennsa/provreg.py
|
jmacauley/opennsa
|
853c0fc8e065e74815cbc3f769939f64ac6aadeb
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Registry for tracking providers dynamically in OpenNSA.
Keeping track of providers dynamically in an NSI implementation is a
huge pain in the ass. This is a combination of things, such as separate
identities and endpoints, callbacks, and the combination of local providers.
The class ProviderRegistry tries to keep it a bit sane.
"""
from twisted.python import log
from opennsa import error
LOG_SYSTEM = 'providerregistry'
class ProviderRegistry(object):
def __init__(self, providers, provider_factories):
# usually initialized with local providers
self.providers = providers.copy()
self.provider_factories = provider_factories # { provider_type : provider_spawn_func }
self.provider_networks = {} # { provider_urn : [ network ] }
def getProvider(self, nsi_agent_urn):
"""
Get a provider from a NSI agent identity/urn.
"""
try:
return self.providers[nsi_agent_urn]
except KeyError:
raise error.STPResolutionError('Could not resolve a provider for %s' % nsi_agent_urn)
def getProviderByNetwork(self, network_id):
"""
Get the provider urn by specifying network.
"""
for provider, networks in self.provider_networks.items():
if network_id in networks:
return provider
else:
raise error.STPResolutionError('Could not resolve a provider for %s' % network_id)
def addProvider(self, nsi_agent_urn, provider, network_ids):
"""
Directly add a provider. Probably only needed by setup.py
"""
if not nsi_agent_urn in self.providers:
log.msg('Creating new provider for %s' % nsi_agent_urn, system=LOG_SYSTEM)
self.providers[ nsi_agent_urn ] = provider
self.provider_networks[ nsi_agent_urn ] = network_ids
def spawnProvider(self, nsi_agent, network_ids):
"""
Create a new provider, from an NSI agent.
ServiceType must exist on the NSI agent, and a factory for the type available.
"""
if nsi_agent.urn() in self.providers and self.provider_networks[nsi_agent.urn()] == network_ids:
log.msg('Skipping provider spawn for %s (no change)' % nsi_agent, debug=True, system=LOG_SYSTEM)
return self.providers[nsi_agent.urn()]
factory = self.provider_factories[ nsi_agent.getServiceType() ]
prov = factory(nsi_agent)
self.addProvider(nsi_agent.urn(), prov, network_ids)
log.msg('Spawned new provider for %s' % nsi_agent, system=LOG_SYSTEM)
return prov
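# A minimal usage sketch (hypothetical URNs; real providers are spawned from
# NSI agents via spawnProvider):
def _registry_demo():
    registry = ProviderRegistry({}, {})
    registry.addProvider('urn:ogf:network:example:nsa', object(), ['example:topology'])
    assert registry.getProviderByNetwork('example:topology') == 'urn:ogf:network:example:nsa'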
| 34.051948
| 108
| 0.672006
| 332
| 2,622
| 5.14759
| 0.337349
| 0.093622
| 0.077238
| 0.036864
| 0.228204
| 0.21416
| 0.113517
| 0.113517
| 0.065535
| 0.065535
| 0
| 0
| 0.250191
| 2,622
| 76
| 109
| 34.5
| 0.869278
| 0.2746
| 0
| 0
| 0
| 0
| 0.102178
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151515
| false
| 0
| 0.060606
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
081559dc3ab661ae3a1df9c2d52bc8d2ba1f2ae4
| 997
|
py
|
Python
|
tests/test_task_tracker.py
|
jmchilton/shedclient-beta
|
50041b488652f8bf40555b0c1ef001290f1c3f6a
|
[
"CC-BY-3.0"
] | 2
|
2015-12-21T02:18:54.000Z
|
2016-09-08T13:56:36.000Z
|
tests/test_task_tracker.py
|
jmchilton/shedclient-beta
|
50041b488652f8bf40555b0c1ef001290f1c3f6a
|
[
"CC-BY-3.0"
] | 1
|
2015-12-21T19:26:21.000Z
|
2015-12-21T19:26:21.000Z
|
tests/test_task_tracker.py
|
jmchilton/shedclient-beta
|
50041b488652f8bf40555b0c1ef001290f1c3f6a
|
[
"CC-BY-3.0"
] | null | null | null |
from test_utils import TempDirectoryContext
from shedclient import task_tracker
def test_task_tracker():
with TempDirectoryContext() as context:
config = dict(
task_tracking_directory=context.temp_directory
)
tracker = task_tracker.build_task_tracker(config)
assert len(tracker.list_active_tasks()) == 0
task0_id = tracker.register_task({"state": "new"})
assert len(tracker.list_active_tasks()) == 1
task0_state0 = tracker.read_task(task0_id)
assert task0_state0["state"] == "new"
tracker.delete_task(task0_id)
assert len(tracker.list_active_tasks()) == 0
task1_id = tracker.register_task({"state": "new"})
assert len(tracker.list_active_tasks()) == 1
tracker.update_task(task1_id, {"state": "queued", "name": "task 1"})
task1_state0 = tracker.read_task(task1_id)
assert task1_state0["state"] == "queued"
assert task1_state0["name"] == "task 1"
| 33.233333
| 76
| 0.657974
| 120
| 997
| 5.175
| 0.308333
| 0.070853
| 0.10306
| 0.128824
| 0.299517
| 0.299517
| 0.299517
| 0.196457
| 0.196457
| 0.196457
| 0
| 0.028461
| 0.224674
| 997
| 29
| 77
| 34.37931
| 0.774903
| 0
| 0
| 0.190476
| 0
| 0
| 0.066199
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08212ae6445b938c3145af03c666f1c2c0d5163b
| 439
|
py
|
Python
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/conftest.py
|
SirTelemak/cookiecutter-python-template
|
d7d8c4493250654a4ee3badb36c4c4da1ccb8d3d
|
[
"MIT"
] | 2
|
2020-06-04T19:17:13.000Z
|
2020-06-05T08:05:16.000Z
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/conftest.py
|
SirTelemak/cookiecutter-python-template
|
d7d8c4493250654a4ee3badb36c4c4da1ccb8d3d
|
[
"MIT"
] | 1
|
2020-08-06T15:01:47.000Z
|
2020-08-06T15:01:47.000Z
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/conftest.py
|
SirTelemak/cookiecutter-python-template
|
d7d8c4493250654a4ee3badb36c4c4da1ccb8d3d
|
[
"MIT"
] | 2
|
2020-06-15T19:26:33.000Z
|
2020-11-20T20:24:03.000Z
|
import logging
import pytest
from loguru import logger
@pytest.fixture(name='caplog', autouse=True)
def loguru_caplog(caplog):
    class PropagateHandler(logging.Handler):
def emit(self, record):
logging.getLogger(record.name).handle(record)
logger.remove()
handler_id = logger.add(PropagateHandler(), format='{message}', backtrace=False)
caplog.clear()
yield caplog
logger.remove(handler_id)
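# Hedged usage sketch (not part of the original conftest): with the autouse
# fixture above, loguru records propagate into pytest's standard caplog, so a
# plain test can assert on them. The message text here is hypothetical.
#
# from loguru import logger
#
# def test_warning_is_captured(caplog):
#     logger.warning("disk almost full")
#     assert "disk almost full" in caplog.text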
| 21.95
| 84
| 0.708428
| 51
| 439
| 6.039216
| 0.568627
| 0.077922
| 0.123377
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177677
| 439
| 19
| 85
| 23.105263
| 0.853186
| 0
| 0
| 0
| 0
| 0
| 0.034169
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.230769
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0822f39156313d04e61ff6ddaaed66e14edc3a38
| 3,692
|
py
|
Python
|
scripts/convert_queries.py
|
galuscakova/podcasts
|
967cc04e2b0f7cf963a189ac5270cfa69f81a540
|
[
"BSD-4-Clause-UC"
] | null | null | null |
scripts/convert_queries.py
|
galuscakova/podcasts
|
967cc04e2b0f7cf963a189ac5270cfa69f81a540
|
[
"BSD-4-Clause-UC"
] | null | null | null |
scripts/convert_queries.py
|
galuscakova/podcasts
|
967cc04e2b0f7cf963a189ac5270cfa69f81a540
|
[
"BSD-4-Clause-UC"
] | 1
|
2021-05-27T07:44:51.000Z
|
2021-05-27T07:44:51.000Z
|
import getopt
import sys
import os
import re
import string
import xml.etree.ElementTree as ET
input_filename = ""
expansion_filename = ""
output_type = "combine"
exclude = set(string.punctuation)
options, remainder = getopt.getopt(sys.argv[1:], 'i:e:t:', ['inputfile=', 'expansionfile=', 'type='])
for opt, arg in options:
if opt in ('-i', '--inputfile'):
input_filename = arg
if (not os.path.exists(input_filename)):
sys.exit("Error: Inputfile does not exists")
if opt in ('-e', '--expansionfile'):
expansion_filename = arg
if (not os.path.exists(expansion_filename)):
sys.exit("Error: Expansion file does not exists")
if opt in ('-t', '--type'):
output_type = arg
def get_sdm_query(query,lambda_t=0.8,lambda_o=0.1,lambda_u=0.1):
words = query.split()
if len(words)==1:
return f"{lambda_t} #combine( {query} )"
terms = " ".join(words)
ordered = "".join([" #1({}) ".format(" ".join(bigram)) for bigram in zip(words,words[1:])])
unordered = "".join([" #uw8({}) ".format(" ".join(bigram)) for bigram in zip(words,words[1:])])
indri_query = f"{lambda_t} #combine( {terms} ) {lambda_o} #combine({ordered}) {lambda_u} #combine({unordered})"
return indri_query
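# Illustrative check (not in the original script): for a two-word query the
# helper above emits unigram, ordered-window (#1) and unordered-window (#uw8)
# clauses weighted by lambda_t/lambda_o/lambda_u:
#   get_sdm_query("climate change")
#   -> '0.8 #combine( climate change ) 0.1 #combine( #1(climate change) ) 0.1 #combine( #uw8(climate change) )'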
expansion_terms = []
if (expansion_filename != ""):
with open(expansion_filename) as expandfile:
expansion_terms = expandfile.readlines()
xml_root = ET.parse(input_filename)
print("<parameters>")
order = 0
for topic in xml_root.findall('.//topic'):
num = topic.find('num').text
query = topic.find('query').text
description = topic.find('description').text
query = query.replace('-', ' ')
query = query.replace('\n', ' ')
description = description.replace('-', ' ')
description = description.replace('\n', ' ')
query = query.translate(str.maketrans('', '', string.punctuation))
description = description.translate(str.maketrans('', '', string.punctuation))
print("<query>")
print("<number>" + str(num) + "</number>")
expansion = ""
if ( expansion_filename != ""):
line_expansion_term = expansion_terms[order]
line_expansion_term = line_expansion_term.replace("[", "")
line_expansion_term = line_expansion_term.replace("]", "")
line_expansion_term = line_expansion_term.replace('"', "")
line_expansion_term = line_expansion_term.replace('\n',"")
line_expansion_terms = line_expansion_term.split(',')
expansion = " "
max_expansion_terms = 10
for i in range (min(max_expansion_terms, len(line_expansion_terms))):
if (':' in line_expansion_terms[i]):
term,score = line_expansion_terms[i].split(':')
score = score.replace("\n", "")
if (output_type == "weights"):
expansion = expansion + str(score) + " #combine(" + term + ") "
else:
expansion = expansion + term
expansion = expansion + " "
if (output_type == "combine"):
print("<text>#combine(" + query + " " + expansion + description + ")</text>")
if (output_type == "weights"):
print("<text>#weight( 1.0 #combine(" + query + ") " + expansion + " 0.5 #combine(" + description + "))</text>")
if (output_type == "terms"):
print("<text>" + query + " " + expansion + description + "</text>")
if (output_type == "sdm"):
query_sdm = get_sdm_query(query)
description_sdm = get_sdm_query(description)
print("<text>#weight(" + query_sdm + " " + description_sdm + ")</text>")
print("</query>")
order += 1
print("</parameters>")
| 34.830189
| 119
| 0.600488
| 421
| 3,692
| 5.092637
| 0.228029
| 0.084888
| 0.079291
| 0.039179
| 0.245802
| 0.197761
| 0.179104
| 0.114739
| 0.114739
| 0.114739
| 0
| 0.006995
| 0.225623
| 3,692
| 105
| 120
| 35.161905
| 0.742917
| 0
| 0
| 0.073171
| 0
| 0.012195
| 0.150447
| 0.005693
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012195
| false
| 0
| 0.073171
| 0
| 0.109756
| 0.109756
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0823b5eeb8c1036e06aae43d61945a3ec0226291
| 2,124
|
py
|
Python
|
tests/decloud_unittest.py
|
CNES/decloud
|
6b06ae98bfe68821b4ebd0e7ba06723809cb9b42
|
[
"Apache-2.0"
] | 8
|
2022-02-25T13:15:07.000Z
|
2022-03-20T18:29:49.000Z
|
tests/decloud_unittest.py
|
CNES/decloud
|
6b06ae98bfe68821b4ebd0e7ba06723809cb9b42
|
[
"Apache-2.0"
] | 1
|
2022-02-25T13:21:33.000Z
|
2022-02-25T13:21:33.000Z
|
tests/decloud_unittest.py
|
CNES/decloud
|
6b06ae98bfe68821b4ebd0e7ba06723809cb9b42
|
[
"Apache-2.0"
] | 1
|
2022-03-31T23:43:12.000Z
|
2022-03-31T23:43:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import subprocess
import unittest
import filecmp
import gdal
import otbApplication as otb
from abc import ABC
from decloud.core.system import get_env_var, pathify, basename
class DecloudTest(ABC, unittest.TestCase):
DECLOUD_DATA_DIR = get_env_var("DECLOUD_DATA_DIR")
def get_path(self, path):
return pathify(self.DECLOUD_DATA_DIR) + path
def compare_images(self, image, reference, mae_threshold=0.01):
nbchannels_reconstruct = gdal.Open(image).RasterCount
nbchannels_baseline = gdal.Open(reference).RasterCount
self.assertTrue(nbchannels_reconstruct == nbchannels_baseline)
for i in range(1, 1+nbchannels_baseline):
comp = otb.Registry.CreateApplication('CompareImages')
comp.SetParameterString('ref.in', reference)
comp.SetParameterInt('ref.channel', i)
comp.SetParameterString('meas.in', image)
comp.SetParameterInt('meas.channel', i)
comp.Execute()
mae = comp.GetParameterFloat('mae')
self.assertTrue(mae < mae_threshold)
def compare_file(self, file, reference):
self.assertTrue(filecmp.cmp(file, reference))
def compare_raster_metadata(self, image, reference):
baseline_gdalinfo_path = '/tmp/baseline_{}_gdalinfo'.format(basename(reference))
subprocess.call('gdalinfo {} | grep --invert-match -e "Files:" -e "METADATATYPE" -e "OTB_VERSION" '
'-e "NoData Value" > {}'.format(reference, baseline_gdalinfo_path), shell=True)
image_gdalinfo_path = '/tmp/image_{}_gdalinfo'.format(basename(image))
subprocess.call('gdalinfo {} | grep --invert-match -e "Files:" -e "METADATATYPE" -e "OTB_VERSION" '
'-e "NoData Value" > {}'.format(image, image_gdalinfo_path), shell=True)
with open(baseline_gdalinfo_path) as f:
baseline_gdalinfo = f.read()
with open(image_gdalinfo_path) as f:
image_gdalinfo = f.read()
self.assertEqual(baseline_gdalinfo, image_gdalinfo)
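# Hedged sketch (not in the original module): a concrete test case built on
# DecloudTest. The relative paths are hypothetical and resolve against
# DECLOUD_DATA_DIR via get_path().
#
# class ReconstructionTest(DecloudTest):
#     def test_against_baseline(self):
#         image = self.get_path("outputs/reconstruction.tif")
#         reference = self.get_path("baseline/reconstruction.tif")
#         self.compare_images(image, reference, mae_threshold=0.01)
#         self.compare_raster_metadata(image, reference)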
| 38.618182
| 107
| 0.672787
| 246
| 2,124
| 5.621951
| 0.353659
| 0.069414
| 0.061461
| 0.041938
| 0.122921
| 0.122921
| 0.122921
| 0.122921
| 0.122921
| 0.122921
| 0
| 0.004192
| 0.213748
| 2,124
| 54
| 108
| 39.333333
| 0.823952
| 0.020245
| 0
| 0.051282
| 0
| 0.051282
| 0.154401
| 0.022607
| 0
| 0
| 0
| 0
| 0.102564
| 1
| 0.102564
| false
| 0
| 0.205128
| 0.025641
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08248cc60a1189c226093e9c782fd70e1acdd43e
| 2,609
|
py
|
Python
|
src/cameraCalibrator.py
|
mdaros2016/CarND-Advanced-Lane-Lines
|
b27d57f1c6730f302f18fb6b8cbbfcb9361d57bf
|
[
"MIT"
] | null | null | null |
src/cameraCalibrator.py
|
mdaros2016/CarND-Advanced-Lane-Lines
|
b27d57f1c6730f302f18fb6b8cbbfcb9361d57bf
|
[
"MIT"
] | null | null | null |
src/cameraCalibrator.py
|
mdaros2016/CarND-Advanced-Lane-Lines
|
b27d57f1c6730f302f18fb6b8cbbfcb9361d57bf
|
[
"MIT"
] | null | null | null |
import glob
import cv2
import numpy as np
class CameraCalibrator:
'''
Class for correcting the distortion of the pictures taken from the camera.
'''
def __init__(self, calibration_pictures_path_pattern='../camera_cal/calibration*.jpg'):
'''
:param calibration_pictures_path_pattern: File system path of a set of 9x6 chessboard pictures that will be used for camera calibration
'''
# store mtx and dist in the status of the object, so we don't have to compute them at every iteration
self.mtx = None
self.dist = None
self.calibration_pictures_path_pattern = calibration_pictures_path_pattern
def undistort(self, img):
'''
Corrects the distortion of an image.
The first invocation of this method will take long, since it will lazily initialize the transformation matrix
:param img: distorted picture to be corrected
:return: the corrected picture
'''
if self.mtx is None:
self.initialize_transformation_matrix()
dst = cv2.undistort(img, self.mtx, self.dist, None, self.mtx)
return dst
def initialize_transformation_matrix(self):
'''
Initializes the transformation matrix, using the pictures contained in the path specified above
:return: Nothing, it just changes the internal status of the object
'''
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6 * 9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
img_size = []
# Make a list of calibration images
images = glob.glob(self.calibration_pictures_path_pattern)
# Step through the list and search for chessboard corners
for idx, fname in enumerate(images):
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
# If found, add object points, image points
if ret:
objpoints.append(objp)
imgpoints.append(corners)
img_size = (img.shape[1], img.shape[0])
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
self.mtx = mtx
self.dist = dist
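# Hedged usage sketch (not part of the original module): the first undistort()
# call triggers the lazy calibration; later calls reuse the cached matrix.
# 'frame.jpg' is a hypothetical input image.
#
# calibrator = CameraCalibrator()
# frame = cv2.imread('frame.jpg')
# corrected = calibrator.undistort(frame)
# cv2.imwrite('frame_undistorted.jpg', corrected)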
| 38.367647
| 143
| 0.63166
| 339
| 2,609
| 4.778761
| 0.412979
| 0.058642
| 0.070988
| 0.092593
| 0.062963
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021402
| 0.283634
| 2,609
| 67
| 144
| 38.940299
| 0.845372
| 0.40207
| 0
| 0
| 0
| 0
| 0.020906
| 0.020906
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.096774
| 0
| 0.258065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0827c8ec658edf16eba00017e1a771b5d2f84def
| 591
|
py
|
Python
|
nicos_ess/dream/setups/beam_monitor.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12
|
2019-11-06T15:40:36.000Z
|
2022-01-01T16:23:00.000Z
|
nicos_ess/dream/setups/beam_monitor.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91
|
2020-08-18T09:20:26.000Z
|
2022-02-01T11:07:14.000Z
|
nicos_ess/dream/setups/beam_monitor.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6
|
2020-01-11T10:52:30.000Z
|
2022-02-25T12:35:23.000Z
|
description = 'Instrument shutter'
prefix = "IOC"
devices = dict(
beam_monitor_1=device(
'nicos_ess.devices.epics.motor.EpicsMotor',
description="Beam monitor continuous position feedback",
motorpv=f'{prefix}:m8',
abslimits=(-10, 10),
unit='mm',
speed=5.,
),
beam_monitor_switch=device(
'nicos.devices.generic.Switcher',
description="Toggles between in and out of the beam",
moveable="beam_monitor_1",
mapping={
'IN': 0,
'OUT': 5,
},
precision=0.01,
)
)
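# Hedged illustration (not part of the setup file): given the Switcher mapping
# above, a NICOS session command like `move(beam_monitor_switch, 'IN')` drives
# beam_monitor_1 to 0 mm, while 'OUT' moves it to 5 mm, matched to within
# precision=0.01.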
| 24.625
| 64
| 0.566836
| 63
| 591
| 5.206349
| 0.68254
| 0.134146
| 0.073171
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03163
| 0.304569
| 591
| 23
| 65
| 25.695652
| 0.766423
| 0
| 0
| 0
| 0
| 0
| 0.341794
| 0.118443
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0829534c63fae0dfb66814593c9605ce70347325
| 28,509
|
py
|
Python
|
biosteam/_system.py
|
tylerhuntington222/biosteam
|
234959180a3210d95e39a012454f455723c92686
|
[
"MIT"
] | null | null | null |
biosteam/_system.py
|
tylerhuntington222/biosteam
|
234959180a3210d95e39a012454f455723c92686
|
[
"MIT"
] | null | null | null |
biosteam/_system.py
|
tylerhuntington222/biosteam
|
234959180a3210d95e39a012454f455723c92686
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020, Yoel Cortes-Pena <yoelcortes@gmail.com>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
import flexsolve as flx
from .digraph import (digraph_from_units_and_streams,
minimal_digraph,
surface_digraph,
finalize_digraph)
from thermosteam import Stream
from thermosteam.utils import registered
from .exceptions import try_method_with_object_stamp
from ._network import Network
from ._facility import Facility
from ._unit import Unit
from .report import save_report
from .exceptions import InfeasibleRegion
from .utils import colors, strtuple
import biosteam as bst
__all__ = ('System',)
# %% Functions for taking care of numerical specifications within a system path
def run_unit_in_path(unit):
specification = unit._specification
if specification:
method = specification
else:
method = unit._run
try_method_with_object_stamp(unit, method)
def converge_system_in_path(system):
specification = system._specification
if specification:
method = specification
else:
method = system._converge
try_method_with_object_stamp(system, method)
def simulate_unit_in_path(unit):
specification = unit._specification
if specification:
try_method_with_object_stamp(unit, unit._load_stream_links)
try_method_with_object_stamp(unit, unit._setup)
try_method_with_object_stamp(unit, specification)
try_method_with_object_stamp(unit, unit._summary)
else:
try_method_with_object_stamp(unit, unit.simulate)
def simulate_system_in_path(system):
specification = system._specification
if specification:
method = specification
else:
method = system.simulate
try_method_with_object_stamp(system, method)
# %% Debugging and exception handling
def _evaluate(self, command=None):
"""
Evaluate a command and request user input for next command.
If no command, return. This function is used for debugging a System object.
"""
# Done evaluating if no command, exit debugger if 'exit'
if command is None:
Next = colors.next('Next: ') + f'{repr(self)}\n'
info = colors.info("Enter to continue or type to evaluate:\n")
command = input(Next + info + ">>> ")
if command == 'exit': raise KeyboardInterrupt()
if command:
# Build locals dictionary for evaluating command
F = bst.main_flowsheet
lcs = {self.ID: self, 'bst': bst,
**F.system.__dict__,
**F.stream.__dict__,
**F.unit.__dict__,
**F.flowsheet.__dict__
}
try:
out = eval(command, {}, lcs)
except Exception as err:
# Print exception and ask to raise error or continue evaluating
err_info = colors.exception(f'{type(err).__name__}:') + f' {str(err)}\n\n'
info = colors.info("Enter to raise error or type to evaluate:\n")
command = input(err_info + info + ">>> ")
if command == '': raise err
_evaluate(self, command)
else:
# If successful, continue evaluating
if out is None: pass
elif (not hasattr(out, '_ipython_display_')
or isinstance(out, type)): print(out)
else: out._ipython_display_()
command = input(">>> ")
_evaluate(self, command)
def _method_debug(self, func):
"""Method decorator for debugging system."""
def wrapper(*args, **kwargs):
# Run method and ask to evaluate
_evaluate(self)
func(*args, **kwargs)
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
wrapper._original = func
return wrapper
def _notify_run_wrapper(self, func):
"""Decorate a System run method to notify you after each loop"""
def wrapper(*args, **kwargs):
if self.recycle:
func(*args, **kwargs)
input(f' Finished loop #{self._iter}\n')
else:
func(*args, **kwargs)
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
wrapper._original = func
return wrapper
# %% Process flow
class system(type):
@property
def converge_method(self):
"""Iterative convergence method ('wegstein', 'aitken', or 'fixed point')."""
return self._converge_method.__name__[1:]
@converge_method.setter
def converge_method(self, method):
method = method.lower().replace('-', '').replace(' ', '')
if 'wegstein' == method:
self._converge_method = self._wegstein
elif 'fixedpoint' == method:
self._converge_method = self._fixed_point
elif 'aitken' == method:
self._converge_method = self._aitken
else:
raise ValueError(f"only 'wegstein', 'aitken', and 'fixed point' methods are valid, not '{method}'")
@registered('SYS')
class System(metaclass=system):
"""
Create a System object that can iteratively run each element in a path
of BioSTEAM objects until the recycle stream is converged. A path can
have function, Unit and/or System objects. When the path contains an
inner System object, it converges/solves it in each loop/iteration.
Parameters
----------
ID : str
A unique identification. If ID is None, instance will not be
registered in flowsheet.
path : tuple[Unit, function and/or System]
A path that is run element by element until the recycle converges.
recycle=None : :class:`~thermosteam.Stream`, optional
A tear stream for the recycle loop.
facilities=() : tuple[Unit, function, and/or System], optional
Offsite facilities that are simulated only after
completing the path simulation.
"""
### Class attributes ###
#: Maximum number of iterations
maxiter = 200
#: Molar tolerance (kmol/hr)
molar_tolerance = 0.50
#: Temperature tolerance (K)
temperature_tolerance = 0.10
# [dict] Cached downstream systems by (system, unit, with_facilities) keys
_cached_downstream_systems = {}
@classmethod
def from_feedstock(cls, ID, feedstock, feeds=None, facilities=(),
ends=None, facility_recycle=None):
"""
Create a System object from a feedstock.
Parameters
----------
ID : str
Name of system.
feedstock : :class:`~thermosteam.Stream`
Main feedstock of the process.
feeds : Iterable[:class:`~thermosteam.Stream`]
Additional feeds to the process.
facilities : Iterable[Facility]
Offsite facilities that are simulated only after
completing the path simulation.
ends : Iterable[:class:`~thermosteam.Stream`]
Streams that are not products, but are ultimately specified through
process requirements rather than by their unit source.
facility_recycle : [:class:`~thermosteam.Stream`], optional
Recycle stream between facilities and system path.
"""
network = Network.from_feedstock(feedstock, feeds, ends)
return cls.from_network(ID, network, facilities, facility_recycle)
@classmethod
def from_network(cls, ID, network, facilities=(), facility_recycle=None):
"""
Create a System object from a network.
Parameters
----------
ID : str
Name of system.
network : Network
Network that defines the simulation path.
facilities : Iterable[Facility]
Offsite facilities that are simulated only after
completing the path simulation.
facility_recycle : [:class:`~thermosteam.Stream`], optional
Recycle stream between facilities and system path.
"""
facilities = Facility.ordered_facilities(facilities)
isa = isinstance
path = tuple([(cls.from_network('', i) if isa(i, Network) else i)
for i in network.path])
self = cls.__new__(cls)
self.units = network.units
self.streams = streams = network.streams
self.feeds = feeds = network.feeds
self.products = products = network.products
self._specification = None
self._set_recycle(network.recycle)
self._reset_errors()
self._set_path(path)
self._set_facilities(facilities)
self._set_facility_recycle(facility_recycle)
self._register(ID)
if facilities:
f_streams = bst.utils.streams_from_path(facilities)
f_feeds = bst.utils.feeds(f_streams)
f_products = bst.utils.products(f_streams)
streams.update(f_streams)
feeds.update(f_feeds)
products.update(f_products)
self._finalize_streams()
return self
def __init__(self, ID, path, recycle=None, facilities=(), facility_recycle=None):
self._specification = None
self._set_recycle(recycle)
self._load_flowsheet()
self._reset_errors()
self._set_path(path)
self._load_units()
self._set_facilities(facilities)
self._set_facility_recycle(facility_recycle)
self._load_streams()
self._finalize_streams()
self._register(ID)
specification = Unit.specification
save_report = save_report
def _load_flowsheet(self):
self.flowsheet = flowsheet_module.main_flowsheet.get_flowsheet()
def _set_recycle(self, recycle):
assert recycle is None or isinstance(recycle, Stream), (
"recycle must be a Stream instance or None, not "
f"{type(recycle).__name__}"
)
self._recycle = recycle
def _set_path(self, path):
#: tuple[Unit, function and/or System] A path that is run element
#: by element until the recycle converges.
self.path = path
#: set[System] All subsystems in the system
self.subsystems = subsystems = set()
#: list[Unit] Network of only unit operations
self._unit_path = unit_path = []
#: set[Unit] All units that have costs.
self._costunits = costunits = set()
isa = isinstance
for i in path:
if i in unit_path: continue
if isa(i, Unit):
unit_path.append(i)
elif isa(i, System):
unit_path.extend(i._unit_path)
subsystems.add(i)
costunits.update(i._costunits)
#: set[Unit] All units in the path that have costs
self._path_costunits = path_costunits = {i for i in unit_path
if i._design or i._cost}
costunits.update(path_costunits)
def _load_units(self):
#: set[Unit] All units within the system
self.units = set(self._unit_path) | self._costunits
def _set_facilities(self, facilities):
#: tuple[Unit, function, and/or System] Offsite facilities that are simulated only after completing the path simulation.
self._facilities = facilities = tuple(facilities)
subsystems = self.subsystems
costunits = self._costunits
units = self.units
isa = isinstance
for i in facilities:
if isa(i, Unit):
i._load_stream_links()
units.add(i)
if i._cost: costunits.add(i)
if isa(i, Facility) and not i._system: i._system = self
elif isa(i, System):
units.update(i.units)
subsystems.add(i)
costunits.update(i._costunits)
def _set_facility_recycle(self, recycle):
if recycle:
system = self._downstream_system(recycle.sink)
#: [FacilityLoop] Recycle loop for converging facilities
self._facility_loop = FacilityLoop(system, recycle)
else:
self._facility_loop = None
def _load_streams(self):
#: set[:class:`~thermosteam.Stream`] All streams within the system
self.streams = streams = set()
for u in self.units:
streams.update(u._ins + u._outs)
for sys in self.subsystems:
streams.update(sys.streams)
#: set[:class:`~thermosteam.Stream`] All feed streams in the system.
self.feeds = bst.utils.feeds(streams)
#: set[:class:`~thermosteam.Stream`] All product streams in the system.
self.products = bst.utils.products(streams)
def _load_stream_links(self):
for u in self._unit_path: u._load_stream_links()
def _filter_out_missing_streams(self):
for stream_set in (self.streams, self.feeds, self.products):
bst.utils.filter_out_missing_streams(stream_set)
def _finalize_streams(self):
self._load_stream_links()
self._filter_out_missing_streams()
@property
def TEA(self):
"""[TEA] Object for Techno-Economic Analysis."""
try: return self._TEA
except AttributeError: return None
@property
def facilities(self):
"""tuple[Facility] All system facilities."""
return self._facilities
@property
def recycle(self):
"""[:class:`~thermosteam.Stream`] A tear stream for the recycle loop"""
return self._recycle
@property
def converge_method(self):
"""Iterative convergence method ('wegstein', 'aitken', or 'fixed point')."""
return self._converge_method.__name__[1:]
@converge_method.setter
def converge_method(self, method):
if self.recycle is None:
raise ValueError(
"cannot set converge method when no recyle is specified")
method = method.lower().replace('-', '').replace(' ', '')
if 'wegstein' == method:
self._converge_method = self._wegstein
elif 'fixedpoint' == method:
self._converge_method = self._fixed_point
elif 'aitken' == method:
self._converge_method = self._aitken
else:
raise ValueError(
f"only 'wegstein', 'aitken', and 'fixed point' methods "
f"are valid, not '{method}'")
def _downstream_path(self, unit):
"""Return a list composed of the `unit` and everything downstream."""
if unit not in self.units: return []
elif self._recycle: return self.path
unit_found = False
downstream_units = unit._downstream_units
path = []
isa = isinstance
for i in self.path:
if unit_found:
if isa(i, System):
for u in i.units:
if u in downstream_units:
path.append(i)
break
elif i in downstream_units or not isa(i, Unit):
path.append(i)
else:
if unit is i:
unit_found = True
path.append(unit)
elif isa(i, System) and unit in i.units:
unit_found = True
path.append(i)
return path
def _downstream_system(self, unit):
"""Return a system with a path composed of the `unit` and
everything downstream (facilities included)."""
if unit is self.path[0]: return self
system = self._cached_downstream_systems.get((self, unit))
if system: return system
path = self._downstream_path(unit)
if path:
downstream_facilities = self._facilities
else:
unit_found = False
isa = isinstance
for pos, i in enumerate(self._facilities):
if unit is i or (isa(i, System) and unit in i.units):
downstream_facilities = self._facilities[pos:]
unit_found = True
break
assert unit_found, f'{unit} not found in system'
system = System(None, path,
facilities=downstream_facilities)
system._ID = f'{type(unit).__name__}-{unit} and downstream'
self._cached_downstream_systems[unit] = system
return system
def _minimal_digraph(self, **graph_attrs):
"""Return digraph of the path as a box."""
return minimal_digraph(self.ID, self.units, self.streams, **graph_attrs)
def _surface_digraph(self, **graph_attrs):
return surface_digraph(self.path)
def _thorough_digraph(self, **graph_attrs):
return digraph_from_units_and_streams(self.units, self.streams,
**graph_attrs)
def diagram(self, kind='surface', file=None, format='png', **graph_attrs):
"""Display a `Graphviz <https://pypi.org/project/graphviz/>`__ diagram of the system.
Parameters
----------
kind='surface' : {'thorough', 'surface', 'minimal'}:
* **'thorough':** Display every unit within the path.
* **'surface':** Display only elements listed in the path.
* **'minimal':** Display path as a box.
file=None : str, display in console by default
File name to save diagram.
format='png' : str
File format (e.g. "png", "svg").
"""
if kind == 'thorough':
f = self._thorough_digraph(format=format, **graph_attrs)
elif kind == 'surface':
f = self._surface_digraph(format=format, **graph_attrs)
elif kind == 'minimal':
f = self._minimal_digraph(format=format, **graph_attrs)
else:
raise ValueError(f"kind must be either 'thorough', 'surface', or 'minimal'")
finalize_digraph(f, file, format)
# Methods for running one iteration of a loop
def _iter_run(self, mol):
"""
Run the system at specified recycle molar flow rate.
Parameters
----------
mol : numpy.ndarray
Recycle molar flow rates.
Returns
-------
rmol : numpy.ndarray
New recycle molar flow rates.
unconverged : bool
True if recycle has not converged.
"""
if (mol < 0.).any():
raise InfeasibleRegion('material flow')
recycle = self.recycle
rmol = recycle.mol
rmol[:] = mol
T = recycle.T
self._run()
self._mol_error = mol_error = abs(mol - recycle.mol).sum()
self._T_error = T_error = abs(T - recycle.T)
self._iter += 1
if mol_error < self.molar_tolerance and T_error < self.temperature_tolerance:
unconverged = False
elif self._iter == self.maxiter:
raise RuntimeError(f'{repr(self)} could not converge' + self._error_info())
else:
unconverged = True
return rmol.copy(), unconverged
def _setup(self):
"""Setup each element of the system."""
isa = isinstance
for i in self.path:
if isa(i, (Unit, System)): i._setup()
def _run(self):
"""Rigorous run each element of the system."""
isa = isinstance
for i in self.path:
if isa(i, Unit):
run_unit_in_path(i)
elif isa(i, System):
converge_system_in_path(i)
else: i() # Assume it is a function
# Methods for converging the recycle stream
def _fixed_point(self):
"""Converge system recycle iteratively using fixed-point iteration."""
self._reset_iter()
flx.conditional_fixed_point(self._iter_run, self.recycle.mol.copy())
def _wegstein(self):
"""Converge the system recycle iteratively using wegstein's method."""
self._reset_iter()
flx.conditional_wegstein(self._iter_run, self.recycle.mol.copy())
def _aitken(self):
"""Converge the system recycle iteratively using Aitken's method."""
self._reset_iter()
flx.conditional_aitken(self._iter_run, self.recycle.mol.copy())
# Default converge method
_converge_method = _aitken
def _converge(self):
return self._converge_method() if self._recycle else self._run()
def _design_and_cost(self):
for i in self._path_costunits:
try_method_with_object_stamp(i, i._summary)
isa = isinstance
for i in self._facilities:
if isa(i, Unit):
simulate_unit_in_path(i)
elif isa(i, System):
simulate_system_in_path(i)
else:
i() # Assume it is a function
def _reset_iter(self):
self._iter = 0
for system in self.subsystems: system._reset_iter()
def reset_names(self, unit_format=None, stream_format=None):
"""Reset names of all streams and units according to the path order."""
Unit._default_ID = unit_format if unit_format else ['U', 0]
Stream._default_ID = stream_format if stream_format else ['d', 0]
streams = set()
units = set()
for i in self._unit_path:
if i in units: continue
try: i.ID = ''
except: continue
for s in (i._ins + i._outs):
if (s and s._sink and s._source
and s not in streams):
s.ID = ''
streams.add(s)
units.add(i)
def _reset_errors(self):
#: Molar flow rate error (kmol/hr)
self._mol_error = 0
#: Temperature error (K)
self._T_error = 0
#: Number of iterations
self._iter = 0
def reset_flows(self):
"""Reset all process streams to zero flow."""
from warnings import warn
warn(DeprecationWarning("'reset_flows' will be deprecated; please use 'empty_process_streams'"))
self.empty_process_streams()
def empty_process_streams(self):
"""Reset all process streams to zero flow."""
self._reset_errors()
feeds = self.feeds
for stream in self.streams:
if stream not in feeds: stream.empty()
def empty_recycles(self):
"""Reset all recycle streams to zero flow."""
self._reset_errors()
if self.recycle: self.recycle.empty()
for system in self.subsystems:
system.empty_recycles()
def reset_cache(self):
"""Reset cache of all unit operations."""
for unit in self.units: unit.reset_cache()
def simulate(self):
"""Converge the path and simulate all units."""
self._setup()
self._converge()
self._design_and_cost()
if self._facility_loop: self._facility_loop()
# Debugging
def _debug_on(self):
"""Turn on debug mode."""
self._run = _notify_run_wrapper(self, self._run)
self.path = path = list(self.path)
for i, item in enumerate(path):
if isinstance(item, Unit):
item._run = _method_debug(item, item._run)
elif isinstance(item, System):
item._converge = _method_debug(item, item._converge)
elif callable(item):
path[i] = _method_debug(item, item)
def _debug_off(self):
"""Turn off debug mode."""
self._run = self._run._original
path = self.path
for i, item in enumerate(path):
if isinstance(item, Unit):
item._run = item._run._original
elif isinstance(item, System):
item._converge = item._converge._original
elif callable(item):
path[i] = item._original
self.path = tuple(path)
def debug(self):
"""Converge in debug mode. Just try it!"""
self._debug_on()
try: self._converge()
finally: self._debug_off()
end = self._error_info()
if end:
print(f'\nFinished debugging{end}')
else:
print('\nFinished debugging')
# Representation
def __str__(self):
if self.ID: return self.ID
else: return type(self).__name__
def __repr__(self):
if self.ID: return f'<{type(self).__name__}: {self.ID}>'
else: return f'<{type(self).__name__}>'
def show(self):
"""Prints information on unit."""
print(self._info())
def to_network(self):
"""Return network that defines the system path."""
isa = isinstance
path = [(i.to_network() if isa(i, System) else i) for i in self.path]
network = Network.__new__(Network)
network.path = path
network.recycle = self.recycle
network.units = self.units
network.subnetworks = [i for i in path if isa(i, Network)]
network.feeds = self.feeds
network.products = self.products
return network
def _ipython_display_(self):
try: self.diagram('minimal')
except: pass
self.show()
def _error_info(self):
"""Return information on convergence."""
if self.recycle:
return (f"\n convergence error: Flow rate {self._mol_error:.2e} kmol/hr"
f"\n Temperature {self._T_error:.2e} K"
f"\n iterations: {self._iter}")
else:
return ""
def _info(self):
"""Return string with all specifications."""
if self.recycle is None:
recycle = ''
else:
recycle = f"\n recycle: {self.recycle}"
error = self._error_info()
path = strtuple(self.path)
i = 1; last_i = 0
while True:
i += 2
i = path.find(', ', i)
i_next = path.find(', ', i+2)
if (i_next-last_i) > 35:
path = (path[:i] + '%' + path[i:])
last_i = i
elif i == -1: break
path = path.replace('%, ', ',\n' + ' '*8)
if self.facilities:
facilities = strtuple(self.facilities)
i = 1; last_i = 0
while True:
i += 2
i = facilities.find(', ', i)
if (i - last_i) > 35:
facilities = (facilities[:i] + '%' + facilities[i:])
last_i = i
elif i == -1: break
facilities = facilities.replace('%, ', ',\n'+' '*14)
facilities = f"\n facilities: {facilities}"
else:
facilities = ''
return (f"System: {self.ID}"
+ recycle
+ f"\n path: {path}"
+ facilities
+ error)
class FacilityLoop(metaclass=system):
__slots__ = ('system', 'recycle',
'_mol_error', '_T_error', '_iter')
#: Maximum number of iterations to solve facilities
maxiter = 50
#: Molar tolerance (kmol/hr)
molar_tolerance = 0.50
#: Temperature tolerance (K)
temperature_tolerance = 0.10
def __init__(self, system, recycle):
self.system = system
self.recycle = recycle
self._reset_errors()
_reset_errors = System._reset_errors
_error_info = System._error_info
_iter_run = System._iter_run
_fixed_point = System._fixed_point
_aitken = System._aitken
_wegstein = System._wegstein
_converge_method = System._converge_method
converge_method = System.converge_method
def _reset_iter(self):
self.system._reset_iter()
self._iter = 0
def _run(self): self.system.simulate()
def __call__(self): self._converge_method()
def __repr__(self):
return f"<{type(self).__name__}: {self.system.ID}>"
from biosteam import _flowsheet as flowsheet_module
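# Hedged usage sketch (not from this module): assembling a recycle loop.
# The unit objects and stream IDs below are hypothetical placeholders; see
# the System docstring above for the real parameter contract.
#
# import biosteam as bst
# recycle = bst.Stream('recycle')
# sys = bst.System('sys', path=(unit1, unit2), recycle=recycle)
# sys.converge_method = 'wegstein'   # or 'aitken' / 'fixed point'
# sys.simulate()                     # converges the recycle, then costs units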
| 35.725564
| 128
| 0.582377
| 3,240
| 28,509
| 4.912037
| 0.119753
| 0.021112
| 0.004524
| 0.011938
| 0.330066
| 0.283632
| 0.250895
| 0.195287
| 0.167515
| 0.154697
| 0
| 0.002686
| 0.321021
| 28,509
| 798
| 129
| 35.725564
| 0.819497
| 0.20818
| 0
| 0.31028
| 0
| 0
| 0.058415
| 0.008555
| 0
| 0
| 0
| 0
| 0.003738
| 1
| 0.123364
| false
| 0.003738
| 0.026168
| 0.007477
| 0.228037
| 0.007477
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
082bb5b00799a75a854f5404ce105bcaeac6c3e7
| 1,005
|
py
|
Python
|
modules/AI/research/findContour.py
|
killax-d/Counter-Coins-API
|
97acede70e26b23f96883bb14e2bf6ace3759174
|
[
"MIT"
] | null | null | null |
modules/AI/research/findContour.py
|
killax-d/Counter-Coins-API
|
97acede70e26b23f96883bb14e2bf6ace3759174
|
[
"MIT"
] | null | null | null |
modules/AI/research/findContour.py
|
killax-d/Counter-Coins-API
|
97acede70e26b23f96883bb14e2bf6ace3759174
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
image = cv2.imread('original.png')
gray = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
blur = cv2.GaussianBlur(gray, (19, 19), 0)
# Apply a threshold to obtain a binary image
thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 1)
kernel = np.ones((3, 3), np.uint8)
# Apply erosion and opening to remove the outlines of small coins
closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=1)
contours, hierarchy = cv2.findContours(closing.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
area = cv2.contourArea(contour)
if area < 10000 or area > 50000:
continue
print(area)
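# cv2.fitEllipse needs at least five contour points to fit a conic section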
if len(contour) < 5:
continue
try:
ellipse = cv2.fitEllipse(contour)
cv2.ellipse(image, ellipse, (0,255,0), 2)
except:
pass
# write out the result image
cv2.imwrite('result.png', image)
| 30.454545
| 103
| 0.711443
| 144
| 1,005
| 4.895833
| 0.590278
| 0.022695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06152
| 0.175124
| 1,005
| 33
| 104
| 30.454545
| 0.788902
| 0.155224
| 0
| 0.086957
| 0
| 0
| 0.026005
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.043478
| 0.086957
| 0
| 0.086957
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
083461c10e66e08e6e0c8ad2d8f84b46b0b09e65
| 8,413
|
py
|
Python
|
python/src/ties/cli/test/ties_convert_tests.py
|
Noblis/ties-lib
|
e7c6165ebcd80e11b792fd4bcddf6ce634da0c60
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-04-10T19:02:27.000Z
|
2020-04-10T19:02:27.000Z
|
python/src/ties/cli/test/ties_convert_tests.py
|
Noblis/ties-lib
|
e7c6165ebcd80e11b792fd4bcddf6ce634da0c60
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
python/src/ties/cli/test/ties_convert_tests.py
|
Noblis/ties-lib
|
e7c6165ebcd80e11b792fd4bcddf6ce634da0c60
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
################################################################################
# Copyright 2019 Noblis, Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
################################################################################
import json
import os
import unittest
from stat import S_IRUSR
from tempfile import mkstemp
from unittest import TestCase
from ties.cli.ties_convert import main
from ties.util.testing import cli_test
short_usage = """\
usage: ties-convert [-h] [--classification-level SECURITY_TAG]
[--output-file OUTPUT_FILE | --in-place] [--version]
EXPORT_PATH"""
long_usage = """\
{}
Converts TIES export.json files from older versions of the schema (0.1.8, 0.2,
0.3, 0.4, 0.5, 0.6, 0.7, 0.8) to the current version (0.9).
positional arguments:
EXPORT_PATH the path to the TIES JSON file or - to read from stdin
optional arguments:
-h, --help show this help message and exit
--classification-level SECURITY_TAG, -c SECURITY_TAG
the classification level of the TIES JSON, required
for TIES JSON from pre-0.3 versions of the schema
--output-file OUTPUT_FILE, -f OUTPUT_FILE
the output file path for the converted TIES JSON
--in-place, -i modifies the input file in-place, overwriting it with
the converted JSON data
--version prints version information
""".format(short_usage)
test_input = """\
{
"version": "0.1.8",
"objectItem": [
{
"sha256Hash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"md5Hash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
]
}"""
test_output = """\
{
"version": "0.9",
"securityTag": "UNCLASSIFIED",
"objectItems": [
{
"objectId": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sha256Hash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"md5Hash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"authorityInformation": {
"securityTag": "UNCLASSIFIED"
}
}
]
}"""
class TiesConvertTests(TestCase):
def setUp(self):
self._default_args = ['--classification-level', 'UNCLASSIFIED']
fd, self._input_file_path = mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(test_input)
fd, self._output_file_path = mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(test_output)
def tearDown(self):
try:
os.remove(self._input_file_path)
except Exception: # pylint: disable=broad-except
pass
try:
os.remove(self._output_file_path)
except Exception: # pylint: disable=broad-except
pass
def _check_input_file_json(self, expected_json):
with open(self._input_file_path, 'r', encoding='utf-8') as f:
self.assertEqual(json.load(f), json.loads(expected_json))
def _check_output_file_json(self, expected_json):
with open(self._output_file_path, 'r', encoding='utf-8') as f:
self.assertEqual(json.load(f), json.loads(expected_json))
def test_no_args(self):
with cli_test(self, main) as t:
t.args([])
t.return_code(2)
t.stdout_text()
t.stderr(short_usage)
t.stderr('ties-convert: error: the following arguments are required: EXPORT_PATH')
t.stderr()
def test_help_short(self):
with cli_test(self, main) as t:
t.args(['-h'])
t.return_code(0)
t.stdout_text(long_usage)
t.stderr()
def test_help_long(self):
with cli_test(self, main) as t:
t.args(['--help'])
t.return_code(0)
t.stdout_text(long_usage)
t.stderr()
def test_stdin_stdout(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-'])
t.stdin(test_input)
t.return_code(0)
t.stdout_json(test_output)
t.stderr()
def test_infile_stdout(self):
with cli_test(self, main) as t:
t.args(self._default_args + [self._input_file_path])
t.return_code(0)
t.stdout_json(test_output)
t.stderr()
self._check_input_file_json(test_input)
def test_stdin_outfile(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-f', self._output_file_path, '-'])
t.stdin(test_input)
t.return_code(0)
t.stdout_text()
t.stderr()
self._check_output_file_json(test_output)
def test_infile_outfile(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-f', self._output_file_path, self._input_file_path])
t.return_code(0)
t.stdout_text()
t.stderr()
self._check_input_file_json(test_input)
self._check_output_file_json(test_output)
def test_inplace(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-i', self._input_file_path])
t.return_code(0)
t.stdout_text()
t.stderr()
self._check_input_file_json(test_output)
def test_inplace_stdin(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-i', '-'])
t.stdin(test_input)
t.return_code(0)
t.stdout_json(test_output)
t.stderr()
def test_inplace_outfile_error(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-i', '-f', self._output_file_path, self._input_file_path])
t.return_code(2)
t.stdout_text()
t.stderr(short_usage)
t.stderr('ties-convert: error: argument --output-file/-f: not allowed with argument --in-place/-i')
def test_inplace_write_error(self):
os.chmod(self._input_file_path, S_IRUSR)
with cli_test(self, main) as t:
t.args(self._default_args + ['-i', self._input_file_path])
t.return_code(1)
t.stdout_text()
t.stderr("error: could not write to file: {}".format(self._input_file_path))
def test_stdin_parse_exception(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-'])
t.return_code(1)
t.stdout_text()
t.stderr('error: could not parse JSON from stdin')
def test_infile_fnf(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['/file/not/found'])
t.return_code(1)
t.stdout_text()
t.stderr('error: could not read from file: /file/not/found')
def test_infile_parse_exception(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['/dev/null'])
t.return_code(1)
t.stdout_text()
t.stderr('error: could not read from file: /dev/null')
def test_outfile_fnf(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-f', '/dev/full', self._input_file_path])
t.return_code(1)
t.stdout_text()
t.stderr('error: could not write to file: /dev/full')
if __name__ == '__main__':
unittest.main()
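# Hedged CLI sketch (paths hypothetical): the behaviours exercised above map
# to invocations such as:
#   ties-convert --classification-level UNCLASSIFIED -i export.json
#   ties-convert -c UNCLASSIFIED -f converted.json export.json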
| 36.578261
| 111
| 0.562463
| 1,016
| 8,413
| 4.431102
| 0.188976
| 0.031986
| 0.03665
| 0.049978
| 0.496224
| 0.491337
| 0.491337
| 0.48534
| 0.469347
| 0.422923
| 0
| 0.010215
| 0.313443
| 8,413
| 229
| 112
| 36.737991
| 0.769217
| 0.127303
| 0
| 0.445055
| 0
| 0.016484
| 0.292744
| 0.050329
| 0
| 0
| 0
| 0
| 0.010989
| 1
| 0.104396
| false
| 0.010989
| 0.043956
| 0
| 0.153846
| 0.005495
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0834a96e609f196a4e397fc0d0398ea157ccd7e5
| 2,316
|
py
|
Python
|
Edge Detection.py
|
paulmtree/Lung-Segmentation-Project
|
2cffe09ce6a4818200d88b9e4e87155feb594366
|
[
"MIT"
] | 14
|
2020-11-10T16:47:54.000Z
|
2022-03-15T12:17:29.000Z
|
Edge Detection.py
|
paulmtree/Lung-Segmentation-Project
|
2cffe09ce6a4818200d88b9e4e87155feb594366
|
[
"MIT"
] | 3
|
2020-11-21T09:49:15.000Z
|
2021-05-30T23:58:30.000Z
|
Edge Detection.py
|
paulmtree/Lung-Segmentation-Project
|
2cffe09ce6a4818200d88b9e4e87155feb594366
|
[
"MIT"
] | 3
|
2021-11-04T18:08:53.000Z
|
2022-01-13T03:22:26.000Z
|
from PIL import Image, ImageFilter
import numpy as np
import glob
from numpy import array
import matplotlib.pyplot as plt
from skimage import morphology
import scipy.ndimage
import scipy.misc  # needed below for scipy.misc.imsave (removed in SciPy >= 1.2)
def sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1, display1 = True):
if (display1):
new_list = []
new_list.append(stack)
new_list.append(stack)
new_list.append(stack)
new_list.append(stack)
sample_stack(new_list, 2, 2, 0, 1, False)
else:
fig,ax = plt.subplots(rows,cols,figsize=[12,12])
for i in range((rows*cols)):
ind = start_with + i*show_every
ax[int(i/rows),int(i % rows)].set_title('slice %d' % ind)
ax[int(i/rows),int(i % rows)].imshow(stack[ind],cmap='gray')
ax[int(i/rows),int(i % rows)].axis('off')
plt.show()
"""
datapath = "jpg_images/"
img0 = Image.open("jpg_images/maskedimage" + str(0) + ".jpg")
counter = 0
img1 = []
for f in glob.glob('/Users/paulmccabe/Desktop/jpg images/*.jpg'):
path = "jpg_images/maskedimage" + str(counter) + ".jpg"
img0 = Image.open(path).convert('L')
img1.append(array(img0))
counter += 1
print("Counter: " + str(counter))
imgs_to_process_orig = np.stack([s for s in img1])
"""
id = 2
imgs = np.load("/Users/paulmccabe/Desktop/Segmentation Project/" + "justmask_%d.npy" % (id))
counter = 0
print("Saving as jpg Images...")
for img in imgs:
scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' + '/jpg mask images/justmask{}.jpg'.format(counter), img)
counter += 1
counter = 0
#print("Re-Importing jpg Images...")
#for f in glob.glob('/Users/paulmccabe/Desktop/Segmentation Project/jpg mask images/*.jpg'):
# path = "jpg_images/maskedimage" + str(counter) + ".jpg"
# img0 = Image.open(path).convert('L')
# img1.append(array(img0))
# counter += 1
imgs[imgs == 1] = 255
edge_list = []  # avoid shadowing the built-in 'list'
for img in imgs:
PIL_img = Image.fromarray(img.astype('uint8'))
PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES)
np_img = array(PIL_edge)
dilation = morphology.dilation(np_img, np.ones([4,4]))
edge_list.append(dilation)
imgs_after_processing = np.stack([s for s in edge_list])
np.save("/Users/paulmccabe/Desktop/Segmentation Project" + "/justedge_%d.npy" % (id), imgs_after_processing[:284])
#sample_stack(np_img)
| 35.090909
| 128
| 0.658895
| 342
| 2,316
| 4.359649
| 0.318713
| 0.042254
| 0.032193
| 0.04829
| 0.391683
| 0.336687
| 0.317907
| 0.28169
| 0.179745
| 0.179745
| 0
| 0.021568
| 0.179188
| 2,316
| 66
| 129
| 35.090909
| 0.762756
| 0.124784
| 0
| 0.195122
| 0
| 0
| 0.151836
| 0.084007
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.170732
| 0
| 0.195122
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08354cb83dbefe75aa87b426bfa4c3e544572c47
| 2,191
|
py
|
Python
|
benchmark.py
|
Umass-ITS/Open3D-PointNet2-Semantic3D
|
0254926f62cbca695aa1e76a18fec0863be5e455
|
[
"MIT"
] | 330
|
2019-04-10T21:31:24.000Z
|
2021-07-26T06:16:17.000Z
|
benchmark.py
|
largeword/Open3D-PointNet2-Semantic3D
|
3a9751dc724877933fc883320100796cef23489d
|
[
"MIT"
] | 44
|
2019-04-10T15:28:36.000Z
|
2021-06-22T17:39:05.000Z
|
benchmark.py
|
largeword/Open3D-PointNet2-Semantic3D
|
3a9751dc724877933fc883320100796cef23489d
|
[
"MIT"
] | 78
|
2019-04-08T09:39:29.000Z
|
2021-06-08T02:39:14.000Z
|
import json
import numpy as np
import tensorflow as tf
import time
from predict import Predictor
if __name__ == "__main__":
checkpoint = "logs/semantic_backup_full_submit_dec_10/best_model_epoch_275.ckpt"
hyper_params = json.loads(open("semantic.json").read())
predictor = Predictor(
checkpoint_path=checkpoint, num_classes=9, hyper_params=hyper_params
)
batch_size = 64
# Init data
points_with_colors = np.random.randn(batch_size, hyper_params["num_point"], 6)
# Warm up
pd_labels = predictor.predict(points_with_colors)
# Benchmark
s = time.time()
profiler = tf.profiler.Profiler(predictor.sess.graph)
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
_ = predictor.predict(
points_with_colors, run_options=run_options, run_metadata=run_metadata
)
profiler.add_step(0, run_metadata)
batch_time = time.time() - s
sample_time = batch_time / batch_size
print(
"Batch size: {}, batch_time: {}, sample_time: {}".format(
batch_size, batch_time, sample_time
)
)
option_builder = tf.profiler.ProfileOptionBuilder
opts = (
option_builder(option_builder.time_and_memory())
.with_step(-1) # with -1, should compute the average of all registered steps.
.with_file_output("tf-profile.txt")
.select(["micros", "bytes", "occurrence"])
.order_by("micros")
.build()
)
# Profiling info about ops is saved in 'tf-profile.txt' (set via with_file_output above)
profiler.profile_operations(options=opts)
for batch_size in [2 ** n for n in range(8)]:
# Init data
points_with_colors = np.random.randn(batch_size, hyper_params["num_point"], 6)
# Warm up
pd_labels = predictor.predict(points_with_colors)
# Benchmark
s = time.time()
_ = predictor.predict(points_with_colors)
batch_time = time.time() - s
sample_time = batch_time / batch_size
print(
"Batch size: {}, batch_time: {}, sample_time: {}".format(
batch_size, batch_time, sample_time
)
)
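# Hedged note (not in the original script): the loop above measures how
# per-sample latency amortizes with batch size; tf-profile.txt ranks ops by
# accumulated time and memory for the step registered via add_step(0, ...).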
| 30.013699
| 86
| 0.652214
| 274
| 2,191
| 4.912409
| 0.408759
| 0.066865
| 0.071322
| 0.077266
| 0.408618
| 0.36107
| 0.36107
| 0.36107
| 0.36107
| 0.36107
| 0
| 0.009042
| 0.242812
| 2,191
| 72
| 87
| 30.430556
| 0.802291
| 0.082154
| 0
| 0.313725
| 0
| 0
| 0.119381
| 0.032468
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.098039
| 0
| 0.098039
| 0.039216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
083725212ef9f198c79212406fcc54599eb1abb4
| 2,783
|
py
|
Python
|
framework/codejam/extract/cyclomatic_complexity.py
|
neizod/coding-analysis
|
cc086bcf204e570032d11b12a46ac819cfe93f2b
|
[
"MIT"
] | 1
|
2015-05-22T05:01:53.000Z
|
2015-05-22T05:01:53.000Z
|
framework/codejam/extract/cyclomatic_complexity.py
|
neizod/coding-analysis
|
cc086bcf204e570032d11b12a46ac819cfe93f2b
|
[
"MIT"
] | null | null | null |
framework/codejam/extract/cyclomatic_complexity.py
|
neizod/coding-analysis
|
cc086bcf204e570032d11b12a46ac819cfe93f2b
|
[
"MIT"
] | null | null | null |
import os
import json
import logging
from framework._utils import FunctionHook
class CodeJamExtractCyclomaticComplexity(FunctionHook):
''' This method will extract cyclomatic complexity from submitted code.
Need to run `extract language` first, since only some languages have an
implemented extractor (C, C++, and Python). '''
@staticmethod
def use_cmetrics(pid, pio, uname):
''' cmetrics is a tool for analysing cyclomatic complexity for
code written in C, C++. '''
from subprocess import getoutput
from framework._utils.misc import datapath
directory = datapath('codejam', 'source', pid, pio, uname)
data = getoutput('mccabe -n {}/*'.format(directory))
if not data:
return
for line in data.split('\n'):
*_, complexity, _ = line.split('\t')
yield int(complexity)
@staticmethod
def use_radon(pid, pio, uname):
''' radon is a tool for analysing cyclomatic complexity for
code written in Python. '''
from subprocess import getoutput
from framework._utils.misc import datapath
directory = datapath('codejam', 'source', pid, pio, uname)
data = json.loads(getoutput('radon cc -sj {}'.format(directory)))
for extracted_file in data.values():
if 'error' in extracted_file:
return
for extracted_func in extracted_file:
yield extracted_func['complexity']
def main(self, year, force=False, **_):
from framework._utils import write
from framework._utils.misc import datapath, make_ext
os.makedirs(datapath('codejam', 'extract'), exist_ok=True)
usepath = datapath('codejam', 'extract',
make_ext('language', year, 'json'))
outpath = datapath('codejam', 'extract',
make_ext('cyclomatic-complexity', year, 'json'))
if not force and os.path.isfile(outpath):
return logging.warn('output file already exists, aborting.')
extracted_data = json.load(open(usepath))
for submission in extracted_data:
pid = submission['pid']
pio = submission['io']
uname = submission['uname']
logging.info('extracting: %i %i %s', pid, pio, uname)
languages_set = set(submission.pop('languages'))
complexity = []
if {'Python'} & languages_set:
complexity += self.use_radon(pid, pio, uname)
if {'C', 'C++'} & languages_set:
complexity += self.use_cmetrics(pid, pio, uname)
submission['cyclomatic-complexity'] = sorted(complexity)
write.json(extracted_data, open(outpath, 'w'))
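# Hedged illustration (not part of the module): shape of the radon output
# consumed by use_radon() above. `radon cc -sj <dir>` returns JSON keyed by
# file path, e.g. {"sol.py": [{"name": "solve", "complexity": 4, ...}]},
# so the generator would yield 4 for that function.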
| 42.815385
| 75
| 0.606899
| 304
| 2,783
| 5.467105
| 0.361842
| 0.028881
| 0.04633
| 0.039711
| 0.33574
| 0.220217
| 0.198556
| 0.198556
| 0.198556
| 0.198556
| 0
| 0
| 0.285304
| 2,783
| 64
| 76
| 43.484375
| 0.835596
| 0.126123
| 0
| 0.196078
| 0
| 0
| 0.110034
| 0.017707
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.196078
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
083e03b527a87a9ebea41c58c4a9944e76e7007f
| 1,948
|
py
|
Python
|
extrator/test/test_pipeline.py
|
MinisterioPublicoRJ/robotj
|
946e9547eea6f548609f7ccfaf1c6a13fffece65
|
[
"MIT"
] | 3
|
2018-03-13T12:17:13.000Z
|
2021-04-18T19:55:04.000Z
|
extrator/test/test_pipeline.py
|
MinisterioPublicoRJ/robotj
|
946e9547eea6f548609f7ccfaf1c6a13fffece65
|
[
"MIT"
] | 1
|
2018-06-19T13:09:10.000Z
|
2018-06-19T13:09:10.000Z
|
extrator/test/test_pipeline.py
|
MinisterioPublicoRJ/robotj
|
946e9547eea6f548609f7ccfaf1c6a13fffece65
|
[
"MIT"
] | 1
|
2021-04-18T19:55:09.000Z
|
2021-04-18T19:55:09.000Z
|
from unittest.mock import patch, MagicMock
from unittest import TestCase
from ..crawler.pipeliner import pipeline
from ..settings import URL_PROCESSO
class Pipeline(TestCase):
@patch('robotj.extrator.crawler.pipeliner.parse_itens',
return_value={'d': 4})
@patch('robotj.extrator.crawler.pipeliner.parse_metadados',
return_value={'a': 1})
@patch('robotj.extrator.crawler.pipeliner.area_dos_metadados',
return_value=(0, 1))
@patch('robotj.extrator.crawler.pipeliner.BeautifulSoup')
@patch('robotj.extrator.crawler.pipeliner.cria_hash_do_processo')
@patch('robotj.extrator.crawler.pipeliner.requests')
@patch('robotj.extrator.crawler.pipeliner.formata_numero_processo')
def test_pipeline_do_parsing_dos_processos(self, _fnp, _req, _chdp, _bs,
_am, _pm, _pi):
processo = '1234'
numero_formatado = '1.2.3.4'
html = '{"a": 1}'
_resp_mock = MagicMock()
_resp_mock.content = html
_soup_mock = MagicMock()
_soup_mock.find_all.return_value = 'rows_mock'
_fnp.return_value = numero_formatado
_req.get.return_value = _resp_mock
_chdp.return_value = 'ab12'
_bs.return_value = _soup_mock
processos = pipeline(processo)
_fnp.assert_called_once_with(processo)
_req.get.assert_called_once_with(URL_PROCESSO.format(
doc_number=numero_formatado),
headers={'X-Forwarded-For': '10.0.250.15'},
timeout=10)
_chdp.assert_called_once_with(html)
_bs.assert_called_once_with(html, 'lxml')
_soup_mock.find_all.assert_called_once_with('tr')
_am.assert_called_once_with('rows_mock')
_pm.assert_called_once_with('rows_mock', '1.2.3.4', 0, 1)
_pi.assert_called_once_with(_soup_mock, '1234', 1)
self.assertEqual(processos, {'a': 1, 'd': 4, 'hash': 'ab12'})
| 38.96
| 76
| 0.661704
| 242
| 1,948
| 4.950413
| 0.322314
| 0.106845
| 0.106845
| 0.133556
| 0.301336
| 0.173623
| 0
| 0
| 0
| 0
| 0
| 0.026298
| 0.219199
| 1,948
| 49
| 77
| 39.755102
| 0.761341
| 0
| 0
| 0
| 0
| 0
| 0.232033
| 0.178131
| 0
| 0
| 0
| 0
| 0.219512
| 1
| 0.02439
| false
| 0
| 0.097561
| 0
| 0.146341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08418a8370fcf775a2fd7e29466ecc715efe0e4f
| 2,575
|
py
|
Python
|
tests/utils_test.py
|
asrashley/dash-live
|
1ffbc57896e4e46855a42af6ef79a1865ebfce55
|
[
"Apache-2.0"
] | 2
|
2019-11-02T06:26:29.000Z
|
2020-05-15T16:54:20.000Z
|
tests/utils_test.py
|
asrashley/dash-live
|
1ffbc57896e4e46855a42af6ef79a1865ebfce55
|
[
"Apache-2.0"
] | 1
|
2020-01-20T17:20:54.000Z
|
2020-01-21T08:38:30.000Z
|
tests/utils_test.py
|
asrashley/dash-live
|
1ffbc57896e4e46855a42af6ef79a1865ebfce55
|
[
"Apache-2.0"
] | null | null | null |
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import datetime
import os
import sys
import unittest
_src = os.path.join(os.path.dirname(__file__),"..", "src")
if _src not in sys.path:
sys.path.append(_src)
import utils
class DateTimeTests(unittest.TestCase):
def test_isoformat(self):
tests = [
('2009-02-27T10:00:00Z', datetime.datetime(2009,2,27,10,0,0, tzinfo=utils.UTC()) ),
('2013-07-25T09:57:31Z', datetime.datetime(2013,7,25,9,57,31, tzinfo=utils.UTC()) ),
('PT14H00M00S', datetime.timedelta(hours=14) ),
('PT26H00M00S', datetime.timedelta(hours=26) ),
('PT14H', datetime.timedelta(hours=14) ),
('PT1M00S', datetime.timedelta(minutes=1) ),
('PT2M', datetime.timedelta(minutes=2) ),
('PT1M0.00S', datetime.timedelta(minutes=1) ),
('PT45S', datetime.timedelta(seconds=45) ),
('PT4.5S', datetime.timedelta(seconds=4.5) ),
('PT01:45:19', datetime.timedelta(hours=1,minutes=45,seconds=19) ),
]
for test in tests:
tc = utils.from_isodatetime(test[0])
self.assertEqual(tc, test[1])
date_str = "2013-07-25T09:57:31Z"
date_val = utils.from_isodatetime(date_str)
# Don't check for the 'Z' because Python doesn't put the timezone in the isoformat string
isoformat = date_val.isoformat().replace('+00:00','Z')
self.assertEqual(isoformat,date_str)
date_str = "2013-07-25T09:57:31.123Z"
date_val = utils.from_isodatetime(date_str)
self.assertEqual(date_val.microsecond, 123000)
self.assertTrue(date_val.isoformat().startswith(date_str[:-1]))
class BufferedReaderTests(unittest.TestCase):
def test_buffer_reader(self):
r = bytearray('t'*65536)
#mem = memoryview(r)
for i in range(len(r)):
r[i] = i & 0xFF
br = utils.BufferedReader(StringIO.StringIO(r), buffersize=1024)
p = br.peek(8)
self.assertTrue(len(p) >= 8)
for i in range(8):
self.assertEqual(ord(p[i]), i)
self.assertEqual(br.tell(), 0)
p = br.read(8)
self.assertEqual(br.tell(), 8)
self.assertEqual(len(p), 8)
for i in range(8):
self.assertEqual(ord(p[i]), i)
p = br.read(8)
self.assertEqual(br.tell(), 16)
self.assertEqual(len(p), 8)
for i in range(8):
self.assertEqual(ord(p[i]), i+8)
if __name__ == "__main__":
unittest.main()
| 35.763889
| 97
| 0.597282
| 337
| 2,575
| 4.468843
| 0.362018
| 0.099602
| 0.063745
| 0.029216
| 0.21846
| 0.205843
| 0.179283
| 0.13413
| 0.095618
| 0.095618
| 0
| 0.087838
| 0.252816
| 2,575
| 71
| 98
| 36.267606
| 0.694906
| 0.041165
| 0
| 0.180328
| 0
| 0
| 0.070183
| 0.009736
| 0
| 0
| 0.001623
| 0
| 0.196721
| 1
| 0.032787
| false
| 0
| 0.131148
| 0
| 0.196721
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0845053b64f5370f1498b8e4729e90a827f0c839
| 6,329
|
py
|
Python
|
erpnext_taxjar/api.py
|
DigiThinkIT/erpnext_taxjar
|
5313dbdd931745e9655d3f5fd53c830abb0d7ee7
|
[
"MIT"
] | null | null | null |
erpnext_taxjar/api.py
|
DigiThinkIT/erpnext_taxjar
|
5313dbdd931745e9655d3f5fd53c830abb0d7ee7
|
[
"MIT"
] | 8
|
2017-07-01T11:13:14.000Z
|
2020-11-19T13:26:29.000Z
|
erpnext_taxjar/api.py
|
DigiThinkIT/erpnext_taxjar
|
5313dbdd931745e9655d3f5fd53c830abb0d7ee7
|
[
"MIT"
] | 13
|
2017-06-30T15:47:00.000Z
|
2022-02-22T16:24:41.000Z
|
import traceback
import pycountry
import taxjar
import frappe
from erpnext import get_default_company
from frappe import _
from frappe.contacts.doctype.address.address import get_company_address
TAX_ACCOUNT_HEAD = frappe.db.get_single_value("TaxJar Settings", "tax_account_head")
SHIP_ACCOUNT_HEAD = frappe.db.get_single_value("TaxJar Settings", "shipping_account_head")
def create_transaction(doc, method):
# Allow skipping creation of transaction for dev environment
# if taxjar_create_transactions isn't defined in site_config we assume
# we DO NOT want to create transactions all the time, except on production.
if not frappe.local.conf.get("taxjar_create_transactions", 0):
return
sales_tax = 0
for tax in doc.taxes:
if tax.account_head == TAX_ACCOUNT_HEAD:
sales_tax = tax.tax_amount
if not sales_tax:
return
tax_dict = get_tax_data(doc)
if not tax_dict:
return
tax_dict['transaction_id'] = doc.name
tax_dict['transaction_date'] = frappe.utils.today()
tax_dict['sales_tax'] = sales_tax
tax_dict['amount'] = doc.total + tax_dict['shipping']
client = get_client()
try:
client.create_order(tax_dict)
except taxjar.exceptions.TaxJarResponseError as err:
frappe.throw(_(sanitize_error_response(err)))
except Exception as ex:
print(traceback.format_exc())  # format_exc() takes no exception argument; it formats the active exception
def delete_transaction(doc, method):
client = get_client()
client.delete_order(doc.name)
def get_client():
taxjar_settings = frappe.get_single("TaxJar Settings")
if not taxjar_settings.api_key:
frappe.throw(_("The TaxJar API key is missing."), frappe.AuthenticationError)
api_key = taxjar_settings.get_password("api_key")
return taxjar.Client(api_key=api_key)
def get_shipping_address(doc):
company_address = get_company_address(get_default_company()).company_address
company_address = frappe.get_doc("Address", company_address)
shipping_address = None
if company_address:
if doc.shipping_address_name:
shipping_address = frappe.get_doc("Address", doc.shipping_address_name)
else:
shipping_address = company_address
return shipping_address
def get_tax_data(doc):
shipping_address = get_shipping_address(doc)
if not shipping_address:
return
if shipping_address.country:
country_code = frappe.db.get_value("Country", shipping_address.country, "code")
country_code = country_code.upper()
else:
frappe.throw(_("Please select a country!"))
if country_code != "US":
return
shipping = 0
for tax in doc.taxes:
if tax.account_head == SHIP_ACCOUNT_HEAD:
shipping += tax.tax_amount
shipping_state = shipping_address.get("state")
if shipping_state is not None:
# Handle shipments to military addresses
if shipping_state.upper() in ("AE", "AA", "AP"):
frappe.throw(_("""For shipping to overseas US bases, please
contact us with your order details."""))
else:
shipping_state = validate_state(shipping_address)
tax_dict = {
'to_country': country_code,
'to_zip': shipping_address.pincode,
'to_city': shipping_address.city,
'to_state': shipping_state,
'shipping': shipping,
'amount': doc.net_total
}
return tax_dict
def sanitize_error_response(response):
response = response.full_response.get("detail")
response = response.replace("_", " ")
sanitized_responses = {
"to zip": "Zipcode",
"to city": "City",
"to state": "State",
"to country": "Country"
}
for k, v in sanitized_responses.items():
response = response.replace(k, v)
return response
def set_sales_tax(doc, method):
if not doc.items:
return
# Allow skipping calculation of tax for dev environment
# if taxjar_calculate_tax isn't defined in site_config we assume
# we DO want to calculate tax all the time.
if not frappe.local.conf.get("taxjar_calculate_tax", 1):
return
if doc.exempt_from_sales_tax or frappe.db.get_value("Customer", doc.customer, "exempt_from_sales_tax"):
for tax in doc.taxes:
if tax.account_head == TAX_ACCOUNT_HEAD:
tax.tax_amount = 0
break
doc.run_method("calculate_taxes_and_totals")
return
tax_dict = get_tax_data(doc)
if not tax_dict:
# Remove existing tax rows if address is changed from a taxable state/country
setattr(doc, "taxes", [tax for tax in doc.taxes if tax.account_head != TAX_ACCOUNT_HEAD])
return
tax_data = validate_tax_request(tax_dict)
if tax_data is not None:
if not tax_data.amount_to_collect:
setattr(doc, "taxes", [tax for tax in doc.taxes if tax.account_head != TAX_ACCOUNT_HEAD])
elif tax_data.amount_to_collect > 0:
# Loop through tax rows for existing Sales Tax entry
# If none are found, add a row with the tax amount
for tax in doc.taxes:
if tax.account_head == TAX_ACCOUNT_HEAD:
tax.tax_amount = tax_data.amount_to_collect
doc.run_method("calculate_taxes_and_totals")
break
else:
doc.append("taxes", {
"charge_type": "Actual",
"description": "Sales Tax",
"account_head": TAX_ACCOUNT_HEAD,
"tax_amount": tax_data.amount_to_collect
})
doc.run_method("calculate_taxes_and_totals")
def validate_address(doc, address):
# Validate address using PyCountry
tax_dict = get_tax_data(doc)
if tax_dict:
# Validate address using TaxJar
validate_tax_request(tax_dict)
def validate_tax_request(tax_dict):
client = get_client()
try:
tax_data = client.tax_for_order(tax_dict)
except taxjar.exceptions.TaxJarResponseError as err:
frappe.throw(_(sanitize_error_response(err)))
else:
return tax_data
def validate_state(address):
country_code = frappe.db.get_value("Country", address.get("country"), "code")
error_message = _("""{} is not a valid state! Check for typos or enter the ISO code for your state.""".format(address.get("state")))
state = address.get("state").upper().strip()
# The max length for ISO state codes is 3, excluding the country code
if len(state) <= 3:
address_state = (country_code + "-" + state).upper() # PyCountry returns state code as {country_code}-{state-code} (e.g. US-FL)
states = pycountry.subdivisions.get(country_code=country_code.upper())
states = [pystate.code for pystate in states]
if address_state in states:
return state
frappe.throw(error_message)
else:
try:
lookup_state = pycountry.subdivisions.lookup(state)
except LookupError:
frappe.throw(error_message)
else:
return lookup_state.code.split('-')[1]
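A quick illustration of the two validate_state paths above, assuming pycountry's documented lookup-by-name behavior (the example values are illustrative, not from this repo):

import pycountry

# Short path: "FL" is prefixed to "US-FL" and checked against US subdivision codes.
us_codes = [s.code for s in pycountry.subdivisions.get(country_code="US")]
assert "US-FL" in us_codes

# Long path: a full name is resolved by lookup(), and the part after "US-" is returned.
assert pycountry.subdivisions.lookup("Florida").code.split('-')[1] == "FL"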
| 26.931915
| 133
| 0.746721
| 927
| 6,329
| 4.848975
| 0.199569
| 0.044049
| 0.046719
| 0.034038
| 0.322358
| 0.249166
| 0.241824
| 0.194883
| 0.194883
| 0.173971
| 0
| 0.001676
| 0.151367
| 6,329
| 234
| 134
| 27.047009
| 0.835226
| 0.123084
| 0
| 0.297468
| 0
| 0
| 0.137308
| 0.026378
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063291
| false
| 0.006329
| 0.044304
| 0
| 0.208861
| 0.006329
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08453ede8c646dbf40688a3665092cf3d4f4e359
| 3,543
|
py
|
Python
|
tests/lib_test.py
|
grundrauschen/center-points
|
5a12f68ac012a0a2bf52d8a8381d0272e309ac18
|
[
"MIT"
] | null | null | null |
tests/lib_test.py
|
grundrauschen/center-points
|
5a12f68ac012a0a2bf52d8a8381d0272e309ac18
|
[
"MIT"
] | 2
|
2015-06-03T10:57:13.000Z
|
2015-09-15T12:43:22.000Z
|
tests/lib_test.py
|
fu-berlin-swp-2014/center-points
|
0fa523314a3168d4d229b6f61d0d05d314a8b35a
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
import numpy.testing as nptest
import centerpoints.lib as lib
class TestLibrary(unittest.TestCase):
def setUp(self):
# { dimension -> points }
self.d_plus_2_points = {}
for d in [3, 5, 10, 100]:
# we need d+2 points, first take all bases
bases = np.eye(d)
self.d_plus_2_points[d] = \
np.concatenate((bases,
[bases[0] + bases[1],
bases[1] + bases[2]]))
def test_find_alphas(self):
for points in self.d_plus_2_points.values():
alphas = lib._find_alphas(points)
self.assertEqual(type(alphas), type(np.array([])))
self.assertEqual(len(alphas), len(points))
greater_idx = alphas > 0
smaller_idx = ~ greater_idx
smaller_sum = np.sum(alphas[smaller_idx])
greater_sum = np.sum(alphas[greater_idx])
# make sure it is not the trivial solution
self.assertNotAlmostEqual(smaller_sum, 0)
self.assertAlmostEqual(greater_sum + smaller_sum, 0)
def test_radon_point(self):
for points in self.d_plus_2_points.values():
alphas = lib._find_alphas(points)
radon_tuple = lib.radon_point(points)
self.assertEqual(type(radon_tuple), np.ndarray)
radon = np.asmatrix(radon_tuple)
greater_idx = alphas > 0
greater_alphas = np.asmatrix(alphas[greater_idx])
greater_points = np.asmatrix(points[greater_idx])
sum_greater = np.sum(greater_alphas)
nptest.assert_allclose(radon / sum_greater, radon * sum_greater)
nptest.assert_allclose(radon / sum_greater,
greater_alphas * greater_points)
smaller_alphas = np.asmatrix(alphas[~ greater_idx])
smaller_points = np.asmatrix(points[~ greater_idx])
nptest.assert_allclose(smaller_alphas * smaller_points,
radon / np.sum(smaller_alphas),
atol=1e-15)
def test_solve_homogeneous(self):
M = np.array([[1, 0, 0, 0, 2],
[0, 0, 3, 0, 0],
[0, 0, 0, 0, 0],
[0, 4, 0, 0, 0]])
null = lib.solve_homogeneous(M)
nptest.assert_allclose(np.dot(M, null), np.zeros(4), atol=1e-10)
def test_null_space(self):
# simple example with a one-dimensional null space
a = np.array([[2, 3, 5], [-4, 2, 3], [0, 0, 0]])
null_space_a = lib.null_space(a)
nptest.assert_allclose(np.dot(a, null_space_a), np.zeros_like(null_space_a), atol=1e-10)
nptest.assert_allclose(np.dot(a, (2*null_space_a)),
np.zeros_like(null_space_a),
atol=1e-10)
nptest.assert_allclose(np.dot(a, (10*null_space_a)),
np.zeros_like(null_space_a),
atol=1e-10)
# advanced example with a 3-dimensional null space
b = np.array([[1, 1, 1, 2, 3],
[1, 0, 1, 2, 3],
[1, 0, 1, 2, 3],
[1, 0, 1, 2, 3],
[1, 0, 1, 2, 3]])
null_space_b = lib.null_space(b)
null_vec = 2*null_space_b[:, 0] + 4*null_space_b[:, 1]
nptest.assert_allclose(np.dot(b, null_vec),
np.zeros_like(null_vec),
atol=1e-10)
| 36.90625
| 76
| 0.519616
| 444
| 3,543
| 3.941441
| 0.195946
| 0.016
| 0.015429
| 0.011429
| 0.322857
| 0.276
| 0.133143
| 0.128571
| 0.128571
| 0.128571
| 0
| 0.043848
| 0.369179
| 3,543
| 95
| 77
| 37.294737
| 0.73915
| 0.05899
| 0
| 0.202899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 1
| 0.072464
| false
| 0
| 0.057971
| 0
| 0.144928
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
084547589496d6e3bddafc72879279f994ed30e1
| 711
|
py
|
Python
|
genome-experimentation/cleaning-genome-data.py
|
shivamsyal/summer21
|
68cdcae1524e720066e57baa190f15477b69515a
|
[
"MIT"
] | null | null | null |
genome-experimentation/cleaning-genome-data.py
|
shivamsyal/summer21
|
68cdcae1524e720066e57baa190f15477b69515a
|
[
"MIT"
] | null | null | null |
genome-experimentation/cleaning-genome-data.py
|
shivamsyal/summer21
|
68cdcae1524e720066e57baa190f15477b69515a
|
[
"MIT"
] | 2
|
2022-01-10T18:16:18.000Z
|
2022-03-20T01:17:28.000Z
|
# test comment
import os
filename = input("File to format: ")
os.system("gunzip "+filename)
n = int(input("What number genome is this? "))
os.system("mv "+filename[:-3]+" genome"+str(n)+".fna")
original = "genome"+str(n)+".fna"
copy = "genome"+str(n)+"_copy.fna"
filtered = "genome"+str(n)+"_filtered.fna"
rem = ['>']
with open(original) as old, open(copy,'w') as new:
for line in old:
if not any(bad in line for bad in rem):
new.write(line)
with open(copy) as f, open(filtered,'a') as f2:
f2.write("".join(line.strip() for line in f))
with open(filtered, 'r+') as inp:
y = inp.read().upper()
inp.truncate(0)
with open(filtered, 'a') as out:
out.write(y)
os.remove(copy)
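The copy/filter/uppercase steps above can be collapsed into one pass; a minimal sketch, assuming the same original and filtered names defined earlier:

with open(original) as old, open(filtered, 'w') as out:
    # drop FASTA header lines (containing '>'), strip newlines, uppercase, in one pass
    out.write("".join(line.strip() for line in old if '>' not in line).upper())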
| 30.913043
| 54
| 0.624473
| 119
| 711
| 3.714286
| 0.453782
| 0.081448
| 0.090498
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006826
| 0.175809
| 711
| 22
| 55
| 32.318182
| 0.74744
| 0.016878
| 0
| 0
| 0
| 0
| 0.164993
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.047619
| 0
| 0.047619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
084592c05031adcf4e22889393a72a2880d58eb8
| 758
|
py
|
Python
|
villas/controller/components/managers/generic.py
|
VILLASframework/VILLAScontroller
|
e672439797f209afdd5bc62078f7d49c60269aa4
|
[
"Apache-2.0"
] | null | null | null |
villas/controller/components/managers/generic.py
|
VILLASframework/VILLAScontroller
|
e672439797f209afdd5bc62078f7d49c60269aa4
|
[
"Apache-2.0"
] | null | null | null |
villas/controller/components/managers/generic.py
|
VILLASframework/VILLAScontroller
|
e672439797f209afdd5bc62078f7d49c60269aa4
|
[
"Apache-2.0"
] | null | null | null |
from villas.controller.components.manager import Manager
from villas.controller.component import Component
class GenericManager(Manager):
def create(self, payload):
component = Component.from_dict(payload.get('parameters'))
try:
self.add_component(component)
except KeyError:
self.logger.error('A component with the UUID %s already exists',
component.uuid)
def delete(self, payload):
parameters = payload.get('parameters')
uuid = parameters.get('uuid')
try:
comp = self.components[uuid]
self.remove_component(comp)
except KeyError:
self.logger.error('There is no component with UUID %s', uuid)
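A hedged usage sketch; the payload shape ({'parameters': ...}) is inferred from the two handlers above, and the constructor arguments and UUID value are placeholders:

manager = GenericManager(...)  # constructor arguments depend on the Manager base class
manager.create({'parameters': {'uuid': 'hypothetical-uuid', 'type': 'simulator'}})
manager.delete({'parameters': {'uuid': 'hypothetical-uuid'}})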
| 28.074074
| 76
| 0.62533
| 81
| 758
| 5.814815
| 0.432099
| 0.042463
| 0.084926
| 0.101911
| 0.123142
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.283641
| 758
| 26
| 77
| 29.153846
| 0.867403
| 0
| 0
| 0.222222
| 0
| 0
| 0.135884
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0846011f39bb03a7af3bf569426365af42543fe1
| 1,503
|
py
|
Python
|
udacity-program_self_driving_car_engineer_v2.0/module02-computer vision/exercise02-data acquisiton and visualization/visualization.py
|
linksdl/futuretec-project-self_driving_cars_projects
|
38e8f14543132ec86a8bada8d708eefaef23fee8
|
[
"MIT"
] | null | null | null |
udacity-program_self_driving_car_engineer_v2.0/module02-computer vision/exercise02-data acquisiton and visualization/visualization.py
|
linksdl/futuretec-project-self_driving_cars_projects
|
38e8f14543132ec86a8bada8d708eefaef23fee8
|
[
"MIT"
] | null | null | null |
udacity-program_self_driving_car_engineer_v2.0/module02-computer vision/exercise02-data acquisiton and visualization/visualization.py
|
linksdl/futuretec-project-self_driving_cars_projects
|
38e8f14543132ec86a8bada8d708eefaef23fee8
|
[
"MIT"
] | null | null | null |
"""
# !/usr/bin/env python
# -*- coding: utf-8 -*-
@Time : 2022/2/23 19:35
@Author : shengdl999links@gmail.com
@ProjectName : udacity-program_self_driving_car_engineer_v1.0_source.0
@File : visualization.py
"""
import glob
import os.path
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from PIL import Image
from utils import get_data
def viz(ground_truth):
"""
create a grid visualization of images with color coded bboxes
args:
- ground_truth [list[dict]]: ground truth data
"""
# IMPLEMENT THIS FUNCTION
paths = glob.glob('../data/images/*')
gt_dic = {}
# mapping to access data faster
for gt in ground_truth:
gt_dic[gt['filename']] = gt
# color mapping of classes
color_map = {1: [1, 0, 0], 2: [0, 1, 0], 4: [0, 0, 1]}
f, ax = plt.subplots(4, 5, figsize=(20, 10))
for i in range(20):
x = i % 4
y = i % 5
filename = os.path.basename(paths[i])
img = Image.open(paths[i])
ax[x, y].imshow(img)
bboxes = gt_dic[filename]['boxes']
classes = gt_dic[filename]['classes']
for cl, bb in zip(classes, bboxes):
y1, x1, y2, x2 = bb
rec = Rectangle((x1, y1), x2 - x1, y2 - y1, facecolor='none', edgecolor=color_map[cl])
ax[x, y].add_patch(rec)
ax[x, y].axis('off')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
ground_truth, _ = get_data()
viz(ground_truth)
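A note on the grid indexing in viz: x = i % 4, y = i % 5 happens to place all 20 images in distinct cells because 4 and 5 are coprime (Chinese remainder theorem), although i // 5, i % 5 would be the conventional row/column mapping:

assert len({(i % 4, i % 5) for i in range(20)}) == 20  # no two images share a cell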
| 25.05
| 98
| 0.594145
| 218
| 1,503
| 3.958716
| 0.53211
| 0.076477
| 0.013905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04529
| 0.265469
| 1,503
| 59
| 99
| 25.474576
| 0.736413
| 0.27678
| 0
| 0
| 0
| 0
| 0.048387
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.193548
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
084746dfc5f458e9131b1743d5567db36da8ab9c
| 898
|
py
|
Python
|
setup.py
|
georgenicolaou/python-fakeports
|
24eecf879e0d2d2a100be06952fb3677019457e2
|
[
"MIT"
] | 3
|
2020-02-03T08:25:10.000Z
|
2021-09-29T15:59:01.000Z
|
setup.py
|
georgenicolaou/python-fakeports
|
24eecf879e0d2d2a100be06952fb3677019457e2
|
[
"MIT"
] | 2
|
2021-01-18T19:27:44.000Z
|
2021-01-18T19:27:44.000Z
|
setup.py
|
georgenicolaou/python-fakeports
|
24eecf879e0d2d2a100be06952fb3677019457e2
|
[
"MIT"
] | null | null | null |
from setuptools import setup
long_description = 'TODO'
# with open("README.md", "r") as rfd:
# long_description = rfd.read()
REQUIREMENTS = [r.strip() for r in open("requirements.txt").readlines()]
setup(
name='python-fakeports',
version="0.1",
packages=['python_fakeports'],
url='',
license='GPL',
author='George Nicolaou',
author_email='george@silensec.com',
description='Python clone of portspoof',
long_description=long_description,
install_requires=REQUIREMENTS,
data_files=[
('/etc/fakeports/', ['fakeports.yml.sample']),
('/usr/local/bin/', ['bin/fakeports.tac'])
],
scripts=['bin/fakeportsctl', 'bin/fakeportsd'],
platforms='any',
classifiers=[line.strip() for line in '''\
Development Status :: 4 - Beta
Intended Audience :: System Administrators
Operating System :: POSIX :: Linux
'''.splitlines() if line.strip()]
)
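For reference, iterating the triple-quoted string directly would yield single characters; with splitlines() the comprehension evaluates to the three intended trove classifiers:

# ['Development Status :: 4 - Beta',
#  'Intended Audience :: System Administrators',
#  'Operating System :: POSIX :: Linux']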
| 28.967742
| 72
| 0.644766
| 99
| 898
| 5.767677
| 0.69697
| 0.105079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004132
| 0.191537
| 898
| 30
| 73
| 29.933333
| 0.782369
| 0.076837
| 0
| 0
| 0
| 0
| 0.416465
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038462
| 0
| 0.038462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08486cbf36ba6ba189128910a8b98a815a664466
| 938
|
py
|
Python
|
python/17_letter_combinations_of_a_phone_number.py
|
dchapp/blind75
|
aaa409cf2db4ef6d0f86177f4217eceeb391caa8
|
[
"MIT"
] | null | null | null |
python/17_letter_combinations_of_a_phone_number.py
|
dchapp/blind75
|
aaa409cf2db4ef6d0f86177f4217eceeb391caa8
|
[
"MIT"
] | null | null | null |
python/17_letter_combinations_of_a_phone_number.py
|
dchapp/blind75
|
aaa409cf2db4ef6d0f86177f4217eceeb391caa8
|
[
"MIT"
] | null | null | null |
num_to_letters = {
'2': ['a', 'b', 'c'],
'3': ['d', 'e', 'f'],
'4': ['g', 'h', 'i'],
'5': ['j', 'k', 'l'],
'6': ['m', 'n', 'o'],
'7': ['p', 'q', 'r', 's'],
'8': ['t', 'u', 'v'],
'9': ['w', 'x', 'y', 'z'],
}
from typing import List  # required by the List[str] annotation below

class Solution:
def letterCombinations(self, digits: str) -> List[str]:
if len(digits) == 0:
return []
return self.recursive(digits)
def recursive(self, digits):
words = set()
digit_idx = 0
def worker(digits, digit_idx, current_word):
candidates = num_to_letters[digits[digit_idx]]
for c in candidates:
if digit_idx == len(digits)-1:
words.add(current_word + c)
else:
worker(digits, digit_idx+1, current_word + c)
worker(digits, 0, "")
return list(words)
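A small usage check (output order varies because a set is used internally, so we sort):

print(sorted(Solution().letterCombinations("23")))
# ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']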
| 28.424242
| 65
| 0.410448
| 108
| 938
| 3.453704
| 0.583333
| 0.107239
| 0.112601
| 0.107239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022807
| 0.392324
| 938
| 32
| 66
| 29.3125
| 0.631579
| 0
| 0
| 0
| 0
| 0
| 0.036247
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.259259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
084eddbd29309d0a8c29e8b0baeae41ed4f83c9f
| 7,420
|
py
|
Python
|
logicscen.py
|
exposit/pythia-oracle
|
60e4e806c9ed1627f2649822ab1901d28933daac
|
[
"MIT"
] | 32
|
2016-08-27T01:31:42.000Z
|
2022-03-21T08:59:28.000Z
|
logicscen.py
|
exposit/pythia-oracle
|
60e4e806c9ed1627f2649822ab1901d28933daac
|
[
"MIT"
] | 3
|
2016-08-27T00:51:47.000Z
|
2019-08-26T13:23:04.000Z
|
logicscen.py
|
exposit/pythia-oracle
|
60e4e806c9ed1627f2649822ab1901d28933daac
|
[
"MIT"
] | 10
|
2016-08-28T14:14:41.000Z
|
2021-03-18T03:24:22.000Z
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#---------------------------------------------------------------------------------------------------
# --> Logic to handle scenarios
#---------------------------------------------------------------------------------------------------
import imports
from imports import *
import config
import logic
from logic import *
def parseRefs(source):
start_sep='[['
end_sep=']]'
result=[]
tmp=source.split(start_sep)
for par in tmp:
if end_sep in par:
result.append(par.split(end_sep)[0])
for clause in result:
action, text, link = clause.split('|')
new = "[ref=" + action + "_" + link + "][color=" + config.formats['link_color'] + "]" + text + "[/color][/ref]"
source = source.replace("[[" + clause + "]]", new, 1)
return source
def parseTextVariables(self, source):
start_sep='<<'
end_sep='>>'
result=[]
tmp=source.split(start_sep)
try:
mod = config.curr_game_dir + "scenlogic.py"
filename = mod.split('/')[-1]
pyfile = filename.split('.')[0]
scenlogic = imp.load_source( pyfile, mod)
except:
pass
for par in tmp:
if end_sep in par:
result.append(par.split(end_sep)[0])
for clause in result:
try:
a = clause.split("if ")[0]
except:
a = clause
try:
if a.split('.')[0] == 'var':
a = config.scenario[ a.split('.')[1] ]
else:
a = eval("scenlogic." + a)(self)
except:
pass
try:
b = clause.split(" else ")[-1]
except:
b = ""
try:
if b.split('.')[0] == 'var':
b = config.scenario[ b.split('.')[1] ]
else:
b = eval("scenlogic." + b)(self)
except:
pass
try:
condition = clause.split("if ")[1]
condition = condition.split(" else ")[0]
except:
condition = ""
try:
condition = config.scenario[ condition ]
except:
pass
try:
condition = eval("scenlogic." + condition)(self)
except:
pass
try:
if condition == True:
new = a
else:
new = b
except:
new = a
source = source.replace("<<" + clause + ">>", new, 1)
return source
def clearOldLinks(self, ref):
for i in range(len(config.textLabelArray)):
newtext = config.textLabelArray[i].text
colorList = re.findall('(?:[0-9a-fA-F]{3}){2}', newtext)
for color in colorList:
newtext = newtext.replace(color, "")
newtext = newtext.replace("[ref=" + ref + "]", "")
newtext = newtext.replace("[/ref]", "")
newtext = newtext.replace("[/color]", "")
config.textLabelArray[i].text = newtext
config.textArray[config.textLabelArray[i].index] = config.textLabelArray[i].text
def refPress(*args):
self = args[0].self
label = args[0]
subtype, text = args[1].split('_')
subtype = subtype[:1]
ref = args[1]
print(label.index)
try:
mod = config.curr_game_dir + "scenlogic.py"
filename = mod.split('/')[-1]
pyfile = filename.split('.')[0]
scenlogic = imp.load_source( pyfile, mod)
except:
pass
if subtype == "d":
block = config.scenario['block']
#try:
# base = config.advDict[block][text]
#except:
base = config.scenario['descRefs'][text]
try:
eval("scenlogic." + base[3])(self)
except:
pass
display = parseTextVariables(self, base[0])
display = parseRefs(display)
logic.updateCenterDisplay(self, display, base[1])
if base[2] == 'repeatable':
newtext = label.text
colorList = re.findall('(?:[0-9a-fA-F]{3}){2}', newtext)
for color in colorList:
newtext = newtext.replace(color, config.formats['visited_link_color'])
label.text = newtext
config.textArray[label.index] = label.text
else:
newtext = label.text
colorList = re.findall('(?:[0-9a-fA-F]{3}){2}', newtext)
for color in colorList:
newtext = newtext.replace(color, "")
newtext = newtext.replace("[ref=" + ref + "]", "")
newtext = newtext.replace("[/ref]", "")
newtext = newtext.replace("[/color]", "")
label.text = newtext
config.textArray[label.index] = label.text
elif subtype == "t":
block = config.scenario['block']
base = config.scenario['toggleRefs'][text]
label.text = base[0]
config.textArray[label.index] = label.text
elif subtype == "j":
block = config.scenario['block']
try:
base = config.advDict[block][text]
except:
base = config.scenario['jumpRefs'][text]
destination = base['jump']
try:
exitmsg = base['exitmsg']
except:
exitmsg = "..."
try:
exitformat = base['exitformat']
except:
exitformat = "result"
try:
repeatable = base['repeatable']
except:
repeatable = "yes"
try:
pause = base['pause']
except:
pause = False
config.scenario['block'] = destination
# this was a jump; clear all older links
clearOldLinks(self, ref)
exitmsg = parseTextVariables(self, exitmsg)
exitmsg = parseRefs(exitmsg)
logic.updateCenterDisplay(self, exitmsg, exitformat)
if pause == False:
showCurrentBlock(self)
else:
more = "[ref=f_showCurrentBlock][color=" + config.formats['link_color'] + "]continue" + "[/color][/ref]"
logic.updateCenterDisplay(self, more, 'italic')
else:
# this is a function; clear all older links
clearOldLinks(self, ref)
try:
eval("scenlogic." + text)(self)
except:
pass
def showCurrentBlock(self, *args):
block = config.scenario['block']
result = ""
count = 0
for item in config.advDict[block]['text']:
count = count + 1
display = parseTextVariables(self, item[0])
display = parseRefs(display)
logic.updateCenterDisplay(self, display, item[1])
self.scenarioTitleLabel.text = config.advDict[block]['title']
showCurrentExits(self)
def showCurrentExits(self, *args):
block = config.scenario['block']
result = ""
try:
for item in config.advDict[block]['exits']:
display = parseTextVariables(self, item[0])
display = parseRefs(display)
logic.updateCenterDisplay(self, display, item[1])
except:
try:
for item in config.advDict[block]['exitlist']:
display = '[[jump|' + config.advDict[block][item]['display'] + '|' + item + ']]'
display = parseTextVariables(self, display)
display = parseRefs(display)
logic.updateCenterDisplay(self, display, config.advDict[block][item]['exitmsg'])
except:
pass
| 27.279412
| 119
| 0.508491
| 728
| 7,420
| 5.151099
| 0.171703
| 0.0448
| 0.0504
| 0.034667
| 0.5064
| 0.4888
| 0.4816
| 0.4096
| 0.378933
| 0.355467
| 0
| 0.00902
| 0.327628
| 7,420
| 271
| 120
| 27.380074
| 0.742634
| 0.053774
| 0
| 0.569307
| 0
| 0
| 0.073741
| 0.013408
| 0.004951
| 0
| 0
| 0
| 0
| 1
| 0.029703
| false
| 0.044554
| 0.024752
| 0
| 0.064356
| 0.004951
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0850f9781ec228546bf41eccc932a22fd036e4a8
| 7,980
|
py
|
Python
|
datyy/views/projects.py
|
VladimirSiv/datyy
|
4f3b54557850212ca3ce4c0d16cd56eb9989d7c4
|
[
"MIT"
] | null | null | null |
datyy/views/projects.py
|
VladimirSiv/datyy
|
4f3b54557850212ca3ce4c0d16cd56eb9989d7c4
|
[
"MIT"
] | null | null | null |
datyy/views/projects.py
|
VladimirSiv/datyy
|
4f3b54557850212ca3ce4c0d16cd56eb9989d7c4
|
[
"MIT"
] | null | null | null |
import dash
import dash_html_components as html
import dash_bootstrap_components as dbc
import numpy as np
from server import app
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from components.cards import simple_info_card
from components.dropdowns import dropdown_single
from components.cards import project_info_card
from components.tables import simple_table
from components.gantts import simple_gantt_graph
from logic.dropdowns import dropdown_single_logic
from logic.tables import generate_project_tasks_data
from logic.pie_charts import sunburst_chart_logic
from logic.gantts import simple_gantt_logic
layout = html.Div(
children=[
html.Div(id="project-temp", style={"display": "none"}),
dbc.Row(
className="main-row",
children=[
dbc.Col(
dropdown_single(
id_="project-select",
placeholder="Select Project",
text="Project:",
),
width=3,
),
],
),
dbc.Row(
className="main-row",
children=[
dbc.Col(
simple_info_card(
id_="project-card-planning",
title="Planning",
)
),
dbc.Col(
simple_info_card(
id_="project-card-design",
title="Design",
)
),
dbc.Col(
simple_info_card(
id_="project-card-development",
title="Development",
)
),
dbc.Col(
simple_info_card(
id_="project-card-testing",
title="Testing",
)
),
dbc.Col(
simple_info_card(
id_="project-card-cost",
title="Cost",
)
),
dbc.Col(
simple_info_card(
id_="project-card-duration",
title="Duration",
)
),
],
),
dbc.Row(
className="main-row",
children=[
dbc.Col(
project_info_card(
id_="budget-graph",
title="Budget spending",
subcomponents={
"project-budget": "Budget",
"project-remaining": "Remaining",
"project-currently": "Currently",
},
),
width=6,
),
dbc.Col(
simple_table(
id_="project-tasks-table",
title="Overdue tasks",
columns=[
"Overdue (days)",
"Task",
"Deadline",
"Employee",
],
),
width=6,
),
],
),
html.Div(
className="main-row", children=[html.H4("Milestones", className="title-bold")]
),
dbc.Row(
className="main-row",
children=[dbc.Col(simple_gantt_graph(id_="project-gantt-graph"))],
),
]
)
@app.callback(
[Output("project-select", "options"), Output("project-temp", "children")],
Input("url", "pathname"),
State("project-item", "data"),
)
def set_project_select_options(pathname, project_stored):
"""Sets project select options
Args:
pathname (str): Url pathname
project_stored (str): State of project value
Returns:
list: List of options
str: Project hidden value
Raises:
PreventUpdate: if arguments are not valid
"""
if pathname == "/datyy/projects":
project = project_stored
if project_stored is None:
project = 0
return dropdown_single_logic(), project
raise PreventUpdate
@app.callback(
[Output("project-item", "data"), Output("project-select", "value")],
[Input("project-temp", "children"), Input("project-select", "value")],
)
def set_hidden_project_item(hidden, dropdown_value):
"""Set state and selected project value
Args:
hidden (str): Hidden project value
dropdown_value (str): Selected project value
Returns:
str: State of project value
str: Selected project value
Raises:
PreventUpdate: if arguments are not valid
"""
ctx = dash.callback_context
if not ctx.triggered:
input_id = None
else:
input_id = ctx.triggered[0]["prop_id"].split(".")[0]
if input_id == "project-temp" and hidden is not None:
return hidden, int(hidden)
if input_id == "project-select" and dropdown_value is not None:
return dropdown_value, dropdown_value
raise PreventUpdate
@app.callback(
[
Output("project-card-" + card_type, "children")
for card_type in [
"planning",
"design",
"development",
"testing",
"cost",
"duration",
]
],
Input("project-select", "value"),
)
def set_project_card_info_values(value):
"""Sets project information values
Args:
value (str): Selected project value
Returns:
str: Project planning value
str: Project design value
str: Project development value
str: Project testing value
str: Project cost value
str: Project duration value
Raises:
PreventUpdate: if arguments are not valid
"""
if value is not None:
result = [str(x) + "%" for x in np.random.randint(100, size=4)]
result.append("$" + str(np.random.randint(100, 1000)))
result.append(str(np.random.randint(10, 20)) + " days")
return result
raise PreventUpdate
@app.callback(Output("project-tasks-table", "data"), Input("project-select", "value"))
def set_project_tasks_table(value):
"""Sets project tasks table data
Args:
value (str): Select project value
Returns:
obj: Table data
Raises:
PreventUpdate: if arguments are not valid
"""
if value is not None:
return generate_project_tasks_data()
raise PreventUpdate
@app.callback(
[
Output("project-budget", "children"),
Output("project-remaining", "children"),
Output("project-currently", "children"),
Output("budget-graph", "figure"),
],
Input("project-select", "value"),
)
def set_project_budget_info(value):
"""Sets project budget information
Args:
value (str): Selected project value
Returns:
str: Project budget value
str: Project remaining value
str: Project currently value
obj: Project Budget graph figure
Raises:
PreventUpdate: if arguments are not valid
"""
if value is not None:
result = list(np.random.randint(0, 1000, size=3))
result.append(sunburst_chart_logic())
return result
raise PreventUpdate
@app.callback(Output("project-gantt-graph", "figure"), Input("project-select", "value"))
def display_gantt_graph(value):
"""Displays gantt graph figure
Args:
value (str): Selected project value
Returns:
obj: Project gantt graph figure
Raises:
PreventUpdate: if arguments are not valid
"""
if value is not None:
return simple_gantt_logic()
raise PreventUpdate
| 28.098592
| 90
| 0.52193
| 764
| 7,980
| 5.328534
| 0.175393
| 0.025547
| 0.023581
| 0.023581
| 0.355687
| 0.336281
| 0.283468
| 0.226234
| 0.125031
| 0.067305
| 0
| 0.005659
| 0.37995
| 7,980
| 283
| 91
| 28.19788
| 0.817098
| 0.180827
| 0
| 0.378378
| 0
| 0
| 0.150685
| 0.010513
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032432
| false
| 0
| 0.086486
| 0
| 0.156757
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
085550c02672da4291f033dfdf10337c089c2aa8
| 16,119
|
py
|
Python
|
multiacctcf.py
|
DonMills/multiacct-CF-orchestrate
|
4acce3c984c1801ff66cf9d210e3a0d1a6f9246b
|
[
"MIT"
] | 11
|
2017-07-19T07:05:44.000Z
|
2022-02-07T19:35:51.000Z
|
multiacctcf.py
|
DonMills/multiacct-CF-orchestrate
|
4acce3c984c1801ff66cf9d210e3a0d1a6f9246b
|
[
"MIT"
] | null | null | null |
multiacctcf.py
|
DonMills/multiacct-CF-orchestrate
|
4acce3c984c1801ff66cf9d210e3a0d1a6f9246b
|
[
"MIT"
] | 2
|
2017-07-19T15:01:52.000Z
|
2022-02-07T19:35:53.000Z
|
#!/usr/bin/python
from __future__ import print_function
import threading
import boto3
import botocore
import argparse
from time import ctime
###############
# Some Global Vars
##############
lock = threading.Lock()
awsaccts = [{'acct': 'acct1ID',
'name': 'master',
'cffile': 'location of cloudformation file in S3'},
{'acct': 'acct2ID',
'name': 'dev',
'cffile': 'location of cloudformation file in S3'},
{'acct': 'acct3ID',
'name': 'staging',
'cffile': 'location of cloudformation file in S3'},
{'acct': 'acct4ID',
'name': 'test',
'cffile': 'location of cloudformation file in S3'},
{'acct': 'acct5ID',
'name': 'QA',
'cffile': 'location of cloudformation file in S3'}]
###################################
# This results dict is prepopulated with the info for the master vpc in a region. It will be overwritten
# if the master cloudform is run
###################################
results = {
'master': {
'CIDRblock': '172.0.1.0/22',
'RTBint': [
'rtb-xxxxxxxx',
'rtb-xxxxxxxx'],
'VPCID': 'vpc-xxxxxxxx'}}
threads = []
#######################
# The function that does CloudFormation and peering requests
#######################
def run_cloudform(acct, acctname, region, cffile, nopeer, results):
################
# Don't like these, but necessary due to scoping
###############
cfgood = None
ismaster = None
cidrblock = None
vpcid = None
rtbid = None
rtbid_inta = None
rtbid_intb = None
threadname = threading.current_thread().name
if acctname == "master":
ismaster = True
###################
# If we are running in master, we don't need sts creds
###################
if ismaster:
try:
cf = boto3.client('cloudformation',
region_name=region)
validate = cf.validate_template(
TemplateURL=cffile
)
cfgood = True
print(
"[%s] %s CloudFormation file %s validated successfully for account %s" %
(ctime(), threadname, cffile, acctname))
except botocore.exceptions.ClientError as e:
print(
"[%s] %s CloudFormation file %s validation failed for account %s with error: %s" %
(ctime(), threadname, cffile, acctname, e))
cfgood = False
###################
# Otherwise, we do.
###################
else:
with lock:
print(
"[%s] %s is assuming STS role for account %s" %
(ctime(), threadname, acctname))
try:
with lock:
sts = boto3.client('sts')
role = sts.assume_role(
RoleArn='arn:aws:iam::' + acct + ':role/MasterAcctRole',
RoleSessionName='STSTest',
DurationSeconds=900
)
accesskey = role["Credentials"]["AccessKeyId"]
secretkey = role["Credentials"]["SecretAccessKey"]
sessiontoken = role["Credentials"]["SessionToken"]
print(
"[%s] %s successfully assumed STS role for account %s" %
(ctime(), threadname, acctname))
except botocore.exceptions.ClientError as e:
with lock:
print(
"[%s] %s failed to assume role for account %s with error: %s" %
(ctime(), threadname, acctname, e))
with lock:
print(
"[%s] %s is verifying CloudFormation file %s for account %s" %
(ctime(), threadname, cffile, acctname))
try:
cf = boto3.client('cloudformation',
aws_access_key_id=accesskey,
aws_secret_access_key=secretkey,
aws_session_token=sessiontoken,
region_name=region)
validate = cf.validate_template(
TemplateURL=cffile
)
cfgood = True
with lock:
print(
"[%s] %s CloudFormation file %s validated successfully for account %s" %
(ctime(), threadname, cffile, acctname))
except botocore.exceptions.ClientError as e:
with lock:
print(
"[%s] %s CloudFormation file %s validation failed for account %s with error: %s" %
(ctime(), threadname, cffile, acctname, e))
cfgood = False
##########################
# Ok the CF should be validated (cfgood=True), so let's run it.
#########################
if cfgood:
with lock:
print(
"[%s] %s Preparing to run CloudFormation file %s in account %s" %
(ctime(), threadname, cffile, acctname))
stackid = cf.create_stack(
StackName=region + "-" + acctname,
TemplateURL=cffile,
Parameters=[
{
},
],
Tags=[
{
'Key': 'Purpose',
'Value': 'Infrastructure'
},
]
)['StackId']
with lock:
print("[%s] %s StackID %s is running in account %s" %
(ctime(), threadname, stackid, acctname))
waiter = cf.get_waiter('stack_create_complete')
waiter.wait(StackName=stackid)
with lock:
print(
"[%s] %s StackID %s completed creation in account %s" %
(ctime(), threadname, stackid, acctname))
stack = cf.describe_stacks(StackName=stackid)
for item in stack['Stacks'][0]['Outputs']:
if item['OutputKey'] == "VPCId":
vpcid = item["OutputValue"]
elif item['OutputKey'] == "VPCCIDRBlock":
cidrblock = item["OutputValue"]
elif item['OutputKey'] == "RouteTableId":
rtbid = item["OutputValue"]
elif item['OutputKey'] == "InternalRouteTableA":
rtbid_inta = item["OutputValue"]
elif item['OutputKey'] == "InternalRouteTableB":
rtbid_intb = item["OutputValue"]
pcxid = "None"
###########################
# Don't do peering if we are master vpc or if nopeer is set via cli
# otherwise, this is the peering code
##########################
if not ismaster and not nopeer:
with lock:
print(
"[%s] %s Preparing to request peering with Master vpc in account %s" %
(ctime(), threadname, acctname))
try:
ec2 = boto3.client('ec2',
aws_access_key_id=accesskey,
aws_secret_access_key=secretkey,
aws_session_token=sessiontoken,
region_name=region)
pcx = ec2.create_vpc_peering_connection(
VpcId=vpcid,
PeerVpcId=results['master']['VPCID'],
PeerOwnerId='masteracctID'
)
pcxid = pcx['VpcPeeringConnection']['VpcPeeringConnectionId']
with lock:
print(
"[%s] %s Peering Connection request ID %s sent from account %s" %
(ctime(), threadname, pcxid, acctname))
print(
"[%s] %s Preparing to add route to table %s to Peer Connection ID %s in account %s" %
(ctime(), threadname, rtbid, pcxid, acctname))
route = ec2.create_route(
DestinationCidrBlock=results['master']['CIDRblock'],
VpcPeeringConnectionId=pcxid,
RouteTableId=rtbid
)
if route['Return']:
print(
"[%s] Route added to route table %s for network %s to peer connection %s in account %s" %
(ctime(), rtbid, results['master']['CIDRblock'], pcxid, acctname))
else:
print(
"[%s] Failed adding to route table %s for network %s to peer connection %s in account %s" %
(ctime(), rtbid, results['master']['CIDRblock'], pcxid, acctname))
except botocore.exceptions.ClientError as e:
with lock:
print(
"[%s] %s Peering Connection request failed for account %s with error: %s" %
(ctime(), threadname, acctname, e))
results[acctname] = {
"CIDRblock": cidrblock,
"VPCID": vpcid,
"PCXID": pcxid}
############################
# master results need the route table ids of both internal tables to add routes to both
###########################
if ismaster:
results[acctname].update({'RTBint': [rtbid_inta, rtbid_intb]})
def printdata(results, acctname):
print(
"The CIDRBlock for VPC %s in account %s is %s. The VPC peering id is %s" %
(results[acctname]['VPCID'],
acctname,
results[acctname]['CIDRblock'],
results[acctname]['PCXID']))
def printdatamaster(results):
print(
"The CIDRBlock for VPC %s in master account is %s. The internal route table ids are %s and %s" %
(results['master']['VPCID'],
results['master']['CIDRblock'],
results['master']['RTBint'][0],
results['master']['RTBint'][1]))
def main():
#############################
# Parse CLI options - setup the parser
############################
parser = argparse.ArgumentParser(
description='An orchestration script that runs multi-account CloudFormation and can set up peering relationships between the VPCs created')
parser.add_argument(
"region",
type=str,
choices=[
"us-west-2",
"us-east-1"],
help="The AWS Region you would like to operate in")
parser.add_argument(
"-sa",
"--single_account",
action='append',
help="Provide a single account name(dev,hdp,test,beps) and only operate on that account. You can perform this action multiple times to operate on more than one account.")
parser.add_argument(
"-np",
"--no_peering",
action='store_true',
dest='no_peering',
help="Run the CloudFormation, but don't do the inter-VPC peering")
#################################
# Parse CLI options - read the parser
#################################
nopeer = None
args = parser.parse_args()
region = args.region
acct = args.single_account
if args.no_peering:
nopeer = True
############################
# Do single account or multiple single account runs
############################
if acct:
for line in acct:
foundacct = None
print(
"[%s] Single account selected: Preparing to run CloudFormation on %s account" %
(ctime(), line))
print("[%s] Preparing to spawn thread" % ctime())
for entry in awsaccts:
if entry['name'] == line:
t = threading.Thread(
target=run_cloudform,
args=(
entry['acct'],
entry['name'],
region,
entry['cffile'],
nopeer,
results))
threads.append(t)
t.start()
foundacct = True
if not foundacct:
print("[%s] No matching account name found!" % ctime())
print("[%s] Current configured accounts are:" % ctime())
for entry in awsaccts:
print(
"[%s] Account ID: %s Account Name: %s" %
(ctime(), entry['acct'], entry['name']))
for i in range(len(threads)):
threads[i].join()
#############################
# Or run the whole shebang
#############################
else:
print(
"[%s] Preparing to run CloudFormation across all AWS accounts" %
ctime())
print("[%s] Preparing to run Master account CloudFormation" % ctime())
masteracct = list(
(entry for entry in awsaccts if entry['name'] == 'master'))[0]
run_cloudform(
masteracct['acct'],
masteracct['name'],
region,
masteracct['cffile'],
nopeer,
results)
printdatamaster(results)
print("[%s] Preparing to spawn threads" % ctime())
subaccts = (entry for entry in awsaccts if entry['name'] != 'master')
##############################
# do the threading for subaccts
#############################
for entry in subaccts:
t = threading.Thread(
target=run_cloudform,
args=(
entry['acct'],
entry['name'],
region,
entry['cffile'],
nopeer,
results))
threads.append(t)
t.start()
for i in range(len(threads)):
threads[i].join()
print("[%s] All CloudFormations run!" % ctime())
if len(results) > 1:
print("[%s] Printing outputs:" % ctime())
for entry in (entry for entry in results if entry != 'master'):
printdata(results, entry)
###############################
# Accept peering and add final routes to peering vpcs
##############################
if not nopeer and len(results) > 1:
print(
"[%s] Attempting to accept peering requests in Master" %
ctime())
try:
master = boto3.client('ec2',
region_name=region)
subaccts = (entry for entry in results if entry != "master")
for entry in subaccts:
pcx = master.accept_vpc_peering_connection(
VpcPeeringConnectionId=results[entry]['PCXID']
)
print(
"[%s] VPC Peering connection from %s with ID %s is status: %s" %
(ctime(),
entry,
results[entry]['PCXID'],
pcx['VpcPeeringConnection']['Status']['Code']))
for table in results['master']['RTBint']:
route = master.create_route(
DestinationCidrBlock=results[entry]['CIDRblock'],
VpcPeeringConnectionId=results[entry]['PCXID'],
RouteTableId=table
)
if route['Return']:
print(
"[%s] Route added to Master route table %s for network %s to peer connection %s" %
(ctime(), table, results[entry]['CIDRblock'], results[entry]['PCXID']))
else:
print(
"[%s] Adding route to Master route table %s for network %s to peer connection %s failed!" %
(ctime(), table, results[entry]['CIDRblock'], results[entry]['PCXID']))
except botocore.exceptions.ClientError as e:
print(
"[%s] Failed to manipulate account %s with error: %s" %
(ctime(), "Master", e))
print("[%s] Finished" % ctime())
if __name__ == '__main__':
main()
| 39.70197
| 179
| 0.470749
| 1,463
| 16,119
| 5.138756
| 0.20164
| 0.026337
| 0.013966
| 0.033653
| 0.415137
| 0.361798
| 0.340516
| 0.301809
| 0.216547
| 0.196329
| 0
| 0.004021
| 0.382902
| 16,119
| 405
| 180
| 39.8
| 0.751784
| 0.051182
| 0
| 0.405882
| 0
| 0.020588
| 0.254067
| 0.004549
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011765
| false
| 0
| 0.017647
| 0
| 0.029412
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
085769a397608c592ac48390d3b4d6b67aae08eb
| 882
|
py
|
Python
|
NIM/tests/woa_test.py
|
buctlab/source-seeking-multi-robot-team-simulator
|
a68c214b9bd19006a94c0adc832681bbaf0d6dc8
|
[
"Apache-2.0"
] | null | null | null |
NIM/tests/woa_test.py
|
buctlab/source-seeking-multi-robot-team-simulator
|
a68c214b9bd19006a94c0adc832681bbaf0d6dc8
|
[
"Apache-2.0"
] | null | null | null |
NIM/tests/woa_test.py
|
buctlab/source-seeking-multi-robot-team-simulator
|
a68c214b9bd19006a94c0adc832681bbaf0d6dc8
|
[
"Apache-2.0"
] | null | null | null |
import os
from Config import Config
from NIM.algorithms import WhaleOptimizationAlgorithm
from NIM.algorithms.algorithm import logger
if __name__ == '__main__':
with open(Config.default_saved_scene_path, 'r') as f:
data = f.read()
m2d = eval(data)
seed = 5
woa = WhaleOptimizationAlgorithm(m2d, Config.rasterized_cell_size, func=Config.func, iterations=Config.iterations,
debug=True, population=Config.number_of_robots, robot_size=Config.size, seed=seed,
k=Config.leakage_sources)
best_sol, best_val = woa.run()
logger.info("best sol:{sol}, best val:{val}".format(sol=best_sol, val=best_val))
func_name = type(woa.func).__name__
woa.iter_swarm_pos.to_csv(
os.path.join(Config.project_root, "data/csv_file/woa_MultiSourceFunction_" + str(seed) + ".csv"))
| 36.75
| 119
| 0.675737
| 115
| 882
| 4.895652
| 0.530435
| 0.0373
| 0.060391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004335
| 0.21542
| 882
| 23
| 120
| 38.347826
| 0.809249
| 0
| 0
| 0
| 0
| 0
| 0.091837
| 0.043084
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.235294
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
085893c679735b22d323d01a1e71583ba759cc3a
| 6,242
|
py
|
Python
|
src/COVIDZejunDatagraphs.py
|
luisflores0330/ista131final
|
168ac6afe666e945ae717387b50420804b33c4f3
|
[
"Apache-2.0"
] | null | null | null |
src/COVIDZejunDatagraphs.py
|
luisflores0330/ista131final
|
168ac6afe666e945ae717387b50420804b33c4f3
|
[
"Apache-2.0"
] | null | null | null |
src/COVIDZejunDatagraphs.py
|
luisflores0330/ista131final
|
168ac6afe666e945ae717387b50420804b33c4f3
|
[
"Apache-2.0"
] | 4
|
2021-12-07T21:44:31.000Z
|
2021-12-07T23:20:04.000Z
|
'''
File: COVIDZejunDatagraphs.py
Author: Zejun Li
Purpose: This file contains 12 different functions to make 5 different graphs about the COVID 19 in Idaho
'''
import pandas as pd, numpy as np, matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
import datetime as dt
def get_df():
'''
This function is to get the dataframe from the csv file : data_table_for_daily_death_trends__idaho.csv
'''
fname = "data_table_for_daily_death_trends__idaho.csv"
df = pd.read_csv(fname,sep=',', skiprows = 2, engine='python')
del df["State"]
df["Dates"] = np.nan
def date_convert(date_to_convert):
return datetime.datetime.strptime(date_to_convert, '%b %d %Y').strftime('%m/%d/%Y')
df['Dates'] = df['Date'].apply(date_convert)
del df["Date"]
return df
def get_date_lst():
'''This function is to get all of the dates from the Dates column
'''
df = get_df()
lst_dates = []
for i in df['Dates']:
lst_dates.append(i)
return lst_dates
def fig1():
'''This function is to make a line graph with x axis of Dates and y axis of Current Hospitalized COVID-19 Patients.
'''
df = get_df()
lst_dates = get_date_lst()
x = [dt.datetime.strptime(d,'%m/%d/%Y').date() for d in lst_dates]
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=50))
plt.plot(x,df['Current Hospitalized COVID-19 Patients'])
plt.gcf().autofmt_xdate()
plt.xlabel("Dates")
plt.ylabel("Current Hospitalized COVID-19 Patients")
plt.suptitle('Figure 1', fontsize=16)
def fig2():
'''This function is to make a bar chart with x axis of Dates and y axis of New Deaths
'''
df = get_df()
lst_dates = get_date_lst()
plt.figure(figsize=(10,10))
plt.style.use('ggplot')
lst_dates = []
for i in df['Dates']:
lst_dates.append(i)
x = [dt.datetime.strptime(d,'%m/%d/%Y').date() for d in lst_dates]
lst = []
for i in df['New Deaths']:
lst.append(i)
x_pos = [i for i, _ in enumerate(x)]
plt.bar(x,lst,width=0.8, color='darkviolet')
plt.xlabel("Dates")
plt.ylabel("New Deaths")
plt.suptitle('Figure 2', fontsize=16)
def fig3():
'''This function is to make a scatter plot with x axis of Dates and y axis of 7-Day Moving Avg
'''
df = get_df()
plt.figure(figsize=(16,10), dpi= 80)
lst_dates = get_date_lst()
lst = []
for i in df["7-Day Moving Avg"]:
lst.append(i)
int_lst = []
for i in range(len(lst_dates)):
int_lst.append(i)
x = np.array(lst_dates)
y = np.array(lst)
x1 = np.array(int_lst)
m, b = np.polyfit(x1, y, 1)
plt.plot(x, m*x1 + b)
plt.scatter(x, y)
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=50))
plt.xlabel("Dates")
plt.ylabel("7-Day Moving Avg")
plt.gca().invert_xaxis()
plt.suptitle('Figure 3', fontsize=16)
def main():
fig1()
fig2()
fig3()
plt.show()
main()
def csv(file):
'''
This function is to get two dataframes from the csv file; df: data_table_for_daily_case_trends__idaho1.csv; df2:data_table_for_daily_death_trends__idaho2.csv
'''
df = pd.read_csv(file, sep = ",", skiprows = 2)
df2 = pd.read_csv("data_table_for_daily_death_trends__idaho2.csv", sep = "," , skiprows = 2)
df["New Deaths"] = df2["New Deaths"]
df["Doses Per Day"] = 0
df["Dates"] = df["Date"].replace({"Jan":"01", "Feb":"02","Mar":"03","Apr":"04","May":"05","Jun":"06","Jul":"07","Aug":"08","Sep":"09","Oct":"10","Nov":"11","Dec":"12"}, regex = True)
df["Total Doses Administered"] = df["Total Doses Administered"].fillna(0)
for i in range(1, len(df["Total Doses Administered"])-1):
a = pd.to_numeric(df["Total Doses Administered"])
df.loc[i-1,"Doses Per Day"] = abs((int(a.iloc[i-1]) - int(a.iloc[i])))
df.drop([0, 1, 2], axis=0, inplace=True)
del df["7-Day Moving Avg"]
del df["State"]
return df
def clean_dose():
'''This function is to delete the dates that don't have dose
'''
df = csv("data_table_for_daily_case_trends__idaho1.csv")
for i in range(626,670):
df = df.drop(index=i)
return df
def fig4():
'''This function is to make a line graph with x axis of Dates and y axis of New cases
'''
df = csv("data_table_for_daily_case_trends__idaho1.csv")
x = [dt.datetime.strptime(d,'%m %d %Y').date() for d in df["Dates"]]
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m %d %Y'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=50))
plt.plot(x,df['New Cases'])
plt.gcf().autofmt_xdate()
plt.xlabel("Dates")
plt.ylabel("New Cases")
plt.suptitle('Figure 4', fontsize=16)
'''
def fig5():
df = csv("data_table_for_daily_case_trends__idaho1.csv")
plt.figure(figsize=(10,10))
plt.style.use('ggplot')
lst_dates = []
for i in df['Dates']:
lst_dates.append(i)
x = [dt.datetime.strptime(d,'%m %d %Y').date() for d in df["Dates"]]
lst = []
for i in df['New Deaths']:
lst.append(i)
x_pos = [i for i, _ in enumerate(x)]
plt.bar(x,lst,width=0.8, color='black')
plt.xlabel("Dates")
plt.ylabel("New Deaths")
plt.suptitle('Figure 5', fontsize=16)
'''
def fig5():
'''This function is to make a bar chart with x axis of Dates and y axis of Doses Per Day
'''
df = clean_dose()
plt.figure(figsize=(16,10), dpi= 80)
lst = []
for i in df["Doses Per Day"]:
lst.append(i)
x = np.array(df["Dates"])
y = np.array(lst)
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=50))
plt.bar(x,lst,width=0.8, color='navy')
plt.xlabel("Dates")
plt.ylabel("Doses Per Day")
plt.gca().invert_xaxis()
plt.suptitle('Figure 5', fontsize=16)
def main2():
fig4()
#fig5()
fig5()
plt.show()
main2()
| 33.026455
| 187
| 0.603172
| 975
| 6,242
| 3.740513
| 0.206154
| 0.03071
| 0.019742
| 0.039485
| 0.610913
| 0.525363
| 0.489443
| 0.46312
| 0.378942
| 0.349328
| 0
| 0.029196
| 0.237264
| 6,242
| 188
| 188
| 33.202128
| 0.73682
| 0.165812
| 0
| 0.376
| 0
| 0
| 0.178499
| 0.040145
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096
| false
| 0
| 0.032
| 0.008
| 0.168
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0858b5bc59305248e9f97a28c217e52f4157d9b4
| 1,118
|
py
|
Python
|
tests/test_pipeline_disk_deduplication.py
|
kingking888/skyscraper
|
d710202f9581c3791d2cf7ee3ae33e950e46c0b7
|
[
"MIT"
] | 1
|
2021-03-21T07:25:43.000Z
|
2021-03-21T07:25:43.000Z
|
tests/test_pipeline_disk_deduplication.py
|
kingking888/skyscraper
|
d710202f9581c3791d2cf7ee3ae33e950e46c0b7
|
[
"MIT"
] | null | null | null |
tests/test_pipeline_disk_deduplication.py
|
kingking888/skyscraper
|
d710202f9581c3791d2cf7ee3ae33e950e46c0b7
|
[
"MIT"
] | 1
|
2021-04-24T11:38:18.000Z
|
2021-04-24T11:38:18.000Z
|
import pytest
import json
import datetime

from scrapy.spiders import Spider
import scrapy.exceptions

from skyscraper.items import BasicItem
from scrapy.exceptions import DropItem
from skyscraper.pipelines.filesystem import DiskDeduplicationPipeline


class MockDeduplication():
    def __init__(self):
        self.s = set()

    def add_word(self, word):
        self.s.add(word)

    def has_word(self, word):
        return word in self.s


def test_filters_duplicate_item():
    pipeline = DiskDeduplicationPipeline(MockDeduplication(), 'namespace')
    spider = Spider(name='spider')

    item = BasicItem()
    item['id'] = 'my-unique-id'
    item['url'] = 'http://example.com/'
    item['source'] = 'dummy source'

    # one time it should work
    pipeline.process_item(item, spider)

    # afterwards it should throw
    with pytest.raises(DropItem):
        pipeline.process_item(item, spider)

    # for different ID it should work
    item = BasicItem()
    item['id'] = 'my-unique-id-2'
    item['url'] = 'http://example.com/'
    item['source'] = 'dummy source'
    pipeline.process_item(item, spider)
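# The pipeline under test is imported from skyscraper and not shown here. A
# hypothetical in-memory pipeline satisfying this test's contract might look
# like the sketch below; the real DiskDeduplicationPipeline presumably
# persists seen IDs via the injected store, so this is only an illustration.
class DedupPipelineSketch:
    def __init__(self, store, namespace):
        self.store = store  # anything with add_word/has_word, e.g. MockDeduplication
        self.namespace = namespace

    def process_item(self, item, spider):
        key = '%s-%s' % (self.namespace, item['id'])
        if self.store.has_word(key):
            raise DropItem('Duplicate item: %s' % item['id'])
        self.store.add_word(key)
        return item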
| 24.304348
| 74
| 0.686047
| 137
| 1,118
| 5.510949
| 0.416058
| 0.019868
| 0.075497
| 0.091391
| 0.303311
| 0.188079
| 0.188079
| 0.111258
| 0.111258
| 0
| 0
| 0.001117
| 0.199463
| 1,118
| 45
| 75
| 24.844444
| 0.842458
| 0.073345
| 0
| 0.3
| 0
| 0
| 0.121124
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.266667
| 0.033333
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
085b597e5e9aaf7c138a4db4c8f8739331aa2a66
| 2,342
|
py
|
Python
|
SVM/SVM_Regression/Sklearn_SVM_Regression.py
|
Jojoxiao/Machine-Learning-for-Beginner-by-Python3
|
71b91c9cba5803bd78d4d31be6dabb1d3989e968
|
[
"MIT"
] | 397
|
2018-05-28T02:07:32.000Z
|
2022-03-30T09:53:37.000Z
|
SVM/SVM_Regression/Sklearn_SVM_Regression.py
|
976634681/Machine-Learning-for-Beginner-by-Python3
|
d9effcbb1b390dc608a0f4c0a28f0ad03892047a
|
[
"MIT"
] | 4
|
2019-01-14T16:41:02.000Z
|
2021-03-11T13:23:06.000Z
|
SVM/SVM_Regression/Sklearn_SVM_Regression.py
|
976634681/Machine-Learning-for-Beginner-by-Python3
|
d9effcbb1b390dc608a0f4c0a28f0ad03892047a
|
[
"MIT"
] | 235
|
2018-06-28T05:31:40.000Z
|
2022-03-11T03:20:07.000Z
|
# -*- coding:utf-8 -*-
# &Author  AnFany
# Support vector (kernel) regression implemented with the Sklearn package

"""
Part 1: import the libraries
"""
# Part of the Beijing PM2.5 data set
import SVM_Regression_Data as rdata
# Library imports
from sklearn import svm
import numpy as np
import matplotlib.pyplot as plt
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['FangSong']  # Chinese font name
mpl.rcParams['axes.unicode_minus'] = False  # display minus signs correctly

"""
Part 2: build the functions
"""


# Kernel-based training function
def sk_svm_train(intr, labeltr, inte, kener):
    clf = svm.SVR(kernel=kener)
    # start training
    clf.fit(intr, labeltr)
    # output on the training set
    tr = clf.predict(intr)
    # output on the test set
    pr = clf.predict(inte)
    return tr, pr


# Result-output function
'''
Available kernels: 'linear', 'poly', 'rbf', 'sigmoid'
'''


# Data set
def result(data, he='rbf'):
    # network outputs for training and prediction
    trainacc, testacc = [], []
    xd = data[0]
    yd = data[1].T[0]
    # test data
    texd = data[2]
    teyd = data[3].T[0]
    # start training
    resu = sk_svm_train(xd, yd, texd, he)
    # rescale the normalised outputs back to the original value range;
    # data[4] appears to hold the (min, max) used for normalisation
    tra = resu[0] * (data[4][1] - data[4][0]) + data[4][0]
    pre = resu[1] * (data[4][1] - data[4][0]) + data[4][0]
    ydd = data[1].T[0] * (data[4][1] - data[4][0]) + data[4][0]
    teyd = data[3].T[0] * (data[4][1] - data[4][0]) + data[4][0]
    return ydd, tra, teyd, pre


# Plotting function
def huitu(suout, shiout, c=['b', 'k'], sign='train', cudu=3):
    # plot the original data against the algorithm output
    plt.subplot(2, 1, 1)
    plt.plot(list(range(len(suout))), suout, c=c[0], linewidth=cudu, label='%s: algorithm output' % sign)
    plt.plot(list(range(len(shiout))), shiout, c=c[1], linewidth=cudu, label='%s: actual value' % sign)
    plt.legend(loc='best')
    plt.title('Original data versus SVM output')
    # plot the error against zero
    plt.subplot(2, 2, 3)
    plt.plot(list(range(len(suout))), suout - shiout, c='r', linewidth=cudu, label='%s: error' % sign)
    plt.plot(list(range(len(suout))), list(np.zeros(len(suout))), c='k', linewidth=cudu, label='zero')
    plt.legend(loc='best')
    plt.title('Error versus zero')
    # add a histogram of the error distribution
    plt.subplot(2, 2, 4)
    plt.hist(suout - shiout, 50, facecolor='g', alpha=0.75)
    plt.title('Error histogram')
    # display
    plt.show()


'''Part 4: the final runnable program'''
if __name__ == "__main__":
    datasvr = rdata.model_data
    realtr, outtri, realpre, poupre = result(datasvr, he='rbf')
    huitu(realtr, outtri, c=['b', 'k'], sign='train', cudu=1.5)
    huitu(realpre, poupre, c=['b', 'k'], sign='predict', cudu=1.5)
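# A minimal standalone illustration of the same sklearn SVR API on synthetic
# data (no PM2.5 files needed); the values and shapes here are made up:
def svr_demo():
    rng = np.random.RandomState(0)
    X = np.sort(rng.uniform(0, 5, 40)).reshape(-1, 1)  # 40 one-dimensional inputs
    y = np.sin(X).ravel() + 0.1 * rng.randn(40)        # noisy targets
    clf = svm.SVR(kernel='rbf')  # same estimator as sk_svm_train
    clf.fit(X, y)
    return clf.predict(X[:5])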
| 22.519231
| 101
| 0.557643
| 344
| 2,342
| 3.75
| 0.40407
| 0.046512
| 0.037209
| 0.031008
| 0.217054
| 0.20155
| 0.104651
| 0.05969
| 0.05969
| 0.045736
| 0
| 0.035493
| 0.242101
| 2,342
| 103
| 102
| 22.737864
| 0.691268
| 0.085824
| 0
| 0.044444
| 0
| 0
| 0.064767
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
085b8a0758f970cf513eb9555d20e921de2dbc2f
| 1,655
|
py
|
Python
|
tests/test_history.py
|
dfroger/conda
|
c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_history.py
|
dfroger/conda
|
c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_history.py
|
dfroger/conda
|
c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa
|
[
"BSD-3-Clause"
] | null | null | null |
from os.path import dirname
import unittest

from .decorators import skip_if_no_mock
from .helpers import mock

from conda import history


class HistoryTestCase(unittest.TestCase):
    def test_works_as_context_manager(self):
        h = history.History("/path/to/prefix")
        self.assertTrue(getattr(h, '__enter__'))
        self.assertTrue(getattr(h, '__exit__'))

    @skip_if_no_mock
    def test_calls_update_on_enter_and_exit(self):
        h = history.History("/path/to/prefix")
        with mock.patch.object(h, 'update') as update:
            with h:
                self.assertEqual(1, update.call_count)
            self.assertEqual(2, update.call_count)

    @skip_if_no_mock
    def test_returns_history_object_as_context_object(self):
        h = history.History("/path/to/prefix")
        with mock.patch.object(h, 'update'):
            with h as h2:
                self.assertEqual(h, h2)


class UserRequestsTestCase(unittest.TestCase):
    h = history.History(dirname(__file__))
    user_requests = h.get_user_requests()

    def test_len(self):
        self.assertEqual(len(self.user_requests), 6)

    def test_0(self):
        self.assertEqual(self.user_requests[0],
                         {'cmd': ['conda', 'update', 'conda'],
                          'date': '2016-02-16 13:31:33'})

    def test_last(self):
        self.assertEqual(self.user_requests[-1],
                         {'action': 'install',
                          'cmd': ['conda', 'install', 'pyflakes'],
                          'date': '2016-02-18 22:53:20',
                          'specs': ['pyflakes', 'conda', 'python 2.7*']})
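# The patching pattern above generalizes beyond conda: mock.patch.object can
# count calls made inside __enter__/__exit__. A tiny self-contained version of
# the same idea, with a hypothetical _Ctx class that is not part of conda:
class _Ctx(object):
    def update(self):
        pass

    def __enter__(self):
        self.update()
        return self

    def __exit__(self, *exc):
        self.update()


class CtxPatternTestCase(unittest.TestCase):
    @skip_if_no_mock
    def test_update_counted_on_enter_and_exit(self):
        c = _Ctx()
        with mock.patch.object(c, 'update') as update:
            with c:
                self.assertEqual(1, update.call_count)
            self.assertEqual(2, update.call_count)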
| 31.826923
| 73
| 0.590937
| 200
| 1,655
| 4.65
| 0.365
| 0.045161
| 0.064516
| 0.03871
| 0.272043
| 0.272043
| 0.155914
| 0.122581
| 0.122581
| 0.122581
| 0
| 0.031987
| 0.282175
| 1,655
| 51
| 74
| 32.45098
| 0.750842
| 0
| 0
| 0.128205
| 0
| 0
| 0.123263
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 1
| 0.153846
| false
| 0.025641
| 0.128205
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
085e0152d8a979274c20816965dae9f9c36f8c65
| 6,066
|
py
|
Python
|
src/bpp/views/raporty/ranking_autorow.py
|
iplweb/django-bpp
|
85f183a99d8d5027ae4772efac1e4a9f21675849
|
[
"BSD-3-Clause"
] | 1
|
2017-04-27T19:50:02.000Z
|
2017-04-27T19:50:02.000Z
|
src/bpp/views/raporty/ranking_autorow.py
|
mpasternak/django-bpp
|
434338821d5ad1aaee598f6327151aba0af66f5e
|
[
"BSD-3-Clause"
] | 41
|
2019-11-07T00:07:02.000Z
|
2022-02-27T22:09:39.000Z
|
src/bpp/views/raporty/ranking_autorow.py
|
iplweb/bpp
|
f027415cc3faf1ca79082bf7bacd4be35b1a6fdf
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- encoding: utf-8 -*-
import itertools

try:
    from django.core.urlresolvers import reverse
except ImportError:
    from django.urls import reverse

from django.db.models.aggregates import Sum
from django.template.defaultfilters import safe
from django.utils.functional import cached_property
from django_tables2 import Column
from django_tables2.export.views import ExportMixin
from django_tables2.tables import Table
from django_tables2.views import SingleTableView

from bpp.models import Autor, Sumy, OpcjaWyswietlaniaField, Uczelnia
from bpp.models.struktura import Wydzial


class RankingAutorowTable(Table):
    class Meta:
        attrs = {"class": "bpp-table"}
        model = Autor
        order_by = ("-impact_factor_sum", "autor__nazwisko")
        fields = (
            "lp",
            "autor",
            "impact_factor_sum",
            "liczba_cytowan_sum",
            "punkty_kbn_sum",
        )

    lp = Column(
        empty_values=(),
        orderable=False,
        attrs={"td": {"class": "bpp-lp-column"}},
        exclude_from_export=True,
    )
    autor = Column(order_by=("autor__nazwisko", "autor__imiona"))
    punkty_kbn_sum = Column("Punkty PK", "punkty_kbn_sum")
    impact_factor_sum = Column("Impact Factor", "impact_factor_sum")
    liczba_cytowan_sum = Column("Liczba cytowań", "liczba_cytowan_sum")

    def render_lp(self):
        self.lp_counter = getattr(
            self, "lp_counter", itertools.count(self.page.start_index())
        )
        return "%i." % next(self.lp_counter)

    def render_autor(self, record):
        return safe(
            '<a href="%s">%s</a>'
            % (
                reverse("bpp:browse_autor", args=(record.autor.slug,)),
                str(record.autor),
            )
        )

    def value_autor(self, record):
        return str(record.autor)


class RankingAutorowJednostkaWydzialTable(RankingAutorowTable):
    class Meta:
        fields = (
            "lp",
            "autor",
            "jednostka",
            "wydzial",
            "impact_factor_sum",
            "liczba_cytowan_sum",
            "punkty_kbn_sum",
        )
        order_by = ("-impact_factor_sum", "autor__nazwisko")

    jednostka = Column(accessor="jednostka.nazwa")
    wydzial = Column(accessor="jednostka.wydzial.nazwa")


class RankingAutorow(ExportMixin, SingleTableView):
    template_name = "raporty/ranking-autorow.html"

    def get_table_class(self):
        if self.rozbij_na_wydzialy:
            return RankingAutorowJednostkaWydzialTable
        return RankingAutorowTable

    @cached_property
    def rozbij_na_wydzialy(self):
        return self.request.GET.get("rozbij_na_jednostki", "True") == "True"

    @cached_property
    def tylko_afiliowane(self):
        return self.request.GET.get("tylko_afiliowane", "False") == "True"

    def get_queryset(self):
        qset = Sumy.objects.all()
        qset = qset.filter(
            rok__gte=self.kwargs["od_roku"], rok__lte=self.kwargs["do_roku"]
        )
        wydzialy = self.get_wydzialy()
        if wydzialy:
            qset = qset.filter(jednostka__wydzial__in=wydzialy)
        if self.tylko_afiliowane:
            qset = qset.filter(jednostka__skupia_pracownikow=True)
            qset = qset.filter(afiliuje=True)
        if self.rozbij_na_wydzialy:
            qset = qset.prefetch_related("jednostka__wydzial").select_related(
                "autor", "jednostka"
            )
            qset = qset.group_by("autor", "jednostka")
        else:
            qset = qset.select_related("autor")
            qset = qset.group_by("autor")
        qset = qset.annotate(
            impact_factor_sum=Sum("impact_factor"),
            liczba_cytowan_sum=Sum("liczba_cytowan"),
            punkty_kbn_sum=Sum("punkty_kbn"),
        )
        qset = qset.exclude(impact_factor_sum=0, liczba_cytowan_sum=0, punkty_kbn_sum=0)
        qset = qset.exclude(autor__pokazuj=False)
        uczelnia = Uczelnia.objects.get_default()
        if uczelnia is not None:
            ukryte_statusy = uczelnia.ukryte_statusy("rankingi")
            if ukryte_statusy:
                qset = qset.exclude(status_korekty_id__in=ukryte_statusy)
        return qset

    def get_dostepne_wydzialy(self):
        return Wydzial.objects.filter(zezwalaj_na_ranking_autorow=True)

    def get_wydzialy(self):
        base_query = self.get_dostepne_wydzialy()
        wydzialy = self.request.GET.getlist("wydzialy[]")
        if wydzialy:
            try:
                wydzialy = base_query.filter(pk__in=[int(x) for x in wydzialy])
                return wydzialy
            except (TypeError, ValueError):
                pass
        return base_query

    def get_context_data(self, **kwargs):
        context = super(SingleTableView, self).get_context_data(**kwargs)
        context["od_roku"] = self.kwargs["od_roku"]
        context["do_roku"] = self.kwargs["do_roku"]
        jeden_rok = False
        if self.kwargs["od_roku"] == self.kwargs["do_roku"]:
            context["rok"] = self.kwargs["od_roku"]
            jeden_rok = True
        wydzialy = self.get_wydzialy()
        context["wydzialy"] = wydzialy
        if jeden_rok:
            context["table_title"] = "Ranking autorów za rok %s" % context["rok"]
        else:
            context["table_title"] = "Ranking autorów za lata %s - %s" % (
                context["od_roku"],
                context["do_roku"],
            )
        context["table_subtitle"] = ""
        if len(wydzialy) != len(self.get_dostepne_wydzialy()):
            context["table_subtitle"] = ", ".join([x.nazwa for x in wydzialy])
        return context

    def get_table_kwargs(self):
        uczelnia = Uczelnia.objects.all().first()
        pokazuj = uczelnia.pokazuj_liczbe_cytowan_w_rankingu
        if pokazuj == OpcjaWyswietlaniaField.POKAZUJ_NIGDY or (
            pokazuj == OpcjaWyswietlaniaField.POKAZUJ_ZALOGOWANYM
            and self.request.user.is_anonymous
        ):
            return {"exclude": ("liczba_cytowan_sum",)}
        return {}
| 32.612903
| 88
| 0.616716
| 663
| 6,066
| 5.392157
| 0.260935
| 0.026853
| 0.033566
| 0.017902
| 0.141259
| 0.085874
| 0.043636
| 0.024056
| 0.024056
| 0
| 0
| 0.001817
| 0.274151
| 6,066
| 185
| 89
| 32.789189
| 0.810129
| 0.003792
| 0
| 0.18543
| 0
| 0
| 0.13905
| 0.008442
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072848
| false
| 0.006623
| 0.086093
| 0.033113
| 0.337748
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f22aabe1afa4a1593594ef47c8110872cb757c3c
| 16,701
|
py
|
Python
|
client-lib/pypi/nsrr/nsrr.py
|
nsrr/nsrr-cloud
|
a1e33bc3ba3220600e8b1973882d2ed76a7277c6
|
[
"MIT"
] | null | null | null |
client-lib/pypi/nsrr/nsrr.py
|
nsrr/nsrr-cloud
|
a1e33bc3ba3220600e8b1973882d2ed76a7277c6
|
[
"MIT"
] | null | null | null |
client-lib/pypi/nsrr/nsrr.py
|
nsrr/nsrr-cloud
|
a1e33bc3ba3220600e8b1973882d2ed76a7277c6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import requests
from requests.structures import CaseInsensitiveDict
import json
import getpass
from pathlib import Path
import hashlib
import pandas as pd
import gzip
from multiprocessing import Process

# Global variables
#API_SERVER='https://dev-cloud.sleepdata.org/api/v1'
API_SERVER = 'https://cloud.sleepdata.org/api/v1'
#API_SERVER='http://localhost:9002/api/v1'
procs = []
all_decompress_edfz = []


def get_input_token():
    enter_pass_text = """
    Get your token here: https://sleepdata.org/token
    Your input is hidden while entering token.
    Enter your token:
    """
    return getpass.getpass(enter_pass_text)


def read_token_from_file(file_name):
    try:
        f = open(file_name, 'r')
        user_token = f.readline().strip()
        f.close()
        return user_token
    except Exception as e:
        print("ERROR: the following error occurred while reading token from input file")
        print(e)


def get_user_access(user_token):
    headers = CaseInsensitiveDict()
    headers = {'token': user_token}
    try:
        resp = requests.get(API_SERVER + '/list/access', headers=headers)
        if (resp.ok and resp.status_code == 200):
            user_access_json = json.loads(resp.content)
            if (user_access_json["datasets"]):
                df = pd.DataFrame(user_access_json["datasets"], columns=["Dataset", "Full Name", "URL", "Access"])
                print(df.to_string(index=False))
        else:
            print("ERROR: Unable to list user access, please verify input token, approved DUA and try again")
    except Exception as e:
        print("ERROR: Unable to process request at this time, try again later")


def get_auth_token(user_token, dataset_name):
    headers = CaseInsensitiveDict()
    headers = {'token': user_token}
    payload = {'dataset_name': dataset_name}
    try:
        resp = requests.get(API_SERVER + '/auth-token', params=payload, headers=headers)
        if (resp.ok and resp.status_code == 200):
            auth_token = json.loads(resp.content)["auth_token"]
        else:
            auth_token = False
        return auth_token
    except Exception as e:
        return False


def get_download_url(auth_token=None, file_name=None):
    payload = {'file_name': file_name}
    try:
        if (auth_token):
            auth_headers = CaseInsensitiveDict()
            auth_headers = {'Authorization': 'Bearer %s' % auth_token}
            resp = requests.get(API_SERVER + '/download/url/controlled', params=payload, headers=auth_headers)
        else:
            resp = requests.get(API_SERVER + '/download/url/open', params=payload)
        if (resp.ok and resp.status_code == 200):
            return resp.content
        else:
            return False
    except Exception as e:
        return False


def download_file(url, download_file_name, no_md5, decompress, metadata):
    global procs, all_decompress_edfz
    try:
        file_name_split = download_file_name.split("/")
        file_name = file_name_split[-1]
        if (decompress and file_name.split(".")[-1] == 'idx'):
            print("Skipping download of file: ", download_file_name)
            return True
        file_download_path = "/".join(file_name_split[:-1])
        path = Path(str(Path.cwd()) + "/" + file_download_path)
        if not path.exists():
            path.mkdir(parents=True, exist_ok=True)
        response = requests.get(url, stream=True)
        f_download = path / file_name
        with f_download.open("wb+") as f:
            for chunk in response.iter_content(chunk_size=1024):
                f.write(chunk)
        if no_md5:
            if not f_download.stat().st_size == metadata["size"]:
                delete_file_path = Path(str(Path.cwd()) + "/" + download_file_name)
                delete_file_path.unlink()
                return False
            else:
                print("Downloaded file: ", download_file_name, " ", metadata["size"], "bytes")
        else:
            md5_object = hashlib.md5()
            block_size = 128 * md5_object.block_size
            md5_file = open(f_download, 'rb')
            chunk = md5_file.read(block_size)
            while chunk:
                md5_object.update(chunk)
                chunk = md5_file.read(block_size)
            md5_hash = md5_object.hexdigest()
            md5_file.close()
            if not md5_hash == metadata["md5"]:
                delete_file_path = Path(str(Path.cwd()) + "/" + download_file_name)
                #delete_file_path.unlink()
                return False
            else:
                print("Downloaded file: ", download_file_name, " ", metadata["size"], "bytes")
        # call decompress fn
        if (decompress and file_name.split(".")[-1] == "edfz"):
            decompress_proc = Process(target=decompress_edf, args=(download_file_name,))
            decompress_proc.start()
            procs.append(decompress_proc)
            all_decompress_edfz.append({"name": f_download, "size": f_download.stat().st_size})
        return True
    except Exception as e:
        return False


def get_all_files_list(dataset_name):
    payload = {'dataset_name': dataset_name}
    try:
        resp = requests.get(API_SERVER + '/list/all-files', params=payload)
        if (resp.ok and resp.status_code == 200):
            return resp.content
        else:
            return False
    except Exception as e:
        return False


def download_wrapper(all_files, user_token, dataset_name, download_path, force, no_md5, decompress):
    if (decompress):
        global procs, all_decompress_edfz
    all_download_size = 0
    all_files = json.loads(all_files)
    for f in all_files["open_files"]:
        if not download_path in f:
            continue
        if not force:
            file_path = ""
            if decompress and f.split(".")[-1] == "edfz":
                file_path = Path(str(Path.cwd()) + "/" + ".".join(f.split(".")[:-1]) + ".edf")
                if file_path.is_file():
                    print("Skipping download of existing file: {0}".format(f))
                    continue
            else:
                file_path = Path(str(Path.cwd()) + "/" + f)
                if file_path.is_file():
                    if file_path.stat().st_size == all_files["open_files"][f]['size']:
                        print("Skipping download of existing file: {0}".format(f))
                        continue
        url = get_download_url(file_name=f)
        if (url):
            download_success = download_file(url, f, no_md5, decompress, all_files["open_files"][f])
            if not download_success:
                print("ERROR: Unable to download file {0}".format(f))
            else:
                if not (decompress and f.split(".")[-1] == "idx"):
                    all_download_size += all_files["open_files"][f]["size"]
        else:
            print("ERROR: Unable to get download URL for file {0}, try again later".format(f))
    if (all_files["controlled_files"]):
        if "/" in download_path:
            download_path = "/".join(download_path.split("/")[1:])
        for f in list(all_files["controlled_files"]):
            if not download_path in f:
                del all_files["controlled_files"][f]
        controlled_files_count = len(all_files["controlled_files"])
        if controlled_files_count == 0:
            if all_download_size != 0:
                print("Total size of downloaded file(s) is ", all_download_size, "bytes")
            return
        if not user_token:
            print("Error: Input token is empty, skipping {0} controlled file(s) download".format(controlled_files_count))
            if all_download_size != 0:
                print("Total size of downloaded file(s) is ", all_download_size, "bytes")
            return
        for f in all_files["controlled_files"]:
            f_with_dataset = dataset_name + "/" + f
            if not force:
                file_path = ""
                if decompress and f_with_dataset.split(".")[-1] == "edfz":
                    file_path = Path(str(Path.cwd()) + "/" + ".".join(f_with_dataset.split(".")[:-1]) + ".edf")
                    if file_path.is_file():
                        print("Skipping download of existing file: {0}".format(f))
                        controlled_files_count -= 1
                        continue
                else:
                    file_path = Path(str(Path.cwd()) + "/" + f_with_dataset)
                    if file_path.is_file():
                        if file_path.stat().st_size == all_files["controlled_files"][f]['size']:
                            print("Skipping download of existing file: {0}".format(f))
                            controlled_files_count -= 1
                            continue
            # get bearer token
            auth_token = get_auth_token(user_token, dataset_name)
            if (auth_token):
                url = get_download_url(auth_token=auth_token, file_name=f)
                if (url):
                    download_success = download_file(url, f_with_dataset, no_md5, decompress, all_files["controlled_files"][f])
                    if not download_success:
                        print("ERROR: Unable to download file {0}".format(f))
                    else:
                        controlled_files_count -= 1
                        if not (decompress and f.split(".")[-1] == "idx"):
                            all_download_size += all_files["controlled_files"][f]["size"]
                else:
                    print("ERROR: Unable to get download URL for file {0}, try again later".format(f))
            else:
                print("ERROR: Unable to (re)download {0} controlled files as token verification failed, try again later".format(controlled_files_count))
                break
    sum_ = 0
    try:
        if decompress:
            for proc in procs:
                proc.join()
            for f in all_decompress_edfz:
                sum_ += Path('.'.join(str(f["name"]).split(".")[:-1]) + ".edf").stat().st_size - f["size"]
    except Exception as e:
        print("ERROR: Calculation failed for additional space used by decompressed files")
        return
    if all_download_size != 0:
        print("Total size of downloaded file(s) is ", all_download_size, "bytes")
    if sum_ != 0:
        print("Total additional space consumed by decompression is ", sum_, "bytes")


def download_all_files(user_token, dataset_name, force, no_md5, decompress):
    try:
        download_path = ''
        if "/" in dataset_name:
            download_path = dataset_name
            dataset_name = dataset_name.split("/")[0]
        all_files = get_all_files_list(dataset_name)
        if (all_files):
            download_wrapper(all_files, user_token, dataset_name, download_path, force, no_md5, decompress)
        else:
            print("ERROR: Unable to retrieve files list of dataset {0}, check list of cloud hosted datasets and try again".format(dataset_name))
    except Exception as e:
        print("ERROR: Unable to complete the download of files")


def get_subject_files_list(dataset_name, subject):
    payload = {'dataset_name': dataset_name, 'subject': subject}
    try:
        resp = requests.get(API_SERVER + '/list/subject-files', params=payload)
        if (resp.ok and resp.status_code == 200):
            return resp.content
        else:
            return False
    except Exception as e:
        return False


def download_subject_files(user_token, dataset_name, subject, force, no_md5, decompress):
    download_path = ''
    if "/" in dataset_name:
        download_path = dataset_name
        dataset_name = dataset_name.split("/")[0]
    all_files = get_subject_files_list(dataset_name, subject)
    if (all_files):
        download_wrapper(all_files, user_token, dataset_name, download_path, force, no_md5, decompress)
    else:
        print("ERROR: Unable to retrieve files list of subject {0} of dataset {1}, check list of cloud hosted datasets and try again".format(subject, dataset_name))


def list_all_subjects(dataset_name):
    payload = {'dataset_name': dataset_name}
    try:
        resp = requests.get(API_SERVER + '/list/all-subjects', params=payload)
        if (resp.ok and resp.status_code == 200):
            all_subjects_json = json.loads(resp.content)
            if (all_subjects_json["subjects"]):
                all_subjects = "\n".join(list(all_subjects_json["subjects"]))
                print(all_subjects)
        else:
            print("ERROR: Unable to list all subjects of {0} dataset, check list of cloud hosted datasets and try again".format(dataset_name))
    except Exception as e:
        print("ERROR: Unable to process request at this time, try again later")


def list_all_files(dataset_name):
    download_path = ''
    if "/" in dataset_name:
        download_path = dataset_name
        dataset_name = dataset_name.split("/")[0]
    try:
        all_files = get_all_files_list(dataset_name)
        if not all_files:
            print("ERROR: Unable to retrieve files list of dataset {0}, check list of cloud hosted datasets and try again".format(dataset_name))
            return
        all_files = json.loads(all_files)
        if (all_files):
            print_files = []
            for f in all_files["open_files"]:
                if not download_path in f:
                    continue
                print_files.append(["/".join(f.split("/")[1:]), all_files["open_files"][f]["size"]])
            if download_path:
                download_path = '/'.join(download_path.split("/")[1:])
            for f in all_files["controlled_files"]:
                if not download_path in f:
                    continue
                print_files.append([f, all_files["controlled_files"][f]["size"]])
            print_files = sorted(print_files, key=lambda x: x[0])
            df = pd.DataFrame(print_files, columns=["File Name", "Size(Bytes)"])
            if df.empty:
                print("ERROR: No files found for given input dataset (path): ", dataset_name + "/" + download_path)
            else:
                print(df.to_string(index=False))
    except Exception as e:
        print("ERROR: Unable to process request at this time, try again later")


def generate_nested_dirs(directories_list):
    try:
        nested_dirs = {}
        for d in directories_list:
            temp = nested_dirs
            for sub_dir in d.split("/"):
                if temp.get(sub_dir) is None:
                    temp[sub_dir] = {}
                temp = temp[sub_dir]
        return nested_dirs
    except Exception as e:
        return False


def print_tree_structure(nested_dirs_dict, indent, parent):
    try:
        for d in list(nested_dirs_dict):
            if indent == 0:
                print('{0: <50}{1}'.format(d, parent + "/" + d))
            else:
                print('{0: <50}{1}'.format(' ' * indent + '+--' + d, parent + "/" + d))
            if nested_dirs_dict[d]:
                print_tree_structure(nested_dirs_dict[d], indent + 1, parent + "/" + d)
        return True
    except Exception as e:
        return False


def list_all_directories(dataset_name):
    try:
        all_files = get_all_files_list(dataset_name)
        if not all_files:
            print("ERROR: Unable to retrieve files list of dataset {0}, check list of cloud hosted datasets and try again".format(dataset_name))
            return
        all_files = json.loads(all_files)
        if (all_files):
            print_dirs = []
            for f in all_files["open_files"]:
                print_dirs.append("/".join(f.split("/")[1:-1]))
            for f in all_files["controlled_files"]:
                print_dirs.append("/".join(f.split("/")[:-1]))
            print_dirs = sorted(set(print_dirs))
            nested_dirs_dict = generate_nested_dirs(print_dirs)
            if nested_dirs_dict:
                printed = print_tree_structure(nested_dirs_dict, 0, dataset_name)
                if not printed:
                    print("ERROR: Unable to show directory structure of dataset {0}, try again later".format(dataset_name))
    except Exception as e:
        print("ERROR: Unable to process request at this time, try again later")


def decompress_edf(edfz_file_name):
    full_edfz_file_name = Path(str(Path.cwd()) + "/" + edfz_file_name)
    try:
        edf_data = ''
        with gzip.open(full_edfz_file_name, 'rb') as f:
            edf_data = f.read()
        edf_to_write = Path('.'.join(str(full_edfz_file_name).split(".")[:-1]) + ".edf")
        with open(edf_to_write, 'wb') as f:
            f.write(edf_data)
        full_edfz_file_name.unlink()
        print("Decompressed file: ", edfz_file_name, "to", '.'.join(edfz_file_name.split(".")[:-1]) + ".edf", "and deleted original")
    except Exception as e:
        print("ERROR: Unable to decompress EDFZ file: ", edfz_file_name)
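# download_file verifies integrity by hashing in fixed-size blocks, so large
# EDF files never have to fit in memory at once. The same chunked-md5 idiom in
# isolation (the file path passed in is hypothetical):
def md5_of_file(file_path, block_size=128 * 64):
    md5 = hashlib.md5()
    with open(file_path, 'rb') as f:
        # iter() with a sentinel yields chunks until read() returns b''.
        for chunk in iter(lambda: f.read(block_size), b''):
            md5.update(chunk)
    return md5.hexdigest()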
| 42.496183
| 163
| 0.598946
| 2,109
| 16,701
| 4.519678
| 0.105737
| 0.05193
| 0.030214
| 0.033991
| 0.619597
| 0.572073
| 0.511855
| 0.456567
| 0.439677
| 0.415653
| 0
| 0.009641
| 0.285791
| 16,701
| 393
| 164
| 42.496183
| 0.789487
| 0.011257
| 0
| 0.509804
| 0
| 0.016807
| 0.176834
| 0.001454
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0.008403
| 0.02521
| 0
| 0.148459
| 0.151261
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f22b087ab319568e891a7406ef151ad2f4d6b818
| 509
|
py
|
Python
|
assignment2.py
|
talsperre/random-walk
|
5c810f571c9de28926850e1ad70ff4c29df9c0f4
|
[
"MIT"
] | null | null | null |
assignment2.py
|
talsperre/random-walk
|
5c810f571c9de28926850e1ad70ff4c29df9c0f4
|
[
"MIT"
] | null | null | null |
assignment2.py
|
talsperre/random-walk
|
5c810f571c9de28926850e1ad70ff4c29df9c0f4
|
[
"MIT"
] | null | null | null |
import numpy as np

# N walkers take R random steps each; positions start at the origin.
N = 100
R = 10000
size = (N, 3)
C = np.zeros((N, 3))
k = 1

# Emit frames in XYZ format: atom count, a STEP comment line, then one
# "He x y z" line per walker.
print("100")
print("STEP: ", k)
for i in range(N):
    print("He ", C[i, 0], " ", C[i, 1], " ", C[i, 2])
k += 1

for j in range(R):
    # Draw a random direction for every walker and normalise it to unit length.
    A = np.random.uniform(-1, 1, size)
    B = np.sum(np.multiply(A, A), axis=1)
    B = np.sqrt(B)
    B = B.reshape(N, 1)
    Norm_A = A / B
    C += Norm_A
    # Print a snapshot every 10 steps.
    if j % 10 == 0:
        print("100")
        print("STEP: ", k)
        for i in range(N):
            print("He ", C[i, 0], " ", C[i, 1], " ", C[i, 2])
        k += 1
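# The core trick above is normalising each uniform cube sample to unit length,
# so every step has length 1. The normalisation step in isolation, on a few
# made-up samples:
def unit_steps_demo():
    steps = np.random.uniform(-1, 1, (5, 3))
    norms = np.sqrt((steps ** 2).sum(axis=1, keepdims=True))
    unit = steps / norms
    return np.linalg.norm(unit, axis=1)  # ~1.0 for every row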
| 18.851852
| 53
| 0.489194
| 109
| 509
| 2.256881
| 0.330275
| 0.04878
| 0.105691
| 0.138211
| 0.390244
| 0.390244
| 0.390244
| 0.390244
| 0.390244
| 0.390244
| 0
| 0.084656
| 0.257367
| 509
| 27
| 54
| 18.851852
| 0.566138
| 0
| 0
| 0.4
| 0
| 0
| 0.054902
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0.24
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f22fac0a3ced91e4e4e5768a9d363783d0f24bd3
| 1,462
|
py
|
Python
|
parallel/images_common.py
|
minrk/ipython-cse17
|
16a9059c7054a8bd4977a3cb8b09c100ea779069
|
[
"BSD-3-Clause"
] | 3
|
2017-03-02T07:11:37.000Z
|
2017-03-03T06:13:32.000Z
|
parallel/images_common.py
|
minrk/ipython-cse17
|
16a9059c7054a8bd4977a3cb8b09c100ea779069
|
[
"BSD-3-Clause"
] | null | null | null |
parallel/images_common.py
|
minrk/ipython-cse17
|
16a9059c7054a8bd4977a3cb8b09c100ea779069
|
[
"BSD-3-Clause"
] | null | null | null |
import os

import matplotlib.pyplot as plt
from skimage.io import imread


def plot_corners(img, corners, show=True):
    """Display the image and plot all corners found"""
    plt.imshow(img, cmap='gray')
    plt.plot(corners[:, 1], corners[:, 0], 'r+', markeredgewidth=1.5, markersize=8)  # Plot corners
    plt.axis('image')
    plt.xticks([])
    plt.yticks([])
    if show:
        plt.show()


def find_corners(path, min_distance=5):
    """Find corners in an image at path

    Returns the image and the corner lists.
    """
    from skimage.feature import corner_harris, corner_peaks
    img = imread(path, flatten=True)
    corners = corner_peaks(corner_harris(img), min_distance=min_distance)
    return img, corners


def get_corners_image(path):
    """Given a path, return a PNG of the image with corners marked

    Calls both find_corners and plot_corners
    """
    from IPython.core.pylabtools import print_figure
    img, corners = find_corners(path)
    plot_corners(img, corners, show=False)
    fig = plt.gcf()
    pngdata = print_figure(fig)
    plt.close(fig)
    return pngdata


def get_pictures(pictures_dir):
    """Return a list of picture files found in pictures_dir"""
    pictures = []
    for directory, subdirs, files in os.walk(pictures_dir):
        for fname in files:
            if fname.lower().endswith(('.jpg', '.png')):
                pictures.append(os.path.join(directory, fname))
    return pictures
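# find_corners wraps skimage's Harris detector; the same two calls on a small
# synthetic checkerboard, so no image files are needed:
def corners_demo():
    import numpy as np
    from skimage.feature import corner_harris, corner_peaks
    img = np.zeros((40, 40))
    img[:20, :20] = 1
    img[20:, 20:] = 1
    return corner_peaks(corner_harris(img), min_distance=5)  # row/col coordinates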
| 29.24
| 96
| 0.666211
| 202
| 1,462
| 4.717822
| 0.430693
| 0.04617
| 0.029381
| 0.044071
| 0.052466
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005277
| 0.222298
| 1,462
| 49
| 97
| 29.836735
| 0.832894
| 0.197674
| 0
| 0
| 0
| 0
| 0.016874
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.16129
| 0
| 0.387097
| 0.064516
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f23235dddab2a9fffc993f7fe1be533663c51d2b
| 290
|
py
|
Python
|
src/calc.py
|
ceIery/epic7-speed-calculator
|
2f91e57117e2b6873772e6a703e47241570ab75f
|
[
"MIT"
] | null | null | null |
src/calc.py
|
ceIery/epic7-speed-calculator
|
2f91e57117e2b6873772e6a703e47241570ab75f
|
[
"MIT"
] | null | null | null |
src/calc.py
|
ceIery/epic7-speed-calculator
|
2f91e57117e2b6873772e6a703e47241570ab75f
|
[
"MIT"
] | null | null | null |
"""
Given a base speed value and a list of percentages, calculates the speed value
for each percentage
"""
def get_speeds(percents, base):
    speeds = []
    for percent in percents:
        speeds.append(round(int(base) * (int(percent) / 100)))
    print(speeds)
    return speeds
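# Example usage with made-up inputs: a base of 100 at 50%, 120% and 200%
# yields [50, 120, 200], since each entry is round(base * percent / 100).
if __name__ == '__main__':
    get_speeds([50, 120, 200], 100)  # prints [50, 120, 200]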
| 24.166667
| 78
| 0.662069
| 39
| 290
| 4.897436
| 0.666667
| 0.104712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013158
| 0.213793
| 290
| 11
| 79
| 26.363636
| 0.824561
| 0.337931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.333333
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f2330e7134a6c2ae1cacee5b851dbdfec9f5f1d4
| 11,762
|
py
|
Python
|
src/magi/actions/base.py
|
personalrobotics/magipy
|
6f86d6938168f580f667cfc093cf7e9f218e2853
|
[
"BSD-3-Clause"
] | null | null | null |
src/magi/actions/base.py
|
personalrobotics/magipy
|
6f86d6938168f580f667cfc093cf7e9f218e2853
|
[
"BSD-3-Clause"
] | 1
|
2018-01-06T00:24:06.000Z
|
2018-01-06T00:24:06.000Z
|
src/magi/actions/base.py
|
personalrobotics/magipy
|
6f86d6938168f580f667cfc093cf7e9f218e2853
|
[
"BSD-3-Clause"
] | null | null | null |
"""Base classes, context managers, and exceptions for MAGI actions."""
from abc import ABCMeta, abstractmethod
import logging

from openravepy import KinBody, Robot

LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)


class SaveAndJump(object):
    """
    Save the state of the environment and jump the environment to the result of
    a solution when entering. Jump back to the original state when exiting.
    """

    def __init__(self, solution, env):
        """
        @param solution: a Solution object
        @param env: the OpenRAVE environment to call save and jump on
        """
        self.solution = solution
        self.env = env

    def __enter__(self):
        """First call save on the solution, then jump."""
        LOGGER.debug('Begin SaveAndJump: %s', self.solution.action.get_name())
        self.cm = self.solution.save(self.env)
        self.cm.__enter__()
        self.solution.jump(self.env)

    def __exit__(self, exc_type, exc_value, traceback):
        """Exit the context manager created when this context manager was entered."""
        LOGGER.debug('End SaveAndJump: %s', (self.solution.action.get_name()))
        retval = self.cm.__exit__(exc_type, exc_value, traceback)
        return retval


class Validate(object):
    """Check a precondition when entering and a postcondition when exiting."""

    def __init__(self,
                 env,
                 precondition=None,
                 postcondition=None,
                 detector=None):
        """
        @param env: OpenRAVE environment
        @param precondition: Validator that validates preconditions
        @param postcondition: Validator that validates postconditions
        @param detector: object detector (implements DetectObjects, Update)
        """
        self.env = env
        self.precondition = precondition
        self.postcondition = postcondition
        self.detector = detector

    def __enter__(self):
        """Validate precondition."""
        LOGGER.info('Validate precondition: %s', self.precondition)
        if self.precondition is not None:
            self.precondition.validate(self.env, self.detector)

    def __exit__(self, exc_type, exc_value, traceback):
        """Validate postcondition."""
        LOGGER.info('Validate postcondition: %s', self.postcondition)
        if self.postcondition is not None:
            self.postcondition.validate(self.env, self.detector)


class ActionError(Exception):
    """Base exception class for actions."""
    KNOWN_KWARGS = {'deterministic'}

    def __init__(self, *args, **kwargs):
        super(ActionError, self).__init__(*args)
        assert self.KNOWN_KWARGS.issuperset(kwargs.keys())
        self.deterministic = kwargs.get('deterministic', None)


class CheckpointError(ActionError):
    """Exception class for checkpoints."""
    pass


class ExecutionError(Exception):
    """Exception class for executing solutions."""

    def __init__(self, message='', solution=None):
        super(ExecutionError, self).__init__(message)
        self.failed_solution = solution


class ValidationError(Exception):
    """Exception class for validating solutions."""

    def __init__(self, message='', validator=None):
        super(ValidationError, self).__init__(message)
        self.failed_validator = validator


class Action(object):
    """Abstract base class for actions."""
    __metaclass__ = ABCMeta

    def __init__(self,
                 name=None,
                 precondition=None,
                 postcondition=None,
                 checkpoint=False):
        """
        @param name: name of the action
        @param precondition: Validator that validates preconditions
        @param postcondition: Validator that validates postconditions
        @param checkpoint: True if this action is a checkpoint - once a Solution
          is achieved, neither the plan method of this action nor any of its
          predecessors will be called again
        """
        self._name = name
        self.precondition = precondition
        self.postcondition = postcondition
        self.checkpoint = checkpoint

    def get_name(self):
        """Return the name of the action."""
        return self._name

    @abstractmethod
    def plan(self, env):
        """
        Return a Solution that realizes this action.

        This method attempts to realize this action in the input environment, if
        possible. It MUST restore the environment to its original state before
        returning. If successful, this method returns a Solution object.
        Otherwise, it raises an ActionError.

        The implementation of this method MAY be stochastic. If so, the method
        may return a different solution each time it is called.

        The environment MUST be locked when calling this method.

        Ideally, planners should "with Validate(env, self.precondition)" when
        calling this.

        @param env: OpenRAVE environment
        @return Solution object
        """
        pass

    def execute(self, env, simulate):
        """
        Plan, postprocess, and execute this action.

        This is a helper method that wraps the plan() method.

        The environment MUST NOT be locked while calling this method.

        @param env: OpenRAVE environment
        @param simulate: flag to run in simulation
        @return result of executing the action
        """
        with env:
            solution = self.plan(env)
            executable_solution = solution.postprocess(env)
        return executable_solution.execute(env, simulate)


class Solution(object):
    """Abstract base class for solutions."""
    __metaclass__ = ABCMeta

    def __init__(self,
                 action,
                 deterministic,
                 precondition=None,
                 postcondition=None):
        """
        @param action: Action that generated this Solution
        @param deterministic: True if calling the plan method on the action
          multiple times will give the exact same solution
        @param precondition: Validator. Can be more specific than action's
          precondition.
        @param postcondition: Validator. Can be more specific than action's
          postcondition.
        """
        self.action = action
        self.deterministic = deterministic
        self.precondition = precondition if precondition else action.precondition
        self.postcondition = postcondition if postcondition else action.postcondition

    def save_and_jump(self, env):
        """
        Return a context manager that preserves the state of the environment
        then jumps the environment to the result of this solution.

        This context manager MUST restore the environment to its original state
        before returning.

        @param env: OpenRAVE environment
        @return context manager
        """
        return SaveAndJump(self, env)

    @abstractmethod
    def save(self, env):
        """
        Return a context manager that preserves the state of the environment.

        This method returns a context manager that preserves the state of the
        robot that is changed by the jump() method or by executing the
        solution. This context manager MUST restore the environment to its
        original state before returning.

        @param env: OpenRAVE environment
        @return context manager
        """
        pass

    @abstractmethod
    def jump(self, env):
        """
        Set the state of the environment to the result of this solution.

        The input environment to this method MUST be in the same state that was
        used to plan this action. The environment MUST be modified to match the
        result of executing the action. This method SHOULD perform the minimal
        computation necessary to achieve this result.

        The environment MUST be locked while calling this method.

        @param env: OpenRAVE environment
        """
        pass

    @abstractmethod
    def postprocess(self, env):
        """
        Return an ExecutableSolution that can be executed.

        Post-process this solution to prepare for execution. The input
        environment to this method MUST be in the same state that was used to
        plan this action. The environment MUST be restored to this state
        before returning.

        This operation MUST NOT be capable of failing and MUST NOT change the
        state of the environment after executing the action. As long as these
        two properties are satisfied, the result MAY be stochastic.

        The environment MUST be locked while calling this method.

        @param env: OpenRAVE environment
        @return ExecutableSolution object
        """
        pass

    def execute(self, env, simulate):
        """
        Postprocess and execute this solution.

        This is a helper method that wraps the postprocess() method.

        The environment MUST NOT be locked while calling this method.

        @param env: OpenRAVE environment
        @param simulate: flag to run in simulation
        @return result of executing the solution
        """
        with env:
            executable_solution = self.postprocess(env)
        return executable_solution.execute(env, simulate)


class ExecutableSolution(object):
    """Abstract base class for executing post-processed solutions."""
    __metaclass__ = ABCMeta

    def __init__(self, solution):
        """
        @param solution: Solution that generated this ExecutableSolution
        """
        self.solution = solution
        self.precondition = solution.precondition
        self.postcondition = solution.postcondition

    @abstractmethod
    def execute(self, env, simulate):
        """
        Execute this solution.

        If execution fails, this method should raise an ExecutionError.

        The environment MUST NOT be locked while calling this method.

        @param env: OpenRAVE environment
        @param simulate: flag to run in simulation
        @return result of executing the solution
        """
        pass


def to_key(obj):
    """
    Return a tuple that uniquely identifies an object in an Environment.

    The output of this function can be passed to from_key to find the
    equivalent object in, potentially, a different OpenRAVE environment.

    @param obj: object in an OpenRAVE environment
    @return tuple that uniquely identifies the object
    """
    if obj is None:
        return None
    elif isinstance(obj, (KinBody, Robot)):
        key = obj.GetName(),
    elif isinstance(obj, (KinBody.Joint, KinBody.Link)):
        key = obj.GetParent().GetName(), obj.GetName()
    elif isinstance(obj, Robot.Manipulator):
        key = obj.GetRobot().GetName(), obj.GetName()
    else:
        raise TypeError('Unknown type "{!s}".'.format(type(obj)))
    return (type(obj), ) + key


def from_key(env, key):
    """
    Return the object identified by the input key in an Environment.

    The input of this function is constructed by the to_key function.

    @param env: an OpenRAVE environment
    @param key: tuple that uniquely identifies the object
    @return object in the input OpenRAVE environment
    """
    if key is None:
        return None
    obj_type = key[0]
    if issubclass(obj_type, (KinBody, Robot)):
        return env.GetKinBody(key[1])
    elif issubclass(obj_type, KinBody.Joint):
        return env.GetKinBody(key[1]).GetJoint(key[2])
    elif issubclass(obj_type, KinBody.Link):
        return env.GetKinBody(key[1]).GetLink(key[2])
    elif issubclass(obj_type, Robot.Manipulator):
        return env.GetRobot(key[1]).GetManipulator(key[2])
    else:
        raise TypeError('Unknown type "{!s}".'.format(obj_type))
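# to_key/from_key give a pickle-friendly way to refer to environment objects
# by name rather than by reference. The same pattern reduced to a toy
# container; _ToyBody and the dict below are hypothetical stand-ins,
# deliberately not the OpenRAVE API:
class _ToyBody(object):
    def __init__(self, name):
        self.name = name


def _toy_key_roundtrip():
    env = {'mug': _ToyBody('mug')}  # stands in for an Environment
    key = (_ToyBody, 'mug')         # analogous to to_key's (type, name) tuple
    obj_type, name = key
    assert issubclass(obj_type, _ToyBody)
    return env[name]                # analogous to from_key's lookup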
| 32.672222
| 85
| 0.655841
| 1,359
| 11,762
| 5.591611
| 0.18028
| 0.033162
| 0.01895
| 0.031978
| 0.401763
| 0.312936
| 0.283853
| 0.23911
| 0.20279
| 0.18134
| 0
| 0.000934
| 0.271552
| 11,762
| 359
| 86
| 32.763231
| 0.885971
| 0.466502
| 0
| 0.335821
| 0
| 0
| 0.029735
| 0
| 0
| 0
| 0
| 0
| 0.007463
| 1
| 0.171642
| false
| 0.044776
| 0.022388
| 0
| 0.380597
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f233b62fa43bf27f7df361b2d0940e083df21551
| 6,471
|
py
|
Python
|
src/core/python/core/io/od.py
|
railtoolkit/OpenLinTim
|
27eba8b6038946ce162e9f7bbc0bd23045029d51
|
[
"MIT"
] | null | null | null |
src/core/python/core/io/od.py
|
railtoolkit/OpenLinTim
|
27eba8b6038946ce162e9f7bbc0bd23045029d51
|
[
"MIT"
] | null | null | null |
src/core/python/core/io/od.py
|
railtoolkit/OpenLinTim
|
27eba8b6038946ce162e9f7bbc0bd23045029d51
|
[
"MIT"
] | null | null | null |
from typing import List

from core.exceptions.input_exceptions import (InputFormatException,
                                              InputTypeInconsistencyException)
from core.model.graph import Graph
from core.model.impl.fullOD import FullOD
from core.model.impl.mapOD import MapOD
from core.model.infrastructure import InfrastructureNode
from core.model.od import OD, ODPair
from core.io.csv import CsvReader, CsvWriter
from core.model.ptn import Stop, Link
from core.util.config import Config, default_config


class ODReader:
    """
    Class to read files of od matrices.
    """

    def __init__(self, source_file_name: str, od: OD):
        """
        Constructor of an ODReader for a demand collection and a given file
        name. The given name will not influence the read file, only the name
        used in any error message, so be sure to use the same name here and in
        the CsvReader!
        """
        self.sourceFileName = source_file_name
        self.od = od

    def process_od_line(self, args: [str], lineNumber: int) -> None:
        """
        Process the contents of an od matrix line.
        :param args: the content of the line
        :param lineNumber: the line number, used for error handling
        :raise exceptions: if the line does not contain exactly 3 entries or
        if the specific types of the entries do not match the expectations
        """
        if len(args) != 3:
            raise InputFormatException(self.sourceFileName, len(args), 3)
        try:
            origin = int(args[0])
        except ValueError:
            raise InputTypeInconsistencyException(self.sourceFileName, 1,
                                                  lineNumber, "int", args[0])
        try:
            destination = int(args[1])
        except ValueError:
            raise InputTypeInconsistencyException(self.sourceFileName, 2,
                                                  lineNumber, "int", args[1])
        try:
            passengers = float(args[2])
        except ValueError:
            raise InputTypeInconsistencyException(self.sourceFileName, 3,
                                                  lineNumber, "float", args[2])
        self.od.setValue(origin, destination, passengers)

    @staticmethod
    def read(od: OD, size: int = None, file_name: str = "", config: Config = Config.getDefaultConfig()) -> OD:
        """
        Read the given file into an od object. If parameters are not given but needed,
        the respective values will be read from the given config.
        :param od: the od to fill. If not given, an empty MapOD will be used. If a size is given, a FullOD of the
        corresponding size will be used
        :param size: the size of the FullOD to use (if no od is given directly)
        :param file_name: the file name to read the od matrix from
        :param config: the config to read the parameters from that are not given
        :return: the read od matrix
        """
        if not od and size:
            od = FullOD(size)
        if not od:
            od = MapOD()
        if not file_name:
            file_name = config.getStringValue("default_od_file")
        reader = ODReader(file_name, od)
        CsvReader.readCsv(file_name, reader.process_od_line)
        return od

    @staticmethod
    def readNodeOd(od: OD, size: int = None, file_name: str = "", config: Config = Config.getDefaultConfig()) -> OD:
        """
        Read the given file into an od object. If parameters are not given but needed,
        the respective values will be read from the given config.
        :param od: the od to fill. If not given, an empty MapOD will be used. If a size is given, a FullOD of the
        corresponding size will be used
        :param size: the size of the FullOD to use (if no od is given directly)
        :param file_name: the file name to read the od matrix from
        :param config: the config to read the parameters from that are not given
        :return: the read od matrix
        """
        if not file_name:
            file_name = config.getStringValue("filename_od_nodes_file")
        return ODReader.read(od, size, file_name, config)


class ODWriter:
    """
    Class implementing the writing of an od matrix as a static method. Just
    call write(Graph, OD, Config) to write the od matrix to the file
    specified in the config.
    """

    @staticmethod
    def write(ptn: Graph[Stop, Link], od: OD, file_name: str = "", header: str = "",
              config: Config = Config.getDefaultConfig()):
        """
        Write the given od matrix to the file specified in the config by
        default_od_file. Will write all od pairs, including those with weight 0.
        :param ptn: the ptn the od matrix is based on
        :param od: the od matrix to write
        :param config: used for reading the values of default_od_file and od_header
        :param file_name: the file name to write the od matrix to
        :param header: the header to write in the od file
        """
        od_pairs = []
        if not file_name:
            file_name = config.getStringValue("default_od_file")
        if not header:
            header = config.getStringValue("od_header")
        for origin in ptn.getNodes():
            for destination in ptn.getNodes():
                od_pairs.append(ODPair(origin.getId(), destination.getId(), od.getValue(origin.getId(), destination.getId())))
        CsvWriter.writeListStatic(file_name, od_pairs, ODPair.toCsvStrings, header=header)

    @staticmethod
    def writeNodeOd(od: OD, file_name: str = "", header: str = "",
                    config: Config = Config.getDefaultConfig()):
        """
        Write the given od matrix to the file specified or the corresponding file name from the config. Will write
        only the od pairs with positive demand.
        :param od: the od object to write
        :param file_name: the file to write the od data to
        :param header: the header to use
        :param config: the config to read parameters from that are needed but not given
        """
        if not file_name:
            file_name = config.getStringValue("filename_od_nodes_file")
        if not header:
            header = config.getStringValue("od_nodes_header")
        od_pairs = od.getODPairs()
        CsvWriter.writeListStatic(file_name, od_pairs, ODPair.toCsvStrings, header=header)
| 44.020408
| 126
| 0.626642
| 844
| 6,471
| 4.735782
| 0.191943
| 0.056042
| 0.019515
| 0.021016
| 0.487616
| 0.482612
| 0.405054
| 0.398549
| 0.379034
| 0.367025
| 0
| 0.002888
| 0.304281
| 6,471
| 146
| 127
| 44.321918
| 0.88494
| 0.386803
| 0
| 0.333333
| 0
| 0
| 0.031045
| 0.012532
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0.027778
| 0.138889
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f23575bb8b4e289c914a5be32dd736b94767c391
| 4,395
|
py
|
Python
|
kriging/_kriging.py
|
ERSSLE/ordinary_kriging
|
f983081e4f12b0bae03bd042a6f451c65dcb2759
|
[
"MIT"
] | 3
|
2020-09-08T16:55:44.000Z
|
2021-12-04T15:35:07.000Z
|
kriging/_kriging.py
|
ERSSLE/ordinary_kriging
|
f983081e4f12b0bae03bd042a6f451c65dcb2759
|
[
"MIT"
] | null | null | null |
kriging/_kriging.py
|
ERSSLE/ordinary_kriging
|
f983081e4f12b0bae03bd042a6f451c65dcb2759
|
[
"MIT"
] | 2
|
2021-08-25T09:35:50.000Z
|
2021-12-07T08:19:11.000Z
|
# encoding: utf-8
"""
Ordinary Kriging interpolation is a linear estimation of regionalized variables.
It assumes that the data follow a normal distribution,
and considers the expected value of the regionalized variable Z to be unknown.
The interpolation process is similar to a weighted sliding average,
with the weights determined by spatial data analysis.
"""
import numpy as np
from shapely.geometry import Polygon, Point, shape
from shapely.geometry.multipolygon import MultiPolygon
from shapely.prepared import prep


class Kriging():
    """Ordinary Kriging interpolation class"""

    def _distance(self, xy1, xy2):
        xdmat = (xy1[:, [0]] - xy2[:, 0]) ** 2
        ydmat = (xy1[:, [1]] - xy2[:, 1]) ** 2
        return np.sqrt(xdmat + ydmat)

    def _rh(self, z):
        return 1/2 * (z - z.reshape(-1, 1)) ** 2

    def _proportional(self, x, y):
        """ x*y / x**2 """
        return (x * y).sum() / (x ** 2).sum()

    def fit(self, xy=None, z=None):
        """
        The training process mainly consists of the semivariance and distance matrix calculation.
        """
        self.xy = xy.copy()
        self.z = z.copy()
        h = self._distance(xy, xy)
        r = self._rh(z)
        hh_f = np.triu(h + 1, 0)
        rr_f = np.triu(r + 1, 0)
        hh = np.triu(h, 0)
        rr = np.triu(r, 0)
        self.k = self._proportional(hh[(hh != 0) | (hh_f != 0)], rr[(rr != 0) | (rr_f != 0)])
        self.hnew = h * self.k
        self.hnew = np.r_[self.hnew, np.ones((1, self.hnew.shape[1]))]
        self.hnew = np.c_[self.hnew, np.ones((self.hnew.shape[0], 1))]
        self.hnew[self.hnew.shape[0] - 1, self.hnew.shape[1] - 1] = 0

    def predict(self, xy):
        """
        Calculate the interpolation weights and return the interpolation results.
        """
        oh = self._distance(self.xy, xy)
        oh = self.k * oh
        oh = np.r_[oh, np.ones((1, oh.shape[1]))]
        self.w = np.dot(np.linalg.inv(self.hnew), oh)
        res = (self.z.reshape(-1, 1) * self.w[:-1, :]).sum(0)
        return res


def shape_shadow(xgrid, ygrid, mapdata):
    """
    Mask processing.

    Parameters
    ----------
    xgrid: grid coordinates of longitude.
    ygrid: grid coordinates of latitude.
    mapdata: array of map data.

    Return
    ------
    np.ndarray: An array of Boolean types.
    """
    newshp = Polygon()
    for shap in mapdata:
        newshp = newshp.union(shape({'type': 'Polygon', 'coordinates': [shap]}))
    points = []
    for xi, yi in zip(xgrid.ravel(), ygrid.ravel()):
        points.append(Point([xi, yi]))
    prep_newshp = prep(newshp)
    mask = []
    for p in points:
        mask.append(bool(prep_newshp.contains(p) - 1))
    mask = np.array(mask).reshape(xgrid.shape)
    return mask


def interpolate(xy, z, extension=1.2, point_counts=(100, 100)):
    """
    Interpolate through the Kriging class, and return the grid points
    of the longitude and latitude interpolation results.

    Parameters
    ----------
    xy: The latitude and longitude coordinates of the spatial data points.
    z: The observed values at those coordinates.
    extension: The interpolating region is expanded to cover a wider area.
    point_counts: How many data points to interpolate, default is 100 * 100.
    """
    kri = Kriging()
    kri.fit(xy, z)
    x_max, x_min, y_max, y_min = xy[:, 0].max(), xy[:, 0].min(), xy[:, 1].max(), xy[:, 1].min()
    p = (extension - 1.0) / 2
    x_s = x_min - (x_max - x_min) * p
    x_e = x_max + (x_max - x_min) * p
    y_s = y_min - (y_max - y_min) * p
    y_e = y_max + (y_max - y_min) * p
    xls = np.linspace(x_s, x_e, point_counts[0])
    yls = np.linspace(y_s, y_e, point_counts[1])
    xgrid, ygrid = np.meshgrid(xls, yls)
    xgridls, ygridls = xgrid.ravel(), ygrid.ravel()
    if len(xgridls) > 100000:  # Consider memory limits and process in chunks.
        zgridls = np.array([])
        for s, e in zip(np.arange(0, len(xgridls), 100000)[:-1], np.arange(0, len(xgridls), 100000)[1:]):
            zgridls = np.concatenate([zgridls, kri.predict(np.c_[xgridls[s:e], ygridls[s:e]])])
        if e < len(xgridls):
            zgridls = np.concatenate([zgridls, kri.predict(np.c_[xgridls[e:], ygridls[e:]])])
    else:
        zgridls = kri.predict(np.c_[xgridls, ygridls])
    zgrid = zgridls.reshape(xgrid.shape)
    return xgrid, ygrid, zgrid
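# Example usage on a small synthetic field; the data below are made up, and
# only the numpy import from the top of this module is assumed:
def interpolate_demo():
    rng = np.random.RandomState(0)
    xy = rng.uniform(0, 10, (30, 2))         # 30 scattered sample points
    z = np.sin(xy[:, 0]) + np.cos(xy[:, 1])  # observed values at those points
    return interpolate(xy, z, extension=1.2, point_counts=(50, 50))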
| 35.731707
| 100
| 0.597952
| 649
| 4,395
| 3.975347
| 0.269646
| 0.034109
| 0.015504
| 0.009302
| 0.153488
| 0.125969
| 0.115504
| 0.077519
| 0.077519
| 0.041085
| 0
| 0.02766
| 0.251422
| 4,395
| 122
| 101
| 36.02459
| 0.756535
| 0.281456
| 0
| 0
| 0
| 0
| 0.007666
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.057143
| 0.014286
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f23806bdb5c4b2e6ddeae98b2f41f0141fe5c5b9
| 1,410
|
py
|
Python
|
crypto-scrapers/scrapers/spiders/coin_market_cap.py
|
chnsh/crypto-index-fund
|
6c4122b868372ba99aba4f703e85d8ee12af07de
|
[
"MIT"
] | 14
|
2018-05-27T19:34:59.000Z
|
2022-02-09T12:02:38.000Z
|
crypto-scrapers/scrapers/spiders/coin_market_cap.py
|
chnsh/crypto-index-fund
|
6c4122b868372ba99aba4f703e85d8ee12af07de
|
[
"MIT"
] | 4
|
2018-05-28T02:44:07.000Z
|
2022-03-02T14:55:20.000Z
|
crypto-scrapers/scrapers/spiders/coin_market_cap.py
|
chnsh/crypto-index-fund
|
6c4122b868372ba99aba4f703e85d8ee12af07de
|
[
"MIT"
] | 1
|
2022-03-07T05:26:47.000Z
|
2022-03-07T05:26:47.000Z
|
from datetime import datetime
from locale import *

import scrapy
from injector import Injector

from scrapers.items import CoinMarketCapItem
from scrapers.utils import UrlListGenerator

setlocale(LC_NUMERIC, '')


class CoinMarketCapSpider(scrapy.Spider):
    name = "cmc"
    custom_settings = {
        'ITEM_PIPELINES': {
            'scrapers.pipelines.CMCPipeline': 100,
        }
    }

    def start_requests(self):
        for url in Injector().get(UrlListGenerator).generate_cmc_url_list():
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        coin = response.css('h1.text-large small::text') \
            .extract_first() \
            .replace('(', '') \
            .replace(')', '')
        for row in response.css('table tbody tr'):
            data = row.css('td::text').extract()
            yield CoinMarketCapItem(
                date=datetime.strptime(data[0], '%b %d, %Y').date(),
                open_price=atof(data[1]) if data[1] != '-' else None,
                high_price=atof(data[2]) if data[2] != '-' else None,
                low_price=atof(data[3]) if data[3] != '-' else None,
                close_price=atof(data[4]) if data[4] != '-' else None,
                volume=atof(data[5]) if data[5] != '-' else None,
                market_cap=atof(data[6]) if data[6] != '-' else None,
                coin=coin
            )
| 33.571429
| 76
| 0.565248
| 164
| 1,410
| 4.780488
| 0.481707
| 0.061224
| 0.066327
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017034
| 0.292199
| 1,410
| 41
| 77
| 34.390244
| 0.768537
| 0
| 0
| 0
| 0
| 0
| 0.078723
| 0.021277
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.176471
| 0
| 0.323529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f23af2303a08de830f84db88bf6e00cef4e25589
| 4,361
|
py
|
Python
|
crawler/cli.py
|
NicolasLM/crawler
|
15ed6441fef3b68bfadc970f597271191fe66cf8
|
[
"MIT"
] | null | null | null |
crawler/cli.py
|
NicolasLM/crawler
|
15ed6441fef3b68bfadc970f597271191fe66cf8
|
[
"MIT"
] | null | null | null |
crawler/cli.py
|
NicolasLM/crawler
|
15ed6441fef3b68bfadc970f597271191fe66cf8
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
from urllib.parse import urlparse

import click
import rethinkdb as r
import redis

import crawler.conf as conf

# cli does not need to be thread-safe
conn = r.connect(host=conf.RethinkDBConf.HOST,
                 db=conf.RethinkDBConf.DB)
domains = r.table('domains')

@click.group()
@click.version_option()
def cli():
    """Crawler command line tool."""

@cli.command('as', short_help='most popular AS')
@click.option('--count', default=15, help='number of AS to show')
def top_as(count):
    """Show which Autonomous Systems are the most popular."""
    data = domains.filter(r.row['success'] == True).\
        group(r.row['asn']).count().run(conn)
    top('Autonomous Systems', count, data)

@cli.command('countries', short_help='most popular countries')
@click.option('--count', default=15, help='number of countries to show')
def top_countries(count):
    """Show which countries are the most popular."""
    data = domains.filter(r.row['success'] == True).\
        group(r.row['country']).count().run(conn)
    top('countries', count, data)

def top(kind, count, data):
    top = OrderedDict(sorted(data.items(), key=lambda t: -t[1]))
    i = 1
    click.secho('Top {} {}'.format(count, kind), bold=True)
    for value, occurrences in top.items():
        if not value:
            continue
        click.echo('{:>15} {}'.format(value, occurrences))
        i += 1
        if i > count:
            break

@cli.command('stats', short_help='statistics about domains')
def stats():
    """Show statistics about domains."""
    success = domains.filter(r.row['success'] == True).count().run(conn)
    failure = domains.filter(r.row['success'] == False).count().run(conn)
    redis_url = urlparse(conf.CeleryConf.BROKER_URL)
    redis_conn = redis.StrictRedis(redis_url.hostname,
                                   port=redis_url.port,
                                   db=redis_url.path[1:])
    pending = redis_conn.llen('celery')
    try:
        percent_failure = failure * 100 / success
    except ZeroDivisionError:
        percent_failure = 0.0
    click.secho('Domain statistics', bold=True)
    click.secho('Success: {}'.format(success), fg='green')
    click.secho('Pending: {}'.format(pending), fg='yellow')
    click.secho('Failed: {} ({:.2f}%)'.format(failure, percent_failure),
                fg='red')

@cli.command('domain', short_help='information about a domain')
@click.argument('name')
def domain(name):
    """Show information about a domain."""
    import pprint
    domain_name = name.lower()
    try:
        pprint.pprint(domains.filter({'name': domain_name}).run(conn).next())
    except r.net.DefaultCursorEmpty:
        click.echo('No information on {}'.format(domain_name))

@cli.command('insert', short_help='insert a domain in the list to crawl')
@click.argument('name')
def insert(name):
    """Insert a domain in the list of domains to crawl."""
    from .crawler import crawl_domain
    name = name.lower()
    crawl_domain.delay(name)
    click.secho('Domain {} added to Celery tasks'.format(name),
                fg='yellow')

@cli.command('rethinkdb', short_help='prepare RethinkDB')
def rethinkdb():
    """Prepare database and table in RethinkDB"""
    from rethinkdb.errors import ReqlOpFailedError, ReqlRuntimeError
    conn = r.connect(host=conf.RethinkDBConf.HOST)
    # Create database
    try:
        r.db_create(conf.RethinkDBConf.DB).run(conn)
        click.secho('Created database {}'.format(conf.RethinkDBConf.DB),
                    fg='yellow')
    except ReqlOpFailedError:
        click.secho('Database {} already exists'.format(conf.RethinkDBConf.DB),
                    fg='green')
    # Create table 'domains'
    conn = r.connect(host=conf.RethinkDBConf.HOST,
                     db=conf.RethinkDBConf.DB)
    try:
        r.table_create('domains', durability=conf.RethinkDBConf.DURABILITY).\
            run(conn)
        click.secho('Created table domains', fg='yellow')
    except ReqlOpFailedError:
        click.secho('Table domains already exists', fg='green')
    # Create index on domains.name
    try:
        r.table('domains').index_create('name').run(conn)
        click.secho('Created index domains.name', fg='yellow')
    except ReqlRuntimeError:
        click.secho('Index domains.name already exists', fg='green')
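The top() helper only needs a mapping of value to count, so its ranking logic can be checked without RethinkDB; a minimal sketch with hypothetical AS data:

from collections import OrderedDict

data = {'AS15169': 42, 'AS13335': 37, None: 5, 'AS16509': 12}
ranked = OrderedDict(sorted(data.items(), key=lambda t: -t[1]))
for value, occurrences in ranked.items():
    if not value:
        continue  # skip unresolved entries, exactly as top() does
    print('{:>15} {}'.format(value, occurrences))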
| 33.806202
| 79
| 0.64022
| 543
| 4,361
| 5.092081
| 0.252302
| 0.0434
| 0.034358
| 0.024593
| 0.233273
| 0.179747
| 0.124051
| 0.110669
| 0.083906
| 0.083906
| 0
| 0.004661
| 0.212795
| 4,361
| 128
| 80
| 34.070313
| 0.800757
| 0.086907
| 0
| 0.180851
| 0
| 0
| 0.172711
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.095745
| 0
| 0.180851
| 0.021277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f23b010b735f63cc59ac899de4d7a1e041082294
| 9,667
|
py
|
Python
|
run.py
|
keyunluo/Pytorch-DDP
|
ff91affdd2c4cebe1719e9a46f118405c308fd1f
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
keyunluo/Pytorch-DDP
|
ff91affdd2c4cebe1719e9a46f118405c308fd1f
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
keyunluo/Pytorch-DDP
|
ff91affdd2c4cebe1719e9a46f118405c308fd1f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.optim as optim
import torch.multiprocessing as mp
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader, Dataset
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
import argparse, random, time, os
import numpy as np

class MyDataset(Dataset):
    def __init__(self):
        super().__init__()
        self.docs = torch.randn((1024, 32, 16))

    def __len__(self):
        return len(self.docs)

    def __getitem__(self, index):
        return self.docs[index]

class MyModel(nn.Module):
    def __init__(self, max_seq_len=32, emb_dim=16):
        super().__init__()
        self.max_seq_len = max_seq_len
        self.position_layer = nn.Embedding(max_seq_len, emb_dim)
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=emb_dim, nhead=2, dropout=0.2, batch_first=True)
        self.encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=2)
        # mean- and max-pooled features are concatenated, so the head takes 2 * emb_dim
        self.fc = nn.Linear(emb_dim * 2, 4)

    def forward(self, imgs, mask):
        positions = self.position_layer(torch.arange(self.max_seq_len).repeat((imgs.shape[0], 1)).to(imgs).long())
        imgs = imgs + positions
        feature = self.encoder(imgs, src_key_padding_mask=~mask)
        # keepdim so the (batch, 1) divisor broadcasts over the feature dimension
        pooling1 = torch.sum(feature * mask.unsqueeze(-1), axis=1) / mask.sum(axis=1, keepdim=True)
        pooling2 = torch.max(feature * mask.unsqueeze(-1), axis=1)[0]
        pooling = torch.cat([pooling1, pooling2], dim=1)
        output = self.fc(pooling)
        return output

class Trainer():
    def __init__(self, model, dataloader, datasampler, device, rank, args):
        self.model = model
        self.dataloader = dataloader
        self.datasampler = datasampler
        self.device = device
        self.rank = rank
        self.args = args

    def _data_to_gpu(self, data, device):
        for k in data:
            data[k] = torch.tensor(data[k]).to(device)
        return data

    def predict(self, dataloader=None, is_valid=False):
        y_true, y_pred = [], []
        self.model.eval()
        if dataloader is None:
            dataloader = self.dataloader
        with torch.no_grad():
            for batch in dataloader:
                input = [self._data_to_gpu(data, self.device) for data in batch]
                if is_valid:
                    feature, label = input[:-1], input[-1]
                else:
                    feature, label = input[:-1], None
                output = self.model(feature)
                predicted_label = torch.argmax(output, dim=1).detach().cpu().numpy().tolist()
                y_pred += predicted_label
                y_true += [0] * len(predicted_label) if not is_valid else label.detach().cpu().numpy().tolist()
        self.model.eval()
        return y_true, y_pred

    def fit(self, epoch, optimizer, criterion, saved_model, scheduler=None, validloader=None):
        for epoch in range(1, epoch + 1):
            time1 = time.time()
            self.model.train(True)
            self.datasampler.set_epoch(epoch)
            total_loss = []
            for batch in self.dataloader:
                optimizer.zero_grad()
                input = [self._data_to_gpu(data, self.device) for data in batch]
                feature, label = input[:-1], input[-1]
                output = self.model(feature)
                loss = criterion(output, label)
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_norm)
                optimizer.step()
                if self.rank == 0:
                    total_loss.append(loss.item())
            if self.rank == 0:
                epoch_avg_loss = np.mean(total_loss)
                print("Epoch {:02d}, Time {:.02f}s, AvgLoss {:.06f}".format(epoch, time.time() - time1, epoch_avg_loss))
                state_dict = self.model.module.state_dict()
                # dirname is '' for bare filenames such as "model.torch"
                os.makedirs(os.path.dirname(saved_model) or '.', exist_ok=True)
                torch.save(state_dict, saved_model)
            if validloader:
                test_out = self.predict(validloader, True)
                # all_reduce only operates on tensors; gather the per-rank python lists instead
                gathered = [None] * torch.distributed.get_world_size()
                torch.distributed.all_gather_object(gathered, test_out)
                if self.rank == 0:
                    y_true = [y for part in gathered for y in part[0]]
                    y_pred = [y for part in gathered for y in part[1]]
                torch.cuda.empty_cache()
            if scheduler is not None:
                scheduler.step()

def parameter_parser():
    parser = argparse.ArgumentParser(description="Run Model")
    parser.add_argument("--seq_len",
                        type=int,
                        default=512,
                        help="max sequence length")
    parser.add_argument("--ip",
                        type=str,
                        default="localhost",
                        help="ip address")
    parser.add_argument("--port",
                        type=str,
                        default=str(random.randint(20000, 30000)),
                        help="port num")
    parser.add_argument("--cuda_devices",
                        type=int,
                        nargs='+',
                        default=list(range(torch.cuda.device_count())),
                        help="cuda devices")
    parser.add_argument("--mode",
                        type=str,
                        choices=["train", "eval"],
                        help="train or eval")
    parser.add_argument("--num_worker",
                        type=int,
                        default=8,
                        help="number of data loader worker")
    parser.add_argument("--batch_size",
                        type=int,
                        default=32,
                        help="batch size")
    parser.add_argument("--epoch",
                        type=int,
                        default=5,
                        help="num epoch")
    parser.add_argument("--max_norm",
                        type=int,
                        default=30,
                        help="max norm value")
    return parser.parse_args()

def set_manual_seed(seed):
    np.random.seed(seed)
    torch.manual_seed(seed)
    random.seed(seed)
    cudnn.benchmark = False
    cudnn.deterministic = True

def dist_init(ip, rank, local_rank, world_size, port):
    """
    initialize data distributed
    """
    host_addr_full = 'tcp://' + ip + ':' + str(port)
    torch.distributed.init_process_group("nccl", init_method=host_addr_full, rank=rank, world_size=world_size)
    torch.cuda.set_device(local_rank)
    assert torch.distributed.is_initialized()

def init_weights(module):
    if isinstance(module, nn.Linear):
        nn.init.xavier_uniform_(module.weight.data)
        nn.init.constant_(module.bias.data, 0.0)
    elif isinstance(module, nn.LSTM):
        nn.init.xavier_uniform_(module.weight_ih_l0.data)
        nn.init.orthogonal_(module.weight_hh_l0.data)
        nn.init.constant_(module.bias_ih_l0.data, 0.0)
        nn.init.constant_(module.bias_hh_l0.data, 0.0)
        hidden_size = module.bias_hh_l0.data.shape[0] // 4
        # set the forget-gate slice of the bias to 1.0
        module.bias_hh_l0.data[hidden_size:(2 * hidden_size)] = 1.0
        if module.bidirectional:
            nn.init.xavier_uniform_(module.weight_ih_l0_reverse.data)
            nn.init.orthogonal_(module.weight_hh_l0_reverse.data)
            nn.init.constant_(module.bias_ih_l0_reverse.data, 0.0)
            nn.init.constant_(module.bias_hh_l0_reverse.data, 0.0)
            module.bias_hh_l0_reverse.data[hidden_size:(2 * hidden_size)] = 1.0

def train_worker(rank, args, world_size):
    model_file = "model.torch"
    device = args.cuda_devices[rank]
    dist_init(args.ip, rank, device, world_size, args.port)
    model = prepare_model(model_file, args, need_load=False, is_train=True, distributed=True)
    criterion = nn.CrossEntropyLoss()
    train_dataset = MyDataset()
    train_datasampler = DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, pin_memory=True, num_workers=args.num_worker, batch_size=args.batch_size, sampler=train_datasampler)
    optimizer = optim.Adam(model.parameters(), lr=1e-5)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=32, eta_min=1e-6)
    trainer = Trainer(model, train_dataloader, train_datasampler, device, rank, args)
    valid_dataset = MyDataset()
    valid_datasampler = DistributedSampler(valid_dataset)
    valid_dataloader = DataLoader(valid_dataset, pin_memory=True, num_workers=args.num_worker, batch_size=args.batch_size, sampler=valid_datasampler)
    trainer.fit(args.epoch, optimizer, criterion, saved_model=model_file, scheduler=scheduler,
                validloader=valid_dataloader)

def prepare_model(model_file, args, need_load=False, is_train=True, distributed=True):
    if distributed:
        rank, device = torch.distributed.get_rank(), torch.cuda.current_device()
    else:
        rank, device = 0, torch.cuda.current_device()
    model = MyModel()
    model = model.to(device)
    if need_load:
        model.load_state_dict(torch.load(model_file, map_location='cuda:{}'.format(device)))
        if rank == 0:
            print("[*] load model {}".format(model_file))
    else:
        model.apply(init_weights)
    if is_train and distributed:
        model = DistributedDataParallel(model, device_ids=[device])
    print("[*] rank:{}, device:{}".format(rank, device))
    return model

def trainer():
    world_size = len(args.cuda_devices)
    mp.spawn(train_worker, args=(args, world_size), nprocs=world_size)

if __name__ == '__main__':
    args = parameter_parser()
    if args.mode == "train":
        trainer()
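The forward pass combines masked mean- and max-pooling before the linear head; a minimal sketch of that pooling on a toy batch, independent of the DDP machinery (shapes hypothetical):

import torch

feature = torch.arange(24, dtype=torch.float32).reshape(2, 4, 3)  # (batch, seq, emb)
mask = torch.tensor([[1, 1, 0, 0],
                     [1, 1, 1, 1]], dtype=torch.bool)

masked = feature * mask.unsqueeze(-1)                        # zero out padded steps
mean_pool = masked.sum(dim=1) / mask.sum(dim=1, keepdim=True)
max_pool = masked.max(dim=1)[0]
pooled = torch.cat([mean_pool, max_pool], dim=1)             # (2, 6): what fc sees
print(pooled.shape)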
| 39.618852
| 150
| 0.602359
| 1,170
| 9,667
| 4.760684
| 0.228205
| 0.010772
| 0.027469
| 0.017953
| 0.175224
| 0.158528
| 0.124237
| 0.124237
| 0.077558
| 0.077558
| 0
| 0.015598
| 0.283749
| 9,667
| 243
| 151
| 39.781893
| 0.78885
| 0.005276
| 0
| 0.121359
| 0
| 0
| 0.037099
| 0
| 0
| 0
| 0
| 0
| 0.004854
| 1
| 0.07767
| false
| 0
| 0.048544
| 0.009709
| 0.174757
| 0.014563
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f23c95d3f1d786e4a9f7ff9ea7ec7de8d8f85605
| 373
|
py
|
Python
|
newsletter/urls.py
|
vallka/djellifique
|
fb84fba6be413f9d38276d89ae84aeaff761218f
|
[
"MIT"
] | null | null | null |
newsletter/urls.py
|
vallka/djellifique
|
fb84fba6be413f9d38276d89ae84aeaff761218f
|
[
"MIT"
] | null | null | null |
newsletter/urls.py
|
vallka/djellifique
|
fb84fba6be413f9d38276d89ae84aeaff761218f
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import *

app_name = 'newsletter'
urlpatterns = [
    path('pixel/', my_image, name='pixel'),
    path('click/<str:uuid>/', click_redirect, name='click'),
    path('notification/', notification, name='notification'),
    path('sendtest/<str:slug>', sendtest, name='sendtest'),
    path('stats/<str:slug>', stats, name='stats'),
]
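With app_name set, these routes reverse under the 'newsletter:' namespace; a minimal sketch, runnable only inside a configured Django project that includes this urlconf (uuid and slug values hypothetical):

from django.urls import reverse

click_url = reverse('newsletter:click', kwargs={'uuid': '1234-abcd'})
stats_url = reverse('newsletter:stats', kwargs={'slug': 'spring-campaign'})
# The resulting paths depend on where the project mounts this urlconf.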
| 26.642857
| 61
| 0.659517
| 45
| 373
| 5.4
| 0.466667
| 0.057613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142091
| 373
| 13
| 62
| 28.692308
| 0.759375
| 0
| 0
| 0
| 0
| 0
| 0.310992
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f23ec17cf55792ab6ef9150b36b5c3e6f5471fbb
| 6,491
|
py
|
Python
|
vesc_driver/src/mathdir/cubic_spline_planner.py
|
Taek-16/vesc_study
|
c4f8e56a2530b17622ca73e9eba57830a1b51ad9
|
[
"Apache-2.0"
] | 1
|
2021-02-13T10:48:13.000Z
|
2021-02-13T10:48:13.000Z
|
vesc_driver/src/mathdir/cubic_spline_planner.py
|
Taek-16/vesc_study
|
c4f8e56a2530b17622ca73e9eba57830a1b51ad9
|
[
"Apache-2.0"
] | null | null | null |
vesc_driver/src/mathdir/cubic_spline_planner.py
|
Taek-16/vesc_study
|
c4f8e56a2530b17622ca73e9eba57830a1b51ad9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
cubic spline planner
Author: Atsushi Sakai
"""
import math
import numpy as np
import bisect
from scipy.spatial import distance

class Spline:
    """
    Cubic Spline class
    """

    def __init__(self, x, y):
        self.b, self.c, self.d, self.w = [], [], [], []
        self.x = x
        self.y = y
        self.nx = len(x)  # dimension of x
        h = np.diff(x)
        # calc coefficient a
        self.a = [iy for iy in y]
        # calc coefficient c
        A = self.__calc_A(h)
        B = self.__calc_B(h)
        self.c = np.linalg.solve(A, B)
        # print(self.c1)
        # calc spline coefficient b and d
        for i in range(self.nx - 1):
            self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))
            tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * \
                (self.c[i + 1] + 2.0 * self.c[i]) / 3.0
            self.b.append(tb)

    def calc(self, t):
        """
        Calc position
        if t is outside of the input x, return None
        """
        if t < self.x[0]:
            return None
        elif t > self.x[-1]:
            return None
        i = self.__search_index(t)
        dx = t - self.x[i]
        result = self.a[i] + self.b[i] * dx + \
            self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0
        return result

    def calcd(self, t):
        """
        Calc first derivative
        if t is outside of the input x, return None
        """
        if t < self.x[0]:
            return None
        elif t > self.x[-1]:
            return None
        i = self.__search_index(t)
        dx = t - self.x[i]
        result = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0
        return result

    def calcdd(self, t):
        """
        Calc second derivative
        """
        if t < self.x[0]:
            return None
        elif t > self.x[-1]:
            return None
        i = self.__search_index(t)
        dx = t - self.x[i]
        result = 2.0 * self.c[i] + 6.0 * self.d[i] * dx
        return result

    def calcddd(self, t):
        if t < self.x[0]:
            return None
        elif t > self.x[-1]:
            return None
        i = self.__search_index(t)
        result = 6.0 * self.d[i]
        return result

    def __search_index(self, x):
        """
        search data segment index
        """
        return bisect.bisect(self.x, x) - 1

    def __calc_A(self, h):
        """
        calc matrix A for spline coefficient c
        """
        A = np.zeros((self.nx, self.nx))
        A[0, 0] = 1.0
        for i in range(self.nx - 1):
            if i != (self.nx - 2):
                A[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])
            A[i + 1, i] = h[i]
            A[i, i + 1] = h[i]
        A[0, 1] = 0.0
        A[self.nx - 1, self.nx - 2] = 0.0
        A[self.nx - 1, self.nx - 1] = 1.0
        # print(A)
        return A

    def __calc_B(self, h):
        """
        calc matrix B for spline coefficient c
        """
        B = np.zeros(self.nx)
        for i in range(self.nx - 2):
            B[i + 1] = 3.0 * (self.a[i + 2] - self.a[i + 1]) / \
                h[i + 1] - 3.0 * (self.a[i + 1] - self.a[i]) / h[i]
        # print(B)
        return B

class Spline2D:
    """
    2D Cubic Spline class
    """

    def __init__(self, x, y):
        self.s = self.__calc_s(x, y)
        self.sx = Spline(self.s, x)
        self.sy = Spline(self.s, y)

    def __calc_s(self, x, y):
        dx = np.diff(x)
        dy = np.diff(y)
        self.ds = [math.sqrt(idx ** 2 + idy ** 2)
                   for (idx, idy) in zip(dx, dy)]
        s = [0]
        s.extend(np.cumsum(self.ds))
        return s

    def calc_position(self, s):
        """
        calc position
        """
        x = self.sx.calc(s)
        y = self.sy.calc(s)
        return x, y

    def calc_curvature(self, s):
        """
        calc curvature
        """
        dx = self.sx.calcd(s)
        ddx = self.sx.calcdd(s)
        dy = self.sy.calcd(s)
        ddy = self.sy.calcdd(s)
        k = (ddy * dx - ddx * dy) / (dx ** 2 + dy ** 2)
        return k

    def calc_d_curvature(self, s):
        """
        calc d_curvature which is derivative of curvature by s
        """
        dx = self.sx.calcd(s)
        ddx = self.sx.calcdd(s)
        dddx = self.sx.calcddd(s)
        dy = self.sy.calcd(s)
        ddy = self.sy.calcdd(s)
        dddy = self.sy.calcddd(s)
        squareterm = dx * dx + dy * dy
        dk = ((dddy * dx - dddx * dy) * squareterm - 3 * (ddy * dx - ddx * dy) * (dx * ddx + dy * ddy)) / (squareterm * squareterm)
        return dk

    def calc_yaw(self, s):
        """
        calc yaw
        """
        dx = self.sx.calcd(s)
        dy = self.sy.calcd(s)
        yaw = math.atan2(dy, dx)
        return yaw

def calc_spline_course(x, y, ds=0.1):
    sp = Spline2D(x, y)
    s = list(np.arange(0, sp.s[-1], ds))
    rx, ry, ryaw, rk, rdk = [], [], [], [], []
    for i_s in s:
        ix, iy = sp.calc_position(i_s)
        rx.append(ix)
        ry.append(iy)
        ryaw.append(sp.calc_yaw(i_s))
        rk.append(sp.calc_curvature(i_s))
        rdk.append(sp.calc_d_curvature(i_s))
    return rx, ry, ryaw, rk, rdk, s

def main():
    print("Spline 2D test")
    import matplotlib.pyplot as plt
    manhae1 = np.load(file='/home/menguiin/catkin_ws/src/macaron_2/path/K-CITY-garage-1m.npy')
    x = manhae1[0:manhae1.shape[0] - 1, 0]
    y = manhae1[0:manhae1.shape[0] - 1, 1]
    rx, ry, ryaw, rk, rdk, s = calc_spline_course(x, y)
    s = np.array(s)
    fig, ax = plt.subplots(1)
    plt.plot(range(-s.shape[0], s.shape[0], 2), s, "s", label="s-value")
    plt.grid(True)
    plt.axis("equal")
    plt.xlabel("index")
    plt.ylabel("sval")
    plt.legend()
    fig, ax = plt.subplots(1)
    plt.plot(x, y, "xb", label="input")
    plt.plot(rx, ry, "-r", label="spline")
    plt.grid(True)
    plt.axis("equal")
    plt.xlabel("x[m]")
    plt.ylabel("y[m]")
    plt.legend()
    fig, ax = plt.subplots(1)
    plt.plot(s, [math.degrees(iyaw) for iyaw in ryaw], "or", label="yaw")
    plt.grid(True)
    plt.legend()
    plt.xlabel("line length[m]")
    plt.ylabel("yaw angle[deg]")
    fig, ax = plt.subplots(1)
    plt.plot(s, rk, "-r", label="curvature")
    plt.grid(True)
    plt.legend()
    plt.xlabel("line length[m]")
    plt.ylabel("curvature [1/m]")
    plt.show()

if __name__ == '__main__':
    main()
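A minimal usage sketch for the planner, using only the classes and function above (the waypoints are hypothetical; the hard-coded .npy path in main() is specific to the author's machine):

x = [0.0, 10.0, 20.5, 35.0, 70.5]
y = [0.0, -6.0, 5.0, 6.5, 0.0]
rx, ry, ryaw, rk, rdk, s = calc_spline_course(x, y, ds=1.0)
# rx/ry: interpolated positions, ryaw: headings (rad),
# rk/rdk: curvature and its derivative, sampled every 1 m of arc length
print(len(rx), ryaw[0], rk[0])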
| 24.130112
| 131
| 0.475582
| 995
| 6,491
| 3.031156
| 0.162814
| 0.028183
| 0.021883
| 0.01061
| 0.392573
| 0.340849
| 0.306366
| 0.28183
| 0.241379
| 0.187666
| 0
| 0.02636
| 0.362964
| 6,491
| 268
| 132
| 24.220149
| 0.703023
| 0.088122
| 0
| 0.355422
| 0
| 0.006024
| 0.037156
| 0.011378
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096386
| false
| 0
| 0.036145
| 0
| 0.271084
| 0.006024
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f240eb401196f0b66c32fe422e4a7253f5e5528f
| 1,469
|
py
|
Python
|
mojave_setup/fonts.py
|
RuchirChawdhry/macOS-Mojave-Setup
|
5e61fe8c20abc42e63fcbd1c7e310aab8cc02a1c
|
[
"MIT"
] | null | null | null |
mojave_setup/fonts.py
|
RuchirChawdhry/macOS-Mojave-Setup
|
5e61fe8c20abc42e63fcbd1c7e310aab8cc02a1c
|
[
"MIT"
] | null | null | null |
mojave_setup/fonts.py
|
RuchirChawdhry/macOS-Mojave-Setup
|
5e61fe8c20abc42e63fcbd1c7e310aab8cc02a1c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import subprocess as sp

class Fonts:
    FONTS = [
        "source-code-pro",
        "source-sans-pro",
        "source-serif-pro",
        "roboto",
        "roboto-mono",
        "roboto-slab",
        "open-sans",
        "open-sans-condensed",
        "lato",
        "ibm-plex",
        "ibm-plex-mono",
        "ibm-plex-sans",
        "georgia",
        "ibm-plex-sans-condensed",
        "fira-mono",
        "fira-sans",
        "fira-code",
        "times-new-roman",
        "great-vibes",
        "grand-hotel",
        "montserrat",
        "hack",
        "simple-line-icons",
        "old-standard-tt",
        "ibm-plex-serif",
        "inconsolata",
        "impact",
        "bebas-neue",
        "arial",
        "arial-black",
        "alex-brush",
        "alegreya",
        "alegreya-sans",
        "aguafina-script",
        "libre-baskerville",
        "lobster",
        "material-icons",
        "raleway",
        "rajdhani",
        "raleway-dots",
        "merriweather",
        "merriweather-sans",
        "redhat",
        "pacifico",
    ]

    def get_noto_casks(self):
        cmd = ["brew", "search", "font-noto", "--casks"]
        noto = sp.run(cmd, capture_output=True).stdout.decode().splitlines()[1:]
        return noto

    def install(self):
        self.FONTS += self.get_noto_casks()
        for font in self.FONTS:
            sp.run(["brew", "cask", "install", font])
| 22.953125
| 80
| 0.479238
| 143
| 1,469
| 4.888112
| 0.601399
| 0.050072
| 0.031474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003138
| 0.349217
| 1,469
| 63
| 81
| 23.31746
| 0.728033
| 0.029272
| 0
| 0
| 0
| 0
| 0.373596
| 0.016152
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.017857
| 0
| 0.107143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f2430c615c25842a6a15c7289e5e98e1e77f49ce
| 1,817
|
py
|
Python
|
src/neighborly/core/residence.py
|
ShiJbey/neighborly
|
5af1e3211f1ef0e25803790850e7cd3d3a49be69
|
[
"MIT"
] | null | null | null |
src/neighborly/core/residence.py
|
ShiJbey/neighborly
|
5af1e3211f1ef0e25803790850e7cd3d3a49be69
|
[
"MIT"
] | null | null | null |
src/neighborly/core/residence.py
|
ShiJbey/neighborly
|
5af1e3211f1ef0e25803790850e7cd3d3a49be69
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict

from ordered_set import OrderedSet

from neighborly.core.ecs import Component
from neighborly.core.engine import AbstractFactory, ComponentDefinition

class Residence(Component):
    __slots__ = "owners", "former_owners", "residents", "former_residents", "_vacant"

    def __init__(self) -> None:
        super().__init__()
        self.owners: OrderedSet[int] = OrderedSet([])
        self.former_owners: OrderedSet[int] = OrderedSet([])
        self.residents: OrderedSet[int] = OrderedSet([])
        self.former_residents: OrderedSet[int] = OrderedSet([])
        self._vacant: bool = True

    def to_dict(self) -> Dict[str, Any]:
        return {
            **super().to_dict(),
            "owners": list(self.owners),
            "former_owners": list(self.former_owners),
            "residents": list(self.residents),
            "former_residents": list(self.former_residents),
            "vacant": self._vacant,
        }

    def add_tenant(self, person: int, is_owner: bool = False) -> None:
        """Add a tenant to this residence"""
        self.residents.add(person)
        if is_owner:
            self.owners.add(person)
        self._vacant = False

    def remove_tenant(self, person: int) -> None:
        """Remove a tenant from this residence"""
        self.residents.remove(person)
        self.former_residents.add(person)
        if person in self.owners:
            self.owners.remove(person)
            self.former_owners.add(person)
        self._vacant = len(self.residents) == 0

    def is_vacant(self) -> bool:
        return self._vacant

class ResidenceFactory(AbstractFactory):
    def __init__(self):
        super().__init__("Residence")

    def create(self, spec: ComponentDefinition, **kwargs) -> Residence:
        return Residence()
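A minimal sketch of the tenancy lifecycle the component models (entity ids 1 and 2 are hypothetical):

home = Residence()
home.add_tenant(1, is_owner=True)
home.add_tenant(2)
print(home.is_vacant())   # False
home.remove_tenant(2)
home.remove_tenant(1)
print(home.is_vacant())   # True; both ids now appear in former_residents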
| 32.446429
| 85
| 0.63071
| 199
| 1,817
| 5.537688
| 0.261307
| 0.054446
| 0.083485
| 0.098004
| 0.181488
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000732
| 0.248211
| 1,817
| 55
| 86
| 33.036364
| 0.806003
| 0.035773
| 0
| 0
| 0
| 0
| 0.063182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.170732
| false
| 0
| 0.097561
| 0.073171
| 0.414634
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f2439cb603c2e5bb9b0700a3b097f6415267d55a
| 15,518
|
py
|
Python
|
tests/SBHRun_Environment.py
|
SD2E/synbiohub_adapter
|
492f9ef1054b17d790654310b895bb7ad155808e
|
[
"MIT"
] | 1
|
2019-10-08T20:31:16.000Z
|
2019-10-08T20:31:16.000Z
|
tests/SBHRun_Environment.py
|
SD2E/synbiohub_adapter
|
492f9ef1054b17d790654310b895bb7ad155808e
|
[
"MIT"
] | 84
|
2018-03-06T16:02:30.000Z
|
2020-09-01T18:17:54.000Z
|
tests/SBHRun_Environment.py
|
SD2E/synbiohub_adapter
|
492f9ef1054b17d790654310b895bb7ad155808e
|
[
"MIT"
] | 1
|
2019-02-06T17:17:54.000Z
|
2019-02-06T17:17:54.000Z
|
import threading
import time
import pandas as pd
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import os
import fnmatch
import random
import re
import getpass
import sys

from rdflib import Graph
from synbiohub_adapter.SynBioHubUtil import *
from sbol import *

"""
This class will perform unit testing to query information from SynBioHub's instances.
Installation Requirement(s):
- This test environment requires two third party packages to display plot:
    1. pip install pandas
    2. python -mpip install -U matplotlib
To run this python file, enter in the following command from the synbiohub_adapter directory:
    python -m tests.SBHRun_Environment
author(s) :Tramy Nguyen
"""

class myThread(threading.Thread):
    """
    An instance of this class will allow a user to execute N numbers of pushes to a SynBioHub instance.
    sbolTriples: A list of SBOL Triples that stores SBOL documents
    sbh_connector: An instance of pySBOL's PartShop needed to perform login
        for pushing and pulling data to and from SynBioHub
    """

    def __init__(self, sbolTriples, sbh_connector):
        threading.Thread.__init__(self)
        self.sbolTriples_list = sbolTriples
        self.sbh_connector = sbh_connector
        self.thread_start = self.thread_end = 0
        self.tupTime_List = []
        self.pushPull_List = []

    """
    A default run method that will run after a thread is created and started
    """
    def run(self):
        # wall-clock timer (time.clock was removed in Python 3.8)
        self.thread_start = time.perf_counter()
        for sbolTriple in self.sbolTriples_list:
            push_time = push_sbh(sbolTriple.sbolDoc(), self.sbh_connector)
            self.tupTime_List.append((push_time, sbolTriple))
            # TODO: currently pull will not work on current pySBOL build so set to 0
            self.pushPull_List.append((push_time, 0))
        self.thread_end = time.perf_counter()

    """
    Returns the time (seconds) it took to run an instance of this thread
    """
    def thread_duration(self):
        return self.thread_end - self.thread_start

    """
    Returns a list of python triples where each Triples are structured as (t1, t2).
    t1 = Time it took for each push
    t2 = An instance of the SBOLTriple class that holds information about the given SBOL file.
    """
    def tripleTime_List(self):
        return self.tupTime_List

    def pushPull_Times(self):
        return self.pushPull_List

class SBOLTriple():
    """
    An instance of this class will allow a user to access 3 types of information about an SBOLDocument.
    1. the number of SBOL triples found in a SBOL document,
    2. the SBOL document object generated from pySBOL, and
    3. the full path of the XML file used to generate the SBOL document.
    xmlFile: the full path of the SBOL File used to create the SBOL document
    """

    def __init__(self, xmlFile, uid):
        xmlGraph = Graph()
        xmlGraph.parse(xmlFile)
        total_obj = []
        for sbol_subj, sbol_pred, sbol_obj in xmlGraph:
            total_obj.append(sbol_obj)
        self.__tripleSize = len(total_obj)
        self.__sbolDoc = self.create_sbolDoc(xmlFile, uid)
        self.__sbolFile = xmlFile

    """
    Returns a new SBOL document created from the given SBOL file and an instance of an SBOLTriple
    """
    def create_sbolDoc(self, sbolFile, uid):
        sbolDoc = Document()
        sbolDoc.read(sbolFile)
        sbolDoc.displayId = uid
        sbolDoc.name = uid + "_name"
        sbolDoc.description = uid + "_description"
        sbolDoc.version = str("1")
        return sbolDoc

    # Returns this objects SBOL document
    def sbolDoc(self):
        return self.__sbolDoc

    # Returns a string value of the SBOL file that was assigned to this triple object
    def get_xmlFile(self):
        return self.__sbolFile

    # Returns the total number of SBOL triples found in the given SBOL file
    def totalTriples(self):
        return self.__tripleSize

def get_uniqueID(idPrefix):
    """Generates a unique id
    """
    t = time.ctime()
    uid = '_'.join([idPrefix, t])
    return re.sub(r'[: ]', '_', uid)

def create_sbolDocs(numDocs, idPrefix, sbolFile):
    """Returns a list of SBOL Documents
    numDocs: An integer value to indicate how many SBOL documents this method should create
    idPrefix: A unique id prefix to set each SBOL document
    sbolFile: the SBOL file to create an SBOL document from
    """
    sbolDoc_List = []
    sbolTriples = []
    u_counter = 0
    for i in range(0, numDocs):
        uid = get_uniqueID(idPrefix + "_d" + str(i))
        trip_obj = SBOLTriple(sbolFile, uid)
        sbolTriples.append(trip_obj)
        sbolDoc_List.append(trip_obj.sbolDoc())
        print("created doc%s" % i)
    return sbolDoc_List, sbolTriples

def get_randomFile(sbolFiles):
    """Returns the full path of a randomly selected SBOL file found in the given directory
    dirLocation: The directory to select a random SBOL file from
    """
    selectedFile = random.choice(sbolFiles)
    return selectedFile

def get_sbolList(dirLocation):
    """Returns a list of xml file found in the given directory
    """
    for root, dir, files in os.walk(dirLocation):
        sbolFiles = [os.path.abspath(os.path.join(root, fileName)) for fileName in files]
        return sbolFiles

def push_sbh(sbolDoc, sbh_connector):
    """Returns the time (seconds) it takes to make a push to a new Collection on SynBioHub
    sbh_connector: An instance of pySBOL's PartShop needed to perform login
        for pushing and pulling data to and from SynBioHub
    sbolURI: The URI of the SynBioHub collection or the specific part to be fetched
    """
    start = time.perf_counter()
    result = sbh_connector.submit(sbolDoc)
    end = time.perf_counter()
    print(result)
    if result != 'Successfully uploaded':
        sys.exit()
    return end - start

def pull_sbh(sbh_connector, sbolURI):
    """Returns the time (seconds) it takes to make a pull from an existing SynBioHub Collection
    sbh_connector: An instance of pySBOL's PartShop needed to perform login
        for pushing and pulling data to and from SynBioHub
    sbolURI: The URI of the SynBioHub collection or the specific part to be fetched
    """
    sbolDoc = Document()
    setHomespace("https://bbn.com")
    start = time.perf_counter()
    sbh_connector.pull(sbolURI, sbolDoc)
    end = time.perf_counter()
    if sbolDoc is None:
        print("Found nothing and caused no error.")
    else:
        experimentalData_tl = []
        for tl in sbolDoc:
            if tl.type == 'http://sd2e.org#ExperimentalData':
                experimentalData_tl.append(tl)
        if len(experimentalData_tl) != 74:
            print("Found the wrong SynBioHub Part with this uri: %s" % sbolURI)
    return end - start

def createThreads(threadNum, sbh_connector, sbolDoc_size, idPrefix, sbolFile):
    threads = []
    for t in range(threadNum):
        time.sleep(1)
        _, sbolTriples = create_sbolDocs(sbolDoc_size, idPrefix + "_t" + str(t), sbolFile)
        threads.append(myThread(sbolTriples, sbh_connector))
    return threads

def generate_speedData(sbolFile, sbh_connector, sbolDoc_size, idPrefix):
    pushTimes = []
    pullTimes = []
    currTotal = []
    threads = createThreads(1, sbh_connector, sbolDoc_size, idPrefix + "ST_Coll_", sbolFile)
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    for t in threads:
        sum = 0
        for r1, r2 in t.pushPull_Times():
            pushTimes.append(r1)
            pullTimes.append(r2)
            sum += r1
        currTotal.append(sum)
    df = pd.DataFrame({"Pull_Time": pullTimes,
                       "Push_Time": pushTimes,
                       "Total_Time": currTotal})
    # df.loc['Total'] = df.sum()
    return df

def run_triples(sbh_connector, collPrefix, sbolFiles):
    triples_list = []
    doc = 0
    for s in sbolFiles:
        print(s)
        uid = get_uniqueID(collPrefix + "_t" + str(1) + "_d" + str(doc))
        trip_obj = SBOLTriple(s, uid)
        triples_list.append(trip_obj)
        doc += 1
    t = myThread(triples_list, sbh_connector)
    t.start()
    t.join()
    pushTimes = []
    sbol_tripleSizes = []
    for v1, v2 in t.tripleTime_List():
        pushTimes.append(v1)
        sbol_tripleSizes.append(v2.totalTriples())
    return sbol_tripleSizes, pushTimes

def run_setThreads(sbh_connector, set_size, t_growthRate, sbolFile, sbolDoc_size, collPrefix):
    setId_List = []
    threadId_List = []
    threadDur_List = []
    threadSize = t_growthRate
    for i in range(1, set_size + 1):
        curr_set = createThreads(threadSize, sbh_connector, sbolDoc_size, collPrefix, sbolFile)
        for t in curr_set:
            t.start()
        for t in curr_set:
            t.join()
        for t in curr_set:
            t_dur = t.thread_duration()
            threadId_List.append(t.getName())
            threadDur_List.append(t_dur)
        setId_List.extend(["set_t" + str(threadSize)] * len(curr_set))
        threadSize += t_growthRate
    return setId_List, threadId_List, threadDur_List

def generate_setData(sbh_connector, iterations, set_size, t_growthRate, sbolFile, sbolDoc_size, collPrefix):
    runId_List = []
    setId_List = []
    threadId_List = []
    threadDur_List = []
    for i in range(1, iterations + 1):
        r1, r2, r3 = run_setThreads(sbh_connector, set_size, t_growthRate, sbolFile, sbolDoc_size, collPrefix)
        runId_List.extend(['run' + str(i)] * len(r1))
        setId_List.extend(r1)
        threadId_List.extend(r2)
        threadDur_List.extend(r3)
    df = pd.DataFrame({"Run_ID": runId_List,
                       "Set_ID": setId_List,
                       "Thread_ID": threadId_List,
                       "Time/Thread": threadDur_List},
                      columns=['Run_ID', 'Set_ID', 'Thread_ID', 'Time/Thread'])
    return df

def generate_tripleData(sbh_connector, iterations, collPrefix, sbolFiles):
    runId_List = []
    tripeSize_List = []
    pushTime_List = []
    for i in range(1, iterations + 1):
        sbol_tripleSizes, pushTimes = run_triples(sbh_connector, collPrefix + str(i), sbolFiles)
        runId_List.extend(['Run' + str(i)] * len(pushTimes))
        tripeSize_List.extend(sbol_tripleSizes)
        pushTime_List.extend(pushTimes)
    df = pd.DataFrame({"Run_ID": runId_List,
                       "Triple_Size": tripeSize_List,
                       "Push_Time": pushTime_List},
                      columns=['Run_ID', 'Triple_Size', 'Push_Time'])
    return df

def get_fileName(filePath):
    file_ext = os.path.basename(filePath)
    file_name, f_ext = os.path.splitext(file_ext)
    return file_name

def br_speed(sbh_connector, sbolDoc_size, sbolFiles):
    for f in sbolFiles:
        print(f)
        df = generate_speedData(f, sbh_connector, sbolDoc_size, "RS_")
        fileName = get_fileName(f)
        trip_obj = SBOLTriple(f, "temp_id")
        triple_size = trip_obj.totalTriples()
        create_SpeedLinePlot(df, f, sbolDoc_size, triple_size)
        create_SpeedLine2Plot(df, f, sbolDoc_size, triple_size)
        df.to_csv("outputs/SpeedResult_f%s_d%s.csv" % (fileName, sbolDoc_size))

def br_setThread(sbh_connector, iterations, set_size, t_growthRate, sbolDoc_size, sbolFiles):
    for f in sbolFiles:
        df = generate_setData(sbh_connector, iterations, set_size, t_growthRate, f, sbolDoc_size, "RST_")
        trip_obj = SBOLTriple(f, "temp_id")
        fileName = get_fileName(f)
        create_SetBarPlot(df, iterations, set_size, f, trip_obj.totalTriples(), sbolDoc_size)
        df.to_csv("outputs/Set_f%s_iter%s_s%s_d%s.csv" % (fileName, iterations, set_size, sbolDoc_size))

def br_triples(sbh_connector, iterations, sbolFiles):
    df = generate_tripleData(sbh_connector, iterations, "RT", sbolFiles)
    create_TripleScatterPlot(df, iterations)
    df.to_csv("outputs/Triples_iter%s.csv" % (iterations))

def create_SpeedLinePlot(df, f, sbolDoc_size, trip_size):
    y_max = 20
    fig, ax = plt.subplots()
    plt.ylim((0, y_max))
    ax.set_title("Time to Push %s Triples to SynBioHub" % trip_size)
    ax.set_ylabel("Time to Push (sec)")
    ax.set_xlabel("Push Index")
    df.plot(x=df.index + 1, y='Push_Time', ax=ax)
    fileName = get_fileName(f)
    fig.savefig('outputs/SpeedResult_f%s_d%s.pdf' % (fileName, sbolDoc_size))

def create_SpeedLine2Plot(df, f, sbolDoc_size, trip_size):
    fig, ax = plt.subplots()
    ax.set_title("Time to Push %s Triples to SynBioHub" % trip_size)
    ax.set_ylabel("Time to Push (sec)")
    ax.set_xlabel("Push Index")
    df.plot(x=df.index + 1, y='Total_Time', ax=ax)
    fileName = get_fileName(f)
    fig.savefig('outputs/SpeedResult2_f%s_d%s.pdf' % (fileName, sbolDoc_size))

def create_SetBarPlot(df, iterations, set_size, f, trip_size, doc_size):
    fig, ax = plt.subplots()
    # max_index = df.groupby(['Run_ID', 'Set_ID'])['Time/Thread'].transform(max) == df['Time/Thread']
    # max_df = df[max_index]
    grouped_max = df.groupby(['Set_ID'])
    means = grouped_max.mean()
    errors = grouped_max.std()
    g = plt.get_cmap('Dark2')
    means.plot.barh(xerr=errors, ax=ax, legend=False, colormap=g)
    ax.set_title("Average Time to Push %s Triples per Thread" % (trip_size))
    ax.set_xlabel("Time to Push (sec)")
    ax.set_ylabel("Thread Group")
    fileName = get_fileName(f)
    fig.savefig('outputs/Set_f%s_iter%s_s%s_d%s.pdf' % (fileName, iterations, set_size, doc_size))

def create_TripleScatterPlot(df, iterations):
    fig, ax = plt.subplots()
    plt.ylim((0, 20))
    grouped_runs = df.groupby('Run_ID')
    for name, group in grouped_runs:
        fit = np.polyfit(group['Triple_Size'], group['Push_Time'], deg=1)
        ax.plot(group['Triple_Size'], fit[0] * group['Triple_Size'] + fit[1], color='black')
        ax.scatter(data=group, x='Triple_Size', y='Push_Time', marker='o', c='orange')
    ax.set_title("Time to Push SBOL Documents with Varying Size")
    ax.set_ylabel("Time to Push (sec)")
    ax.set_xlabel("Document Size (# of Triples)")
    fig.savefig('outputs/Triples_iter%s.pdf' % (iterations))

def backup_sequentialLoad():
    # At one point, update pushing to SBH to do something like this so performance doesn't suffer.
    sbolDoc = Document()
    sbolDoc.read("./examples/c_trips10000.xml")
    for i in range(1):
        print(i)
        uid = get_uniqueID("ex_")
        sbolDoc.displayId = uid
        sbolDoc.name = uid + "_name"
        sbolDoc.description = uid + "_description"
        sbolDoc.version = str("1")
        push_sbh(sbolDoc, sbh_connector)

if __name__ == '__main__':
    server_name = "https://synbiohub.bbn.com"
    print("Logging into: " + server_name)
    sbh_connector = PartShop(server_name)
    sbh_user = input('Enter Username: ')
    sbh_connector.login(sbh_user, getpass.getpass(prompt='Enter SynBioHub Password: ', stream=sys.stderr))
    # Config.setOption("verbose", True)
    # sbolFiles = get_sbolList("./examples/workingFiles")
    sbolFiles = ["./examples/c_trips40000.xml"]
    iterations = 1
    sbolDoc_size = 1
    br_speed(sbh_connector, sbolDoc_size, sbolFiles)
    # br_triples(sbh_connector, iterations, sbolFiles)
    # iterations, set_size=10, t_growthRate=5, sbolDoc_size=100
    # TODO: MAKE SURE TO CHANGE COLOR OF BAR GRAPH TO MAKE IT LOOK COOL...
    # br_setThread(sbh_connector, 3, 5, 3, 50, sbolFiles)
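The push timing in push_sbh boils down to bracketing one call with a wall-clock timer; a minimal sketch of the same measurement pattern with a stand-in workload (time.perf_counter replaces the removed time.clock):

import time

def timed(fn, *args):
    """Return (seconds_elapsed, result) for one call, as push_sbh does."""
    start = time.perf_counter()
    result = fn(*args)
    return time.perf_counter() - start, result

elapsed, _ = timed(sum, range(1_000_000))
print("took %.4f s" % elapsed)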
| 33.661605
| 110
| 0.660974
| 2,072
| 15,518
| 4.775579
| 0.195946
| 0.042446
| 0.009702
| 0.016271
| 0.318343
| 0.261445
| 0.199697
| 0.165235
| 0.152299
| 0.135927
| 0
| 0.007362
| 0.238497
| 15,518
| 460
| 111
| 33.734783
| 0.829991
| 0.163681
| 0
| 0.229167
| 0
| 0
| 0.097857
| 0.022608
| 0
| 0
| 0
| 0.004348
| 0
| 1
| 0.107639
| false
| 0.006944
| 0.052083
| 0.020833
| 0.236111
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f245528c941762eda827c561627c5aa634c97c9f
| 2,842
|
py
|
Python
|
setup.py
|
Unidata/drilsdown
|
55aca7168fb390f31c36729605401564e9b82c56
|
[
"MIT"
] | 3
|
2018-05-25T00:19:12.000Z
|
2021-01-08T15:54:36.000Z
|
setup.py
|
suvarchal/drilsdown
|
e82f58396f640fef847353caf1bd4b2bf016c7a6
|
[
"MIT"
] | 11
|
2017-10-31T20:15:24.000Z
|
2019-12-16T21:01:55.000Z
|
setup.py
|
suvarchal/drilsdown
|
e82f58396f640fef847353caf1bd4b2bf016c7a6
|
[
"MIT"
] | 10
|
2018-02-08T22:23:28.000Z
|
2019-09-29T23:25:19.000Z
|
import os
from six import iteritems
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.install import install
import subprocess

PACKAGE_NAME = 'drilsdown'
SOURCES = {
    'ipython_IDV': 'projects/ipython_IDV',
    'idv_teleport': 'projects/IDV_teleport',
    'ramadda_publish': 'projects/RAMADDA_publish',
}
VERSION = '2.4.91'

def install_drilsdown_projects(sources, develop=False):
    """ Use pip to install all drilsdown projects. """
    print("installing all drilsdown projects in {} mode".format(
        "development" if develop else "normal"))
    wd = os.getcwd()
    for k, v in iteritems(sources):
        try:
            os.chdir(os.path.join(wd, v))
            if develop:
                subprocess.check_call(['pip', 'install', '-e', '.'])  # could be pip3 on certain platforms
            else:
                subprocess.check_call(['pip', 'install', '.'])  # could be pip3 on certain platforms
        except Exception as e:
            print("Oops, something went wrong installing", k)
            print(e)
        finally:
            os.chdir(wd)

class DevelopCmd(develop):
    """ Add custom steps for the develop command """
    def run(self):
        install_drilsdown_projects(SOURCES, develop=True)
        develop.run(self)

class InstallCmd(install):
    """ Add custom steps for the install command """
    def run(self):
        install_drilsdown_projects(SOURCES, develop=False)
        install.run(self)

setup(
    name=PACKAGE_NAME,
    version=VERSION,
    author="Drilsdown team",
    author_email="drilsdown@unidata.ucar.edu",
    description="A collection of tools for jupyter notebooks",
    long_description_content_type='text/markdown',
    long_description=open('README.md').read(),
    url="https://github.com/Unidata/drilsdown",
    license="MIT",
    classifiers=[
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    install_requires=[
        'future',
        'six',
        'requests',
        'ipython',
        'ipywidgets>=7.1.0rc',
        'jupyter-client',
        # 'ipython_IDV>=' + VERSION + "'",  # cannot be source and a dependency??
        'ipython-IDV',  # from pypi
        'ramadda_publish',  # from pypi
        'idv_teleport',  # from pypi
    ],
    cmdclass={
        # 'install': InstallCmd,  # do not overwrite for now to make
        # pip install and python setup.py install do same.
        # note in class pip might be called pip3 on certain platforms
        'develop': DevelopCmd,
    },
    extras_require={
        'addons': ['numpy', 'netcdf4', 'xarray', 'metpy'],
        'visual': ['pyviz', 'geoviews'],
    }
)
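Running `python setup.py develop` triggers DevelopCmd.run(), which pip-installs each subproject in editable mode. A minimal sketch of the equivalent manual steps from the repo root, using subprocess's cwd argument instead of the chdir dance (assumes pip is on PATH):

import subprocess

for subproject in ('projects/ipython_IDV', 'projects/IDV_teleport', 'projects/RAMADDA_publish'):
    # same effect as install_drilsdown_projects(SOURCES, develop=True) for one entry
    subprocess.check_call(['pip', 'install', '-e', '.'], cwd=subproject)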
| 32.295455
| 105
| 0.60943
| 317
| 2,842
| 5.381703
| 0.457413
| 0.049824
| 0.058617
| 0.054513
| 0.184056
| 0.126612
| 0.064478
| 0.064478
| 0.064478
| 0
| 0
| 0.009108
| 0.26601
| 2,842
| 87
| 106
| 32.666667
| 0.808725
| 0.165377
| 0
| 0.055556
| 0
| 0
| 0.29108
| 0.030303
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.152778
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f24567e433386b2908e8d4a58f10fb0b2a6b3b98
| 2,129
|
py
|
Python
|
ejercicios/Ejercicio6.py
|
Xavitheforce/Ejercicios_Iteracion
|
e840439e1277b5946592128d5c771d895c2fac2c
|
[
"Apache-2.0"
] | null | null | null |
ejercicios/Ejercicio6.py
|
Xavitheforce/Ejercicios_Iteracion
|
e840439e1277b5946592128d5c771d895c2fac2c
|
[
"Apache-2.0"
] | null | null | null |
ejercicios/Ejercicio6.py
|
Xavitheforce/Ejercicios_Iteracion
|
e840439e1277b5946592128d5c771d895c2fac2c
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime

class Banco():
    def __init__(self):
        self.cuentas = {
            '1': {
                'nombre': 'Marcos Martinez',
                'balance': 173735,
                'tipo': 1,
                'movimientos': []
            }, '2': {
                'nombre': 'Alejandro Sanchez',
                'balance': 1342,
                'tipo': 0,
                'movimientos': []
            }, '3': {
                'nombre': 'Claudia Plaza',
                'balance': 120984,
                'tipo': 1,
                'movimientos': []
            },
        }

    def movimiento(self):
        cuenta, cuenta_destino, cantidad = input('Introduce el Número de Cuenta de origen: '), input('Introduce el Número de Cuenta de destino: '), input('Introduce la cantidad a Mover: ')
        # validate both accounts before touching any balance, so an unknown
        # account number prints a message instead of raising KeyError
        if str(cuenta) not in self.cuentas:
            return print('La cuenta de origen no está registrada.')
        if str(cuenta_destino) not in self.cuentas:
            return print('La cuenta de destino no está registrada.')
        balance_cuenta = self.cuentas[str(cuenta)]['balance']
        if balance_cuenta < int(cantidad):
            return print('La cantidad introducida es superior a la disponible en la cuenta Nº: ' + str(cuenta) + '.')
        movimientos_cuenta_origen = self.cuentas[str(cuenta)]['movimientos']
        movimientos_cuenta_origen.append({
            'cuenta_destino': str(cuenta_destino),
            'cantidad': '-' + str(cantidad),
            'hora': str(datetime.now())
        })
        self.cuentas[str(cuenta)]['balance'] -= int(cantidad)
        movimientos_cuenta_destino = self.cuentas[str(cuenta_destino)]['movimientos']
        movimientos_cuenta_destino.append({
            'cuenta_origen': str(cuenta),
            'cantidad': '+' + str(cantidad),
            'hora': str(datetime.now())
        })
        self.cuentas[str(cuenta_destino)]['balance'] += int(cantidad)
        print(self.cuentas[cuenta]['movimientos'])
        print(self.cuentas[cuenta]['balance'], self.cuentas[cuenta_destino]['balance'])

    def iniciar(self):
        start = input('Bienvenido a Bancos Ramirez. ¿Quieres realizar alguna operación?(S/N): ')
        if start.lower() == 's':
            decision = start
            while decision.lower() == 's':
                # reuse this instance so updated balances persist between operations
                self.movimiento()
                decision = input('¿Quieres seguir haciendo operaciones?(S/N): ')
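A minimal usage sketch; the valid account numbers ('1', '2', '3') come from the hard-coded dictionary above:

if __name__ == '__main__':
    Banco().iniciar()  # e.g. move funds from account '1' to '2' at the prompts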
| 39.425926
| 184
| 0.622828
| 241
| 2,129
| 5.419087
| 0.307054
| 0.092649
| 0.075038
| 0.107198
| 0.264165
| 0.174579
| 0.136294
| 0.087289
| 0.087289
| 0.087289
| 0
| 0.013253
| 0.220291
| 2,129
| 54
| 185
| 39.425926
| 0.772289
| 0
| 0
| 0.169811
| 0
| 0
| 0.296714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0
| 0.018868
| 0
| 0.150943
| 0.09434
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f2490fc27568d943c3ececc3e75fce355b5da3ff
| 3,497
|
py
|
Python
|
advent/days/day17/day.py
|
RuedigerLudwig/advent2021
|
ce069d485bb34b4752ec4e89f195f7cc8cf084cc
|
[
"Unlicense"
] | null | null | null |
advent/days/day17/day.py
|
RuedigerLudwig/advent2021
|
ce069d485bb34b4752ec4e89f195f7cc8cf084cc
|
[
"Unlicense"
] | null | null | null |
advent/days/day17/day.py
|
RuedigerLudwig/advent2021
|
ce069d485bb34b4752ec4e89f195f7cc8cf084cc
|
[
"Unlicense"
] | null | null | null |
from __future__ import annotations

from itertools import product
from typing import Iterator

day_num = 17

def part1(lines: Iterator[str]) -> int:
    probe = Target.from_str(next(lines))
    mx = max(y for _, y in probe.get_possible())
    return mx * (mx + 1) >> 1

def part2(lines: Iterator[str]) -> int:
    probe = Target.from_str(next(lines))
    return probe.count_possible()

Range = tuple[int, int]
XStepRange = tuple[int, int | None]
YStepRange = tuple[int, int]
Pos = tuple[int, int]

class Target:
    @staticmethod
    def from_str(line: str) -> Target:
        def get_range(text: str) -> Range:
            match text.split(".."):
                case [start, end]:
                    return int(start.strip()), int(end.strip())
                case _:
                    raise NotImplementedError

        match line.split(","):
            case [x, y]:
                range_x = get_range(x.split("=")[1])
                range_y = get_range(y.split("=")[1])
                return Target(range_x, range_y)
            case _:
                raise NotImplementedError

    def __init__(self, range_x: Range, range_y: Range) -> None:
        self.range_x = range_x
        self.range_y = range_y

    def __eq__(self, other: object) -> bool:
        if isinstance(other, Target):
            return self.range_x == other.range_x and self.range_y == other.range_y
        raise NotImplementedError

    def possible_x(self) -> Iterator[tuple[int, XStepRange]]:
        for x_start in range(1, self.range_x[1] + 1):
            min_steps: int | None = None
            steps = 1
            x_pos = x_start
            x_vel = x_start - 1
            done = False
            while not done:
                if x_pos > self.range_x[1]:
                    if min_steps is not None:
                        yield x_start, (min_steps, steps - 1)
                    done = True
                elif x_pos >= self.range_x[0] and min_steps is None:
                    min_steps = steps
                elif x_vel == 0:
                    if min_steps is not None:
                        yield x_start, (min_steps, None)
                    done = True
                steps += 1
                x_pos += x_vel
                x_vel -= 1

    def possible_y(self) -> Iterator[tuple[int, YStepRange]]:
        for y_start in range(self.range_y[0], -self.range_y[0] + 1):
            if y_start <= 0:
                steps = 1
                y_vel = y_start - 1
            else:
                steps = y_start * 2 + 2
                y_vel = -y_start - 2
            min_steps = None
            y_pos = y_vel + 1
            done = False
            while not done:
                if y_pos < self.range_y[0]:
                    if min_steps is not None:
                        yield y_start, (min_steps, steps - 1)
                    done = True
                elif y_pos <= self.range_y[1] and min_steps is None:
                    min_steps = steps
                steps += 1
                y_pos += y_vel
                y_vel -= 1

    def get_possible(self) -> Iterator[Pos]:
        posx = self.possible_x()
        posy = self.possible_y()
        for (x, (min_x, max_x)), (y, (min_y, max_y)) in product(posx, posy):
            mn = max(min_x, min_y)
            mx = max_y if max_x is None else min(max_x, max_y)
            if mn <= mx:
                yield x, y

    def count_possible(self) -> int:
        return sum(1 for _ in self.get_possible())
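A worked check against the published Advent of Code 2021 day 17 sample input; the highest launch uses y_start = 9, and part1 returns the triangle number 9 * 10 / 2:

sample = "target area: x=20..30, y=-10..-5"
print(part1(iter([sample])))  # 45
print(part2(iter([sample])))  # 112 distinct initial velocities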
| 31.223214
| 82
| 0.501859
| 455
| 3,497
| 3.630769
| 0.167033
| 0.043584
| 0.03632
| 0.021792
| 0.259685
| 0.21247
| 0.21247
| 0.183414
| 0.101695
| 0.101695
| 0
| 0.016635
| 0.398341
| 3,497
| 111
| 83
| 31.504505
| 0.768536
| 0
| 0
| 0.255556
| 0
| 0
| 0.00143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.033333
| 0.011111
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|