hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ce3d81ce24cceec4599dc4b76ddf499bb737ff2b | 2,140 | py | Python | src/python/entity_align/eval/ScoreFile.py | amnda-d/learned-string-alignments | 2d4ecf4f2ee9dd90ba01ff5d86e8e3913b704723 | [
"Apache-2.0"
] | 40 | 2018-03-05T23:30:19.000Z | 2021-09-28T04:13:20.000Z | src/python/entity_align/eval/ScoreFile.py | amnda-d/learned-string-alignments | 2d4ecf4f2ee9dd90ba01ff5d86e8e3913b704723 | [
"Apache-2.0"
] | 2 | 2018-05-25T04:19:40.000Z | 2019-12-03T23:55:13.000Z | src/python/entity_align/eval/ScoreFile.py | amnda-d/learned-string-alignments | 2d4ecf4f2ee9dd90ba01ff5d86e8e3913b704723 | [
"Apache-2.0"
] | 5 | 2018-04-24T14:34:57.000Z | 2019-03-21T16:59:50.000Z | """
Copyright (C) 2017-2018 University of Massachusetts Amherst.
This file is part of "learned-string-alignments"
http://github.com/iesl/learned-string-alignments
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from entity_align.eval.EvalHitsAtK import eval_hits_at_k_file
from entity_align.eval.EvalMap import eval_map_file
def score(prediction_filename, model_name, dataset_name):
    """ Given a file of predictions, compute all metrics
    :param prediction_filename: TSV file of predictions
    :param model_name: Name of the model
    :param dataset_name: Name of the dataset
    :return: TSV-formatted string of metric scores
    """
    counter = 0
    scores = ""
    map_score = eval_map_file(prediction_filename)
    scores += "{}\t{}\t{}\tMAP\t{}\n".format(model_name, dataset_name, counter, map_score)
    scores += "{}\t{}\t{}\tHits@1\t{}\n".format(model_name, dataset_name, counter,
                                                eval_hits_at_k_file(prediction_filename, 1))
    scores += "{}\t{}\t{}\tHits@10\t{}\n".format(model_name, dataset_name, counter,
                                                 eval_hits_at_k_file(prediction_filename, 10))
    scores += "{}\t{}\t{}\tHits@50\t{}\n".format(model_name, dataset_name, counter,
                                                 eval_hits_at_k_file(prediction_filename, 50))
    return scores


# CLI entry point: ScoreFile.py <predictions_tsv> <output_file> [model_name] [dataset_name]
if __name__ == "__main__":
    in_file = sys.argv[1]
    out_file = sys.argv[2]
    model = sys.argv[3] if len(sys.argv) > 3 else "model"
    dataset = sys.argv[4] if len(sys.argv) > 4 else "dataset"
    with open(out_file, 'w') as fout:
        s = score(in_file, model, dataset)
        fout.write(s)
        print(s)
| 41.153846 | 94 | 0.674299 | 311 | 2,140 | 4.466238 | 0.395498 | 0.043197 | 0.057595 | 0.071994 | 0.182865 | 0.172066 | 0.172066 | 0.172066 | 0.146868 | 0.146868 | 0 | 0.017334 | 0.218224 | 2,140 | 51 | 95 | 41.960784 | 0.812911 | 0.404673 | 0 | 0 | 0 | 0 | 0.093927 | 0.076923 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.208333 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce3e73b7b0b6e79bc8701bf1d8699429d27e0558 | 3,920 | py | Python | opentelemetry-sdk/tests/metrics/test_point.py | srikanthccv/opentelemetry-python | 1dd18556dfe1089d04c417adeddfdd3b18e6d67e | [
"Apache-2.0"
] | null | null | null | opentelemetry-sdk/tests/metrics/test_point.py | srikanthccv/opentelemetry-python | 1dd18556dfe1089d04c417adeddfdd3b18e6d67e | [
"Apache-2.0"
] | null | null | null | opentelemetry-sdk/tests/metrics/test_point.py | srikanthccv/opentelemetry-python | 1dd18556dfe1089d04c417adeddfdd3b18e6d67e | [
"Apache-2.0"
] | null | null | null | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from opentelemetry.sdk.metrics.export import (
Gauge,
Histogram,
HistogramDataPoint,
Metric,
NumberDataPoint,
Sum,
)
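# Helper for the tests below: wraps the given data points object in a Metric
# carrying fixed test metadata (name, description, unit).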
def _create_metric(data):
return Metric(
name="test-name",
description="test-description",
unit="test-unit",
data=data,
)
class TestDatapointToJSON(TestCase):
def test_sum(self):
self.maxDiff = None
point = _create_metric(
Sum(
data_points=[
NumberDataPoint(
attributes={"attr-key": "test-val"},
start_time_unix_nano=10,
time_unix_nano=20,
value=9,
)
],
aggregation_temporality=2,
is_monotonic=True,
)
)
self.assertEqual(
'{"name": "test-name", "description": "test-description", "unit": "test-unit", "data": "{\\"data_points\\": \\"[{\\\\\\"attributes\\\\\\": {\\\\\\"attr-key\\\\\\": \\\\\\"test-val\\\\\\"}, \\\\\\"start_time_unix_nano\\\\\\": 10, \\\\\\"time_unix_nano\\\\\\": 20, \\\\\\"value\\\\\\": 9}]\\", \\"aggregation_temporality\\": 2, \\"is_monotonic\\": true}"}',
point.to_json(),
)
def test_gauge(self):
point = _create_metric(
Gauge(
data_points=[
NumberDataPoint(
attributes={"attr-key": "test-val"},
start_time_unix_nano=10,
time_unix_nano=20,
value=9,
)
]
)
)
self.assertEqual(
'{"name": "test-name", "description": "test-description", "unit": "test-unit", "data": "{\\"data_points\\": \\"[{\\\\\\"attributes\\\\\\": {\\\\\\"attr-key\\\\\\": \\\\\\"test-val\\\\\\"}, \\\\\\"start_time_unix_nano\\\\\\": 10, \\\\\\"time_unix_nano\\\\\\": 20, \\\\\\"value\\\\\\": 9}]\\"}"}',
point.to_json(),
)
def test_histogram(self):
point = _create_metric(
Histogram(
data_points=[
HistogramDataPoint(
attributes={"attr-key": "test-val"},
start_time_unix_nano=50,
time_unix_nano=60,
count=1,
sum=0.8,
bucket_counts=[0, 0, 1, 0],
explicit_bounds=[0.1, 0.5, 0.9, 1],
min=0.8,
max=0.8,
)
],
aggregation_temporality=1,
)
)
self.maxDiff = None
self.assertEqual(
'{"name": "test-name", "description": "test-description", "unit": "test-unit", "data": "{\\"data_points\\": \\"[{\\\\\\"attributes\\\\\\": {\\\\\\"attr-key\\\\\\": \\\\\\"test-val\\\\\\"}, \\\\\\"start_time_unix_nano\\\\\\": 50, \\\\\\"time_unix_nano\\\\\\": 60, \\\\\\"count\\\\\\": 1, \\\\\\"sum\\\\\\": 0.8, \\\\\\"bucket_counts\\\\\\": [0, 0, 1, 0], \\\\\\"explicit_bounds\\\\\\": [0.1, 0.5, 0.9, 1], \\\\\\"min\\\\\\": 0.8, \\\\\\"max\\\\\\": 0.8}]\\", \\"aggregation_temporality\\": 1}"}',
point.to_json(),
)
| 39.2 | 507 | 0.471939 | 378 | 3,920 | 4.73545 | 0.31746 | 0.053631 | 0.080447 | 0.070391 | 0.550279 | 0.530168 | 0.530168 | 0.530168 | 0.530168 | 0.530168 | 0 | 0.027376 | 0.329082 | 3,920 | 99 | 508 | 39.59596 | 0.653232 | 0.142092 | 0 | 0.346154 | 0 | 0.038462 | 0.363691 | 0.162437 | 0 | 0 | 0 | 0 | 0.038462 | 1 | 0.051282 | false | 0 | 0.025641 | 0.012821 | 0.102564 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce3ffedcaa5e701aace15a075e51b4adb77b4edb | 1,445 | py | Python | devcontrol/http/devcontent.py | diegopx/AutoHome | a48478cccd9712270ef96845578c63f8c82aae77 | [
"BSD-3-Clause"
] | null | null | null | devcontrol/http/devcontent.py | diegopx/AutoHome | a48478cccd9712270ef96845578c63f8c82aae77 | [
"BSD-3-Clause"
] | null | null | null | devcontrol/http/devcontent.py | diegopx/AutoHome | a48478cccd9712270ef96845578c63f8c82aae77 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# automation.py
# an automation development server
# by Diego Guerrero
import flask
import ssl
import sys
import json
app = flask.Flask(__name__)
configuration = None
def check_authorization():
    return flask.request.headers.get("Authorization") == configuration["devmqttpsk"]


# Serve the firmware image only to authorized clients that report a firmware
# version older than the one available; otherwise answer 304 Not Modified.
@app.route("/static/sonoff-firmware.bin", methods=["GET"])
def sonoff_firmware():
    if not check_authorization():
        flask.abort(404)
    try:
        version = int(flask.request.headers.get("X-ESP8266-version"))
    except (TypeError, ValueError):
        version = sys.maxsize
    available = 1
    if version < available:
        return flask.send_from_directory("static/", "sonoff-firmware.bin")
    else:
        return ("", 304, {})


@app.route("/static/access", methods=["GET"])
def access():
    if not check_authorization():
        flask.abort(404)
    return ("", 204, {"X-SSID": configuration["wifissid"], "X-PSK": configuration["wifipass"]})


def main():
    # The request handlers read the module-level `configuration`, so assign the
    # global rather than shadowing it with a local name.
    global configuration
    try:
        with open("configuration.json") as configfile:
            configuration = json.loads(configfile.read())
    except IOError:
        print("Can't open configuration file", file=sys.stderr)
        return
    sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    sslcontext.load_cert_chain(configuration["certificate"], configuration["privatekey"])
    app.run(host=configuration["devhostname"], port=configuration["devhttpport"],
            ssl_context=sslcontext, threaded=False, debug=False)


if __name__ == "__main__":
    main()
| 24.491525 | 92 | 0.712803 | 175 | 1,445 | 5.754286 | 0.525714 | 0.053625 | 0.037736 | 0.043694 | 0.0715 | 0.0715 | 0.0715 | 0 | 0 | 0 | 0 | 0.016787 | 0.134256 | 1,445 | 58 | 93 | 24.913793 | 0.788169 | 0.07474 | 0 | 0.153846 | 0 | 0 | 0.178679 | 0.02027 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0.025641 | 0.102564 | 0.025641 | 0.333333 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce413bef2410af554c7111f74ade966ae5f60321 | 1,329 | py | Python | Server/config/dev.py | Jaws-bar/Entry3.0-InterviewSystem | 15385f9982c0c4e9aed970263b7ea1e50d6163ca | [
"MIT"
] | null | null | null | Server/config/dev.py | Jaws-bar/Entry3.0-InterviewSystem | 15385f9982c0c4e9aed970263b7ea1e50d6163ca | [
"MIT"
] | null | null | null | Server/config/dev.py | Jaws-bar/Entry3.0-InterviewSystem | 15385f9982c0c4e9aed970263b7ea1e50d6163ca | [
"MIT"
] | null | null | null | from datetime import timedelta
class Config:
    DEBUG = True
    HOST = 'localhost'

    RUN_SETTING = {
        'host': HOST,
        'port': 5000,
        'debug': DEBUG
    }

    SERVICE_NAME = 'entry3.0-interview'
    SECRET_KEY = " erich_hartmann"
    JWT_SECRET_KEY = 'otto_carius'
    JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=6)
    JWT_REFRESH_TOKEN_EXPIRES = timedelta(days=3)

    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:wasitacatisaw?@localhost:3333/entry"
    # SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:wasitacatisaw?@localhost:3333/"+SERVICE_NAME
    SQLALCHEMY_ECHO = True

    SWAGGER = {
        'title': SERVICE_NAME,
        'specs_route': '/docs',
        'uiversion': 3,
        'info': {
            'title': SERVICE_NAME + ' API',
            'version': '1.0',
            'description': 'Interview System'
        },
        'basePath': '/ '
    }

    SWAGGER_TEMPLATE = {
        'schemes': [
            'http'
        ],
        'tags': [
            {
                'name': 'Admin',
                'description': 'Admin menu API'
            },
            {
                'name': 'Auth',
                'description': 'Auth API'
            },
            {
                'name': 'Interview',
                'description': 'Interview API'
            }
        ]
} | 23.732143 | 98 | 0.495862 | 112 | 1,329 | 5.678571 | 0.571429 | 0.069182 | 0.066038 | 0.081761 | 0.198113 | 0.198113 | 0.198113 | 0.198113 | 0.198113 | 0 | 0 | 0.022892 | 0.37547 | 1,329 | 56 | 99 | 23.732143 | 0.743373 | 0.069225 | 0 | 0 | 0 | 0 | 0.262945 | 0.045307 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.021739 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce42c3c2bddcf142e72fc4593697a07b41d50ed0 | 3,078 | py | Python | test_all.py | nescirem/fortran_static_library | d2ee115ccaa93e6618577f4f3e90e93ffac0daf6 | [
"MIT"
] | 1 | 2019-07-08T11:38:19.000Z | 2019-07-08T11:38:19.000Z | test_all.py | nescirem/Fortran_External_Library | d2ee115ccaa93e6618577f4f3e90e93ffac0daf6 | [
"MIT"
] | null | null | null | test_all.py | nescirem/Fortran_External_Library | d2ee115ccaa93e6618577f4f3e90e93ffac0daf6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#coding:utf-8
from sh import bash
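# createDeck: enumerate every compiler/build-mode combination to test for the given language.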
def createDeck(LANG):
import itertools
DDBUG=['debug','release']
if LANG == "fortran":
FC=['gfortran','ifort']
return (list(itertools.product(FC,DDBUG)))
else:
FC=['g++']
return (list(itertools.product(DDBUG,FC)))
def main():
def test_library(str):
if LIBRARY == "STATIC_LIBRARY":
expected = read_file("./expect/lib_expect.txt")
else:
expected = read_file("./expect/dll_expect.txt")
expected = pure_str(expected)
cstrs = str.split( " --------------------------------" )
result = pure_str(cstrs[-2])
if expected == result:
print ('\033[0;32m[PASS]\033[0m', flush=True); err_code = 0
else:
print ('\033[1;31m[ERROR]\033[0m', flush=True); err_code = 1
return err_code
def test_modes():
err_count=0
for i in modes:
bash( c='./'+LANG+'_'+LIBRARY.lower()+'/clean_all.sh', _timeout=2 )
app_cmd = i[0]+' '+i[1]
if app_cmd.isspace():
print ('['+LANG+'] '+LIBRARY+' DEFAULT', end=": ", flush=True)
else:
print ('['+LANG+'] '+LIBRARY+' '+app_cmd, end=": ", flush=True)
str = bash( c='./'+LANG+'_'+LIBRARY.lower()+'/build.sh '+app_cmd, _timeout=10 )
err_count += test_library(str)
return err_count
print ('', flush=True)
err_tcount = 0
LANG="fortran"
# Fortran STATIC LIBRARY
modes = createDeck(LANG)
modes.append(('',''))
LIBRARY="STATIC_LIBRARY"
err_tcount += test_modes()
print ('', flush=True)
# Fortran DYNAMIC LIBRARY
LIBRARY="DYNAMIC_LIBRARY"
err_tcount += test_modes()
print ('', flush=True)
LANG="cpp"
# C++ STATIC LIBRARY
LIBRARY="STATIC_LIBRARY"
modes = createDeck(LANG)
err_tcount += test_modes()
print ('', flush=True)
# C++ DYNAMIC LIBRARY
LIBRARY="DYNAMIC_LIBRARY"
modes = createDeck(LANG)
err_tcount += test_modes()
clean_all()
print ("\033[1;31mERROR\033[0m =", err_tcount)
print ('', flush=True)
def read_file(file_path):
import os
if not os.path.isfile(file_path):
raise TypeError(file_path + " does not exist")
text_in_file = open(file_path).read()
return text_in_file
def pure_str(str):
purestr = str.replace(' ','')
purestr = purestr.replace('\n','')
return purestr
def clean_all():
bash( c="./fortran_static_library/clean_all.sh", _timeout=2 )
bash( c="./fortran_dynamic_library/clean_all.sh", _timeout=2 )
bash( c="./cpp_static_library/clean_all.sh", _timeout=2 )
bash( c="./cpp_dynamic_library/clean_all.sh", _timeout=2 )
#------------------------------------------------------
# add script here
#------------------------------------------------------
print ('', flush=True)
if __name__=="__main__":
main()
| 27.981818 | 91 | 0.532814 | 355 | 3,078 | 4.408451 | 0.273239 | 0.057508 | 0.053674 | 0.054313 | 0.334185 | 0.235783 | 0.208946 | 0.17508 | 0.06901 | 0 | 0 | 0.020125 | 0.273554 | 3,078 | 109 | 92 | 28.238532 | 0.679785 | 0.078622 | 0 | 0.276316 | 0 | 0 | 0.171206 | 0.102229 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092105 | false | 0.013158 | 0.039474 | 0 | 0.210526 | 0.144737 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce42ecb2216a2bd3b8c63146f02b5f1b86c801c7 | 1,213 | py | Python | basicExample.py | nagasudhirpulla/python_logging_samples | 9d8bb5f78ab544ef45bacb58fb40717f8645d030 | [
"MIT"
] | null | null | null | basicExample.py | nagasudhirpulla/python_logging_samples | 9d8bb5f78ab544ef45bacb58fb40717f8645d030 | [
"MIT"
] | null | null | null | basicExample.py | nagasudhirpulla/python_logging_samples | 9d8bb5f78ab544ef45bacb58fb40717f8645d030 | [
"MIT"
] | null | null | null | import logging
import json
from logging import LoggerAdapter
def getEnrichedLogger(name: str, extra: dict) -> LoggerAdapter:
    """get logger object that is enriched with the 'extra' dict
    https://medium.com/devops-dudes/python-logs-a-jsons-journey-to-elasticsearch-ffbabfd44b83

    Args:
        name (str): name of logger
        extra (dict): enrich dict, like {"app_name":"myApp", "server_ip":"10.10.10.10"}

    Returns:
        LoggerAdapter: LoggerAdapter object
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    streamHandler = logging.StreamHandler()
    basicDict = {
        "time": "%(asctime)s", "level": "%(levelname)s", "message": "%(message)s"}
    fullDict = {**basicDict, **extra}
    streamFormatter = logging.Formatter(json.dumps(fullDict))
    streamHandler.setFormatter(streamFormatter)
    logger.addHandler(streamHandler)
    loggerAdapter = logging.LoggerAdapter(logger, extra)
    return loggerAdapter


logger = getEnrichedLogger(name="test_app", extra={"app_name": "myTestApp"})
logger.info("Hello World!!!")

try:
    x = 1/0
except Exception as e:
    logger.error("Some error occurred", exc_info=e)
    # logger.exception("Some error occurred")
| 32.783784 | 93 | 0.694147 | 139 | 1,213 | 6.021583 | 0.546763 | 0.032258 | 0.014337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01393 | 0.171476 | 1,213 | 36 | 94 | 33.694444 | 0.818905 | 0.295136 | 0 | 0 | 0 | 0 | 0.131707 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.142857 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce43e2880a6605ac044095014b21d03ceef8e382 | 4,677 | py | Python | d3m/metadata/params.py | tods-doc/tamu_d3m | a6a05f022ea60ab9787cbd89659ea8e0062ca22b | [
"Apache-2.0"
] | null | null | null | d3m/metadata/params.py | tods-doc/tamu_d3m | a6a05f022ea60ab9787cbd89659ea8e0062ca22b | [
"Apache-2.0"
] | null | null | null | d3m/metadata/params.py | tods-doc/tamu_d3m | a6a05f022ea60ab9787cbd89659ea8e0062ca22b | [
"Apache-2.0"
] | null | null | null | import typing
from d3m import exceptions, utils
class ParamsMeta(utils.AbstractMetaclass):
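    """Metaclass for ``Params``: gathers bare class-level type annotations
    (including those inherited from base classes) into ``__params_items__`` and
    rejects any class attribute that is not a method, a private name, or an
    annotation."""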
def __new__(mcls, class_name, bases, namespace, **kwargs): # type: ignore
for name, value in namespace.items():
if name.startswith('_'):
continue
if utils.is_class_method_on_class(value) or utils.is_instance_method_on_class(value):
continue
raise TypeError("Only methods and attribute type annotations can be defined on Params class, not '{name}'.".format(name=name))
class_params_items = {}
class_annotations = namespace.get('__annotations__', {})
for name, value in class_annotations.items():
value = typing._type_check(value, "Each annotation must be a type.")
if name in namespace:
# Just update the annotation.
class_annotations[name] = value
else:
# Extract annotation out.
class_params_items[name] = value
for name in class_params_items.keys():
del class_annotations[name]
# Set back updated annotations.
namespace['__annotations__'] = class_annotations
params_items = {}
for base in reversed(bases):
params_items.update(base.__dict__.get('__params_items__', {}))
params_items.update(class_params_items)
namespace['__params_items__'] = params_items
return super().__new__(mcls, class_name, bases, namespace, **kwargs)
class Params(dict, metaclass=ParamsMeta):
"""
A base class to be subclassed and used as a type for ``Params`` type
argument in primitive interfaces. An instance of this subclass should
be returned from primitive's ``get_params`` method, and accepted in
``set_params``.
You should subclass the class and set type annotations on class attributes
for params available in the class.
When creating an instance of the class, all parameters have to be provided.
"""
def __init__(self, other: typing.Dict[str, typing.Any] = None, **values: typing.Any) -> None:
if other is None:
other = {}
values = dict(other, **values)
params_keys = set(self.__params_items__.keys()) # type: ignore
values_keys = set(values.keys())
missing = params_keys - values_keys
if len(missing):
raise exceptions.InvalidArgumentValueError("Not all parameters are specified: {missing}".format(missing=missing))
extra = values_keys - params_keys
if len(extra):
raise exceptions.InvalidArgumentValueError("Additional parameters are specified: {extra}".format(extra=extra))
super().__init__(values)
def __setitem__(self, key, value): # type: ignore
if key not in self.__params_items__:
raise ValueError("Additional parameter is specified: {key}".format(key=key))
return super().__setitem__(key, value)
def __delitem__(self, key): # type: ignore
raise AttributeError("You cannot delete parameters.")
def clear(self): # type: ignore
raise AttributeError("You cannot delete parameters.")
def pop(self, key, default=None): # type: ignore
raise AttributeError("You cannot delete parameters.")
def popitem(self): # type: ignore
raise AttributeError("You cannot delete parameters.")
def setdefault(self, key, default=None): # type: ignore
if key not in self.__params_items__:
raise ValueError("Additional parameter is specified: {key}".format(key=key))
return super().setdefault(key, default)
def update(self, other: typing.Dict[str, typing.Any] = None, **values: typing.Any) -> None: # type: ignore
if other is None:
other = {}
values = dict(other, **values)
params_keys = set(self.__params_items__.keys()) # type: ignore
values_keys = set(values.keys())
extra = values_keys - params_keys
if len(extra):
raise ValueError("Additional parameters are specified: {extra}".format(extra=extra))
super().update(values)
def validate(self) -> None:
for name, value in self.items():
value_type = self.__params_items__[name] # type: ignore
if not utils.is_instance(value, value_type):
raise TypeError("Value '{value}' for parameter '{name}' is not an instance of the type: {value_type}".format(value=value, name=name, value_type=value_type))
def __repr__(self) -> str:
return '{class_name}({super})'.format(class_name=type(self).__name__, super=super().__repr__())
| 36.826772 | 172 | 0.645927 | 555 | 4,677 | 5.196396 | 0.214414 | 0.057212 | 0.026006 | 0.040222 | 0.370319 | 0.370319 | 0.357836 | 0.332871 | 0.332871 | 0.225381 | 0 | 0.000286 | 0.251229 | 4,677 | 126 | 173 | 37.119048 | 0.823244 | 0.135985 | 0 | 0.324324 | 0 | 0 | 0.153962 | 0.005266 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148649 | false | 0 | 0.027027 | 0.013514 | 0.256757 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbebad3a4bdd2fca2bf6509cbc7d5b9173df861e | 1,229 | py | Python | scripts/main_triads.py | orphee-celui-qui-ne-sait-rien/Word-predictor | 54f8a47d207fba1aa4ecf5fcfd178d2db43e6398 | [
"Unlicense"
] | null | null | null | scripts/main_triads.py | orphee-celui-qui-ne-sait-rien/Word-predictor | 54f8a47d207fba1aa4ecf5fcfd178d2db43e6398 | [
"Unlicense"
] | null | null | null | scripts/main_triads.py | orphee-celui-qui-ne-sait-rien/Word-predictor | 54f8a47d207fba1aa4ecf5fcfd178d2db43e6398 | [
"Unlicense"
] | null | null | null | import random
import json
import sys
import os.path as path
from setup import jsons_path, standard_chars
def guess_next_letter(word: str):  # ex. word = "___hel"
    last_triad = word[-3:]
    try:
        next_char = random.choices(list(DATA[last_triad].keys()), list(DATA[last_triad].values()))[0]
        return next_char
    except KeyError:
        print("Oh no, there's no data in your analyzed text about this text sequence, the script will return a random character")
        print("Consider using a larger text to get better results")
        return random.choice(list(standard_chars))


def cycle():
    word = "___"
    while True:
        letter = input("next letter:\n")
        # stops loop
        if len(letter) == 0:
            print(f"-------------------\nyour final word is:\n{word[3:]}")
            break
        word += letter
        next_char = guess_next_letter(word)
        print(f"\nuser word:\n{word[3:]}")
        print(f"next char:\n{word[3:] + next_char}\n")


if __name__ == '__main__':
    triad_filename = sys.argv[1]
    triad_file_path = path.join(jsons_path, triad_filename)
    with open(triad_file_path) as json_file:
        DATA: dict
        DATA = json.load(json_file)
    cycle()
| 30.725 | 130 | 0.624898 | 174 | 1,229 | 4.201149 | 0.494253 | 0.05472 | 0.024624 | 0.051984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007543 | 0.244915 | 1,229 | 39 | 131 | 31.512821 | 0.780172 | 0.02441 | 0 | 0 | 0 | 0.03125 | 0.250836 | 0.020903 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.15625 | 0 | 0.28125 | 0.15625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbec3ca38787aee1cc4e74c87811c9302070a732 | 7,003 | py | Python | tools/pythonpkg/tests/sqlite/test_types.py | ankane/duckdb | 8a60a6144059067939092bdc30ca229093c984e5 | [
"MIT"
] | 1 | 2021-09-15T10:29:20.000Z | 2021-09-15T10:29:20.000Z | tools/pythonpkg/tests/sqlite/test_types.py | ankane/duckdb | 8a60a6144059067939092bdc30ca229093c984e5 | [
"MIT"
] | null | null | null | tools/pythonpkg/tests/sqlite/test_types.py | ankane/duckdb | 8a60a6144059067939092bdc30ca229093c984e5 | [
"MIT"
] | 1 | 2021-08-13T06:36:19.000Z | 2021-08-13T06:36:19.000Z | #-*- coding: iso-8859-1 -*-
# pysqlite2/test/types.py: tests for type conversion and detection
#
# Copyright (C) 2005 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pyduckdb.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
#
# This library is derived from the pysqlite testing library, with small modifications
# to remove tests that are for features that are not supported by DuckDB.
import datetime
import decimal
import unittest
import duckdb
class DuckDBTypeTests(unittest.TestCase):
def setUp(self):
self.con = duckdb.connect(":memory:")
self.cur = self.con.cursor()
self.cur.execute("create table test(i bigint, s varchar, f double)")
def tearDown(self):
self.cur.close()
self.con.close()
def test_CheckString(self):
self.cur.execute("insert into test(s) values (?)", (u"Österreich",))
self.cur.execute("select s from test")
row = self.cur.fetchone()
self.assertEqual(row[0], u"Österreich")
def test_CheckSmallInt(self):
self.cur.execute("insert into test(i) values (?)", (42,))
self.cur.execute("select i from test")
row = self.cur.fetchone()
self.assertEqual(row[0], 42)
def test_CheckLargeInt(self):
num = 2**40
self.cur.execute("insert into test(i) values (?)", (num,))
self.cur.execute("select i from test")
row = self.cur.fetchone()
self.assertEqual(row[0], num)
def test_CheckFloat(self):
val = 3.14
self.cur.execute("insert into test(f) values (?)", (val,))
self.cur.execute("select f from test")
row = self.cur.fetchone()
self.assertEqual(row[0], val)
def test_CheckDecimal(self):
val = 17.29
self.cur.execute("insert into test(f) values (?)", (decimal.Decimal(val),))
self.cur.execute("select f from test")
row = self.cur.fetchone()
self.assertEqual(row[0], val)
def test_CheckNaN(self):
with self.assertRaises(RuntimeError) as context:
self.cur.execute("insert into test(f) values (?)", (decimal.Decimal('nan'),))
def test_CheckInf(self):
with self.assertRaises(RuntimeError) as context:
self.cur.execute("insert into test(f) values (?)", (decimal.Decimal('inf'),))
def test_CheckUnicodeExecute(self):
self.cur.execute(u"select 'Österreich'")
row = self.cur.fetchone()
self.assertEqual(row[0], u"Österreich")
class CommonTableExpressionTests(unittest.TestCase):
def setUp(self):
self.con = duckdb.connect(":memory:")
self.cur = self.con.cursor()
self.cur.execute("create table test(x int)")
def tearDown(self):
self.cur.close()
self.con.close()
def test_CheckCursorDescriptionCTESimple(self):
self.cur.execute("with one as (select 1) select * from one")
self.assertIsNotNone(self.cur.description)
self.assertEqual(self.cur.description[0][0], "1")
def test_CheckCursorDescriptionCTESMultipleColumns(self):
self.cur.execute("insert into test values(1)")
self.cur.execute("insert into test values(2)")
self.cur.execute("with testCTE as (select * from test) select * from testCTE")
self.assertIsNotNone(self.cur.description)
self.assertEqual(self.cur.description[0][0], "x")
def test_CheckCursorDescriptionCTE(self):
self.cur.execute("insert into test values (1)")
self.cur.execute("with bar as (select * from test) select * from test where x = 1")
self.assertIsNotNone(self.cur.description)
self.assertEqual(self.cur.description[0][0], "x")
self.cur.execute("with bar as (select * from test) select * from test where x = 2")
self.assertIsNotNone(self.cur.description)
self.assertEqual(self.cur.description[0][0], "x")
class DateTimeTests(unittest.TestCase):
def setUp(self):
self.con = duckdb.connect(":memory:")
self.cur = self.con.cursor()
self.cur.execute("create table test(d date, t time, ts timestamp)")
def tearDown(self):
self.cur.close()
self.con.close()
def test_CheckDate(self):
d = datetime.date(2004, 2, 14)
self.cur.execute("insert into test(d) values (?)", (d,))
self.cur.execute("select d from test")
d2 = self.cur.fetchone()[0]
self.assertEqual(d, d2)
def test_CheckTime(self):
t = datetime.time(7, 15, 0)
self.cur.execute("insert into test(t) values (?)", (t,))
self.cur.execute("select t from test")
t2 = self.cur.fetchone()[0]
self.assertEqual(t, t2)
def test_CheckTimestamp(self):
ts = datetime.datetime(2004, 2, 14, 7, 15, 0)
self.cur.execute("insert into test(ts) values (?)", (ts,))
self.cur.execute("select ts from test")
ts2 = self.cur.fetchone()[0]
self.assertEqual(ts, ts2)
def test_CheckSqlTimestamp(self):
now = datetime.datetime.utcnow()
self.cur.execute("insert into test(ts) values (current_timestamp)")
self.cur.execute("select ts from test")
ts = self.cur.fetchone()[0]
self.assertEqual(type(ts), datetime.datetime)
self.assertEqual(ts.year, now.year)
def test_CheckDateTimeSubSeconds(self):
ts = datetime.datetime(2004, 2, 14, 7, 15, 0, 500000)
self.cur.execute("insert into test(ts) values (?)", (ts,))
self.cur.execute("select ts from test")
ts2 = self.cur.fetchone()[0]
self.assertEqual(ts, ts2)
def test_CheckTimeSubSeconds(self):
t = datetime.time(7, 15, 0, 500000)
self.cur.execute("insert into test(t) values (?)", (t,))
self.cur.execute("select t from test")
t2 = self.cur.fetchone()[0]
self.assertEqual(t, t2)
def test_CheckDateTimeSubSecondsFloatingPoint(self):
ts = datetime.datetime(2004, 2, 14, 7, 15, 0, 510241)
self.cur.execute("insert into test(ts) values (?)", (ts,))
self.cur.execute("select ts from test")
ts2 = self.cur.fetchone()[0]
self.assertEqual(ts.year, ts2.year)
self.assertEqual(ts2.microsecond, 510241)
| 38.059783 | 91 | 0.645152 | 938 | 7,003 | 4.797441 | 0.220682 | 0.099556 | 0.115111 | 0.075556 | 0.582889 | 0.582889 | 0.563333 | 0.529556 | 0.498222 | 0.497333 | 0.000143 | 0.025778 | 0.224475 | 7,003 | 183 | 92 | 38.26776 | 0.802615 | 0.163644 | 0 | 0.492188 | 0 | 0 | 0.19976 | 0 | 0 | 0 | 0 | 0 | 0.195313 | 1 | 0.1875 | false | 0 | 0.03125 | 0 | 0.242188 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbef78623dba065c11d81e2a86f65b505ed9dffb | 10,814 | py | Python | graphanime/graphanime/animation.py | Sosso8305/GIF-Dijkstra-Python | 997d8c9423da1d196da71525ab19433887d9d9e6 | [
"MIT"
] | 1 | 2021-06-24T13:44:16.000Z | 2021-06-24T13:44:16.000Z | graphanime/graphanime/animation.py | Sosso8305/LaTeX-to-GIF-Python | 997d8c9423da1d196da71525ab19433887d9d9e6 | [
"MIT"
] | null | null | null | graphanime/graphanime/animation.py | Sosso8305/LaTeX-to-GIF-Python | 997d8c9423da1d196da71525ab19433887d9d9e6 | [
"MIT"
] | null | null | null | from .graph import Graph
from pdf2image import convert_from_path
from apng import APNG
from PIL import Image
import os, platform, subprocess, tempfile, glob, shutil
__all__ = ['load', 'gen_beamer', 'gen_pdf', 'gen_apng', 'gen_gif']
############Begin_Parser##################
def load(file):
fileTex = open(file,"r")
# Remove comments
fragTexts = fileTex.readlines()
line =[]
for text in fragTexts:
if text.find('%') !=-1:
text = text[:text.find('%')]
text += '\n'
line.append(text)
allText = ''.join(line)
# Get the uspackage&uselibrary lines
preambule = allText[allText.find("\\documentclass[tikz]{standalone}")+len('\documentclass[tikz]{standalone}'):allText.find("\\begin{document}")]
AllCommand = allText[allText.find("\\begin{tikzpicture}") + len("\\begin{tikzpicture}"):allText.find("\\end{tikzpicture}")]
G= Graph("G", [], [], {}, preambule)
AllCommand = AllCommand.split(';')
for command in AllCommand:
if command.find("\\node") != -1:
options = command[(command.find("[")+1):command.rfind("]")]
options = options.split(',')
fill =""
label =""
label_color=""
label_position=""
contour_color=""
other_options=[]
for opt in options:
if opt.find("fill") != -1:
if opt.find("=") != -1:
fill = " "+opt[opt.find("=")+1:]
else : fill = " "
elif opt.find("label") != -1:
if opt.find("{") != -1:
opt=opt[opt.find("{")+1:opt.find("}")]
if opt.find(":") != -1:
label = opt[opt.find(":")+1:]
if opt.find("[") != -1:
label_color = opt[opt.find("[")+1:opt.find("]")]
label_position = opt[opt.find("]")+1:opt.find(":")]
else :
label_position = opt[:opt.find(":")]
else:
label=opt[opt.find("=")+1:]
elif opt.find("draw") != -1:
if opt.find("=") != -1:
contour_color = " "+opt[opt.find("=")+1:]
else : contour_color = " "
else:
other_options.append(opt)
options = ",".join(other_options)
id = command[(command.find("(")+1):command.find(")")]
name = command[(command.rfind("{")+1):command.rfind("}")]
coordonnee = ()
if command.find("at(") != -1:
coordonnee = command[command.find("at(")+3:command.find("at(")+command[command.find("at("):].find(")")]
coordonnee = coordonnee.split(',')
G.add_node(id, name, fill=fill, label=label, node_options=options, coordonnee=coordonnee, label_color=label_color, label_position=label_position, contour_color=contour_color)
elif command.find("\\path") != -1:
command = command.splitlines()
for c in command:
if c.find('edge')==-1: continue
edge=(c[c.find("(")+1:c.find(")")], c[c.rfind("(")+1:c.rfind(")")])
options = c[(c.find("[")+1):c.find("]")]
options = options.split(',')
other_options=[]
color=""
edge_label=''
for opt in options:
if opt.find("color") != -1:
opt=''.join(opt.split())
color = opt[6:]
elif opt.find('"') != -1:
edge_label = opt[opt.find('"')+1:opt.rfind('"')]
elif opt.find("-") != -1:
opt=''.join(opt.split())
if opt=='-' or opt=='->' or opt=='<-':
orientation = opt
else:
other_options.append(opt)
options = ",".join(other_options)
G.add_link(edge, orientation, edge_label=edge_label, color=color, edge_options=options)
return G
############END_Parser##################
#############BEGIN_Back-end###################
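# gen_beamer: write each Graph in `anim` as one beamer frame and compile the
# result with pdflatex; the tikzset "master"/"slave" styles reuse the first
# frame's bounding box so every frame has identical dimensions.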
def gen_beamer(anim,file,out_tex=False):
######Python to LaTeX######
if not os.path.exists("./out/"):
os.mkdir("./out/")
os.chdir("./out/")
current_dir = os.getcwd()
with tempfile.TemporaryDirectory() as tempdir:
os.chdir(tempdir)
fOut = open(file+".tex","w")
fOut.write("\\documentclass{beamer} \n")
fOut.write( anim[0].preambule + "\n")
fOut.write("\\tikzset{%https://tex.stackexchange.com/questions/49888/tikzpicture-alignment-and-centering\n") #source
fOut.write("master/.style={\nexecute at end picture={\n\coordinate (lower right) at (current bounding box.south east);\n\coordinate (upper left) at (current bounding box.north west);}},")
fOut.write("slave/.style={\nexecute at end picture={\n\pgfresetboundingbox\n\path (upper left) rectangle (lower right);}}}\n")
fOut.write("\\begin{document} \n")
first=True
for G in anim:
fOut.write("\\begin{frame} \n")
fOut.write("\\centering\n")
fOut.write("\\begin{tikzpicture} ")
if first:
fOut.write("[master]\n")
first=False
else: fOut.write("[slave]\n")
fOut.write(G.writeLaTeX())
fOut.write("\\end{tikzpicture} \n")
fOut.write("\\end{frame} \n")
fOut.write("\\end{document}")
fOut.close()
######LaTeX to PDF######
# TeX source filename
tex_filename = os.path.join(tempdir,file+".tex")
# the corresponding PDF filename
pdf_filename = os.path.join(tempdir,file+".pdf")
# compile TeX file
subprocess.run(['pdflatex', '-interaction=batchmode', tex_filename])
os.chdir(current_dir)
if os.path.exists(pdf_filename):
shutil.copy2(pdf_filename,current_dir)
if(out_tex):
shutil.copy2(tex_filename,current_dir)
else:
raise RuntimeError('PDF output not found')
os.chdir("../")
def gen_pdf(anim,file,out_tex=False):
######Python to LaTeX######
if not os.path.exists("./out/"):
os.mkdir("./out/")
os.chdir("./out/")
current_dir = os.getcwd()
with tempfile.TemporaryDirectory() as tempdir:
os.chdir(tempdir)
fOut = open(file+".tex","w")
fOut.write("\\documentclass[tikz]{standalone}\n")
fOut.write( anim[0].preambule + "\n")
fOut.write("\\tikzset{%https://tex.stackexchange.com/questions/49888/tikzpicture-alignment-and-centering\n") #source
fOut.write("master/.style={\nexecute at end picture={\n\coordinate (lower right) at (current bounding box.south east);\n\coordinate (upper left) at (current bounding box.north west);}},")
fOut.write("slave/.style={\nexecute at end picture={\n\pgfresetboundingbox\n\path (upper left) rectangle (lower right);}}}\n")
fOut.write("\\begin{document} \n")
first = True
for G in anim:
fOut.write("\\centering\n")
fOut.write("\\begin{tikzpicture}\n")
if first:
fOut.write("[master]\n")
first=False
else: fOut.write("[slave]\n")
fOut.write(G.writeLaTeX())
fOut.write("\\end{tikzpicture} \n")
fOut.write("\\end{document}")
fOut.close()
######LaTeX to PDF######
# TeX source filename
tex_filename = os.path.join(tempdir,file+".tex")
# the corresponding PDF filename
pdf_filename = os.path.join(tempdir,file+".pdf")
# compile TeX file
subprocess.run(['pdflatex', '-interaction=batchmode', tex_filename])
os.chdir(current_dir)
# check if PDF is successfully generated
if os.path.exists(pdf_filename):
shutil.copy2(pdf_filename,current_dir)
if(out_tex):
shutil.copy2(tex_filename,current_dir)
else:
raise RuntimeError('PDF output not found')
os.chdir("../")
def key_sort(word,file):
return int(word[len(file)+1:-4])
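# gen_gif: render the animation to a PDF, rasterise each page to PNG with
# pdf2image, then assemble the frames into a looping GIF with Pillow (each
# frame is appended several extra times to slow the playback down).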
def gen_gif(anim,file,duration=500):
if not os.path.exists("./out/"):
os.mkdir("./out/")
os.chdir("./out/")
current_dir = os.getcwd()
with tempfile.TemporaryDirectory() as tempdir:
os.chdir(tempdir)
gen_pdf(anim, file)
pages = convert_from_path("./out/"+ file +".pdf")
nb = 0
for page in pages:
nb+=1
page.save(file+'_'+str(nb)+".png",'PNG')
frames = []
images = glob.glob("*.png")
images= sorted(images, key= lambda x: key_sort(x,file))
for img in images:
new_frame =Image.open(img)
frames.append(new_frame)
for _ in range(5):
frames.append(new_frame)
frames[0].save(file+".gif",format='GIF',append_images=frames[1:],save_all=True,duration=duration,loop=0)
# the corresponding GIF filename
gif_filename = os.path.join(tempdir,file+".gif")
os.chdir(current_dir)
if os.path.exists(gif_filename):
shutil.copy2(gif_filename,current_dir)
os.chdir("../")
def gen_apng(anim,file,delay=500):
if not os.path.exists("./out/"):
os.mkdir("./out/")
os.chdir("./out/")
current_dir = os.getcwd()
with tempfile.TemporaryDirectory() as tempdir:
os.chdir(tempdir)
gen_pdf(anim, file)
pages = convert_from_path("./out/"+ file +".pdf")
nb = 0
for page in pages:
nb+=1
page.save(file+'_'+str(nb)+".png",'PNG')
for _ in range(5):
nb +=1
page.save(file+'_'+str(nb)+".png",'PNG')
images = glob.glob("*.png")
images= sorted(images, key= lambda x: key_sort(x,file))
APNG.from_files(images,delay=delay).save(file+".png")
# the corresponding GIF filename
apng_filename = os.path.join(tempdir,file+".png")
os.chdir(current_dir)
if os.path.exists(apng_filename):
shutil.copy2(apng_filename,current_dir)
else:
raise RuntimeError('APNG output not found')
os.chdir("../")
#############END_Back-end###################
| 31.164265 | 195 | 0.50786 | 1,188 | 10,814 | 4.537037 | 0.156566 | 0.046753 | 0.022263 | 0.016327 | 0.635065 | 0.581262 | 0.548794 | 0.539889 | 0.513729 | 0.491095 | 0 | 0.009362 | 0.318476 | 10,814 | 346 | 196 | 31.254335 | 0.721981 | 0.037174 | 0 | 0.576037 | 0 | 0.02765 | 0.161776 | 0.039417 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02765 | false | 0 | 0.023041 | 0.004608 | 0.059908 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbefe333fe6aa16896fbd5efa745c3f46cd5605d | 6,087 | py | Python | src/mask_rcnn/keras/evaluate.py | zhouwubai/kaggle | 45fbce8834a5c7ce9c925af691f5761d9d88c8d3 | [
"MIT"
] | 1 | 2018-07-11T16:35:14.000Z | 2018-07-11T16:35:14.000Z | src/mask_rcnn/keras/evaluate.py | zhouwubai/kaggle | 45fbce8834a5c7ce9c925af691f5761d9d88c8d3 | [
"MIT"
] | null | null | null | src/mask_rcnn/keras/evaluate.py | zhouwubai/kaggle | 45fbce8834a5c7ce9c925af691f5761d9d88c8d3 | [
"MIT"
] | null | null | null | ############################################################
# Evaluation
############################################################
import numpy as np
from mask_rcnn.keras import utils
def trim_zeros(x):
"""It's common to have tensors larger than the available data and
pad with zeros. This function removes rows that are all zeros.
x: [rows, columns].
"""
assert len(x.shape) == 2
return x[~np.all(x == 0, axis=1)]
def compute_matches(gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
iou_threshold=0.5, score_threshold=0.0):
"""Finds matches between prediction and ground truth instances.
Returns:
gt_match: 1-D array. For each GT box it has the index of the matched
predicted box.
pred_match: 1-D array. For each predicted box, it has the index of
the matched ground truth box.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
# Trim zero padding
# TODO: cleaner to do zero unpadding upstream
gt_boxes = trim_zeros(gt_boxes)
gt_masks = gt_masks[..., :gt_boxes.shape[0]]
pred_boxes = trim_zeros(pred_boxes)
pred_scores = pred_scores[:pred_boxes.shape[0]]
# Sort predictions by score from high to low
indices = np.argsort(pred_scores)[::-1]
pred_boxes = pred_boxes[indices]
pred_class_ids = pred_class_ids[indices]
pred_scores = pred_scores[indices]
pred_masks = pred_masks[..., indices]
# Compute IoU overlaps [pred_masks, gt_masks]
overlaps = utils.compute_overlaps_masks(pred_masks, gt_masks)
# Loop through predictions and find matching ground truth boxes
match_count = 0
pred_match = -1 * np.ones([pred_boxes.shape[0]])
gt_match = -1 * np.ones([gt_boxes.shape[0]])
for i in range(len(pred_boxes)):
# Find best matching ground truth box
# 1. Sort matches by score
sorted_ixs = np.argsort(overlaps[i])[::-1]
# 2. Remove low scores
low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0]
if low_score_idx.size > 0:
sorted_ixs = sorted_ixs[:low_score_idx[0]]
# 3. Find the match
for j in sorted_ixs:
# If ground truth box is already matched, go to next one
if gt_match[j] > 0:
continue
# If we reach IoU smaller than the threshold, end the loop
iou = overlaps[i, j]
if iou < iou_threshold:
break
# Do we have a match?
if pred_class_ids[i] == gt_class_ids[j]:
match_count += 1
gt_match[j] = i
pred_match[i] = j
break
return gt_match, pred_match, overlaps
def compute_ap(gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
iou_threshold=0.5):
"""Compute Average Precision at a set IoU threshold (default 0.5).
Returns:
mAP: Mean Average Precision
precisions: List of precisions at different class score thresholds.
recalls: List of recall values at different class score thresholds.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
# Get matches and overlaps
gt_match, pred_match, overlaps = compute_matches(
gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
iou_threshold)
# Compute precision and recall at each prediction box step
precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1)
recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match)
# Pad with start and end values to simplify the math
precisions = np.concatenate([[0], precisions, [0]])
recalls = np.concatenate([[0], recalls, [1]])
# Ensure precision values decrease but don't increase. This way, the
# precision value at each recall threshold is the maximum it can be
# for all following recall thresholds, as specified by the VOC paper.
for i in range(len(precisions) - 2, -1, -1):
precisions[i] = np.maximum(precisions[i], precisions[i + 1])
# Compute mean AP over recall range
indices = np.where(recalls[:-1] != recalls[1:])[0] + 1
mAP = np.sum((recalls[indices] - recalls[indices - 1]) *
precisions[indices])
return mAP, precisions, recalls, overlaps
def compute_ap_range(gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_thresholds=None, verbose=1):
"""Compute AP over a range or IoU thresholds. Default range is 0.5-0.95."""
# Default is 0.5 to 0.95 with increments of 0.05
iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)
# Compute AP over range of IoU thresholds
AP = []
for iou_threshold in iou_thresholds:
ap, precisions, recalls, overlaps =\
compute_ap(gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_threshold=iou_threshold)
if verbose:
print("AP @{:.2f}:\t {:.3f}".format(iou_threshold, ap))
AP.append(ap)
AP = np.array(AP).mean()
if verbose:
print("AP @{:.2f}-{:.2f}:\t {:.3f}".format(
iou_thresholds[0], iou_thresholds[-1], AP))
return AP
def compute_recall(pred_boxes, gt_boxes, iou):
"""Compute the recall at the given IoU threshold. It's an indication
of how many GT boxes were found by the given prediction boxes.
pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates
gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates
"""
# Measure overlaps
overlaps = utils.compute_overlaps(pred_boxes, gt_boxes)
iou_max = np.max(overlaps, axis=1)
iou_argmax = np.argmax(overlaps, axis=1)
positive_ids = np.where(iou_max >= iou)[0]
matched_gt_boxes = iou_argmax[positive_ids]
recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0]
return recall, positive_ids
| 39.784314 | 79 | 0.626581 | 865 | 6,087 | 4.227746 | 0.223121 | 0.030626 | 0.019688 | 0.017501 | 0.242275 | 0.172546 | 0.154772 | 0.13563 | 0.120317 | 0.102817 | 0 | 0.019465 | 0.25727 | 6,087 | 152 | 80 | 40.046053 | 0.789427 | 0.329226 | 0 | 0.111111 | 0 | 0 | 0.012268 | 0 | 0 | 0 | 0 | 0.006579 | 0.012346 | 1 | 0.061728 | false | 0 | 0.024691 | 0 | 0.148148 | 0.024691 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbf3184cbf64db2cef356b8b53aefaa13e99ce22 | 10,562 | py | Python | checker.py | Aihakaiha/mecount | 51e0f44b0b7db61ba5f9a9bc3fccf4da7ea62570 | [
"MIT"
] | null | null | null | checker.py | Aihakaiha/mecount | 51e0f44b0b7db61ba5f9a9bc3fccf4da7ea62570 | [
"MIT"
] | null | null | null | checker.py | Aihakaiha/mecount | 51e0f44b0b7db61ba5f9a9bc3fccf4da7ea62570 | [
"MIT"
] | null | null | null | from pathlib import Path
import sys
import os
import threading
import subprocess
from win10toast import ToastNotifier
PATH = sys.argv[1]
toast = ToastNotifier()
total_count = 0
total_lines = 0
html_count = 0
html_lines = 0
css_count = 0
css_lines = 0
js_count = 0
js_lines = 0
python_count = 0
python_lines = 0
csharp_count = 0
csharp_lines = 0
cpp_count = 0
cpp_lines = 0
c_count = 0
c_lines = 0
java_count = 0
java_lines = 0
batch_count = 0
batch_lines = 0
plain_count = 0
plain_lines = 0
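# Extensions the script recognises; readFiles() refers to them by index
# (0-1 HTML, 2 CSS, 3 JS, 4-5 Python, 6 C#, 7 C++, 8 C, 9 Java, 10 plain text, 11 batch).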
accepted_formats = ["html", "htm", "css", "js", "py", "pyw", "cs", "cpp", "c", "java", "txt", "bat"]
try:
print("")
os.stat(PATH)
except FileNotFoundError as e:
print("Not valid path:", PATH)
sys.exit()
list_of_files = []
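# Walk up to five directory levels below PATH by hand and collect every file path found.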
for item in os.listdir(PATH):
if os.path.isfile(os.path.join(PATH, item)):
list_object = PATH+"/"+item
print(list_object)
list_of_files.append(list_object)
else:
for items in os.listdir(PATH+"/"+item):
if os.path.isfile(os.path.join(PATH+"/"+item, items)):
list_object = PATH+"/"+item+"/"+items
print(list_object)
list_of_files.append(list_object)
else:
for more_items in os.listdir(PATH+"/"+item+"/"+items):
if os.path.isfile(os.path.join(PATH+"/"+item+"/"+items, more_items)):
print(more_items)
list_object = PATH+"/"+item+"/"+items+"/"+more_items
print(list_object)
list_of_files.append(list_object)
else:
for even_more_items in os.listdir(PATH+"/"+item+"/"+items+"/"+more_items):
if os.path.isfile(os.path.join(PATH+"/"+item+"/"+items+"/"+more_items, even_more_items)):
list_object = PATH+"/"+item+"/"+items+"/"+more_items+"/"+even_more_items
list_of_files.append(list_object)
else:
for alot_more_items in os.listdir((PATH+"/"+item+"/"+items+"/"+more_items+"/"+even_more_items)):
if os.path.isfile(os.path.join(PATH+"/"+item+"/"+items+"/"+more_items+"/"+even_more_items, alot_more_items)):
list_object = PATH+"/"+item+"/"+items+"/"+more_items+"/"+even_more_items+"/"+alot_more_items
print(list_object)
list_of_files.append(list_object)
print(list_of_files)
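# readFiles: add this file's character count and line count to the totals for
# the language implied by its extension.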
def readFiles(file, extension):
count = 0
global total_count
global total_lines
global html_count
global html_lines
global css_count
global css_lines
global js_count
global js_lines
global python_count
global python_lines
global csharp_count
global csharp_lines
global css_lines
global cpp_count
global cpp_lines
global c_count
global c_lines
global java_count
global java_lines
global batch_count
global batch_lines
global plain_count
global plain_lines
extension = extension.lower()
if extension in accepted_formats:
if extension == accepted_formats[0] or extension == accepted_formats[1]:
with open(file, "r", encoding="ascii", errors="surrogateescape")as f:
f = f.read()
html_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
total_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
for _ in f:
html_count = html_count + 1
count = count + 1
if extension == accepted_formats[2]:
with open(file, "r", encoding="ascii", errors="surrogateescape")as f:
f = f.read()
css_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
total_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
for _ in f:
css_count = css_count + 1
count = count + 1
if extension == accepted_formats[3]:
with open(file, "r", encoding="ascii", errors="surrogateescape")as f:
f = f.read()
js_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
total_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
for _ in f:
js_count = js_count + 1
count = count + 1
if extension == accepted_formats[4] or extension == accepted_formats[5]:
with open(file, "r", encoding="ascii", errors="surrogateescape")as f:
f = f.read()
python_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
total_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
for _ in f:
python_count = python_count + 1
count = count + 1
if extension == accepted_formats[6]:
with open(file, "r", encoding="ascii", errors="surrogateescape")as f:
f = f.read()
csharp_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
total_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
for _ in f:
csharp_count = csharp_count + 1
count = count + 1
if extension == accepted_formats[7]:
with open(file, "r", encoding="ascii", errors="surrogateescape")as f:
f = f.read()
cpp_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
total_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
for _ in f:
cpp_count = cpp_count + 1
count = count + 1
if extension == accepted_formats[8]:
with open(file, "r", encoding="ascii", errors="surrogateescape")as f:
f = f.read()
c_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
total_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
for _ in f:
c_count = c_count + 1
count = count + 1
if extension == accepted_formats[9]:
with open(file, "r", encoding="ascii", errors="surrogateescape")as f:
f = f.read()
java_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
total_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
for _ in f:
java_count = java_count + 1
count = count + 1
if extension == accepted_formats[10]:
with open(file, "r", encoding="ascii", errors="surrogateescape")as f:
f = f.read()
plain_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
total_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
for _ in f:
plain_count = plain_count + 1
count = count + 1
if extension == accepted_formats[11]:
with open(file, "r", encoding="ascii", errors="surrogateescape") as f:
f = f.read()
batch_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
total_lines += len(open(file, "r", encoding="ascii", errors="surrogateescape").read().splitlines())
for _ in f:
batch_count = batch_count + 1
count = count + 1
else:
print("")
print(f"file format not support: {file}")
total_count += count
for files in list_of_files:
if "." in files:
file_extension = files.rsplit(".", 1)[1]
#
readFiles(files, file_extension)
pass
else:
pass
def calc(x, y):
return round(x / y * 100, 2)
print("")
if html_count != 0 and html_lines != 0:
print(f"HTML: {html_count}------------- {calc(html_count, total_count)}% --- Lines: {html_lines}")
if css_count != 0 and css_lines != 0:
print(f"CSS: {css_count}---------------- {calc(css_count, total_count)}% --- Lines: {css_lines}")
if js_count != 0 and js_lines != 0:
print(f"JavaScript {js_count}---------- {calc(js_count, total_count)}% --- Lines: {js_lines}")
if python_count != 0 and python_lines != 0:
print(f"Python: {python_count}------------- {calc(python_count, total_count)}% --- Lines: {python_lines}")
if csharp_count != 0 and csharp_lines != 0:
print(f"C#: {csharp_count}----------------- {calc(csharp_count, total_count)}% --- Lines: {csharp_lines}")
if cpp_count != 0 and cpp_lines != 0:
print(f"C++: {cpp_count}---------------- {calc(cpp_count, total_count)}% --- Lines: {cpp_lines}")
if c_count != 0 and c_lines != 0:
print(f"C: {c_count}------------------ {calc(c_count, total_count)}% --- Lines: {c_lines}")
if java_count != 0 and java_lines != 0:
print(f"Java: {java_count}--------------- {calc(java_count, total_count)}% --- Lines: {java_lines}")
if batch_count != 0 and batch_lines != 0:
print(f"Batch: {batch_count}--------------- {calc(batch_count, total_count)}% --- Lines: {batch_lines}")
if plain_count != 0 and plain_lines != 0:
print(f"Plain: {plain_count}-------------- {calc(plain_count, total_count)}% --- Lines: {plain_lines}")
print("Total count", total_count, " Total lines", total_lines)
toast.show_toast("Read complete!", f"Read complete at {PATH} Count: {total_count} LOC: {total_lines}", duration=3)
sys.exit() | 43.465021 | 146 | 0.544878 | 1,233 | 10,562 | 4.476886 | 0.08678 | 0.043478 | 0.048913 | 0.092391 | 0.581159 | 0.571014 | 0.5625 | 0.5625 | 0.551268 | 0.468841 | 0 | 0.011835 | 0.304014 | 10,562 | 243 | 147 | 43.465021 | 0.739083 | 0 | 0 | 0.349057 | 0 | 0 | 0.169186 | 0.023837 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009434 | false | 0.009434 | 0.028302 | 0.004717 | 0.042453 | 0.103774 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbf69e1751cc22a4d9f5428d9f935069124ba770 | 4,686 | py | Python | train_depth_complete.py | choyingw/SCADC-DepthCompletion | 1da6d01213ea9de53d83625d9c36414d56653aad | [
"Apache-2.0"
] | 9 | 2021-04-01T01:53:06.000Z | 2022-03-16T01:35:58.000Z | train_depth_complete.py | choyingw/SCADC-DepthCompletion | 1da6d01213ea9de53d83625d9c36414d56653aad | [
"Apache-2.0"
] | null | null | null | train_depth_complete.py | choyingw/SCADC-DepthCompletion | 1da6d01213ea9de53d83625d9c36414d56653aad | [
"Apache-2.0"
] | 1 | 2022-03-25T03:29:09.000Z | 2022-03-25T03:29:09.000Z | #!/usr/bin/env python
# Author: Cho-Ying Wu, USC, March 2021
# Scene Completeness-Aware Lidar Depth Completion for Driving Scenario
# ICASSP 2021
import time
from options.options import AdvanceOptions
from models import create_model
from util.visualizer import Visualizer
from dataloaders.kitti_dataloader import KITTIDataset
import numpy as np
import random
import torch
import cv2
if __name__ == '__main__':
train_opt = AdvanceOptions().parse(True)
if not train_opt.test_path or not train_opt.train_path:
raise ValueError('Please specify paths for both the training and testing data.')
train_dataset = KITTIDataset(train_opt.train_path, type='train',
modality='d2sm')
test_dataset = KITTIDataset(train_opt.test_path, type='val',
modality='d2sm')
train_data_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=train_opt.batch_size, shuffle=True,
num_workers=train_opt.num_threads, pin_memory=True, sampler=None,
worker_init_fn=lambda work_id:np.random.seed(train_opt.seed + work_id))
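# Note: worker_init_fn above seeds NumPy separately in each DataLoader worker
# with train_opt.seed + worker id, keeping per-worker randomness reproducible.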
test_opt = AdvanceOptions().parse(True)
test_opt.phase = 'val'
test_opt.batch_size = 1
test_opt.num_threads = 1
test_opt.serial_batches = True
test_opt.no_flip = True
test_opt.display_id = -1
test_data_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=test_opt.batch_size, shuffle=True, num_workers=test_opt.num_threads, pin_memory=True)
train_dataset_size = len(train_data_loader)
print('#training images = %d' % train_dataset_size)
test_dataset_size = len(test_data_loader)
print('#test images = %d' % test_dataset_size)
model = create_model(train_opt, train_dataset)
model.setup(train_opt)
visualizer = Visualizer(train_opt) # logger instance
total_steps = 0
for epoch in range(train_opt.epoch_count, train_opt.niter + 1):
model.train()
epoch_start_time = time.time()
iter_data_time = time.time()
epoch_iter = 0
model.init_eval()
iterator = iter(train_data_loader)
while True:
try:
next_batch = next(iterator)
except StopIteration:
break
data, target = next_batch[0], next_batch[1]
iter_start_time = time.time()
if total_steps % train_opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
total_steps += train_opt.batch_size
epoch_iter += train_opt.batch_size
model.set_new_input(data,target)
model.optimize_parameters()
if total_steps % train_opt.print_freq == 0:
losses = model.get_current_losses()
t = (time.time() - iter_start_time) / train_opt.batch_size
visualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data)
message = model.print_depth_evaluation()
visualizer.print_current_depth_evaluation(message)
print()
iter_data_time = time.time()
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, train_opt.niter, time.time() - epoch_start_time))
model.update_learning_rate()
if epoch and epoch % train_opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
model.save_networks('latest')
model.save_networks(epoch)
model.eval()
test_loss_iter = []
epoch_iter = 0
model.init_test_eval()
with torch.no_grad():
iterator = iter(test_data_loader)
while True:
try:
next_batch = next(iterator)
except IndexError:
print("Corrupted data are catched! Discard this batch!")
continue
except StopIteration:
break
data, target = next_batch[0], next_batch[1]
model.set_new_input(data,target)
model.forward()
model.test_depth_evaluation(test_opt)
model.get_loss()
epoch_iter += test_opt.batch_size
losses = model.get_current_losses()
print('test epoch {0:}, iters: {1:}/{2:} '.format(epoch, epoch_iter, len(test_dataset) * test_opt.batch_size), end='\r')
message = model.print_test_depth_evaluation()
visualizer.print_current_depth_evaluation(message) # print the loss, and error message to the log file
print( # print on screen for fast validation
'RMSE= Curr: {result.rmse:.4f}(Avg: {average.rmse:.4f}) '
'MSE= Curr:{result.mse:.4f}(Avg: {average.mse:.4f}) '
'MAE= Curr:{result.mae:.4f}(Avg: {average.mae:.4f}) '
'Delta1= Curr:{result.delta1:.4f}(Avg: {average.delta1:.4f}) '
'Delta2= Curr:{result.delta2:.4f}(Avg: {average.delta2:.4f}) '
'Delta3= Curr:{result.delta3:.4f}(Avg: {average.delta3:.4f}) '
'REL= Curr:{result.absrel:.4f}(Avg: {average.absrel:.4f}) '
'Lg10= Curr:{result.lg10:.4f}(Avg: {average.lg10:.4f}) '.format(
result=model.test_result, average=model.test_average.average())) | 36.897638 | 125 | 0.70956 | 671 | 4,686 | 4.695976 | 0.269747 | 0.050778 | 0.030467 | 0.02158 | 0.243732 | 0.201841 | 0.163758 | 0.123136 | 0.104094 | 0.063472 | 0 | 0.015226 | 0.173069 | 4,686 | 127 | 126 | 36.897638 | 0.797935 | 0.051216 | 0 | 0.245283 | 0 | 0 | 0.169595 | 0.067793 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.084906 | 0 | 0.084906 | 0.141509 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbf6dd47fb45adab708085e3cba74c8749afb704 | 5,609 | py | Python | odtbrain/_prepare_sino.py | RI-imaging/ODTbrain | 063f9d1cf7803dd0dda9d68d2847f16c2496c205 | [
"BSD-3-Clause"
] | 15 | 2016-01-22T20:08:10.000Z | 2022-03-24T17:00:27.000Z | odtbrain/_prepare_sino.py | RI-imaging/ODTbrain | 063f9d1cf7803dd0dda9d68d2847f16c2496c205 | [
"BSD-3-Clause"
] | 15 | 2017-01-17T12:07:58.000Z | 2022-02-02T22:30:33.000Z | odtbrain/_prepare_sino.py | RI-imaging/ODTbrain | 063f9d1cf7803dd0dda9d68d2847f16c2496c205 | [
"BSD-3-Clause"
] | 6 | 2017-10-29T20:05:42.000Z | 2021-02-19T23:23:36.000Z | """Sinogram preparation"""
import numpy as np
from scipy.stats import mode
from skimage.restoration import unwrap_phase
def align_unwrapped(sino):
"""Align an unwrapped phase array to zero-phase
All operations are performed in-place.
"""
samples = []
if len(sino.shape) == 2:
# 2D
# take 1D samples at beginning and end of array
samples.append(sino[:, 0])
samples.append(sino[:, 1])
samples.append(sino[:, 2])
samples.append(sino[:, -1])
samples.append(sino[:, -2])
elif len(sino.shape) == 3:
# 3D
# take 1D samples at beginning and end of array
samples.append(sino[:, 0, 0])
samples.append(sino[:, 0, -1])
samples.append(sino[:, -1, 0])
samples.append(sino[:, -1, -1])
samples.append(sino[:, 0, 1])
# find discontinuities in the samples
steps = np.zeros((len(samples), samples[0].shape[0]))
for i in range(len(samples)):
t = np.unwrap(samples[i])
steps[i] = samples[i] - t
# if the majority believes so, add a step of PI
remove = mode(steps, axis=0)[0][0]
# obtain divmod min
twopi = 2*np.pi
minimum = divmod_neg(np.min(sino), twopi)[0]
remove += minimum*twopi
for i in range(len(sino)):
sino[i] -= remove[i]
def divmod_neg(a, b):
"""Return divmod with closest result to zero"""
q, r = divmod(a, b)
# make sure r is close to zero
sr = np.sign(r)
if np.abs(r) > b/2:
q += sr
r -= b * sr
return q, r
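# Worked example (illustrative): divmod(7, 4) == (1, 3); since |3| > 4/2 the
# remainder is shifted toward zero, so divmod_neg(7, 4) == (2, -1), i.e. 7 == 2*4 + (-1).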
def sinogram_as_radon(uSin, align=True):
r"""Compute the phase from a complex wave field sinogram
This step is essential when using the ray approximation before
computation of the refractive index with the inverse Radon
transform.
Parameters
----------
uSin: 2d or 3d complex ndarray
The background-corrected sinogram of the complex scattered wave
:math:`u(\mathbf{r})/u_0(\mathbf{r})`. The first axis iterates
through the angles :math:`\phi_0`.
align: bool
Tries to correct for a phase offset in the phase sinogram.
Returns
-------
phase: 2d or 3d real ndarray
The unwrapped phase array corresponding to `uSin`.
See Also
--------
skimage.restoration.unwrap_phase: phase unwrapping
radontea.backproject_3d: e.g. reconstruction via backprojection
"""
ndims = len(uSin.shape)
if ndims == 2:
# unwrapping is very important
phiR = np.unwrap(np.angle(uSin), axis=-1)
else:
# Unwrap gets the dimension of the problem from the input
# data. Since we have a sinogram, we need to pass it the
# slices one by one.
phiR = np.angle(uSin)
for ii in range(len(phiR)):
phiR[ii] = unwrap_phase(phiR[ii], seed=47)
if align:
align_unwrapped(phiR)
return phiR
def sinogram_as_rytov(uSin, u0=1, align=True):
r"""Convert the complex wave field sinogram to the Rytov phase
This method applies the Rytov approximation to the
recorded complex wave sinogram. To achieve this, the following
filter is applied:
.. math::
u_\mathrm{B}(\mathbf{r}) = u_\mathrm{0}(\mathbf{r})
\ln\!\left(
\frac{u_\mathrm{R}(\mathbf{r})}{u_\mathrm{0}(\mathbf{r})}
+1 \right)
This filter step effectively replaces the Born approximation
:math:`u_\mathrm{B}(\mathbf{r})` with the Rytov approximation
:math:`u_\mathrm{R}(\mathbf{r})`, assuming that the scattered
field is equal to
:math:`u(\mathbf{r})\approx u_\mathrm{R}(\mathbf{r})+
u_\mathrm{0}(\mathbf{r})`.
Parameters
----------
uSin: 2d or 3d complex ndarray
The sinogram of the complex wave
:math:`u_\mathrm{R}(\mathbf{r}) + u_\mathrm{0}(\mathbf{r})`.
The first axis iterates through the angles :math:`\phi_0`.
u0: ndarray of dimension as `uSin` or less, or int.
The incident plane wave
:math:`u_\mathrm{0}(\mathbf{r})` at the detector.
If `u0` is "1", it is assumed that the data is already
background-corrected (
`uSin` :math:`= \frac{u_\mathrm{R}(\mathbf{r})}{
u_\mathrm{0}(\mathbf{r})} + 1`
). Note that if the reconstruction distance :math:`l_\mathrm{D}`
of the original experiment is non-zero and `u0` is set to 1,
then the reconstruction will be wrong; the field is not focused
to the center of the reconstruction volume.
align: bool
Tries to correct for a phase offset in the phase sinogram.
Returns
-------
uB: 2d or 3d real ndarray
The Rytov-filtered complex sinogram
:math:`u_\mathrm{B}(\mathbf{r})`.
See Also
--------
skimage.restoration.unwrap_phase: phase unwrapping
"""
ndims = len(uSin.shape)
# imaginary part of the complex Rytov phase
phiR = np.angle(uSin / u0)
# real part of the complex Rytov phase
lna = np.log(np.absolute(uSin / u0))
if ndims == 2:
# unwrapping is very important
phiR[:] = np.unwrap(phiR, axis=-1)
else:
# Unwrap gets the dimension of the problem from the input
# data. Since we have a sinogram, we need to pass it the
# slices one by one.
for ii in range(len(phiR)):
phiR[ii] = unwrap_phase(phiR[ii], seed=47)
if align:
align_unwrapped(phiR)
# rytovSin = u0*(np.log(a/a0) + 1j*phiR)
# u0 is one - we already did background correction
# complex rytov phase:
rytovSin = 1j * phiR + lna
return u0 * rytovSin
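# Example usage (a sketch with made-up data; `sino` has shape (angles, detector pixels)):
#     import numpy as np
#     sino = np.exp(1j * 0.1 * np.random.rand(90, 64))  # complex, background-corrected
#     rytov = sinogram_as_rytov(sino, u0=1, align=True)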
| 30.650273 | 72 | 0.607417 | 806 | 5,609 | 4.186104 | 0.266749 | 0.03527 | 0.050385 | 0.024896 | 0.430646 | 0.401008 | 0.352104 | 0.345584 | 0.272081 | 0.272081 | 0 | 0.017617 | 0.27135 | 5,609 | 182 | 73 | 30.818681 | 0.807928 | 0.588518 | 0 | 0.233333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.05 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbf74445e4232afaa3266299bfa9077227ec3594 | 6,503 | py | Python | indra/sources/eidos/eidos_api.py | min-yin-sri/indra | 93d4cb8b23764a2775f9dbdf5eb73b6053006d73 | [
"BSD-2-Clause"
] | 2 | 2020-01-14T08:59:10.000Z | 2020-12-18T16:21:38.000Z | indra/sources/eidos/eidos_api.py | min-yin-sri/indra | 93d4cb8b23764a2775f9dbdf5eb73b6053006d73 | [
"BSD-2-Clause"
] | null | null | null | indra/sources/eidos/eidos_api.py | min-yin-sri/indra | 93d4cb8b23764a2775f9dbdf5eb73b6053006d73 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str, bytes
from past.builtins import basestring
import json
import logging
import requests
from .processor import EidosJsonProcessor, EidosJsonLdProcessor
logger = logging.getLogger('eidos')
try:
# For text reading
from .eidos_reader import EidosReader
eidos_reader = EidosReader()
except Exception as e:
logger.warning('Could not instantiate Eidos reader, text reading '
'will not be available.')
eidos_reader = None
def process_text(text, out_format='json_ld', save_json='eidos_output.json',
webservice=None):
"""Return an EidosProcessor by processing the given text.
This constructs a reader object via Java and extracts mentions
from the text. It then serializes the mentions into JSON and
processes the result with process_json.
Parameters
----------
text : str
The text to be processed.
out_format : str
The type of Eidos output to read into and process. Can be one of
"json" or "json_ld". Default: "json_ld"
save_json : Optional[str]
The name of a file in which to dump the JSON output of Eidos.
webservice : Optional[str]
An Eidos reader web service URL to send the request to.
Returns
-------
ep : EidosJsonProcessor or EidosJsonLdProcessor depending on out_format
A EidosJsonProcessor or EidosJsonLdProcessor containing the extracted
INDRA Statements in ep.statements.
"""
if not webservice:
if eidos_reader is None:
logger.error('Eidos reader is not available.')
return None
json_dict = eidos_reader.process_text(text, out_format)
else:
res = requests.post('%s/process_text' % webservice,
json={'text': text})
json_dict = res.json()
if save_json:
with open(save_json, 'wt') as fh:
json.dump(json_dict, fh, indent=2)
if out_format == 'json':
return process_json(json_dict)
elif out_format == 'json_ld':
return process_json_ld(json_dict)
else:
logger.error('Output format %s is invalid.' % out_format)
return None
def process_json_file(file_name):
"""Return an EidosProcessor by processing the given Eidos json file.
The output from the Eidos reader is in json format. This function is
useful if the output is saved as a file and needs to be processed.
Parameters
----------
file_name : str
The name of the json file to be processed.
Returns
-------
ep : EidosJsonProcessor
A EidosJsonProcessor containing the extracted INDRA Statements
in ep.statements.
"""
try:
with open(file_name, 'rb') as fh:
json_str = fh.read().decode('utf-8')
return process_json_str(json_str)
except IOError:
logger.exception('Could not read file %s.' % file_name)
def process_json_ld_file(file_name):
"""Return an EidosProcessor by processing the given Eidos JSON-LD file.
The output from the Eidos reader is in json-LD format. This function is
useful if the output is saved as a file and needs to be processed.
Parameters
----------
file_name : str
The name of the JSON-LD file to be processed.
Returns
-------
ep : EidosJsonLdProcessor
A EidosJsonLdProcessor containing the extracted INDRA Statements
in ep.statements.
"""
try:
with open(file_name, 'rb') as fh:
json_str = fh.read().decode('utf-8')
return process_json_ld_str(json_str)
except IOError:
logger.exception('Could not read file %s.' % file_name)
def process_json_str(json_str):
"""Return an EidosProcessor by processing the given Eidos json string.
The output from the Eidos parser is in json format.
Parameters
----------
json_str : str
The json string to be processed.
Returns
-------
ep : EidosJsonProcessor
A EidosProcessor containing the extracted INDRA Statements
in ep.statements.
"""
if not isinstance(json_str, basestring):
raise TypeError('{} is {} instead of {}'.format(json_str,
json_str.__class__,
basestring))
try:
json_dict = json.loads(json_str)
except ValueError:
logger.error('Could not decode JSON string.')
return None
return process_json(json_dict)
def process_json_ld_str(json_str):
"""Return an EidosJsonLdProcessor by processing the Eidos JSON-LD string.
The output from the Eidos parser is in JSON-LD format.
Parameters
----------
json_str : str
The json-LD string to be processed.
Returns
-------
ep : EidosJsonLdProcessor
A EidosJsonLdProcessor containing the extracted INDRA Statements
in ep.statements.
"""
if not isinstance(json_str, basestring):
raise TypeError('{} is {} instead of {}'.format(json_str,
json_str.__class__,
basestring))
try:
json_dict = json.loads(json_str)
except ValueError:
logger.error('Could not decode JSON-LD string.')
return None
return process_json_ld(json_dict)
def process_json(json_dict):
"""Return an EidosJsonProcessor by processing the given Eidos JSON dict.
Parameters
----------
json_dict : dict
The JSON dict to be processed.
Returns
-------
ep : EidosJsonProcessor
A EidosJsonProcessor containing the extracted INDRA Statements
in ep.statements.
"""
ep = EidosJsonProcessor(json_dict)
ep.get_events()
return ep
def process_json_ld(json_dict):
"""Return an EidosJsonLdProcessor by processing a Eidos JSON-LD dict.
Parameters
----------
json_dict : dict
The JSON-LD dict to be processed.
Returns
-------
ep : EidosJsonLdProcessor
A EidosJsonLdProcessor containing the extracted INDRA Statements
in ep.statements.
"""
ep = EidosJsonLdProcessor(json_dict)
ep.get_events()
return ep
def initialize_reader():
"""Instantiate an Eidos reader for fast subsequent reading."""
eidos_reader.process_text('', 'json_ld')
| 29.830275 | 77 | 0.635245 | 797 | 6,503 | 5.046424 | 0.171895 | 0.029836 | 0.02909 | 0.046992 | 0.629786 | 0.576827 | 0.53083 | 0.477374 | 0.462456 | 0.448782 | 0 | 0.000644 | 0.284177 | 6,503 | 217 | 78 | 29.967742 | 0.863373 | 0.45133 | 0 | 0.47561 | 0 | 0 | 0.11398 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.097561 | 0 | 0.341463 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbfccf4323e58aed1fbcb3a40e3241762f84e2dc | 41,269 | py | Python | sra/v2/tables.py | nellore/runs | 5bd2e2a92a8a5b3dba90fe93080b9c7f11339e43 | [
"MIT"
] | 13 | 2015-12-07T21:28:24.000Z | 2019-11-08T22:42:39.000Z | sra/v2/tables.py | nellore/runs | 5bd2e2a92a8a5b3dba90fe93080b9c7f11339e43 | [
"MIT"
] | null | null | null | sra/v2/tables.py | nellore/runs | 5bd2e2a92a8a5b3dba90fe93080b9c7f11339e43 | [
"MIT"
] | 10 | 2015-12-21T15:41:03.000Z | 2020-06-04T04:31:41.000Z | #!/usr/bin/env python
"""
tables.py
Abhi Nellore / March 14, 2015
Reproduces data used in Mathematica 10 notebook figures.nb for the paper Human
splicing diversity across the Sequence Read Archive (v2). Based in part on
Abhi Nellore's talk at Genome Informatics 2015; see the related repo
https://github.com/nellore/gi2015. Also based on v1 of this script, which is
../tables.py.
Get and unpack HISAT2 2.0.1-beta from
https://ccb.jhu.edu/software/hisat2/index.shtml; we use the tool
extract_splice_sites.py that comes with it to obtain splice sites from
annotation.
File requirements:
1. intropolis.v2.hg38.tsv.gz: database of exon-exon junctions found across
~50k SRA samples NOT PROVIDED IN THIS REPO BUT CAN BE REPRODUCED.
See README.md for instructions. The file is also available for download
at http://intropolis.rail.bio .
2. intropolis.idmap.v2.hg38.tsv: maps sample indexes from
intropolis.v2.hg38.tsv.gz to SRA run accession numbers (regex: [SED]RR\d+)
(In this repo.)
3. excluded.txt: lists SRA run accession numbers that may be in
intropolis.v2.hg38.tsv.gz but that were actually excluded from analysis.
Used to get accurate number of samples aligned.
(In this repo; see sra/v2/hg38/excluded.txt.)
4. liftOver executable available from https://genome-store.ucsc.edu/products/ ;
used to lift over hg19 Gencode junctions to hg38 for Gencode evolution
results _and_ to lift over hg19 SEQC junctions to hg38.
5. http://hgdownload.cse.ucsc.edu/goldenPath/hg38/liftOver/
hg19ToHg38.over.chain.gz, a dependency of Gencode.
6. All GENCODE gene annotations for GRCh37 and GRCh38, which may be obtained by
executing the following command.
for i in 3c 3d 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24;
do curl -o gencode.$i.gtf.gz ftp://ftp.sanger.ac.uk/pub/gencode/
Gencode_human/release_$i/gencode.v$i.annotation.gtf.gz; if [ $? -eq 78 ];
then curl -o gencode.$i.gtf.gz ftp://ftp.sanger.ac.uk/pub/gencode/
Gencode_human/release_$i/gencode.v$i.annotation.GRCh37.gtf.gz; fi;
if [ $? -eq 78 ]; then curl -o gencode.$i.gtf.gz ftp://ftp.sanger.ac.uk/
pub/gencode/Gencode_human/release_$i/gencode_v$i.annotation.GRCh37.gtf.gz;
fi; if [ $? -eq 78 ]; then curl -o gencode.$i.gtf.gz ftp://
ftp.sanger.ac.uk/pub/gencode/release_$i/gencode.v$i.gtf.gz; fi;
if [ $? -eq 78 ]; then curl -o gencode.$i.gtf.gz ftp://
ftp.sanger.ac.uk/pub/gencode/Gencode_human/release_$i/gencode.v$i.gtf.gz;
fi; done
The GENCODE GTF filenames must have the format gencode.[VERSION].gtf.gz .
The directory containing GENCODE GTFs is specified at the command line.
An archive containing these GENCODE gene annotations may be downloaded at
http://verve.webfactional.com/misc/v2/gencodes.tar.gz .
7. annotated_junctions.tsv.gz, which is in this directory and is
generated by rip_annotated_junctions.py . This file contains a union
of relevant annotated junction tracks from the UCSC Genome Browser.
See rip_annotated_junctions.py for more information.
8. http://www.nature.com/nbt/journal/v32/n9/extref/nbt.2957-S4.zip, which
is Supplementary Data 3 from the paper "A comprehensive assessment of
RNA-seq accuracy, reproducibility and information content by the
Sequencing Quality Control Consortium" by SEQC/MAQC-III Consortium
in Nature Biotech. The junctions on this list are used to compare
alignment protocols with Rail. They are lifted over from hg19.
9. biosample_tags.tsv, which is in the hg38 subdirectory of this repo and was
generated using hg38/get_biosample_data.sh . It contains metadata from the
NCBI Biosample database, including sample submission dates. We executed
get_biosample_data.sh at 2:06 PM ET on 3/15/2016.
intropolis.v2.hg38.tsv.gz is specified as the argument of --junctions. Annotated
junctions are read from the argument of the command-line parameter --annotation,
and the GENCODE GTFs above are read from the directory given by --gencode-dir.
Each line of intropolis.v2.hg38.tsv.gz specifies a different junction and has
the following tab-separated fields.
1. chromosome
2. start position (1-based inclusive)
3. end position (1-based inclusive)
4. strand (+ or -)
5. 5' motif (GT, GC, or AT)
6. 3' motif (AG or AC)
7. comma-separated list of indexes of samples in which junction was found
8. comma-separated list of counts of reads overlapping junctions in
corresponding sample from field 7. So if field 7 is 4,5,6 and field 8 is
9,10,11 there are 9 reads overlapping the junction in the sample with
index 4, 10 reads overlapping the junction in the sample with index 5, and
11 reads overlapping the junction in the sample with index 6.
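For reference, a minimal sketch (not used by this script) of how one such line
can be parsed according to the fields above:
    import gzip
    with gzip.open('intropolis.v2.hg38.tsv.gz') as junction_stream:
        for line in junction_stream:
            (chrom, start, end, strand, motif5, motif3,
                samples, coverages) = line.strip().split('\t')
            samples = [int(i) for i in samples.split(',')]
            coverages = [int(c) for c in coverages.split(',')]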
Each line of intropolis.idmap.v2.hg38.tsv specifies a different sample
(specifically, run) on SRA and has the following tab-separated fields.
1. sample index
2. project accession number (regex: [SED]RP\d+)
3. sample accession number (regex: [SED]RS\d+)
4. experiment accession number (regex: [SED]RX\d+)
5. run accession number (regex: [SED]RR\d+)
We used PyPy 2.5.0 with GCC 4.9.2 for our Python implementation and from
the directory containing tables.py ran:
pypy tables.py
--hisat2-dir /path/to/hisat2-2.0.1-beta
--annotation annotated_junctions.tsv.gz
--gencode-dir /path/to/dir/with/gencode/annotations/across/versions
--junctions /path/to/intropolis.v2.hg38.tsv.gz
--biosample-metadata ./hg38/biosample_tags.tsv
--seqc /path/to/nbt.2957-S4.zip
--liftover /path/to/liftOver
--chain /path/to/hg19ToHg38.over.chain
--basename hg38
--index-to-sra intropolis.idmap.v2.hg38.tsv
Note that the argument of --hisat2-dir is the directory containing the HISAT 2
binary and extract_splice_sites.py.
The following output was obtained. It is included in this repo because this
script cannot easily be rerun to obtain results; the input file
intropolis.v2.hg38.tsv.gz must be provided, and this requires following the
instructions in README.md for its reproduction. Note that an "overlap" below
is an instance where a junction is overlapped by a read. A read that overlaps
two exon-exon junctions contributes two overlaps (or overlap instances).
[basename].annotation_diffint.tsv
Matrix where each row is a GENCODE version i and each column is a GENCODE
version i. Each element is in the format
(|junctions in i - junctions in j|, |junctions in i and j|,
|junctions in j - junctions in i|)
, where - is a set difference.
[basename].seqc_summary.txt
Junction counts from the SEQC protocol and Rail for the 1720 samples studied
with both. See file for details.
[basename].sample_count_submission_date_overlap_geq_40.tsv.gz
Tab-separated fields
1. count of reads across all samples overlapping a given junction
2. count of samples in which a given junction was found
3. count of projects in which a given junction was found
4. earliest known discovery date (in units of days after February 27, 2009)
-- this is the earliest known submission date of a sample associated with a
junction
5. and later: presence (1/0) of the junction in each GENCODE version, followed
by the earliest GENCODE version containing it
Only junctions covered by a total of at least 40 reads summed across samples
are included.
[basename].[type].stats.tsv, where [type] is in [project, sample]
Tab-separated fields
1. [type] count
2. Number of junctions found in >= field 1 [type]s
3. Number of annotated junctions found in >= field 1 [type]s
4. Number of exonskips found in >= field 1 [type]s (exon skip: both 5' and 3'
splice sites are annotated, but not in the same exon-exon junction)
5. Number of altstartends found in >= field 1 [type]s (altstartend: either 5'
or 3' splice site is annotated, but not both)
6. Number of novel junctions found in >= field 1 [type]s (novel: both 5' and
3' splice sites are unannotated)
7. Number of GT-AG junctions found in >= field 1 [type]s
8. Number of annotated GT-AG junctions found in >= field 1 [type]s
9. Number of GC-AG junctions found in >= field 1 [type]s
10. Number of annotated GC-AG junctions found in >= field 1 [type]s
11. Number of AT-AC junctions found in >= field 1 [type]s
12. Number of annotated AT-AC junctions found in >= field 1 [type]s
[basename].seqc.stats.tsv
Tab-separated fields
1. SEQC sample count
2. Number of junctions found in >= field 1 SEQC samples
3. Number of junctions found by magic and Rail in >= field 1 SEQC samples
4. Number of junctions found by rmake and Rail in >= field 1 SEQC samples
5. Number of junctions found by subread and Rail in >= field 1 SEQC samples
6. Number of junctions found by Rail and exactly one of
{magic, rmake, subread} in >= field 1 samples
7. Number of junctions found by Rail and exactly two of
{magic, rmake, subread} in >= field 1 samples
8. Number of junctions found by Rail and all of {magic, rmake, subread} in
>= field 1 samples
[basename].stats_by_sample.tsv
Tab-separated fields
1. sample index
2. project accession number
3. sample accession number
4. experiment accession number
5. run accession number
6. junction count
7. annotated junction count
8. count of junctions overlapped by at least 5 reads
9. count of annotated junctions overlapped by at least 5 reads
10. total overlap instances
11. total annotated overlap instances
"""
import sys
import gzip
import zipfile
import re
import os
import subprocess
from contextlib import contextmanager
import tempfile
import atexit
import shutil
def is_gzipped(filename):
""" Uses gzip magic number to determine whether a file is compressed.
filename: path to file
Return value: True iff file filename is gzipped.
"""
with open(filename, 'rb') as binary_input_stream:
# Check for magic number
if binary_input_stream.read(2) == '\x1f\x8b':
return True
else:
return False
@contextmanager
def xopen(filename):
""" Opens both gzipped and uncompressed files for contextual reading.
filename: path to file to open
Yield value: a gzip.open or open object
"""
if is_gzipped(filename):
f = gzip.open(filename)
else:
f = open(filename)
try:
yield f
finally:
f.close()
@contextmanager
def liftover(input_stream, liftover_exe, chain_file, perform=True,
):
""" Transforms input stream in genomics coordinate format X to format Y
input_stream: junctions in format
chrom TAB start position TAB end position TAB strand or "NA"
or list [chrom, start position, end position, strand or "NA"]
liftover_exe: liftover executable; should be args.liftover
chain_file: chain file for liftover executable; should be args.chain
perform: True iff liftover should be performed
Return value: same format as input stream except transformed to new
coordinate system.
"""
if not perform:
yield input_stream
else:
temp_dir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, temp_dir, ignore_errors=True)
input_bed = os.path.join(temp_dir, 'totransform.bed')
output_bed = os.path.join(temp_dir, 'transformed.bed')
unmapped_bed = os.path.join(temp_dir, 'unmapped.bed')
with open(input_bed, 'w') as temp_stream:
for i, line in enumerate(input_stream):
if isinstance(line, str):
tokens = line.strip().split('\t')
else:
tokens = line
print >>temp_stream, '{}\t{}\t{}\t{}\t1\t{}'.format(
tokens[0], tokens[1], tokens[2],
('dummy_' + str(i)) if len(tokens) < 5
else tokens[4], tokens[3]
)
liftover_process = subprocess.check_call(' '.join([
liftover_exe,
input_bed,
chain_file,
output_bed,
unmapped_bed
]),
shell=True,
executable='/bin/bash'
)
output_process = subprocess.Popen(
"awk '{{print $1 \"\t\" $2 \"\t\" $3 \"\t\" $6{}}}' {}".format(
'' if len(tokens) < 5 else ' "\t" $4', output_bed
), shell=True, executable='/bin/bash',
stdout=subprocess.PIPE)
try:
yield output_process.stdout
finally:
output_process.stdout.close()
exit_code = output_process.wait()
if exit_code != 0:
raise RuntimeError(
'Liftover output process failed; '
'exit code was {}.'.format(exit_code)
)
shutil.rmtree(temp_dir, ignore_errors=True)
if __name__ == '__main__':
import argparse
# Print file's docstring if -h is invoked
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Add command-line arguments
parser.add_argument('--hisat2-dir', type=str, required=True,
help=('path to directory containing contents of HISAT2; we '
'unpacked ftp://ftp.ccb.jhu.edu/pub/infphilo/hisat2/'
'downloads/hisat2-2.0.1-beta-Linux_x86_64.zip to get this')
)
parser.add_argument('--annotation', type=str, required=True,
help=('path to annotated_junctions.tsv.gz, which is generated '
'by rip_annotated_junctions.py')
)
parser.add_argument('--gencode-dir', type=str, required=True,
help='path to directory containing all GENCODE GTFs for hg19 and '
'hg38, which includes 3c, 3d, and 4 through 24'
)
parser.add_argument('--junctions', type=str, required=True,
help='junctions file; this should be intropolis.v2.hg38.tsv.gz'
)
parser.add_argument('--index-to-sra', type=str, required=True,
help='index to SRA accession numbers file; this should be '
'intropolis.idmap.v2.hg38.tsv'
)
parser.add_argument('--biosample-metadata', type=str, required=True,
help='path to Biosample metadata file; this should be '
'biosample_tags.tsv'
)
parser.add_argument('--seqc', type=str, required=True,
help='path to SEQC junctions; this should be nbt.2957-S4.zip')
parser.add_argument('--basename', type=str, required=False,
default='hg38',
help='basename for output files'
)
parser.add_argument('--liftover', type=str, required=True,
help=('path to liftOver executable available from '
'https://genome-store.ucsc.edu/products/')
)
parser.add_argument('--chain', type=str, required=True,
help=('path to unzipped liftover chain; this should be '
'hg19ToHg38.over.chain')
)
args = parser.parse_args()
from collections import defaultdict
# Load all annotated junctions from annotated_junctions.tsv.gz
annotated_junctions = set()
annotated_5p = set()
annotated_3p = set()
with xopen(args.annotation) as annotations_stream:
for i, line in enumerate(annotations_stream):
chrom, start, end, strand = line.strip().split('\t')
annotated_junctions.add((chrom, int(start), int(end), strand))
if strand == '+':
annotated_5p.add((chrom, int(start), strand))
annotated_3p.add((chrom, int(end), strand))
elif strand == '-':
annotated_5p.add((chrom, int(end), strand))
annotated_3p.add((chrom, int(start), strand))
print >>sys.stderr, 'Read {} annotated junctions.'.format(i+1)
# Map sample indexes to accession number lines
index_to_sra, index_to_srp, srr_to_index = {}, {}, {}
srs_to_srr = defaultdict(list)
# Get sample indexes for all Illumina RNA-seq from SEQC for comparison
seqc_indexes = set()
with xopen(args.index_to_sra) as index_stream:
for line in index_stream:
partitioned = line.partition('\t')
sample_index = int(partitioned[0])
index_to_sra[sample_index] = partitioned[2].strip()
srp, srs, srx, srr = partitioned[2].strip().split('\t')
srs_to_srr[srs].append(srr)
srr_to_index[srr] = sample_index
index_to_srp[sample_index] = srp
if srp == 'SRP025982':
# SEQC hit!
seqc_indexes.add(sample_index)
print >>sys.stderr, 'Done mapping sample indexes to samples.'
from datetime import date
'''For getting junctions by "earliest detection date"; use units of number
of days after earliest date. Map sample indexes to submission dates.'''
all_dates = {}
with xopen(args.biosample_metadata) as biosample_stream:
biosample_stream.readline() # header
for line in biosample_stream:
tokens = line.strip().split('\t')
current_date = date(
*tuple(
[int(el.strip())
for el in tokens[10].split('T')[0].split('-')]
)
)
for srr in srs_to_srr[tokens[9].upper()]:
all_dates[srr_to_index[srr]] = current_date
earliest_date = min(all_dates.values())
for sample_index in all_dates:
all_dates[sample_index] = (
all_dates[sample_index] - earliest_date
).days
date_indexes = set(all_dates.keys())
print >>sys.stderr, 'Done grabbing submission dates from Biosample DB.'
# Grab all GENCODE junctions
gencodes = defaultdict(set)
containing_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(containing_dir, 'hg38.sizes')) as hg38_stream:
refs = set(
[tokens.strip().split('\t')[0] for tokens in hg38_stream]
)
extract_splice_sites_path = os.path.join(args.hisat2_dir,
'extract_splice_sites.py')
from glob import glob
annotations = glob(os.path.join(args.gencode_dir, 'gencode.*.gtf.gz'))
annotations = [(os.path.basename(annotation_path), annotation_path)
for annotation_path in annotations]
temp_dir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, temp_dir, ignore_errors=True)
temp_anno = os.path.join(temp_dir, 'temp_anno.tsv')
for annotation_base, annotation in annotations:
extract_process = subprocess.Popen(' '.join([
sys.executable,
extract_splice_sites_path,
annotation
if not is_gzipped(
annotation
) else ('<(gzip -cd %s)'
% annotation
)
]),
shell=True,
executable='/bin/bash',
stdout=subprocess.PIPE
)
gencode_version = annotation.split('.')[1]
# Lift over GENCODE versions < 20
with open(temp_anno, 'w') as temp_anno_stream, liftover(
extract_process.stdout, args.liftover, args.chain,
perform=(False if gencode_version in
['20', '21', '22', '23', '24'] else True)
) as liftover_stream:
for line in liftover_stream:
tokens = line.strip().split('\t')
tokens[1] = str(int(tokens[1]) + 2)
print >>temp_anno_stream, '\t'.join(tokens)
extract_process.stdout.close()
exit_code = extract_process.wait()
if exit_code != 0:
raise RuntimeError(
'extract_splice_sites.py had nonzero exit code {}.'.format(
exit_code
)
)
with open(temp_anno) as temp_anno_stream:
for line in temp_anno_stream:
tokens = line.strip().split('\t')
tokens[1] = int(tokens[1])
tokens[2] = int(tokens[2])
if tokens[0] in refs:
gencodes[gencode_version].add(tuple(tokens))
shutil.rmtree(temp_dir, ignore_errors=True)
gencode_versions = ['3c', '3d'] + [str(ver) for ver in range(4, 25)]
# Write some differences/intersections
with open(
args.basename + '.annotation_diffint.tsv', 'w'
) as intersect_stream:
print >>intersect_stream, '\t'.join([''] + gencode_versions)
for i in gencode_versions:
intersect_stream.write(i + '\t')
print >>intersect_stream, '\t'.join([
','.join([
str(len(gencodes[i] - gencodes[j])),
str(len(gencodes[i].intersection(gencodes[j]))),
str(len(gencodes[j] - gencodes[i]))])
for j in gencode_versions
])
print >>sys.stderr, ('Found {} annotated junctions across '
'GENCODE versions.').format(
{ version : len(gencodes[version])
for version in gencodes }
)
'''Grab SEQC junctions. Three protocols were used: Subread, r-make, and
NCBI Magic.'''
magic_junctions, rmake_junctions, subread_junctions = set(), set(), set()
seqc_junctions = set()
temp_dir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, temp_dir, ignore_errors=True)
lifted_supp = os.path.join(temp_dir, 'lifted_supp.tsv')
with zipfile.ZipFile(args.seqc).open('SupplementaryData3.tab') \
as seqc_stream, open(lifted_supp, 'w') as lift_stream:
seqc_stream.readline() # header
for line in seqc_stream:
tokens = line.strip().split('\t')
tokens[0] = tokens[0].split('.')
print >>lift_stream, '\t'.join([tokens[0][0], tokens[0][1],
tokens[0][2], 'NA', ','.join(
tokens[1:4]
)])
with open(lifted_supp) as lift_stream:
with liftover(
lift_stream, args.liftover, args.chain
) as liftover_stream:
for line in liftover_stream:
tokens = line.strip().split('\t')
junction = (tokens[0], int(tokens[1]), int(tokens[2]))
add_junc = False
tokens = tokens[4].split(',')
if tokens[0] == '1':
subread_junctions.add(junction)
add_junc = True
if tokens[1] == '1':
rmake_junctions.add(junction)
add_junc = True
if tokens[2] == '1':
magic_junctions.add(junction)
add_junc = True
if add_junc:
seqc_junctions.add(junction)
print >>sys.stderr, 'Done reading SEQC junctions.'
# Key: sample index; value: number of junctions found in sample
junction_counts = defaultdict(int)
# For junctions in union of annotations specified at command line
annotated_junction_counts = defaultdict(int)
# Count total overlap instances and annotated overlap instances
'''Same as above, but including only junctions covered by at least 5 reads
in the sample.'''
junction_counts_geq_5 = defaultdict(int)
annotated_junction_counts_geq_5 = defaultdict(int)
overlap_counts = defaultdict(int)
annotated_overlap_counts = defaultdict(int)
# Mapping counts of samples to junction counts
sample_count_to_junction_count = defaultdict(int)
project_count_to_junction_count = defaultdict(int)
sample_count_to_GTAG_junction_count = defaultdict(int)
project_count_to_GTAG_junction_count = defaultdict(int)
sample_count_to_GCAG_junction_count = defaultdict(int)
project_count_to_GCAG_junction_count = defaultdict(int)
sample_count_to_ATAC_junction_count = defaultdict(int)
project_count_to_ATAC_junction_count = defaultdict(int)
sample_count_to_GTAG_ann_count = defaultdict(int)
project_count_to_GTAG_ann_count = defaultdict(int)
sample_count_to_GCAG_ann_count = defaultdict(int)
project_count_to_GCAG_ann_count = defaultdict(int)
sample_count_to_ATAC_ann_count = defaultdict(int)
project_count_to_ATAC_ann_count = defaultdict(int)
# One of 5' or 3' splice site is in annotation, one isn't
sample_count_to_altstartend_junction_count = defaultdict(int)
project_count_to_altstartend_junction_count = defaultdict(int)
# Both 5' and 3' splice sites are in annotation, but junction is not
sample_count_to_exonskip_junction_count = defaultdict(int)
project_count_to_exonskip_junction_count = defaultdict(int)
# Full junction is in annotation
sample_count_to_annotated_junction_count = defaultdict(int)
project_count_to_annotated_junction_count = defaultdict(int)
# Neither 5' nor 3' is in annotation
sample_count_to_novel_junction_count = defaultdict(int)
project_count_to_novel_junction_count = defaultdict(int)
# For comparison wth SEQC
rail_seqc_junctions = set()
seqc_sample_count_to_junction_count = defaultdict(int)
seqc_sample_count_to_magic = defaultdict(int)
seqc_sample_count_to_rmake = defaultdict(int)
seqc_sample_count_to_subread = defaultdict(int)
seqc_sample_count_to_ones = defaultdict(int)
seqc_sample_count_to_twos = defaultdict(int)
seqc_sample_count_to_threes = defaultdict(int)
# For junction-date analyses
date_to_junction_count = defaultdict(int)
date_to_junction_count_overlap_geq_40 = defaultdict(int)
with xopen(args.junctions) as junction_stream, gzip.open(
args.basename
+ '.sample_count_submission_date_overlap_geq_40.tsv.gz', 'w'
) as junction_date_stream:
print >>junction_date_stream, ((
'# reads across samples in which junction '
'was found\t'
'# samples in which junction was found'
'\t# projects in which junction was found'
'\tearliest known discovery date in '
'days after %s; format Y-M-D\t') % (
earliest_date.strftime(
'%Y-%m-%d'
)
)) + '\t'.join(
['present in GENCODE v' + ver
for ver in gencode_versions]
) + '\tearliest GENCODE version'
for line in junction_stream:
tokens = line.strip().split('\t')
junction = (tokens[0], int(tokens[1]), int(tokens[2]), tokens[3])
if tokens[3] == '+':
fivep = junction[:2] + (junction[3],)
threep = (junction[0], junction[2], junction[3])
elif tokens[3] == '-':
threep = junction[:2] + (junction[3],)
fivep = (junction[0], junction[2], junction[3])
else:
raise RuntimeError('Bad strand in line "%s"' % line)
samples = [int(el) for el in tokens[-2].split(',')]
coverages = [int(el) for el in tokens[-1].split(',')]
sample_count = len(samples)
project_count = len(set([index_to_srp[sample]
for sample in samples]))
try:
discovery_date = min(
[all_dates[sample] for sample in samples
if sample in date_indexes]
)
except ValueError:
# No discovery date available!
pass
else:
date_to_junction_count[discovery_date] += 1
cov_sum = sum(coverages)
if cov_sum >= 40:
date_to_junction_count_overlap_geq_40[discovery_date] += 1
gencode_bools_to_print = [
'1' if junction in gencodes[ver]
else '0' for ver in gencode_versions
]
try:
earliest_gencode_version = gencode_versions[
gencode_bools_to_print.index('1')
]
except ValueError:
earliest_gencode_version = 'NA'
print >>junction_date_stream, ('%d\t%d\t%d\t%d\t' % (
cov_sum,
sample_count,
project_count,
discovery_date
)) + '\t'.join(gencode_bools_to_print) + (
'\t' + earliest_gencode_version
)
samples_and_coverages = zip(samples, coverages)
sample_count_to_junction_count[sample_count] += 1
project_count_to_junction_count[project_count] += 1
if tokens[5] == 'AG':
if tokens[4] == 'GT':
sample_count_to_GTAG_junction_count[sample_count] += 1
project_count_to_GTAG_junction_count[project_count] += 1
elif tokens[4] == 'GC':
sample_count_to_GCAG_junction_count[sample_count] += 1
project_count_to_GCAG_junction_count[project_count] += 1
else:
raise RuntimeError('Bad motif in line "%s"' % line)
elif tokens[5] == 'AC':
if tokens[4] == 'AT':
sample_count_to_ATAC_junction_count[sample_count] += 1
project_count_to_ATAC_junction_count[project_count] += 1
else:
raise RuntimeError('Bad motif in line "%s"' % line)
if junction in annotated_junctions:
sample_count_to_annotated_junction_count[sample_count] += 1
project_count_to_annotated_junction_count[project_count] += 1
if tokens[5] == 'AG':
if tokens[4] == 'GT':
sample_count_to_GTAG_ann_count[sample_count] += 1
project_count_to_GTAG_ann_count[project_count] += 1
elif tokens[4] == 'GC':
sample_count_to_GCAG_ann_count[sample_count] += 1
project_count_to_GCAG_ann_count[project_count] += 1
elif tokens[5] == 'AC':
sample_count_to_ATAC_ann_count[sample_count] += 1
project_count_to_ATAC_ann_count[project_count] += 1
for sample, coverage in samples_and_coverages:
annotated_junction_counts[sample] += 1
annotated_overlap_counts[sample] += coverage
if coverage >= 5:
annotated_junction_counts_geq_5[sample] += 1
elif threep in annotated_3p:
if fivep in annotated_5p:
sample_count_to_exonskip_junction_count[sample_count] += 1
project_count_to_exonskip_junction_count[
project_count
] += 1
else:
sample_count_to_altstartend_junction_count[sample_count] \
+= 1
project_count_to_altstartend_junction_count[
project_count
] += 1
elif fivep in annotated_5p:
sample_count_to_altstartend_junction_count[sample_count] += 1
project_count_to_altstartend_junction_count[project_count] += 1
else:
sample_count_to_novel_junction_count[sample_count] += 1
project_count_to_novel_junction_count[project_count] += 1
seqc_intersect = set(samples).intersection(seqc_indexes)
if seqc_intersect:
junction = junction[:-1]
rail_seqc_junctions.add(junction)
seqc_sample_count = len(seqc_intersect)
seqc_sample_count_to_junction_count[seqc_sample_count] += 1
intersect_count = 0
if junction in magic_junctions:
seqc_sample_count_to_magic[seqc_sample_count] += 1
intersect_count += 1
if junction in rmake_junctions:
seqc_sample_count_to_rmake[seqc_sample_count] += 1
intersect_count += 1
if junction in subread_junctions:
seqc_sample_count_to_subread[seqc_sample_count] += 1
intersect_count += 1
if intersect_count == 1:
seqc_sample_count_to_ones[seqc_sample_count] += 1
elif intersect_count == 2:
seqc_sample_count_to_twos[seqc_sample_count] += 1
elif intersect_count == 3:
seqc_sample_count_to_threes[seqc_sample_count] += 1
for sample, coverage in samples_and_coverages:
junction_counts[sample] += 1
overlap_counts[sample] += coverage
if coverage >= 5:
junction_counts_geq_5[sample] += 1
print >>sys.stderr, 'Done reading junction file.'
'''Aggregate junction stats: how many junctions/overlaps of given type
are found in >= K samples/projects/seqc samples?'''
sample_stats_to_aggregate = [sample_count_to_junction_count,
sample_count_to_annotated_junction_count,
sample_count_to_exonskip_junction_count,
sample_count_to_altstartend_junction_count,
sample_count_to_novel_junction_count,
sample_count_to_GTAG_junction_count,
sample_count_to_GTAG_ann_count,
sample_count_to_GCAG_junction_count,
sample_count_to_GCAG_ann_count,
sample_count_to_ATAC_junction_count,
sample_count_to_ATAC_ann_count]
project_stats_to_aggregate = [project_count_to_junction_count,
project_count_to_annotated_junction_count,
project_count_to_exonskip_junction_count,
project_count_to_altstartend_junction_count,
project_count_to_novel_junction_count,
project_count_to_GTAG_junction_count,
project_count_to_GTAG_ann_count,
project_count_to_GCAG_junction_count,
project_count_to_GCAG_ann_count,
project_count_to_ATAC_junction_count,
project_count_to_ATAC_ann_count]
seqc_stats_to_aggregate = [seqc_sample_count_to_junction_count,
seqc_sample_count_to_magic,
seqc_sample_count_to_rmake,
seqc_sample_count_to_subread,
seqc_sample_count_to_ones,
seqc_sample_count_to_twos,
seqc_sample_count_to_threes]
header_prototype = ('min {descriptor}s\t'
'junctions\t'
'annotated\t'
'exonskips\t'
'altstartend\t'
'novel\t'
'GTAG\t'
'annotated GTAG\t'
'GCAG\t'
'annotated GCAG\t'
'ATAC\t'
'annotated ATAC')
seqc_header = ('min seqc samples\t'
'junctions\t'
'magic junctions\t'
'rmake junctions\t'
'subread junctions\t'
'exactly one of {magic, rmake, subread} junctions\t'
'exactly two of {magic, rmake, subread} junctions\t'
'all three of {magic, rmake, subread} junctions')
for stats, header, descriptor in [
(sample_stats_to_aggregate,
header_prototype.format(descriptor='sample'),
'sample'),
(project_stats_to_aggregate,
header_prototype.format(descriptor='project'),
'project'),
(seqc_stats_to_aggregate,
seqc_header,
'seqc_sample')
]:
max_count, min_count = 0, 1000000000 # way larger than max # samples
for stat in stats:
max_count = max(stat.keys() + [max_count])
min_count = min(stat.keys() + [min_count])
stat_count = len(stats)
stat_aggregators = [0 for _ in xrange(stat_count)]
with open(args.basename + '.' + descriptor + '.stats.tsv', 'w') \
as stat_stream:
print >>stat_stream, header
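# Accumulate from the largest count downward so each output row reports the
# number of junctions of each type found in >= descriptor_count samples/projects.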
for descriptor_count in xrange(max_count, min_count - 1, -1):
for i in xrange(stat_count):
stat_aggregators[i] += stats[i][descriptor_count]
print >>stat_stream, '\t'.join(
[str(descriptor_count)]
+ [str(el) for el in stat_aggregators]
)
print >>sys.stderr, ('Dumped sample/project-level and SEQC '
'aggregate junction stats.')
# Dump junction information by sample
with open(args.basename + '.stats_by_sample.tsv', 'w') as stat_stream:
print >>stat_stream, ('sample index\tproject\tsample\texperiment\trun'
'\tjunctions\tannotated_junctions'
'\tjunctions_geq_5\tannotated_junctions_geq_5'
'\toverlaps\tannotated_overlaps')
for sample_index in sorted(index_to_sra.keys()):
print >>stat_stream, '\t'.join(
[str(el) for el in
[sample_index, index_to_sra[sample_index],
junction_counts[sample_index],
annotated_junction_counts[sample_index],
junction_counts_geq_5[sample_index],
annotated_junction_counts_geq_5[sample_index],
overlap_counts[sample_index],
annotated_overlap_counts[sample_index]]]
)
print >>sys.stderr, 'Dumped junction info by sample.'
# SEQC summary
with open(args.basename + '.seqc_summary.txt', 'w') as seqc_stream:
in_all = set.intersection(
magic_junctions, rmake_junctions, subread_junctions
)
in_one = set.union(
magic_junctions, rmake_junctions, subread_junctions
)
in_two = set.union(
set.intersection(magic_junctions, rmake_junctions),
set.intersection(magic_junctions, subread_junctions),
set.intersection(rmake_junctions, subread_junctions)
)
print >>seqc_stream, (
'total samples studied by SEQC consortium and Rail: %d'
% len(seqc_indexes)
)
print >>seqc_stream, (
'junctions found by magic, rmake, and subread: %d'
% len(in_all)
)
print >>seqc_stream, (
'junctions found by magic, rmake, or subread: %d'
% len(in_one)
)
print >>seqc_stream, (
'junctions found by at least two of '
'[magic, rmake, subread]: %d'
) % len(in_two)
print >>seqc_stream, (
'junctions found by Rail: %d' % len(rail_seqc_junctions)
)
print >>sys.stderr, 'Dumped SEQC summary.'
| 48.155193 | 79 | 0.581284 | 4,890 | 41,269 | 4.709611 | 0.130266 | 0.027052 | 0.031046 | 0.015502 | 0.426617 | 0.362093 | 0.299609 | 0.202258 | 0.107208 | 0.080113 | 0 | 0.020025 | 0.335676 | 41,269 | 856 | 80 | 48.211449 | 0.819996 | 0.262376 | 0 | 0.142373 | 0 | 0 | 0.099413 | 0.017474 | 0 | 0 | 0 | 0 | 0 | 1 | 0.005085 | false | 0.001695 | 0.023729 | 0 | 0.032203 | 0.049153 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbfdceccbe682d70951aed3927191954bc8fa300 | 1,707 | py | Python | lhc/io/sam/iterator.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | lhc/io/sam/iterator.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | lhc/io/sam/iterator.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | import gzip
from collections import namedtuple
from itertools import chain, tee
sam_line_headers = ('qname', 'flag', 'rname', 'pos', 'mapq', 'cigar', 'rnext', 'pnext', 'tlen', 'seq', 'qual', 'tags')
class SamLine(namedtuple('SamLine', sam_line_headers)):
def __str__(self):
return '{0.qname}\t{0.flag}\t{0.rname}\t{1}\t{0.mapq}\t{0.cigar}\t{0.rnext}\t{0.pnext}\t{0.tlen}\t{0.seq}\t{0.qual}\t{0.tags}'.format(self, self.pos + 1)
class SamIterator(object):
def __init__(self, fname):
if hasattr(fname, 'read'):  # file-like object; the Python 2 'file' builtin does not exist in Python 3
self.fname = fname.name
it = fname
else:
self.fname = fname
it = gzip.open(fname) if fname.endswith('.bam') else\
open(fname, encoding='utf-8')
self.iterator = pairwise(it)
self.hdrs, self.line_no = self.parse_headers(self.iterator)
def __iter__(self):
return self
def __next__(self):
line, next_line = next(self.iterator)
self.line_no += 1
return self.parse_line(line)
@staticmethod
def parse_headers(pairwise_iterator):
hdrs = []
line_no = 0
for line_no, (line, next_line) in enumerate(pairwise_iterator):
hdrs.append(line.rstrip('\r\n'))
if not next_line.startswith('@'):
break
return hdrs, line_no
@staticmethod
def parse_line(line):
parts = line.rstrip('\r\n').split('\t', 11)
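# SAM POS (field 4) is 1-based; it is stored 0-based here and converted back in SamLine.__str__.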
parts[3] = int(parts[3]) - 1
parts[4] = int(parts[4])
parts[8] = int(parts[8])
return SamLine(*parts)
def pairwise(iterable):
a, b = tee(iterable)
b = chain(b, [None])
next(b)
return zip(a, b)
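# Illustrative behavior: pairwise('ABC') yields ('A', 'B'), ('B', 'C'), ('C', None).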
| 28.45 | 161 | 0.574692 | 233 | 1,707 | 4.064378 | 0.334764 | 0.021119 | 0.029567 | 0.025343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02008 | 0.27065 | 1,707 | 59 | 162 | 28.932203 | 0.740562 | 0 | 0 | 0.044444 | 0 | 0.022222 | 0.114236 | 0.068541 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155556 | false | 0 | 0.066667 | 0.044444 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbfe02f0133156c7e9c6654722f8ce99c07df6ac | 857 | py | Python | etl/parsers/etw/Error_Instrument.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 104 | 2020-03-04T14:31:31.000Z | 2022-03-28T02:59:36.000Z | etl/parsers/etw/Error_Instrument.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 7 | 2020-04-20T09:18:39.000Z | 2022-03-19T17:06:19.000Z | etl/parsers/etw/Error_Instrument.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | [
"Apache-2.0"
] | 16 | 2020-03-05T18:55:59.000Z | 2022-03-01T10:19:28.000Z | # -*- coding: utf-8 -*-
"""
Error Instrument
GUID : cd7cf0d0-02cc-4872-9b65-0dba0a90efe8
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("cd7cf0d0-02cc-4872-9b65-0dba0a90efe8"), event_id=1072, version=0)
class Error_Instrument_1072_0(Etw):
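    """Payload layout of the Error Instrument ETW event (event_id=1072, version=0)."""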
pattern = Struct(
"ProcessName" / WString,
"WindowTitle" / WString,
"MsgCaption" / WString,
"MsgText" / WString,
"CallerModuleName" / WString,
"BaseAddress" / Int64ul,
"ImageSize" / Int32ul,
"ReturnAddress" / Int64ul,
"__binLength" / Int32ul,
"binary" / Bytes(lambda this: this.__binLength)
)
| 31.740741 | 123 | 0.670945 | 93 | 857 | 6.096774 | 0.602151 | 0.037037 | 0.056437 | 0.070547 | 0.126984 | 0.126984 | 0 | 0 | 0 | 0 | 0 | 0.101322 | 0.205368 | 857 | 26 | 124 | 32.961538 | 0.731278 | 0.096849 | 0 | 0 | 0 | 0 | 0.184314 | 0.047059 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbfe0c38738fb94a53afcf6ad25b3180e9c40315 | 8,311 | py | Python | kanjiAnalyze.py | asdfhamiltonian/kanjiAnalyze | b65fd40aa87c411f01c7443f2f39dbbd8d3000b6 | [
"IJG"
] | null | null | null | kanjiAnalyze.py | asdfhamiltonian/kanjiAnalyze | b65fd40aa87c411f01c7443f2f39dbbd8d3000b6 | [
"IJG"
] | null | null | null | kanjiAnalyze.py | asdfhamiltonian/kanjiAnalyze | b65fd40aa87c411f01c7443f2f39dbbd8d3000b6 | [
"IJG"
] | null | null | null | # encoding: utf-8
"""
This package uses the KANJIDIC dictionary file.
This file is the property of the Electronic Dictionary Research and
Development Group, and is used in conformance with the Group's licence.
(see http://www.csse.monash.edu.au/~jwb/kanjidic.html)
"""
import os.path
import pickle
import xml.etree.ElementTree as ET
from collections import OrderedDict
from math import sqrt
tree = ET.parse('kanjidic2.xml')
root = tree.getroot()
masterDictionary = OrderedDict()
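# Build the kanji dictionary from the KANJIDIC2 XML once and cache it with pickle;
# later runs simply load the cached file.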
if not os.path.isfile("kanjiPickle.p"):
for kanji in root.findall('character'):
if kanji[3].find('grade') is not None:
tempdict = OrderedDict()
tempdict["grade"] = int(kanji[3].find('grade').text)
symbol = kanji.find('literal').text
try:
tempdict["freq"] = int(kanji[3].find('freq').text)
except:
tempdict["freq"] = "NA"
try:
tempdict["jlpt"] = int(kanji[3].find('jlpt').text)
except:
tempdict["jlpt"] = "NA"
for node in kanji.find('dic_number'):
if node.attrib["dr_type"] == "nelson_c":
tempdict["Nelson"] = node.text
elif node.attrib["dr_type"] == "oneill_kk":
tempdict["O'Neill"] = node.text
else:
pass
meaning = []
onyomi = []
kunyomi = []
nanori = []
for child in kanji.find('reading_meaning')[0]:
"""python interpreter seemed to dislike serial if statements,
works better if set up as if, elif, elif, else"""
if (child.tag == "meaning") and (child.attrib == {}):
meaning.append(child.text)
elif (("r_type" in child.attrib) and
(child.attrib["r_type"] == "ja_on")):
onyomi.append(child.text)
elif (("r_type" in child.attrib) and
(child.attrib["r_type"] == "ja_kun")):
kunyomi.append(child.text)
else:
pass
"""nanori is in a different level of the xml file"""
for child in kanji.find('reading_meaning'):
if child.tag == "nanori":
nanori.append(child.text)
else:
pass
tempdict["ja_on"] = onyomi
tempdict["ja_kun"] = kunyomi
tempdict["meaning"] = meaning
tempdict["nanori"] = nanori
masterDictionary[symbol] = tempdict
pickle.dump(masterDictionary, open("kanjiPickle.p", "wb"))
else:
masterDictionary = pickle.load(open("kanjiPickle.p", "rb"))
print(len(masterDictionary), "\n")
'''list of non-kanji characters for removal'''
notKanji = '''ぁあぃいぅうぇえぉおかがきぎくぐけげこごさざしじすずせぜそぞただちぢっつづてでとどなにぬねの
はばぱひびぴふぶぷへべぺほぼぽまみむめもゃやゅゆょよらりるれろゎわゐゑをんゔゕゖーァアィイゥウェエォオカガキギ
クグケゲコゴサザシジスズセゼソゾタダチヂッツヅテデトドナニヌネノハバパヒビピフブプヘベペホボポマミムメモャヤュユョヨ
ラリルレロヮワヰヱヲンヴヵヶヷヸヹヺ・ー。、「」 ()ABCDEFGHIJKLMNOPQRSTUVWXYZ
abcdefghijklmnopqrstuvwxyz1234567890'''
'''statistical tests'''
def avg(x):
return sum(x)/len(x)
def variance(x):
"""returns the variance for a list of numbers"""
x_bar = avg(x)
squareDiffList = [(x_i - x_bar)**2 for x_i in x]
return sum(squareDiffList)/(len(squareDiffList) - 1)
def sd(x):
"""returns the standard deviation for a list of numbers"""
return sqrt(variance(x))
def correlation(tuplist):
"""returns the correlation coefficient of a list of tupples"""
x_list = [item[0] for item in tuplist]
y_list = [item[1] for item in tuplist]
x_bar = avg(x_list)
s_x = sd(x_list)
y_bar = avg(y_list)
s_y = sd(y_list)
n = len(tuplist)
numerator_list = [(item[0] - x_bar) * (item[1] - y_bar) for
item in tuplist]
r = sum(numerator_list)/((n-1) * s_x * s_y)
return r
def strip(text):
"""removes non-kanji chars from strings"""
for char in text:
if char in notKanji:
text = text.replace(char, "")
return text
def gradeStats(text):
"""returns avg, variance, standard dev and N for grade level of
kanji in a string"""
charArray = []
for char in text:
if char in masterDictionary:
grade = int(masterDictionary[char]["grade"])
charArray.append(grade)
else:
pass
return [avg(charArray), variance(charArray), sd(charArray), len(charArray)]
def jlptStats(text):
"""returns avg, variance, standard dev and N for JLPT level of
kanji in a string"""
charArray = []
for char in text:
if ((char in masterDictionary) and
(masterDictionary[char]["jlpt"] != 'NA')):
jlpt = int(masterDictionary[char]["jlpt"])
charArray.append(jlpt)
else:
pass
return [avg(charArray), variance(charArray),
sd(charArray), len(charArray)]
def frequncyStats(text):
"""returns avg, variance, standard dev and N for usage frequency of
kanji in a string"""
charArray = []
for char in text:
if ((char in masterDictionary) and
(masterDictionary[char]["freq"] != "NA")):
frequency = int(masterDictionary[char]["freq"])
charArray.append(frequency)
else:
pass
return [avg(charArray), variance(charArray),
sd(charArray), len(charArray)]
marsArticle = open("火星.txt", "r", encoding="utf-8").read()
marsArticle = strip(marsArticle)
print("Japanese Wikipedia article about Mars: \n",
"Grade Level Stats: ", gradeStats(marsArticle), "\n",
"JLPT Level Stats: ", jlptStats(marsArticle), "\n",
"Character Frequency Stats: ", frequncyStats(marsArticle), "\n\n")
historyArticle = open("戦国時代.txt", "r", encoding="utf-8").read()
historyArticle = strip(historyArticle)
print("Japanese Wikipedia article about the Sengoku Period: \n"
"Grade Level Stats: ", gradeStats(historyArticle), "\n",
"JLPT Level Stats: ", jlptStats(historyArticle), "\n",
"Character Frequency Stats: ", frequncyStats(historyArticle), "\n\n")
jinmeiyouKanji = '''
丑 丞 乃 之 乎 也 云 亘‐亙 些 亦 亥 亨 亮 仔 伊 伍 伽 佃 佑 伶 侃 侑 俄 俠 俣 俐 倭 俱 倦 倖 偲 傭 儲 允 兎
兜 其 冴 凌 凜‐凛 凧 凪 凰 凱 函 劉 劫 勁 勺 勿 匁 匡 廿 卜 卯 卿 厨 厩 叉 叡 叢 叶 只 吾 吞 吻 哉 哨 啄 哩
喬 喧 喰 喋 嘩 嘉 嘗 噌 噂 圃 圭 坐 尭‐堯 坦 埴 堰 堺 堵 塙 壕 壬 夷 奄 奎 套 娃 姪 姥 娩 嬉 孟 宏 宋 宕 宥
寅 寓 寵 尖 尤 屑 峨 峻 崚 嵯 嵩 嶺 巌‐巖 巫 已 巳 巴 巷 巽 帖 幌 幡 庄 庇 庚 庵 廟 廻 弘 弛 彗 彦 彪 彬 徠
忽 怜 恢 恰 恕 悌 惟 惚 悉 惇 惹 惺 惣 慧 憐 戊 或 戟 托 按 挺 挽 掬 捲 捷 捺 捧 掠 揃 摑 摺 撒 撰 撞 播 撫
擢 孜 敦 斐 斡 斧 斯 於 旭 昂 昊 昏 昌 昴 晏 晃‐晄 晒 晋 晟 晦 晨 智 暉 暢 曙 曝 曳 朋 朔 杏 杖 杜 李 杭 杵
杷 枇 柑 柴 柘 柊 柏 柾 柚 桧‐檜 栞 桔 桂 栖 桐 栗 梧 梓 梢 梛 梯 桶 梶 椛 梁 棲 椋 椀 楯 楚 楕 椿 楠 楓 椰
楢 楊 榎 樺 榊 榛 槙‐槇 槍 槌 樫 槻 樟 樋 橘 樽 橙 檎 檀 櫂 櫛 櫓 欣 欽 歎 此 殆 毅 毘 毬 汀 汝 汐 汲 沌 沓
沫 洸 洲 洵 洛 浩 浬 淵 淳 渚‐渚 淀 淋 渥 湘 湊 湛 溢 滉 溜 漱 漕 漣 澪 濡 瀕 灘 灸 灼 烏 焰 焚 煌 煤 煉 熙
燕 燎 燦 燭 燿 爾 牒 牟 牡 牽 犀 狼 猪‐猪 獅 玖 珂 珈 珊 珀 玲 琢‐琢 琉 瑛 琥 琶 琵 琳 瑚 瑞 瑶 瑳 瓜 瓢 甥
甫 畠 畢 疋 疏 皐 皓 眸 瞥 矩 砦 砥 砧 硯 碓 碗 碩 碧 磐 磯 祇 祢‐禰 祐‐祐 祷‐禱 禄‐祿 禎‐禎 禽 禾 秦 秤 稀
稔 稟 稜 穣‐穰 穹 穿 窄 窪 窺 竣 竪 竺 竿 笈 笹 笙 笠 筈 筑 箕 箔 篇 篠 簞 簾 籾 粥 粟 糊 紘 紗 紐 絃 紬 絆
絢 綺 綜 綴 緋 綾 綸 縞 徽 繫 繡 纂 纏 羚 翔 翠 耀 而 耶 耽 聡 肇 肋 肴 胤 胡 脩 腔 脹 膏 臥 舜 舵 芥 芹 芭
芙 芦 苑 茄 苔 苺 茅 茉 茸 茜 莞 荻 莫 莉 菅 菫 菖 萄 菩 萌‐萠 萊 菱 葦 葵 萱 葺 萩 董 葡 蓑 蒔 蒐 蒼 蒲 蒙
蓉 蓮 蔭 蔣 蔦 蓬 蔓 蕎 蕨 蕉 蕃 蕪 薙 蕾 蕗 藁 薩 蘇 蘭 蝦 蝶 螺 蟬 蟹 蠟 衿 袈 袴 裡 裟 裳 襖 訊 訣 註 詢
詫 誼 諏 諄 諒 謂 諺 讃 豹 貰 賑 赳 跨 蹄 蹟 輔 輯 輿 轟 辰 辻 迂 迄 辿 迪 迦 這 逞 逗 逢 遥‐遙 遁 遼 邑 祁
郁 鄭 酉 醇 醐 醍 醬 釉 釘 釧 銑 鋒 鋸 錘 錐 錆 錫 鍬 鎧 閃 閏 閤 阿 陀 隈 隼 雀 雁 雛 雫 霞 靖 鞄 鞍 鞘 鞠
鞭 頁 頌 頗 顚 颯 饗 馨 馴 馳 駕 駿 驍 魁 魯 鮎 鯉 鯛 鰯 鱒 鱗 鳩 鳶 鳳 鴨 鴻 鵜 鵬 鷗 鷲 鷺 鷹 麒 麟 麿 黎
黛 鼎
'''
print("List of Jinmeiyou Kanji: \n"
"Grade Level Stats: ", gradeStats(jinmeiyouKanji), "\n",
"JLPT Level Stats: ", jlptStats(jinmeiyouKanji), "\n",
"Character Frequency Stats: ", frequncyStats(jinmeiyouKanji), "\n\n")
nekodearu = open("吾輩は猫である.txt", "r", encoding="utf-8").read()
nekodearu = strip(nekodearu)
print("I am Cat by Natsume Soseki: \n"
"Grade Level Stats: ", gradeStats(nekodearu), "\n",
"JLPT Level Stats: ", jlptStats(nekodearu), "\n",
"Character Frequency Stats: ", frequncyStats(nekodearu), "\n\n")
hosomichi = open("奥の細道.txt", "r", encoding="utf-8").read()
hosomichi = strip(hosomichi)
print("Oku no Hosomichi by Matsuo Basho: \n",
"Grade Level Stats: ", gradeStats(hosomichi), "\n",
"JLPT Level Stats: ", jlptStats(hosomichi), "\n",
"Character Frequency Stats: ", frequncyStats(hosomichi), "\n\n")
| 36.292576 | 79 | 0.581037 | 1,466 | 8,311 | 3.283083 | 0.581855 | 0.020777 | 0.011427 | 0.016622 | 0.280906 | 0.159776 | 0.159776 | 0.1417 | 0.1417 | 0.116767 | 0 | 0.004155 | 0.305017 | 8,311 | 228 | 80 | 36.451754 | 0.825831 | 0.082902 | 0 | 0.190476 | 0 | 0.107143 | 0.332969 | 0.033748 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0.035714 | 0.029762 | 0.005952 | 0.125 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cbffb7bb41609d51ef51651416d46a5efa2cbb9a | 578 | py | Python | rotate.py | winksaville/cadquery-wing1 | 43da6a179e1a527401a4328764f3726048d66339 | [
"MIT"
] | null | null | null | rotate.py | winksaville/cadquery-wing1 | 43da6a179e1a527401a4328764f3726048d66339 | [
"MIT"
] | null | null | null | rotate.py | winksaville/cadquery-wing1 | 43da6a179e1a527401a4328764f3726048d66339 | [
"MIT"
] | null | null | null | import cadquery as cq

# The rotate is being disregarded.
# See:
# https://groups.google.com/g/cadquery/c/swIm32rwbKg/m/E0p_ONahAwAJ
#el = (
# cq.Workplane("XY")
# .ellipse(1, 5)
# .rotate(
# axisStartPoint=(0, 0, 0),
# axisEndPoint=(0, 0, 1),
# angleDegrees=90)
#)
# Jeremey in post:
# https://groups.google.com/g/cadquery/c/swIm32rwbKg/m/-sSXcpvnAwAJ
# suggests using transformed instead, this "works".
oel = (
cq.Workplane("XY")
.ellipse(1, 5)
)
el = (
cq.Workplane("XY")
.transformed(rotate=(0, 0, 90))
.ellipse(1, 5)
)
r = el.extrude(25)
| 19.931034 | 70 | 0.598616 | 77 | 578 | 4.480519 | 0.558442 | 0.023188 | 0.113043 | 0.115942 | 0.371014 | 0.371014 | 0.243478 | 0.243478 | 0.243478 | 0 | 0 | 0.055066 | 0.214533 | 578 | 28 | 71 | 20.642857 | 0.704846 | 0.66955 | 0 | 0.4 | 0 | 0 | 0.022727 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02007348627f4ef13c1bf7f02eefb74e199a2762 | 7,788 | py | Python | python/oneflow/test/graph/test_graph_lr_scheduler.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/graph/test_graph_lr_scheduler.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/graph/test_graph_lr_scheduler.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import unittest
import os
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.nn.parameter import Parameter
def _test_linear_graph_train_with_lr_sch(
test_case, iter_num, device, get_opt_and_lr_sch
):
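    """Train an identically initialized linear layer eagerly (as a Module) and as an
    nn.Graph with the given optimizer/LR scheduler, then check that the losses and
    weights of the two runs stay numerically close."""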
def train_with_module(iter_num=3):
linear = flow.nn.Linear(3, 8)
linear = linear.to(device)
flow.nn.init.constant_(linear.weight, -0.68758)
flow.nn.init.constant_(linear.bias, 0.23)
opt, lr_sch = get_opt_and_lr_sch(linear.parameters())
x = flow.Tensor(
[
[-0.94630778, -0.83378579, -0.87060891],
[2.0289922, -0.28708987, -2.18369248],
[0.35217619, -0.67095644, -1.58943879],
[0.08086036, -1.81075924, 1.20752494],
[0.8901075, -0.49976737, -1.07153746],
[-0.44872912, -1.07275683, 0.06256855],
[-0.22556897, 0.74798368, 0.90416439],
[0.48339456, -2.32742195, -0.59321527],
],
device=device,
requires_grad=False,
)
def one_iter():
of_out = linear(x)
of_out = of_out.sum()
of_out.backward()
opt.step()
if lr_sch is not None:
lr_sch.step()
opt.zero_grad()
return of_out.numpy(), linear.weight.numpy()
check_list = []
for i in range(iter_num):
check_list.append(one_iter())
return check_list
def train_with_graph(iter_num=3):
linear = flow.nn.Linear(3, 8)
linear = linear.to(device)
flow.nn.init.constant_(linear.weight, -0.68758)
flow.nn.init.constant_(linear.bias, 0.23)
opt, lr_sch = get_opt_and_lr_sch(linear.parameters())
x = flow.Tensor(
[
[-0.94630778, -0.83378579, -0.87060891],
[2.0289922, -0.28708987, -2.18369248],
[0.35217619, -0.67095644, -1.58943879],
[0.08086036, -1.81075924, 1.20752494],
[0.8901075, -0.49976737, -1.07153746],
[-0.44872912, -1.07275683, 0.06256855],
[-0.22556897, 0.74798368, 0.90416439],
[0.48339456, -2.32742195, -0.59321527],
],
device=device,
requires_grad=False,
)
class LinearTrainGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.linear = linear
if lr_sch is None:
self.add_optimizer(opt)
else:
self.add_optimizer(opt, lr_sch=lr_sch)
def build(self, x):
out = self.linear(x)
out = out.sum()
out.backward()
return out
linear_t_g = LinearTrainGraph()
def one_iter():
of_graph_out = linear_t_g(x)
return of_graph_out.numpy(), linear_t_g.linear.weight.origin.numpy()
check_list = []
for i in range(iter_num):
check_list.append(one_iter())
return check_list
module_check_list = train_with_module(iter_num)
graph_check_list = train_with_graph(iter_num)
for i in range(iter_num):
# check equal on loss
test_case.assertTrue(
np.allclose(
module_check_list[i][0],
graph_check_list[i][0],
rtol=0.00001,
atol=0.00001,
)
)
# check equal on weight
test_case.assertTrue(
np.allclose(
module_check_list[i][1],
graph_check_list[i][1],
rtol=0.00001,
atol=0.00001,
)
)
def _sgd_cosine_fn(parameters):
of_sgd = flow.optim.SGD(parameters, lr=0.001)
alpha = 0.5
steps = 10
cosine_annealing_lr = flow.optim.lr_scheduler.CosineAnnealingLR(
of_sgd, steps=steps, alpha=alpha
)
return of_sgd, cosine_annealing_lr
def _sgd_cosine_constant_fn(parameters):
of_sgd = flow.optim.SGD(parameters, lr=0.001)
alpha = 0.5
steps = 10
cosine_annealing_lr = flow.optim.lr_scheduler.CosineAnnealingLR(
of_sgd, steps=steps, alpha=alpha
)
constant_warmup_cosine_lr = flow.optim.lr_scheduler.WarmUpLR(
cosine_annealing_lr, warmup_factor=0.5, warmup_iters=5, warmup_method="constant"
)
return of_sgd, constant_warmup_cosine_lr
def _sgd_constant_fn(parameters):
of_sgd = flow.optim.SGD(parameters, lr=0.001)
alpha = 0.5
steps = 10
constant_warmup_lr = flow.optim.lr_scheduler.WarmUpLR(
of_sgd, warmup_factor=0.5, warmup_iters=5, warmup_method="constant"
)
return of_sgd, constant_warmup_lr
def _sgd_cosine_linear_fn(parameters):
of_sgd = flow.optim.SGD(parameters, lr=0.001)
alpha = 0.5
steps = 10
cosine_annealing_lr = flow.optim.lr_scheduler.CosineAnnealingLR(
of_sgd, steps=steps, alpha=alpha
)
linear_warmup_cosine_lr = flow.optim.lr_scheduler.WarmUpLR(
cosine_annealing_lr, warmup_factor=0.5, warmup_iters=5, warmup_method="linear"
)
return of_sgd, linear_warmup_cosine_lr
def _sgd_linear_fn(parameters):
of_sgd = flow.optim.SGD(parameters, lr=0.001)
alpha = 0.5
steps = 10
linear_warmup_lr = flow.optim.lr_scheduler.WarmUpLR(
of_sgd, warmup_factor=0.5, warmup_iters=5, warmup_method="linear"
)
return of_sgd, linear_warmup_lr
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class TestLinearGraphTrainWithCosineLrScheduler(flow.unittest.TestCase):
def test_graph_cosine(test_case):
_test_linear_graph_train_with_lr_sch(
test_case, 21, flow.device("cuda"), _sgd_cosine_fn
)
_test_linear_graph_train_with_lr_sch(
test_case, 21, flow.device("cpu"), _sgd_cosine_fn
)
def test_graph_cosine_constant(test_case):
_test_linear_graph_train_with_lr_sch(
test_case, 21, flow.device("cuda"), _sgd_cosine_constant_fn
)
_test_linear_graph_train_with_lr_sch(
test_case, 21, flow.device("cpu"), _sgd_cosine_constant_fn
)
def test_graph_constant(test_case):
_test_linear_graph_train_with_lr_sch(
test_case, 21, flow.device("cuda"), _sgd_constant_fn
)
_test_linear_graph_train_with_lr_sch(
test_case, 21, flow.device("cpu"), _sgd_constant_fn
)
def test_graph_cosine_linear(test_case):
_test_linear_graph_train_with_lr_sch(
test_case, 21, flow.device("cuda"), _sgd_cosine_linear_fn
)
_test_linear_graph_train_with_lr_sch(
test_case, 21, flow.device("cpu"), _sgd_cosine_linear_fn
)
def test_graph_linear(test_case):
_test_linear_graph_train_with_lr_sch(
test_case, 21, flow.device("cuda"), _sgd_linear_fn
)
_test_linear_graph_train_with_lr_sch(
test_case, 21, flow.device("cpu"), _sgd_linear_fn
)
if __name__ == "__main__":
unittest.main()
| 32.049383 | 88 | 0.616846 | 1,023 | 7,788 | 4.377322 | 0.186706 | 0.023448 | 0.036847 | 0.049129 | 0.682671 | 0.641581 | 0.632649 | 0.627512 | 0.627512 | 0.599598 | 0 | 0.100753 | 0.28377 | 7,788 | 242 | 89 | 32.181818 | 0.702044 | 0.080123 | 0 | 0.444444 | 0 | 0 | 0.015514 | 0.002935 | 0 | 0 | 0 | 0 | 0.010582 | 1 | 0.089947 | false | 0 | 0.037037 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0200cf7874cdd739b16393d4b305f17e62fe51ea | 232 | py | Python | abc182_d.py | Lockdef/kyopro-code | 2d943a87987af05122c556e173e5108a0c1c77c8 | [
"MIT"
] | null | null | null | abc182_d.py | Lockdef/kyopro-code | 2d943a87987af05122c556e173e5108a0c1c77c8 | [
"MIT"
] | null | null | null | abc182_d.py | Lockdef/kyopro-code | 2d943a87987af05122c556e173e5108a0c1c77c8 | [
"MIT"
] | null | null | null | n = int(input())
a = list(map(int, input().split()))
s = 0  # running (cumulative) sum of a
ms = -float("inf")  # maximum prefix sum of a seen so far
b = 0  # coordinate at the start of the current phase
res = 0  # answer: the furthest coordinate reached
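# In phase i the robot walks a[0], a[1], ..., a[i] starting from b, so the furthest
# point reached during that phase is b plus the maximum prefix sum seen so far.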
for i in range(n):
s += a[i]
ms = max(ms, s)
res = max(res, b + ms)
b += s
print(res)
| 17.846154 | 35 | 0.49569 | 43 | 232 | 2.674419 | 0.55814 | 0.13913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018293 | 0.293103 | 232 | 12 | 36 | 19.333333 | 0.682927 | 0.107759 | 0 | 0 | 0 | 0 | 0.014851 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0202aea6bc7880acee85937510df0f4001a8dc7c | 1,419 | py | Python | code/models/ffair.py | mlii0117/FFAIR | 121543df3422306839142a89259bef5a37d83993 | [
"MIT"
] | 20 | 2021-10-09T05:07:16.000Z | 2022-03-22T02:16:37.000Z | code/models/ffair.py | mlii0117/FFAIR | 121543df3422306839142a89259bef5a37d83993 | [
"MIT"
] | 1 | 2021-12-24T11:04:05.000Z | 2021-12-29T01:41:49.000Z | code/models/ffair.py | mlii0117/FFAIR | 121543df3422306839142a89259bef5a37d83993 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
from modules.visual_extractor import VisualExtractor
from modules.encoder_decoder import EncoderDecoder
class FFAIRModel(nn.Module):
def __init__(self, args, tokenizer):
super(FFAIRModel, self).__init__()
self.args = args
self.tokenizer = tokenizer
self.visual_extractor = VisualExtractor(args)
self.encoder_decoder = EncoderDecoder(args, tokenizer)
self.forward = self.forward_ffair
def __str__(self):
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return super().__str__() + '\nTrainable parameters: {}'.format(params)
def forward_ffair(self, images, targets=None, mode='train'):
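        # average the extracted visual features over dim 1 of `images` (one case may have several images)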
att_feats = 0
fc_feats = 0
for ind in range(images.shape[1]):
att_feats_new, fc_feats_new = self.visual_extractor(images[:, ind])
att_feats += att_feats_new
fc_feats += fc_feats_new
att_feats /= images.shape[1]
fc_feats /= images.shape[1]
if mode == 'train':
output = self.encoder_decoder(fc_feats, att_feats, targets, mode='forward')
elif mode == 'sample':
output, _ = self.encoder_decoder(fc_feats, att_feats, mode='sample')
else:
raise ValueError
return output
| 35.475 | 87 | 0.649049 | 174 | 1,419 | 5.022989 | 0.362069 | 0.064073 | 0.061785 | 0.029748 | 0.130435 | 0.089245 | 0.089245 | 0.089245 | 0 | 0 | 0 | 0.004695 | 0.249471 | 1,419 | 39 | 88 | 36.384615 | 0.815962 | 0 | 0 | 0 | 0 | 0 | 0.038787 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.151515 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02056a86ea71e7e9f212e422bf3651bf0cf8efa4 | 4,683 | py | Python | tests/test_column.py | colinlcrawford/mock-data-creator | 80bdf4d3f4400108623aea3e1423707c6e2ff5f6 | [
"MIT"
] | null | null | null | tests/test_column.py | colinlcrawford/mock-data-creator | 80bdf4d3f4400108623aea3e1423707c6e2ff5f6 | [
"MIT"
] | null | null | null | tests/test_column.py | colinlcrawford/mock-data-creator | 80bdf4d3f4400108623aea3e1423707c6e2ff5f6 | [
"MIT"
] | null | null | null | """
Unit tests for Column class
"""
import unittest
from mockdataset.column import Column, MappingColumn, PercentageDiscreteColumn
def test_value_generator_fn(total_rows, row_number, previous_row_values):
"""
test value generator function for the test column
"""
return 10
class TestColumn(unittest.TestCase):
"""
Unit tests for Column class
"""
def setUp(self):
"""
test set up
"""
self.column = Column(
column_name="test_column",
value_generator=test_value_generator_fn)
def test_init(self):
"""
test that Column initializes correctly
"""
self.assertEqual(self.column.column_name, "test_column")
def test_create_value(self):
"""
test that Column call its create_value function correctly
"""
test_total_rows = 10
test_row_number = 0
test_previous_row_values = []
column_next_value = self.column.create_value(
total_rows=test_total_rows,
row_number=test_row_number,
previous_row_values=test_previous_row_values)
expected_next_value = test_value_generator_fn(
total_rows=test_total_rows,
row_number=test_row_number,
previous_row_values=test_previous_row_values)
self.assertEqual(column_next_value, expected_next_value)
class TestMappingColumn(unittest.TestCase):
def setUp(self):
test_mapping = {
"Whale": "Big",
"Cat": "Small"
}
self.test_previous_rows_with_match = {
"Animal": "Whale"
}
self.test_previous_rows_without_match = {
"Animal": "Dog"
}
self.test_default_value = "Medium"
self.column = MappingColumn(
column_name="Size",
column_to_map="Animal",
mapping=test_mapping,
default_value=self.test_default_value)
def test_create_value(self):
next_value = self.column.create_value(
total_rows=10,
row_number=3,
previous_row_values=self.test_previous_rows_with_match)
self.assertEqual(next_value, "Big")
def test_default_value_for_unmapped_values(self):
next_value = self.column.create_value(
total_rows=10,
row_number=3,
previous_row_values=self.test_previous_rows_without_match)
self.assertEqual(next_value, self.test_default_value)
class TestPercentageDiscreteColumn(unittest.TestCase):
def setUp(self):
self.test_category_to_percentage = {
"Cat": 0.2,
"Dog": 0.2,
"Whale": 0.2,
"Lion": 0.4
}
self.column = PercentageDiscreteColumn(
column_name="Animal",
category_to_percentage=self.test_category_to_percentage,
default_value="Lion"
)
self.test_category_to_percentage_not_all_covered = {
"Cat": 0.33,
"Dog": 0.33,
}
self.column_not_all_covered = PercentageDiscreteColumn(
column_name="Animal",
category_to_percentage=self.test_category_to_percentage_not_all_covered,
default_value="Lion"
)
def test_create_value(self):
"""
test the PercentageDiscreteColumn creates the correct number of each
user provided value based on the percentages provided by the user
"""
total_test_rows = 5
values = []
for i in range(total_test_rows):
values.append(self.column.create_value(
total_rows=total_test_rows,
row_number=i,
previous_row_values=values))
expected_values = [*self.test_category_to_percentage.keys(), "Lion"]
for value, expected_value in zip(values, expected_values):
self.assertEqual(value, expected_value)
def test_create_default_value(self):
"""
        test the PercentageDiscreteColumn uses its default value once it has
filled the required percentages from the user for the column
"""
total_test_rows = 3
values = []
for i in range(total_test_rows):
values.append(self.column_not_all_covered.create_value(
total_rows=total_test_rows,
row_number=i,
previous_row_values=values))
expected_values = [
*self.test_category_to_percentage_not_all_covered.keys(),
"Lion"
]
for value, expected_value in zip(values, expected_values):
self.assertEqual(value, expected_value)
| 31.22 | 84 | 0.620329 | 526 | 4,683 | 5.171103 | 0.18251 | 0.055882 | 0.0625 | 0.039706 | 0.633824 | 0.488235 | 0.409926 | 0.409926 | 0.374265 | 0.374265 | 0 | 0.008249 | 0.301089 | 4,683 | 149 | 85 | 31.42953 | 0.822793 | 0.102498 | 0 | 0.356436 | 0 | 0 | 0.031297 | 0 | 0 | 0 | 0 | 0 | 0.059406 | 1 | 0.09901 | false | 0 | 0.019802 | 0 | 0.158416 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02061cc446b228da1290c17d822244a26d970d36 | 754 | py | Python | miniProject/imagefacedection/detect_faces.py | scotthuang1989/opencv_study | 9b6354907609c9841915f6300ee5915a9d80906f | [
"MIT"
] | null | null | null | miniProject/imagefacedection/detect_faces.py | scotthuang1989/opencv_study | 9b6354907609c9841915f6300ee5915a9d80906f | [
"MIT"
] | null | null | null | miniProject/imagefacedection/detect_faces.py | scotthuang1989/opencv_study | 9b6354907609c9841915f6300ee5915a9d80906f | [
"MIT"
] | 1 | 2018-04-16T13:57:14.000Z | 2018-04-16T13:57:14.000Z | from __future__ import print_function
from pyimagesearch.facedetector import FaceDetector
import argparse
import cv2
ap = argparse.ArgumentParser()
ap.add_argument('-f', '--face', required=True,
                help="path to where the face cascade resides")
ap.add_argument('-i', '--image', required=True,
                help='path to where the image file resides')
args = vars(ap.parse_args())
image = cv2.imread(args['image'])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fd = FaceDetector(args["face"])
faceRects = fd.detect(gray, scaleFactor=1.05, minNeighbors=5,
                      minSize=(30, 30))
print("I found {} face(s)".format(len(faceRects)))
for (x, y, w, h) in faceRects:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow("Faces", image)
cv2.waitKey()
| 29 | 58 | 0.706897 | 114 | 754 | 4.596491 | 0.54386 | 0.045802 | 0.049618 | 0.076336 | 0.114504 | 0.114504 | 0.114504 | 0 | 0 | 0 | 0 | 0.033333 | 0.124668 | 754 | 25 | 59 | 30.16 | 0.760606 | 0 | 0 | 0 | 0 | 0 | 0.164456 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02065aa4478977944e5e8c1c9931e63321aea950 | 11,004 | py | Python | docs/Lambda/auto-tag-lambda-fn.py | ndreasg/aws-vulnerability-management-workshop | cfc2951fe8b6ecc2eb2d5740d1042e876b22200a | [
"MIT-0"
] | 6 | 2020-08-31T13:06:54.000Z | 2022-02-03T02:39:30.000Z | docs/Lambda/auto-tag-lambda-fn.py | ndreasg/aws-vulnerability-management-workshop | cfc2951fe8b6ecc2eb2d5740d1042e876b22200a | [
"MIT-0"
] | 2 | 2021-03-03T17:45:09.000Z | 2021-04-13T23:36:05.000Z | docs/Lambda/auto-tag-lambda-fn.py | ndreasg/aws-vulnerability-management-workshop | cfc2951fe8b6ecc2eb2d5740d1042e876b22200a | [
"MIT-0"
] | 7 | 2021-01-22T10:23:23.000Z | 2022-02-14T09:36:17.000Z | import boto3
import jmespath
import os
import logging
import time
import datetime
import sys
from boto3.dynamodb.conditions import Attr
from botocore.exceptions import ClientError
logger = logging.getLogger()
logger.setLevel(logging.INFO)
dynamodb_table_name = 'AutoTag-ASGInfo'
ec2 = boto3.resource('ec2')
dynamodb = boto3.resource('dynamodb')
ec2_client = boto3.client('ec2')
def lambda_handler(event, context):
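    """Tag newly created EC2 resources (instances, volumes, ENIs, AMIs, snapshots)
    with their creator. Auto Scaling launches are matched to the user who created
    the group via a DynamoDB lookup table; other events come in via CloudTrail."""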
global username
ids = []
try:
source = event['source']
detail = event['detail']
detailtype = event['detail-type']
print('Received event from ' + source + ' with detail-type as: ' + detailtype);
## Check if event is from Auto scaling and handle it accordingly.
if (source == 'aws.autoscaling'):
table = dynamodb.Table(dynamodb_table_name)
if (detailtype == 'EC2 Instance Launch Successful'):
asgName = detail['AutoScalingGroupName']
instanceId = detail['EC2InstanceId']
instanceIds = ''
## Look up user info from ASG Table based on ASG name
print('Looking up user info for Auto scaling group: ' + asgName)
response = table.get_item(
Key={
'asgName': asgName,
},
ConsistentRead=True
)
if ('Item' in response):
item = response['Item']
if ('userName' in item):
username = item['userName']
ids.append(instanceId)
logger.info(ids)
instances = ec2.instances.filter(InstanceIds=ids)
# loop through the instance volumes and network interfaces
for instance in instances:
for vol in instance.volumes.all():
ids.append(vol.id)
for eni in instance.network_interfaces:
ids.append(eni.id)
if ids:
for resourceid in ids:
print('Tagging resource ' + resourceid + ' with Owner as ' + username)
ec2.create_tags(Resources=ids,
Tags=[{'Key': 'Owner', 'Value': username}])
return True
else:
print('User info could not be found in ASG table.. storing instanceId info to tag later')
if ('instanceInfo' in item):
instanceIds = item['instanceInfo'] + ',' + instanceId
else:
instanceIds = instanceId
## store AutoScaling group name along with instance id information to tag later
try:
table.put_item(
Item={
'asgName': asgName,
'instanceInfo': instanceIds,
}
)
except ClientError as e:
print('Error storing ASG info in dynamo table for ASG: ' + str(asgName))
raise
return True
elif (detailtype == 'AWS API Call via CloudTrail'):
eventname = detail['eventName']
if (eventname == 'CreateAutoScalingGroup'):
principal = detail['userIdentity']['principalId']
userType = detail['userIdentity']['type']
user = None
if userType == 'IAMUser':
user = detail['userIdentity']['userName']
else:
if (':' in principal):
user = principal.split(':')[1]
if (user is None):
logger.info('User info could not be found in event details, Exiting...')
return True
asgName = detail['requestParameters']['autoScalingGroupName']
## store AutoScaling group name in dynamo table along with user info to look up later
try:
response = table.put_item(
Item={
'asgName': asgName,
'userName': user,
},
ReturnValues="ALL_OLD"
)
if ('Attributes' in response):
attributes = response['Attributes']
if ('instanceInfo' in attributes):
print('ASG table contained untagged instances. Tagging them now..')
instanceIds = attributes['instanceInfo']
if (',' in instanceIds):
ids = instanceIds.split(',')
else:
ids.append(instanceIds)
instances = ec2.instances.filter(InstanceIds=ids)
# loop through the instance volumes and network interfaces
for instance in instances:
for vol in instance.volumes.all():
ids.append(vol.id)
for eni in instance.network_interfaces:
ids.append(eni.id)
if ids:
for resourceid in ids:
print('Tagging resource ' + resourceid + ' with Owner as ' + user)
ec2.create_tags(Resources=ids,
Tags=[{'Key': 'Owner', 'Value': user}])
return True
except ClientError as e:
print('Error storing ASG info in dynamo table for ASG: ' + str(asgName))
raise
elif (eventname == 'DeleteAutoScalingGroup'):
asgName = detail['requestParameters']['autoScalingGroupName']
print('Deleting ASG info in dynamo table for ASG: ' + str(asgName))
## store ASG name in dynamo table along with user info to look up later
try:
response = table.delete_item(
Key={ 'asgName': asgName }
)
except ClientError as e:
print('Error deleting ASG info in dynamo table for ASG: ' + str(asgName))
raise
else:
logger.info('Not supported Auto scaling API Call')
else:
logger.info('Not supported Auto scaling action')
else:
## Handle API Event generated by EC2
eventname = detail['eventName']
arn = detail['userIdentity']['arn']
principal = detail['userIdentity']['principalId']
userType = detail['userIdentity']['type']
user = None
logger.info('principalId: ' + str(principal))
logger.info('eventName: ' + str(eventname))
logger.info('detail: ' + str(detail))
if userType == 'IAMUser':
user = detail['userIdentity']['userName']
else:
if (':' in principal):
user = principal.split(':')[1]
if (user is None):
logger.info('User info could not be found in principal : ' + str(principal))
logger.info('Exiting...')
return True
if not detail['responseElements']:
logger.warning('No responseElements found')
if detail['errorCode']:
logger.error('errorCode: ' + detail['errorCode'])
if detail['errorMessage']:
logger.error('errorMessage: ' + detail['errorMessage'])
return False
if eventname == 'CreateVolume':
ids.append(detail['responseElements']['volumeId'])
logger.info(ids)
elif eventname == 'RunInstances':
items = detail['responseElements']['instancesSet']['items']
for item in items:
ids.append(item['instanceId'])
logger.info(ids)
logger.info('number of instances: ' + str(len(ids)))
base = ec2.instances.filter(InstanceIds=ids)
# loop through the instances
for instance in base:
for vol in instance.volumes.all():
ids.append(vol.id)
for eni in instance.network_interfaces:
ids.append(eni.id)
elif eventname == 'CreateImage':
ids.append(detail['responseElements']['imageId'])
logger.info(ids)
elif eventname == 'CreateSnapshot':
ids.append(detail['responseElements']['snapshotId'])
logger.info(ids)
else:
logger.warning('Not supported action')
if ids:
for resourceid in ids:
print('Tagging resource ' + resourceid)
if resourceid.startswith('i-'):
ec2response = ec2_client.describe_instances(InstanceIds=[resourceid])
platform = jmespath.search(
"Reservations[].Instances[?InstanceId=='{}'].Platform|[][][]|[0]".format(resourceid),
ec2response
)
logger.debug("Instance platform: {}".format(platform))
if platform == 'windows':
ec2.create_tags(Resources=[resourceid], Tags=[{'Key': 'Owner', 'Value': user}, {'Key': 'PrincipalId', 'Value': principal}, {'Key': 'OSType', 'Value': 'Windows'}])
elif platform != 'windows':
ec2.create_tags(Resources=[resourceid], Tags=[{'Key': 'Owner', 'Value': user}, {'Key': 'PrincipalId', 'Value': principal}, {'Key': 'OSType', 'Value': 'Linux'}])
else:
ec2.create_tags(Resources=[resourceid], Tags=[{'Key': 'Owner', 'Value': user}, {'Key': 'PrincipalId', 'Value': principal}])
logger.info(' Remaining time (ms): ' + str(context.get_remaining_time_in_millis()) + '\n')
return True
except Exception as e:
logger.error('Something went wrong: ' + str(e))
logger.error('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
return False
| 50.944444 | 190 | 0.467739 | 908 | 11,004 | 5.638767 | 0.215859 | 0.029297 | 0.015234 | 0.021484 | 0.414844 | 0.403906 | 0.387305 | 0.367969 | 0.358984 | 0.342578 | 0 | 0.004351 | 0.436114 | 11,004 | 215 | 191 | 51.181395 | 0.82079 | 0.046983 | 0 | 0.415842 | 0 | 0 | 0.184987 | 0.010219 | 0 | 0 | 0 | 0 | 0 | 1 | 0.004951 | false | 0 | 0.044554 | 0 | 0.089109 | 0.054455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0206b2b25fd19becdab3751f8ecba1df3a964971 | 1,119 | py | Python | gpenkf/experiments/synthetic/plot_example.py | danilkuzin/GP-EnKF | 215623e0f322ddae9757854e7278b60e11e570bf | [
"MIT"
] | 12 | 2018-11-09T10:08:36.000Z | 2021-07-11T05:04:52.000Z | gpenkf/experiments/synthetic/plot_example.py | danilkuzin/GP-EnKF | 215623e0f322ddae9757854e7278b60e11e570bf | [
"MIT"
] | null | null | null | gpenkf/experiments/synthetic/plot_example.py | danilkuzin/GP-EnKF | 215623e0f322ddae9757854e7278b60e11e570bf | [
"MIT"
] | 1 | 2019-10-29T05:57:47.000Z | 2019-10-29T05:57:47.000Z | import numpy as np
import matplotlib.pyplot as plt
def plot_synthetic_function():
borders = [-10, 10] # The borders of the x-axis
sample_size = 5 # Number of new observations at every iteration
noise = 0.5 # Observation noise
fine_grid = np.linspace(-10, 10, 2001) # grid for plotting purposes
def f(x):
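        # ground-truth latent function used to generate the synthetic observations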
return x / 2 + (25 * x) / (1 + x ** 2) * np.cos(x)
# Sample data example
x_new = ((borders[1] - borders[0]) * np.random.random_sample((sample_size, 1))
+ borders[0])
x_new = np.sort(x_new, axis=0)
f_new = f(x_new)
f_new_noised = f(x_new) + np.random.normal(loc=0., scale=noise,
size=(sample_size, 1))
# Plotting
plt.plot(x_new, f_new, 'x', label='samples from f')
plt.plot(x_new, f_new_noised, 'x', label='samples from f with noise')
plt.plot(fine_grid, f(fine_grid), label='f')
plt.legend()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.grid(True)
plt.savefig('synthetic_function_example.eps', format='eps')
if __name__ == "__main__":
plot_synthetic_function()
| 31.083333 | 82 | 0.605004 | 171 | 1,119 | 3.760234 | 0.374269 | 0.043546 | 0.023328 | 0.037325 | 0.133748 | 0.046656 | 0 | 0 | 0 | 0 | 0 | 0.032335 | 0.253798 | 1,119 | 35 | 83 | 31.971429 | 0.737725 | 0.12958 | 0 | 0 | 0 | 0 | 0.091003 | 0.031024 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.08 | 0.04 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0208480a0041db4086f3ceeffedc8ef187168071 | 417 | py | Python | Algorithms/Easy/944. Delete Columns to Make Sorted/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Easy/944. Delete Columns to Make Sorted/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Easy/944. Delete Columns to Make Sorted/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | from typing import List
class Solution:
def minDeletionSize(self, A: List[str]) -> int:
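        # count the columns whose characters are not already sorted top-to-bottom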
res = 0
for j in range(len(A[0])):
for i in range(len(A)-1):
if A[i][j] > A[i+1][j]:
res += 1
break
return res
if __name__ == "__main__":
s = Solution()
result = s.minDeletionSize(["cba", "daf", "ghi"])
print(result)
| 21.947368 | 53 | 0.47482 | 55 | 417 | 3.454545 | 0.581818 | 0.042105 | 0.105263 | 0.115789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019305 | 0.378897 | 417 | 18 | 54 | 23.166667 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0.040767 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.285714 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
020b84b28c27e3feedff153599945e00b2e16739 | 1,901 | py | Python | card.py | andytaylor823/euchre-ML | 691d5dba9a72af201e004308782c9c429dbeba51 | [
"MIT"
] | null | null | null | card.py | andytaylor823/euchre-ML | 691d5dba9a72af201e004308782c9c429dbeba51 | [
"MIT"
] | null | null | null | card.py | andytaylor823/euchre-ML | 691d5dba9a72af201e004308782c9c429dbeba51 | [
"MIT"
] | null | null | null | # Name options: '9', 'T', 'J', 'Q', 'K', 'A'
# Suit options: 'C', 'D', 'H', 'S'
from copy import deepcopy
same_color = {'C':'S', 'D':'H', 'H':'D', 'S':'C'}
trump_power = {'9':12, 'T':15, 'Q':20, 'K':25, 'A':30, 'left':31, 'right':35, None:0}
non_trump_power = {'9':1, 'T':2, 'J':3, 'Q':4, 'K':5, 'A':10, None:0}
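# Relative card strengths: the right bower (jack of trump) beats the left bower
# (jack of the same-color suit), which beats the remaining trump A, K, Q, T, 9;
# off-suit cards simply rank ace high.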
class Card:
def __init__(self, name, suit, trump_suit=None):
self.name, self.suit = name, suit
self.set_trump(trump_suit)
def set_trump(self, trump_suit):
if trump_suit is None:
self.trump = False
else:
if self.suit == trump_suit:
self.trump = True
else:
self.trump = (self.name == 'J') and (self.suit == same_color[trump_suit])
if self.trump:
if self.name == 'J':
self.right = self.suit==trump_suit
self.left = self.suit==same_color[trump_suit]
else:
self.right, self.left = False, False
else:
self.right, self.left = False, False
self._set_power()
def _set_power(self):
if self.trump:
if self.right:
self.power = 35
elif self.left:
self.power = 31
else:
self.power = trump_power[self.name]
else:
self.power = non_trump_power[self.name]
def copy(self):
return deepcopy(self)
def __eq__(self, other):
if other is None: return False
return (other.suit==self.suit) and (other.name==self.name)
def __str__(self):
if self.name is None:
return '--'
return str(self.name) + str(self.suit)
def __repr__(self):
if self.name is None:
return '--'
return str(self.name) + str(self.suit)
def __hash__(self):
return hash(self.name) + hash(self.suit) | 31.163934 | 89 | 0.515518 | 260 | 1,901 | 3.603846 | 0.223077 | 0.102455 | 0.055496 | 0.036286 | 0.32444 | 0.24333 | 0.187834 | 0.121665 | 0.121665 | 0.121665 | 0 | 0.023641 | 0.332457 | 1,901 | 61 | 90 | 31.163934 | 0.714736 | 0.039453 | 0 | 0.32 | 0 | 0 | 0.01864 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.02 | 0.04 | 0.34 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
020d37eb0e0ccb84ad2f285e17f109263db15a69 | 7,406 | py | Python | bnofluxlite/bnofluxlite.py | iotfablab/bnofluxlite | 6621024837c4dde17bb86456b7a1aee8398c8a3a | [
"MIT"
] | null | null | null | bnofluxlite/bnofluxlite.py | iotfablab/bnofluxlite | 6621024837c4dde17bb86456b7a1aee8398c8a3a | [
"MIT"
] | null | null | null | bnofluxlite/bnofluxlite.py | iotfablab/bnofluxlite | 6621024837c4dde17bb86456b7a1aee8398c8a3a | [
"MIT"
] | null | null | null | import argparse
import json
import logging
import os
import socket
import ssl
import sys
import time
from queue import Queue
from .BNO055 import BNO055
import paho.mqtt.client as mqtt
# Logging Configuration
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger(__name__)
handler = logging.FileHandler('/var/log/bnofluxlite.log')
handler.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s-%(name)s-%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
CONFIG = dict()
DEVICE_NAME = ''
DEVICE_ID = ''
INFLUX_SOCKET = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def on_connect(mqttc, obj, flags, rc):
"""MQTT Callback Function upon connecting to MQTT Broker"""
if rc == 0:
logger.debug("MQTT CONNECT rc: " + str(rc))
logger.info("Succesfully Connected to MQTT Broker")
def on_publish(mqttc, obj, mid):
"""MQTT Callback Function upon publishing to MQTT Broker"""
logger.debug("MQTT PUBLISH: mid: " + str(mid))
def on_disconnect(mqttc, obj, rc):
"""MQTT Callback Function upon disconnecting from MQTT Broker"""
if rc == 0:
logger.debug("MQTT DISCONNECTED: rc: " + str(rc))
logger.debug("Disconnected Successfully from MQTT Broker")
def setup_mqtt_client(mqtt_conf, mqtt_client):
"""Configure MQTT Client based on Configuration"""
if mqtt_conf['TLS']['enable']:
logger.info("TLS Setup for Broker")
logger.info("checking TLS_Version")
tls = mqtt_conf['TLS']['tls_version']
if tls == 'tlsv1.2':
tlsVersion = ssl.PROTOCOL_TLSv1_2
elif tls == "tlsv1.1":
tlsVersion = ssl.PROTOCOL_TLSv1_1
elif tls == "tlsv1":
tlsVersion = ssl.PROTOCOL_TLSv1
else:
logger.info("Unknown TLS version - ignoring")
tlsVersion = None
if not mqtt_conf['TLS']['insecure']:
logger.info("Searching for Certificates in certdir")
CERTS_DIR = mqtt_conf['TLS']['certs']['certdir']
if os.path.isdir(CERTS_DIR):
logger.info("certdir exists")
CA_CERT_FILE = os.path.join(CERTS_DIR, mqtt_conf['TLS']['certs']['cafile'])
CERT_FILE = os.path.join(CERTS_DIR, mqtt_conf['TLS']['certs']['certfile'])
KEY_FILE = os.path.join(CERTS_DIR, mqtt_conf['TLS']['certs']['keyfile'])
mqtt_client.tls_set(ca_certs=CA_CERT_FILE, certfile=CERT_FILE, keyfile=KEY_FILE, cert_reqs=ssl.CERT_REQUIRED, tls_version=tlsVersion)
else:
logger.error("certdir does not exist.. check path")
sys.exit()
else:
mqtt_client.tls_set(ca_certs=None, certfile=None, keyfile=None, cert_reqs=ssl.CERT_NONE, tls_version=tlsVersion)
mqtt_client.tls_insecure_set(True)
if mqtt_conf['username'] and mqtt_conf['password']:
logger.info("setting username and password for Broker")
mqtt_client.username_pw_set(mqtt_conf['username'], mqtt_conf['password'])
return mqtt_client
def send_data(payloads, mqtt_client):
"""Publish IMU Values to MQTT Broker + InfluxDB insert"""
global CONFIG
global DEVICE_ID, DEVICE_NAME
global INFLUX_SOCKET
while not payloads.empty():
for topic in CONFIG['imu']['topics']:
data = ''.join(list(payloads.queue))
payloads.queue.clear()
topic_to_publish = DEVICE_NAME + '/' + DEVICE_ID + '/' + topic
#logger.debug(data)
mqtt_client.publish(topic_to_publish, data, qos=1)
INFLUX_SOCKET.sendto(data.encode('utf-8'), (CONFIG['influx']['host'], CONFIG['imu']['udp_port']))
def read_from_imu(i2c_port, updaterate, mqttc):
"""Read from BNO055 Sensor using I2C Port and push data into payload Queue"""
logger.info(f'Starting to Read BNO values on {i2c_port} every {updaterate}s')
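    # buffer roughly one second worth of samples before publishing them as a batch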
    queue_capacity = int(1 / updaterate) + 1
payload_q = Queue(maxsize=queue_capacity)
logger.debug(f'Setting Queue Capacity of {queue_capacity} >= Sampling rate')
sensor_bno = BNO055(i2c_bus_port=i2c_port)
if sensor_bno.begin() is not True:
raise ValueError('Initialization Failure for BNO055')
sys.exit(1)
time.sleep(1)
sensor_bno.setExternalCrystalUse(True)
time.sleep(2)
logger.info('Reading BNO055 Sensor Data')
mqttc.loop_start()
while 1:
try:
lx, ly, lz = sensor_bno.getVector(BNO055.VECTOR_LINEARACCEL)
payload_q.put_nowait(f'acceleration,type=linear,src=imu x={lx},y={ly},z={lz} {time.time_ns()}\n')
logger.debug('linear acc.: x:{}, y:{}, z:{}'.format(lx, ly, lz))
gX, gY, gZ = sensor_bno.getVector(BNO055.VECTOR_GRAVITY)
payload_q.put_nowait(f'acceleration,type=gravity,src=imu x={gX},y={gY},z={gZ} {time.time_ns()}\n')
logger.debug('gravity: x:{}, y:{}, z:{}'.format(gX, gY, gZ))
yaw, roll, pitch = sensor_bno.getVector(BNO055.VECTOR_EULER)
payload_q.put_nowait(f'orientation,type=euler,src=imu yaw={yaw},pitch={pitch},roll={roll} {time.time_ns()}\n')
logger.debug('euler: yaw:{}, pitch:{}, roll:{}'.format(yaw, pitch, roll))
time.sleep(updaterate)
if payload_q.full():
logger.info('Payload Queue is Full. Publishing to Broker.')
send_data(payload_q, mqttc)
time.sleep(1.0) # sleep for a second in order not to hog up the sending
except Exception as imu_e:
logger.exception(f'Error while reading IMU data: {imu_e}')
break
except KeyboardInterrupt:
logger.exception('CTRL+C pressed')
break
logger.info("cleaning up queue, closing connections")
if not payload_q.empty():
payload_q.queue.clear()
mqttc.loop_stop()
mqttc.disconnect()
sys.exit()
def parse_arguments():
"""Arguments to run the script"""
parser = argparse.ArgumentParser(description='CLI to obtain BNO055 data and save them to InfluxDBv1.x and Publish them to MQTT')
parser.add_argument('--config', '-c', required=True, help='JSON Configuration File for bnofluxlite CLI')
return parser.parse_args()
def main():
"""Initialization"""
args = parse_arguments()
if not os.path.isfile(args.config):
logger.error("configuration file not readable. Check path to configuration file")
sys.exit()
global CONFIG
with open(args.config, 'r') as config_file:
CONFIG = json.load(config_file)
# print(CONFIG)
# MQTT Client Configuration
global DEVICE_NAME, DEVICE_ID
DEVICE_NAME = CONFIG['device']['name']
DEVICE_ID = CONFIG['device']['ID']
MQTT_CONF = CONFIG['mqtt']
mqttc = mqtt.Client(client_id=f'{DEVICE_NAME}/{DEVICE_ID}-IMU')
mqttc = setup_mqtt_client(MQTT_CONF, mqttc)
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_disconnect = on_disconnect
mqttc.connect(CONFIG['mqtt']['broker'], CONFIG['mqtt']['port'])
logger.info('Connecting to IMU (BNO055) Device')
I2C_PORT = CONFIG['imu']['i2cPort']
I2C_UPDATERATE = CONFIG['imu']['updaterate']
logger.debug(f'Device @i2c-{I2C_PORT} with update rate={I2C_UPDATERATE}')
read_from_imu(I2C_PORT, I2C_UPDATERATE, mqttc)
if __name__ == "__main__":
main() | 36.303922 | 149 | 0.649878 | 972 | 7,406 | 4.790123 | 0.260288 | 0.030069 | 0.016538 | 0.01933 | 0.13488 | 0.082904 | 0.053694 | 0.039089 | 0.026203 | 0.026203 | 0 | 0.011956 | 0.220767 | 7,406 | 204 | 150 | 36.303922 | 0.794836 | 0.069268 | 0 | 0.081633 | 0 | 0.020408 | 0.231713 | 0.034458 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054422 | false | 0.020408 | 0.07483 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
020e54fab82e40a7745694ff3f4b76b94cc605b0 | 868 | py | Python | trajnetplusplustools/writers.py | pedro-mgb/trajnetplusplustools | 1e0dbf3dc3b79d58fc617401a08385876aa05b1c | [
"MIT"
] | 35 | 2020-01-15T15:16:19.000Z | 2022-03-31T19:37:57.000Z | trajnetplusplustools/writers.py | pedro-mgb/trajnetplusplustools | 1e0dbf3dc3b79d58fc617401a08385876aa05b1c | [
"MIT"
] | 4 | 2020-04-12T12:36:49.000Z | 2021-03-07T01:39:03.000Z | trajnetplusplustools/writers.py | pedro-mgb/trajnetplusplustools | 1e0dbf3dc3b79d58fc617401a08385876aa05b1c | [
"MIT"
] | 22 | 2020-04-05T05:39:59.000Z | 2022-03-20T16:03:49.000Z | import json
from .data import SceneRow, TrackRow
def trajnet_tracks(row):
x = round(row.x, 2)
y = round(row.y, 2)
if row.prediction_number is None:
return json.dumps({'track': {'f': row.frame, 'p': row.pedestrian, 'x': x, 'y': y}})
return json.dumps({'track': {'f': row.frame, 'p': row.pedestrian, 'x': x, 'y': y,
'prediction_number': row.prediction_number,
'scene_id': row.scene_id}})
def trajnet_scenes(row):
return json.dumps(
{'scene': {'id': row.scene, 'p': row.pedestrian, 's': row.start, 'e': row.end,
'fps': row.fps, 'tag': row.tag}})
def trajnet(row):
if isinstance(row, TrackRow):
return trajnet_tracks(row)
if isinstance(row, SceneRow):
return trajnet_scenes(row)
raise Exception('unknown row type')
| 29.931034 | 91 | 0.565668 | 115 | 868 | 4.191304 | 0.347826 | 0.062241 | 0.093361 | 0.082988 | 0.195021 | 0.195021 | 0.195021 | 0.195021 | 0.195021 | 0.195021 | 0 | 0.00316 | 0.270737 | 868 | 28 | 92 | 31 | 0.758294 | 0 | 0 | 0 | 0 | 0 | 0.086406 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.1 | 0.05 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
020f1c0b4fc5d32b73a94235db84e338eed4cd5c | 9,838 | py | Python | ven2/lib/python2.7/site-packages/zope/browserresource/tests/test_file.py | manliu1225/Facebook_crawler | 0f75a1c4382dd4effc3178d84b99b0cad97337cd | [
"Apache-2.0"
] | 1 | 2019-11-30T07:47:08.000Z | 2019-11-30T07:47:08.000Z | ven2/lib/python2.7/site-packages/zope/browserresource/tests/test_file.py | manliu1225/Facebook_crawler | 0f75a1c4382dd4effc3178d84b99b0cad97337cd | [
"Apache-2.0"
] | 10 | 2016-03-24T07:52:07.000Z | 2020-03-02T09:52:06.000Z | ven2/lib/python2.7/site-packages/zope/browserresource/tests/test_file.py | manliu1225/Facebook_crawler | 0f75a1c4382dd4effc3178d84b99b0cad97337cd | [
"Apache-2.0"
] | 2 | 2015-04-03T08:18:34.000Z | 2019-12-09T09:36:43.000Z | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""File-based browser resource tests.
"""
import doctest
import os
import re
import unittest
from email.utils import formatdate
import time
from zope.component import getGlobalSiteManager
from zope.component import provideAdapter, adapter
from zope.interface import implementer
from zope.interface.verify import verifyObject
from zope.publisher.browser import TestRequest
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.security.checker import NamesChecker
from zope.testing import cleanup
from zope.testing.renormalizing import RENormalizing
from zope.browserresource.file import FileResourceFactory, FileETag
from zope.browserresource.interfaces import IFileResource, IETag
@adapter(IFileResource, IBrowserRequest)
@implementer(IETag)
class MyETag(object):
def __init__(self, context, request):
pass
def __call__(self, mtime, content):
return 'myetag'
@adapter(IFileResource, IBrowserRequest)
@implementer(IETag)
class NoETag(object):
def __init__(self, context, request):
pass
def __call__(self, mtime, content):
return None
def setUp(test):
cleanup.setUp()
data_dir = os.path.join(os.path.dirname(__file__), 'testfiles')
test.globs['testFilePath'] = os.path.join(data_dir, 'test.txt')
test.globs['nullChecker'] = NamesChecker()
test.globs['TestRequest'] = TestRequest
provideAdapter(MyETag)
def tearDown(test):
cleanup.tearDown()
class TestFile(unittest.TestCase):
def setUp(self):
cleanup.setUp()
data_dir = os.path.join(os.path.dirname(__file__), 'testfiles')
self.testFilePath = os.path.join(data_dir, 'test.txt')
self.nullChecker = NamesChecker()
provideAdapter(MyETag)
def tearDown(self):
cleanup.tearDown()
def test_FileETag(self):
# Tests for FileETag
etag_maker = FileETag(object(), TestRequest())
self.assertTrue(verifyObject(IETag, etag_maker))
        # By default we construct an ETag from the file's mtime and size
self.assertEqual(etag_maker(1234, 'abc'), '1234-3')
def test_FileResource_GET_sets_cache_headers(self):
# Test caching headers set by FileResource.GET
factory = FileResourceFactory(self.testFilePath, self.nullChecker, 'test.txt')
timestamp = time.time()
file = factory._FileResourceFactory__file # get mangled file
file.lmt = timestamp
file.lmh = formatdate(timestamp, usegmt=True)
request = TestRequest()
resource = factory(request)
self.assertTrue(resource.GET())
self.assertEqual(request.response.getHeader('Last-Modified'), file.lmh)
self.assertEqual(request.response.getHeader('ETag'),
'"myetag"')
self.assertEqual(request.response.getHeader('Cache-Control'),
'public,max-age=86400')
self.assertTrue(request.response.getHeader('Expires'))
def test_FileResource_GET_if_modified_since(self):
#Test If-Modified-Since header support
factory = FileResourceFactory(self.testFilePath, self.nullChecker, 'test.txt')
timestamp = time.time()
file = factory._FileResourceFactory__file # get mangled file
file.lmt = timestamp
file.lmh = formatdate(timestamp, usegmt=True)
before = timestamp - 1000
request = TestRequest(HTTP_IF_MODIFIED_SINCE=formatdate(before, usegmt=True))
resource = factory(request)
self.assertTrue(resource.GET())
after = timestamp + 1000
request = TestRequest(HTTP_IF_MODIFIED_SINCE=formatdate(after, usegmt=True))
resource = factory(request)
self.assertFalse(resource.GET())
self.assertEqual(request.response.getStatus(),
304)
# Cache control headers and ETag are set on 304 responses
self.assertEqual(request.response.getHeader('ETag'),
'"myetag"')
self.assertEqual(request.response.getHeader('Cache-Control'),
'public,max-age=86400')
self.assertTrue(request.response.getHeader('Expires'))
# Other entity headers are not
self.assertIsNone(request.response.getHeader('Last-Modified'))
self.assertIsNone(request.response.getHeader('Content-Type'))
# It won't fail on bad If-Modified-Since headers.
request = TestRequest(HTTP_IF_MODIFIED_SINCE='bad header')
resource = factory(request)
self.assertTrue(resource.GET())
# it also won't fail if we don't have a last modification time for the
# resource
file.lmt = None
request = TestRequest(HTTP_IF_MODIFIED_SINCE=formatdate(after, usegmt=True))
resource = factory(request)
self.assertTrue(resource.GET())
def test_FileResource_GET_if_none_match(self):
# Test If-None-Match header support
factory = FileResourceFactory(self.testFilePath, self.nullChecker, 'test.txt')
timestamp = time.time()
file = factory._FileResourceFactory__file # get mangled file
file.lmt = timestamp
file.lmh = formatdate(timestamp, usegmt=True)
request = TestRequest(HTTP_IF_NONE_MATCH='"othertag"')
resource = factory(request)
self.assertTrue(resource.GET())
request = TestRequest(HTTP_IF_NONE_MATCH='"myetag"')
resource = factory(request)
self.assertEqual(resource.GET(), b'')
self.assertEqual(request.response.getStatus(),
304)
# Cache control headers and ETag are set on 304 responses
self.assertEqual(request.response.getHeader('ETag'),
'"myetag"')
self.assertEqual(request.response.getHeader('Cache-Control'),
'public,max-age=86400')
self.assertTrue(request.response.getHeader('Expires'))
# Other entity headers are not
self.assertIsNone(request.response.getHeader('Last-Modified'))
self.assertIsNone(request.response.getHeader('Content-Type'))
# It won't fail on bad If-None-Match headers.
request = TestRequest(HTTP_IF_NONE_MATCH='bad header')
resource = factory(request)
self.assertTrue(resource.GET())
# it also won't fail if we don't have an etag for the resource
provideAdapter(NoETag)
request = TestRequest(HTTP_IF_NONE_MATCH='"someetag"')
resource = factory(request)
self.assertTrue(resource.GET())
def test_FileResource_GET_if_none_match_and_if_modified_since(self):
# Test combined If-None-Match and If-Modified-Since header support
factory = FileResourceFactory(self.testFilePath, self.nullChecker, 'test.txt')
timestamp = time.time()
file = factory._FileResourceFactory__file # get mangled file
file.lmt = timestamp
file.lmh = formatdate(timestamp, usegmt=True)
# We've a match
after = timestamp + 1000
request = TestRequest(HTTP_IF_MODIFIED_SINCE=formatdate(after, usegmt=True),
HTTP_IF_NONE_MATCH='"myetag"')
resource = factory(request)
self.assertFalse(resource.GET())
self.assertEqual(request.response.getStatus(),
304)
# Last-modified matches, but ETag doesn't
request = TestRequest(HTTP_IF_MODIFIED_SINCE=formatdate(after, usegmt=True),
HTTP_IF_NONE_MATCH='"otheretag"')
resource = factory(request)
self.assertTrue(resource.GET())
# ETag matches but last-modified doesn't
before = timestamp - 1000
request = TestRequest(HTTP_IF_MODIFIED_SINCE=formatdate(before, usegmt=True),
HTTP_IF_NONE_MATCH='"myetag"')
resource = factory(request)
self.assertTrue(resource.GET())
# Both don't match
before = timestamp - 1000
request = TestRequest(HTTP_IF_MODIFIED_SINCE=formatdate(before, usegmt=True),
HTTP_IF_NONE_MATCH='"otheretag"')
resource = factory(request)
self.assertTrue(resource.GET())
def test_FileResource_GET_works_without_IETag_adapter(self):
# Test backwards compatibility with users of <3.11 that do not provide an IETag adapter
getGlobalSiteManager().unregisterAdapter(MyETag)
factory = FileResourceFactory(self.testFilePath, self.nullChecker, 'test.txt')
request = TestRequest()
resource = factory(request)
self.assertTrue(resource.GET())
self.assertIsNone(request.response.getHeader('ETag'))
def test_suite():
checker = RENormalizing([
# Python 3 includes module name in exceptions
(re.compile(r"zope.publisher.interfaces.NotFound"),
"NotFound"),
])
return unittest.TestSuite((
unittest.defaultTestLoader.loadTestsFromName(__name__),
doctest.DocTestSuite(
'zope.browserresource.file',
setUp=setUp, tearDown=tearDown,
checker=checker,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE),
))
| 33.691781 | 93 | 0.658264 | 1,055 | 9,838 | 6.017062 | 0.210427 | 0.042533 | 0.056711 | 0.057341 | 0.64414 | 0.624606 | 0.581443 | 0.56569 | 0.544266 | 0.529458 | 0 | 0.009652 | 0.231246 | 9,838 | 291 | 94 | 33.80756 | 0.829697 | 0.149014 | 0 | 0.624277 | 0 | 0 | 0.061047 | 0.007218 | 0 | 0 | 0 | 0 | 0.196532 | 1 | 0.086705 | false | 0.011561 | 0.098266 | 0.011561 | 0.219653 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02144c6da9bc48ec7c8c978209c9494e60e74252 | 13,958 | py | Python | engines/ep/management/mc_bin_server.py | rohansuri/kv_engine | 6d377448a787ce5dc268c95def2850e36f5f1328 | [
"BSD-3-Clause"
] | 1 | 2019-06-13T07:33:09.000Z | 2019-06-13T07:33:09.000Z | engines/ep/management/mc_bin_server.py | rohansuri/kv_engine | 6d377448a787ce5dc268c95def2850e36f5f1328 | [
"BSD-3-Clause"
] | null | null | null | engines/ep/management/mc_bin_server.py | rohansuri/kv_engine | 6d377448a787ce5dc268c95def2850e36f5f1328 | [
"BSD-3-Clause"
] | 2 | 2019-10-11T14:00:49.000Z | 2020-04-06T09:20:15.000Z | #!/usr/bin/env python3
"""
A memcached test server.
Copyright (c) 2007 Dustin Sallings <dustin@spy.net>
"""
import asyncore
import random
import string
import socket
import struct
import time
import hmac
import heapq
import memcacheConstants
from memcacheConstants import MIN_RECV_PACKET, REQ_PKT_FMT, RES_PKT_FMT
from memcacheConstants import INCRDECR_RES_FMT
from memcacheConstants import REQ_MAGIC_BYTE, RES_MAGIC_BYTE, EXTRA_HDR_FMTS
VERSION="1.0"
class BaseBackend(object):
"""Higher-level backend (processes commands and stuff)."""
# Command IDs to method names. This is used to build a dispatch dict on
# the fly.
CMDS={
memcacheConstants.CMD_GET: 'handle_get',
memcacheConstants.CMD_GETQ: 'handle_getq',
memcacheConstants.CMD_SET: 'handle_set',
memcacheConstants.CMD_ADD: 'handle_add',
memcacheConstants.CMD_REPLACE: 'handle_replace',
memcacheConstants.CMD_DELETE: 'handle_delete',
memcacheConstants.CMD_INCR: 'handle_incr',
memcacheConstants.CMD_DECR: 'handle_decr',
memcacheConstants.CMD_QUIT: 'handle_quit',
memcacheConstants.CMD_FLUSH: 'handle_flush',
memcacheConstants.CMD_NOOP: 'handle_noop',
memcacheConstants.CMD_VERSION: 'handle_version',
memcacheConstants.CMD_APPEND: 'handle_append',
memcacheConstants.CMD_PREPEND: 'handle_prepend',
memcacheConstants.CMD_SASL_LIST_MECHS: 'handle_sasl_mechs',
memcacheConstants.CMD_SASL_AUTH: 'handle_sasl_auth',
memcacheConstants.CMD_SASL_STEP: 'handle_sasl_step',
}
def __init__(self):
self.handlers={}
self.sched=[]
for id, method in self.CMDS.items():
self.handlers[id]=getattr(self, method, self.handle_unknown)
def _splitKeys(self, fmt, keylen, data):
"""Split the given data into the headers as specified in the given
format, the key, and the data.
Return (hdrTuple, key, data)"""
hdrSize=struct.calcsize(fmt)
assert hdrSize <= len(data), "Data too short for " + fmt + ': ' + repr(data)
hdr=struct.unpack(fmt, data[:hdrSize])
assert len(data) >= hdrSize + keylen
key=data[hdrSize:keylen+hdrSize]
assert len(key) == keylen, "len(%s) == %d, expected %d" \
% (key, len(key), keylen)
val=data[keylen+hdrSize:]
return hdr, key, val
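# Illustrative example (hypothetical values, Python 2 str semantics as used
# by the rest of this module): _splitKeys(">I", 3, "\x00\x00\x00\x05keyvalue")
# unpacks the 4-byte header into (5,), then returns ((5,), "key", "value").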
def _error(self, which, msg):
return which, 0, msg
def processCommand(self, cmd, keylen, vb, cas, data):
"""Entry point for command processing. Lower level protocol
implementations deliver values here."""
now=time.time()
while self.sched and self.sched[0][0] <= now:
print("Running delayed job.")
heapq.heappop(self.sched)[1]()
hdrs, key, val=self._splitKeys(EXTRA_HDR_FMTS.get(cmd, ''),
keylen, data)
return self.handlers.get(cmd, self.handle_unknown)(cmd, hdrs, key,
cas, val)
def handle_noop(self, cmd, hdrs, key, cas, data):
"""Handle a noop"""
print("Noop")
return 0, 0, ''
def handle_unknown(self, cmd, hdrs, key, cas, data):
"""invoked for any unknown command."""
return self._error(memcacheConstants.ERR_UNKNOWN_CMD,
"The command %d is unknown" % cmd)
class DictBackend(BaseBackend):
"""Sample backend implementation with a non-expiring dict."""
def __init__(self):
super(DictBackend, self).__init__()
self.storage={}
self.held_keys={}
self.challenge = ''.join(random.sample(string.ascii_letters
+ string.digits, 32))
def __lookup(self, key):
rv=self.storage.get(key, None)
if rv:
now=time.time()
if now >= rv[1]:
print(key, "expired")
del self.storage[key]
rv=None
else:
print("Miss looking up", key)
return rv
def handle_get(self, cmd, hdrs, key, cas, data):
val=self.__lookup(key)
if val:
rv = 0, id(val), struct.pack(
memcacheConstants.GET_RES_FMT, val[0]) + str(val[2])
else:
rv=self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found')
return rv
def handle_set(self, cmd, hdrs, key, cas, data):
print("Handling a set with", hdrs)
val=self.__lookup(key)
exp, flags=hdrs
def f(val):
return self.__handle_unconditional_set(cmd, hdrs, key, data)
return self._withCAS(key, cas, f)
def handle_getq(self, cmd, hdrs, key, cas, data):
rv=self.handle_get(cmd, hdrs, key, cas, data)
if rv[0] == memcacheConstants.ERR_NOT_FOUND:
print("Swallowing miss")
rv = None
return rv
def __handle_unconditional_set(self, cmd, hdrs, key, data):
exp=hdrs[1]
# If it's going to expire soon, tell it to wait a while.
if exp == 0:
exp=float(2 ** 31)
self.storage[key]=(hdrs[0], time.time() + exp, data)
print("Stored", self.storage[key], "in", key)
if key in self.held_keys:
del self.held_keys[key]
return 0, id(self.storage[key]), ''
def __mutation(self, cmd, hdrs, key, data, multiplier):
amount, initial, expiration=hdrs
rv=self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found')
val=self.storage.get(key, None)
print("Mutating %s, hdrs=%s, val=%s %s" % (key, repr(hdrs), repr(val),
multiplier))
if val:
val = (val[0], val[1], max(0, int(val[2]) + (multiplier * amount)))
self.storage[key]=val
rv=0, id(val), str(val[2])
else:
if expiration != memcacheConstants.INCRDECR_SPECIAL:
self.storage[key]=(0, time.time() + expiration, initial)
rv=0, id(self.storage[key]), str(initial)
if rv[0] == 0:
rv = rv[0], rv[1], struct.pack(
memcacheConstants.INCRDECR_RES_FMT, int(rv[2]))
print("Returning", rv)
return rv
def handle_incr(self, cmd, hdrs, key, cas, data):
return self.__mutation(cmd, hdrs, key, data, 1)
def handle_decr(self, cmd, hdrs, key, cas, data):
return self.__mutation(cmd, hdrs, key, data, -1)
def __has_hold(self, key):
rv=False
now=time.time()
print("Looking for hold of", key, "in", self.held_keys, "as of", now)
if key in self.held_keys:
if time.time() > self.held_keys[key]:
del self.held_keys[key]
else:
rv=True
return rv
def handle_add(self, cmd, hdrs, key, cas, data):
rv=self._error(memcacheConstants.ERR_EXISTS, 'Data exists for key')
if key not in self.storage and not self.__has_hold(key):
rv=self.__handle_unconditional_set(cmd, hdrs, key, data)
return rv
def handle_replace(self, cmd, hdrs, key, cas, data):
rv=self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found')
if key in self.storage and not self.__has_hold(key):
rv=self.__handle_unconditional_set(cmd, hdrs, key, data)
return rv
def handle_flush(self, cmd, hdrs, key, cas, data):
timebomb_delay=hdrs[0]
def f():
self.storage.clear()
self.held_keys.clear()
print("Flushed")
if timebomb_delay:
heapq.heappush(self.sched, (time.time() + timebomb_delay, f))
else:
f()
return 0, 0, ''
def handle_delete(self, cmd, hdrs, key, cas, data):
def f(val):
rv=self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found')
if val:
del self.storage[key]
rv = 0, 0, ''
print("Deleted", key, hdrs[0])
if hdrs[0] > 0:
self.held_keys[key] = time.time() + hdrs[0]
return rv
return self._withCAS(key, cas, f)
def handle_version(self, cmd, hdrs, key, cas, data):
return 0, 0, "Python test memcached server %s" % VERSION
def _withCAS(self, key, cas, f):
val=self.storage.get(key, None)
if cas == 0 or (val and cas == id(val)):
rv=f(val)
elif val:
rv = self._error(memcacheConstants.ERR_EXISTS, 'Exists')
else:
rv = self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found')
return rv
def handle_prepend(self, cmd, hdrs, key, cas, data):
def f(val):
self.storage[key]=(val[0], val[1], data + val[2])
return 0, id(self.storage[key]), ''
return self._withCAS(key, cas, f)
def handle_append(self, cmd, hdrs, key, cas, data):
def f(val):
self.storage[key]=(val[0], val[1], val[2] + data)
return 0, id(self.storage[key]), ''
return self._withCAS(key, cas, f)
def handle_sasl_mechs(self, cmd, hdrs, key, cas, data):
return 0, 0, 'PLAIN CRAM-MD5'
def handle_sasl_step(self, cmd, hdrs, key, cas, data):
assert key == 'CRAM-MD5'
u, resp = data.split(' ', 1)
expected = hmac.HMAC('testpass', self.challenge).hexdigest()
if u == 'testuser' and resp == expected:
print("Successful CRAM-MD5 auth.")
return 0, 0, 'OK'
else:
print("Errored a CRAM-MD5 auth.")
return self._error(memcacheConstants.ERR_AUTH, 'Auth error.')
def _handle_sasl_auth_plain(self, data):
foruser, user, passwd = data.split("\0")
if user == 'testuser' and passwd == 'testpass':
print("Successful plain auth")
return 0, 0, "OK"
else:
print("Bad username/password: %s/%s" % (user, passwd))
return self._error(memcacheConstants.ERR_AUTH, 'Auth error.')
def _handle_sasl_auth_cram_md5(self, data):
assert data == ''
print("Issuing %s as a CRAM-MD5 challenge." % self.challenge)
return memcacheConstants.ERR_AUTH_CONTINUE, 0, self.challenge
def handle_sasl_auth(self, cmd, hdrs, key, cas, data):
mech = key
if mech == 'PLAIN':
return self._handle_sasl_auth_plain(data)
elif mech == 'CRAM-MD5':
return self._handle_sasl_auth_cram_md5(data)
else:
print("Unhandled auth type: %s" % mech)
return self._error(memcacheConstants.ERR_AUTH, 'Auth error.')
class MemcachedBinaryChannel(asyncore.dispatcher):
"""A channel implementing the binary protocol for memcached."""
# Receive buffer size
BUFFER_SIZE = 4096
def __init__(self, channel, backend, wbuf=""):
asyncore.dispatcher.__init__(self, channel)
self.log_info("New bin connection from %s" % str(self.addr))
self.backend=backend
self.wbuf=wbuf
self.rbuf=""
def __hasEnoughBytes(self):
rv=False
if len(self.rbuf) >= MIN_RECV_PACKET:
magic, cmd, keylen, extralen, datatype, vb, remaining, opaque, cas=\
struct.unpack(REQ_PKT_FMT, self.rbuf[:MIN_RECV_PACKET])
rv = len(self.rbuf) - MIN_RECV_PACKET >= remaining
return rv
def processCommand(self, cmd, keylen, vb, cas, data):
return self.backend.processCommand(cmd, keylen, vb, cas, data)
def handle_read(self):
self.rbuf += self.recv(self.BUFFER_SIZE)
while self.__hasEnoughBytes():
magic, cmd, keylen, extralen, datatype, vb, remaining, opaque, cas=\
struct.unpack(REQ_PKT_FMT, self.rbuf[:MIN_RECV_PACKET])
assert magic == REQ_MAGIC_BYTE
assert keylen <= remaining, "Keylen is too big: %d > %d" \
% (keylen, remaining)
assert extralen == memcacheConstants.EXTRA_HDR_SIZES.get(cmd, 0), \
"Extralen is too large for cmd 0x%x: %d" % (cmd, extralen)
# Grab the data section of this request
data=self.rbuf[MIN_RECV_PACKET:MIN_RECV_PACKET+remaining]
assert len(data) == remaining
# Remove this request from the read buffer
self.rbuf=self.rbuf[MIN_RECV_PACKET+remaining:]
# Process the command
cmdVal = self.processCommand(cmd, keylen, vb, cas, data)
# Queue the response to the client if applicable.
if cmdVal:
try:
status, cas, response = cmdVal
except ValueError:
print("Got", cmdVal)
raise
dtype=0
extralen=memcacheConstants.EXTRA_HDR_SIZES.get(cmd, 0)
self.wbuf += struct.pack(RES_PKT_FMT,
RES_MAGIC_BYTE, cmd, keylen,
extralen, dtype, status,
len(response), opaque, cas) + response
def writable(self):
return self.wbuf
def handle_write(self):
sent = self.send(self.wbuf)
self.wbuf = self.wbuf[sent:]
def handle_close(self):
self.log_info("Disconnected from %s" % str(self.addr))
self.close()
class MemcachedServer(asyncore.dispatcher):
"""A memcached server."""
def __init__(self, backend, handler, port=11211):
asyncore.dispatcher.__init__(self)
self.handler=handler
self.backend=backend
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind(("", port))
self.listen(5)
self.log_info("Listening on %d" % port)
def handle_accept(self):
channel, addr = self.accept()
self.handler(channel, self.backend)
if __name__ == '__main__':
port = 11211
import sys
if len(sys.argv) > 1:
port = int(sys.argv[1])
server = MemcachedServer(DictBackend(), MemcachedBinaryChannel, port=port)
asyncore.loop()
| 36.067183 | 84 | 0.592205 | 1,746 | 13,958 | 4.576747 | 0.179267 | 0.023652 | 0.032537 | 0.03091 | 0.309348 | 0.264422 | 0.207609 | 0.201852 | 0.163934 | 0.141534 | 0 | 0.01009 | 0.289941 | 13,958 | 386 | 85 | 36.160622 | 0.796186 | 0.061327 | 0 | 0.237624 | 0 | 0 | 0.073645 | 0 | 0 | 0 | 0 | 0 | 0.029703 | 1 | 0.141914 | false | 0.013201 | 0.042904 | 0.026403 | 0.330033 | 0.062706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
021519028496c68e5fd1b7f231ae1d62dfd0120a | 804 | py | Python | mlir/test/Integration/Dialect/SparseTensor/taco/test_simple_tensor_algebra.py | rastogishubham/llvm-project | 1785d49d77a82222d33122ab6e2a115c91d007a1 | [
"Apache-2.0"
] | null | null | null | mlir/test/Integration/Dialect/SparseTensor/taco/test_simple_tensor_algebra.py | rastogishubham/llvm-project | 1785d49d77a82222d33122ab6e2a115c91d007a1 | [
"Apache-2.0"
] | null | null | null | mlir/test/Integration/Dialect/SparseTensor/taco/test_simple_tensor_algebra.py | rastogishubham/llvm-project | 1785d49d77a82222d33122ab6e2a115c91d007a1 | [
"Apache-2.0"
] | null | null | null | # RUN: SUPPORTLIB=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext %PYTHON %s | FileCheck %s
import os
import sys
_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)
from tools import mlir_pytaco_api as pt
compressed = pt.compressed
dense = pt.dense
# Ensure that we can run an unmodified PyTACO program with a simple tensor
# algebra expression using tensor index notation, and produce the expected
# result.
i, j = pt.get_index_vars(2)
A = pt.tensor([2, 3])
B = pt.tensor([2, 3])
C = pt.tensor([2, 3])
D = pt.tensor([2, 3], dense)
A.insert([0, 1], 10)
A.insert([1, 2], 40)
B.insert([0, 0], 20)
B.insert([1, 2], 30)
C.insert([0, 1], 5)
C.insert([1, 2], 7)
D[i, j] = A[i, j] + B[i, j] - C[i, j]
# CHECK: [20. 5. 0. 0. 0. 63.]
print(D.to_array().reshape(6))
| 25.935484 | 98 | 0.676617 | 154 | 804 | 3.409091 | 0.480519 | 0.019048 | 0.068571 | 0.07619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058394 | 0.14801 | 804 | 30 | 99 | 26.8 | 0.708029 | 0.347015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.15 | 0 | 0.15 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0215da0ac1ad5a55a5e22561670428d6e9bd7025 | 921 | py | Python | Python/rotate_string.py | anu-ka/coding-problems | 2f48017cc734d7de81d62042ba385ead709f0ca7 | [
"MIT"
] | null | null | null | Python/rotate_string.py | anu-ka/coding-problems | 2f48017cc734d7de81d62042ba385ead709f0ca7 | [
"MIT"
] | null | null | null | Python/rotate_string.py | anu-ka/coding-problems | 2f48017cc734d7de81d62042ba385ead709f0ca7 | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/rotate-string/
# Given two strings s and goal, return true if and only if s can become goal after some number of shifts on s.
# A shift on s consists of moving the leftmost character of s to the rightmost position.
# For example, if s = "abcde", then it will be "bcdea" after one shift.
import pytest
class Solution:
def rotateString(self, s: str, goal: str) -> bool:
if len(s) != len(goal):
return False
length = len(s)
for i in range(1, length + 1):
if goal[i:length] + goal[0:i] == s:
return True
return False
@pytest.mark.parametrize(
("s", "goal", "result"),
[
("abcde", "cdeab", True),
("abcde", "abced", False),
("ckahkzpikz", "hkzpikzcka", True),
],
)
def test_basic(s: str, goal: str, result: bool) -> None:
assert result == Solution().rotateString(s, goal)
| 29.709677 | 110 | 0.598263 | 130 | 921 | 4.230769 | 0.546154 | 0.036364 | 0.029091 | 0.04 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004458 | 0.269273 | 921 | 30 | 111 | 30.7 | 0.812779 | 0.336591 | 0 | 0.1 | 0 | 0 | 0.084158 | 0 | 0 | 0 | 0 | 0 | 0.05 | 1 | 0.1 | false | 0 | 0.05 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02181542f910802a14631350d4002b13405a8a39 | 869 | py | Python | May LeetCoding Challenge/Count Square Submatrices with All Ones.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | May LeetCoding Challenge/Count Square Submatrices with All Ones.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | May LeetCoding Challenge/Count Square Submatrices with All Ones.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | class Solution:
def countSquares(self, matrix) -> int:
# if matrix[i][j] == 0: dp[i][j] = 0
# if matrix[i][j] == 1: dp[i][j] = min(dp[i-1][j-1], dp[i-1][j], dp[i][j-1]) + 1
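# Worked example (for illustration): for matrix [[1, 1], [1, 1]] the dp
# table becomes [[1, 1], [1, 2]], so the answer is 1 + 1 + 1 + 2 = 5
# (four 1x1 squares plus one 2x2 square).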
m, n = (len(matrix), len(matrix[0]))
dp = [[0 for i in range(n + 1)] for i in range(m + 1)]
ans = 0
for i in range(1, m + 1):
for j in range(1, n + 1):
if matrix[i - 1][j - 1] == 0:
dp[i][j] = 0
else:
dp[i][j] = min(dp[i - 1][j - 1], dp[i - 1][j], dp[i][j - 1]) + 1
ans += dp[i][j]
return ans
if __name__ == '__main__':
s = Solution()
print(s.countSquares([
[1, 0, 1],
[1, 1, 0],
[1, 1, 0]
]))
print(s.countSquares([
[0, 1, 1, 1],
[1, 1, 1, 1],
[0, 1, 1, 1]
]))
| 28.966667 | 88 | 0.368239 | 144 | 869 | 2.166667 | 0.1875 | 0.083333 | 0.089744 | 0.064103 | 0.330128 | 0.205128 | 0.153846 | 0.153846 | 0.153846 | 0.153846 | 0 | 0.092702 | 0.416571 | 869 | 29 | 89 | 29.965517 | 0.522682 | 0.130035 | 0 | 0.16 | 0 | 0 | 0.010624 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0 | 0 | 0.12 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02182dddbacd347c62c38cf5ff5894c334b5f5a6 | 3,869 | py | Python | RL PR Control LC model/PR_CON.py | XiyuZhai97/Deadbeat-Control-Strategy-of-Electronic-Converter | d43457b7ee168fc308a127a0769b2bdf08a02cf5 | [
"MIT"
] | 2 | 2019-09-22T18:33:03.000Z | 2019-09-24T08:20:43.000Z | RL PR Control LCX model/PR_CON.py | XiyuZhai97/Deadbeat-Control-Strategy-of-Electronic-Converter | d43457b7ee168fc308a127a0769b2bdf08a02cf5 | [
"MIT"
] | null | null | null | RL PR Control LCX model/PR_CON.py | XiyuZhai97/Deadbeat-Control-Strategy-of-Electronic-Converter | d43457b7ee168fc308a127a0769b2bdf08a02cf5 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ==============================================================================
import math
class PRControl:
"""PR Controller
G(s) = Kp + (2*Kr*Wc*s) / (s^2 + 2*Wc*s + Wr^2)
Kp -- proportional gain of the PR controller
Kr -- resonant gain
Wc -- cutoff frequency
Wr -- resonant frequency
"""
def __init__(self, kp=0.1, kr=100, wc=5, wr=100*math.pi, t_sample=0.0000001):
# self.Kp = kp
# self.Kr = kr
# self.Wc = wc
# self.Wr = wr
self.prb0 = (kp*pow(wr,2)*pow(t_sample,2) + 4*(kp+kr)*wc*t_sample + 4*kp)/(pow(wr,2)*pow(t_sample,2) + 4*wc*t_sample +4)
self.prb1 = (2*kp*pow(wr,2)*pow(t_sample,2) - 8*kp)/(pow(wr,2)*pow(t_sample,2) + 4*wc*t_sample +4)
self.prb2 = (kp*pow(wr,2)*pow(t_sample,2) - 4*(kp+kr)*wc*t_sample + 4*kp)/(pow(wr,2)*pow(t_sample,2) + 4*wc*t_sample +4)
self.pra1 = (2* pow(wr,2)*pow(t_sample,2) - 8)/(pow(wr,2)*pow(t_sample,2) + 4*wc*t_sample +4)
self.pra2 = (pow(wr,2)*pow(t_sample,2) - 4*wc*t_sample +4)/(pow(wr,2)*pow(t_sample,2) + 4*wc*t_sample +4)
self.clear()
# self.In_Reference = 0.0
self.error=0.0
self.last1_error=0.0
self.last2_error=0.0
self.output = 0.0
self.last1_output=0.0
self.last2_output=0.0
def clear(self):
# self.In_Reference = 0.0
self.output = 0.0
self.error=0.0
self.last1_error=0.0
self.last2_error=0.0
# def tuner(self, kp, kr, wc, wr):
# self.prb0 = (kp*pow(wr,2)*pow(self.t_sample,2) + 4*(kp+kr)*wc*self.t_sample + 4*kp)/(pow(wr,2)*pow(self.t_sample,2) + 4*wc*self.t_sample +4)
# self.prb1 = (2*kp*pow(wr,2)*pow(self.t_sample,2) - 8*kp)/(pow(wr,2)*pow(self.t_sample,2) + 4*wc*self.t_sample +4)
# self.prb2 = (kp*pow(wr,2)*pow(self.t_sample,2) - 4*(kp+kr)*wc*self.t_sample + 4*kp)/(pow(wr,2)*pow(self.t_sample,2) + 4*wc*self.t_sample +4)
# self.pra1 = (2* pow(wr,2)*pow(self.t_sample,2) - 8)/(pow(wr,2)*pow(self.t_sample,2) + 4*wc*self.t_sample +4)
# self.pra2 = (pow(wr,2)*pow(self.t_sample,2) - 4*wc*self.t_sample +4)/(pow(wr,2)*pow(self.t_sample,2) + 4*wc*self.t_sample +4)
def update(self, feedback_value):
"""Clears PID computations and coefficients
u(k)=-a1*u(k-1)-a2*u(k-2)+b0*e(k)+
"""
self.error = feedback_value # self.In_Reference - feedback_value
self.output = self.prb0*self.error + self.prb1*self.last1_error + self.prb2*self.last2_error - self.pra1*self.last1_output - self.pra2*self.last2_output
self.last2_output = self.last1_output
self.last1_output = self.output
self.last2_error = self.last1_error
self.last1_error = self.error
# def setKp(self, proportional_gain):
# """Determines how aggressively the PID reacts to the current error with setting Proportional Gain"""
# self.Kp = proportional_gain
#
# def setKi(self, integral_gain):
# """Determines how aggressively the PID reacts to the current error with setting Integral Gain"""
# self.Ki = integral_gain
#
# def setKd(self, derivative_gain):
# """Determines how aggressively the PID reacts to the current error with setting Derivative Gain"""
# self.Kd = derivative_gain
# def setWindup(self, windup):
# """Integral windup, also known as integrator windup or reset windup,
# refers to the situation in a PID feedback controller where
# a large change in setpoint occurs (say a positive change)
# and the integral terms accumulates a significant error
# during the rise (windup), thus overshooting and continuing
# to increase as this accumulated error is unwound
# (offset by errors in the other direction).
# The specific problem is the excess overshooting.
# """
# self.windup_guard = windup
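# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original controller). The sample
# time, gains, and the 50 Hz sinusoidal error below are illustrative
# assumptions, not tuned values.
if __name__ == '__main__':
    t_sample = 0.0001  # assumed 100 us control period
    pr = PRControl(kp=0.1, kr=100, wc=5, wr=2 * math.pi * 50,
                   t_sample=t_sample)
    for k in range(1000):
        t = k * t_sample
        error = math.sin(2 * math.pi * 50 * t)  # stand-in tracking error
        pr.update(error)
        # pr.output now holds the controller command for this sample
    print("final output sample: %.6f" % pr.output)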
| 45.517647 | 160 | 0.599638 | 642 | 3,869 | 3.504673 | 0.188474 | 0.115111 | 0.053333 | 0.08 | 0.513778 | 0.479556 | 0.466222 | 0.453778 | 0.440889 | 0.421778 | 0 | 0.05356 | 0.223055 | 3,869 | 84 | 161 | 46.059524 | 0.694943 | 0.554665 | 0 | 0.296296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.037037 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
021cfe77faa5869c819831635c5a8b873d13d9ce | 88,869 | py | Python | pfs_middleware/pfs_middleware/middleware.py | tipabu/ProxyFS | 2cfae94c50c8ebec30103afc738f31467e026ece | [
"Apache-2.0"
] | null | null | null | pfs_middleware/pfs_middleware/middleware.py | tipabu/ProxyFS | 2cfae94c50c8ebec30103afc738f31467e026ece | [
"Apache-2.0"
] | null | null | null | pfs_middleware/pfs_middleware/middleware.py | tipabu/ProxyFS | 2cfae94c50c8ebec30103afc738f31467e026ece | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016-2017 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Middleware that will provide a Swift-ish API for a ProxyFS account.
It tries to mimic the Swift API as much as possible, but there are some
differences that must be called out.
* ETags are sometimes different. In Swift, an object's ETag is the MD5
checksum of its contents. With this middleware, an object's ETag is
sometimes an opaque value sufficient to provide a strong identifier as per
RFC 7231, but it is not the MD5 checksum of the object's contents.
ETags start out as MD5 checksums, but if files are subsequently modified
via SMB or NFS, the ETags become opaque values.
* Container listings lack object count. To get an object count, it would be
necessary to traverse the entire directory structure underneath the
container directory. This would be unbearably slow and resource-intensive.
* Account HEAD responses contain a header "ProxyFS-Enabled: yes". This way,
clients can know what they're dealing with.
* Support for the COALESCE verb. Since static large objects will not work
with this middleware's ETag values, another solution was found. A client
can combine (or coalesce, if you will) a number of small files together
into one large file, allowing for parallel uploads like a client can get
with static large objects.
Once the small files are uploaded, one makes a COALESCE request to the
destination path. The request body is a JSON object containing a key
"elements" whose value is a list of account-relative object names.
Example:
COALESCE /v1/AUTH_me/c/rainbow HTTP/1.1
X-Auth-Token: t
... more headers ...
{
"elements": [
"/c/red",
"/c/orange",
"/c/yellow",
"/c/green",
"/c/blue",
"/c/indigo",
"/c/violet"
]
}
This will combine the files /c/red, ..., /c/violet into a single file
/c/rainbow. The files /c/red et cetera will be deleted as a result of this
request.
"""
import contextlib
import datetime
import eventlet
import eventlet.queue
import hashlib
import itertools
import json
import math
import mimetypes
import re
import six
import socket
import time
import uuid
import xml.etree.ElementTree as ET
from six.moves.urllib import parse as urllib_parse
from io import BytesIO
from swift.common.middleware.acl import parse_acl, format_acl
from . import pfs_errno, rpc, swift_code, utils
# Generally speaking, let's try to keep the use of Swift code to a
# reasonable level. Using dozens of random functions from swift.common.utils
# will ensure that this middleware breaks with every new Swift release. On
# the other hand, there's some really good, actually-works-in-production
# code in Swift that does things we need.
# Were we to make an account HEAD request instead of calling
# get_account_info, we'd lose the benefit of Swift's caching. This would
# slow down requests *a lot*. Same for containers.
from swift.proxy.controllers.base import (
get_account_info, get_container_info, clear_info_cache)
# Plain WSGI is annoying to work with, and nobody wants a dependency on
# webob.
from swift.common import swob, constraints
# Our logs should go to the same place as everyone else's. Plus, this logger
# works well in an eventlet-ified process, and SegmentedIterable needs one.
from swift.common.utils import config_true_value, get_logger, Timestamp
# POSIX file-path limits. Taken from Linux's limits.h, which is also where
# ProxyFS gets them.
NAME_MAX = 255
PATH_MAX = 4096
# Used for content type of directories in container listings
DIRECTORY_CONTENT_TYPE = "application/directory"
ZERO_FILL_PATH = "/0"
LEASE_RENEWAL_INTERVAL = 5 # seconds
# Beware: ORIGINAL_MD5_HEADER is random case, not title case, but is
# stored on-disk just as defined here. Care must be taken when comparing
# it to incoming headers which are title case.
ORIGINAL_MD5_HEADER = "X-Object-Sysmeta-ProxyFS-Initial-MD5"
S3API_ETAG_HEADER = "X-Object-Sysmeta-S3Api-Etag"
LISTING_ETAG_OVERRIDE_HEADER = \
"X-Object-Sysmeta-Container-Update-Override-Etag"
# They don't start with X-Object-(Meta|Sysmeta)-, but we save them anyway.
SPECIAL_OBJECT_METADATA_HEADERS = {
"Content-Type",
"Content-Disposition",
"Content-Encoding",
"X-Object-Manifest",
"X-Static-Large-Object"}
# These are not mutated on object POST.
STICKY_OBJECT_METADATA_HEADERS = {
"X-Static-Large-Object",
ORIGINAL_MD5_HEADER}
SPECIAL_CONTAINER_METADATA_HEADERS = {
"X-Container-Read",
"X-Container-Write",
"X-Container-Sync-Key",
"X-Container-Sync-To",
"X-Versions-Location"}
# ProxyFS directories don't know how many objects are under them, nor how
# many bytes each one uses. (Yes, a directory knows how many files and
# subdirectories it contains, but that doesn't include things in those
# subdirectories.)
CONTAINER_HEADERS_WE_LIE_ABOUT = {
"X-Container-Object-Count": "0",
"X-Container-Bytes-Used": "0",
}
SWIFT_OWNER_HEADERS = {
"X-Container-Read",
"X-Container-Write",
"X-Container-Sync-Key",
"X-Container-Sync-To",
"X-Account-Meta-Temp-Url-Key",
"X-Account-Meta-Temp-Url-Key-2",
"X-Container-Meta-Temp-Url-Key",
"X-Container-Meta-Temp-Url-Key-2",
"X-Account-Access-Control"}
MD5_ETAG_RE = re.compile("^[a-f0-9]{32}$")
EMPTY_OBJECT_ETAG = "d41d8cd98f00b204e9800998ecf8427e"
RPC_TIMEOUT_DEFAULT = 30.0
MAX_RPC_BODY_SIZE = 2 ** 20
def listing_iter_from_read_plan(read_plan):
"""
Takes a read plan from proxyfsd and turns it into an iterable of
tuples suitable for passing to SegmentedIterable.
Example read plan:
[
{
"Length": 4,
"ObjectPath": "/v1/AUTH_test/Replicated3Way_1/0000000000000074",
"Offset": 0
},
{
"Length": 17,
"ObjectPath": "/v1/AUTH_test/Replicated3Way_1/0000000000000076",
"Offset": 0
},
{
"Length": 19,
"ObjectPath": "/v1/AUTH_test/Replicated3Way_1/0000000000000078",
"Offset": 0
},
{
"Length": 89,
"ObjectPath": "/v1/AUTH_test/Replicated3Way_1/000000000000007A",
"Offset": 0
}
]
Example return value:
[
("/v1/AUTH_test/Replicated3Way_1/0000000000000074", None, None, 0, 3),
("/v1/AUTH_test/Replicated3Way_1/0000000000000076", None, None, 0, 16),
("/v1/AUTH_test/Replicated3Way_1/0000000000000078", None, None, 0, 18),
("/v1/AUTH_test/Replicated3Way_1/000000000000007A", None, None, 0, 88),
]
"""
if read_plan is None:
# ProxyFS likes to send null values instead of empty lists.
read_plan = ()
# It's a little ugly that the GoCase field names escape from the
# RPC-response parser all the way to here, but it's inefficient, in both
# CPU cycles and programmer brainpower, to create some intermediate
# representation just to avoid GoCase.
return [(rpe["ObjectPath"] or ZERO_FILL_PATH,
None, # we don't know the segment's ETag
None, # we don't know the segment's length
rpe["Offset"],
rpe["Offset"] + rpe["Length"] - 1)
for rpe in read_plan]
def x_timestamp_from_epoch_ns(epoch_ns):
"""
Convert a ProxyFS-style Unix timestamp to a Swift X-Timestamp header.
ProxyFS uses an integral number of nanoseconds since the epoch, while
Swift uses a floating-point number with centimillisecond (10^-5 second)
precision.
:param epoch_ns: Unix time, expressed as an integral number of
nanoseconds since the epoch. Note that this is not the
usual Unix convention of a *real* number of *seconds*
since the epoch.
:returns: Swift X-Timestamp value with centimillisecond (10^-5 second)
precision, e.g. '1470358516.96692'
"""
return "{0:.5f}".format(float(epoch_ns) / 1000000000)
def iso_timestamp_from_epoch_ns(epoch_ns):
"""
Convert a Unix timestamp to an ISO-8601 timestamp.
:param epoch_ns: Unix time, expressed as an integral number of
nanoseconds since the epoch. Note that this is not the
usual Unix convention of a *real* number of *seconds*
since the epoch.
:returns: ISO-8601 timestamp (like those found in Swift's container
listings), e.g. '2016-08-05T00:55:16.966920'
"""
iso_timestamp = datetime.datetime.utcfromtimestamp(
epoch_ns / 1000000000.0).isoformat()
# Conveniently (ha!), the isoformat() method omits the
# fractional-seconds part if it's equal to 0. The Swift proxy server
# does not, so we match its output.
if iso_timestamp[-7] != ".":
iso_timestamp += ".000000"
return iso_timestamp
def last_modified_from_epoch_ns(epoch_ns):
"""
Convert a Unix timestamp to an IMF-Fixdate timestamp.
:param epoch_ns: Unix time, expressed as an integral number of
nanoseconds since the epoch. Note that this is not the
usual Unix convention of a *real* number of *seconds*
since the epoch.
:returns: Last-Modified header value in IMF-Fixdate format as specified
in RFC 7231 section 7.1.1.1.
"""
return time.strftime(
'%a, %d %b %Y %H:%M:%S GMT',
time.gmtime(math.ceil(epoch_ns / 1000000000.0)))
def guess_content_type(filename, is_dir):
if is_dir:
return DIRECTORY_CONTENT_TYPE
content_type, _ = mimetypes.guess_type(filename)
if not content_type:
content_type = "application/octet-stream"
return content_type
def should_validate_etag(a_string):
if not a_string:
return False
return not a_string.strip('"').startswith('pfsv')
@contextlib.contextmanager
def pop_and_restore(hsh, key, default=None):
"""
Temporarily remove and yield a value from a hash. Restores that
key/value pair to its original state in the hash on exit.
"""
if key in hsh:
value = hsh.pop(key)
was_there = True
else:
value = default
was_there = False
yield value
if was_there:
hsh[key] = value
else:
hsh.pop(key, None)
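# Illustrative use, mirroring how __call__ uses it below: temporarily drop
# the auth callback while issuing internal subrequests, then put it back.
#
#     with pop_and_restore(req.environ, 'swift.authorize') as auth_cb:
#         ...  # 'swift.authorize' is absent from req.environ in this block
#     # on exit, req.environ['swift.authorize'] is restored if it was set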
def deserialize_metadata(raw_metadata):
"""Deserialize JSON-encoded metadata to WSGI strings"""
if raw_metadata:
try:
metadata = json.loads(raw_metadata)
except ValueError:
metadata = {}
else:
metadata = {}
encoded_metadata = {}
for k, v in metadata.items():
if six.PY2:
key = k.encode('utf8') if isinstance(
k, six.text_type) else str(k)
value = v.encode('utf8') if isinstance(
v, six.text_type) else str(v)
else:
key = swift_code.str_to_wsgi(k) if isinstance(k, str) else str(k)
value = swift_code.str_to_wsgi(v) if isinstance(v, str) else str(v)
encoded_metadata[key] = value
return encoded_metadata
def serialize_metadata(wsgi_metadata):
return json.dumps({
swift_code.wsgi_to_str(key): (
swift_code.wsgi_to_str(value)
if isinstance(value, six.string_types) else value)
for key, value in wsgi_metadata.items()})
def merge_container_metadata(old, new):
merged = old.copy()
for k, v in new.items():
merged[k] = v
return {k: v for k, v in merged.items() if v}
def merge_object_metadata(old, new):
'''
Merge the existing metadata for an object with new metadata passed
in as a result of a POST operation. X-Object-Sysmeta- and similar
metadata cannot be changed by a POST.
'''
merged = new.copy()
for header, value in list(merged.items()):  # copy so we can delete during iteration
if (header.startswith("X-Object-Sysmeta-") or
header in STICKY_OBJECT_METADATA_HEADERS):
del merged[header]
for header, value in old.items():
if (header.startswith("X-Object-Sysmeta-") or
header in STICKY_OBJECT_METADATA_HEADERS):
merged[header] = value
old_ct = old.get("Content-Type")
new_ct = new.get("Content-Type")
if old_ct is not None:
if not new_ct:
merged["Content-Type"] = old_ct
elif ';swift_bytes=' in old_ct:
merged["Content-Type"] = '%s;swift_bytes=%s' % (
new_ct, old_ct.rsplit(';swift_bytes=', 1)[1])
return {k: v for k, v in merged.items() if v}
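# Illustrative behaviour (hypothetical header values): sysmeta from the old
# metadata survives a POST while plain user metadata is replaced wholesale:
#
#     merge_object_metadata(
#         {"X-Object-Sysmeta-Foo": "1", "X-Object-Meta-Color": "red"},
#         {"X-Object-Meta-Shape": "round"})
#     => {"X-Object-Sysmeta-Foo": "1", "X-Object-Meta-Shape": "round"}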
def extract_object_metadata_from_headers(headers):
"""
Find and return the key/value pairs containing object metadata.
This tries to do the same thing as the Swift object server: save only
relevant headers. If the user sends in "X-Fungus-Amungus: shroomy" in
the PUT request's headers, we'll ignore it, just like plain old Swift
would.
:param headers: request headers (a dictionary)
:returns: dictionary containing object-metadata headers (and not a
swob.HeaderKeyDict or similar object)
"""
meta_headers = {}
for header, value in headers.items():
header = header.title()
if (header.startswith("X-Object-Meta-") or
header.startswith("X-Object-Sysmeta-") or
header in SPECIAL_OBJECT_METADATA_HEADERS):
# do not let a client pass in ORIGINAL_MD5_HEADER
if header not in (ORIGINAL_MD5_HEADER,
ORIGINAL_MD5_HEADER.title()):
meta_headers[header] = value
return meta_headers
def extract_container_metadata_from_headers(req):
"""
Find and return the key/value pairs containing container metadata.
This tries to do the same thing as the Swift container server: save only
relevant headers. If the user sends in "X-Fungus-Amungus: shroomy" in
the PUT request's headers, we'll ignore it, just like plain old Swift
would.
:param req: a swob Request
:returns: dictionary containing container-metadata headers
"""
meta_headers = {}
for header, value in req.headers.items():
header = header.title()
if ((header.startswith("X-Container-Meta-") or
header.startswith("X-Container-Sysmeta-") or
header in SPECIAL_CONTAINER_METADATA_HEADERS) and
(req.environ.get('swift_owner', False) or
header not in SWIFT_OWNER_HEADERS)):
meta_headers[header] = value
if header.startswith("X-Remove-"):
header = header.replace("-Remove", "", 1)
if ((header.startswith("X-Container-Meta-") or
header in SPECIAL_CONTAINER_METADATA_HEADERS) and
(req.environ.get('swift_owner', False) or
header not in SWIFT_OWNER_HEADERS)):
meta_headers[header] = ""
return meta_headers
def mung_etags(obj_metadata, etag, num_writes):
'''
Mung the ETag headers that will be stored with an object. The
goal is to preserve ETag metadata passed down by other filters but
to do so in such a way that it will be invalidated if there is a
write to or truncate of the object via the ProxyFS file API.
The mechanism is to prepend a counter to the ETag header values
that is incremented each time the object is modified.
When the object is read, if the value for the counter has changed,
the ETag is assumed to be invalid. The counter is typically the
number of writes to the object.
etag is either None or the value that should be returned as the
ETag for the object (in the absence of other considerations).
This assumes that all headers have been converted to "titlecase",
except ORIGINAL_MD5_HEADER which is the random case string
"X-Object-Sysmeta-ProxyFS-Initial-MD5".
This ignores SLO headers because it assumes they have already been
stripped.
'''
if LISTING_ETAG_OVERRIDE_HEADER in obj_metadata:
obj_metadata[LISTING_ETAG_OVERRIDE_HEADER] = "%d:%s" % (
num_writes, obj_metadata[LISTING_ETAG_OVERRIDE_HEADER])
if S3API_ETAG_HEADER in obj_metadata:
obj_metadata[S3API_ETAG_HEADER] = "%d:%s" % (
num_writes, obj_metadata[S3API_ETAG_HEADER])
if etag is not None:
obj_metadata[ORIGINAL_MD5_HEADER] = "%d:%s" % (num_writes, etag)
return
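# Worked example (illustrative values): for an object whose contents have
# been written 3 times and whose client-visible ETag is the empty-object
# MD5, mung_etags() stores
#
#     {"X-Object-Sysmeta-ProxyFS-Initial-MD5":
#      "3:d41d8cd98f00b204e9800998ecf8427e"}
#
# unmung_etags() with num_writes == 3 strips the "3:" prefix back off; with
# any other num_writes it drops the header, invalidating the stale ETag.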
def unmung_etags(obj_metadata, num_writes):
'''
Unmung the ETag headers associated with an object to return
them to the state they were in when passed to pfs_middleware.
Delete them if the object has changed or the header value
does not parse correctly.
This assumes that all headers have been converted to "titlecase",
which means, among other things, that "ETag" will show up as
"Etag".
'''
# if the header is invalid or stale it is not added back after the pop
if LISTING_ETAG_OVERRIDE_HEADER in obj_metadata:
val = obj_metadata.pop(LISTING_ETAG_OVERRIDE_HEADER)
try:
stored_num_writes, rest = val.split(':', 1)
if int(stored_num_writes) == num_writes:
obj_metadata[LISTING_ETAG_OVERRIDE_HEADER] = rest
except ValueError:
pass
if S3API_ETAG_HEADER in obj_metadata:
val = obj_metadata.pop(S3API_ETAG_HEADER)
try:
stored_num_writes, rest = val.split(':', 1)
if int(stored_num_writes) == num_writes:
obj_metadata[S3API_ETAG_HEADER] = rest
except ValueError:
pass
if ORIGINAL_MD5_HEADER in obj_metadata:
val = obj_metadata.pop(ORIGINAL_MD5_HEADER)
try:
stored_num_writes, rest = val.split(':', 1)
if int(stored_num_writes) == num_writes:
obj_metadata[ORIGINAL_MD5_HEADER] = rest
except ValueError:
pass
def best_possible_etag(obj_metadata, account_name, inum, num_writes,
is_dir=False, container_listing=False):
'''
Return the ETag that is most likely to be correct for the query,
but leave other valid ETags values in the metadata, in case a
higher layer filter wants to use them to override the value
returned here.
If the ETags in the metadata are invalid, construct and return a
new ProxyFS ETag based on the account name, inode number, and
number of writes.
obj_metadata may be a Python dictionary, a swob.HeaderKeyDict, or a
swob.HeaderEnvironProxy. ORIGINAL_MD5_HEADER is random case, not
title case, but if obj_metadata is a Python dictionary it will
preserve the same random case. The other two types do case folding so
we don't need to map ORIGINAL_MD5_HEADER to title case.
'''
if is_dir:
return EMPTY_OBJECT_ETAG
if container_listing and LISTING_ETAG_OVERRIDE_HEADER in obj_metadata:
return obj_metadata[LISTING_ETAG_OVERRIDE_HEADER]
if ORIGINAL_MD5_HEADER in obj_metadata:
return obj_metadata[ORIGINAL_MD5_HEADER]
return construct_etag(account_name, inum, num_writes)
def construct_etag(account_name, inum, num_writes):
# We append -32 in an attempt to placate S3 clients. In S3, the ETag of
# a multipart object looks like "hash-N" where <hash> is the MD5 of the
# MD5s of the segments and <N> is the number of segments.
#
# Since this etag is not an MD5 digest value, we append -32 here in
# hopes that some S3 clients will be able to download ProxyFS files via
# S3 API without complaining about checksums.
#
# 32 was chosen because it was the ticket number of the author's lunch
# order on the day this code was written. It has no significance.
return '"pfsv2/{}/{:08X}/{:08X}-32"'.format(
urllib_parse.quote(account_name), inum, num_writes)
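# Illustrative result (hypothetical inode number and write count):
# construct_etag("AUTH_test", 42, 3) returns
# '"pfsv2/AUTH_test/0000002A/00000003-32"'.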
def iterator_posthook(iterable, posthook, *posthook_args, **posthook_kwargs):
try:
for x in iterable:
yield x
finally:
posthook(*posthook_args, **posthook_kwargs)
class ZeroFiller(object):
"""
Internal middleware to handle the zero-fill portions of sparse files for
object GET responses.
"""
ZEROES = b"\x00" * 4096
def __init__(self, app):
self.app = app
@swob.wsgify
def __call__(self, req):
if req.path == ZERO_FILL_PATH:
# We know we can do this since the creator of these requests is
# also in this library.
start, end = req.range.ranges[0]
nbytes = end - start + 1
resp = swob.Response(
request=req, status=206,
headers={"Content-Length": nbytes,
"Content-Range": "%d-%d/%d" % (start, end, nbytes)},
app_iter=self.yield_n_zeroes(nbytes))
return resp
else:
return self.app
def yield_n_zeroes(self, n):
# It's a little clunky, but it does avoid creating new strings of
# zeroes over and over again, and it uses only a small amount of
# memory. This becomes important if a file contains hundreds of
# megabytes or more of zeroes; allocating a single string of zeroes
# would do Bad Things(tm) to our memory usage.
zlen = len(self.ZEROES)
while n > zlen:
yield self.ZEROES
n -= zlen
if n > 0:
yield self.ZEROES[:n]
class SnoopingInput(object):
"""
Wrap WSGI input and call a provided callback every time data is read.
"""
def __init__(self, wsgi_input, callback):
self.wsgi_input = wsgi_input
self.callback = callback
def read(self, *a, **kw):
chunk = self.wsgi_input.read(*a, **kw)
self.callback(chunk)
return chunk
def readline(self, *a, **kw):
line = self.wsgi_input.readline(*a, **kw)
self.callback(line)
return line
class LimitedInput(object):
"""
Wrap WSGI input and limit the consumer to taking at most N bytes.
Also count bytes read. This lets us tell ProxyFS how big an object is
after an object PUT completes.
"""
def __init__(self, wsgi_input, limit):
self._peeked_data = b""
self.limit = self.orig_limit = limit
self.bytes_read = 0
self.wsgi_input = wsgi_input
def read(self, length=None, *args, **kwargs):
if length is None:
to_read = self.limit
else:
to_read = min(self.limit, length)
to_read -= len(self._peeked_data)
chunk = self.wsgi_input.read(to_read, *args, **kwargs)
chunk = self._peeked_data + chunk
self._peeked_data = b""
self.bytes_read += len(chunk)
self.limit -= len(chunk)
return chunk
def readline(self, size=None, *args, **kwargs):
if size is None:
to_read = self.limit
else:
to_read = min(self.limit, size)
to_read -= len(self._peeked_data)
line = self.wsgi_input.readline(to_read, *args, **kwargs)
line = self._peeked_data + line
self._peeked_data = b""
self.bytes_read += len(line)
self.limit -= len(line)
return line
@property
def has_more_to_read(self):
if not self._peeked_data:
self._peeked_data = self.wsgi_input.read(1)
return len(self._peeked_data) > 0
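# Illustrative use (hypothetical sizes): wrap a request body so that at most
# object_length bytes can be consumed, then report the actual size.
#
#     wrapped = LimitedInput(req.environ['wsgi.input'], object_length)
#     ...  # hand `wrapped` to whatever streams the PUT data
#     actual_size = wrapped.bytes_read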
class RequestContext(object):
"""
Stuff we need to service the current request.
Basically a pile of data with a name.
"""
def __init__(self, req, proxyfsd_addrinfo,
account_name, container_name, object_name):
# swob.Request object
self.req = req
# address info for proxyfsd, as returned from socket.getaddrinfo()
#
# NB: this is only used for Server.IsAccountBimodal requests; the
# return value there tells us where to go for this particular
# account. That may or may not be the same as
# self.proxyfsd_addrinfo.
self.proxyfsd_addrinfo = proxyfsd_addrinfo
# account/container/object names
self.account_name = account_name
self.container_name = container_name
self.object_name = object_name
class PfsMiddleware(object):
def __init__(self, app, conf, logger=None):
self._cached_proxy_info = None
self.app = app
self.zero_filler_app = ZeroFiller(app)
self.conf = conf
self.logger = logger or get_logger(conf, log_route='pfs')
proxyfsd_hosts = [h.strip() for h
in conf.get('proxyfsd_host', '127.0.0.1').split(',')]
self.proxyfsd_port = int(conf.get('proxyfsd_port', '12345'))
self.proxyfsd_addrinfos = []
for host in proxyfsd_hosts:
try:
# If hostname resolution fails, we'll cause the proxy to
# fail to start. This is probably better than returning 500
# to every single request, but maybe not.
#
# To ensure that proxy startup works, use IP addresses for
# proxyfsd_host. Then socket.getaddrinfo() will always work.
addrinfo = socket.getaddrinfo(
host, self.proxyfsd_port,
socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
self.proxyfsd_addrinfos.append(addrinfo)
except socket.gaierror:
self.logger.error("Error resolving hostname %r", host)
raise
self.proxyfsd_rpc_timeout = float(conf.get('rpc_timeout',
RPC_TIMEOUT_DEFAULT))
self.bimodal_recheck_interval = float(conf.get(
'bimodal_recheck_interval', '60.0'))
self.max_get_time = int(conf.get('max_get_time', '86400'))
self.max_log_segment_size = int(conf.get(
'max_log_segment_size', '2147483648')) # 2 GiB
self.max_coalesce = int(conf.get('max_coalesce', '1000'))
# Assume a max object length of the Swift default of 1024 bytes plus
# a few extra for JSON quotes, commas, et cetera.
self.max_coalesce_request_size = self.max_coalesce * 1100
self.bypass_mode = conf.get('bypass_mode', 'off')
if self.bypass_mode not in ('off', 'read-only', 'read-write'):
raise ValueError('Expected bypass_mode to be one of off, '
'read-only, or read-write')
@swob.wsgify
def __call__(self, req):
vrs, acc, con, obj = utils.parse_path(req.path)
# The only way to specify bypass mode: /proxyfs/AUTH_acct/...
proxyfs_path = False
if vrs == 'proxyfs':
proxyfs_path = True
req.path_info = req.path_info.replace('/proxyfs/', '/v1/', 1)
vrs = 'v1'
if not acc or not constraints.valid_api_version(vrs) or (
obj and not con):
# could be a GET /info request or something made up by some
# other middleware; get out of the way.
return self.app
if not constraints.check_utf8(req.path_info):
return swob.HTTPPreconditionFailed(
body='Invalid UTF8 or contains NULL')
try:
# Check account to see if this is a bimodal-access account or
# not. ProxyFS is the sole source of truth on this matter.
is_bimodal, proxyfsd_addrinfo = self._unpack_owning_proxyfs(req)
if not is_bimodal and proxyfsd_addrinfo is None:
# This is a plain old Swift account, so we get out of the
# way.
return self.app
elif proxyfsd_addrinfo is None:
# This is a bimodal account, but there is currently no
# proxyfsd responsible for it. This can happen during a move
# of a ProxyFS volume from one proxyfsd to another, and
# should be cleared up quickly. Nevertheless, all we can do
# here is return an error to the client.
return swob.HTTPServiceUnavailable(request=req)
if con in ('.', '..') or con and len(con) > NAME_MAX:
if req.method == 'PUT' and not obj:
return swob.HTTPBadRequest(
request=req,
body='Container name cannot be "." or "..", '
'or be more than 255 bytes long')
else:
return swob.HTTPNotFound(request=req)
elif obj and any(p in ('', '.', '..') or len(p) > NAME_MAX
for p in obj.split('/')):
if req.method == 'PUT':
return swob.HTTPBadRequest(
request=req,
body='No path component may be "", ".", "..", or '
'more than 255 bytes long')
else:
return swob.HTTPNotFound(request=req)
ctx = RequestContext(req, proxyfsd_addrinfo, acc, con, obj)
is_bypass_request = (
proxyfs_path and
self.bypass_mode in ('read-only', 'read-write') and
req.method != "PROXYFS")
# For requests that we make to Swift, we have to ensure that any
# auth callback is not present in the WSGI environment.
# Authorization typically uses the object path as an input, and
# letting that loose on log-segment object paths is not likely to
# end well.
#
# We are careful to restore any auth to the environment when we
# exit, though, as this lets authentication work on the segments of
# Swift large objects. The SLO or DLO middleware is to the left of
# us in the pipeline, and it will make multiple subrequests, and
# auth is required for each one.
with pop_and_restore(req.environ,
'swift.authorize') as auth_cb, \
pop_and_restore(req.environ, 'swift.authorize_override',
False):
if auth_cb and req.environ.get('swift.source') != 'PFS':
if not is_bypass_request:
req.acl = self._fetch_appropriate_acl(ctx)
# else, user needs to be swift owner
denial_response = auth_cb(ctx.req)
if denial_response:
return denial_response
# Authorization succeeded
method = req.method
# Check whether we ought to bypass. Note that swift_owner
# won't be set until we call authorize
if is_bypass_request and req.environ.get('swift_owner'):
if self.bypass_mode == 'read-only' and method not in (
'GET', 'HEAD'):
return swob.HTTPMethodNotAllowed(request=req)
return self.app
# Otherwise, dispatch to a helper method
if method == 'GET' and obj:
resp = self.get_object(ctx)
elif method == 'HEAD' and obj:
resp = self.head_object(ctx)
elif method == 'PUT' and obj:
resp = self.put_object(ctx)
elif method == 'POST' and obj:
resp = self.post_object(ctx)
elif method == 'DELETE' and obj:
resp = self.delete_object(ctx)
elif method == 'COALESCE' and obj:
resp = self.coalesce_object(ctx, auth_cb)
elif method == 'GET' and con:
resp = self.get_container(ctx)
elif method == 'HEAD' and con:
resp = self.head_container(ctx)
elif method == 'PUT' and con:
resp = self.put_container(ctx)
elif method == 'POST' and con:
resp = self.post_container(ctx)
elif method == 'DELETE' and con:
resp = self.delete_container(ctx)
elif method == 'GET':
resp = self.get_account(ctx)
elif method == 'HEAD':
resp = self.head_account(ctx)
elif method == 'PROXYFS' and not con and not obj:
if not (req.environ.get('swift_owner') and
self.bypass_mode in ('read-only', 'read-write')):
return swob.HTTPMethodNotAllowed(request=req)
resp = self.proxy_rpc(ctx)
# account PUT, POST, and DELETE are just passed
# through to Swift
else:
return self.app
if req.method in ('GET', 'HEAD'):
resp.headers["Accept-Ranges"] = "bytes"
if not req.environ.get('swift_owner', False):
for key in SWIFT_OWNER_HEADERS:
if key in resp.headers:
del resp.headers[key]
return resp
# Provide some top-level exception handling and logging for
# exceptional exceptions. Non-exceptional exceptions will be handled
# closer to where they were raised.
except utils.RpcTimeout as err:
self.logger.error("RPC timeout: %s", err)
return swob.HTTPInternalServerError(
request=req,
headers={"Content-Type": "text/plain"},
body="RPC timeout: {0}".format(err))
except utils.RpcError as err:
self.logger.error(
"RPC error: %s; consulting proxyfsd logs may be helpful", err)
return swob.HTTPInternalServerError(
request=req,
headers={"Content-Type": "text/plain"},
body="RPC error: {0}".format(err))
def _fetch_appropriate_acl(self, ctx):
"""
Return the appropriate ACL to handle authorization for the given
request. This method may make a subrequest if necessary.
The ACL will be one of three things:
* for object/container GET/HEAD, it's the container's read ACL
(X-Container-Read).
* for object PUT/POST/DELETE, it's the container's write ACL
(X-Container-Write).
* for object COALESCE, it's the container's write ACL
(X-Container-Write). Separately, we *also* need to authorize
all the "segments" against both read *and* write ACLs
(see coalesce_object).
* for all other requests, it's None
Some authentication systems, of course, also have account-level
ACLs. However, in the fine tradition of having two possible courses
of action and choosing both, the loading of the account-level ACLs
is handled by the auth callback.
"""
bimodal_checker = ctx.req.environ[utils.ENV_BIMODAL_CHECKER]
if ctx.req.method in ('GET', 'HEAD') and ctx.container_name:
container_info = get_container_info(
ctx.req.environ, bimodal_checker,
swift_source="PFS")
return container_info['read_acl']
elif ctx.object_name and ctx.req.method in (
'PUT', 'POST', 'DELETE', 'COALESCE'):
container_info = get_container_info(
ctx.req.environ, bimodal_checker,
swift_source="PFS")
return container_info['write_acl']
else:
return None
def proxy_rpc(self, ctx):
req = ctx.req
ct = req.headers.get('Content-Type')
if not ct or ct.split(';', 1)[0].strip() != 'application/json':
msg = 'RPC body must have Content-Type application/json'
if ct:
msg += ', not %s' % ct
return swob.Response(status=415, request=req, body=msg)
cl = req.content_length
if cl is None:
if req.headers.get('Transfer-Encoding') != 'chunked':
return swob.HTTPLengthRequired(request=req)
json_payloads = req.body_file.read(MAX_RPC_BODY_SIZE).split(b'\n')
if req.body_file.read(1):
return swob.HTTPRequestEntityTooLarge(request=req)
else:
if cl > MAX_RPC_BODY_SIZE:
return swob.HTTPRequestEntityTooLarge(request=req)
json_payloads = req.body.split(b'\n')
if self.bypass_mode == 'read-write':
allowed_methods = rpc.allow_read_write
else:
allowed_methods = rpc.allow_read_only
payloads = []
for i, json_payload in enumerate(x for x in json_payloads
if x.strip()):
try:
payload = json.loads(json_payload.decode('utf8'))
if payload['jsonrpc'] != '2.0':
raise ValueError(
'expected JSONRPC 2.0, got %s' % payload['jsonrpc'])
if not isinstance(payload['method'], six.string_types):
raise ValueError(
'expected string, got %s' % type(payload['method']))
if payload['method'] not in allowed_methods:
raise ValueError(
'method %s not allowed' % payload['method'])
if not (isinstance(payload['params'], list) and
len(payload['params']) == 1 and
isinstance(payload['params'][0], dict)):
raise ValueError
except (TypeError, KeyError, ValueError) as err:
return swob.HTTPBadRequest(
request=req,
body=(b'Could not parse/validate JSON payload #%d %s: %s' %
(i, json_payload, str(err).encode('utf8'))))
payloads.append(payload)
# TODO: consider allowing more than one payload per request
if len(payloads) != 1:
return swob.HTTPBadRequest(
request=req,
body='Expected exactly one JSON payload')
# Our basic validation is done; spin up a connection and send requests
client = utils.JsonRpcClient(ctx.proxyfsd_addrinfo)
payload = payloads[0]
try:
if 'id' not in payload:
payload['id'] = str(uuid.uuid4())
payload['params'][0]['AccountName'] = ctx.account_name
response = client.call(payload, self.proxyfsd_rpc_timeout,
raise_on_rpc_error=False)
except eventlet.Timeout:
self.logger.debug(
"Timeout (%.6fs) communicating with %s, calling %s",
self.proxyfsd_rpc_timeout, ctx.proxyfsd_addrinfo,
payloads[0]['method'])
return swob.HTTPBadGateway(request=req)
except socket.error as err:
self.logger.debug("Error communicating with %r: %s.",
ctx.proxyfsd_addrinfo, err)
return swob.HTTPBadGateway(request=req)
else:
return swob.HTTPOk(
request=req, body=json.dumps(response),
headers={'Content-Type': 'application/json'})
def get_account(self, ctx):
req = ctx.req
limit = self._get_listing_limit(
req, self._default_account_listing_limit())
marker = req.params.get('marker', '')
end_marker = req.params.get('end_marker', '')
get_account_request = rpc.get_account_request(
urllib_parse.unquote(req.path), marker, end_marker, limit)
# If the account does not exist, then __call__ just falls through to
# self.app, so we never even get here. If we got here, then the
# account does exist, so we don't have to worry about not-found
# versus other-error here. Any error counts as "completely busted".
#
# We let the top-level RpcError handler catch this.
get_account_response = self.rpc_call(ctx, get_account_request)
account_mtime, account_entries = rpc.parse_get_account_response(
get_account_response)
resp_content_type = swift_code.get_listing_content_type(req)
if resp_content_type == "text/plain":
body = self._plaintext_account_get_response(account_entries)
elif resp_content_type == "application/json":
body = self._json_account_get_response(account_entries)
elif resp_content_type.endswith("/xml"):
body = self._xml_account_get_response(account_entries,
ctx.account_name)
else:
raise Exception("unexpected content type %r" %
(resp_content_type,))
resp_class = swob.HTTPOk if body else swob.HTTPNoContent
resp = resp_class(content_type=resp_content_type, charset="utf-8",
request=req, body=body)
# For accounts, the meta/sysmeta is stored in the account DB in
# Swift, not in ProxyFS.
account_info = get_account_info(
req.environ, req.environ[utils.ENV_BIMODAL_CHECKER],
swift_source="PFS")
for key, value in account_info["meta"].items():
resp.headers["X-Account-Meta-" + key] = value
for key, value in account_info["sysmeta"].items():
resp.headers["X-Account-Sysmeta-" + key] = value
acc_acl = resp.headers.get("X-Account-Sysmeta-Core-Access-Control")
parsed_acc_acl = parse_acl(version=2, data=acc_acl)
if parsed_acc_acl:
acc_acl = format_acl(version=2, acl_dict=parsed_acc_acl)
resp.headers["X-Account-Access-Control"] = acc_acl
resp.headers["X-Timestamp"] = x_timestamp_from_epoch_ns(account_mtime)
# Pretend the object counts are 0 and that all containers have the
# default storage policy. Until (a) containers have some support for
# the X-Storage-Policy header, and (b) we get container metadata
# back from Server.RpcGetAccount, this is the best we can do.
policy = self._default_storage_policy()
resp.headers["X-Account-Object-Count"] = "0"
resp.headers["X-Account-Bytes-Used"] = "0"
resp.headers["X-Account-Container-Count"] = str(len(account_entries))
resp.headers["X-Account-Storage-Policy-%s-Object-Count" % policy] = "0"
resp.headers["X-Account-Storage-Policy-%s-Bytes-Used" % policy] = "0"
k = "X-Account-Storage-Policy-%s-Container-Count" % policy
resp.headers[k] = str(len(account_entries))
return resp
def _plaintext_account_get_response(self, account_entries):
chunks = []
for entry in account_entries:
chunks.append(entry["Basename"].encode('utf-8'))
chunks.append(b"\n")
return b''.join(chunks)
def _json_account_get_response(self, account_entries):
json_entries = []
for entry in account_entries:
json_entry = {
"name": entry["Basename"],
# Older versions of proxyfsd only returned mtime, but ctime
# better reflects the semantics of X-Timestamp
"last_modified": iso_timestamp_from_epoch_ns(entry.get(
"AttrChangeTime", entry["ModificationTime"])),
# proxyfsd can't compute these without recursively walking
# the entire filesystem, so rather than have a built-in DoS
# attack, we just put out zeros here.
#
# These stats aren't particularly up-to-date in Swift
# anyway, so there aren't going to be working clients that
# rely on them for accuracy.
"count": 0,
"bytes": 0}
json_entries.append(json_entry)
return json.dumps(json_entries)
def _xml_account_get_response(self, account_entries, account_name):
root_node = ET.Element('account', name=account_name)
for entry in account_entries:
container_node = ET.Element('container')
name_node = ET.Element('name')
name_node.text = entry["Basename"]
container_node.append(name_node)
count_node = ET.Element('count')
count_node.text = '0'
container_node.append(count_node)
bytes_node = ET.Element('bytes')
bytes_node.text = '0'
container_node.append(bytes_node)
lm_node = ET.Element('last_modified')
# Older versions of proxyfsd only returned mtime, but ctime
# better reflects the semantics of X-Timestamp
lm_node.text = iso_timestamp_from_epoch_ns(entry.get(
"AttrChangeTime", entry["ModificationTime"]))
container_node.append(lm_node)
root_node.append(container_node)
buf = BytesIO()
ET.ElementTree(root_node).write(
buf, encoding="utf-8", xml_declaration=True)
return buf.getvalue()
def head_account(self, ctx):
req = ctx.req
resp = req.get_response(self.app)
resp.headers["ProxyFS-Enabled"] = "yes"
return resp
def head_container(self, ctx):
head_request = rpc.head_request(urllib_parse.unquote(ctx.req.path))
try:
head_response = self.rpc_call(ctx, head_request)
except utils.RpcError as err:
if err.errno == pfs_errno.NotFoundError:
return swob.HTTPNotFound(request=ctx.req)
else:
raise
raw_metadata, mtime_ns, _, _, _, _ = rpc.parse_head_response(
head_response)
metadata = deserialize_metadata(raw_metadata)
resp = swob.HTTPNoContent(request=ctx.req, headers=metadata)
resp.headers["X-Timestamp"] = x_timestamp_from_epoch_ns(mtime_ns)
resp.headers["Last-Modified"] = last_modified_from_epoch_ns(mtime_ns)
resp.headers["Content-Type"] = swift_code.get_listing_content_type(
ctx.req)
resp.charset = "utf-8"
self._add_required_container_headers(resp)
return resp
def put_container(self, ctx):
req = ctx.req
container_path = urllib_parse.unquote(req.path)
err = constraints.check_metadata(req, 'container')
if err:
return err
err = swift_code.clean_acls(req)
if err:
return err
new_metadata = extract_container_metadata_from_headers(req)
# Check name's length. The account name is checked separately (by
# Swift, not by this middleware) and has its own limit; we are
# concerned only with the container portion of the path.
_, _, container_name, _ = utils.parse_path(req.path)
maxlen = self._max_container_name_length()
if len(container_name) > maxlen:
return swob.HTTPBadRequest(
request=req,
body=('Container name length of %d longer than %d' %
(len(container_name), maxlen)))
try:
head_response = self.rpc_call(
ctx, rpc.head_request(container_path))
raw_old_metadata, _, _, _, _, _ = rpc.parse_head_response(
head_response)
except utils.RpcError as err:
if err.errno == pfs_errno.NotFoundError:
clear_info_cache(None, ctx.req.environ, ctx.account_name,
container=ctx.container_name)
self.rpc_call(ctx, rpc.put_container_request(
container_path,
"",
serialize_metadata({
k: v for k, v in new_metadata.items() if v})))
return swob.HTTPCreated(request=req)
else:
raise
old_metadata = deserialize_metadata(raw_old_metadata)
merged_metadata = merge_container_metadata(
old_metadata, new_metadata)
raw_merged_metadata = serialize_metadata(merged_metadata)
clear_info_cache(None, ctx.req.environ, ctx.account_name,
container=ctx.container_name)
self.rpc_call(ctx, rpc.put_container_request(
container_path, raw_old_metadata, raw_merged_metadata))
return swob.HTTPAccepted(request=req)
def post_container(self, ctx):
req = ctx.req
container_path = urllib_parse.unquote(req.path)
err = constraints.check_metadata(req, 'container')
if err:
return err
err = swift_code.clean_acls(req)
if err:
return err
new_metadata = extract_container_metadata_from_headers(req)
try:
head_response = self.rpc_call(
ctx, rpc.head_request(container_path))
raw_old_metadata, _, _, _, _, _ = rpc.parse_head_response(
head_response)
except utils.RpcError as err:
if err.errno == pfs_errno.NotFoundError:
return swob.HTTPNotFound(request=req)
else:
raise
old_metadata = deserialize_metadata(raw_old_metadata)
merged_metadata = merge_container_metadata(
old_metadata, new_metadata)
raw_merged_metadata = serialize_metadata(merged_metadata)
# Check that we're still within overall limits
req.headers.clear()
req.headers.update(merged_metadata)
err = constraints.check_metadata(req, 'container')
if err:
return err
# reset it...
req.headers.clear()
req.headers.update(new_metadata)
clear_info_cache(None, req.environ, ctx.account_name,
container=ctx.container_name)
self.rpc_call(ctx, rpc.post_request(
container_path, raw_old_metadata, raw_merged_metadata))
return swob.HTTPNoContent(request=req)
def delete_container(self, ctx):
# Turns out these are the same RPC with the same error handling, so
# why not?
clear_info_cache(None, ctx.req.environ, ctx.account_name,
container=ctx.container_name)
return self.delete_object(ctx)
def _get_listing_limit(self, req, default_limit):
raw_limit = req.params.get('limit')
if raw_limit is not None:
try:
limit = int(raw_limit)
except ValueError:
limit = default_limit
if limit > default_limit:
err = "Maximum limit is %d" % default_limit
raise swob.HTTPPreconditionFailed(request=req, body=err)
elif limit < 0:
limit = default_limit
else:
limit = default_limit
return limit
def _max_container_name_length(self):
proxy_info = self._proxy_info()
swift_max = proxy_info["swift"]["max_container_name_length"]
return min(swift_max, NAME_MAX)
def _default_account_listing_limit(self):
proxy_info = self._proxy_info()
return proxy_info["swift"]["account_listing_limit"]
def _default_container_listing_limit(self):
proxy_info = self._proxy_info()
return proxy_info["swift"]["container_listing_limit"]
def _default_storage_policy(self):
proxy_info = self._proxy_info()
# Swift guarantees that exactly one default storage policy exists.
return [pol["name"]
for pol in proxy_info["swift"]["policies"]
if pol.get("default", False)][0]
def _proxy_info(self):
if self._cached_proxy_info is None:
req = swob.Request.blank("/info")
resp = req.get_response(self.app)
self._cached_proxy_info = json.loads(resp.body)
return self._cached_proxy_info
def _add_required_container_headers(self, resp):
resp.headers.update(CONTAINER_HEADERS_WE_LIE_ABOUT)
resp.headers["X-Storage-Policy"] = self._default_storage_policy()
def get_container(self, ctx):
req = ctx.req
if req.environ.get('swift.source') in ('DLO', 'SW', 'VW'):
# Middlewares typically want json, but most *assume* it following
# https://github.com/openstack/swift/commit/4806434
# TODO: maybe replace with `if req.environ.get('swift.source'):` ??
params = req.params
params['format'] = 'json'
req.params = params
limit = self._get_listing_limit(
req, self._default_container_listing_limit())
marker = req.params.get('marker', '')
end_marker = req.params.get('end_marker', '')
prefix = req.params.get('prefix', '')
delimiter = req.params.get('delimiter', '')
# For now, we only support "/" as a delimiter
if delimiter not in ("", "/"):
return swob.HTTPBadRequest(request=req)
get_container_request = rpc.get_container_request(
urllib_parse.unquote(req.path),
marker, end_marker, limit, prefix, delimiter)
try:
get_container_response = self.rpc_call(ctx, get_container_request)
except utils.RpcError as err:
if err.errno == pfs_errno.NotFoundError:
return swob.HTTPNotFound(request=req)
else:
raise
container_ents, raw_metadata, mtime_ns = \
rpc.parse_get_container_response(get_container_response)
resp_content_type = swift_code.get_listing_content_type(req)
resp = swob.HTTPOk(content_type=resp_content_type, charset="utf-8",
request=req)
if resp_content_type == "text/plain":
resp.body = self._plaintext_container_get_response(
container_ents)
elif resp_content_type == "application/json":
resp.body = self._json_container_get_response(
container_ents, ctx.account_name, delimiter)
elif resp_content_type.endswith("/xml"):
resp.body = self._xml_container_get_response(
container_ents, ctx.account_name, ctx.container_name)
else:
raise Exception("unexpected content type %r" %
(resp_content_type,))
metadata = deserialize_metadata(raw_metadata)
resp.headers.update(metadata)
self._add_required_container_headers(resp)
resp.headers["X-Timestamp"] = x_timestamp_from_epoch_ns(mtime_ns)
resp.headers["Last-Modified"] = last_modified_from_epoch_ns(mtime_ns)
return resp
def _plaintext_container_get_response(self, container_entries):
chunks = []
for ent in container_entries:
chunks.append(ent["Basename"].encode('utf-8'))
chunks.append(b"\n")
return b''.join(chunks)
def _json_container_get_response(self, container_entries, account_name,
delimiter):
json_entries = []
for ent in container_entries:
name = ent["Basename"]
size = ent["FileSize"]
# Older versions of proxyfsd only returned mtime, but ctime
# better reflects the semantics of X-Timestamp
last_modified = iso_timestamp_from_epoch_ns(ent.get(
"AttrChangeTime", ent["ModificationTime"]))
obj_metadata = deserialize_metadata(ent["Metadata"])
unmung_etags(obj_metadata, ent["NumWrites"])
content_type = swift_code.wsgi_to_str(
obj_metadata.get("Content-Type"))
if content_type is None:
content_type = guess_content_type(ent["Basename"],
ent["IsDir"])
content_type, swift_bytes = content_type.partition(
';swift_bytes=')[::2]
etag = best_possible_etag(
obj_metadata, account_name,
ent["InodeNumber"], ent["NumWrites"], is_dir=ent["IsDir"],
container_listing=True)
json_entry = {
"name": name,
"bytes": int(swift_bytes or size),
"content_type": content_type,
"hash": etag,
"last_modified": last_modified}
json_entries.append(json_entry)
if delimiter != "" and "IsDir" in ent and ent["IsDir"]:
json_entries.append({"subdir": ent["Basename"] + delimiter})
return json.dumps(json_entries).encode('ascii')
    # TODO: This method is usually not reachable, because at some point in the
# pipeline, we convert JSON to XML. We should either remove this or update
# it to support delimiters in case it's really needed.
# Same thing probably applies to plain text responses.
def _xml_container_get_response(self, container_entries, account_name,
container_name):
root_node = ET.Element('container', name=container_name)
for container_entry in container_entries:
obj_name = container_entry['Basename']
obj_metadata = deserialize_metadata(container_entry["Metadata"])
unmung_etags(obj_metadata, container_entry["NumWrites"])
content_type = swift_code.wsgi_to_str(
obj_metadata.get("Content-Type"))
if content_type is None:
content_type = guess_content_type(
container_entry["Basename"], container_entry["IsDir"])
content_type, swift_bytes = content_type.partition(
';swift_bytes=')[::2]
etag = best_possible_etag(
obj_metadata, account_name,
container_entry["InodeNumber"],
container_entry["NumWrites"],
is_dir=container_entry["IsDir"])
container_node = ET.Element('object')
name_node = ET.Element('name')
name_node.text = obj_name
container_node.append(name_node)
hash_node = ET.Element('hash')
hash_node.text = etag
container_node.append(hash_node)
bytes_node = ET.Element('bytes')
bytes_node.text = swift_bytes or str(container_entry["FileSize"])
container_node.append(bytes_node)
ct_node = ET.Element('content_type')
if six.PY2:
ct_node.text = content_type.decode('utf-8')
else:
ct_node.text = content_type
container_node.append(ct_node)
lm_node = ET.Element('last_modified')
# Older versions of proxyfsd only returned mtime, but ctime
# better reflects the semantics of X-Timestamp
lm_node.text = iso_timestamp_from_epoch_ns(container_entry.get(
"AttrChangeTime", container_entry["ModificationTime"]))
container_node.append(lm_node)
root_node.append(container_node)
buf = BytesIO()
ET.ElementTree(root_node).write(
buf, encoding="utf-8", xml_declaration=True)
return buf.getvalue()
def put_object(self, ctx):
req = ctx.req
# Make sure the (virtual) container exists
#
# We have to dig out an earlier-in-the-chain middleware here because
# Swift's get_container_info() function has an internal whitelist of
# environ keys that it'll keep, and our is-bimodal stuff isn't
# included. To work around this, we pass in the middleware chain
# starting with the bimodal checker so it can repopulate our environ
# keys. At least the RpcIsBimodal response is cached, so this
# shouldn't be too slow.
container_info = get_container_info(
req.environ, req.environ[utils.ENV_BIMODAL_CHECKER],
swift_source="PFS")
if not 200 <= container_info["status"] < 300:
return swob.HTTPNotFound(request=req)
if 'x-timestamp' in req.headers:
try:
req_timestamp = Timestamp(req.headers['X-Timestamp'])
except ValueError:
return swob.HTTPBadRequest(
request=req, content_type='text/plain',
body='X-Timestamp should be a UNIX timestamp float value; '
'was %r' % req.headers['x-timestamp'])
req.headers['X-Timestamp'] = req_timestamp.internal
else:
req.headers['X-Timestamp'] = Timestamp(time.time()).internal
if not req.headers.get('Content-Type'):
req.headers['Content-Type'] = guess_content_type(
req.path, is_dir=ctx.object_name.endswith('/'))
err = constraints.check_object_creation(req, ctx.object_name)
if err:
return err
if (req.headers['Content-Type'] == DIRECTORY_CONTENT_TYPE and
req.headers.get('Content-Length') == '0'):
return self.put_object_as_directory(ctx)
else:
return self.put_object_as_file(ctx)
def put_object_as_directory(self, ctx):
"""
Create or update an object as a directory.
"""
req = ctx.req
request_etag = req.headers.get("ETag", "")
if should_validate_etag(request_etag) and \
request_etag != EMPTY_OBJECT_ETAG:
return swob.HTTPUnprocessableEntity(request=req)
path = urllib_parse.unquote(req.path)
obj_metadata = extract_object_metadata_from_headers(req.headers)
# mung the passed etags, if any (NumWrites for a directory is
# always zero)
mung_etags(obj_metadata, request_etag, 0)
rpc_req = rpc.middleware_mkdir_request(
path, serialize_metadata(obj_metadata))
rpc_resp = self.rpc_call(ctx, rpc_req)
mtime_ns, inode, num_writes = rpc.parse_middleware_mkdir_response(
rpc_resp)
# currently best_possible_etag() returns EMPTY_OBJECT_ETAG for
# all directories, but that might change in the future.
# unmung the obj_metadata so best_possible_etag() can use it
# if its behavior changes (note that num_writes is forced to 0).
unmung_etags(obj_metadata, 0)
resp_headers = {
"Content-Type": DIRECTORY_CONTENT_TYPE,
"Last-Modified": last_modified_from_epoch_ns(mtime_ns)}
resp_headers["ETag"] = best_possible_etag(
obj_metadata, ctx.account_name, inode, 0, is_dir=True)
return swob.HTTPCreated(request=req, headers=resp_headers, body="")
def put_object_as_file(self, ctx):
"""
ProxyFS has the concepts of "virtual" and "physical" path. The
virtual path is the one that the user sees, i.e. /v1/acc/con/obj.
A physical path is the location of an underlying log-segment
object, e.g. /v1/acc/ContainerPoolName_1/00000000501B7321.
An object in ProxyFS is backed by one or more physical objects. In
the case of an object PUT, we ask proxyfsd for one or more suitable
physical-object names for the object, write the data there
ourselves, then tell proxyfsd what we've done.
"""
req = ctx.req
virtual_path = urllib_parse.unquote(req.path)
put_location_req = rpc.put_location_request(virtual_path)
request_etag = req.headers.get("ETag", "")
hasher = hashlib.md5()
wsgi_input = SnoopingInput(req.environ["wsgi.input"], hasher.update)
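        # SnoopingInput passes every chunk read from the client through
        # hasher.update as a side effect, so the upload's MD5 is computed
        # while the data streams into log segments below.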
# TODO: when the upload size is known (i.e. Content-Length is set),
# ask for enough locations up front that we can consume the whole
# request with only one call to RpcPutLocation(s).
# TODO: ask to validate the path a bit better; if we are putting an
# object at /v1/a/c/kitten.png/whoops.txt (where kitten.png is a
# file), we should probably catch that before reading any input so
# that, if the client sent "Expect: 100-continue", we can give them
# an error early.
physical_path_gen = (
rpc.parse_put_location_response(
self.rpc_call(ctx, put_location_req))
for _ in itertools.repeat(None))
error_response = swift_code.check_object_creation(req)
if error_response:
return error_response
# Since this upload can be arbitrarily large, we split it across
# multiple log segments.
log_segments = []
i = 0
while True:
# First, make sure there's more data to read from the client. No
# sense allocating log segments and whatnot if we're not going
# to use them.
subinput = LimitedInput(wsgi_input, self.max_log_segment_size)
if not subinput.has_more_to_read:
break
# Ask ProxyFS for the next log segment we can use
phys_path = next(physical_path_gen)
# Set up the subrequest with the bare minimum of useful headers.
# This lets us avoid headers that will break the PUT immediately
# (ETag), headers that may complicate GETs of this object
# (X-Static-Large-Object, X-Object-Manifest), things that will
# break the GET some time in the future (X-Delete-At,
# X-Delete-After), and things that take up xattr space for no
# real gain (user metadata).
subreq = swob.Request.blank(phys_path)
subreq.method = 'PUT'
subreq.environ['wsgi.input'] = subinput
subreq.headers["Transfer-Encoding"] = "chunked"
# This ensures that (a) every subrequest has its own unique
# txid, and (b) a log search for the txid in the response finds
# all of the subrequests.
trans_id = req.headers.get('X-Trans-Id')
if trans_id:
subreq.headers['X-Trans-Id'] = trans_id + ("-%03x" % i)
# Actually put one chunk of the data into Swift
subresp = subreq.get_response(self.app)
if not 200 <= subresp.status_int < 299:
# Something went wrong; may as well bail out now
return subresp
log_segments.append((phys_path, subinput.bytes_read))
i += 1
if should_validate_etag(request_etag) and \
hasher.hexdigest() != request_etag:
return swob.HTTPUnprocessableEntity(request=req)
# All the data is now in Swift; we just have to tell proxyfsd
        # about it. Mung any passed ETag values to include the
        # number of writes to the file (basically, the object's update
        # count) and supply the MD5 hash computed here, which becomes the
        # object's future ETag value until the object is updated.
obj_metadata = extract_object_metadata_from_headers(req.headers)
mung_etags(obj_metadata, hasher.hexdigest(), len(log_segments))
put_complete_req = rpc.put_complete_request(
virtual_path, log_segments, serialize_metadata(obj_metadata))
try:
mtime_ns, inode, __writes = rpc.parse_put_complete_response(
self.rpc_call(ctx, put_complete_req))
except utils.RpcError as err:
# We deliberately don't try to clean up our log segments on
# failure. ProxyFS is responsible for cleaning up unreferenced
# log segments.
if err.errno == pfs_errno.NotEmptyError:
return swob.HTTPConflict(
request=req,
headers={"Content-Type": "text/plain"},
body="This is a non-empty directory")
elif err.errno == pfs_errno.NotDirError:
return swob.HTTPConflict(
request=req,
headers={"Content-Type": "text/plain"},
body="Path element is a file, not a directory")
else:
# punt to top-level error handler
raise
# For reference, an object PUT response to plain Swift looks like:
# HTTP/1.1 201 Created
# Last-Modified: Thu, 08 Dec 2016 22:51:13 GMT
# Content-Length: 0
# Etag: 9303a8d23189779e71f347032d633327
# Content-Type: text/html; charset=UTF-8
# X-Trans-Id: tx7b3e2b88df2f4975a5476-005849e3e0dfw1
# Date: Thu, 08 Dec 2016 22:51:12 GMT
#
# We get Content-Length, X-Trans-Id, and Date for free, but we need
# to fill in the rest.
resp_headers = {
"Etag": hasher.hexdigest(),
"Content-Type": guess_content_type(req.path, False),
"Last-Modified": last_modified_from_epoch_ns(mtime_ns)}
return swob.HTTPCreated(request=req, headers=resp_headers, body="")
def post_object(self, ctx):
req = ctx.req
err = constraints.check_metadata(req, 'object')
if err:
return err
path = urllib_parse.unquote(req.path)
new_metadata = extract_object_metadata_from_headers(req.headers)
try:
head_response = self.rpc_call(ctx, rpc.head_request(path))
raw_old_metadata, mtime, _, _, inode_number, _ = \
rpc.parse_head_response(head_response)
except utils.RpcError as err:
if err.errno in (pfs_errno.NotFoundError, pfs_errno.NotDirError):
return swob.HTTPNotFound(request=req)
else:
raise
# There is no need to call unmung_etags() before the merge and
# mung_etags() after because the merge cannot change the several
# possible ETag headers.
#
# This might be an opportunity to drop an ETAG header that has
# become stale due to num_writes changing, but that does not
# seem important to address.
old_metadata = deserialize_metadata(raw_old_metadata)
merged_metadata = merge_object_metadata(old_metadata, new_metadata)
raw_merged_metadata = serialize_metadata(merged_metadata)
self.rpc_call(ctx, rpc.post_request(
path, raw_old_metadata, raw_merged_metadata))
resp = swob.HTTPAccepted(request=req, body="")
return resp
def get_object(self, ctx):
req = ctx.req
byteranges = req.range.ranges if req.range else ()
try:
object_response = self.rpc_call(ctx, rpc.get_object_request(
urllib_parse.unquote(req.path), byteranges))
except utils.RpcError as err:
if err.errno in (pfs_errno.NotFoundError, pfs_errno.NotDirError):
return swob.HTTPNotFound(request=req)
elif err.errno == pfs_errno.IsDirError:
return swob.HTTPOk(
request=req, body="",
headers={"Content-Type": DIRECTORY_CONTENT_TYPE,
"ETag": EMPTY_OBJECT_ETAG})
else:
# punt to top-level exception handler
raise
(read_plan, raw_metadata, size, mtime_ns,
is_dir, ino, num_writes, lease_id) = \
rpc.parse_get_object_response(object_response)
metadata = deserialize_metadata(raw_metadata)
unmung_etags(metadata, num_writes)
headers = swob.HeaderKeyDict(metadata)
if "Content-Type" not in headers:
headers["Content-Type"] = guess_content_type(req.path, is_dir)
else:
headers['Content-Type'] = headers['Content-Type'].split(
';swift_bytes=')[0]
headers["Accept-Ranges"] = "bytes"
headers["Last-Modified"] = last_modified_from_epoch_ns(
mtime_ns)
headers["X-Timestamp"] = x_timestamp_from_epoch_ns(
mtime_ns)
headers["Etag"] = best_possible_etag(
headers, ctx.account_name, ino, num_writes, is_dir=is_dir)
get_read_plan = req.params.get("get-read-plan", "no")
if get_read_plan == "":
get_read_plan = "yes"
if self.bypass_mode != 'off' and req.environ.get('swift_owner') and \
config_true_value(get_read_plan):
headers.update({
                # Flag that pfs_middleware correctly interpreted this request
"X-ProxyFS-Read-Plan": "True",
# Stash the "real" content type...
"X-Object-Content-Type": headers["Content-Type"],
# ... so we can indicate that *this* data is coming out JSON
"Content-Type": "application/json",
# Also include the total object size
# (since the read plan respects Range requests)
"X-Object-Content-Length": size,
})
return swob.HTTPOk(request=req, body=json.dumps(read_plan),
headers=headers)
if size > 0 and read_plan is None:
headers["Content-Range"] = "bytes */%d" % size
return swob.HTTPRequestedRangeNotSatisfiable(
request=req, headers=headers)
# NB: this is a size-0 queue, so it acts as a channel: a put()
# blocks until another greenthread does a get(). This lets us use it
# for (very limited) bidirectional communication.
channel = eventlet.queue.Queue(0)
eventlet.spawn_n(self._keep_lease_alive, ctx, channel, lease_id)
listing_iter = listing_iter_from_read_plan(read_plan)
# Make sure that nobody (like our __call__ method) messes with this
# environment once we've started. Otherwise, the auth callback may
# reappear, causing log-segment GET requests to fail. This may be
# seen with container ACLs; since the underlying container name
# differs from the user-presented one, without copying the
# environment, all object GET requests for objects in a public
# container would fail.
copied_req = swob.Request(req.environ.copy())
# Ideally we'd wrap seg_iter instead, but swob.Response relies on
# its app_iter supporting certain methods for conditional responses
# to work, and forwarding all those methods through the wrapper is
# prone to failure whenever a new method is added.
#
# Wrapping the listing iterator is just as good. After
# SegmentedIterable exhausts it, we can safely release the lease.
def done_with_object_get():
channel.put("you can stop now")
# It's not technically necessary for us to wait for the other
# greenthread here; we could use one-way notification. However,
# doing things this way lets us ensure that, once this function
# returns, there are no more background actions taken by the
# greenthread. This makes testing a lot easier; we can call the
# middleware, let it return, and then assert things. Were we to
# use a fire-and-forget style, we'd never be sure when all the
# RPCs had been called, and the tests would end up flaky.
channel.get()
wrapped_listing_iter = iterator_posthook(
listing_iter, done_with_object_get)
seg_iter = swift_code.SegmentedIterable(
copied_req, self.zero_filler_app, wrapped_listing_iter,
self.max_get_time,
self.logger, 'PFS', 'PFS',
name=req.path)
resp = swob.HTTPOk(app_iter=seg_iter, conditional_response=True,
request=req,
headers=headers,
content_length=size)
# Support conditional if-match/if-none-match requests for SLOs
resp._conditional_etag = swift_code.resolve_etag_is_at_header(
req, resp.headers)
return resp
def _keep_lease_alive(self, ctx, channel, lease_id):
keep_going = [True]
lease_error = [False]
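        # keep_going and lease_error are single-element lists so that the
        # nested renew() function can mutate them; this avoids "nonlocal",
        # which is unavailable on Python 2.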
def renew():
if lease_error[0]:
return
try:
self.rpc_call(ctx, rpc.renew_lease_request(lease_id))
except (utils.RpcError, utils.RpcTimeout):
# If there's an error renewing the lease, stop pestering
# proxyfsd about it. We'll keep serving the object
# anyway, and we'll just hope no log segments vanish
# while we do it.
keep_going[0] = False
lease_error[0] = True
# It could have been a while since this greenthread was created.
# Let's renew first just to be sure.
renew()
while keep_going[0]:
try:
channel.get(block=True, timeout=LEASE_RENEWAL_INTERVAL)
# When we get a message here, we should stop.
keep_going[0] = False
except eventlet.queue.Empty:
# Nobody told us we're done, so renew the lease and loop
# around again.
renew()
if not lease_error[0]:
# Tell proxyfsd that we're done with the lease, but only if
# there were no errors keeping it renewed.
self.rpc_call(ctx, rpc.release_lease_request(lease_id))
channel.put("alright, it's done")
def delete_object(self, ctx):
try:
self.rpc_call(ctx, rpc.delete_request(
urllib_parse.unquote(ctx.req.path)))
except utils.RpcError as err:
if err.errno in (pfs_errno.NotFoundError, pfs_errno.NotDirError):
return swob.HTTPNotFound(request=ctx.req)
elif err.errno == pfs_errno.NotEmptyError:
return swob.HTTPConflict(request=ctx.req)
else:
raise
return swob.HTTPNoContent(request=ctx.req)
def head_object(self, ctx):
req = ctx.req
head_request = rpc.head_request(urllib_parse.unquote(req.path))
try:
head_response = self.rpc_call(ctx, head_request)
except utils.RpcError as err:
if err.errno in (pfs_errno.NotFoundError, pfs_errno.NotDirError):
return swob.HTTPNotFound(request=req)
else:
raise
raw_md, last_modified_ns, file_size, is_dir, ino, num_writes = \
rpc.parse_head_response(head_response)
metadata = deserialize_metadata(raw_md)
unmung_etags(metadata, num_writes)
headers = swob.HeaderKeyDict(metadata)
if "Content-Type" not in headers:
headers["Content-Type"] = guess_content_type(req.path, is_dir)
else:
headers['Content-Type'] = headers['Content-Type'].split(
';swift_bytes=')[0]
headers["Content-Length"] = file_size
headers["ETag"] = best_possible_etag(
headers, ctx.account_name, ino, num_writes, is_dir=is_dir)
headers["Last-Modified"] = last_modified_from_epoch_ns(
last_modified_ns)
headers["X-Timestamp"] = x_timestamp_from_epoch_ns(
last_modified_ns)
resp = swob.HTTPOk(request=req, headers=headers,
conditional_response=True)
# Support conditional if-match/if-none-match requests for SLOs
resp._conditional_etag = swift_code.resolve_etag_is_at_header(
req, resp.headers)
return resp
def coalesce_object(self, ctx, auth_cb):
# extract and verify the object list for the new object
req = ctx.req
object_path = urllib_parse.unquote(req.path)
probably_json = req.environ['wsgi.input'].read(
self.max_coalesce_request_size + 1)
if len(probably_json) > self.max_coalesce_request_size:
return swob.HTTPRequestEntityTooLarge(request=req)
try:
decoded_json = json.loads(probably_json)
except ValueError:
return swob.HTTPBadRequest(request=req, body="Malformed JSON")
if "elements" not in decoded_json:
return swob.HTTPBadRequest(request=req, body="Malformed JSON")
if not isinstance(decoded_json, dict):
return swob.HTTPBadRequest(request=req, body="Malformed JSON")
if not isinstance(decoded_json["elements"], list):
return swob.HTTPBadRequest(request=req, body="Malformed JSON")
if len(decoded_json["elements"]) > self.max_coalesce:
return swob.HTTPRequestEntityTooLarge(request=req)
authed_containers = set()
ctx.req.environ.setdefault('swift.infocache', {})
for elem in decoded_json["elements"]:
if not isinstance(elem, six.string_types):
return swob.HTTPBadRequest(request=req, body="Malformed JSON")
normalized_elem = elem
if normalized_elem.startswith('/'):
normalized_elem = normalized_elem[1:]
if any(p in ('', '.', '..') for p in normalized_elem.split('/')):
return swob.HTTPBadRequest(request=req,
body="Bad element path: %s" % elem)
elem_container = normalized_elem.split('/', 1)[0]
elem_container_path = '/v1/%s/%s' % (
ctx.account_name, elem_container)
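            # For example (made-up names), an element "images/cat-part-001"
            # names the object "cat-part-001" in container "images" under
            # this account.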
if auth_cb and elem_container_path not in authed_containers:
# Gotta check auth for all of the segments, too
bimodal_checker = ctx.req.environ[utils.ENV_BIMODAL_CHECKER]
acl_env = ctx.req.environ.copy()
acl_env['PATH_INFO'] = swift_code.text_to_wsgi(
elem_container_path)
container_info = get_container_info(
acl_env, bimodal_checker,
swift_source="PFS")
for acl in ('read_acl', 'write_acl'):
req.acl = container_info[acl]
denial_response = auth_cb(ctx.req)
if denial_response:
return denial_response
authed_containers.add(elem_container_path)
# proxyfs treats the number of objects as the number of writes
num_writes = len(decoded_json["elements"])
# validate the metadata for the new object (further munging
# of ETags will be done later)
err = constraints.check_metadata(req, 'object')
if err:
return err
# retrieve the ETag value in the request, or None
req_etag = req.headers.get('ETag')
# strip out user supplied and other unwanted headers
obj_metadata = extract_object_metadata_from_headers(req.headers)
# strip out headers that apply only to SLO objects
unwanted_headers = ['X-Static-Large-Object']
for header in obj_metadata.keys():
if header.startswith("X-Object-Sysmeta-Slo-"):
unwanted_headers.append(header)
for header in unwanted_headers:
if header in obj_metadata:
del obj_metadata[header]
# Now that we know the number of writes (really number of objects) we
# can mung the sundry ETag headers.
mung_etags(obj_metadata, req_etag, num_writes)
raw_obj_metadata = serialize_metadata(obj_metadata)
# now get proxyfs to coalesce the objects and set initial headers
try:
coalesce_response = self.rpc_call(
ctx, rpc.coalesce_object_request(
object_path, decoded_json["elements"], raw_obj_metadata))
except utils.RpcError as err:
if err.errno == pfs_errno.NotFoundError:
return swob.HTTPNotFound(
request=req,
headers={"Content-Type": "text/plain"},
body="One or more path elements not found")
elif err.errno in (pfs_errno.NotDirError, pfs_errno.IsDirError):
return swob.HTTPConflict(
request=req,
headers={"Content-Type": "text/plain"},
body="Elements must be plain files, not directories")
elif err.errno == pfs_errno.TooManyLinksError:
return swob.HTTPConflict(
request=req,
headers={"Content-Type": "text/plain"},
body=("One or more path elements has multiple links; "
"only singly-linked files can be combined"))
else:
raise
last_modified_ns, inum, num_writes = \
rpc.parse_coalesce_object_response(coalesce_response)
unmung_etags(obj_metadata, num_writes)
headers = {}
headers["Etag"] = best_possible_etag(
obj_metadata, ctx.account_name, inum, num_writes)
headers["Last-Modified"] = last_modified_from_epoch_ns(
last_modified_ns)
headers["X-Timestamp"] = x_timestamp_from_epoch_ns(
last_modified_ns)
return swob.HTTPCreated(request=req, headers=headers)
def _unpack_owning_proxyfs(self, req):
"""
Checks to see if an account is bimodal or not, and if so, which proxyfs
daemon is responsible for it.
This is done by looking in the request environment; there's another
middleware (BimodalChecker) that populates these fields.
:returns: 2-tuple (is-bimodal, proxyfsd-addrinfo).
"""
return (req.environ.get(utils.ENV_IS_BIMODAL),
req.environ.get(utils.ENV_OWNING_PROXYFS))
def rpc_call(self, ctx, rpc_request):
"""
Call a remote procedure in proxyfsd.
:param ctx: context for the current HTTP request
:param rpc_request: Python dictionary containing the request
(method, args, etc.) in JSON-RPC format.
:returns: the result of the RPC, whatever that looks like
:raises: utils.RpcTimeout if the RPC takes too long
:raises: utils.RpcError if the RPC returns an error. Inspecting this
exception's "errno" attribute may be useful. However, errno may
not always be set; if the error returned from proxyfsd does not
have an errno in it, then the exception's errno attribute will
be None.
"""
rpc_method = rpc_request['method']
start_time = time.time()
try:
return self._rpc_call([ctx.proxyfsd_addrinfo], rpc_request)
finally:
duration = time.time() - start_time
self.logger.debug("RPC %s took %.6fs", rpc_method, duration)
def _rpc_call(self, addrinfos, rpc_request):
addrinfos = set(addrinfos)
# We can get fast errors or slow errors here; we retry across all
# hosts on fast errors, but immediately raise a slow error. HTTP
# clients won't wait forever for a response, so we can't retry slow
# errors across all hosts.
#
# Fast errors are things like "connection refused" or "no route to
# host". Slow errors are timeouts.
result = None
while addrinfos:
addrinfo = addrinfos.pop()
rpc_client = utils.JsonRpcClient(addrinfo)
try:
result = rpc_client.call(rpc_request,
self.proxyfsd_rpc_timeout)
except socket.error as err:
if addrinfos:
self.logger.debug("Error communicating with %r: %s. "
"Trying again with another host.",
addrinfo, err)
continue
else:
raise
except eventlet.Timeout:
errstr = "Timeout ({0:.6f}s) calling {1}".format(
self.proxyfsd_rpc_timeout,
rpc_request.get("method", "<unknown method>"))
raise utils.RpcTimeout(errstr)
errstr = result.get("error")
if errstr:
errno = utils.extract_errno(errstr)
raise utils.RpcError(errno, errstr)
return result["result"]
| 40.505469 | 79 | 0.609313 | 11,036 | 88,869 | 4.744473 | 0.119427 | 0.018697 | 0.007219 | 0.005348 | 0.378266 | 0.316558 | 0.258995 | 0.237242 | 0.208862 | 0.194805 | 0 | 0.010886 | 0.30644 | 88,869 | 2,193 | 80 | 40.52394 | 0.838617 | 0.276598 | 0 | 0.373768 | 0 | 0 | 0.085305 | 0.013886 | 0 | 0 | 0 | 0.000912 | 0 | 1 | 0.050796 | false | 0.010614 | 0.016679 | 0.001516 | 0.169826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
021d66c30bd47ee686cbe12ce214f3a9da233cc5 | 7,360 | py | Python | python_codes/objectdetectionvideo.py | onkarjoshi52/ObjectDetection | 15c8dab3cb27b15077678c2babbbcff24b8e227c | [
"Apache-2.0"
] | null | null | null | python_codes/objectdetectionvideo.py | onkarjoshi52/ObjectDetection | 15c8dab3cb27b15077678c2babbbcff24b8e227c | [
"Apache-2.0"
] | null | null | null | python_codes/objectdetectionvideo.py | onkarjoshi52/ObjectDetection | 15c8dab3cb27b15077678c2babbbcff24b8e227c | [
"Apache-2.0"
] | null | null | null | ######## Video Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 1/16/18
# Description:
# This program uses a TensorFlow-trained classifier to perform object detection.
# It loads the classifier and uses it to perform object detection on a video.
# It draws boxes, scores, and labels around the objects of interest in each
# frame of the video.
## Some of the code is copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but I changed it to make it more understandable to me.
# Import packages
import os
import sys
import time
import tarfile
import zipfile
import cv2
import numpy as np
import tensorflow as tf
import six.moves.urllib as urllib
def convertToCentroid(vboxes, w, h):
centroids = []
for vbox in vboxes:
res = [(vbox[1]+vbox[3])/2, (vbox[0]+vbox[2])/2]
centroids.append(res)
return centroids
def getDistances(vbox1, vbox2):
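    # Element-wise distance between the two boxes' [ymin, xmin, ymax, xmax]
    # corners, plus a second distance against box 2 with its min/max corner
    # pairs swapped, so boxes whose opposite edges are close are also caught.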
dist1 = abs(np.array(vbox1) - np.array(vbox2))
dist2 = abs(np.array(vbox1) - np.array([vbox2[2], vbox2[3], vbox2[0], vbox2[1]]))
print(vbox1, vbox2, dist1)
print(vbox1, [vbox2[2], vbox2[3], vbox2[0], vbox2[1]], dist2)
return [dist1, dist2]
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilities
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'trained-inference-graphs1/frozen_inference_graph'
VIDEO_NAME = 'test.mp4'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'training/labelmap.pbtxt')
# Path to video
PATH_TO_VIDEO = os.path.join(CWD_PATH,VIDEO_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 3
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Helper code
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and put tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Open video file
video = cv2.VideoCapture(PATH_TO_VIDEO)
#IMAGE_NAME=PATH_TO_VIDEO.split("/")[-1]
while(video.isOpened()):
# Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
    ret, frame = video.read()
    # Stop cleanly once the video runs out of frames; otherwise frame is
    # None and the processing below would fail.
    if not ret:
        break
w=1920
h=1080
frame_expanded = np.expand_dims(frame, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: frame_expanded})
sc = scores.tolist()[0]
bx = boxes.tolist()[0]
cls= classes.tolist()[0]
score_ind = [sc.index(score) for score in sc if score >= 0.72]
#print(score_ind)
vis_boxes = [bx[var] for var in score_ind]
#print(vis_boxes)
vis_classes = [cls[var] for var in score_ind]
centroid_array = convertToCentroid(vis_boxes, w, h)
#print(centroid_array)
max_threshX = 150/w
max_threshY = 150/h
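    # Detection boxes come back in normalized coordinates, so the ~150-pixel
    # proximity threshold is divided by the frame width/height before being
    # compared against the box-to-box distances below.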
for vb1 in vis_boxes:
for vb2 in vis_boxes:
i1=vis_boxes.index(vb1)
i2=vis_boxes.index(vb2)
if i1 == i2:
continue
if vis_classes[i1] == vis_classes[i2]:
continue
print(vis_boxes.index(vb1), '----', vis_boxes.index(vb2))
[dist1, dist2] = getDistances(vb1, vb2)
#print(dist1, '\t', dist2)
bool1 = dist1 < np.array([max_threshY, max_threshX, max_threshY, max_threshX])
bool2 = dist2 < np.array([max_threshY, max_threshX, max_threshY, max_threshX])
#]print(bool1.astype(int).sum(), '\t', bool2.astype(int).sum())
if bool1.astype(int).sum() or bool2.astype(int).sum():
print("Run away")
#np.savetxt('records/boxes/array_boxes1'+IMAGE_NAME+'.csv', np.squeeze(boxes), delimiter=',', fmt='%2.4f')
#np.savetxt('records/scores/array_scores1'+IMAGE_NAME+'.csv', np.squeeze(scores), delimiter=',', fmt='%2.4f')
#np.savetxt('records/classes/array_classes1'+IMAGE_NAME+'.csv', np.squeeze(classes).astype(np.int32), delimiter=',', fmt='%2d')
# Draw the results of the detection (aka 'visulaize the results')
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.60)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
# Clean up
video.release()
cv2.destroyAllWindows()
| 35.555556 | 131 | 0.717663 | 1,095 | 7,360 | 4.6621 | 0.309589 | 0.041136 | 0.011753 | 0.022527 | 0.188051 | 0.150245 | 0.110676 | 0.066601 | 0.057199 | 0.057199 | 0 | 0.020678 | 0.178668 | 7,360 | 206 | 132 | 35.728155 | 0.823821 | 0.379076 | 0 | 0.148148 | 0 | 0 | 0.048965 | 0.021367 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.212963 | 0 | 0.268519 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
021db39bb21718cb8ebf10f0958ded654d27f127 | 6,153 | py | Python | Handwritten_digits/FMNIST_C_FINDER.py | ayjabri/ComputerVision | 757efaa68018270164d7f8e1e0e0f8d7787871d3 | [
"MIT"
] | null | null | null | Handwritten_digits/FMNIST_C_FINDER.py | ayjabri/ComputerVision | 757efaa68018270164d7f8e1e0e0f8d7787871d3 | [
"MIT"
] | null | null | null | Handwritten_digits/FMNIST_C_FINDER.py | ayjabri/ComputerVision | 757efaa68018270164d7f8e1e0e0f8d7787871d3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 24 19:57:27 2020
@author: aymanjabri
Classify one of the MNIST datasets using adaptive filter number, run multiple
batch sizes,learning rates and other hyper parameters
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision as tv
from torchvision import transforms,datasets
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter
from collections import Counter  # used below to inspect class balance
# from itertools import product
# Set up the GPU
if torch.cuda.is_available(): device=torch.device('cuda:0')
else: device=torch.device('cpu')
##Prepare the data for training,validation
#Transforms
means = (0.1307,)
deviations = (0.3081,)
tfms = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((means),(deviations))])
#Download the data and define the datasets
trns = datasets.FashionMNIST('/Users/aymanjabri/notebooks/FashionMNIST',train=True,transform=
tfms,download=True)
# Use the FashionMNIST test split so validation matches the training data.
tsts = datasets.FashionMNIST('/Users/aymanjabri/notebooks/FashionMNIST',train=False,transform=
tfms,download=True)
#Datasets imbalance:
print(sorted(Counter(trns.targets.numpy()).items()))
print(sorted(Counter(tsts.targets.numpy()).items()))
train = torch.utils.data.DataLoader(trns,batch_size=100,shuffle=True)
valid = torch.utils.data.DataLoader(tsts,batch_size=500)
#View data
img,label = next(iter(train))
imgs = tv.utils.make_grid(img,nrow=10,padding=1,normalize=True)
plt.figure(figsize=(12,12))
plt.imshow(imgs.permute(1,2,0))
'''Create a small dataloader to overfit the model with,
before introducing the full dataset'''
#Define sampling method
weights= 100/(torch.bincount(trns.targets).double())
weighted = torch.utils.data.WeightedRandomSampler(weights,num_samples=100
,replacement=True)
random = torch.utils.data.RandomSampler(trns,replacement=True,num_samples=100)
# After trying several different sampling methods, I went with random
# sampling because it gave the most balanced results.
dl_small= torch.utils.data.DataLoader(trns,batch_size=100,sampler=random)
xs,ys = next(iter(dl_small))
print(sorted(Counter(ys.numpy()).items()))
##Build The CNN
class Net(nn.Module):
def __init__(self,out,kernel):
super().__init__()
self.conv1 = nn.Conv2d(1,out,kernel,padding=1)
self.bn1 = nn.BatchNorm2d(out)
self.conv2 = nn.Conv2d(out,out,kernel,padding=1)
self.bn2 = nn.BatchNorm2d(out)
self.pool1 = nn.AdaptiveMaxPool2d(1)
self.pool2 = nn.AdaptiveAvgPool2d(1)
self.lin1 = nn.Linear(out*2,out)
self.bn = nn.BatchNorm1d(out)
self.lin2 = nn.Linear(out,10)
def forward(self,x):
x = self.bn1(F.relu_(self.conv1(x)))
x = self.bn2(F.relu_(self.conv2(x)))
p1 = self.pool1(x)
p2 = self.pool2(x)
x = torch.cat((p1,p2),dim=1)
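        # Concatenating the adaptive max- and avg-pooled features doubles the
        # channel count, which is why lin1 takes out*2 inputs.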
x = x.view(x.size(0),-1)
# x = self.convs(x)
x = self.bn(F.relu(self.lin1(x)))
x = self.lin2(x)
return x
def get_correct_num(predict,label):
correct = torch.argmax(predict.softmax(dim=1),dim=1).eq(label)
return correct.sum().item()
def learn(net,data,epochs,tb,lr=1e-3):
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(),lr=lr)
total_losses = []
total_accuracy = []
for epoch in range(1,epochs+1):
e_loss = 0
correct = 0
for batch in data:
optimizer.zero_grad()
img,label = batch
output = net(img)
loss = criterion(output,label)
loss.backward()
optimizer.step()
e_loss += loss.item()
correct += get_correct_num(output,label)
e_acc = round(correct/(len(data)*data.batch_size)*100,5)
if epoch <=10:
print('''Epoch:{} Training Loss {} Training Accuracy {}%
'''.format(epoch,e_loss,e_acc))
else:
if epoch%(epochs/20)==0:
print('''Epoch:{} Training Loss {} Training Accuracy {}%
'''.format(epoch,e_loss,e_acc))
total_losses.append(e_loss)
total_accuracy.append(e_acc)
tb.add_scalar('Losses',e_loss,epoch)
tb.add_scalar('Accuracy',e_acc,epoch)
for name,param in net.named_parameters():
tb.add_histogram('net.{}'.format(name), param)
return total_losses,total_accuracy
def predict_all(net,loader):
with torch.no_grad():
predict = torch.tensor([])
labels = torch.tensor([]).int()
for img,label in loader:
if torch.cuda.is_available(): img,label=img.to(device),label.to(device)
p=net(img)
predict = torch.cat((predict,p),dim=0)
labels = torch.cat((labels,label.int()),dim=0)
return predict,labels
class runner():
def __init__(self,out_channels,kernel_size,data,epochs,lr=1e-3):
self.out_channels = out_channels
self.kernel_size = kernel_size
self.data = data
self.epochs = epochs
self.lr = lr
def run(self):
net = Net(self.out_channels,self.kernel_size)
summary = SummaryWriter(comment='filters:{} kernel:{} lr:{}'.format(
self.out_channels,self.kernel_size,self.lr))
summary.add_graph(net,img)
l,a = learn(net,self.data,self.epochs,summary,self.lr)
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel('lr={} out_channels={} kernel={}'.format(
self.lr,self.out_channels,self.kernel_size))
ax1.set_ylabel('losses', color=color)
ax1.plot(l, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('accuracy', color=color) # we already handled the x-label with ax1
ax2.plot(a, color=color)
ax2.tick_params(axis='y', labelcolor=color)
fig.tight_layout()
return net
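# Illustrative usage (hyperparameter values are made up):
#   r = runner(out_channels=32, kernel_size=3, data=train, epochs=20, lr=1e-3)
#   net = r.run()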
| 36.408284 | 93 | 0.639688 | 832 | 6,153 | 4.643029 | 0.346154 | 0.019933 | 0.018121 | 0.021745 | 0.138752 | 0.088791 | 0.050738 | 0.050738 | 0.030028 | 0.030028 | 0 | 0.02584 | 0.226394 | 6,153 | 168 | 94 | 36.625 | 0.785714 | 0.114741 | 0 | 0.047619 | 0 | 0 | 0.06027 | 0.015021 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.166667 | 0.039683 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
021efac5a5560eaf1d21a0b752ae17dab92eeb6e | 2,557 | py | Python | fairlearn/metrics/__init__.py | seralexger/fairlearn | c3ee7b5a45eb3394fc1b8d17b991e3d970970c05 | [
"MIT"
] | null | null | null | fairlearn/metrics/__init__.py | seralexger/fairlearn | c3ee7b5a45eb3394fc1b8d17b991e3d970970c05 | [
"MIT"
] | null | null | null | fairlearn/metrics/__init__.py | seralexger/fairlearn | c3ee7b5a45eb3394fc1b8d17b991e3d970970c05 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
"""Functionality for computing metrics, with a particular focus on group metrics.
For our purpose, a metric is a function with signature
``f(y_true, y_pred, ....)``
where ``y_true`` are the set of true values and ``y_pred`` are
values predicted by a machine learning algorithm. Other
arguments may be present (most often sample weights), which will
affect how the metric is calculated.
The group metrics in this module have signatures
``g(y_true, y_pred, group_membership, ...)``
where ``group_membership`` is an array of values indicating
a group to which each pair of true and predicted values belong.
The metric is evaluated for the entire set of data, and also
for each subgroup identified in ``group_membership``.
"""
from ._extra_metrics import ( # noqa: F401
true_positive_rate,
true_negative_rate,
false_positive_rate,
false_negative_rate,
_balanced_root_mean_squared_error,
mean_prediction,
selection_rate,
_mean_overprediction,
_mean_underprediction,
)
from ._metrics_engine import ( # noqa: F401
make_metric_group_summary, group_summary,
make_derived_metric,
group_min_from_summary, group_max_from_summary,
difference_from_summary, ratio_from_summary,
_metric_group_summary_dict, _derived_metric_dict)
from ._disparities import ( # noqa: F401
demographic_parity_difference,
demographic_parity_ratio,
equalized_odds_difference,
equalized_odds_ratio,
)
_extra_metrics = [
"true_positive_rate",
"true_negative_rate",
"false_positive_rate",
"false_negative_rate",
"balanced_root_mean_squared_error",
"mean_prediction",
"selection_rate",
"_mean_overprediction",
"_mean_underprediction",
]
_metrics_engine = [
"make_metric_group_summary",
"group_summary",
"make_derived_metric",
"group_min_from_summary",
"group_max_from_summary",
"difference_from_summary",
"ratio_from_summary"
]
# Add the generated metrics of the form `<metric>_group_summary` and
# `<metric>_{difference,ratio,group_min,group_max}`
globals().update(_metric_group_summary_dict)
globals().update(_derived_metric_dict)
_disparities = [
"demographic_parity_difference",
"demographic_parity_ratio",
"equalized_odds_difference",
"equalized_odds_ratio",
]
__all__ = (
_extra_metrics +
_metrics_engine +
list(_metric_group_summary_dict.keys()) +
list(_derived_metric_dict.keys()) +
_disparities)
| 27.793478 | 81 | 0.753226 | 325 | 2,557 | 5.501538 | 0.356923 | 0.049217 | 0.060403 | 0.036913 | 0.418345 | 0.418345 | 0.418345 | 0.418345 | 0.418345 | 0.418345 | 0 | 0.004217 | 0.165428 | 2,557 | 91 | 82 | 28.098901 | 0.833646 | 0.376613 | 0 | 0 | 0 | 0 | 0.263125 | 0.14105 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.052632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
021f1de5a7e77d411e3c3fe53eac5a168c19f2b6 | 9,586 | py | Python | omsdk/sdkinfra.py | DanielFroehlich/omsdk | 475d925e4033104957fdc64480fe8f9af0ab6b8a | [
"Apache-2.0"
] | 61 | 2018-02-21T00:02:20.000Z | 2022-01-26T03:47:19.000Z | omsdk/sdkinfra.py | DanielFroehlich/omsdk | 475d925e4033104957fdc64480fe8f9af0ab6b8a | [
"Apache-2.0"
] | 31 | 2018-03-24T05:43:39.000Z | 2022-03-16T07:10:37.000Z | omsdk/sdkinfra.py | DanielFroehlich/omsdk | 475d925e4033104957fdc64480fe8f9af0ab6b8a | [
"Apache-2.0"
] | 25 | 2018-03-13T10:06:12.000Z | 2022-01-26T03:47:21.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright © 2018 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Vaideeswaran Ganesan
#
import os
import imp
import logging
import socket
import sys, glob
from collections import OrderedDict
from omsdk.sdkcenum import EnumWrapper, TypeHelper
logger = logging.getLogger(__name__)
class sdkinfra:
"""
    Class to initialize and load the device drivers
"""
def __init__(self):
self.drivers = {}
self.disc_modules = OrderedDict()
self.driver_names = {}
def load_from_file(self, filepath):
mod_name = None
mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])
logger.debug("Loading " + filepath + "...")
if file_ext.lower() == '.py':
py_mod = imp.load_source(mod_name, filepath)
elif file_ext.lower() == '.pyc':
py_mod = imp.load_compiled(mod_name, filepath)
return {"name": mod_name, "module": py_mod}
def importPath(self, srcdir=None):
oldpaths = sys.path
if not srcdir is None:
oldpaths = [srcdir]
counter = 0
paths = []
for k in oldpaths:
if not k in paths:
paths.append(k)
for psrcdir in paths:
pypath = os.path.join(psrcdir, 'omdrivers', '__DellDrivers__.py')
pyglobpath = os.path.join(psrcdir, 'omdrivers', '*.py')
pydrivers = os.path.join(psrcdir, 'omdrivers')
if not os.path.exists(pypath):
continue
fl = glob.glob(pyglobpath)
for i in range(len(fl)):
if fl[i].endswith("__.py"):
continue
counter = counter + 1
logger.debug("Loading: " + str(counter) + "::" + fl[i])
module_loaded = self.load_from_file(fl[i])
self.drivers[module_loaded["name"]] = module_loaded["module"]
self.driver_names[module_loaded["name"]] = module_loaded["name"]
discClass = getattr(module_loaded["module"], module_loaded["name"])
self.disc_modules[module_loaded["name"]] = discClass(pydrivers)
aliases = self.disc_modules[module_loaded["name"]].my_aliases()
mname = module_loaded["name"]
for alias in aliases:
self.disc_modules[alias] = self.disc_modules[mname]
self.drivers[alias] = self.drivers[mname]
self.driver_names[alias] = self.driver_names[mname]
tempdict = OrderedDict(sorted(self.disc_modules.items(), key=lambda t: t[1].prefDiscOrder))
self.disc_modules = tempdict
self.driver_enum = EnumWrapper("Driver", self.driver_names).enum_type
def find_driver(self, ipaddr, creds, protopref=None, pOptions=None, msgFlag=False):
"""Find a device driver for the given IPAddress or host name
:param ipaddr: ipaddress or hostname of the device
:param creds: bundle of credentials for finding the device driver.
:param protopref: the preferred protocol to be used if the device supports the protocol
:param pOptions: protocol specific options to be passed, port, timeout etc
:type ipaddr: str
:type creds: dict of obj <Snmpv2Credentials or UserCredentials>
:type protopref: enumeration of preferred protocol
:type pOptions: object <SNMPOptions or WSMANOptions or REDFISHOptions>
:return: a driver handle for further configuration/monitoring
:rtype: object <iBaseDriver>
"""
duplicSet = set()
msg = ipaddr + " : Connection to Dell EMC device failed, please check device status and credentials."
drv = None
for mod in self.disc_modules:
if (self.disc_modules[mod] in duplicSet) or (str(mod) == "FileList"):
continue
drv = self._create_driver(mod, ipaddr, creds, protopref, pOptions)
if drv:
msg = ipaddr + " : Connected to Dell EMC device"
break
duplicSet.add(self.disc_modules[mod])
if msgFlag:
return drv, msg
return drv
# Return:
    #   None - if driver not found, not classified
# instance of iBaseEntity - if device of the proper type
def get_driver(self, driver_en, ipaddr, creds, protopref=None, pOptions=None):
"""Get a device driver for the given IPAddress or host name, also checking for a particular device type
:param ipaddr: ipaddress or hostname of the device
:param driver_en: enumeration of the device type
:param creds: bundle of credentials for finding the device driver.
:param protopref: the preferred protocol to be used if the device supports the protocol
:param pOptions: protocol specific options to be passed, port, timeout etc
:type ipaddr: str
:type driver_en: enumerate of the device type
:type creds: dict of obj <Snmpv2Credentials or UserCredentials>
:type protopref: enumeration of preferred protocol
:type pOptions: object <SNMPOptions or WSMANOptions or REDFISHOptions>
:return: a driver handle for further configuration/monitoring
:rtype: object <iBaseDriver>
"""
mod = TypeHelper.resolve(driver_en)
logger.debug("get_driver(): Asking for " + mod)
return self._create_driver(mod, ipaddr, creds, protopref, pOptions)
def _create_driver(self, mod, host, creds, protopref, pOptions):
msg = "Connection to Dell EMC device failed, please check device status and credentials."
logger.debug("get_driver(): Asking for " + mod)
ipaddr = host
try:
result = socket.getaddrinfo(host, None)
lastuple = result[-1]
ipaddress = lastuple[-1][0]
if ipaddress:
ipaddr = ipaddress
except socket.gaierror as err:
logger.error("{}: {}: {}".format(host, err, "cannot resolve hostname!"))
if not mod in self.disc_modules:
# TODO: Change this to exception
logger.error("{}: {}".format(host, msg))
logger.debug(mod + " not found!")
return None
try:
logger.debug(mod + " driver found!")
drv = self.disc_modules[mod].is_entitytype(self, ipaddr, creds, protopref, mod, pOptions)
if drv is None:
logger.info("{}: {}".format(host, msg))
if drv:
logger.info("{}: {}".format(host, "Connection to Dell EMC device success!"))
hostname = None
try:
hostname, aliaslist, addresslist = socket.gethostbyaddr(ipaddr)
logger.debug("Found host name for " + ipaddr + " as " + hostname)
except socket.herror:
hostname = None
logger.debug("No host name found for " + ipaddr)
drv.hostname = hostname
return drv
except AttributeError as attrerror:
logger.debug(mod + " is not device or console")
logger.debug(attrerror)
return None
def _driver(self, driver_en):
mod = TypeHelper.resolve(driver_en)
logger.debug("_driver(): Asking for " + mod)
if not mod in self.disc_modules:
# TODO: Change this to exception
logger.debug(mod + " not found!")
return None
try:
logger.debug(mod + " driver found!")
drv = self.disc_modules[mod]._get(self)
return drv
except AttributeError as attrerror:
logger.debug(mod + " is not device or console")
logger.debug(attrerror)
return None
def setPrefProtocolDriver(self, driver_name, protopref):
drv = self.disc_modules.get(driver_name, None)
if drv:
drv.protofactory.prefProtocol = protopref
def excludeDrivers(self, excList):
for drv in excList:
self.disc_modules.pop(drv)
def includeDriversOnly(self, incList):
drvkeys = self.disc_modules.keys()
for drv in drvkeys:
if drv not in incList:
self.disc_modules.pop(drv)
def removeProtoDriver(self, driver_name, protList):
drv = self.disc_modules.get(driver_name, None)
if drv:
for protoenum in protList:
drv.protofactory.removeProto(protoenum) | 43.771689 | 112 | 0.591488 | 1,087 | 9,586 | 5.132475 | 0.25483 | 0.027245 | 0.051084 | 0.010755 | 0.423553 | 0.382864 | 0.350242 | 0.326761 | 0.309912 | 0.294318 | 0 | 0.002919 | 0.320885 | 9,586 | 219 | 113 | 43.77169 | 0.853917 | 0.254225 | 0 | 0.27972 | 0 | 0 | 0.099451 | 0 | 0 | 0 | 0 | 0.004566 | 0 | 1 | 0.076923 | false | 0 | 0.055944 | 0 | 0.20979 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
022529cd922f62048b6f8d21791cd0dc6505d929 | 355 | py | Python | conf/Gnod_conf.py | akkuldn/Gnode-Tests | 9a610c1a8e09efec643b67dbb878e9ea1e0cee09 | [
"MIT"
] | null | null | null | conf/Gnod_conf.py | akkuldn/Gnode-Tests | 9a610c1a8e09efec643b67dbb878e9ea1e0cee09 | [
"MIT"
] | null | null | null | conf/Gnod_conf.py | akkuldn/Gnode-Tests | 9a610c1a8e09efec643b67dbb878e9ea1e0cee09 | [
"MIT"
] | 1 | 2020-01-10T14:40:20.000Z | 2020-01-10T14:40:20.000Z | """
This file contains the names and details that is used to fill out the textboxes in Gnod.com
"""
#name of musicians to be entered in the three textboxes in the discover music page
musician1="Shane Filan"
musician2="Bruno Mars"
musician3="Elvis Presley"
#name of the movie to be entered in the search bar in the movie map page
movie_name="Harry Potter" | 35.5 | 91 | 0.777465 | 62 | 355 | 4.435484 | 0.66129 | 0.072727 | 0.08 | 0.094545 | 0.116364 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010101 | 0.16338 | 355 | 10 | 92 | 35.5 | 0.915825 | 0.687324 | 0 | 0 | 0 | 0 | 0.446602 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02262c932b821bddba28cd56f01c61bc0a4c830d | 2,520 | py | Python | cpmpy/map_coloring.py | hakank/hakank | 313e5c0552569863047f6ce9ae48ea0f6ec0c32b | [
"MIT"
] | 279 | 2015-01-10T09:55:35.000Z | 2022-03-28T02:34:03.000Z | cpmpy/map_coloring.py | hakank/hakank | 313e5c0552569863047f6ce9ae48ea0f6ec0c32b | [
"MIT"
] | 10 | 2017-10-05T15:48:50.000Z | 2021-09-20T12:06:52.000Z | cpmpy/map_coloring.py | hakank/hakank | 313e5c0552569863047f6ce9ae48ea0f6ec0c32b | [
"MIT"
] | 83 | 2015-01-20T03:44:00.000Z | 2022-03-13T23:53:06.000Z | """
Map coloring in cpmpy
From Pascal Van Hentenryck 'The OPL Optimization Programming Language',
page 7, 42.
Symmetry breaking:
* With the simple symmetry breaking constraint that Belgium has color 1
there are 36 solutions:
[1 1 3 2 4 4]
[1 1 4 2 4 3]
[1 1 4 3 4 2]
[1 1 4 3 2 2]
[1 1 3 4 2 2]
[1 1 3 4 3 2]
[1 1 4 2 3 3]
[1 1 3 2 3 4]
[1 2 3 4 3 2]
[1 2 3 4 2 2]
[1 2 4 3 2 2]
[1 2 4 3 4 2]
[1 3 3 4 2 2]
[1 3 3 4 3 2]
[1 3 4 2 3 3]
[1 3 3 2 3 4]
[1 3 3 2 4 4]
[1 3 4 2 4 3]
[1 4 4 2 4 3]
[1 4 3 2 4 4]
[1 4 4 3 4 2]
[1 4 4 3 2 2]
[1 4 4 2 3 3]
[1 4 3 2 3 4]
[1 4 2 3 2 4]
[1 4 2 3 4 4]
[1 2 2 3 4 4]
[1 2 2 3 2 4]
[1 1 2 3 2 4]
[1 1 2 3 4 4]
[1 1 2 4 3 3]
[1 2 2 4 3 3]
[1 3 2 4 3 3]
[1 3 2 4 2 3]
[1 2 2 4 2 3]
[1 1 2 4 2 3]
* With the added constraint value_precede_chain (that color 1 must be used before
color 2 which must be used before color 3 etc) there are just 6 solutions:
[1 2 2 3 4 4]
[1 2 2 3 2 4]
[1 2 3 4 2 2]
[1 2 3 4 3 2]
[1 1 2 3 2 4]
[1 1 2 3 4 4]
Model created by Hakan Kjellerstrand, hakank@hakank.com
See also my cpmpy page: http://www.hakank.org/cpmpy/
"""
import sys
import numpy as np
from cpmpy import *
from cpmpy.solvers import *
from cpmpy_hakank import *
def map_coloring(use_value_precede_chain=False):
print("Use value precede chain symmetry constraint:", use_value_precede_chain)
Belgium = 0
Denmark = 1
France = 2
Germany = 3
Netherlands = 4
Luxembourg = 5
countries = [Belgium,Denmark,France,Germany,Netherlands,Luxembourg]
num_countries = 6
max_num_colors = 4
color = intvar(1,max_num_colors,shape=num_countries,name="color")
model = Model(
color[Belgium] == 1, # Symmetry breaking
color[France] != color[Belgium],
color[France] != color[Luxembourg],
color[France] != color[Germany],
color[Luxembourg] != color[Germany],
color[Luxembourg] != color[Belgium],
color[Belgium] != color[Netherlands],
color[Belgium] != color[Germany],
color[Germany] != color[Netherlands],
color[Germany] != color[Denmark]
)
if use_value_precede_chain:
model += [value_precede_chain(list(range(1,max_num_colors+1)),color)]
ortools_wrapper2(model,[color])
use_value_precede_chain=False
map_coloring(use_value_precede_chain)
use_value_precede_chain=True
map_coloring(use_value_precede_chain)
| 22.300885 | 82 | 0.605556 | 491 | 2,520 | 3.03055 | 0.179226 | 0.030914 | 0.024194 | 0.107527 | 0.307124 | 0.148522 | 0.061828 | 0.055108 | 0.03293 | 0.03293 | 0 | 0.15493 | 0.295635 | 2,520 | 112 | 83 | 22.5 | 0.68338 | 0.472222 | 0 | 0.055556 | 0 | 0 | 0.037149 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.138889 | 0 | 0.166667 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02270b892808601804bc269b231226b1a9c9e6f8 | 27,727 | py | Python | recipe/ingredients.py | juiceinc/recipe | ef3c5af58e2d68892d54285a24b78565f6401ef4 | [
"MIT"
] | 5 | 2017-10-26T10:44:07.000Z | 2021-08-30T16:35:55.000Z | recipe/ingredients.py | juiceinc/recipe | ef3c5af58e2d68892d54285a24b78565f6401ef4 | [
"MIT"
] | 56 | 2017-10-23T14:01:37.000Z | 2022-02-17T17:07:41.000Z | recipe/ingredients.py | juiceinc/recipe | ef3c5af58e2d68892d54285a24b78565f6401ef4 | [
"MIT"
] | null | null | null | import attr
from functools import total_ordering
from uuid import uuid4
from sqlalchemy import Float, String, and_, between, case, cast, func, or_, text
from recipe.exceptions import BadIngredient
from recipe.utils import AttrDict, filter_to_string
from recipe.utils.datatype import (
convert_date,
convert_datetime,
determine_datatype,
datatype_from_column_expression,
)
ALLOWED_OPERATORS = set(
[
"eq",
"ne",
"lt",
"lte",
"gt",
"gte",
"is",
"isnot",
"like",
"ilike",
"quickselect",
"in",
"notin",
"between",
]
)
@total_ordering
class Ingredient(object):
"""Ingredients combine to make a SQLAlchemy query.
Any unknown keyword arguments provided to an Ingredient
during initialization are stored in a meta object.
.. code:: python
# icon is an unknown keyword argument
m = Metric(func.sum(MyTable.sales), icon='cog')
print(m.meta.icon)
>>> 'cog'
This meta storage can be used to add new capabilities to
ingredients.
Args:
id (:obj:`str`):
An id to identify this Ingredient. If ingredients are
added to a Shelf, the id is automatically set as the key in
the shelf.
columns (:obj:`list` of :obj:`ColumnElement`):
A list of SQLAlchemy columns to use in a query select.
filters (:obj:`list` of :obj:`BinaryExpression`):
A list of SQLAlchemy BinaryExpressions to use in the
.filter() clause of a query.
havings (:obj:`list` of :obj:`BinaryExpression`):
A list of SQLAlchemy BinaryExpressions to use in the
.having() clause of a query.
columns (:obj:`list` of :obj:`ColumnElement`):
A list of SQLAlchemy columns to use in the `group_by` clause
of a query.
formatters: (:obj:`list` of :obj:`callable`):
A list of callables to apply to the result values.
If formatters exist, property `{ingredient.id}_raw` will
exist on each result row containing the unformatted
value.
cache_context (:obj:`str`):
Extra context when caching this ingredient. DEPRECATED
ordering (`string`, 'asc' or 'desc'):
One of 'asc' or 'desc'. 'asc' is the default value.
The default ordering of this ingredient if it is
used in a ``recipe.order_by``.
This is added to the ingredient when the ingredient is
used in a ``recipe.order_by``.
group_by_strategy (:obj:`str`):
A strategy to use when preparing group_bys for the query
"labels" is the default strategy which will use the labels assigned to
each column.
"direct" will use the column expression directly. This alternative is
useful when there might be more than one column with the same label
being used in the query.
quickselects (:obj:`list` of named filters):
A list of named filters that can be accessed through
``build_filter``. Named filters are dictionaries with
a ``name`` (:obj:str) property and a ``condition`` property
(:obj:`BinaryExpression`)
datatype (:obj:`str`):
The identified datatype (num, str, date, bool, datetime) of
the parsed expression
datatype_by_role (:obj:`dict`):
The identified datatype (num, str, date, bool, datetime) for each
role.
Returns:
An Ingredient object.
"""
def __init__(self, **kwargs):
self.id = kwargs.pop("id", uuid4().hex[:12])
self.columns = kwargs.pop("columns", [])
self.filters = kwargs.pop("filters", [])
self.havings = kwargs.pop("havings", [])
self.group_by = kwargs.pop("group_by", [])
self.formatters = kwargs.pop("formatters", [])
self.quickselects = kwargs.pop("quickselects", [])
self.column_suffixes = kwargs.pop("column_suffixes", None)
self.cache_context = kwargs.pop("cache_context", "")
self.datatype = kwargs.pop("datatype", None)
self.datatype_by_role = kwargs.pop("datatype_by_role", dict())
self.anonymize = False
self.roles = {}
self._labels = []
self.error = kwargs.pop("error", None)
# What order should this be in
self.ordering = kwargs.pop("ordering", "asc")
self.group_by_strategy = kwargs.pop("group_by_strategy", "labels")
if not isinstance(self.formatters, (list, tuple)):
raise BadIngredient(
"formatters passed to an ingredient must be a list or tuple"
)
# If explicit suffixes are passed in, there must be one for each column
if self.column_suffixes is not None and len(self.column_suffixes) != len(
self.columns
):
raise BadIngredient("column_suffixes must be the same length as columns")
# Any remaining passed properties are available in self.meta
self.meta = AttrDict(kwargs)
def __hash__(self):
return hash(self.describe())
def __repr__(self):
return self.describe()
def _stringify(self):
"""Return a relevant string based on ingredient type for repr and
ordering. Ingredients with the same classname, id and _stringify
value are considered the same."""
return " ".join(str(col) for col in self.columns)
def describe(self):
"""A string representation of the ingredient."""
return u"({}){} {}".format(self.__class__.__name__, self.id, self._stringify())
def _format_value(self, value):
"""Formats value using any stored formatters."""
for f in self.formatters:
value = f(value)
return value
def make_column_suffixes(self):
"""Make sure we have the right column suffixes. These will be appended
to `id` when generating the query.
Developers note: These are generated when the query runs because the
recipe may be run with anonymization on or off, which will inject
a formatter.
"""
if self.column_suffixes:
return self.column_suffixes
if len(self.columns) == 0:
return ()
elif len(self.columns) == 1:
if self.formatters:
return ("_raw",)
else:
return ("",)
else:
raise BadIngredient(
"column_suffixes must be supplied if there is " "more than one column"
)
@property
def query_columns(self):
"""Yield labeled columns to be used as a select in a query."""
self._labels = []
for column, suffix in zip(self.columns, self.make_column_suffixes()):
self._labels.append(self.id + suffix)
yield column.label(self.id + suffix)
@property
def order_by_columns(self):
"""Yield columns to be used in an order by using this ingredient. Column
ordering is in reverse order of columns
"""
# Ensure the labels are generated
if not self._labels:
list(self.query_columns)
if self.group_by_strategy == "labels":
if self.ordering == "desc":
suffix = " DESC"
else:
suffix = ""
return [
text(lbl + suffix)
for col, lbl in reversed(list(zip(self.columns, self._labels)))
]
else:
if self.ordering == "desc":
return [col.desc() for col in reversed(self.columns)]
else:
return reversed(self.columns)
@property
def cauldron_extras(self):
"""Yield extra tuples containing a field name and a callable that takes
a row.
"""
if self.formatters:
raw_property = self.id + "_raw"
yield self.id, lambda row: self._format_value(getattr(row, raw_property))
def _order(self):
"""Ingredients are sorted by subclass then by id."""
if isinstance(self, Dimension):
return (0, self.id)
elif isinstance(self, Metric):
return (1, self.id)
elif isinstance(self, Filter):
return (2, self.id)
elif isinstance(self, Having):
return (3, self.id)
else:
return (4, self.id)
def __lt__(self, other):
"""Make ingredients sortable."""
return self._order() < other._order()
def __eq__(self, other):
"""Make ingredients sortable."""
return self._order() == other._order()
def __ne__(self, other):
"""Make ingredients sortable."""
return not (self._order() == other._order())
def _build_scalar_filter(self, value, operator=None, target_role=None):
"""Build a Filter given a single value.
Args:
value (a string, number, boolean or None):
operator (`str`)
A valid scalar operator. The default operator
is `eq`
target_role (`str`)
An optional role to build the filter against
Returns:
A Filter object
"""
# Developer's note: Valid operators should appear in ALLOWED_OPERATORS
# This is used by the AutomaticFilter extension.
if operator is None:
operator = "eq"
if target_role and target_role in self.roles:
filter_column = self.roles.get(target_role)
datatype = determine_datatype(self, target_role)
else:
filter_column = self.columns[0]
datatype = determine_datatype(self)
# Ensure that the filter_column and value have compatible data types
# Support passing ILIKE in Paginate extensions
if datatype == "date":
value = convert_date(value)
elif datatype == "datetime":
value = convert_datetime(value)
if isinstance(value, str) and datatype != "str":
filter_column = cast(filter_column, String)
if operator == "eq":
# Default operator is 'eq' so if no operator is provided, handle
# like an 'eq'
if value is None:
return filter_column.is_(value)
else:
return filter_column == value
if operator == "ne":
return filter_column != value
elif operator == "lt":
return filter_column < value
elif operator == "lte":
return filter_column <= value
elif operator == "gt":
return filter_column > value
elif operator == "gte":
return filter_column >= value
elif operator == "is":
return filter_column.is_(value)
elif operator == "isnot":
return filter_column.isnot(value)
elif operator == "like":
value = str(value)
return filter_column.like(value)
elif operator == "ilike":
value = str(value)
return filter_column.ilike(value)
elif operator == "quickselect":
for qs in self.quickselects:
if qs.get("name") == value:
return qs.get("condition")
raise ValueError(
"quickselect {} was not found in "
"ingredient {}".format(value, self.id)
)
else:
raise ValueError("Unknown operator {}".format(operator))
def _build_vector_filter(self, value, operator=None, target_role=None):
"""Build a Filter given a list of values.
Args:
value (a list of string, number, boolean or None):
operator (:obj:`str`)
A valid vector operator. The default operator is
`in`.
target_role (`str`)
An optional role to build the filter against
Returns:
A Filter object
"""
# Developer's note: Valid operators should appear in ALLOWED_OPERATORS
# This is used by the AutomaticFilter extension.
if operator is None:
operator = "in"
if target_role and target_role in self.roles:
filter_column = self.roles.get(target_role)
datatype = determine_datatype(self, target_role)
else:
filter_column = self.columns[0]
datatype = determine_datatype(self)
if datatype == "date":
value = list(map(convert_date, value))
elif datatype == "datetime":
value = list(map(convert_datetime, value))
if operator == "in":
# Default operator is 'in' so if no operator is provided, handle
# like an 'in'
if None in value:
# filter out the Nones
non_none_value = sorted([v for v in value if v is not None])
if non_none_value:
return or_(
filter_column.is_(None), filter_column.in_(non_none_value)
)
else:
return filter_column.is_(None)
else:
# Sort to generate deterministic query sql for caching
value = sorted(value)
return filter_column.in_(value)
elif operator == "notin":
if None in value:
# filter out the Nones
non_none_value = sorted([v for v in value if v is not None])
if non_none_value:
return and_(
filter_column.isnot(None), filter_column.notin_(non_none_value)
)
else:
return filter_column.isnot(None)
else:
# Sort to generate deterministic query sql for caching
value = sorted(value)
return filter_column.notin_(value)
elif operator == "between":
if len(value) != 2:
                raise ValueError(
                    "When using between, you can only supply a "
                    "lower and an upper bound."
                )
lower_bound, upper_bound = value
return between(filter_column, lower_bound, upper_bound)
elif operator == "quickselect":
qs_conditions = []
for v in value:
qs_found = False
for qs in self.quickselects:
if qs.get("name") == v:
qs_found = True
qs_conditions.append(qs.get("condition"))
break
if not qs_found:
raise ValueError(
"quickselect {} was not found in "
"ingredient {}".format(value, self.id)
)
return or_(*qs_conditions)
else:
raise ValueError("Unknown operator {}".format(operator))
def build_filter(self, value, operator=None, target_role=None):
"""
Builds a filter based on a supplied value and optional operator. If
no operator is supplied an ``in`` filter will be used for a list and a
``eq`` filter if we get a scalar value.
``build_filter`` is used by the AutomaticFilter extension.
Args:
value:
A value or list of values to operate against
operator (:obj:`str`)
An operator that determines the type of comparison
to do against value.
The default operator is 'in' if value is a list and
'eq' if value is a string, number, boolean or None.
target_role (`str`)
An optional role to build the filter against
Returns:
A SQLAlchemy boolean expression
"""
value_is_scalar = not isinstance(value, (list, tuple))
if value_is_scalar:
return self._build_scalar_filter(
value, operator=operator, target_role=target_role
)
else:
return self._build_vector_filter(
value, operator=operator, target_role=target_role
)
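    # Illustrative usage (a sketch only; `MyTable` is a hypothetical SQLAlchemy
    # model used purely for illustration, not part of this library):
    #
    #   d = Dimension(MyTable.state, id="state")
    #   d.build_filter("ca")                    # roughly: MyTable.state == 'ca'
    #   d.build_filter(["ca", "or"])            # roughly: MyTable.state IN ('ca', 'or')
    #   d.build_filter("ca", operator="ne")     # roughly: MyTable.state != 'ca'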
@property
def expression(self):
"""An accessor for the SQLAlchemy expression representing this
Ingredient."""
if self.columns:
return self.columns[0]
else:
return None
class Filter(Ingredient):
"""A simple filter created from a single expression."""
def __init__(self, expression, **kwargs):
super(Filter, self).__init__(**kwargs)
self.filters = [expression]
self.datatype = "bool"
def _stringify(self):
return filter_to_string(self)
@property
def expression(self):
"""An accessor for the SQLAlchemy expression representing this
Ingredient."""
if self.filters:
return self.filters[0]
else:
return None
class Having(Ingredient):
"""A Having that limits results based on an aggregate boolean clause"""
def __init__(self, expression, **kwargs):
super(Having, self).__init__(**kwargs)
self.havings = [expression]
self.datatype = "bool"
def _stringify(self):
return " ".join(str(expr) for expr in self.havings)
@property
def expression(self):
"""An accessor for the SQLAlchemy expression representing this
Ingredient."""
if self.havings:
return self.havings[0]
else:
return None
class Dimension(Ingredient):
"""A Dimension is an Ingredient that adds columns and groups by those
columns. Columns should be non-aggregate SQLAlchemy expressions.
The required expression supplies the dimension's "value" role. Additional
expressions can be provided in keyword arguments with keys
that look like "{role}_expression". The role is suffixed to the
end of the SQL column name.
For instance, the following
.. code:: python
Dimension(Hospitals.name,
latitude_expression=Hospitals.lat
longitude_expression=Hospitals.lng,
id='hospital')
would add columns named "hospital", "hospital_latitude", and
"hospital_longitude" to the recipes results. All three of these expressions
would be used as group bys.
Two special roles that can be added are "id" and "order_by". If a keyword argument
"id_expression" is passed, this expression will appear first in the list of
columns and group_bys. This "id" will be used if you call `build_filter` on the
dimension.
If the keyword argument "order_by_expression" is passed, this expression will
appear last in the list of columns and group_bys.
The following additional keyword parameters are also supported:
Args:
lookup (:obj:`dict`):
A dictionary that is used to map values to new values.
Note: Lookup adds a ``formatter`` callable as the first
item in the list of formatters.
lookup_default (:obj:`object`)
A default to show if the value can't be found in the
lookup dictionary.
Returns:
A Filter object
:param lookup: dict A dictionary to translate values into
:param lookup_default: A default to show if the value can't be found in the
lookup dictionary.
"""
def __init__(self, expression, **kwargs):
super(Dimension, self).__init__(**kwargs)
if self.datatype is None:
self.datatype = datatype_from_column_expression(expression)
# We must always have a value role
self.roles = {"value": expression}
for k, v in kwargs.items():
role = None
if k.endswith("_expression"):
# Remove _expression to get the role
role = k[:-11]
if role:
if role == "raw":
raise BadIngredient("raw is a reserved role in dimensions")
self.roles[role] = v
if not self.datatype_by_role:
for k, expr in self.roles.items():
self.datatype_by_role[k] = datatype_from_column_expression(expr)
self.columns = []
self._group_by = []
self.role_keys = []
if "id" in self.roles:
self.columns.append(self.roles["id"])
self._group_by.append(self.roles["id"])
self.role_keys.append("id")
if "value" in self.roles:
self.columns.append(self.roles["value"])
self._group_by.append(self.roles["value"])
self.role_keys.append("value")
# Add all the other columns in sorted order of role
# with order_by coming last
# For instance, if the following are passed
        # expression, id_expression, order_by_expression, zed_expression, the order of
# columns would be "id", "value", "zed", "order_by"
# When using group_bys for ordering we put them in reverse order.
ordered_roles = [
k for k in sorted(self.roles.keys()) if k not in ("id", "value")
]
# Move order_by to the end
if "order_by" in ordered_roles:
ordered_roles.remove("order_by")
ordered_roles.append("order_by")
for k in ordered_roles:
self.columns.append(self.roles[k])
self._group_by.append(self.roles[k])
self.role_keys.append(k)
if "lookup" in kwargs:
self.lookup = kwargs.get("lookup")
if not isinstance(self.lookup, dict):
raise BadIngredient("lookup must be a dictionary")
# Inject a formatter that performs the lookup
if "lookup_default" in kwargs:
self.lookup_default = kwargs.get("lookup_default")
self.formatters.insert(
0, lambda value: self.lookup.get(value, self.lookup_default)
)
else:
self.formatters.insert(0, lambda value: self.lookup.get(value, value))
@property
def group_by(self):
# Ensure the labels are generated
if not self._labels:
list(self.query_columns)
if self.group_by_strategy == "labels":
return [lbl for gb, lbl in zip(self._group_by, self._labels)]
else:
return self._group_by
@group_by.setter
def group_by(self, value):
self._group_by = value
@property
def cauldron_extras(self):
"""Yield extra tuples containing a field name and a callable that takes
a row
"""
# This will format the value field
for extra in super(Dimension, self).cauldron_extras:
yield extra
yield self.id + "_id", lambda row: getattr(row, self.id_prop)
def make_column_suffixes(self):
"""Make sure we have the right column suffixes. These will be appended
to `id` when generating the query.
"""
if self.formatters:
value_suffix = "_raw"
else:
value_suffix = ""
return tuple(
value_suffix if role == "value" else "_" + role for role in self.role_keys
)
@property
def id_prop(self):
"""The label of this dimensions id in the query columns"""
if "id" in self.role_keys:
return self.id + "_id"
else:
# Use the value dimension
if self.formatters:
return self.id + "_raw"
else:
return self.id
class IdValueDimension(Dimension):
"""
DEPRECATED: A convenience class for creating a Dimension
with a separate ``id_expression``. The following are identical.
.. code:: python
d = Dimension(Student.student_name, id_expression=Student.student_id)
d = IdValueDimension(Student.student_id, Student.student_name)
The former approach is recommended.
Args:
id_expression (:obj:`ColumnElement`)
A column expression that is used to identify the id
for a Dimension
value_expression (:obj:`ColumnElement`)
A column expression that is used to identify the value
for a Dimension
"""
def __init__(self, id_expression, value_expression, **kwargs):
kwargs["id_expression"] = id_expression
super(IdValueDimension, self).__init__(value_expression, **kwargs)
class LookupDimension(Dimension):
"""DEPRECATED Returns the expression value looked up in a lookup dictionary"""
def __init__(self, expression, lookup, **kwargs):
"""A Dimension that replaces values using a lookup table.
:param expression: The dimension field
:type value: object
        :param lookup: A dictionary of key/value pairs. Keys found in the
            value of this Dimension will be replaced by their mapped values
:type operator: dict
:param default: The value to use if a dimension value isn't
found in the lookup table. The default behavior is to
show the original value if the value isn't found in the
lookup table.
:type default: object
"""
if "default" in kwargs:
kwargs["lookup_default"] = kwargs.pop("default")
kwargs["lookup"] = lookup
super(LookupDimension, self).__init__(expression, **kwargs)
class Metric(Ingredient):
"""A simple metric created from a single expression"""
def __init__(self, expression, **kwargs):
super(Metric, self).__init__(**kwargs)
self.columns = [expression]
if self.datatype is None:
self.datatype = datatype_from_column_expression(expression)
# We must always have a value role
self.roles = {"value": expression}
def build_filter(self, value, operator=None):
"""Building filters with Metric returns Having objects."""
f = super().build_filter(value, operator=operator)
return Having(f.filters[0])
class DivideMetric(Metric):
"""A metric that divides a numerator by a denominator handling several
possible error conditions
The default strategy is to add an small value to the denominator
Passing ifzero allows you to give a different value if the denominator is
zero.
"""
def __init__(self, numerator, denominator, **kwargs):
ifzero = kwargs.pop("ifzero", "epsilon")
epsilon = kwargs.pop("epsilon", 0.000000001)
if ifzero == "epsilon":
# Add an epsilon value to denominator to avoid divide by zero
# errors
expression = cast(numerator, Float) / (
func.coalesce(cast(denominator, Float), 0.0) + epsilon
)
else:
# If the denominator is zero, return the ifzero value otherwise do
# the division
expression = case(
((cast(denominator, Float) == 0.0, ifzero),),
else_=cast(numerator, Float) / cast(denominator, Float),
)
super(DivideMetric, self).__init__(expression, **kwargs)
class WtdAvgMetric(DivideMetric):
"""A metric that generates the weighted average of a metric by a weight."""
def __init__(self, expression, weight_expression, **kwargs):
numerator = func.sum(expression * weight_expression)
denominator = func.sum(weight_expression)
super(WtdAvgMetric, self).__init__(numerator, denominator, **kwargs)
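    # Illustrative usage (a sketch; `Census` is a hypothetical model used only
    # for illustration): WtdAvgMetric(Census.age, Census.pop2000, id="avg_age")
    # computes sum(age * pop2000) / sum(pop2000), i.e. a population-weighted
    # mean age, reusing DivideMetric's divide-by-zero handling.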
class InvalidIngredient(Ingredient):
pass
| 35.231258 | 87 | 0.587153 | 3,254 | 27,727 | 4.881684 | 0.135526 | 0.020397 | 0.016997 | 0.008687 | 0.354234 | 0.320176 | 0.276802 | 0.257098 | 0.225055 | 0.209191 | 0 | 0.001931 | 0.327587 | 27,727 | 786 | 88 | 35.276081 | 0.850086 | 0.367007 | 0 | 0.286765 | 0 | 0 | 0.062462 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0.004902 | 0.017157 | 0.009804 | 0.269608 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
022808169bdf3198652cecc6de34675dd5c43e12 | 1,826 | py | Python | scripts/dexp2p/multi-server/dexp2p_orderbooks_parser_ms.py | SirSevenG/komodo-cctools-python | d05b462fcbec87ada5144b5d634162c47fa2bf21 | [
"MIT"
] | 7 | 2019-05-16T16:38:48.000Z | 2021-06-19T08:20:09.000Z | scripts/dexp2p/multi-server/dexp2p_orderbooks_parser_ms.py | tonymorony/GatewaysCC-TUI | 6a5b40cd7bfe7509c6891bb9425a1a0f76ebd9a7 | [
"MIT"
] | 13 | 2019-06-03T06:24:53.000Z | 2019-09-26T08:37:55.000Z | scripts/dexp2p/multi-server/dexp2p_orderbooks_parser_ms.py | tonymorony/GatewaysCC-TUI | 6a5b40cd7bfe7509c6891bb9425a1a0f76ebd9a7 | [
"MIT"
] | 8 | 2019-06-02T01:19:44.000Z | 2021-02-26T14:25:31.000Z | import json
import os
# loading nodes packages nodeport[n] : ["id"]["hash"]
package_files_list = os.listdir('spam_p2p/packages')
nodes_packages = {}
last_port = 7000 + int(os.getenv('NODESAMOUNT'))
self_ip = "159.69.45.70"
for node_port in range(7000, last_port):
nodes_packages[node_port] = {}
for file in package_files_list:
if int(file.split("_")[1]) == node_port:
with open('spam_p2p/packages/' + file) as json_file:
packages_counter = 0
list_of_pacakges = json_file.readlines()
for package in list_of_pacakges:
package_json = json.loads(package)
packages_counter = packages_counter + 1
nodes_packages[node_port][packages_counter] = {}
nodes_packages[node_port][packages_counter]["id"] = package_json["result"]["id"]
nodes_packages[node_port][packages_counter]["hash"] = package_json["result"]["hash"]
nodes_packages[node_port]["total"] = packages_counter
# loading nodes orderbooks nodeport[n] : [tag][orderbook]
orderbook_files_list = os.listdir('spam_p2p/orderbooks')
# comparing broadcasted packages vs received orderbooks
for nodeport in nodes_packages:
packages_amount_sent = nodes_packages[nodeport]["total"]
print("Packages sent by node " + self_ip + ":" + str(nodeport) + " : " + str(packages_amount_sent))
for file in orderbook_files_list:
with open('spam_p2p/orderbooks/' + file) as json_file:
file_content = json.load(json_file)
packages_amount = len(json.loads(file_content))
node_address = file.split("_")[1] + ":" + file.split("_")[2][:-5]
print("Packages received from node " + node_address + " " + str(packages_amount) + " by node " + self_ip + ":" + file.split("_")[0]) | 45.65 | 137 | 0.650602 | 228 | 1,826 | 4.929825 | 0.289474 | 0.104093 | 0.075623 | 0.093416 | 0.140569 | 0.140569 | 0 | 0 | 0 | 0 | 0 | 0.019608 | 0.217963 | 1,826 | 40 | 137 | 45.65 | 0.767507 | 0.089266 | 0 | 0 | 0 | 0 | 0.121084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02286a6960aec733228ab4445bc00616a036bf4f | 10,606 | py | Python | antiCheatUtils.py | sharad297/Student-Online-Exam-AntiCheat-Tool | 4ec9a3bf7e6f2ee45a73bac2b25010005b5c7ef4 | [
"MIT"
] | 9 | 2020-09-18T05:03:57.000Z | 2022-03-03T07:01:08.000Z | antiCheatUtils.py | sharad297/Student-Online-Exam-AntiCheat-Tool | 4ec9a3bf7e6f2ee45a73bac2b25010005b5c7ef4 | [
"MIT"
] | 1 | 2021-12-02T06:58:58.000Z | 2021-12-02T06:58:58.000Z | antiCheatUtils.py | sharad297/Student-Online-Exam-AntiCheat-Tool | 4ec9a3bf7e6f2ee45a73bac2b25010005b5c7ef4 | [
"MIT"
] | 6 | 2020-10-01T14:36:49.000Z | 2022-02-21T23:47:04.000Z | ####### Utility and model loading #############
import cv2
import colorsys
import random
import numpy as np
import tensorflow as tf
import face_recognition as faceRec
from keras.layers import Activation
from keras.models import Model
from keras.utils import get_custom_objects
### Custom Class Inheritance ######
class Mish(Activation):
def __init__(self, activation, **kwargs):
super(Mish, self).__init__(activation, **kwargs)
self.__name__ = 'mish'
def mysoftplus(x):
mask_min = tf.cast((x<-20.0),tf.float32)
ymin = mask_min*tf.math.exp(x)
mask_max = tf.cast((x>20.0),tf.float32)
ymax = mask_max*x
    mask = tf.cast((abs(x)<=20.0),tf.float32)
y = mask*tf.math.log(tf.math.exp(x) + 1.0)
return(ymin+ymax+y)
def mish(x):
return (x* tf.math.tanh(mysoftplus(x)))
get_custom_objects().update({'mish': Mish(mish)})
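# Quick sanity check of the activation (illustrative only, not executed here):
#   mish(0) == 0, mish(x) ~= x for large positive x, and mish(x) -> 0 as x -> -inf
#   e.g. tf.print(mish(tf.constant([-20.0, 0.0, 20.0])))  # approximately [0.0, 0.0, 20.0]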
print('Loading.....')
# Load the model
from keras.models import load_model,Model
yolo_model = load_model("models/yolo/yolov4.h5")
############# Helper Util Funcs #################
def read_labels(labels_path):
with open(labels_path) as f:
labels = f.readlines()
labels = [c.strip() for c in labels]
return labels
# Load the labels
labels = read_labels("models/yolo/coco_classes.txt")
#Manually Taken index for Cellphones
cellphone_idx = 67
# load and prepare an image
def load_image_pixels(image, shape):
# load the CV image to get its shape
width, height,_ = image.shape
# load the image with the required size
image = cv2.resize(image,shape)
# convert to numpy array
image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
# scale pixel values to [0, 1]
image = image.astype('float32')
#Normalize Image
image /= 255.0
# add a dimension so that we have one sample
image = np.expand_dims(image, 0)
return image,width,height
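# Illustrative call (a sketch; the 608x608 input shape is assumed from the
# coordinate scaling used later in Inference()):
#   image, width, height = load_image_pixels(frame, (608, 608))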
######### Bounding Box Class to store bounding box info for easier access #########
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, objness = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.objness = objness
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
######### Helper Functions: Lots of them ################
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def decode_netout(netout, anchors, obj_thresh, net_h, net_w, anchors_nb, scales_x_y):
grid_h, grid_w = netout.shape[:2]
nb_box = anchors_nb
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5 # 5 = bx,by,bh,bw,pc
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2]) # x, y
netout[..., :2] = netout[..., :2]*scales_x_y - 0.5*(scales_x_y - 1.0) # scale x, y
netout[..., 4:] = _sigmoid(netout[..., 4:]) # objectness + classes probabilities
for i in range(grid_h*grid_w):
row = i / grid_w
col = i % grid_w
for b in range(nb_box):
# 4th element is objectness
objectness = netout[int(row)][int(col)][b][4]
if(objectness > obj_thresh):
#print("objectness: ",objectness)
# first 4 elements are x, y, w, and h
x, y, w, h = netout[int(row)][int(col)][b][:4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
# last elements are class probabilities
classes = objectness*netout[int(row)][col][b][5:]
classes *= classes > obj_thresh
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
boxes.append(box)
return boxes
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
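# Worked example (illustrative): two 1x1 boxes whose x-ranges overlap by half
# intersect in an area of 0.5 and cover a union of 1.5, so bbox_iou returns ~0.33.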
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
def generate_colors(class_names):
hsv_tuples = [(x / len(class_names), 1., 1.) for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] ), int(x[1] ), int(x[2] )), colors))
random.seed(10101) # Fixed seed for consistent colors across runs.
random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.
random.seed(None) # Reset seed to default.
return colors
# get all of the results above a threshold (Edited to suit detection of Cell Phones only)
def get_boxes(boxes, labels, thresh, colors):
v_boxes, v_labels, v_scores, v_colors = list(), list(), list(), list()
# enumerate all boxes
for box in boxes:
if box.classes[cellphone_idx] > thresh:
v_boxes.append(box)
v_labels.append(labels[cellphone_idx])
v_scores.append(box.classes[cellphone_idx]*100)
v_colors.append(colors[cellphone_idx])
# don't break, many labels may trigger for one box
return v_boxes, v_labels, v_scores, v_colors
class_threshold = 0.65
# Main Yolo Inference for loop
def Inference(image,input_w,input_h,colors,labels,phoneFramesTotal):
    # Get dimensions of the resized frame image.
# Run the model
yhat = yolo_model.predict(image)
# Compute the Yolo layers
obj_thresh = 0.55
anchors = [ [12, 16, 19, 36, 40, 28],[36, 75, 76, 55, 72, 146],[142, 110, 192, 243, 459, 401]]
scales_x_y = [1.2, 1.1, 1.05]
boxes = list()
for i in range(len(anchors)):
# decode the output of the network
boxes += decode_netout(yhat[i][0], anchors[i], obj_thresh, input_h, input_w, len(anchors), scales_x_y[i])
# Correct the boxes according the inital size of the image and Do NMS
#correct_yolo_boxes(boxes, image_h, image_w, input_h, input_w)
do_nms(boxes, 0.38)
# Final Boxes
v_boxes, v_labels, v_scores, v_colors = get_boxes(boxes, labels, 0.5, colors)
## Return image as is if no boxes found #####
if len(v_labels) == 0:
return np.reshape(image,image.shape[1:]),phoneFramesTotal
    ## Possibly redundant if statement since we directly only pick the cellphone idx. Needs to be checked
if 'cell phone' in v_labels:
phoneFramesTotal +=1
for i in range(len(v_boxes)):
box = v_boxes[i]
# get coordinates
y1, x1, y2, x2 = int((box.ymin)*608), int((box.xmin)*608), int((box.ymax)*608), int((box.xmax)*608)
if len(image.shape) == 4:
image = np.squeeze(image,axis = 0)
image = cv2.rectangle(image, (x1, y2),(x2, y1),v_colors[i], 2)
label = "%s (%.3f)" % (v_labels[i], v_scores[i])
image = cv2.putText(image, label, (x1,y1 -5), cv2.FONT_HERSHEY_SIMPLEX,0.5,(v_colors[i]), 2)
return image,phoneFramesTotal
########## Main Face Recognition inference for loop ##########
def faceRecInference(faceEncodingsKnown,faceNames,frame,absentFramesTotal,nameToCheck):
# Resize Frame to half for better performance and convert from BGR 2 RGB
frameS = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
frameS = frameS[:, :, ::-1]
faceLocsCurr = faceRec.face_locations(frameS)
faceEncsCurr = faceRec.face_encodings(frameS, faceLocsCurr)
faceNamesInFrame = []
for faceEnc in faceEncsCurr:
### Initialize with Unknown face and replace if match found ######
name = 'Unknown Face'
        matches = faceRec.compare_faces(faceEncodingsKnown,faceEnc,tolerance = 0.41) # 0.41 strictness for face recognition since I found this to be a good balance. Needs more testing however
dist = faceRec.face_distance(faceEncodingsKnown,faceEnc)
        bestMatchIdx = np.argmin(dist)  # smallest face distance = closest match
if matches[bestMatchIdx]:
name = faceNames[bestMatchIdx]
faceNamesInFrame.append(name)
#### Check if Student to Check for is the only one giving the exam and not some one else #####
if nameToCheck not in faceNamesInFrame:
absentFramesTotal +=1
CenterPos = ((int) (frame.shape[1]/2 - 268/2 + 15), (int) (frame.shape[0]/2 - 36/2) + 160)
cv2.putText(frame, 'Student Missing',CenterPos, cv2.FONT_HERSHEY_TRIPLEX, 1.25, (0,0,255), 1)
for (top,right,bottom,left),name in zip(faceLocsCurr,faceNamesInFrame):
        # Scaling bounding boxes back to fit the original image
top *= 2
right *= 2
bottom *= 2
left *= 2
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255), 1)
return frame,absentFramesTotal
class WrongBoolVal(Exception):
pass
def getFrameSec(fps):
fSec = 100/(fps*100)
return fSec
| 34.435065 | 187 | 0.610032 | 1,511 | 10,606 | 4.166115 | 0.251489 | 0.002859 | 0.006354 | 0.00699 | 0.06386 | 0.043685 | 0.032883 | 0.012867 | 0 | 0 | 0 | 0.039954 | 0.259004 | 10,606 | 307 | 188 | 34.547231 | 0.761038 | 0.184424 | 0 | 0.030769 | 0 | 0 | 0.014428 | 0.005795 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092308 | false | 0.005128 | 0.051282 | 0.010256 | 0.25641 | 0.005128 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0228fc3b1ed698018065725777a758acd5887933 | 3,531 | py | Python | mc-construction.py | therealpickle/mc-construction | d3b6aa8d111484fa3190d2d8bd8396e1235cad69 | [
"MIT"
] | null | null | null | mc-construction.py | therealpickle/mc-construction | d3b6aa8d111484fa3190d2d8bd8396e1235cad69 | [
"MIT"
] | null | null | null | mc-construction.py | therealpickle/mc-construction | d3b6aa8d111484fa3190d2d8bd8396e1235cad69 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
import math
import argparse
import subprocess
from fill_generator import *
from shapes import SphereSolid, HemisphereSolid, ArcTunnelSolid
PACKAGE_BASE_PATH = "packbase"
FUNCTION_PATH = os.path.join(PACKAGE_BASE_PATH, "functions")
PACKAGE_NAME = "Pickle_Functions"
SERVER_ADDRESS = "mason@nastypickle"
SERVER_PATHS = [
"minecraftbe/PickleWorld/worlds/'Pickle Level'/development_behavior_packs",
"minecraftbe/PickleWorld/development_behavior_packs",
]
LOCAL_DIRS = [os.path.join("C:","Users","there","AppData","Local","Packages",
"Microsoft.MinecraftUWP_8wekyb3d8bbwe","LocalState","games","com.mojang",
"development_behavior_packs")]
parser = argparse.ArgumentParser()
parser.add_argument("--copy-to-server", default=False, action="store_true")
parser.add_argument("--copy-to-sandbox", default=False, action="store_true")
args = parser.parse_args()
if args.copy_to_server:
cmd = "cp -r {} {}".format(PACKAGE_BASE_PATH, PACKAGE_NAME)
cmd = cmd.split()
subprocess.run(cmd)
for path in SERVER_PATHS:
ssh_cmd = ['scp', '-r', PACKAGE_NAME, "{}:{:s}".format(SERVER_ADDRESS, path)]
subprocess.run(ssh_cmd)
cmd = "rm -r {}".format(PACKAGE_NAME)
cmd = cmd.split()
subprocess.run(cmd)
exit()
if args.copy_to_sandbox:
cmd = "cp -r {} {}".format(PACKAGE_BASE_PATH, PACKAGE_NAME)
cmd = cmd.split()
subprocess.run(cmd)
for path in LOCAL_DIRS:
cmd = "cp -r {} {}".format(PACKAGE_BASE_PATH, PATH)
cmd = cmd.split()
subprocess.run(cmd)
cmd = "rm -r {}".format(PACKAGE_NAME)
cmd = cmd.split()
subprocess.run(cmd)
exit()
MAX_CMDS = 10000
# note: these objects are created with their center at
# the player's current position
def write_commands(fname, commands):
pathname = os.path.join(FUNCTION_PATH, "{}.mcfunction".format(fname))
if len(commands) <= MAX_CMDS:
with open(pathname, 'w') as f:
for cmd in commands:
f.write(cmd + "\n")
else:
raise Exception("Commands exceed limit ({})".format(len(commands)))
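# Illustrative usage (a sketch; the exact in-game invocation depends on how the
# behavior pack is named and loaded): write_commands("dome-d17-glass", cmds)
# creates packbase/functions/dome-d17-glass.mcfunction, runnable in-game with
# something like `/function dome-d17-glass`.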
if __name__ == '__main__':
for Shape, label in [
(HemisphereSolid, 'dome'),
(SphereSolid, 'sphere-shell')]:
for diameter in [17, 33, 65]:
outer = Shape(diameter)
outer_regions = outer.generate_regions()
inner = Shape(diameter - 2)
inner_regions = inner.generate_regions()
cmds_outer = cmd_fill(outer_regions, 'glass')
cmds_inner = cmd_fill(inner_regions, 'air')
fname = "{}-d{}-glass".format(label, diameter)
cmds = cmds_outer + cmds_inner
print("{}: {}".format(fname, len(cmds)))
write_commands(fname, cmds)
for axis in ['z', 'x']:
for diameter, length in ((9, 17), (11, 17), (9, 33), (11, 33)):
outer = ArcTunnelSolid(diameter, length, axis=axis)
outer_regions = outer.generate_regions()
inner = ArcTunnelSolid(diameter - 2, length, axis=axis)
inner_regions = inner.generate_regions()
cmds_outer = cmd_fill(outer_regions, 'glass')
cmds_inner = cmd_fill(inner_regions, 'air')
fname = "arctunnel-{}-d{}-l{}-glass".format(axis, diameter, length)
cmds = cmds_outer + cmds_inner
print("{}: {}".format(fname, len(cmds)))
write_commands(fname, cmds)
# for length in [9, 17, 33]:
| 29.923729 | 85 | 0.625035 | 425 | 3,531 | 5.002353 | 0.324706 | 0.019755 | 0.035278 | 0.049389 | 0.389934 | 0.342897 | 0.296802 | 0.284102 | 0.284102 | 0.284102 | 0 | 0.013328 | 0.235061 | 3,531 | 117 | 86 | 30.179487 | 0.773787 | 0.038233 | 0 | 0.375 | 0 | 0 | 0.158903 | 0.061616 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0125 | false | 0 | 0.075 | 0 | 0.0875 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
022adfe6c49c357598ccce433031644908a43ff0 | 1,658 | py | Python | python/graph/graph/graph.py | Samerodeh/data-structures-and-algorithms | 29658d630ccf20cc77fab966668013778cd6895e | [
"MIT"
] | null | null | null | python/graph/graph/graph.py | Samerodeh/data-structures-and-algorithms | 29658d630ccf20cc77fab966668013778cd6895e | [
"MIT"
] | 1 | 2021-07-27T15:09:50.000Z | 2021-07-27T15:09:50.000Z | python/graph/graph/graph.py | Samerodeh/data-structures-and-algorithms | 29658d630ccf20cc77fab966668013778cd6895e | [
"MIT"
] | 1 | 2021-11-08T05:36:32.000Z | 2021-11-08T05:36:32.000Z | class Vertex:
def __init__(self, value):
self.value = value
self.next = None
def __str__(self):
return self.value
class Edge:
def __init__(self, vertex, weight=1):
self.vertex = vertex
self.weight = weight
class Graph:
def __init__(self):
self.graph = {}
def add_node(self, value):
node = Vertex(value)
self.graph[node] = []
return node
def add_edge(self, vertex1, vertex2, weight=1):
if vertex1 not in self.graph:
raise KeyError('Vertex1 is not in the graph')
if vertex2 not in self.graph:
raise KeyError('Vertex2 is not in the graph')
edge = Edge(vertex2, weight)
self.graph[vertex1].append(edge)
def get_nodes(self):
return self.graph.keys()
def get_neighbors(self, vertex):
collection = []
connections = self.graph.get(vertex, [])
for neighbor in connections:
holder = {}
holder[neighbor] = neighbor.weight
collection.append(holder)
return collection
def size(self):
return len(self.graph) if len(self.graph) > 0 else None
if __name__ == "__main__":
graph = Graph()
vertex1 = graph.add_node('a')
vertex2 = graph.add_node('b')
vertex3 = graph.add_node('c')
vertex4 = graph.add_node('d')
graph.add_edge(vertex1, vertex2)
graph.add_edge(vertex2, vertex1)
graph.add_edge(vertex1, vertex3)
graph.add_edge(vertex3, vertex1)
graph.add_edge(vertex1, vertex4)
graph.add_edge(vertex2, vertex3)
print(graph.get_nodes())
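    # Illustrative: neighbours of vertex1, each returned as a one-entry {edge: weight} dict
    print(graph.get_neighbors(vertex1))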
print('Size: ', graph.size()) | 25.507692 | 63 | 0.606152 | 207 | 1,658 | 4.666667 | 0.231884 | 0.082816 | 0.074534 | 0.059006 | 0.140787 | 0.055901 | 0 | 0 | 0 | 0 | 0 | 0.022651 | 0.281062 | 1,658 | 65 | 64 | 25.507692 | 0.787752 | 0 | 0 | 0 | 0 | 0 | 0.0434 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.18 | false | 0 | 0 | 0.06 | 0.34 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
022d5178da6fc8dc7659281127f0846c98db31f7 | 3,720 | py | Python | components/employee_extractor.py | rjaas/Delta | 439bd608c229b04e07d75c981d07bceba45eca8d | [
"Unlicense"
] | null | null | null | components/employee_extractor.py | rjaas/Delta | 439bd608c229b04e07d75c981d07bceba45eca8d | [
"Unlicense"
] | null | null | null | components/employee_extractor.py | rjaas/Delta | 439bd608c229b04e07d75c981d07bceba45eca8d | [
"Unlicense"
] | null | null | null | """
Component for fuzzy extraction of employee names.
Created on the basis of the Rasa Open Source component RegexEntityExtractor.
https://github.com/RasaHQ/rasa/blob/main/rasa/nlu/extractors/regex_entity_extractor.py
"""
import typing
from components.helper_functions import parse_nlu
from components.levenshtein import manual_levenshtein
from typing import Any, Optional, Text, Dict, List
from rasa.nlu.extractors.extractor import EntityExtractor
from rasa.nlu.config import RasaNLUModelConfig
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.shared.nlu.constants import (
ENTITIES,
ENTITY_ATTRIBUTE_VALUE,
TEXT,
ENTITY_ATTRIBUTE_TYPE,
INTENT,
PREDICTED_CONFIDENCE_KEY
)
from fuzzywuzzy import process
if typing.TYPE_CHECKING:
from rasa.nlu.model import Metadata
class EmployeeExtractor(EntityExtractor):
    # Default values
defaults = {
        # threshold for matching an employee name against the text
"match_threshold": 80,
        # Location of the employee names data file
"employee_file_path": "data/employee.yml",
}
def __init__(self, component_config: Optional[Dict[Text, Any]] = None):
super().__init__(component_config)
self.employees = []
self.match_threshold = self.component_config["match_threshold"]
        # Read the employee names into memory
with open(self.defaults['employee_file_path'], "r") as f:
for line in f.readlines()[4:]:
self.employees.append(line.replace(" - ", "").replace("\n", ""))
        # Read the words occurring in the intents into memory
self.intent_words = parse_nlu(["- intent: request_employee_office\n"])
def remove_intent_words(self, text):
text_list = text.split(" ")
for word in text.split(" "):
# best_match = process.extractOne(word, self.intent_words)
best_match = manual_levenshtein(word, self.intent_words)
if best_match[1] < 2:
text_list.remove(word)
return " ".join(text_list)
def train(
self,
training_data: TrainingData,
config: Optional[RasaNLUModelConfig] = None,
**kwargs: Any,
) -> None:
pass
def _extract_entities(self, message: Message) -> List[Dict[Text, Any]]:
entities = []
        # Avoid unnecessary entity extraction by checking the intent first
if message.get(INTENT)['name'] not in {"request_employee_office"}:
return entities
best_match = process.extractOne(self.remove_intent_words(message.get(TEXT)), self.employees)
if best_match[1] >= self.match_threshold:
entities.append({
ENTITY_ATTRIBUTE_TYPE: "employee",
ENTITY_ATTRIBUTE_VALUE: best_match[0],
PREDICTED_CONFIDENCE_KEY: best_match[1]
})
return entities
def process(self, message: Message, **kwargs: Any) -> None:
extracted_entities = self._extract_entities(message)
extracted_entities = self.add_extractor_name(extracted_entities)
message.set(ENTITIES, message.get(ENTITIES, []) + extracted_entities, add_to_output=True)
def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
pass
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Optional[Text] = None,
model_metadata: Optional["Metadata"] = None,
cached_component: Optional["EntityExtractor"] = None,
**kwargs: Any,
) -> "EntityExtractor":
if cached_component:
return cached_component
else:
return cls(meta)
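# Hedged usage sketch (not part of the original project): in a Rasa 2.x NLU pipeline
# this custom component would typically be registered by module path in config.yml, e.g.
#   pipeline:
#     - name: components.employee_extractor.EmployeeExtractor
#       match_threshold: 80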
| 34.12844 | 100 | 0.668011 | 415 | 3,720 | 5.787952 | 0.349398 | 0.026228 | 0.018318 | 0.021232 | 0.024147 | 0.024147 | 0 | 0 | 0 | 0 | 0 | 0.002811 | 0.234946 | 3,720 | 108 | 101 | 34.444444 | 0.841181 | 0.132796 | 0 | 0.077922 | 0 | 0 | 0.063843 | 0.014949 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.025974 | 0.142857 | 0 | 0.324675 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
022dd51656b90c3a87292ea72c5ffe92fbde6f0e | 1,462 | py | Python | my-answers/01-name-concatenation.py | ifenium/20-questions | dbc154ae41dc3cb09a36e6f580017ccfd95e8c4e | [
"MIT"
] | null | null | null | my-answers/01-name-concatenation.py | ifenium/20-questions | dbc154ae41dc3cb09a36e6f580017ccfd95e8c4e | [
"MIT"
] | null | null | null | my-answers/01-name-concatenation.py | ifenium/20-questions | dbc154ae41dc3cb09a36e6f580017ccfd95e8c4e | [
"MIT"
] | null | null | null | # Solution to Name Concatenation
# Print out Welcome, <First-Name> <Last-Name> (<Age>) in a new line.
first_name = str(input('Enter your first name: '))
last_name = str(input('Enter your last name: '))
age = int(input('Enter your age: '))
print('Welcome, {} {} ({})'.format(first_name, last_name, age))
# Variant-01
# Print out Welcome, <First-Name> <Last-Name> (<Year-Of-Birth>), where Year-of-Birth is derived as Age subtracted from Current-Year.
from datetime import date
today = date.today()
first_name = str(input('Enter your first name: '))
last_name = str(input('Enter your last name: '))
age = int(input('Enter your year of birth (e.g. 1992): '))
print('Welcome, {} {} ({})'.format(first_name, last_name, today.year-age))
# Variant-02
# Request the user's gender.
# Print out Welcome, <First-Name> [Son/Daughter] of <Last-Name> (<Year-Of-Birth>), where Son is printed out if male, and Daughter if female.
from datetime import date
today = date.today()
first_name = str(input('Enter your first name: '))
last_name = str(input('Enter your last name: '))
age = int(input('Enter your year of birth (e.g. 1992): '))
sex = str(input('Enter your sex (e.g. female): '))
if sex.lower() == 'male':
print('Welcome, {} Son of {} ({})'.format(first_name, last_name, today.year-age))
elif sex.lower() == 'female':
print('Welcome, {} Daughter of {} ({})'.format(first_name, last_name, today.year-age))
else:
print('Please check your responses')
| 35.658537 | 140 | 0.674419 | 226 | 1,462 | 4.300885 | 0.265487 | 0.12037 | 0.12037 | 0.157407 | 0.679012 | 0.648148 | 0.606996 | 0.492798 | 0.45679 | 0.380658 | 0 | 0.009639 | 0.148427 | 1,462 | 40 | 141 | 36.55 | 0.771084 | 0.284542 | 0 | 0.545455 | 0 | 0 | 0.372832 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.227273 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
022e5ffeb24037970507fd920f75ae032ce2b33f | 9,792 | py | Python | hwmapping/cli/sandbox.py | phip123/workload-aware-k8s | 79e047916b7239467f299bd2ad605c6ac375cbca | [
"MIT"
] | 5 | 2021-03-08T10:27:27.000Z | 2022-03-24T14:37:17.000Z | hwmapping/cli/sandbox.py | phip123/workload-aware-k8s | 79e047916b7239467f299bd2ad605c6ac375cbca | [
"MIT"
] | null | null | null | hwmapping/cli/sandbox.py | phip123/workload-aware-k8s | 79e047916b7239467f299bd2ad605c6ac375cbca | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import pickle
import random
from collections import defaultdict
import numpy as np
from skippy.core.scheduler import Scheduler
from skippy.core.storage import StorageIndex
from hwmapping.calculations import calculate_diff_entropy as heterogeneity_score, calculate_requirements
from hwmapping.cli.eval_sim import save_sim_result, run_sim
from hwmapping.device import ArchProperties
from hwmapping.etheradapter import convert_to_ether_nodes, convert_to_devices
from hwmapping.evaluation import images
from hwmapping.evaluation.benchmarks.sine import SineBenchmark
from hwmapping.evaluation.deployments import create_all_deployments
from hwmapping.evaluation.fetdistributions import execution_time_distributions
from hwmapping.evaluation.functionsim import PythonHttpSimulatorFactory
from hwmapping.evaluation.resources import resources_per_node_image
from hwmapping.evaluation.results import set_requirements
from hwmapping.evaluation.run import EnvSettings
from hwmapping.evaluation.topology import urban_sensing_topology
from hwmapping.faas.predicates import NodeHasAcceleratorPred, NodeHasFreeTpu, NodeHasFreeGpu, CanRunPred
from hwmapping.faas.system import *
from hwmapping.generator import GeneratorSettings, generate_devices, xeon_reqs
from hwmapping.model import *
from hwmapping.notebook import skippy, ga
logging.basicConfig(level=logging.INFO)
base_reqs = xeon_reqs()
test_settings = GeneratorSettings(
arch={
Arch.X86: 0.3,
Arch.AARCH64: 0.5,
Arch.ARM32: 0.2
},
properties={
Arch.X86: ArchProperties(
arch=Arch.X86,
accelerator={
Accelerator.NONE: 0.9,
Accelerator.GPU: 0.1,
Accelerator.TPU: 0
},
cores={
Bins.LOW: 0,
Bins.MEDIUM: 0,
Bins.HIGH: 0.7,
Bins.VERY_HIGH: 0.3
},
location={
Location.CLOUD: 0.6,
Location.MEC: 0.4,
Location.EDGE: 0,
Location.MOBILE: 0
},
connection={
Connection.ETHERNET: 1,
Connection.WIFI: 0,
Connection.MOBILE: 0
},
network={
Bins.LOW: 0,
Bins.MEDIUM: 0,
Bins.HIGH: 0.1,
Bins.VERY_HIGH: 0.9
},
cpu_mhz={
Bins.LOW: 0,
Bins.MEDIUM: 0.7,
Bins.HIGH: 0.25,
Bins.VERY_HIGH: 0.05
},
cpu={
CpuModel.XEON: 0.7,
CpuModel.I7: 0.3
},
ram={
Bins.LOW: 0,
Bins.MEDIUM: 0.05,
Bins.HIGH: 0.45,
Bins.VERY_HIGH: 0.5
},
gpu_vram={
Bins.LOW: 0,
Bins.MEDIUM: 0,
Bins.HIGH: 0.9,
Bins.VERY_HIGH: 0.1
},
gpu_model={
GpuModel.TURING: 1,
},
gpu_mhz={
Bins.LOW: 0,
Bins.MEDIUM: 0,
Bins.HIGH: 1,
Bins.VERY_HIGH: 0
},
disk={
Disk.SSD: 1,
Disk.SD: 0,
Disk.NVME: 0,
Disk.FLASH: 0,
Disk.HDD: 0
}
),
Arch.AARCH64: ArchProperties(
arch=Arch.AARCH64,
accelerator={
Accelerator.NONE: 0.2,
Accelerator.GPU: 0.7,
Accelerator.TPU: 0.1
},
cores={
Bins.LOW: 0,
Bins.MEDIUM: 0.9,
Bins.HIGH: 0.1,
Bins.VERY_HIGH: 0
},
location={
Location.CLOUD: 0,
Location.MEC: 0.2,
Location.EDGE: 0.8,
Location.MOBILE: 0
},
connection={
Connection.ETHERNET: 0.2,
Connection.WIFI: 0.8,
Connection.MOBILE: 0
},
network={
Bins.LOW: 0.1,
Bins.MEDIUM: 0.7,
Bins.HIGH: 0.2,
Bins.VERY_HIGH: 0
},
cpu_mhz={
Bins.LOW: 0.1,
Bins.MEDIUM: 0.8,
Bins.HIGH: 0.1,
Bins.VERY_HIGH: 0
},
cpu={
CpuModel.ARM: 1
},
ram={
Bins.LOW: 0.3,
Bins.MEDIUM: 0.5,
Bins.HIGH: 0.2,
Bins.VERY_HIGH: 0
},
gpu_vram={
Bins.LOW: 0,
Bins.MEDIUM: 0.9,
Bins.HIGH: 0.1,
Bins.VERY_HIGH: 0
},
gpu_model={
GpuModel.PASCAL: 0.3,
GpuModel.MAXWELL: 0.4,
GpuModel.TURING: 0.3
},
gpu_mhz={
Bins.LOW: 0,
Bins.MEDIUM: 0.9,
Bins.HIGH: 0.1,
Bins.VERY_HIGH: 0
},
disk={
Disk.SSD: 0,
Disk.SD: 0.5,
Disk.NVME: 0,
Disk.FLASH: 0.5,
Disk.HDD: 0
}
),
Arch.ARM32: ArchProperties(
arch=Arch.ARM32,
accelerator={
Accelerator.NONE: 1,
Accelerator.GPU: 0,
Accelerator.TPU: 0
},
cores={
Bins.LOW: 0.5,
Bins.MEDIUM: 0.5,
Bins.HIGH: 0,
Bins.VERY_HIGH: 0
},
location={
Location.CLOUD: 0,
Location.MEC: 0,
Location.EDGE: 0.9,
Location.MOBILE: 0.1
},
connection={
Connection.ETHERNET: 0.05,
Connection.WIFI: 0.85,
Connection.MOBILE: 0.1
},
network={
Bins.LOW: 0.6,
Bins.MEDIUM: 0.4,
Bins.HIGH: 0,
Bins.VERY_HIGH: 0
},
cpu_mhz={
Bins.LOW: 0.5,
Bins.MEDIUM: 0.5,
Bins.HIGH: 0,
Bins.VERY_HIGH: 0
},
cpu={
CpuModel.ARM: 1
},
ram={
Bins.LOW: 0.4,
Bins.MEDIUM: 0.6,
Bins.HIGH: 0,
Bins.VERY_HIGH: 0
},
disk={
Disk.SSD: 0,
Disk.SD: 1,
Disk.NVME: 0,
Disk.FLASH: 0,
Disk.HDD: 0
},
gpu_vram={},
gpu_model={},
gpu_mhz={},
)
}
)
use_predefined_devices = True
if use_predefined_devices:
with open('data/collections/collection_01_04_2021/ga_devices/hybrid_balanced_score_7.384.pkl', 'rb') as fd:
devices = pickle.load(fd)
else:
num_devices = 100
devices = generate_devices(num_devices, test_settings)
print(len(devices))
print(heterogeneity_score(base_reqs, calculate_requirements(devices)))
ether_nodes = convert_to_ether_nodes(devices)
print(ether_nodes[0])
device_types = np.unique(list(map(lambda e: e.name[:e.name.rindex('_')], ether_nodes)))
devices_by_type = defaultdict(list)
for device in ether_nodes:
devices_by_type[device.name[:device.name.rindex('_')]].append(device)
print('\navailable nodes')
for device_type in device_types:
print(device_type, len(devices_by_type[device_type]))
print(len(ether_nodes))
print(len(devices))
print(heterogeneity_score(base_reqs, calculate_requirements(convert_to_devices(ether_nodes))))
fet_oracle = FetOracle(execution_time_distributions)
resource_oracle = ResourceOracle(resources_per_node_image)
deployments = list(create_all_deployments(fet_oracle, resource_oracle).values())
function_images = images.all_images
predicates = []
predicates.extend(Scheduler.default_predicates)
predicates.extend([
CanRunPred(fet_oracle, resource_oracle),
NodeHasAcceleratorPred(),
NodeHasFreeGpu(),
NodeHasFreeTpu()
])
priorities = []
skippy_priorities = skippy.get_priorities(
latency_weight=1,
data_weight=1
)
ga_priorities = ga.get_priorities(
fet_oracle,
resource_oracle,
capability_weight=1,
contention_weight=1,
fet_weight=1
)
np.random.seed(1234)
random.seed(1234)
priorities.extend(skippy_priorities)
priorities.extend(ga_priorities)
sched_params = {
'percentage_of_nodes_to_score': 100,
'priorities': priorities,
'predicates': predicates
}
model_folder = './data/collections/collection_01_04_2021/ml'
duration = 200
max_rps = 300
period = 75
benchmark = SineBenchmark('mixed',
duration=duration, max_rps=max_rps, period=period, model_folder=model_folder)
ga_file = 'data/collections/collection_01_04_2021/solutions/req_creation_01_13_2021_22_21_24/mixed/ga_results/k_7_clustering_4c07_edge_cloudlet.pkl'
with open(ga_file, 'rb') as fd:
ga_run = pickle.load(fd)
set_requirements(benchmark, ga_run)
type_run = 'ga'
settings = {
'percentage_nodes_to_score': 100,
'latency_weight': 1,
'data_weight': 1,
'contention_weight': 1,
'capability_weight': 1,
'fet_weight': 1,
'duration': duration,
'max_rps': max_rps,
'period': period,
'type': 'sine',
'optimization': type_run
}
result = run_sim((benchmark, 'all', sched_params, ga_run, settings))
save_sim_result('./data/collections/collection_01_04_2021/adhoc', result)
| 28.8 | 148 | 0.53268 | 1,054 | 9,792 | 4.773245 | 0.214421 | 0.030809 | 0.025442 | 0.041344 | 0.306897 | 0.283045 | 0.222222 | 0.186643 | 0.142119 | 0.105546 | 0 | 0.048199 | 0.370711 | 9,792 | 339 | 149 | 28.884956 | 0.768257 | 0.003472 | 0 | 0.335484 | 0 | 0 | 0.053506 | 0.036798 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.077419 | 0 | 0.077419 | 0.025806 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
022f03246b3d904691c7857113a3cd24068cf637 | 3,547 | py | Python | landlab/components/pet/tests/test_pet.py | awickert/landlab | 496de56717a5877db96f354a1b1285bfabe8b56f | [
"MIT"
] | 1 | 2015-08-17T19:29:50.000Z | 2015-08-17T19:29:50.000Z | landlab/components/pet/tests/test_pet.py | awickert/landlab | 496de56717a5877db96f354a1b1285bfabe8b56f | [
"MIT"
] | 1 | 2016-03-02T01:24:41.000Z | 2016-03-02T01:24:41.000Z | landlab/components/pet/tests/test_pet.py | awickert/landlab | 496de56717a5877db96f354a1b1285bfabe8b56f | [
"MIT"
] | 2 | 2017-07-03T20:21:13.000Z | 2018-09-06T23:58:19.000Z | """
Unit tests for landlab.components.pet.potential_evapotranspiration_field
"""
from nose.tools import assert_equal, assert_true, assert_raises, with_setup
from numpy.testing import assert_array_almost_equal
try:
from nose.tools import assert_is_instance
except ImportError:
from landlab.testing.tools import assert_is_instance
import numpy as np
from landlab import RasterModelGrid
from landlab.components.pet.potential_evapotranspiration_field \
import PotentialEvapotranspiration
(_SHAPE, _SPACING, _ORIGIN) = ((20, 20), (10e0, 10e0), (0., 0.))
_ARGS = (_SHAPE, _SPACING, _ORIGIN)
def setup_grid():
from landlab import RasterModelGrid
grid = RasterModelGrid((20, 20), spacing=10e0)
PET = PotentialEvapotranspiration(grid)
globals().update({
'PET': PET
})
@with_setup(setup_grid)
def test_name():
assert_equal(PET.name, 'Potential Evapotranspiration')
@with_setup(setup_grid)
def test_input_var_names():
assert_equal(PET.input_var_names,
('radiation__ratio_to_flat_surface',))
@with_setup(setup_grid)
def test_output_var_names():
assert_equal(sorted(PET.output_var_names),
['radiation__incoming_shortwave_flux',
'radiation__net_flux',
'radiation__net_longwave_flux',
'radiation__net_shortwave_flux',
'surface__potential_evapotranspiration_rate'])
@with_setup(setup_grid)
def test_var_units():
assert_equal(set(PET.input_var_names) |
set(PET.output_var_names),
set(dict(PET.units).keys()))
assert_equal(PET.var_units('radiation__incoming_shortwave_flux'), 'W/m^2')
assert_equal(PET.var_units('radiation__net_flux'), 'W/m^2')
assert_equal(PET.var_units('radiation__net_longwave_flux'), 'W/m^2')
assert_equal(PET.var_units('radiation__net_shortwave_flux'), 'W/m^2')
assert_equal(PET.var_units('radiation__ratio_to_flat_surface'), 'None')
assert_equal(PET.var_units('surface__potential_evapotranspiration_rate'),
'mm')
@with_setup(setup_grid)
def test_grid_shape():
assert_equal(PET.grid.number_of_node_rows, _SHAPE[0])
assert_equal(PET.grid.number_of_node_columns, _SHAPE[1])
@with_setup(setup_grid)
def test_grid_x_extent():
assert_equal(PET.grid.extent[1], (_SHAPE[1] - 1) * _SPACING[1])
@with_setup(setup_grid)
def test_grid_y_extent():
assert_equal(PET.grid.extent[0], (_SHAPE[0] - 1) * _SPACING[0])
@with_setup(setup_grid)
def test_field_getters():
for name in PET.grid['node']:
field = PET.grid['node'][name]
assert_is_instance(field, np.ndarray)
assert_equal(field.shape,
(PET.grid.number_of_node_rows *
PET.grid.number_of_node_columns, ))
for name in PET.grid['cell']:
field = PET.grid['cell'][name]
assert_is_instance(field, np.ndarray)
assert_equal(field.shape,
(PET.grid.number_of_cell_rows *
PET.grid.number_of_cell_columns, ))
assert_raises(KeyError, lambda: PET.grid['not_a_var_name'])
@with_setup(setup_grid)
def test_field_initialized_to_zero():
for name in PET.grid['node']:
field = PET.grid['node'][name]
assert_array_almost_equal(field, np.zeros(PET.grid.number_of_nodes))
for name in PET.grid['cell']:
field = PET.grid['cell'][name]
assert_array_almost_equal(field, np.zeros(PET.grid.number_of_cells)) | 32.842593 | 78 | 0.691006 | 467 | 3,547 | 4.85439 | 0.197002 | 0.058668 | 0.074107 | 0.07146 | 0.573004 | 0.493163 | 0.343185 | 0.277459 | 0.250993 | 0.250993 | 0 | 0.011543 | 0.193967 | 3,547 | 108 | 79 | 32.842593 | 0.781392 | 0.020299 | 0 | 0.291139 | 0 | 0 | 0.135813 | 0.095156 | 0 | 0 | 0 | 0 | 0.316456 | 1 | 0.126582 | false | 0 | 0.113924 | 0 | 0.240506 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02303cc62f713172d5f459ad8ba2b10e06d0fb18 | 9,107 | py | Python | tests/unit/lib/build_module/test_build_graph.py | awsed/aws-sam-cli | 6becd25c06caaa96a79d6c9211da05501dadd132 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | tests/unit/lib/build_module/test_build_graph.py | awsed/aws-sam-cli | 6becd25c06caaa96a79d6c9211da05501dadd132 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | tests/unit/lib/build_module/test_build_graph.py | awsed/aws-sam-cli | 6becd25c06caaa96a79d6c9211da05501dadd132 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | from unittest import TestCase
from uuid import uuid4
from pathlib import Path
import tomlkit
from parameterized import parameterized
from samcli.lib.build.build_graph import (
BuildDefinition,
_build_definition_to_toml_table,
CODE_URI_FIELD,
RUNTIME_FIELD,
METADATA_FIELD,
FUNCTIONS_FIELD,
_toml_table_to_build_definition,
BuildGraph,
InvalidBuildGraphException,
)
from samcli.lib.providers.provider import Function
from samcli.lib.utils import osutils
def generate_function(
name="name",
function_name="function_name",
runtime="runtime",
memory="memory",
timeout="timeout",
handler="handler",
codeuri="codeuri",
environment="environment",
rolearn="rolearn",
layers="layers",
events="events",
metadata={},
):
return Function(
name, function_name, runtime, memory, timeout, handler, codeuri, environment, rolearn, layers, events, metadata
)
class TestConversionFunctions(TestCase):
def test_build_definition_to_toml_table(self):
build_definition = BuildDefinition("runtime", "codeuri", {"key": "value"})
build_definition.add_function(generate_function())
toml_table = _build_definition_to_toml_table(build_definition)
self.assertEqual(toml_table[CODE_URI_FIELD], build_definition.codeuri)
self.assertEqual(toml_table[RUNTIME_FIELD], build_definition.runtime)
self.assertEqual(toml_table[METADATA_FIELD], build_definition.metadata)
self.assertEqual(toml_table[FUNCTIONS_FIELD], [f.name for f in build_definition.functions])
def test_toml_table_to_build_definition(self):
toml_table = tomlkit.table()
toml_table[CODE_URI_FIELD] = "codeuri"
toml_table[RUNTIME_FIELD] = "runtime"
toml_table[METADATA_FIELD] = {"key": "value"}
toml_table[FUNCTIONS_FIELD] = ["function1"]
uuid = str(uuid4())
build_definition = _toml_table_to_build_definition(uuid, toml_table)
self.assertEqual(build_definition.codeuri, toml_table[CODE_URI_FIELD])
self.assertEqual(build_definition.runtime, toml_table[RUNTIME_FIELD])
self.assertEqual(build_definition.metadata, toml_table[METADATA_FIELD])
self.assertEqual(build_definition.uuid, uuid)
self.assertEqual(build_definition.functions, [])
class TestBuildGraph(TestCase):
CODEURI = "hello_world_python/"
RUNTIME = "python3.8"
METADATA = {"Test": "hello", "Test2": "world"}
UUID = "3c1c254e-cd4b-4d94-8c74-7ab870b36063"
BUILD_GRAPH_CONTENTS = f"""
[build_definitions]
[build_definitions.{UUID}]
codeuri = "{CODEURI}"
runtime = "{RUNTIME}"
functions = ["HelloWorldPython", "HelloWorldPython2"]
[build_definitions.{UUID}.metadata]
Test = "{METADATA['Test']}"
Test2 = "{METADATA['Test2']}"
"""
def test_should_instantiate_first_time(self):
with osutils.mkdir_temp() as temp_base_dir:
build_dir = Path(temp_base_dir, ".aws-sam", "build")
build_dir.mkdir(parents=True)
build_graph1 = BuildGraph(str(build_dir.resolve()))
build_graph1.clean_redundant_functions_and_update(True)
build_graph2 = BuildGraph(str(build_dir.resolve()))
self.assertEqual(build_graph1.get_build_definitions(), build_graph2.get_build_definitions())
def test_should_instantiate_first_time_and_update(self):
with osutils.mkdir_temp() as temp_base_dir:
build_dir = Path(temp_base_dir, ".aws-sam", "build")
build_dir.mkdir(parents=True)
# create a build graph and persist it
build_graph1 = BuildGraph(str(build_dir))
build_definition1 = BuildDefinition(TestBuildGraph.RUNTIME, TestBuildGraph.CODEURI, TestBuildGraph.METADATA)
function1 = generate_function(
runtime=TestBuildGraph.RUNTIME, codeuri=TestBuildGraph.CODEURI, metadata=TestBuildGraph.METADATA
)
build_graph1.put_build_definition(build_definition1, function1)
build_graph1.clean_redundant_functions_and_update(True)
# read previously persisted graph and compare
build_graph2 = BuildGraph(str(build_dir))
self.assertEqual(len(build_graph1.get_build_definitions()), len(build_graph2.get_build_definitions()))
self.assertEqual(
list(build_graph1.get_build_definitions())[0], list(build_graph2.get_build_definitions())[0]
)
def test_should_read_existing_build_graph(self):
with osutils.mkdir_temp() as temp_base_dir:
build_dir = Path(temp_base_dir, ".aws-sam", "build")
build_dir.mkdir(parents=True)
build_graph_path = Path(build_dir.parent, "build.toml")
build_graph_path.write_text(TestBuildGraph.BUILD_GRAPH_CONTENTS)
build_graph = BuildGraph(str(build_dir))
for build_definition in build_graph.get_build_definitions():
self.assertEqual(build_definition.codeuri, TestBuildGraph.CODEURI)
self.assertEqual(build_definition.runtime, TestBuildGraph.RUNTIME)
self.assertEqual(build_definition.metadata, TestBuildGraph.METADATA)
def test_functions_should_be_added_existing_build_graph(self):
with osutils.mkdir_temp() as temp_base_dir:
build_dir = Path(temp_base_dir, ".aws-sam", "build")
build_dir.mkdir(parents=True)
build_graph_path = Path(build_dir.parent, "build.toml")
build_graph_path.write_text(TestBuildGraph.BUILD_GRAPH_CONTENTS)
build_graph = BuildGraph(str(build_dir))
build_definition1 = BuildDefinition(TestBuildGraph.RUNTIME, TestBuildGraph.CODEURI, TestBuildGraph.METADATA)
function1 = generate_function(
runtime=TestBuildGraph.RUNTIME, codeuri=TestBuildGraph.CODEURI, metadata=TestBuildGraph.METADATA
)
build_graph.put_build_definition(build_definition1, function1)
self.assertTrue(len(build_graph.get_build_definitions()), 1)
for build_definition in build_graph.get_build_definitions():
self.assertTrue(len(build_definition.functions), 1)
self.assertTrue(build_definition.functions[0], function1)
self.assertEqual(build_definition.uuid, TestBuildGraph.UUID)
build_definition2 = BuildDefinition("another_runtime", "another_codeuri", None)
function2 = generate_function(name="another_function")
build_graph.put_build_definition(build_definition2, function2)
self.assertTrue(len(build_graph.get_build_definitions()), 2)
class TestBuildDefinition(TestCase):
def test_single_function_should_return_function_and_handler_name(self):
build_definition = BuildDefinition("runtime", "codeuri", "metadata")
build_definition.add_function(generate_function())
self.assertEqual(build_definition.get_handler_name(), "handler")
self.assertEqual(build_definition.get_function_name(), "name")
def test_no_function_should_raise_exception(self):
build_definition = BuildDefinition("runtime", "codeuri", "metadata")
self.assertRaises(InvalidBuildGraphException, build_definition.get_handler_name)
self.assertRaises(InvalidBuildGraphException, build_definition.get_function_name)
def test_same_runtime_codeuri_metadata_should_reflect_as_same_object(self):
build_definition1 = BuildDefinition("runtime", "codeuri", {"key": "value"})
build_definition2 = BuildDefinition("runtime", "codeuri", {"key": "value"})
self.assertEqual(build_definition1, build_definition2)
@parameterized.expand(
[
("runtime", "codeuri", ({"key": "value"}), "runtime", "codeuri", ({"key": "different_value"})),
("runtime", "codeuri", ({"key": "value"}), "different_runtime", "codeuri", ({"key": "value"})),
("runtime", "codeuri", ({"key": "value"}), "runtime", "different_codeuri", ({"key": "value"})),
# custom build method with Makefile definition should always be identified as different
("runtime", "codeuri", ({"BuildMethod": "makefile"}), "runtime", "codeuri", ({"BuildMethod": "makefile"})),
]
)
def test_different_runtime_codeuri_metadata_should_not_reflect_as_same_object(
self, runtime1, codeuri1, metadata1, runtime2, codeuri2, metadata2
):
build_definition1 = BuildDefinition(runtime1, codeuri1, metadata1)
build_definition2 = BuildDefinition(runtime2, codeuri2, metadata2)
self.assertNotEqual(build_definition1, build_definition2)
def test_equality_with_another_object(self):
build_definition = BuildDefinition("runtime", "codeuri", None)
self.assertNotEqual(build_definition, {})
def test_str_representation(self):
build_definition = BuildDefinition("runtime", "codeuri", None)
self.assertEqual(str(build_definition), f"BuildDefinition(runtime, codeuri, {build_definition.uuid}, {{}}, [])")
| 44.208738 | 120 | 0.701219 | 966 | 9,107 | 6.291925 | 0.153209 | 0.103653 | 0.042777 | 0.054294 | 0.549523 | 0.379401 | 0.281672 | 0.250411 | 0.201382 | 0.201382 | 0 | 0.011167 | 0.193697 | 9,107 | 205 | 121 | 44.42439 | 0.81656 | 0.018118 | 0 | 0.222222 | 0 | 0 | 0.115686 | 0.018572 | 0 | 0 | 0 | 0 | 0.17284 | 1 | 0.080247 | false | 0 | 0.049383 | 0.006173 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02316fb79336d21ef0435a76bd051d1f43577a4c | 1,133 | py | Python | web/server/api/serializers.py | ido-ran/ran-smart-frame2 | 2d8142e69ec638ef441d40d977183946162b9ea5 | [
"MIT"
] | 1 | 2019-02-11T09:05:02.000Z | 2019-02-11T09:05:02.000Z | web/server/api/serializers.py | ido-ran/ran-smart-frame2 | 2d8142e69ec638ef441d40d977183946162b9ea5 | [
"MIT"
] | 11 | 2020-04-29T23:09:23.000Z | 2022-02-26T09:00:14.000Z | web/server/api/serializers.py | ido-ran/ran-smart-frame2 | 2d8142e69ec638ef441d40d977183946162b9ea5 | [
"MIT"
] | 1 | 2019-01-14T10:13:24.000Z | 2019-01-14T10:13:24.000Z | from google.appengine.ext import ndb
def default_json_serializer(obj):
"""Default JSON serializer."""
import calendar, datetime
if isinstance(obj, datetime.datetime):
if obj.utcoffset() is not None:
obj = obj - obj.utcoffset()
millis = int(
calendar.timegm(obj.timetuple()) * 1000 +
obj.microsecond / 1000
)
return millis
raise TypeError('Not sure how to serialize %s' % (obj,))
# I'll use this method to serialize objects instead of the complex json.dumps.
# This seems to be more composable, still not the best.
def clone_for_json(obj):
import calendar, datetime
clone = obj.to_dict()
for attr, val in clone.items():
# logging.info("attr {0} type:{1}".format(attr, type(val)))
if (isinstance(val, datetime.datetime)):
clone[attr] = int(
calendar.timegm(val.timetuple()) * 1000 +
val.microsecond / 1000
)
elif (isinstance(val, ndb.Key)):
clone[attr] = val.id()
# Add the entity numeric id.
clone['id'] = obj.key.id()
return clone
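# Hedged usage sketch (the entity instance and json calls are illustrative, not part of
# this module):
#   import json
#   body = json.dumps(clone_for_json(some_ndb_entity))
#   # or, relying on the fallback serializer for datetime values:
#   body = json.dumps(some_ndb_entity.to_dict(), default=default_json_serializer)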
| 32.371429 | 78 | 0.597529 | 142 | 1,133 | 4.732394 | 0.485915 | 0.032738 | 0.0625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02225 | 0.285966 | 1,133 | 34 | 79 | 33.323529 | 0.808405 | 0.210944 | 0 | 0.08 | 0 | 0 | 0.033898 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.12 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02327c2121e9cd3985b8d613146e24e3d4944446 | 13,086 | py | Python | mt/pandas/pdh5.py | inteplus/mtpandas | 02e8a9d05bcba6d3e6cb983261e8de7f0033980b | [
"MIT"
] | null | null | null | mt/pandas/pdh5.py | inteplus/mtpandas | 02e8a9d05bcba6d3e6cb983261e8de7f0033980b | [
"MIT"
] | null | null | null | mt/pandas/pdh5.py | inteplus/mtpandas | 02e8a9d05bcba6d3e6cb983261e8de7f0033980b | [
"MIT"
] | null | null | null | '''Loading and saving to column-based pdh5 format.'''
from typing import Optional
import os
import json
from contextlib import nullcontext
import pandas as pd
from io import BytesIO
from halo import Halo
from mt import np, cv
from mt.base import aio, path
from mt.base.str import text_filename
from .dftype import isnull, get_dftype
__all__ = ['save_pdh5', 'load_pdh5_asyn', 'Pdh5Cell']
def load_special_cell(grp, key, dftype):
if dftype == 'ndarray':
return grp[key][:]
if dftype == 'SparseNdarray':
grp2 = grp.require_group(key)
dense_shape = tuple(json.loads(grp2.attrs['dense_shape']))
values = grp2['values'][:]
indices = grp2['indices'][:]
return np.SparseNdarray(values, indices, dense_shape)
if dftype == 'Image':
grp2 = grp.require_group(key)
pixel_format = grp2.attrs['pixel_format']
meta = json.loads(grp2.attrs['meta'])
image = grp2['image'][:]
return cv.Image(image, pixel_format=pixel_format, meta=meta)
raise ValueError("Unknown dftype while loading cells: '{}'.".format(dftype))
class Pdh5Column:
'''A read-only column of a pdh5 file.'''
def __init__(self, filepath: str, col_id: str):
self.filepath = filepath
self.col_id = col_id
self.col = None
self.dftype = None
self.loaded = False
def get_item(self, row_id: int):
if not self.loaded:
import h5py
f = h5py.File(self.filepath, mode='r')
columns = json.loads(f.attrs['columns'])
self.dftype = columns[self.col_id]
key = 'column_'+text_filename(self.col_id)
if self.dftype != 'none':
self.col = f[key]
self.loaded = True
if self.dftype == 'none':
return None
if self.dftype == 'json':
x = self.col[row_id]
return None if x == b'' else json.loads(x)
if self.dftype in ('ndarray', 'Image', 'SparseNdarray'):
key = str(row_id)
if not key in self.col:
return None
return load_special_cell(self.col, key, self.dftype)
class Pdh5Cell:
'''A read-only cell of a pdh5 column.'''
def __init__(self, col: Pdh5Column, row_id: int):
self.col = col
self.row_id = row_id
self._value = None
self.loaded = False
@property
def value(self):
if not self.loaded:
self._value = self.col.get_item(self.row_id)
self.loaded = True
return self._value
def save_pdh5_index(f, df: pd.DataFrame, spinner=None):
f.attrs['format'] = 'pdh5'
f.attrs['version'] = '1.0'
size = len(df)
f.attrs['size'] = size
index = df.index
grp = f.create_group("index")
if spinner is not None:
spinner.text = 'saving index of type {}'.format(type(index))
if isinstance(index, pd.RangeIndex):
grp.attrs['type'] = 'RangeIndex'
if index.start is not None:
grp.attrs['start'] = index.start
if index.stop is not None:
grp.attrs['stop'] = index.stop
if index.step is not None:
grp.attrs['step'] = index.step
if index.name is not None:
grp.attrs['name'] = index.name
elif isinstance(index, (pd.Int64Index, pd.UInt64Index, pd.Float64Index)):
grp.attrs['type'] = type(index).__name__
if index.name is not None:
grp.attrs['name'] = index.name
data = grp.create_dataset(name='values', data=index.values, compression='gzip')
else:
raise ValueError("Unsupported index type '{}'.".format(type(index)))
def save_pdh5_columns(f, df: pd.DataFrame, spinner=None):
columns = {x: get_dftype(df[x]) for x in df.columns}
f.attrs['columns'] = json.dumps(columns)
for column in columns:
if spinner is not None:
spinner.text = "saving column '{}'".format(column)
key = 'column_'+text_filename(column)
dftype = columns[column]
if dftype == 'none':
pass
elif dftype == 'str':
# If we save in 'S' dtype, it cannot deal with non-ascii characters.
# If we save in h5py.string_dtype() dtype, we get "VLEN strings do not support embedded NULLs".
# What should we do?
import h5py
data = df[column].apply(lambda x: 'None_NaT_NaN' if isnull(x) else x).to_numpy().astype(h5py.string_dtype())
f.create_dataset(key, data=data, compression='gzip')
elif dftype in ('bool', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'float32', 'int64', 'uint64', 'float64'):
data = df[column].astype(dftype).to_numpy()
f.create_dataset(key, data=data, compression='gzip')
elif dftype == 'json':
data = df[column].apply(lambda x: '\0' if isnull(x) else json.dumps(x)).to_numpy().astype('S')
f.create_dataset(key, data=data, compression='gzip')
elif dftype in ('Timestamp', 'Timedelta'):
data = df[column].apply(lambda x: '\0' if isnull(x) else str(x)).to_numpy().astype('S')
f.create_dataset(key, data=data, compression='gzip')
elif dftype in ('ndarray', 'Image', 'SparseNdarray'):
data = df[column].tolist()
grp = f.create_group(key)
for i, item in enumerate(data):
if isnull(item):
continue
key = str(i)
if dftype == 'ndarray':
grp.create_dataset(key, data=item, compression='gzip')
elif dftype == 'SparseNdarray':
grp2 = grp.create_group(key)
grp2.attrs['dense_shape'] = json.dumps(item.dense_shape)
grp2.create_dataset('values', data=item.values, compression='gzip')
grp2.create_dataset('indices', data=item.indices, compression='gzip')
elif dftype == 'Image':
grp2 = grp.create_group(key)
grp2.attrs['pixel_format'] = item.pixel_format
grp2.attrs['meta'] = json.dumps(item.meta)
grp2.create_dataset('image', data=item.image, compression='gzip')
else:
data = df[column].apply(lambda x: type(x)).unique()
raise ValueError("Unable to save column '{}' with type list '{}'.".format(column, data))
def save_pdh5(filepath: str, df: pd.DataFrame, file_mode: Optional[int] = 0o664, show_progress: bool = False, **kwargs):
'''Saves a dataframe into a .pdh5 file.
Parameters
----------
filepath : str
path to the file to be written to
df : pandas.DataFrame
the dataframe to write from
file_mode : int, optional
file mode of the newly written file
show_progress : bool
show a progress spinner in the terminal
'''
if show_progress:
spinner = Halo("dfsaving '{}'".format(filepath), spinner='dots')
scope = spinner
else:
spinner = None
scope = nullcontext()
try:
import h5py
filepath2 = filepath+'.mttmp'
with scope, h5py.File(filepath2, 'w') as f:
save_pdh5_index(f, df, spinner=spinner)
save_pdh5_columns(f, df, spinner=spinner)
if file_mode is not None: # chmod
os.chmod(filepath2, file_mode)
path.rename(filepath2, filepath, overwrite=True)
if show_progress:
spinner.succeed("dfsaved '{}'".format(filepath))
except:
if show_progress:
spinner.fail("failed to dfsave '{}'".format(filepath))
raise
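# Hedged usage sketch (the dataframe contents and file path are illustrative, not from
# this module):
#   df = pd.DataFrame({'label': ['a', 'b'], 'score': [0.1, 0.2]})
#   save_pdh5('/tmp/example.pdh5', df, show_progress=True)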
def load_pdh5_index(f, spinner=None, max_rows: Optional[int] = None) -> pd.DataFrame:
if f.attrs['format'] != 'pdh5':
raise ValueError("Input file does not have 'pdh5' format.")
size = f.attrs['size']
grp = f.require_group("index")
index_type = grp.attrs['type']
if spinner is not None:
spinner.text = 'loading index of type {}'.format(index_type)
if index_type == 'RangeIndex':
start = grp.attrs.get('start', None)
stop = grp.attrs.get('stop', None)
step = grp.attrs.get('step', None)
if stop is not None and start is not None and step is not None and max_rows is not None:
stop = start+step*max_rows
name = grp.attrs.get('name', None)
index = pd.RangeIndex(start=start, stop=stop, step=step, name=name)
elif index_type in ('Int64Index', 'UInt64Index', 'Float64Index'):
name = grp.attrs.get('name', None)
if max_rows is None:
values = grp['values'][:]
else:
values = grp['values'][:max_rows]
index = getattr(pd, index_type)(data=values, name=name)
else:
raise ValueError("Unsupported index type '{}'.".format(index_type))
return pd.DataFrame(index=index)
def load_pdh5_columns(f, df: pd.DataFrame, spinner=None, file_read_delayed: bool = False, max_rows: Optional[int] = None):
columns = json.loads(f.attrs['columns'])
size = len(df.index)
if max_rows is not None:
size = min(size, max_rows)
for column in columns:
if spinner is not None:
spinner.text = "loading column '{}'".format(column)
key = 'column_'+text_filename(column)
dftype = columns[column]
if dftype == 'none':
df[column] = None
elif dftype == 'str':
df[column] = f[key][:size]
df[column] = df[column].apply(lambda x: None if x in (b'', b'None_NaT_NaN') else x.decode() if isinstance(x, bytes) else x)
elif dftype in ('bool', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'float32', 'int64', 'uint64', 'float64'):
df[column] = f[key][:size]
elif dftype == 'json':
if file_read_delayed:
col = Pdh5Column(f.filename, column)
df[column] = [Pdh5Cell(col, i) for i in range(size)]
else:
d = f[key]
df[column] = [None if d[i] == b'' else json.loads(d[i]) for i in range(size)] # slower than loading everything to memory but requires less memory to process
elif dftype == 'Timestamp':
df[column] = f[key][:size]
df[column] = df[column].apply(lambda x: pd.NaT if x == b'' else pd.Timestamp(x.decode()))
elif dftype == 'Timedelta':
df[column] = f[key][:size]
df[column] = df[column].apply(lambda x: pd.NaT if x == b'' else pd.Timedelta(x.decode()))
elif dftype in ('ndarray', 'Image', 'SparseNdarray'):
data = [None]*size
grp = f.require_group(key)
if file_read_delayed:
col = Pdh5Column(f.filename, column)
for key in grp.keys():
i = int(key)
if i < size:
data[i] = Pdh5Cell(col, i) if file_read_delayed else load_special_cell(grp, key, dftype)
df[column] = data
else:
raise ValueError("Unable to load column '{}' with dftype '{}'.".format(column, dftype))
async def load_pdh5_asyn(filepath: str, show_progress: bool = False, file_read_delayed: bool = False, max_rows: Optional[int] = None, context_vars: dict = {}, **kwargs) -> pd.DataFrame:
'''Loads the dataframe of a .pdh5 file.
Parameters
----------
filepath : str
path to the file to be read from
show_progress : bool
show a progress spinner in the terminal
file_read_delayed: bool
If True, columns of dftype 'json', 'ndarray', 'Image' and 'SparseNdarray' are proxied for
reading later, returning cells are instances of :class:`Pdh5Cell` instead. If False, these
columns are read thoroughly, which can be slow.
max_rows : int, optional
limit the maximum number of rows to be read from the file
context_vars : dict
a dictionary of context variables within which the function runs. It must include
`context_vars['async']` to tell whether to invoke the function asynchronously or not.
Ignored for '.pdh5' format.
Returns
-------
df : pandas.DataFrame
the loaded dataframe
'''
if show_progress:
spinner = Halo("dfloading '{}'".format(filepath), spinner='dots')
scope = spinner
else:
spinner = None
scope = nullcontext()
try:
import h5py
if file_read_delayed:
my_file = filepath
else:
data = await aio.read_binary(filepath, context_vars=context_vars)
my_file = BytesIO(data)
with scope, h5py.File(my_file, 'r') as f:
df = load_pdh5_index(f, spinner=spinner, max_rows=max_rows)
load_pdh5_columns(f, df, spinner=spinner, file_read_delayed=file_read_delayed, max_rows=max_rows)
if show_progress:
spinner.succeed("dfloaded '{}'".format(filepath))
return df
except:
if show_progress:
spinner.fail("failed to load '{}'".format(filepath))
raise
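# Hedged usage sketch (file path and event-loop setup are illustrative):
#   import asyncio
#   df = asyncio.run(load_pdh5_asyn('/tmp/example.pdh5', context_vars={'async': True}))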
| 39.415663 | 185 | 0.588874 | 1,683 | 13,086 | 4.479501 | 0.154486 | 0.021223 | 0.017907 | 0.017642 | 0.362382 | 0.300968 | 0.257594 | 0.24035 | 0.209312 | 0.183048 | 0 | 0.012302 | 0.285649 | 13,086 | 331 | 186 | 39.534743 | 0.794181 | 0.051888 | 0 | 0.349206 | 0 | 0 | 0.09951 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039683 | false | 0.003968 | 0.059524 | 0 | 0.146825 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02334fa6a9bf90945b2c731a4b0bb4e429a5fba6 | 3,764 | py | Python | TEMPLATE_TRANSMISSION_TERRESTRIAL/make_spec.py | mrline/CHIMERA_TERRESTRIAL_PLANETS | fdfdf9590fe16e57720d165d49ced15784338908 | [
"MIT"
] | null | null | null | TEMPLATE_TRANSMISSION_TERRESTRIAL/make_spec.py | mrline/CHIMERA_TERRESTRIAL_PLANETS | fdfdf9590fe16e57720d165d49ced15784338908 | [
"MIT"
] | null | null | null | TEMPLATE_TRANSMISSION_TERRESTRIAL/make_spec.py | mrline/CHIMERA_TERRESTRIAL_PLANETS | fdfdf9590fe16e57720d165d49ced15784338908 | [
"MIT"
] | null | null | null | import matplotlib as mpl
mpl.use('TkAgg')
from matplotlib.pyplot import *
from fm import *
import pickle
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import time
xsects=xsects(909,3333) #lower wavenumber, upper wavenumber...to convert to wl [um] take 1E4/wno (here, 11 - 3 um)
#TP profile parameters--using a "4 layer" model--an isothermal region (below surface), a troposphere, a stratosphere, and an isothermal "thermosphere"
Tsfc=280. #this is "surface temp" (isothermal below surface pressure at this temperature)
logPsfc=0.0 #log surface pressure
gam_trop=0.#0.19 #troposphere adiabatic index, gamma (dlnT/dlnP=gamma)
logPtrop=-0.6 #log tropopause pressure
gam_strat=-0.0#-0.05 #stratosphere adiabatic index --
logPstrat=-3.0 #stratopause pressure--isothermal above this
#planet params
Rp= 0.910# Planet radius in Earth Radii
Rstar=0.117 #Stellar Radius in Solar Radii
M = 0.772 # Mass in Earth Masses
#cloud params
logPc=-0.25 #log cloud-top-pressure bar (here set to refractive boundary)
#log gas mixing ratios (loosely based on Hu et al. 2012; Robinson et al. 2011)
logH2O=-5.5
logCH4=-6.3
logCO2=-3.45 #-3.4
logO3=-6.5
logN2O=-6.3
logCO=-7.0
Bkg_mmw=28.6 #unknown background gas mmw
'''
#refractive boundary
Rp= 0.910
Rstar=0.117
v0=2.93E-4
T0=200.
a=0.030
mu=28.6
g=10.
pmax=23E-3*(1.23E-4/v0)*(T0/130.)**1.5*(Rstar)*(5.2/a)*(10.973/(Rp))**0.5*(2.2/mu)**0.5*(24.8/g)**0.5
'''
#state vector
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
#Tsfc,logPsfc,gam_trop,logPtrop,gam_strat,logPstrat, Rp, Rstar, M,logPc,Bkg_mmw, logH2O, logCH4, logCO2, logO3, logN2O,logCO
x=np.array([Tsfc,logPsfc,gam_trop,logPtrop,gam_strat,logPstrat, Rp, Rstar, M,logPc, Bkg_mmw, logH2O, logCH4, logCO2, logO3, logN2O,logCO])
y_mod,wno,atm=fx(x,xsects)
#read in external noise file if available--the noise_*.txt's are based on Tremblay + 2020 from the greene et al. 2016 noise model
# there are 5 noise files for 5 different resolving powers right now, noise_R10, *_R30, *_R50, *_R100, and *_R300
#If you switch this file, make sure to change the CK file labels in fm.py (e.g., from R100 to R300 etc.; this should
#be obvious in that code)
wlgrid, junk,junk, err0=np.loadtxt('noise_R100.txt').T #must have same wlgrid as CK coeffs
err=np.interp(1E4/wno[::-1],wlgrid,err0)
ntran = 25.0
noise_floor = 5.0E-6
err=np.sqrt( (err[::-1]*(1.0/np.sqrt(ntran)))**2.0 + (noise_floor)**2.0 )
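# Worked illustration of the quadrature sum above (numbers are hypothetical): a 25 ppm
# per-transit error over ntran=25 transits gives 25e-6/sqrt(25) = 5e-6, and adding the
# 5e-6 noise floor in quadrature yields sqrt(5e-6**2 + 5e-6**2) ~ 7.1e-6.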
fname='DryEarth_2-11um_R100_25tran'
#defining data array
y_meas=np.zeros(len(y_mod))
#adding gaussian noise (note I turned this off now--see justification in Feng et al. 2018)
for i in range(y_meas.shape[0]): y_meas[i]=y_mod[i]#+np.random.randn(1)*err[i]
#computing chi-square of random noise instance
print(np.sum((y_meas-y_mod)**2/err**2)/len(y_meas))
#dumping pickles--model then noised up data data
output=[1E4/wno, y_mod]
pickle.dump(output,open("Model.pic","wb")) #spectral model to be noised up by instrument noise model
output=[1E4/wno, y_meas,err] #noised up "synthetic" spectrum
pickle.dump(output,open("data.pic","wb"))
#plotting stuff
wlgrid=1E4/wno
ymin=1E6*np.min(y_mod)*0.99
ymax=1E6*np.max(y_mod)*1.01
xmin=np.min(1E4/wno)
xmax=np.max(1E4/wno)
fig1, ax=subplots()
xlabel('$\lambda$ ($\mu$m)',fontsize=12)
ylabel('(R$_{p}$/R$_{\star}$)$^{2}$ [ppm]',fontsize=12)
ax.plot(1E4/wno, y_mod*1E6)
ax.errorbar(wlgrid, y_meas*1E6, yerr=err*1E6, xerr=None, fmt='ok',alpha=0.25)
ax.set_xscale('log')
ax.minorticks_off()
ax.set_xticks([1,2,3,4,5,6,7,8,9,10,15,20,30])
ax.axis([0.8*xmin*1.2,xmax,ymin,ymax])
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
savefig(fname+'_spectrum.pdf',fmt='pdf')
show()
close()
pdb.set_trace()
| 32.17094 | 150 | 0.706961 | 691 | 3,764 | 3.78437 | 0.441389 | 0.018356 | 0.008031 | 0.003059 | 0.081836 | 0.081836 | 0.081836 | 0.081836 | 0.081836 | 0.081836 | 0 | 0.089929 | 0.137354 | 3,764 | 116 | 151 | 32.448276 | 0.71543 | 0.463071 | 0 | 0 | 0 | 0 | 0.07809 | 0.030337 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.116667 | 0 | 0.116667 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0234a8839e299d50fbd68d0e23882f80a0c91bb4 | 1,979 | py | Python | gif/datasets/full_dataset.py | jm-begon/globally-induced-forest | bf41640a5f0d9db637877dfa077b1d529539dbc6 | [
"BSD-3-Clause"
] | 6 | 2018-01-05T11:56:27.000Z | 2018-10-13T13:14:05.000Z | gif/datasets/full_dataset.py | jm-begon/globally-induced-forest | bf41640a5f0d9db637877dfa077b1d529539dbc6 | [
"BSD-3-Clause"
] | 1 | 2018-01-05T12:04:37.000Z | 2018-01-05T13:56:20.000Z | gif/datasets/full_dataset.py | jm-begon/globally-induced-forest | bf41640a5f0d9db637877dfa077b1d529539dbc6 | [
"BSD-3-Clause"
] | null | null | null | from abc import ABCMeta, abstractmethod
import os
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state
from gif.datasets.utils import data_folder
class FullDataset(object, metaclass=ABCMeta):
@classmethod
def get_default_lengths(cls):
return 0, 0
@classmethod
def get_default_folder_name(cls):
return cls.__name__.lower()
def __init__(self, folder=None):
if folder is None:
folder = data_folder(self.__class__.get_default_folder_name())
self.folder = folder
self.tr_X_y = None
self.ts_X_y = None
def __repr__(self):
return "{}()".format(self.__class__.__name__)
def __len__(self):
if self.ts_X_y is None:
return sum(self.__class__.get_default_lengths())
return len(self.tr_X_y[-1]) + len(self.ts_X_y[-1])
def load_(self):
pass
def load(self):
if self.tr_X_y is None:
self.load_()
def partition(self, train_size=None, shuffle=True, random_state=1217):
self.load()
if train_size is None:
# Use default train size
train_size = len(self.tr_X_y[-1])
X_tr, y_tr = self.tr_X_y
X_ts, y_ts = self.ts_X_y
X = np.vstack((X_tr, X_ts))
y = np.hstack((y_tr, y_ts))
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=train_size, shuffle=shuffle,
random_state=random_state
)
self.tr_X_y = X_train, y_train
self.ts_X_y = X_test, y_test
@property
def training_set(self):
if self.tr_X_y is None:
return np.array([]), np.array([])
return self.tr_X_y
@property
def test_set(self):
if self.ts_X_y is None:
return np.array([]), np.array([])
return self.ts_X_y
def is_artificial(self):
return hasattr(self, "random_state")
| 25.371795 | 74 | 0.617484 | 295 | 1,979 | 3.766102 | 0.227119 | 0.028803 | 0.050405 | 0.057606 | 0.185419 | 0.153015 | 0.131413 | 0.131413 | 0.10261 | 0.068407 | 0 | 0.006351 | 0.283982 | 1,979 | 77 | 75 | 25.701299 | 0.777699 | 0.011117 | 0 | 0.178571 | 0 | 0 | 0.008184 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.196429 | false | 0.017857 | 0.107143 | 0.071429 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02357085d487f2ae7837701d19a3603ef895b697 | 9,390 | py | Python | osc_to_visca.py | agennaro1/VISCA-IP-Controller-main | b88fbc44c57039408b17af88b3397044418d7bb9 | [
"MIT"
] | null | null | null | osc_to_visca.py | agennaro1/VISCA-IP-Controller-main | b88fbc44c57039408b17af88b3397044418d7bb9 | [
"MIT"
] | null | null | null | osc_to_visca.py | agennaro1/VISCA-IP-Controller-main | b88fbc44c57039408b17af88b3397044418d7bb9 | [
"MIT"
] | null | null | null | # receive OSC messages and send VISCA control messages to camera (both UDP)
# pip3 install aiosc
# https://pypi.org/project/aiosc/
# https://github.com/artfwo/aiosc
import asyncio # for receiving OSC
import aiosc # for receiving OSC
# pip3 install python-osc
# https://pypi.org/project/python-osc/
from pythonosc import udp_client # for sending OSC
from math import floor # for fader
import socket
import binascii # for printing the visca messages
### VISCA sender (socket)
camera_ip = '10.50.2.145'
#camera_ip = '127.0.0.1'
camera_port = 52381
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # IPv4, UDP
### VISCA receiver
buffer_size = 1024
s.bind(('', camera_port)) # for testing use the port one higher than the camera's port
s.settimeout(1.0) # only wait for a response for 1 second
### VISCA Commands (Payloads)
camera_on = '81 01 04 00 02 FF'
information_display_off = '81 01 7E 01 18 03 FF'
memory_recall = '81 01 04 3F 02 0p FF' # p: Memory number (=0 to F)
memory_set = '81 01 04 3F 01 0p FF' # p: Memory number (=0 to F)
movement_speed = '01'
'''
pan_speed = '05'
tilt_speed = '05'
pan_up = '81 01 06 01 VV WW 03 01 FF'.replace('VV', str(pan_speed)).replace('WW', str(tilt_speed))
pan_down = '81 01 06 01 VV WW 03 02 FF'.replace('VV', str(pan_speed)).replace('WW', str(tilt_speed))
pan_left = '81 01 06 01 VV WW 01 03 FF'.replace('VV', str(pan_speed)).replace('WW', str(tilt_speed))
pan_right = '81 01 06 01 VV WW 02 03 FF'.replace('VV', str(pan_speed)).replace('WW', str(tilt_speed))
pan_up_left = '81 01 06 01 VV WW 01 01 FF'.replace('VV', str(pan_speed)).replace('WW', str(tilt_speed))
pan_up_right = '81 01 06 01 VV WW 02 01 FF'.replace('VV', str(pan_speed)).replace('WW', str(tilt_speed))
pan_down_left = '81 01 06 01 VV WW 01 02 FF'.replace('VV', str(pan_speed)).replace('WW', str(tilt_speed))
pan_down_right = '81 01 06 01 VV WW 02 02 FF'.replace('VV', str(pan_speed)).replace('WW', str(tilt_speed))
pan_stop = '81 01 06 01 VV WW 03 03 FF'.replace('VV', str(pan_speed)).replace('WW', str(tilt_speed))
'''
pan_dictionary = {
'pan_up' : '81 01 06 01 VV WW 03 01 FF',
'pan_down' : '81 01 06 01 VV WW 03 02 FF',
'pan_left' : '81 01 06 01 VV WW 01 03 FF',
'pan_right' : '81 01 06 01 VV WW 02 03 FF',
'pan_up_left' : '81 01 06 01 VV WW 01 01 FF',
'pan_up_right' : '81 01 06 01 VV WW 02 01 FF',
'pan_down_left' : '81 01 06 01 VV WW 01 02 FF',
'pan_down_right' : '81 01 06 01 VV WW 02 02 FF'}
# YYYY: Pan Position DE00 (−170 degree) to 2200 (170 degree) (CENTER 0000)
# ZZZZ: Tilt Position EE00 (–90 degree) to 0400 (90 degree) (CENTER 0000)
# for high speed VV = 18 and WW = 17
pan_direct = '8x 01 06 02 18 17 0Y 0Y 0Y 0Y 0Z 0Z 0Z 0Z FF' # absolute position
pan_stop = '81 01 06 01 15 15 03 03 FF' # replaced VV and WW with 15
pan_home = '81 01 06 04 FF'
pan_reset = '81 01 06 05 FF'
focus_stop = '81 01 04 08 00 FF'
focus_far = '81 01 04 08 02 FF'
focus_near = '81 01 04 08 03 FF'
focus_far_variable = '81 01 04 08 2p FF'.replace('p', '7') # 0 low to 7 high
focus_near_variable = '81 01 04 08 3p FF'.replace('p', '7') # 0 low to 7 high
focus_direct = '81 01 04 48 0p 0q 0r 0s FF' #.replace('p', ) q, r, s
focus_auto = '81 01 04 38 02 FF'
focus_manual = '81 01 04 38 03 FF'
focus_infinity = '81 01 04 18 02 FF'
zoom_stop = '81 01 04 07 00 FF'
zoom_tele = '81 01 04 07 02 FF'
zoom_wide = '81 01 04 07 03 FF'
zoom_tele_variable = '81 01 04 07 2p FF' # p=0 (Low) to 7 (High)
zoom_wide_variable = '81 01 04 07 3p FF' # p=0 (Low) to 7 (High)
zoom_direct = '81 01 04 47 0p 0q 0r 0s FF' # pqrs: Zoom Position
zoom_focus_direct = '81 01 04 47 0p 0q 0r 0s 0t 0u 0v 0w FF' # pqrs: Zoom Position tuvw: Focus Position
inquiry_lens_control = '81 09 7E 7E 00 FF'
# response: 81 50 0p 0q 0r 0s 0H 0L 0t 0u 0v 0w 00 xx xx FF
inquiry_camera_control = '81 09 7E 7E 01 FF'
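# Hedged illustration (preset number and speeds are arbitrary examples): the placeholder
# payloads above are specialised with str.replace before sending, e.g.
#   memory_recall.replace('p', '3')  ->  '81 01 04 3F 02 03 FF'
#   pan_dictionary['pan_left'].replace('VV', '05').replace('WW', '05')
#       ->  '81 01 06 01 05 05 01 03 FF'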
def reset_sequence_number_function(): # this should probably be rolled into the send_visca function
reset_sequence_number_message = bytearray.fromhex('02 00 00 01 00 00 00 01 01')
s.sendto(reset_sequence_number_message,(camera_ip, camera_port))
global sequence_number
sequence_number = 1
print('Reset sequence number to', sequence_number)
try:
data = s.recvfrom(buffer_size)
received_message = binascii.hexlify(data[0])
#print('Received', received_message)
data = s.recvfrom(buffer_size)
received_message = binascii.hexlify(data[0])
#print('Received', received_message)
send_osc('reset_sequence_number', 1.0)
except socket.timeout: # s.settimeout(2.0) #above
received_message = 'No response from camera'
print(received_message)
send_osc('reset_sequence_number', 0.0)
return sequence_number
def send_visca(message_string):
global sequence_number
payload_type = bytearray.fromhex('01 00')
payload = bytearray.fromhex(message_string)
payload_length = len(payload).to_bytes(2, 'big')
visca_message = payload_type + payload_length + sequence_number.to_bytes(4, 'big') + payload
s.sendto(visca_message, (camera_ip, camera_port))
print(binascii.hexlify(visca_message), 'sent to', camera_ip, camera_port, sequence_number)
sequence_number += 1
# wait for acknowledge and completion messages
try:
data = s.recvfrom(buffer_size)
received_message = binascii.hexlify(data[0])
#print('Received', received_message)
data = s.recvfrom(buffer_size)
received_message = binascii.hexlify(data[0])
if received_message == b'9051ff':
print('Received okay')
else:
print('Error')
#print('Received', received_message)
except socket.timeout: # s.settimeout(2.0) #from above
received_message = 'No response from camera'
print(received_message)
send_osc('reset_sequence_number', 0.0)
#return visca_message
return received_message
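# Hedged illustration (not executed): for the pan_home payload '81 01 06 04 FF' and
# sequence number 1, the datagram assembled by send_visca is
#   01 00 | 00 05 | 00 00 00 01 | 81 01 06 04 FF
#   type    length   sequence      payload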
### OSC server and client
osc_receive_port = 8000
touchOSC_ip = '10.0.0.32' # there must be a way to listen for this... maybe osc_address[0]
osc_send_port = 9000
def send_osc(osc_command, osc_send_argument):
osc_message_to_send = '/1/' + osc_command
osc_client = udp_client.SimpleUDPClient(touchOSC_ip, osc_send_port)
osc_client.send_message(osc_message_to_send, osc_send_argument)
### OSC receiving server
def parse_osc_message(osc_address, osc_path, args):
global touchOSC_ip
touchOSC_ip = osc_address[0]
osc_path_list = osc_path.split('/')
osc_command = osc_path_list[2]
osc_argument = args[0]
if osc_command == 'camera_on':
send_visca(camera_on)
elif osc_command == 'reset_sequence_number':
reset_sequence_number_function()
elif 'memory_' in osc_command:
memory_preset_number = osc_command[-1]
if osc_argument > 0:
if 'recall' in osc_command:
print('Memory recall', memory_preset_number)
send_visca(information_display_off) # so that it doesn't display on-screen
send_visca(memory_recall.replace('p', memory_preset_number))
elif 'set' in osc_command:
print('Memory set', memory_preset_number)
send_visca(memory_set.replace('p', memory_preset_number))
elif 'zoom' in osc_command:
if osc_argument > 0:
if osc_command == 'zoom_tele':
send_visca(zoom_tele)
elif osc_command == 'zoom_wide':
send_visca(zoom_wide)
else: # when the button is released the osc_argument should be 0
send_visca(zoom_stop)
elif 'focus' in osc_command:
if osc_command == 'focus_auto':
send_visca(focus_auto)
if osc_argument > 0:
if osc_command == 'focus_far':
send_visca(focus_far)
elif osc_command == 'focus_near':
send_visca(focus_near)
else: # when the button is released the osc_argument should be 0
send_visca(focus_stop)
elif 'speed' in osc_command: # e.g. speed01 or speed15, from buttons not a slider
global movement_speed
movement_speed = osc_command[5:]
send_osc('MovementSpeedLabel', movement_speed)
print('set speed to', movement_speed)
elif 'pan' in osc_command:
if 'speed' not in osc_command: # this is a relic of the old TouchOSC layout
if osc_argument > 0:
pan_command = pan_dictionary[osc_command].replace('VV', movement_speed).replace('WW', movement_speed)
send_visca(pan_command)
else: # when the button is released the osc_argument should be 0
send_visca(pan_stop)
else:
print("I don't know what to do with", osc_command, osc_argument)
send_osc('SentMessageLabel', osc_command)
## Start off by resetting sequence number
sequence_number = 1 # a global variable that we increment with each command, remember 0x0001
reset_sequence_number_function()
## Then start the OSC server to receive messages
def protocol_factory():
osc = aiosc.OSCProtocol({'//*': lambda osc_address, osc_path, *args: parse_osc_message(osc_address, osc_path, args)})
return osc
receive_loop = asyncio.get_event_loop()
coro = receive_loop.create_datagram_endpoint(protocol_factory, local_addr=('0.0.0.0', osc_receive_port))
transport, protocol = receive_loop.run_until_complete(coro)
receive_loop.run_forever() | 44.92823 | 121 | 0.681683 | 1,530 | 9,390 | 3.984967 | 0.191503 | 0.026242 | 0.019682 | 0.023618 | 0.3938 | 0.32393 | 0.310973 | 0.285386 | 0.260784 | 0.253567 | 0 | 0.089862 | 0.214271 | 9,390 | 209 | 122 | 44.92823 | 0.736243 | 0.200426 | 0 | 0.210191 | 0 | 0 | 0.195921 | 0.012979 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031847 | false | 0 | 0.038217 | 0 | 0.089172 | 0.063694 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0236c92865866a7368323047641148780ec5371e | 1,698 | py | Python | dynoname/address.py | aio-libs/dynoname | 3fc52db5457b04680b6ab1ad94183584eec3d802 | [
"Apache-2.0"
] | 9 | 2018-05-01T09:51:43.000Z | 2021-02-26T16:50:46.000Z | dynoname/address.py | aio-libs/dynoname | 3fc52db5457b04680b6ab1ad94183584eec3d802 | [
"Apache-2.0"
] | null | null | null | dynoname/address.py | aio-libs/dynoname | 3fc52db5457b04680b6ab1ad94183584eec3d802 | [
"Apache-2.0"
] | 1 | 2021-08-01T04:02:51.000Z | 2021-08-01T04:02:51.000Z | from ipaddress import IPv4Address, IPv6Address
from typing import List, Union, NewType
import socket
import random
import attr
IpAddress = Union[IPv4Address, IPv6Address]
LocalAddress = NewType('LocalAddress', str)
@attr.s
class SocketAddr:
ip = attr.ib(type=IpAddress)
port = attr.ib(type=int)
def as_tuple(self):
return (str(self.ip), self.port)
SingleAddress = Union[SocketAddr, LocalAddress]
class Address:
__slots__ = ('_first_priority',)
def diff(self, other: "Address") -> (List[SingleAddress], List[SingleAddress]):
raise NotImplementedError()
# return (new, old)
def __eq__(self, other: "Address") -> bool:
if isinstance(other, Address):
return self._first_priority == other._first_priority
else:
return NotImplemented
def pick_one(self):
return random.choice(self._first_priority)
@classmethod
def from_getaddrinfo(Address, list_of_addresses) -> "Address":
first_priority = []
for (family, type, proto, canonname, sockaddr) in list_of_addresses:
if family == socket.AF_INET:
addr, port = sockaddr
first_priority.append(SocketAddr(
ip=IPv4Address(addr),
port=port,
))
elif family == socket.AF_INET6:
addr, port, *_ = sockaddr
first_priority.append(SocketAddr(
ip=IPv6Address(addr),
port=port,
))
else:
raise TypeError("Invalid address family")
me = Address()
me._first_priority = first_priority
return me
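# Illustrative usage sketch (not part of the original module): resolve a host with
# the standard library and wrap the result; host and port here are examples.
if __name__ == "__main__":
    infos = socket.getaddrinfo("example.com", 443, proto=socket.IPPROTO_TCP)
    address = Address.from_getaddrinfo(infos)
    print(address.pick_one().as_tuple())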
| 27.836066 | 83 | 0.597173 | 172 | 1,698 | 5.709302 | 0.377907 | 0.119145 | 0.020367 | 0.04277 | 0.095723 | 0.095723 | 0.095723 | 0.095723 | 0 | 0 | 0 | 0.005968 | 0.309187 | 1,698 | 60 | 84 | 28.3 | 0.831202 | 0.010012 | 0 | 0.173913 | 0 | 0 | 0.041691 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.108696 | 0.043478 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02375029e64bbe4f9d24ba135fd00b6bf6f98d9f | 2,424 | py | Python | connectors/abstract/abstract_folder.py | kefir/snakee | a17734d4b2d7dfd3e6c7b195baa128fbc84d197b | [
"MIT"
] | null | null | null | connectors/abstract/abstract_folder.py | kefir/snakee | a17734d4b2d7dfd3e6c7b195baa128fbc84d197b | [
"MIT"
] | null | null | null | connectors/abstract/abstract_folder.py | kefir/snakee | a17734d4b2d7dfd3e6c7b195baa128fbc84d197b | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import Optional, Callable, Iterable, Union
try: # Assume we're a sub-module in a package.
from utils import arguments as arg
from interfaces import AUTO, Auto, AutoBool, AutoContext
from connectors.abstract.hierarchic_connector import HierarchicConnector
except ImportError: # Apparently no higher-level package has been imported, fall back to a local import.
from ...utils import arguments as arg
from ...interfaces import AUTO, Auto, AutoBool, AutoContext
from .hierarchic_connector import HierarchicConnector
Native = HierarchicConnector
AutoParent = Union[HierarchicConnector, arg.Auto]
class AbstractFolder(HierarchicConnector, ABC):
def __init__(
self,
name: str,
parent: HierarchicConnector,
children: Optional[dict] = None,
context: AutoContext = AUTO,
verbose: AutoBool = arg.AUTO,
):
super().__init__(
name=name,
parent=parent,
children=children,
context=context,
verbose=verbose,
)
def is_root(self) -> bool:
return False
@staticmethod
def is_storage() -> bool:
return False
@staticmethod
def is_folder() -> bool:
return True
class FlatFolder(AbstractFolder):
def __init__(
self,
name,
parent,
verbose=arg.AUTO,
):
super().__init__(
name=name,
parent=parent,
verbose=verbose,
)
@abstractmethod
def get_default_child_class(self) -> Callable:
pass
class HierarchicFolder(AbstractFolder):
def __init__(
self,
name: str,
parent: HierarchicConnector,
verbose: AutoBool = arg.AUTO,
):
super().__init__(
name=name,
parent=parent,
verbose=verbose,
)
def get_default_child_class(self) -> Callable:
return self.__class__
def get_folders(self) -> Iterable:
for obj in self.get_items():
if hasattr(obj, 'is_folder'):
if obj.is_folder(): # isinstance(obj, (AbstractFolder, ct.AbstractFolder, ct.AbstractFile)):
yield obj
def folder(self, name, **kwargs) -> AbstractFolder:
return self.child(name, parent=self, **kwargs)
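# Illustrative sketch (not part of the original module): a concrete FlatFolder only
# has to say which connector class its children default to. FileConnector below is
# a hypothetical placeholder for a real connector class.
#
#     class LocalFolder(FlatFolder):
#         def get_default_child_class(self) -> Callable:
#             return FileConnector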
| 27.545455 | 109 | 0.60396 | 240 | 2,424 | 5.920833 | 0.358333 | 0.035186 | 0.023223 | 0.031668 | 0.414497 | 0.384236 | 0.339198 | 0.229416 | 0.229416 | 0.229416 | 0 | 0 | 0.313944 | 2,424 | 87 | 110 | 27.862069 | 0.85448 | 0.07962 | 0 | 0.458333 | 0 | 0 | 0.004041 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138889 | false | 0.013889 | 0.125 | 0.069444 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0238a1e550111bd00a64639f6e871df715bc7221 | 10,011 | py | Python | src/gui/checkbtn.py | Epihaius/panda3dstudio | f5c62ca49617cae1aa5aa5b695200027da99e242 | [
"BSD-3-Clause"
] | 63 | 2016-01-02T16:28:47.000Z | 2022-01-19T11:29:51.000Z | src/gui/checkbtn.py | Epihaius/panda3dstudio | f5c62ca49617cae1aa5aa5b695200027da99e242 | [
"BSD-3-Clause"
] | 12 | 2016-06-12T14:14:15.000Z | 2020-12-18T16:11:45.000Z | src/gui/checkbtn.py | Epihaius/panda3dstudio | f5c62ca49617cae1aa5aa5b695200027da99e242 | [
"BSD-3-Clause"
] | 17 | 2016-05-23T00:02:27.000Z | 2021-04-25T17:48:27.000Z | from .base import *
class CheckButton(Widget):
_checkmark = None
_box_size = (0, 0)
@classmethod
def init(cls):
gfx_id = Skin.atlas.gfx_ids["checkmark"][""][0][0]
x, y, w, h = Skin.atlas.regions[gfx_id]
cls._checkmark = img = PNMImage(w, h, 4)
img.copy_sub_image(Skin.atlas.image, 0, 0, x, y, w, h)
options = Skin.options
cls._box_size = (options["checkbox_width"], options["checkbox_height"])
def __init__(self, parent, mark_color, back_color, text="", text_offset=0):
container_type = parent.root_container.widget_type
Widget.__init__(self, container_type + "_checkbutton", parent, gfx_ids={})
self._is_clicked = False
self._is_checked = False
self._command = lambda checked: None
self._default_mark_color = self._mark_color = mark_color
self._default_back_color = self._back_color = back_color
self._delay_card_update = False
self._text = text
self._text_offset = text_offset
if text:
widget_type = container_type + "_checkbutton"
skin_text = Skin.text[widget_type]
font = skin_text["font"]
color = skin_text["color"]
self._label = label = font.create_image(text, color)
color = Skin.colors[f"disabled_{widget_type}_text"]
self._label_disabled = font.create_image(text, color)
gfx_id = Skin.atlas.gfx_ids["checkbox"][container_type][0][0]
x, y, w, h = Skin.atlas.regions[gfx_id]
l, _, b, t = self._btn_borders
w_l, h_l = label.size
w += text_offset + w_l - l
h = max(h - b - t, h_l)
self.set_size((w, h), is_min=True)
else:
self._label = self._label_disabled = None
self.set_size(self._box_size, is_min=True)
if not text:
l, r, b, t = Skin.atlas.outer_borders[container_type + "_checkbox"]
btn_borders = (l, r, b, t)
img_offset = (-l, -t)
elif "\n" in text:
l, _, b, t = Skin.atlas.outer_borders[container_type + "_checkbox"]
font = Skin.text[container_type + "_checkbutton"]["font"]
h_f = font.get_height() * (text.count("\n") + 1)
h = Skin.options["checkbox_height"]
dh = max(0, h_f - h) // 2
b = max(0, b - dh)
t = max(0, t - dh)
btn_borders = (l, 0, b, t)
img_offset = (-l, -t)
else:
btn_borders = self._btn_borders
img_offset = self._img_offset
self.outer_borders = btn_borders
self.image_offset = img_offset
def destroy(self):
Widget.destroy(self)
self._command = lambda checked: None
def get_text(self):
return self._text
def set_text(self, text):
if self._text == text:
return False
self._text = text
container_type = self.root_container.widget_type
if text:
widget_type = container_type + "_checkbutton"
skin_text = Skin.text[widget_type]
font = skin_text["font"]
color = skin_text["color"]
self._label = label = font.create_image(text, color)
color = Skin.colors[f"disabled_{widget_type}_text"]
self._label_disabled = font.create_image(text, color)
gfx_id = Skin.atlas.gfx_ids["checkbox"][container_type][0][0]
x, y, w, h = Skin.atlas.regions[gfx_id]
l, _, b, t = self._btn_borders
w_l, h_l = label.size
w += self._text_offset + w_l - l
h = max(h - b - t, h_l)
self.set_size((w, h), is_min=True)
else:
self._label = self._label_disabled = None
self.set_size(self._box_size, is_min=True)
if not text:
widget_type = container_type + "_checkbox"
l, r, b, t = Skin.atlas.outer_borders[widget_type]
btn_borders = (l, r, b, t)
img_offset = (-l, -t)
elif "\n" in text:
l, _, b, t = Skin.atlas.outer_borders[container_type + "_checkbox"]
font = Skin.text[container_type + "_checkbutton"]["font"]
h_f = font.get_height() * (text.count("\n") + 1)
h = Skin.options["checkbox_height"]
dh = max(0, h_f - h) // 2
b = max(0, b - dh)
t = max(0, t - dh)
btn_borders = (l, 0, b, t)
img_offset = (-l, -t)
else:
btn_borders = self._btn_borders
img_offset = self._img_offset
self.outer_borders = btn_borders
self.image_offset = img_offset
self.create_base_image()
return True
def set_text_offset(self, text_offset):
if self._text_offset == text_offset:
return False
if self._text:
w, h = self.get_size()
w += text_offset - self._text_offset
self.set_size((w, h), is_min=True)
self._text_offset = text_offset
self.create_base_image()
return True
@property
def command(self):
return self._command
@command.setter
def command(self, command):
self._command = command if command else lambda checked: None
def delay_card_update(self, delay=True):
self._delay_card_update = delay
def is_card_update_delayed(self):
return self._delay_card_update
def __card_update_task(self):
if self.is_hidden():
return
image = self.get_image(composed=False)
parent = self.parent
if not (image and parent):
return
img_offset_x, img_offset_y = self.image_offset
if self._label:
x, y = self.get_pos()
w, h = self.get_size()
w -= img_offset_x
h -= img_offset_y
x += img_offset_x
y += img_offset_y
img = PNMImage(w, h, 4)
parent_img = parent.get_image(composed=False)
if parent_img:
img.copy_sub_image(parent_img, 0, 0, x, y, w, h)
img.blend_sub_image(image, 0, 0, 0, 0)
self.card.copy_sub_image(self, img, w, h, img_offset_x, img_offset_y)
else:
w, h = image.size
self.card.copy_sub_image(self, image, w, h, img_offset_x, img_offset_y)
def __update_card_image(self):
task = self.__card_update_task
if self._delay_card_update:
task_id = "update_card_image"
PendingTasks.add(task, task_id, sort=1, id_prefix=self.widget_id,
batch_id="widget_card_update")
else:
task()
def update_images(self, recurse=True, size=None):
self._images = {"": self._base_img}
return self._images
def create_base_image(self):
border_image = self.get_border_image()
w_b, h_b = border_image.size
label = self._label
if label:
w_l, h_l = label.size
x_l = w_b + self._text_offset
w = x_l + w_l
h = max(h_b, h_l)
y_l = (h - h_l) // 2
y_b = (h - h_b) // 2
self._label_pos = (x_l, y_l)
else:
w, h = w_b, h_b
y_b = 0
img_offset_x, img_offset_y = self.get_box_image_offset()
self._box_pos = (-img_offset_x, y_b - img_offset_y)
self._base_img = img = PNMImage(w, h, 4)
box_img = PNMImage(*self._box_size, 4)
r, g, b, a = self._back_color
box_img.fill(r, g, b)
box_img.alpha_fill(a)
img.copy_sub_image(box_img, *self._box_pos, 0, 0)
img.blend_sub_image(border_image, 0, y_b, 0, 0)
def get_image(self, state=None, composed=True):
image = Widget.get_image(self, state, composed)
if not image:
return
img = PNMImage(image)
if not self.is_enabled():
label = self._label_disabled
else:
label = self._label
if label:
img.copy_sub_image(label, *self._label_pos, 0, 0)
if self._is_checked:
w, h = self._box_size
checkmark = PNMImage(self._checkmark) * self._mark_color
w_c, h_c = checkmark.size
x, y = self._box_pos
x += (w - w_c) // 2
y += (h - h_c) // 2
img.blend_sub_image(checkmark, x, y, 0, 0)
return img
def get_label_pos(self):
return self._label_pos
def on_leave(self):
self._is_clicked = False
def on_left_down(self):
self._is_clicked = True
def on_left_up(self):
if self._is_clicked:
self._is_checked = not self._is_checked
self._command(self._is_checked)
self._is_clicked = False
self.__update_card_image()
def set_checkmark_color(self, color=None):
checkmark_color = color if color else self._default_mark_color
if self._mark_color != checkmark_color:
self._mark_color = checkmark_color
self.__update_card_image()
def get_checkmark_color(self):
return self._mark_color
def set_back_color(self, color=None):
back_color = color if color else self._default_back_color
if self._back_color != back_color:
self._back_color = back_color
self.create_base_image()
self._images = {"": self._base_img}
self.__update_card_image()
def get_back_color(self):
return self._back_color
def check(self, check=True):
if self._is_checked != check:
self._is_checked = check
self.__update_card_image()
def is_checked(self):
return self._is_checked
def enable(self, enable=True):
if not Widget.enable(self, enable):
return False
self.__update_card_image()
return True
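# Illustrative usage sketch (not part of the original module). `panel` stands for an
# existing parent widget; the colors are RGBA tuples as used elsewhere in this file.
#
#     button = CheckButton(panel, mark_color=(1., 1., 1., 1.),
#                          back_color=(.2, .2, .2, 1.), text="Show grid")
#     button.command = lambda checked: print("grid visible:", checked)
#     button.check()  # turns the checkmark on and refreshes the card image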
| 29.27193 | 83 | 0.56408 | 1,351 | 10,011 | 3.833457 | 0.085862 | 0.041707 | 0.020081 | 0.003862 | 0.508592 | 0.417841 | 0.371886 | 0.337903 | 0.30643 | 0.297934 | 0 | 0.007183 | 0.332534 | 10,011 | 341 | 84 | 29.357771 | 0.767884 | 0 | 0 | 0.447581 | 0 | 0 | 0.030267 | 0.005394 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104839 | false | 0 | 0.004032 | 0.028226 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0239192e0acdf2cc7d14a1f2cffa166997b16561 | 2,685 | py | Python | setup.py | DatagoHK/breadability | 95a364c43b00baf6664bea1997a7310827fb1ee9 | [
"BSD-2-Clause"
] | 156 | 2015-01-14T05:32:49.000Z | 2021-10-10T23:45:23.000Z | setup.py | DatagoHK/breadability | 95a364c43b00baf6664bea1997a7310827fb1ee9 | [
"BSD-2-Clause"
] | 5 | 2015-04-07T10:15:58.000Z | 2019-08-04T12:24:53.000Z | setup.py | DatagoHK/breadability | 95a364c43b00baf6664bea1997a7310827fb1ee9 | [
"BSD-2-Clause"
] | 21 | 2015-02-08T23:21:44.000Z | 2022-01-20T10:51:21.000Z | import sys
from os.path import (
abspath,
dirname,
join,
)
from setuptools import setup
VERSION = "0.1.20"
VERSION_SUFFIX = "%d.%d" % sys.version_info[:2]
CURRENT_DIRECTORY = abspath(dirname(__file__))
with open(join(CURRENT_DIRECTORY, "README.rst")) as readme:
with open(join(CURRENT_DIRECTORY, "CHANGELOG.rst")) as changelog:
long_description = "%s\n\n%s" % (readme.read(), changelog.read())
install_requires = [
"docopt>=0.6.1,<0.7",
"chardet",
"lxml>=2.0",
]
tests_require = [
"pytest",
"pytest-cov",
"coverage",
"pylint",
"pep8",
]
console_script_targets = [
"breadability = breadability.scripts.client:main",
"breadability-{0} = breadability.scripts.client:main",
"breadability_test = breadability.scripts.test_helper:main",
"breadability_test-{0} = breadability.scripts.test_helper:main",
]
console_script_targets = [
target.format(VERSION_SUFFIX) for target in console_script_targets
]
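# For example, under Python 3.6 the format() call above produces version-suffixed
# entry points such as "breadability-3.6 = breadability.scripts.client:main"
# alongside the unversioned "breadability = breadability.scripts.client:main".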
setup(
name="breadability",
version=VERSION,
description="Port of Readability HTML parser in Python",
long_description=long_description,
keywords=[
"bookie",
"breadability",
"content",
"HTML",
"parsing",
"readability",
"readable",
],
author="Rick Harding",
author_email="rharding@mitechie.com",
url="https://github.com/bookieio/breadability",
license="BSD",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Pre-processors",
"Topic :: Text Processing :: Filters",
"Topic :: Text Processing :: Markup :: HTML",
],
packages=['breadability', 'breadability.scripts'],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
tests_require=tests_require,
test_suite="tests",
entry_points={
"console_scripts": console_script_targets,
}
)
| 28.263158 | 73 | 0.627933 | 277 | 2,685 | 5.949458 | 0.454874 | 0.12682 | 0.166869 | 0.09466 | 0.156553 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015474 | 0.229795 | 2,685 | 94 | 74 | 28.56383 | 0.781431 | 0 | 0 | 0.048193 | 0 | 0 | 0.47635 | 0.067039 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.036145 | 0 | 0.036145 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02397c92a9df23d44ecbce1472f3122875dbec21 | 330 | py | Python | homework_1_b/2.py | kirilllapushinskiy/yandex-algorithm-training-2.0 | 712542296da8e61be34b86066a0618a7f144098a | [
"MIT"
] | null | null | null | homework_1_b/2.py | kirilllapushinskiy/yandex-algorithm-training-2.0 | 712542296da8e61be34b86066a0618a7f144098a | [
"MIT"
] | null | null | null | homework_1_b/2.py | kirilllapushinskiy/yandex-algorithm-training-2.0 | 712542296da8e61be34b86066a0618a7f144098a | [
"MIT"
] | null | null | null | n, i, j = map(int, input().split())
# Number of stations to the last one and to the first one.
i_forward = n - i
i_back = i - 1
j_forward = n - j
j_back = j - 1
first_way = (i_forward + j_back) % n
second_way = (i_back + j_forward) % n
print(first_way if first_way < second_way else second_way)
#stations = [s for s in range(1, n + 1)]
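# Worked example (illustrative): n=5, i=1, j=4 gives first_way = (4 + 3) % 5 = 2 and
# second_way = (0 + 1) % 5 = 1, so the script prints 1 (the shorter way around the loop).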
| 15.714286 | 58 | 0.642424 | 65 | 330 | 3.046154 | 0.430769 | 0.121212 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015686 | 0.227273 | 330 | 20 | 59 | 16.5 | 0.760784 | 0.239394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
023add54aae89c13a13c6629794ed43a32ae511d | 2,463 | py | Python | cs285/data/samplers/parallel_sampler.py | brandontrabucco/cs285 | 0ed5fca1d897bf197a43e2be14b204606ae4c36c | [
"MIT"
] | null | null | null | cs285/data/samplers/parallel_sampler.py | brandontrabucco/cs285 | 0ed5fca1d897bf197a43e2be14b204606ae4c36c | [
"MIT"
] | null | null | null | cs285/data/samplers/parallel_sampler.py | brandontrabucco/cs285 | 0ed5fca1d897bf197a43e2be14b204606ae4c36c | [
"MIT"
] | null | null | null | """Author: Brandon Trabucco, Copyright 2019, MIT License"""
from cs285.data.samplers.simple_sampler import SimpleSampler
from cs285.data.samplers.sampler import Sampler
import numpy as np
import threading
def collect_backend(
inner_paths,
inner_mean_returns,
inner_steps_collected,
inner_num_episodes,
inner_evaluate,
inner_render,
inner_render_kwargs,
inner_sampler
):
# only collect if work is given
if inner_num_episodes > 0:
result_paths, result_mean_return, result_steps_collected = inner_sampler.collect(
inner_num_episodes,
evaluate=inner_evaluate,
render=inner_render,
**inner_render_kwargs)
# push collected samplers into the main sampler thread
inner_paths.extend(result_paths)
inner_mean_returns.append(result_mean_return)
inner_steps_collected.append(result_steps_collected)
class ParallelSampler(Sampler):
def __init__(
self,
*args,
num_threads=1,
**kwargs
):
self.samplers = [SimpleSampler(*args, **kwargs) for i in range(num_threads)]
self.num_threads = num_threads
def collect(
self,
num_episodes,
evaluate=False,
render=False,
**render_kwargs
):
# only spawn threads if paths need to be collected
if num_episodes == 0:
return [], 0.0, 0
# collect many paths in parallel
paths = []
mean_returns = []
steps_collected = []
# start several sampler threads in parallel
threads = [threading.Thread(
target=collect_backend, args=(
paths,
mean_returns,
steps_collected,
# the first thread may have extra episodes to collect (the remainder)
num_episodes // self.num_threads + (num_episodes % self.num_threads if i == 0 else 0),
evaluate,
render,
render_kwargs,
self.samplers[i])) for i in range(self.num_threads)]
# wait until all samplers finish
for t in threads:
t.start()
for t in threads:
t.join()
# merge the statistics from every sampler in the main thread
return paths, np.mean(mean_returns, dtype=np.float32), np.sum(steps_collected, dtype=np.int32)
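# Episode-split illustration (not part of the original module): with num_episodes=10
# and num_threads=4, thread 0 collects 10 // 4 + 10 % 4 = 4 episodes while threads
# 1-3 collect 2 each, so all 10 requested episodes are covered.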
| 30.036585 | 102 | 0.601705 | 276 | 2,463 | 5.144928 | 0.315217 | 0.069014 | 0.039437 | 0.029577 | 0.13662 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013333 | 0.330085 | 2,463 | 81 | 103 | 30.407407 | 0.847273 | 0.164434 | 0 | 0.152542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050847 | false | 0 | 0.067797 | 0 | 0.169492 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
024020b5d3aefc10ad57a180342680efd5accc6c | 2,884 | py | Python | build_readme.py | philovdy/philovdy | 34f0c40adba66c0a3f24e2318db53eb305291dc2 | [
"Apache-2.0"
] | null | null | null | build_readme.py | philovdy/philovdy | 34f0c40adba66c0a3f24e2318db53eb305291dc2 | [
"Apache-2.0"
] | null | null | null | build_readme.py | philovdy/philovdy | 34f0c40adba66c0a3f24e2318db53eb305291dc2 | [
"Apache-2.0"
] | null | null | null | import feedparser
import httpx
import pathlib
import re
import os
import requests
import git
root = pathlib.Path(__file__).parent.resolve()
def replace_chunk(content, marker, chunk, inline=False):
r = re.compile(
r"<!\-\- {} starts \-\->.*<!\-\- {} ends \-\->".format(marker, marker),
re.DOTALL,
)
if not inline:
chunk = "\n{}\n".format(chunk)
chunk = "<!-- {} starts -->{}<!-- {} ends -->".format(marker, chunk, marker)
return r.sub(chunk, content)
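# Illustrative example (not part of the original script): given content containing
# "<!-- blog starts -->old list<!-- blog ends -->", calling
# replace_chunk(content, "blog", "* [Post](https://example.com)") replaces everything
# between the two markers with the new chunk while keeping the markers in place.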
def get_tils():
#til_readme = "https://raw.githubusercontent.com/philovdy/til/master/README.md"
til_readme = "https://raw.githubusercontent.com/vidyabhandary/TIL/master/README.md"
page = requests.get(til_readme)
print(page)
all_text = page.text
print(all_text)
search_re = re.findall( r'(\*+).(\[.*?\])(\(.*?\)).?-(.+)', all_text, re.M|re.I)
dt_til = sorted(search_re, key=lambda search_re: search_re[3], reverse=True)[:3]
print('^' * 50)
print('DT_TIL upto 3', dt_til)
til_md = ""
for i in dt_til:
til_md += "\n" + i[0] + ' ' + i[1] + i[2]
print('^' * 50)
print('TIL_MD upto 3', til_md)
print(til_md)
return til_md
# with open(all_text, "r") as ins:
# line = ins.readline()
# searchObj = re.search( r'(\*+).(\[.*?\])(\(.*?\)).?-(.+)', line, re.M|re.I)
# print(line)
# til_read = "https://github.com/philovdy/til/blob/master/README.md?raw=true"
# with open(til_readme, "r") as ins:
# line = ins.readline()
# print(line)
# for filepath in root.glob("*/*.md"):
# fp = filepath.open()
# title = fp.readline().lstrip("#").strip()
# body = fp.read().strip()
# path = str(filepath.relative_to(root))
# with open(til_file, "r") as ins:
# for line in ins:
# print(line_test)
def fetch_blog_entries():
entries = feedparser.parse("https://philovdy.github.io/github-pages-with-jekyll/feed.xml")["entries"]
return [
{
"title": entry["title"],
"url": entry["link"].split("#")[0],
"published": entry["published"].split("T")[0],
}
for entry in entries
]
if __name__ == "__main__":
readme = root / "README.md"
print('root is ', root)
readme_contents = readme.open().read()
entries = fetch_blog_entries()[:5]
entries_md = "\n".join(
# ["* [{title}]({url}) - {published}".format(**entry) for entry in entries]
["* [{title}]({url})".format(**entry) for entry in entries]
)
rewritten = replace_chunk(readme_contents, "blog", entries_md)
til_readme_contents = get_tils()
rewritten = replace_chunk(rewritten, "tils", til_readme_contents)
readme.open("w").write(rewritten)
| 28 | 105 | 0.556865 | 358 | 2,884 | 4.329609 | 0.307263 | 0.040645 | 0.027097 | 0.032903 | 0.110968 | 0.110968 | 0 | 0 | 0 | 0 | 0 | 0.006475 | 0.250347 | 2,884 | 102 | 106 | 28.27451 | 0.710453 | 0.273232 | 0 | 0.035088 | 0 | 0 | 0.180067 | 0.014925 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.122807 | 0 | 0.22807 | 0.140351 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0240a605e6b0c80034c03bf2aa1bb46119680cf4 | 552 | py | Python | utils/async_cache.py | cyr580/Bloo | 53dc6ecc3474f5234938577a8fd06fc4656b43cf | [
"MIT"
] | null | null | null | utils/async_cache.py | cyr580/Bloo | 53dc6ecc3474f5234938577a8fd06fc4656b43cf | [
"MIT"
] | null | null | null | utils/async_cache.py | cyr580/Bloo | 53dc6ecc3474f5234938577a8fd06fc4656b43cf | [
"MIT"
] | null | null | null | from collections import OrderedDict
from functools import wraps
def async_cacher(size=1024):
cache = OrderedDict()
def decorator(fn):
@wraps(fn)
async def memoizer(*args, **kwargs):
key = str((args, kwargs))
try:
cache[key] = cache.pop(key)
except KeyError:
if len(cache) >= size:
cache.popitem(last=False)
cache[key] = await fn(*args, **kwargs)
return cache[key]
return memoizer
return decorator
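# Illustrative usage sketch (not part of the original module); slow_double is a
# hypothetical coroutine used only to exercise the decorator.
if __name__ == "__main__":
    import asyncio

    @async_cacher(size=2)
    async def slow_double(x):
        return x * 2

    async def demo():
        print(await slow_double(3))  # computed on the first call
        print(await slow_double(3))  # served from the cache; entry moved to the end (LRU)

    asyncio.run(demo())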
| 26.285714 | 54 | 0.538043 | 58 | 552 | 5.103448 | 0.517241 | 0.101351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011396 | 0.36413 | 552 | 20 | 55 | 27.6 | 0.831909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.117647 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0241c8290b8c2d442c51bcd2775f361ec8807850 | 13,122 | py | Python | ephios/plugins/basesignup/signup/section_based.py | garinm90/ephios | 7d04d3287ae16ee332e31add1f25829b199f29a5 | [
"MIT"
] | null | null | null | ephios/plugins/basesignup/signup/section_based.py | garinm90/ephios | 7d04d3287ae16ee332e31add1f25829b199f29a5 | [
"MIT"
] | null | null | null | ephios/plugins/basesignup/signup/section_based.py | garinm90/ephios | 7d04d3287ae16ee332e31add1f25829b199f29a5 | [
"MIT"
] | null | null | null | import uuid
from functools import cached_property
from itertools import groupby
from operator import itemgetter
from django import forms
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.shortcuts import redirect
from django.template.loader import get_template
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import FormView
from django_select2.forms import Select2MultipleWidget
from dynamic_preferences.registries import global_preferences_registry
from ephios.core.models import AbstractParticipation, Qualification
from ephios.core.signup import (
AbstractParticipant,
BaseDispositionParticipationForm,
BaseSignupMethod,
BaseSignupView,
ParticipationError,
)
def sections_participant_qualifies_for(sections, participant: AbstractParticipant):
available_qualification_ids = set(q.id for q in participant.collect_all_qualifications())
return [
section
for section in sections
if set(section["qualifications"]) <= available_qualification_ids
]
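# Illustrative example (not part of the original module): a section requiring
# qualification ids {1, 2} is returned for a participant holding {1, 2, 5}
# because {1, 2} <= {1, 2, 5}, but not for a participant holding only {1}.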
class SectionBasedDispositionParticipationForm(BaseDispositionParticipationForm):
disposition_participation_template = "basesignup/section_based/fragment_participant.html"
section = forms.ChoiceField(
label=_("Section"),
required=False, # only required if participation is confirmed
widget=forms.Select(
attrs={"data-show-for-state": str(AbstractParticipation.States.CONFIRMED)}
),
)
def __init__(self, **kwargs):
super().__init__(**kwargs)
sections = self.shift.signup_method.configuration.sections
qualified_sections = list(
sections_participant_qualifies_for(
sections,
self.instance.participant,
)
)
unqualified_sections = [
section for section in sections if section not in qualified_sections
]
self.fields["section"].choices = [("", "---")]
if qualified_sections:
self.fields["section"].choices += [
(
_("qualified"),
[(section["uuid"], section["title"]) for section in qualified_sections],
)
]
if unqualified_sections:
self.fields["section"].choices += [
(
_("unqualified"),
[(section["uuid"], section["title"]) for section in unqualified_sections],
)
]
if preferred_section_uuid := self.instance.data.get("preferred_section_uuid"):
self.fields["section"].initial = preferred_section_uuid
self.preferred_section = next(
filter(lambda section: section["uuid"] == preferred_section_uuid, sections), None
)
if initial := self.instance.data.get("dispatched_section_uuid"):
self.fields["section"].initial = initial
def clean(self):
super().clean()
if (
self.cleaned_data["state"] == AbstractParticipation.States.CONFIRMED
and not self.cleaned_data["section"]
):
self.add_error(
"section",
ValidationError(_("You must select a section when confirming a participation.")),
)
def save(self, commit=True):
self.instance.data["dispatched_section_uuid"] = self.cleaned_data["section"]
super().save(commit)
class SectionForm(forms.Form):
title = forms.CharField(label=_("Title"), required=True)
qualifications = forms.ModelMultipleChoiceField(
label=_("Required Qualifications"),
queryset=Qualification.objects.all(),
widget=Select2MultipleWidget,
required=False,
)
min_count = forms.IntegerField(label=_("min amount"), min_value=0, required=True)
uuid = forms.CharField(widget=forms.HiddenInput, required=False)
def clean_uuid(self):
return self.cleaned_data.get("uuid") or uuid.uuid4()
SectionsFormset = forms.formset_factory(
SectionForm, can_delete=True, min_num=1, validate_min=1, extra=0
)
class SectionBasedConfigurationForm(forms.Form):
def __init__(self, data=None, **kwargs):
super().__init__(data, **kwargs)
self.sections_formset = SectionsFormset(
data=data,
initial=self.initial.get("sections", list()),
prefix="sections",
)
def clean_sections(self):
if not self.sections_formset.is_valid():
raise ValidationError(_("The sections aren't configured correctly."))
sections = [
{
key: form.cleaned_data[key]
for key in ("title", "qualifications", "min_count", "uuid")
}
for form in self.sections_formset
]
return sections
class SectionSignupForm(forms.Form):
section = forms.ChoiceField(
label=_("Preferred Section"),
widget=forms.RadioSelect,
required=False,
# choices are set as (uuid, title) of section
)
class SectionBasedSignupView(FormView, BaseSignupView):
template_name = "basesignup/section_based/signup.html"
@cached_property
def sections_participant_qualifies_for(self):
return sections_participant_qualifies_for(
self.method.configuration.sections, self.participant
)
def get_form(self, form_class=None):
form = SectionSignupForm(self.request.POST)
form.fields["section"].choices = [
(section["uuid"], section["title"])
for section in self.sections_participant_qualifies_for
]
return form
def get_context_data(self, **kwargs):
kwargs.setdefault("shift", self.shift)
kwargs.setdefault(
"unqualified_sections",
[
section["title"]
for section in self.method.configuration.sections
if section not in self.sections_participant_qualifies_for
],
)
return super().get_context_data(**kwargs)
def form_valid(self, form):
return super().signup_pressed(preferred_section_uuid=form.cleaned_data.get("section"))
def signup_pressed(self, **kwargs):
if not self.method.configuration.choose_preferred_section:
# do straight signup if choosing is not enabled
return super().signup_pressed(**kwargs)
if not self.method.can_sign_up(self.participant):
# redirect a misled request
messages.warning(self.request, _("You can not sign up for this shift."))
return redirect(self.participant.reverse_event_detail(self.shift.event))
# all good, redirect to the form
return redirect(self.participant.reverse_signup_action(self.shift))
class SectionBasedSignupMethod(BaseSignupMethod):
slug = "section_based"
verbose_name = _("Apply for sections")
description = _(
"""This method lets you define sections for which people can choose from.
Sections contain qualifications that helpers need to fulfil."""
)
registration_button_text = _("Request")
signup_success_message = _("You have successfully requested a participation for {shift}.")
signup_error_message = _("Requesting a participation failed: {error}")
configuration_form_class = SectionBasedConfigurationForm
signup_view_class = SectionBasedSignupView
disposition_participation_form_class = SectionBasedDispositionParticipationForm
def get_configuration_fields(self):
return {
**super().get_configuration_fields(),
"choose_preferred_section": {
"formfield": forms.BooleanField(
label=_("Ask participants for a preferred section"),
help_text=_("This only makes sense if you configure multiple sections."),
widget=forms.CheckboxInput,
required=False,
),
"default": False,
},
"sections": {
"formfield": forms.Field(
label=_("Structure"),
widget=forms.HiddenInput,
required=False,
),
"default": [],
},
}
def get_participant_count_bounds(self):
return sum(section.get("min_count") or 0 for section in self.configuration.sections), None
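# Illustrative example (not part of the original module): for sections with
# min_count values 2, 3 and None the lower bound is 2 + 3 + 0 = 5, and the upper
# bound is None, i.e. unlimited.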
@staticmethod
def check_qualification(method, participant):
if not sections_participant_qualifies_for(method.configuration.sections, participant):
return ParticipationError(_("You are not qualified."))
@property
def signup_checkers(self):
return super().signup_checkers + [self.check_qualification]
# pylint: disable=arguments-differ
def perform_signup(
self, participant: AbstractParticipant, preferred_section_uuid=None, **kwargs
):
participation = super().perform_signup(participant, **kwargs)
participation.data["preferred_section_uuid"] = preferred_section_uuid
if preferred_section_uuid:
# reset dispatch decision, as that would have overwritten the preferred choice
participation.data["dispatched_section_uuid"] = None
participation.state = AbstractParticipation.States.REQUESTED
participation.save()
def render_configuration_form(self, *args, form=None, **kwargs):
form = form or self.get_configuration_form(*args, **kwargs)
template = get_template("basesignup/section_based/configuration_form.html").render(
{"form": form}
)
return template
def _get_sections_with_users(self):
relevant_qualification_categories = global_preferences_registry.manager()[
"general__relevant_qualification_categories"
]
section_by_uuid = {section["uuid"]: section for section in self.configuration.sections}
# get name and preferred section uuid for confirmed participants
# if they have a section assigned and we have that section on record
confirmed_participations = [
{
"name": str(participation.participant),
"relevant_qualifications": ", ".join(
participation.participant.qualifications.filter(
category__in=relevant_qualification_categories
).values_list("abbreviation", flat=True)
),
"uuid": dispatched_section_uuid,
}
for participation in self.shift.participations.filter(
state=AbstractParticipation.States.CONFIRMED
)
if (dispatched_section_uuid := participation.data.get("dispatched_section_uuid"))
and dispatched_section_uuid in section_by_uuid
]
# group by section and do some stats
sections_with_users = [
(
section_by_uuid.pop(uuid),
[[user["name"], user["relevant_qualifications"]] for user in group],
)
for uuid, group in groupby(
sorted(confirmed_participations, key=itemgetter("uuid")), itemgetter("uuid")
)
]
# add sections without participants
sections_with_users += [(section, None) for section in section_by_uuid.values()]
return sections_with_users
def render_shift_state(self, request):
return get_template("basesignup/section_based/fragment_state.html").render(
{
"shift": self.shift,
"requested_participations": (
self.shift.participations.filter(state=AbstractParticipation.States.REQUESTED)
),
"sections_with_users": self._get_sections_with_users(),
"disposition_url": (
reverse(
"core:shift_disposition",
kwargs=dict(pk=self.shift.pk),
)
if request.user.has_perm("core.change_event", obj=self.shift.event)
else None
),
}
)
def get_participation_display(self):
confirmed_sections_with_users = self._get_sections_with_users()
participation_display = []
for section, users in confirmed_sections_with_users:
if users:
participation_display += [[user[0], user[1], section["title"]] for user in users]
if not users or len(users) < section["min_count"]:
required_qualifications = ", ".join(
Qualification.objects.filter(pk__in=section["qualifications"]).values_list(
"abbreviation", flat=True
)
)
participation_display += [["", required_qualifications, section["title"]]] * (
section["min_count"] - (len(users) if users else 0)
)
return participation_display
| 38.937685 | 98 | 0.630773 | 1,266 | 13,122 | 6.324645 | 0.21722 | 0.030224 | 0.024978 | 0.027101 | 0.173099 | 0.088048 | 0.049082 | 0.010241 | 0 | 0 | 0 | 0.001268 | 0.278997 | 13,122 | 336 | 99 | 39.053571 | 0.845048 | 0.038028 | 0 | 0.060498 | 0 | 0 | 0.110942 | 0.037863 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074733 | false | 0 | 0.05694 | 0.024911 | 0.27758 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
024322e209b41af41b61f3f42f2ce9c40939a311 | 1,057 | py | Python | data/gas/preprocess.py | ingako/lifelong-ml | a0108502b3e1ba5556a6cf6f1123037900db6427 | [
"Apache-2.0"
] | 2 | 2020-06-24T08:00:31.000Z | 2022-01-21T11:38:18.000Z | data/gas/preprocess.py | ingako/lifelong-ml | a0108502b3e1ba5556a6cf6f1123037900db6427 | [
"Apache-2.0"
] | null | null | null | data/gas/preprocess.py | ingako/lifelong-ml | a0108502b3e1ba5556a6cf6f1123037900db6427 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import pandas as pd
import shutil
id_to_class = {}
class_to_code = {"banana":0, "wine":1, "background":2}
with open("HT_Sensor_metadata.dat") as f:
header = f.readline()
for line in f:
row = line.split()
id_to_class[int(row[0])] = class_to_code[row[2]]
df = pd.read_table("HT_Sensor_dataset.dat", sep=r"\s+")
df['id'] = df['id'].map(lambda x: id_to_class[x])
df = df.sort_values('time')
# swap cols
cols = list(df.columns)
first_col = cols[0]
last_col = cols[len(cols) - 1]
cols[0], cols[len(cols) - 1] = last_col, first_col
df=df.reindex(columns=cols)
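# Column-swap illustration (not part of the original script): 'id' now holds the class
# code (0/1/2), and swapping the first and last columns moves it to the end so the
# CSV finishes with the class label, matching the ARFF header written below.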
df.to_csv("gas.csv", sep=',', index=None, header=False)
# generate arff headers
with open("headers.txt", "w") as out:
for col in cols[:-1]:
out.write(f"@attribute {col} numeric\n")
out.write("@attribute class {0,1,2}\n")
out.write("\n@data")
# merge arff headers and data files
with open('gas.arff','wb') as wfd:
for f in ["headers.txt", "gas.csv"]:
with open(f,'rb') as fd:
shutil.copyfileobj(fd, wfd)
| 24.022727 | 56 | 0.630085 | 182 | 1,057 | 3.543956 | 0.434066 | 0.049612 | 0.04186 | 0.037209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015046 | 0.182592 | 1,057 | 43 | 57 | 24.581395 | 0.731481 | 0.081362 | 0 | 0 | 0 | 0 | 0.189245 | 0.044467 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.074074 | 0 | 0.074074 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
024335f422ea1c808458610143938a0833354c83 | 528 | py | Python | app/models.py | trevor-ngugi/citadel-news | b99c7c5d3425a0d25c4f5d06036825814f36be9e | [
"Unlicense"
] | null | null | null | app/models.py | trevor-ngugi/citadel-news | b99c7c5d3425a0d25c4f5d06036825814f36be9e | [
"Unlicense"
] | 7 | 2021-03-19T10:08:15.000Z | 2022-03-12T00:10:46.000Z | app/models.py | trevor-ngugi/citadel-news | b99c7c5d3425a0d25c4f5d06036825814f36be9e | [
"Unlicense"
] | null | null | null | class Article:
"""
article class to define the article objects
"""
def __init__(self,author,title,description,url,urlToImage,published_At,content):
self.author=author
self.title=title
self.description=description
self.url=url
self.urlToImage=urlToImage
self.published_At=published_At
self.content=content
class Source:
"""
source class to define the source objects
"""
def __init__(self,id,name):
self.id=id
self.name=name | 26.4 | 84 | 0.645833 | 63 | 528 | 5.238095 | 0.31746 | 0.1 | 0.078788 | 0.09697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.265152 | 528 | 20 | 85 | 26.4 | 0.850515 | 0.162879 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0245aa669b4e6e5eff50948a70d095cbf640b347 | 1,242 | py | Python | accounts/urls.py | AlexTrapp/SteemLogs | da170b6a61c1b2cf74819db92238f9ae92dae7de | [
"MIT"
] | 2 | 2017-11-18T23:20:30.000Z | 2017-11-29T20:10:23.000Z | accounts/urls.py | AlexTrapp/SteemLogs | da170b6a61c1b2cf74819db92238f9ae92dae7de | [
"MIT"
] | 1 | 2017-12-31T05:55:19.000Z | 2017-12-31T05:55:19.000Z | accounts/urls.py | coffeesource-net/coffeesource_app | da170b6a61c1b2cf74819db92238f9ae92dae7de | [
"MIT"
] | 3 | 2017-12-30T15:26:59.000Z | 2018-12-08T20:02:04.000Z | from django.conf.urls import url
from .views import UsernameSearchFormView
from .views import AjaxLoadAccountPostsView
from .views import ImagesBacklinkView
from .views import ImagesBacklinkViewDetail
from .views import AjaxLoadPostsImagesView
from .views import PepperView
from .views import TrainingGrounds
urlpatterns = [
url(
r'^username_search_form/',
UsernameSearchFormView.as_view(),
name='username_search_form',
),
url(
r'^ax_load_account_posts/',
AjaxLoadAccountPostsView.as_view(),
name='ax_load_account_posts',
),
url(
r'^images_backlink/$',
ImagesBacklinkView.as_view(),
name='images_backlink',
),
url(
r'^images_backlink/(?P<username>[-\w.@]+)/$',
ImagesBacklinkViewDetail.as_view(),
name='images_backlink_detail',
),
url(
r'^ax_load_posts_images/',
AjaxLoadPostsImagesView.as_view(),
name='ax_load_posts_images',
),
url(
r'^papa-pepper-selfie-contest-1/',
PepperView.as_view(),
name='pepper_selfie_contest',
),
url(
r'^training_grounds/',
TrainingGrounds.as_view(),
name='training_grounds',
),
]
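# Illustrative note (not part of the original module): assuming this urlconf is
# included at the site root, reverse('images_backlink_detail',
# kwargs={'username': 'alice'}) resolves to '/images_backlink/alice/'.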
| 23 | 53 | 0.640902 | 123 | 1,242 | 6.211382 | 0.308943 | 0.082461 | 0.137435 | 0.026178 | 0.104712 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001065 | 0.243961 | 1,242 | 53 | 54 | 23.433962 | 0.812567 | 0 | 0 | 0.311111 | 0 | 0 | 0.248792 | 0.162641 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.177778 | 0 | 0.177778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
024686c98a1f5bb54203018a00867a0f8ca7c720 | 736 | py | Python | users/utils.py | natbat/natlink | 02ddb4a9f779f448520a80e9b1912a345f4e199b | [
"Apache-2.0"
] | null | null | null | users/utils.py | natbat/natlink | 02ddb4a9f779f448520a80e9b1912a345f4e199b | [
"Apache-2.0"
] | 1 | 2020-06-05T20:24:39.000Z | 2020-06-05T20:24:39.000Z | users/utils.py | natbat/natlink | 02ddb4a9f779f448520a80e9b1912a345f4e199b | [
"Apache-2.0"
] | null | null | null | from .models import User
COOKIE_NAME = "natlink_auth"
COOKIE_SALT = "natlink-auth"
def set_cookie_for_user(response, user):
response.set_signed_cookie(
COOKIE_NAME,
user.pk,
salt=COOKIE_SALT,
max_age=365 * 24 * 60 * 60,
httponly=True,
samesite="Strict",
)
def clear_cookie_for_user(response):
response.delete_cookie(COOKIE_NAME)
def user_auth_middleware(get_response):
def middleware(request):
user_id = request.get_signed_cookie(COOKIE_NAME, default=None, salt=COOKIE_SALT)
if user_id:
request.auth = User.objects.get(pk=user_id)
else:
request.auth = None
return get_response(request)
return middleware
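# Illustrative wiring sketch (not part of the original module); the middleware path
# and the view body are assumptions:
#
#     # settings.py
#     MIDDLEWARE = [..., "users.utils.user_auth_middleware"]
#
#     # in a login view
#     response = redirect("/")
#     set_cookie_for_user(response, user)  # later requests then get request.auth set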
| 23 | 88 | 0.663043 | 94 | 736 | 4.904255 | 0.382979 | 0.086768 | 0.104121 | 0.091106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016304 | 0.25 | 736 | 31 | 89 | 23.741935 | 0.818841 | 0 | 0 | 0 | 0 | 0 | 0.040761 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.043478 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
024919e590e6aba049969294dd376ea5097af44d | 843 | py | Python | scripts/s3_block_public_access.py | fortunecookiezen/aws-secure-account | 07726f710f5e6a4131ed7104d98335951d7c0bf5 | [
"Apache-2.0"
] | null | null | null | scripts/s3_block_public_access.py | fortunecookiezen/aws-secure-account | 07726f710f5e6a4131ed7104d98335951d7c0bf5 | [
"Apache-2.0"
] | null | null | null | scripts/s3_block_public_access.py | fortunecookiezen/aws-secure-account | 07726f710f5e6a4131ed7104d98335951d7c0bf5 | [
"Apache-2.0"
] | null | null | null | import sys, boto3
def get_account_id(profile):
session = boto3.Session(profile_name=profile)
client = session.client("sts")
account_id = client.get_caller_identity()["Account"]
return account_id
def secure_buckets(profile):
session = boto3.Session(profile_name=profile)
client = session.client("s3control", region_name="us-east-1")
response = client.put_public_access_block(
PublicAccessBlockConfiguration={
"BlockPublicAcls": True,
"IgnorePublicAcls": True,
"BlockPublicPolicy": True,
"RestrictPublicBuckets": True,
},
AccountId=get_account_id(profile),
)
print(response)
if (
__name__ == "__main__"
): # takes profile_name as an argument. this could be done simpler, but I use profiles.
secure_buckets(sys.argv[1])
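# Illustrative invocation (the profile name is an example):
#     python s3_block_public_access.py my-admin-profile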
| 25.545455 | 88 | 0.670225 | 92 | 843 | 5.869565 | 0.554348 | 0.066667 | 0.044444 | 0.07037 | 0.233333 | 0.233333 | 0.233333 | 0.233333 | 0.233333 | 0.233333 | 0 | 0.009174 | 0.224199 | 843 | 32 | 89 | 26.34375 | 0.816514 | 0.097272 | 0 | 0.086957 | 0 | 0 | 0.13834 | 0.027668 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.043478 | 0 | 0.173913 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02496065d7da090dd46860ce58e3ba331e49ba70 | 645 | py | Python | {{ cookiecutter.project_slug }}/backend/models/users.py | fletcheaston/FastAPI-SQLAlchemy | c2989820e797613ea5517effefd130e50f8389ad | [
"MIT"
] | null | null | null | {{ cookiecutter.project_slug }}/backend/models/users.py | fletcheaston/FastAPI-SQLAlchemy | c2989820e797613ea5517effefd130e50f8389ad | [
"MIT"
] | null | null | null | {{ cookiecutter.project_slug }}/backend/models/users.py | fletcheaston/FastAPI-SQLAlchemy | c2989820e797613ea5517effefd130e50f8389ad | [
"MIT"
] | null | null | null | from typing import TYPE_CHECKING, List
from sqlalchemy import Column, Text, UniqueConstraint
from sqlalchemy.orm import relationship
from .base import Base
if TYPE_CHECKING:
from .items import Item # noqa: F401
class User(Base):
full_name: str = Column(
Text,
nullable=False,
)
email: str = Column(
Text,
index=True,
unique=True,
nullable=False,
)
hashed_password: str = Column(
Text,
nullable=False,
)
items: List["Item"] = relationship(
"Item",
back_populates="owner",
)
__table_args__ = (UniqueConstraint(email),)
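# Illustrative sketch (not part of the original module); `session` is assumed to be a
# configured SQLAlchemy session and hash_password a hypothetical helper:
#
#     user = User(full_name="Ada Lovelace", email="ada@example.com",
#                 hashed_password=hash_password("s3cret"))
#     session.add(user)
#     session.commit()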
| 18.970588 | 53 | 0.613953 | 69 | 645 | 5.594203 | 0.521739 | 0.103627 | 0.101036 | 0.108808 | 0.134715 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006608 | 0.296124 | 645 | 33 | 54 | 19.545455 | 0.843612 | 0.015504 | 0 | 0.230769 | 0 | 0 | 0.020537 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.038462 | 0.192308 | 0 | 0.423077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
024a0e83d4f93572185b4c979b1fe7b8597ef762 | 9,023 | py | Python | jarvis.py | Bangladesh-Coding-Soldierz/Jarvis-AI | f5382bef0fc931fc9b63cc344b6ec4cc63eb99da | [
"MIT"
] | 5 | 2021-03-26T13:14:27.000Z | 2021-08-14T13:17:22.000Z | jarvis.py | Bangladesh-Coding-Soldierz/Jarvis-AI | f5382bef0fc931fc9b63cc344b6ec4cc63eb99da | [
"MIT"
] | null | null | null | jarvis.py | Bangladesh-Coding-Soldierz/Jarvis-AI | f5382bef0fc931fc9b63cc344b6ec4cc63eb99da | [
"MIT"
] | 2 | 2021-03-18T14:35:47.000Z | 2021-03-27T15:04:00.000Z | import pyttsx3 # pip install pyttsx3
import datetime
import speech_recognition as sr #pip install SpeechRecognition
import wikipedia # pip install wikipedia
import webbrowser as wb
import psutil #pip install psutil
import pyjokes # pip install pyjokes
import os
import smtplib
import pyautogui #pip install pyautogui
import wolframalpha # pip install wolframalpha
import turtle as tt # pip install turtle
import pywhatkit as pwk # pip install pywhatkit
import time
import subprocess as sp
wolframalpha_app_id = 'your_api' #get your api from https://products.wolframalpha.com/api/
engine = pyttsx3.init()
def speak(audio): #deifning the speak function
engine.say(audio)
engine.runAndWait()
def time_(): #defining the time function
speak("the current time is")
Time=datetime.datetime.now().strftime("%I:%M:%S") # for 12-hour clock
speak(Time)
def date(): # defining the date function
year = (datetime.datetime.now().year)
month = (datetime.datetime.now().month)
date = (datetime.datetime.now().day)
speak("the current date is")
speak(date)
speak(month)
speak(year)
def wishme(): # defining the wish function
speak("Welcome back Tahsin!")
time_()
date()
hour = datetime.datetime.now().hour
if hour >=6 and hour<12:
speak("Good Morning Sir")
elif hour >=12 and hour<18:
speak("Good Afternoon Sir!")
elif hour >=18 and hour <24:
speak("Good Evening Sir!")
else:
speak("Good Night Sir!")
speak("Jarvis at your service. Please tell me how can I help you?")
def TakeCommand(): # defining the main function for taking commands
r = sr.Recognizer()
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source)
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
print(query)
except Exception as e:
print(e)
print("Say that again please...")
return "None"
return query
def cpu(): # defining the cpu function for cpu info
usage = str(psutil.cpu_percent())
speak("CPU is at" + usage)
def battery(): # defining the battery function for battery info
battery = psutil.sensors_battery()
speak("Batter is at")
speak(battery.percent)
def joke():
speak(pyjokes.get_joke())
def send_email():
speak("who is the reciever sir?")
TO = input("Please enter the reciever email: ")
FROM = "your email here"
passwd = 'your password here'
speak('what is the subject sir?')
SUBJECT = input('Subject: ')
speak('enter the email body please!')
text = input("Email body: ")
BODY = "\r\n".join((
"From: %s" % FROM,
"To: %s" % TO,
"Subject: %s" % SUBJECT ,
"",
text
))
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(FROM, passwd)
server.sendmail(FROM, [TO], BODY)
print('email sent')
speak('email sent Sir')
server.quit()
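# Note (not part of the original script): FROM and passwd above must be replaced with
# a real address and an app password; with the placeholder strings, server.login()
# raises smtplib.SMTPAuthenticationError before anything is sent.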
def scrnshot():
speak('taking a screenshot sir!')
img = pyautogui.screenshot()
img.show()
def sqre():
tt.title('Square Shape')
speak('What should be the square color sir?')
sqcolor = input('Please type the color name: ')
tt.hideturtle()
tt.fillcolor(sqcolor)
tt.begin_fill()
tt.color(sqcolor)
tt.forward(200)
tt.left(90)
tt.forward(200)
tt.left(90)
tt.forward(200)
tt.left(90)
tt.forward(200)
tt.left(90)
tt.end_fill()
tt.done()
def trng():
speak('What should be the triangle color sir?')
sqcolor = input('Please type the color sir: ')
tt.hideturtle()
tt.fillcolor(sqcolor)
tt.begin_fill()
tt.color(sqcolor)
tt.forward(200)
tt.left(120)
tt.forward(200)
tt.left(120)
tt.forward(200)
tt.left(120)
tt.end_fill()
tt.done()
def hrt():
speak("What should be the heart color sir?")
hrtcolor = input('Please type the color sir: ')
speak('What should be the background color sir?')
hrtbg = input('Please type the background color sir: ')
tt.color(hrtcolor)
tt.fillcolor(hrtcolor)
tt.bgcolor(hrtbg)
tt.begin_fill()
tt.left(50)
tt.forward(100)
tt.circle(40, 180)
tt.left(260)
tt.circle(40, 180)
tt.forward(100)
tt.end_fill()
tt.done()
def shutdown():
speak('Sir, should I shut down the computer?')
ans = TakeCommand()
if ans == 'yes':
speak('Shutting down the computer Sir')
os.system('systemctl poweroff -i')
elif ans == 'no':
speak('As you wish sir!')
else:
pass
if __name__ == "__main__":
wishme()
while True:
query = TakeCommand().lower()
if 'time' in query:
time_()
elif 'date' in query:
date()
elif 'wikipedia' in query:
speak('Searching...')
query = query.replace('wikipedia', '')
result = wikipedia.summary(query, sentences=3)
speak("according to wikipedia")
print(result)
speak(result)
elif 'search in firefox' in query:
speak("what should I search in firefox?")
firefox = '/usr/bin/firefox %s'
search = TakeCommand().lower()
wb.get(firefox).open_new_tab(search+'.com')
elif 'search in google' in query:
speak("what should I search?")
search_Term = TakeCommand().lower()
speak("searching...")
wb.open('https://www.google.com/search?q=' + search_Term)
elif 'cpu' in query:
cpu()
elif 'battery' in query:
battery()
elif 'joke' in query:
joke()
elif 'go offline' in query:
speak("Going offline sir.....")
quit()
elif 'spotify' in query:
try:
sp.call('spotify')
speak('Opening spotify Sir')
except Exception as e:
print(e)
elif 'discord' in query:
try:
sp.call('discord')
speak('Opening Discord Sir')
except Exception as e:
print(e)
elif 'vlc' in query:
try:
sp.call('vlc')
speak('opening vlc player Sir')
except Exception as e:
print(e)
speak('Sorry sir, there was a problem when opening the application')
elif 'terminal' in query:
try:
sp.call('gnome-terminal')
except Exception as e:
print(e)
elif 'rhythmbox' in query:
try:
sp.call('rhythmbox')
except Exception as e:
print(e)
elif 'screen recorder' in query:
try:
sp.call('obs')
except Exception as e:
print(e)
elif 'calculator' in query:
try:
sp.call('gnome-calculator')
except Exception as e:
print(e)
elif 'notepad' in query:
try:
sp.call('pluma')
except Exception as e:
print(e)
elif 'virtual keyboard' in query:
try:
sp.call('virtual-keyboard')
except Exception as e:
print(e)
elif 'send email' in query:
send_email()
elif 'screenshot' in query:
scrnshot()
elif 'draw a square' in query:
sqre()
elif 'draw a triangle' in query:
trng()
elif 'draw a heart' in query:
hrt()
elif 'calculate' in query:
client = wolframalpha.Client(wolframalpha_app_id)
indx = query.lower().split().index('calculate')
query = query.split()[indx + 1:]
res = client.query(' '.join(query))
answer = next(res.results).text
print("The answer is " + answer)
speak("The answer is " + answer)
elif 'play' in query:
song = query.replace('play', '')
speak('playing' + song)
pwk.playonyt(song)
elif 'shutdown' in query:
shutdown()
elif 'restart' in query:
speak('Sir, should I restart the computer?')
ans = TakeCommand()
if ans == 'yes':
os.system('systemctl reboot -i')
elif ans == 'no':
speak('As you wish sir')
else:
pass
elif 'stop listening' in query:
try:
speak('For how many seconds should I stop listening?')
ans = int(TakeCommand())
time.sleep(ans)
print(ans)
except Exception as e:
print(e)
speak('Invalid value')
TakeCommand()
| 26.854167 | 90 | 0.553918 | 1,076 | 9,023 | 4.610595 | 0.261152 | 0.039508 | 0.037694 | 0.039911 | 0.233219 | 0.179601 | 0.151582 | 0.093731 | 0.065914 | 0.065914 | 0 | 0.01344 | 0.33204 | 9,023 | 335 | 91 | 26.934328 | 0.80969 | 0.056411 | 0 | 0.285211 | 0 | 0 | 0.207416 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049296 | false | 0.014085 | 0.052817 | 0 | 0.109155 | 0.066901 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
024a5d31f2abd0f5d295f5ce81c1f2bb5708d1f3 | 326 | py | Python | QtPyNetwork/__main__.py | desty2k/QtPyNetwork | 63e892370a0a1648646bdfed57fea9689d927494 | [
"MIT"
] | null | null | null | QtPyNetwork/__main__.py | desty2k/QtPyNetwork | 63e892370a0a1648646bdfed57fea9689d927494 | [
"MIT"
] | null | null | null | QtPyNetwork/__main__.py | desty2k/QtPyNetwork | 63e892370a0a1648646bdfed57fea9689d927494 | [
"MIT"
] | null | null | null | import argparse
from QtPyNetwork import __version__
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__version__)
parser.add_argument('-v', '--version', action='version', version='v{}'.format(__version__),
help='print version and exit')
args = parser.parse_args()
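# Illustrative invocation (not part of the original module):
#     python -m QtPyNetwork --version    # prints the package version and exits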
| 29.636364 | 95 | 0.677914 | 34 | 326 | 5.852941 | 0.647059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.193252 | 326 | 10 | 96 | 32.6 | 0.756654 | 0 | 0 | 0 | 0 | 0 | 0.156442 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
024aae61d8346ffd6ac982bd673c98669968a301 | 1,412 | py | Python | pair_of_friends.py | farhankhwaja/MapReduce | 9a723c12e9f4d93909493b1109b50a577fec2a77 | [
"MIT"
] | 1 | 2021-03-11T22:20:58.000Z | 2021-03-11T22:20:58.000Z | pair_of_friends.py | farhankhwaja/MapReduce | 9a723c12e9f4d93909493b1109b50a577fec2a77 | [
"MIT"
] | null | null | null | pair_of_friends.py | farhankhwaja/MapReduce | 9a723c12e9f4d93909493b1109b50a577fec2a77 | [
"MIT"
] | null | null | null | __author__ = 'farhankhwaja'
import MapReduce
import sys
"""
A Python program that implements a MapReduce algorithm to identify symmetric friendships in the input data.
The program will output pairs of friends where personA is a friend of personB and personB is also a
friend of personA. If the friendship is asymmetric (only one person in the pair considers the other person a friend),
do not emit any output for that pair.
"""
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
# key: PersonA
# value: Friends of PersonA
key = record[0]
value = record[1]
for friend in value:
if not mr.intermediate:
mr.emit_intermediate(1,(key, friend))
else:
mr.emit_intermediate(max(mr.intermediate.keys())+1, (key, friend))
def reducer(key, list_of_values):
# key: Unique Number
# value: Pair of Friends
for v in list_of_values:
if ((v[1],v[0]) in [x for v in mr.intermediate.values() for x in v]) and ((v[1], v[0]) not in mr.result and (v[0], v[1]) not in mr.result):
if v[1] > v[0]:
mr.emit((v[0], v[1]))
else:
mr.emit((v[1], v[0]))
mr.result.sort()
# Do not modify below this line
# =============================
if __name__ == '__main__':
inputdata = open(sys.argv[1])
mr.execute(inputdata, mapper, reducer)
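# Worked example (illustrative): for input records ["A", ["B"]] and ["B", ["A", "C"]],
# the mapper emits the pairs (A, B), (B, A) and (B, C); only the symmetric pair
# survives the reducer, so the job outputs ("A", "B") once.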
| 30.695652 | 147 | 0.612606 | 211 | 1,412 | 4.014218 | 0.364929 | 0.014168 | 0.014168 | 0.01889 | 0.023613 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015829 | 0.239377 | 1,412 | 45 | 148 | 31.377778 | 0.772812 | 0.142351 | 0 | 0.086957 | 0 | 0 | 0.023923 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.086957 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
024b59b7ad668a68cc84137ca01818cfe47e2cc5 | 8,475 | py | Python | BSNet/framework.py | kai-zhang-er/Road-Extraction | ace81bc5c291048ec30005cee649cc847fd7daeb | [
"MIT"
] | 2 | 2021-12-10T09:16:36.000Z | 2022-03-04T23:36:41.000Z | BSNet/framework.py | kai-zhang-er/Road-Extraction | ace81bc5c291048ec30005cee649cc847fd7daeb | [
"MIT"
] | null | null | null | BSNet/framework.py | kai-zhang-er/Road-Extraction | ace81bc5c291048ec30005cee649cc847fd7daeb | [
"MIT"
] | 1 | 2020-10-14T07:22:49.000Z | 2020-10-14T07:22:49.000Z | import torch
import torch.nn as nn
from torch.autograd import Variable as V
from tensorboardX import SummaryWriter
import cv2
import numpy as np
import os
from loss import loss_func, dice_bce_loss
class MyFrame():
def __init__(self, net, lr, name, evalmode = False):
self.model = net
self.cuda_net = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
#self.optimizer = torch.optim.RMSprop(params=self.net.parameters(), lr=lr)
if evalmode:
for i in self.model.modules():
if isinstance(i, nn.BatchNorm2d):
i.eval()
self.isTrain = True
self.num_classes = 1
self.tensorborad_dir = "log/tensorboard_log/"
self.model_dir = "weights/"
# self.lr = 0.007
self.lr = lr
self.lr_power = 0.9
self.momentum = 0.9
self.wd = 0.0001 # weight decay
self.accum_steps = 1
self.iterSize = 10
self.net_name = name
self.which_epoch = 0
# self.device =
if self.isTrain:
# self.criterionSeg = torch.nn.CrossEntropyLoss(ignore_index=255).cuda() # maybe edit
# Change the crossentropyloss to BCEloss
# self.criterionSeg = torch.nn.BCELoss().cuda()
# self.criterionSeg = loss_func().cuda()
self.criterionSeg = dice_bce_loss().cuda()
self.optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=self.lr, momentum=self.momentum,
weight_decay=self.wd)
params_w = list(self.model.decoder.dupsample.conv_w.parameters())
params_p = list(self.model.decoder.dupsample.conv_p.parameters())
self.optimizer_w = torch.optim.SGD(params_w + params_p, lr=self.lr, momentum=self.momentum)
self.old_lr = self.lr
self.averageloss = []
self.writer = SummaryWriter(self.tensorborad_dir)
self.counter = 0
self.model.cuda()
self.normweightgrad = 0.
# if not self.isTrain and self.loaded_model != ' ':
# self.load_pretrained_network(self.model, self.opt.loaded_model, strict=True)
# print('test model load sucess!')
def set_input(self, img_batch, mask_batch=None, img_id=None):
self.img = img_batch
self.mask = mask_batch
self.img_id = img_id
return self.img
def forward(self, pre_compute_flag=0, isTrain=True):
# self.img = V(self.img.cuda(), volatile=volatile)
# if self.mask is not None:
# self.mask = V(self.mask.cuda(), volatile=volatile)
accum_steps = self.accum_steps
if pre_compute_flag == 1:
self.optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=self.lr, momentum=self.momentum,
weight_decay=self.wd)
print("pre compute OK")
self.img.requires_grad = not isTrain
if self.mask is not None:
self.seggt = self.mask.cuda()
else:
self.seggt = None
self.segpred = self.cuda_net.forward(self.img)
if self.isTrain:
self.loss = self.criterionSeg(self.segpred, self.seggt) / accum_steps
self.averageloss += [self.loss.data * accum_steps]
            # NOTE: the deep-supervision block below references names that are
            # never defined in this class (results, labels, gt_vgg,
            # self.loss_pred, self.loss_topo) and would fail at runtime, so it
            # is left disabled here.
            # loss_pred, loss_vgg = 0., 0.
            # for i in range(len(results)):
            #     loss_pred += (i + 1) * self.loss_pred(labels, results[i][0])
            #     for j in range(len(gt_vgg)):
            #         loss_vgg += (i + 1) * self.loss_topo(gt_vgg[j], results[i][1][j])
            # coeff = 0.5 * len(results) * (len(results) + 1)
            # curr_loss_pred = loss_pred / coeff
            # curr_loss_vgg = loss_vgg / coeff
            # train_loss = curr_loss_pred + 0.1 * curr_loss_vgg
            # train_loss.backward()
# self.seggt = torch.squeeze(self.seggt, dim=1)
if isTrain:
self.loss.backward()
return self.averageloss
def optimize(self,precompute_flag=0):
self.forward()
self.optimizer.zero_grad()
loss_list=self.forward(pre_compute_flag=precompute_flag)
self.optimizer.step()
return sum(loss_list)/len(loss_list)
def pre_compute_W(self, i):
self.model.zero_grad()
self.seggt = self.mask # N 1 H W
N, channel, H, W = self.seggt.size()
C = self.num_classes
scale = self.model.decoder.dupsample.scale
# N, C, H, W
# self.seggt = torch.squeeze(self.seggt, dim=1)
#
# self.seggt[self.seggt == 0] = 0
# self.seggt_onehot = torch.zeros(N, C, H, W).scatter_(1, self.seggt, self.seggt)
self.seggt_onehot = self.seggt
# N, H, W, C
self.seggt_onehot = self.seggt_onehot.permute(0, 2, 3, 1)
# N, H, W/sacle, C*scale
self.seggt_onehot = self.seggt_onehot.contiguous().view((N, H,
int(W / scale), C * scale))
# N, W/sacle, H, C*scale
self.seggt_onehot = self.seggt_onehot.permute(0, 2, 1, 3)
# N, W/scale, H/scale, C*scale*scale
self.seggt_onehot = self.seggt_onehot.contiguous().view((N, int(W / scale),
int(H / scale), C * scale * scale))
# N, C*scale*scale, H/scale, W/scale
self.seggt_onehot = self.seggt_onehot.permute(0, 3, 2, 1)
self.seggt_onehot = self.seggt_onehot.cuda()
self.seggt_onehot_reconstructed = self.model.decoder.dupsample.conv_w(
self.model.decoder.dupsample.conv_p(self.seggt_onehot))
self.reconstruct_loss = torch.mean(torch.pow(self.seggt_onehot -
self.seggt_onehot_reconstructed, 2))
self.reconstruct_loss.backward()
self.optimizer_w.step()
if i % 200 == 0: # output per 200 iters
print('pre_compute_loss: %f' % (self.reconstruct_loss))
def save(self, path):
torch.save(self.cuda_net.state_dict(), path)
def load(self, path):
        state_dict = torch.load(path)
        self.cuda_net.load_state_dict(state_dict)
def update_lr_poly(self, step, total_step, mylog, th):
# poly learning rate update
if step <= th:
new_lr = max(self.lr * (step / th) ** self.lr_power, 1e-6)
else:
new_lr = max(self.lr * (1 - step / total_step) ** self.lr_power, 1e-6)
for param_group in self.optimizer.param_groups:
param_group['lr'] = new_lr
print('update learning rate: %f -> %f' % (self.old_lr, new_lr), file=mylog)
print('update learning rate: %f -> %f' % (self.old_lr, new_lr))
self.old_lr = new_lr
def update_lr(self, new_lr, mylog, factor=False):
if factor:
new_lr = self.old_lr / new_lr
for param_group in self.optimizer.param_groups:
param_group['lr'] = new_lr
print('update learning rate: %f -> %f' % (self.old_lr, new_lr), file=mylog)
print('update learning rate: %f -> %f' % (self.old_lr, new_lr))
self.old_lr = new_lr
def update_tensorboard(self, step):
if self.isTrain:
# self.writer.add_scalar(self.net_name + '/Accuracy/', data[0], step)
# self.writer.add_scalar(self.net_name + '/Accuracy_Class/', data[1], step)
# self.writer.add_scalar(self.net_name + '/Mean_IoU/', data[2], step)
# self.writer.add_scalar(self.net_name + '/FWAV_Accuracy/', data[3], step)
self.trainingavgloss = sum(self.averageloss)/len(self.averageloss)
self.writer.add_scalars(self.net_name + '/loss', {"train": self.trainingavgloss}, step)
self.writer.add_scalar("learning rate",self.old_lr,step)
# file_name = os.path.join(self.save_dir, 'MIoU.txt')
# with open(file_name, 'wt') as opt_file:
# opt_file.write('%f\n' % (data[2]))
# self.writer.add_scalars('losses/'+self.opt.name, {"train": self.trainingavgloss,
# "val": np.mean(self.averageloss)}, step)
self.averageloss = []
def close_tensorboard(self):
self.writer.close()
def name(self):
return 'DUNet'
| 40.942029 | 104 | 0.575221 | 1,107 | 8,475 | 4.242096 | 0.192412 | 0.061329 | 0.057496 | 0.036414 | 0.310477 | 0.274489 | 0.228492 | 0.214651 | 0.168228 | 0.141397 | 0 | 0.01271 | 0.303717 | 8,475 | 206 | 105 | 41.140777 | 0.783088 | 0.189145 | 0 | 0.166667 | 0 | 0 | 0.031319 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.057971 | 0.007246 | 0.181159 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
024b620ce06501cc829d93a4946fa30982edc7e8 | 1,753 | py | Python | projectdiffview/logger.py | jdpatt/project-diff-view | 20fe849aeb26bbdd52d3354c75489fd935eca648 | [
"MIT"
] | null | null | null | projectdiffview/logger.py | jdpatt/project-diff-view | 20fe849aeb26bbdd52d3354c75489fd935eca648 | [
"MIT"
] | 1 | 2020-05-02T15:30:59.000Z | 2020-08-19T00:24:14.000Z | projectdiffview/logger.py | jdpatt/project-diff-view | 20fe849aeb26bbdd52d3354c75489fd935eca648 | [
"MIT"
] | null | null | null | """The logging and debug functionality."""
import logging
from PySide2.QtCore import QObject, Signal
def setup_logger(root_name, log_file_path="", is_verbose: bool = False):
"""Create the Handlers and set the default level to DEBUG."""
log = logging.getLogger(root_name)
# Setup a Console Logger
console_handler = logging.StreamHandler()
ch_format = logging.Formatter("%(message)s")
console_handler.setFormatter(ch_format)
console_handler.setLevel(logging.ERROR)
log.addHandler(console_handler)
# Setup a File Logger
file_handler = logging.FileHandler(log_file_path, mode="w", delay=True)
fh_format = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
file_handler.setFormatter(fh_format)
file_handler.setLevel(logging.DEBUG)
log.addHandler(file_handler)
if is_verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
log.info(f"Log file created at: {log_file_path}")
return log
class LogQObject(QObject):
"""Create a dummy object to get around the PySide multiple inheritance problem."""
new_record = Signal(str, str)
class ThreadLogHandler(logging.Handler):
"""Create a custom logging handler that appends each record to the TextEdit Widget."""
def __init__(self):
super().__init__()
self.log = LogQObject()
self.new_record = self.log.new_record
self.setFormatter(logging.Formatter("%(asctime)s - %(message)s"))
self.setLevel(logging.INFO)
def emit(self, record):
"""Append the record to the Widget. Color according to 'TEXT_COLOR'."""
msg = self.format(record)
level = record.levelname
self.new_record.emit(level, msg)
| 31.872727 | 90 | 0.691386 | 225 | 1,753 | 5.226667 | 0.386667 | 0.063776 | 0.028061 | 0.040816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000709 | 0.195094 | 1,753 | 54 | 91 | 32.462963 | 0.832743 | 0.205933 | 0 | 0 | 0 | 0 | 0.091575 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.060606 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
024cc201ab039e7e23796a9e910be9adb30f6016 | 2,463 | py | Python | google/appengine/tools/devappserver2/watcher_common.py | theosp/google_appengine | 9ce87a20684dc99cf5968e6f488c060e1530c159 | [
"Apache-2.0"
] | 3 | 2019-01-28T03:57:20.000Z | 2020-02-20T01:37:33.000Z | google/appengine/tools/devappserver2/watcher_common.py | theosp/google_appengine | 9ce87a20684dc99cf5968e6f488c060e1530c159 | [
"Apache-2.0"
] | null | null | null | google/appengine/tools/devappserver2/watcher_common.py | theosp/google_appengine | 9ce87a20684dc99cf5968e6f488c060e1530c159 | [
"Apache-2.0"
] | 3 | 2019-01-18T11:33:56.000Z | 2020-01-05T10:44:05.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common functionality for file watchers."""
import os
# A prefix for files and directories that we should not watch at all.
_IGNORED_PREFIX = '.'
# File suffixes that should be ignored.
_IGNORED_FILE_SUFFIXES = (
# Python temporaries
'.pyc',
'.pyo',
# Backups
'~',
# Emacs
'#',
# Vim
'.swp',
'.swo',
)
def ignore_file(filename):
"""Report whether a file should not be watched."""
filename = os.path.basename(filename)
return (
filename.startswith(_IGNORED_PREFIX) or
any(filename.endswith(suffix) for suffix in _IGNORED_FILE_SUFFIXES))
def _remove_pred(lst, pred):
"""Remove items from a list that match a predicate."""
# Walk the list in reverse because once an item is deleted,
# the indexes of any subsequent items change.
for idx in reversed(xrange(len(lst))):
if pred(lst[idx]):
del lst[idx]
def skip_ignored_dirs(dirs):
"""Skip directories that should not be watched."""
_remove_pred(dirs, lambda d: d.startswith(_IGNORED_PREFIX))
def skip_local_symlinks(roots, dirpath, directories):
"""Skip symlinks that link to another watched directory.
Our algorithm gets confused when the same directory is watched multiple times
due to symlinks.
Args:
roots: The realpath of the root of all directory trees being watched.
dirpath: The base directory that each of the directories are in (i.e.
      the first element of a triplet obtained from os.walk).
directories: A list of directories in dirpath. This list is modified so
that any element which is a symlink to another directory is removed.
"""
def is_local_symlink(d):
d = os.path.join(dirpath, d)
if not os.path.islink(d):
return False
d = os.path.realpath(d)
return any(d.startswith(root) for root in roots)
_remove_pred(directories, is_local_symlink)
| 28.976471 | 79 | 0.712546 | 363 | 2,463 | 4.760331 | 0.465565 | 0.034722 | 0.015046 | 0.018519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004061 | 0.200162 | 2,463 | 84 | 80 | 29.321429 | 0.873096 | 0.610637 | 0 | 0 | 0 | 0 | 0.021324 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.172414 | false | 0 | 0.034483 | 0 | 0.310345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
024dc84c182fd5118a3990a9959cb5d0440432c1 | 4,520 | py | Python | util/config_util.py | samiraabnar/DistillingInductiveBias | 962f87e7d38a3d255846432286e048d176ed7a5d | [
"MIT"
] | 10 | 2020-07-04T09:11:36.000Z | 2021-12-16T13:06:35.000Z | util/config_util.py | samiraabnar/DistillingInductiveBias | 962f87e7d38a3d255846432286e048d176ed7a5d | [
"MIT"
] | null | null | null | util/config_util.py | samiraabnar/DistillingInductiveBias | 962f87e7d38a3d255846432286e048d176ed7a5d | [
"MIT"
] | 3 | 2021-07-09T16:24:07.000Z | 2022-02-07T15:49:05.000Z | from util.distill_params import DISTILL_PARAMS
from util.model_configs import GPT2Config, ModelConfig, MODEL_CONFIGS, CapsConfig, ResnetConfig
from util.train_params import TRAIN_PARAMS
class TrainParams(object):
def __init__(self, optimizer,
learning_rate=0.0001,
n_epochs=60,
warmup_steps=5000,
decay_steps=10000,
hold_base_rate_steps=1000,
total_training_steps=60000,
num_train_epochs=60,
decay_rate=0.96,
schedule='',
):
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.warmup_steps = warmup_steps
self.decay_steps = decay_steps
self.hold_base_rate_steps = hold_base_rate_steps
self.total_training_steps = total_training_steps
self.num_train_epochs = num_train_epochs
self.optimizer = optimizer
self.schedule = schedule
self.decay_rate = decay_rate
class DistillParams(object):
def __init__(self,
distill_temp=5.0,
student_distill_rate=0.9,
student_gold_rate=0.1,
student_learning_rate=0.0001,
student_decay_steps=10000,
student_warmup_steps=10000,
student_hold_base_rate_steps=1000,
student_decay_rate=0.96,
student_optimizer='adam',
teacher_learning_rate=0.0001,
teacher_decay_steps=10000,
teacher_warmup_steps=10000,
teacher_hold_base_rate_steps=1000,
teacher_decay_rate=0.96,
teacher_optimizer='radam',
n_epochs=60,
schedule='',
distill_decay_steps=1000000,
distill_warmup_steps=0,
hold_base_distillrate_steps=1000000,
student_distill_rep_rate=1.0,
distill_min_rate=0.0,
distill_schedule='cnst',
):
self.distill_temp = distill_temp
self.distill_schedule = distill_schedule
self.student_distill_rate = student_distill_rate
self.distill_min_rate = distill_min_rate
self.student_gold_rate = student_gold_rate
self.student_learning_rate = student_learning_rate
self.student_decay_steps = student_decay_steps
self.student_warmup_steps = student_warmup_steps
self.student_hold_base_rate_steps = student_hold_base_rate_steps
self.student_optimizer = student_optimizer
self.teacher_learning_rate = teacher_learning_rate
self.teacher_warmup_steps = teacher_warmup_steps
self.teacher_decay_steps = teacher_decay_steps
self.teacher_optimizer = teacher_optimizer
self.teacher_hold_base_rate_steps = teacher_hold_base_rate_steps
self.n_epochs = n_epochs
self.schedule = schedule
self.distill_decay_steps = distill_decay_steps
self.distill_warmup_steps = distill_warmup_steps
self.hold_base_distillrate_steps = hold_base_distillrate_steps
self.student_distill_rep_rate = student_distill_rep_rate
self.teacher_decay_rate = teacher_decay_rate
self.student_decay_rate = student_decay_rate
class TaskParams:
def __init__(self, batch_size=64, num_replicas_in_sync=1):
self.batch_size = batch_size
self.num_replicas_in_sync = num_replicas_in_sync
def get_train_params(train_config):
train_params = TrainParams(**TRAIN_PARAMS[train_config])
return train_params
def get_distill_params(distill_config):
if distill_config != 'base':
return DistillParams(**DISTILL_PARAMS[distill_config])
return DistillParams()
def get_task_params(**kwargs):
task_params = TaskParams(**kwargs)
return task_params
def get_model_params(task, config_name='', model_config='base'):
print("model config:", model_config)
if model_config in MODEL_CONFIGS:
model_cnfgs = MODEL_CONFIGS.get(model_config)
else:
model_cnfgs = MODEL_CONFIGS.get('base')
if 'gpt' in config_name or 'bert' in config_name:
return GPT2Config(vocab_size=task.vocab_size(),
output_dim=task.output_size(),
num_labels=task.output_size(),
**model_cnfgs)
elif 'caps' in config_name:
return CapsConfig(output_dim=task.output_size(),
**model_cnfgs)
elif 'resnet' in config_name:
return ResnetConfig(output_dim=task.output_size(),
**model_cnfgs)
else:
return ModelConfig(input_dim=task.vocab_size(),
output_dim=task.output_size(),**model_cnfgs)
| 35.873016 | 95 | 0.692478 | 561 | 4,520 | 5.130125 | 0.14795 | 0.040653 | 0.037526 | 0.053162 | 0.164003 | 0.077832 | 0.051425 | 0.025017 | 0 | 0 | 0 | 0.030787 | 0.238274 | 4,520 | 125 | 96 | 36.16 | 0.805112 | 0 | 0 | 0.138889 | 0 | 0 | 0.012176 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064815 | false | 0 | 0.027778 | 0 | 0.194444 | 0.009259 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0253e31736a347dd3af12a01568b50bb482c9574 | 1,346 | py | Python | 2021/14/1+2.py | alexpovel/aoc | e497dd585181ac636f7319e8d3310a956aff1df0 | [
"MIT"
] | null | null | null | 2021/14/1+2.py | alexpovel/aoc | e497dd585181ac636f7319e8d3310a956aff1df0 | [
"MIT"
] | null | null | null | 2021/14/1+2.py | alexpovel/aoc | e497dd585181ac636f7319e8d3310a956aff1df0 | [
"MIT"
] | null | null | null | """Made this work with `mypy --strict` for fun."""
from collections import Counter, deque
from typing import Any, Iterable, Sequence
with open("input.txt") as f:
template = list(next(f).strip())
next(f) # Skip blank
insertions: dict[tuple[str, ...], str] = {}
for line in f:
input_pair, _arrow, insertion = line.split()
insertions[tuple(input_pair)] = insertion
def sliding_window(sequence: Sequence[Any], size: int) -> Iterable[tuple[Any, ...]]:
initial, tail = sequence[:size], sequence[size:]
window = deque(initial, maxlen=size)
yield tuple(window)
for element in tail:
window.append(element)
yield tuple(window)
pairs = Counter(pairs for pairs in sliding_window(template, 2))
single_elements = Counter(template)
for i in range(40):
update: Counter[tuple[str, ...]] = Counter()
for pair, count in pairs.items():
left, right = pair
try:
insertion = insertions[pair]
except KeyError:
continue
update[pair] -= count
update[(left, insertion)] += count
update[(insertion, right)] += count
single_elements[insertion] += count
pairs.update(update)
if i == 9 or i == 39:
most_common = single_elements.most_common()
print(most_common[0][1] - most_common[-1][1])
| 28.041667 | 84 | 0.624071 | 168 | 1,346 | 4.928571 | 0.440476 | 0.048309 | 0.038647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009823 | 0.243685 | 1,346 | 47 | 85 | 28.638298 | 0.803536 | 0.041605 | 0 | 0.058824 | 0 | 0 | 0.007009 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.058824 | 0 | 0.088235 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0254b5220650ab8c7577af60b41a1881fcd495d9 | 9,091 | py | Python | lib/coginvasion/minigame/DodgeballFirstPerson.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | 1 | 2020-03-12T16:44:10.000Z | 2020-03-12T16:44:10.000Z | lib/coginvasion/minigame/DodgeballFirstPerson.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | null | null | null | lib/coginvasion/minigame/DodgeballFirstPerson.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.minigame.DodgeballFirstPerson
from panda3d.core import Point3
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.fsm import ClassicFSM, State
from direct.actor.Actor import Actor
from direct.interval.IntervalGlobal import Sequence, Func, ActorInterval, Parallel, Wait, LerpPosInterval, LerpQuatInterval
from FirstPerson import FirstPerson
from MinigameUtils import *
ToonSpeedFactor = 1.0
ToonForwardSpeed = 16.0 * ToonSpeedFactor
ToonJumpForce = 0.0
ToonReverseSpeed = 16.0 * ToonSpeedFactor
ToonRotateSpeed = 80.0 * ToonSpeedFactor
class DodgeballFirstPerson(FirstPerson):
notify = directNotify.newCategory('DodgeballFirstPerson')
MaxPickupDistance = 5.0
def __init__(self, mg):
self.mg = mg
self.crosshair = None
self.soundCatch = None
self.vModelRoot = None
self.vModel = None
self.ival = None
self.soundPickup = base.loadSfx('phase_4/audio/sfx/MG_snowball_pickup.wav')
self.fakeSnowball = loader.loadModel('phase_5/models/props/snowball.bam')
self.hasSnowball = False
self.mySnowball = None
self.camPivotNode = base.localAvatar.attachNewNode('cameraPivotNode')
self.camFSM = ClassicFSM.ClassicFSM('DFPCamera', [
State.State('off', self.enterCamOff, self.exitCamOff),
State.State('frozen', self.enterFrozen, self.exitFrozen),
State.State('unfrozen', self.enterUnFrozen, self.exitUnFrozen)], 'off', 'off')
self.camFSM.enterInitialState()
self.fsm = ClassicFSM.ClassicFSM('DodgeballFirstPerson', [
State.State('off', self.enterOff, self.exitOff),
State.State('hold', self.enterHold, self.exitHold),
State.State('catch', self.enterCatch, self.exitCatch),
State.State('throw', self.enterThrow, self.exitThrow)], 'off', 'off')
self.fsm.enterInitialState()
FirstPerson.__init__(self)
return
def enterCamOff(self):
pass
def exitCamOff(self):
pass
def enterFrozen(self):
self.vModel.hide()
base.localAvatar.getGeomNode().show()
camera.wrtReparentTo(self.camPivotNode)
camHeight = max(base.localAvatar.getHeight(), 3.0)
nrCamHeight = base.localAvatar.getHeight()
heightScaleFactor = camHeight * 0.3333333333
defLookAt = Point3(0.0, 1.5, camHeight)
idealData = (Point3(0.0, -12.0 * heightScaleFactor, camHeight),
defLookAt)
self.camTrack = Parallel(LerpPosInterval(camera, duration=1.0, pos=idealData[0], startPos=camera.getPos(), blendType='easeOut'), LerpQuatInterval(camera, duration=1.0, hpr=idealData[1], startHpr=camera.getHpr(), blendType='easeOut'))
self.camTrack.start()
self.max_camerap = 0.0
self.disableMouse()
def cameraMovement(self, task):
if not self.camFSM:
return task.done
if self.camFSM.getCurrentState().getName() == 'frozen':
if hasattr(self, 'min_camerap') and hasattr(self, 'max_camerap'):
md = base.win.getPointer(0)
x = md.getX()
y = md.getY()
if base.win.movePointer(0, base.win.getXSize() / 2, base.win.getYSize() / 2):
self.camPivotNode.setP(self.camPivotNode.getP() - (y - base.win.getYSize() / 2) * 0.1)
self.camPivotNode.setH(self.camPivotNode.getH() - (x - base.win.getXSize() / 2) * 0.1)
if self.camPivotNode.getP() < self.min_camerap:
self.camPivotNode.setP(self.min_camerap)
elif self.camPivotNode.getP() > self.max_camerap:
self.camPivotNode.setP(self.max_camerap)
return task.cont
return task.done
return FirstPerson.cameraMovement(self, task)
def exitFrozen(self):
self.camTrack.finish()
del self.camTrack
self.max_camerap = 90.0
self.vModel.show()
self.enableMouse()
base.localAvatar.stopSmartCamera()
def enterUnFrozen(self):
base.localAvatar.getGeomNode().hide()
self.reallyStart()
camera.setPosHpr(0, 0, 0, 0, 0, 0)
camera.reparentTo(self.player_node)
camera.setZ(base.localAvatar.getHeight())
def exitUnFrozen(self):
self.end()
self.enableMouse()
def enterOff(self):
if self.vModel:
self.vModel.hide()
def exitOff(self):
if self.vModel:
self.vModel.show()
def enterHold(self):
self.ival = Sequence(ActorInterval(self.vModel, 'hold-start'), Func(self.vModel.loop, 'hold'))
self.ival.start()
def exitHold(self):
if self.ival:
self.ival.finish()
self.ival = None
self.vModel.stop()
return
def enterThrow(self):
self.ival = Parallel(Sequence(Wait(0.4), Func(self.mySnowball.b_throw)), Sequence(ActorInterval(self.vModel, 'throw'), Func(self.fsm.request, 'off')))
self.ival.start()
def exitThrow(self):
if self.ival:
self.ival.pause()
self.ival = None
self.vModel.stop()
return
def enterCatch(self):
self.ival = Parallel(Sequence(Wait(0.2), Func(self.__tryToCatchOrGrab)), Sequence(ActorInterval(self.vModel, 'catch'), Func(self.__maybeHold)))
self.ival.start()
def __maybeHold(self):
if self.hasSnowball:
self.fsm.request('hold')
else:
self.fsm.request('off')
def __tryToCatchOrGrab(self):
snowballs = list(self.mg.snowballs)
snowballs.sort(key=lambda snowball: snowball.getDistance(base.localAvatar))
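        # pick up the closest snowball that is free, not airborne, and within pickup range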
for i in xrange(len(snowballs)):
snowball = snowballs[i]
if not snowball.hasOwner() and not snowball.isAirborne and snowball.getDistance(base.localAvatar) <= DodgeballFirstPerson.MaxPickupDistance:
snowball.b_pickup()
self.mySnowball = snowball
self.fakeSnowball.setPosHpr(0, 0.73, 0, 0, 0, 0)
self.fakeSnowball.reparentTo(self.vModel.exposeJoint(None, 'modelRoot', 'Bone.011'))
base.playSfx(self.soundPickup)
self.hasSnowball = True
break
return
def exitCatch(self):
self.vModel.stop()
if self.ival:
self.ival.pause()
self.ival = None
return
def start(self):
self.crosshair = getCrosshair(color=(0, 0, 0, 1), hidden=False)
self.soundCatch = base.loadSfx('phase_4/audio/sfx/MG_sfx_vine_game_catch.ogg')
self.vModelRoot = camera.attachNewNode('vModelRoot')
self.vModelRoot.setPos(-0.09, 1.38, -2.48)
self.vModel = Actor('phase_4/models/minigames/v_dgm.egg', {'hold': 'phase_4/models/minigames/v_dgm-ball-hold.egg', 'hold-start': 'phase_4/models/minigames/v_dgm-ball-hold-start.egg',
'throw': 'phase_4/models/minigames/v_dgm-ball-throw.egg',
'catch': 'phase_4/models/minigames/v_dgm-ball-catch.egg'})
self.vModel.setBlend(frameBlend=True)
self.vModel.reparentTo(self.vModelRoot)
self.vModel.setBin('fixed', 40)
self.vModel.setDepthTest(False)
self.vModel.setDepthWrite(False)
self.vModel.hide()
base.localAvatar.walkControls.setWalkSpeed(ToonForwardSpeed, ToonJumpForce, ToonReverseSpeed, ToonRotateSpeed)
FirstPerson.start(self)
def reallyStart(self):
FirstPerson.reallyStart(self)
base.localAvatar.startTrackAnimToSpeed()
self.accept('mouse3', self.__handleCatchOrGrabButton)
self.accept('mouse1', self.__handleThrowButton)
def end(self):
base.localAvatar.stopTrackAnimToSpeed()
self.ignore('mouse3')
self.ignore('mouse1')
FirstPerson.end(self)
def __handleThrowButton(self):
if self.hasSnowball and self.mySnowball and self.fsm.getCurrentState().getName() == 'hold':
self.fakeSnowball.reparentTo(hidden)
self.fsm.request('throw')
def __handleCatchOrGrabButton(self):
if not self.hasSnowball and not self.mySnowball and self.fsm.getCurrentState().getName() == 'off':
self.fsm.request('catch')
def reallyEnd(self):
base.localAvatar.setWalkSpeedNormal()
if self.camFSM:
self.camFSM.requestFinalState()
self.camFSM = None
if self.fsm:
self.fsm.requestFinalState()
self.fsm = None
if self.crosshair:
self.crosshair.destroy()
self.crosshair = None
if self.vModel:
self.vModel.removeNode()
self.vModel = None
if self.vModelRoot:
self.vModelRoot.removeNode()
self.vModelRoot = None
self.soundCatch = None
FirstPerson.reallyEnd(self)
return | 40.048458 | 241 | 0.633924 | 994 | 9,091 | 5.743461 | 0.255533 | 0.043791 | 0.003678 | 0.018392 | 0.125241 | 0.099317 | 0.081275 | 0.033981 | 0.012261 | 0 | 0 | 0.022518 | 0.247718 | 9,091 | 227 | 242 | 40.048458 | 0.812253 | 0.02431 | 0 | 0.218274 | 0 | 0 | 0.069704 | 0.037785 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121827 | false | 0.010152 | 0.035533 | 0 | 0.22335 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0256aa15ac924b23eccaf19fe0d8e1bd8d6b0a00 | 3,146 | py | Python | Assembler.py | kcal2845/kcal-cpu-assembler | 9ac33d8e0100e61d2efd797f83569fe481049551 | [
"MIT"
] | 1 | 2018-10-25T22:31:28.000Z | 2018-10-25T22:31:28.000Z | Assembler.py | kcal2845/kcal-cpu-assembler | 9ac33d8e0100e61d2efd797f83569fe481049551 | [
"MIT"
] | null | null | null | Assembler.py | kcal2845/kcal-cpu-assembler | 9ac33d8e0100e61d2efd797f83569fe481049551 | [
"MIT"
] | null | null | null | print("16-bit CPU assembler (https://github.com/kcal2845/Logisim-16bit-CPU)")
f = open('./format.txt','r')
# Configuration flags
SLASH = ':' #slash (separator character)
# Load the instruction formats from format.txt
instruction_format = dict() #instruction format table
f.readline()
while True:
line = f.readline()
if not line: break
    #Remove whitespace and newline characters
line=line.replace('\n','');line=line.replace('\r\n','');line=line.replace(' ','');
    # Split on the separator and store
splited=line.split(SLASH)
    # Add the format entry
instruction_format[splited[0]] = splited[1]
print('Instruction formats registered')
f.close()
# Assemble using the registered formats
# Get the program name
print("Enter the program name:")
programLink = input()
try:
f = open(programLink+'.txt','r')
except FileNotFoundError:
    print(programLink+" = no such file exists.")
    quit()
line = f.readline()
# Look for the address bit on the first line
line=line.replace(' ','')
if line.find('addressbit:') == -1 :
    print('Please specify the address bit.');exit()
# Store the address bit
addressbit = int(line.split(':')[1])
# Declare the translated array sized by the address bit
translated=[0]*(2**addressbit)
print(programLink+'.txt '+'assembly -> machine code conversion...')
i = 0
while True:
bined = '0b'
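    # 'bined' accumulates this instruction's machine code as a binary string (starting with the '0b' prefix)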
    # Read an instruction line
line = f.readline()
if not line: break
    # Remove comments and newlines
line = line[:line.find('#')]
line=line.replace('\n','');line=line.replace('\r\n','')
    # Split on spaces
lines = line.split(' ')
    # Handle ORG
if lines[0] == 'ORG':
i = int(lines[1],16)
print("\nORG %x" %i)
elif lines[0] != '\n' and lines[0] != '' and lines[0] != ' ':
        # Handle string literals
if line[0] == "[":
print(line + '->')
x = 1
while True:
if line[x] == "]": break
character = hex(ord(line[x])).replace("0x","")
print(str(hex(i).replace("0x",""))+": "+character)
translated[i] = character
x += 1
i += 1
        # Handle instructions and constants
else:
for p in range(len(lines)):
if lines[p] != '' :
                    # If it is in the format table convert to that value, otherwise treat it as a number and convert to binary
if lines[p] in instruction_format :
bined = bined + instruction_format[lines[p]]
else :
if lines[p].find("0b") != -1: numbers = lines[p].replace("0b","")
elif lines[p].find("0x") != -1: numbers = bin(int(lines[p],16)).replace("0b","")
else : numbers = bin(int(lines[p])).replace("0b","")
                        # Pad with leading zeros
for a in range(addressbit - len(numbers)):
numbers = '0'+numbers
bined = bined + numbers
            # Convert the binary string to decimal, then to hexadecimal
hexed = hex(int(bined,2)).replace("0x","")
print(str(hex(i).replace("0x",""))+": "+line + ' -> ' +hexed)
translated[i] = hexed
i = i+1
f.close()
# Assemble the output text
text = ''
for i in range(2**addressbit):
text = text + str(translated[i]) + ' '
print("변환 완료")
f = open(programLink+'_Assembled.txt','w')
f.write('v2.0 raw\n')
f.write(text)
f.close()
print(programLink+"_Assembled.txt로 저장")
| 25.168 | 104 | 0.484425 | 397 | 3,146 | 3.823678 | 0.350126 | 0.042161 | 0.059289 | 0.031621 | 0.14361 | 0.118577 | 0.118577 | 0.083004 | 0.043478 | 0 | 0 | 0.025371 | 0.335982 | 3,146 | 124 | 105 | 25.370968 | 0.701292 | 0.088366 | 0 | 0.173333 | 0 | 0 | 0.101266 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02588240a57cc1a8004e8eb009f788257082da30 | 2,388 | py | Python | 03.modeling.py | aditijoshi7/Cosmetic | 10308bf1356620941f9e3c56ab2f5cb1519e003f | [
"MIT"
] | null | null | null | 03.modeling.py | aditijoshi7/Cosmetic | 10308bf1356620941f9e3c56ab2f5cb1519e003f | [
"MIT"
] | null | null | null | 03.modeling.py | aditijoshi7/Cosmetic | 10308bf1356620941f9e3c56ab2f5cb1519e003f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 24 19:56:54 2018
@author: jjone
"""
# This is part 2 of the cosmetic recommendation: analyzing cosmetic item similarities based on their ingredients
# You can also download the csv file from the same repository: cosmetic.csv
import pandas as pd
import numpy as np
from sklearn.manifold import TSNE
# Load the data
cosm_2 = pd.read_csv('data/cosmetic_p.csv')
# All possible combinations for the option choices
option_1 = cosm_2.Label.unique().tolist()
option_2 = cosm_2.columns[6:].tolist()
## defining a function embedding ingredients and decomposition at once
def my_recommender(op_1, op_2):
df = cosm_2[cosm_2['Label'] == op_1][cosm_2[op_2] == 1]
df = df.reset_index()
# embedding each ingredients
ingredient_idx = {}
corpus = []
idx = 0
for i in range(len(df)):
ingreds = df['ingredients'][i]
ingreds = ingreds.lower()
tokens = ingreds.split(', ')
corpus.append(tokens)
for ingredient in tokens:
if ingredient not in ingredient_idx:
ingredient_idx[ingredient] = idx
idx += 1
# Get the number of items and tokens
M = len(df) # The number of the items
N = len(ingredient_idx) # The number of the ingredients
# Initialize a matrix of zeros
A = np.zeros(shape = (M, N))
# Define the oh_encoder function
def oh_encoder(tokens):
x = np.zeros(N)
for t in tokens:
# Get the index for each ingredient
idx = ingredient_idx[t]
# Put 1 at the corresponding indices
x[idx] = 1
return x
# Make a document-term matrix
i = 0
for tokens in corpus:
A[i, :] = oh_encoder(tokens)
i += 1
# Dimension reduction with t-SNE
model = TSNE(n_components = 2, learning_rate = 200)
tsne_features = model.fit_transform(A)
# Make X, Y columns
df['X'] = tsne_features[:, 0]
df['Y'] = tsne_features[:, 1]
return df
# Create the dataframe for all combinations
df_all = pd.DataFrame()
for op_1 in option_1:
for op_2 in option_2:
temp = my_recommender(op_1, op_2)
temp['Label'] = op_1 + '_' + op_2
df_all = pd.concat([df_all, temp])
# Save the file
df_all.to_csv('data/cosmetic_TSNE.csv', encoding = 'utf-8-sig', index = False)
| 26.831461 | 113 | 0.622697 | 349 | 2,388 | 4.12894 | 0.389685 | 0.063151 | 0.010409 | 0.012491 | 0.03331 | 0.026371 | 0 | 0 | 0 | 0 | 0 | 0.028373 | 0.276801 | 2,388 | 88 | 114 | 27.136364 | 0.806022 | 0.320771 | 0 | 0 | 0 | 0 | 0.047649 | 0.013793 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.065217 | 0 | 0.152174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
025c731943add7b96f4762f7e90250edc2669b56 | 5,769 | py | Python | app/domain/flow.py | emge1/tracardi | 0a4a8a38f0f769464f50d3c1113b798107810810 | [
"MIT"
] | null | null | null | app/domain/flow.py | emge1/tracardi | 0a4a8a38f0f769464f50d3c1113b798107810810 | [
"MIT"
] | null | null | null | app/domain/flow.py | emge1/tracardi | 0a4a8a38f0f769464f50d3c1113b798107810810 | [
"MIT"
] | null | null | null | import uuid
from tracardi_graph_runner.domain.flow import Flow as GraphFlow
import app.service.storage.crud as crud
from .entity import Entity
from .named_entity import NamedEntity
from ..exceptions.exception import TracardiException
from typing import Optional, List
from pydantic import BaseModel
from tracardi_graph_runner.domain.flow_graph_data import FlowGraphData, Edge, Position, Node
from tracardi_plugin_sdk.domain.register import MetaData, Plugin, Spec
from app.service.storage.crud import StorageCrud
from ..service.secrets import decrypt, encrypt
class Flow(GraphFlow):
projects: Optional[List[str]] = ["General"]
draft: Optional[str] = ""
lock: bool = False
# Persistence
def storage(self) -> crud.StorageCrud:
return crud.StorageCrud("flow", Flow, entity=self)
@staticmethod
async def decode(flow_id) -> 'Flow':
flow_record_entity = Entity(id=flow_id)
flow_record = await flow_record_entity.storage("flow").load(FlowRecord) # type: FlowRecord
if not flow_record:
raise TracardiException("Could not find flow `{}`".format(flow_id))
return flow_record.decode()
def encode_draft(self, draft: 'Flow'):
self.draft = encrypt(draft.dict())
def decode_draft(self) -> 'Flow':
flow = decrypt(self.draft)
return Flow.construct(_fields_set=self.__fields_set__, **flow)
@staticmethod
def new(id:str = None) -> 'Flow':
return Flow(
id=str(uuid.uuid4()) if id is None else id,
name="Empty",
enabled=False,
flowGraph=FlowGraphData(nodes=[], edges=[])
)
class SpecRecord(BaseModel):
className: str
module: str
inputs: Optional[List[str]] = []
outputs: Optional[List[str]] = []
init: Optional[str] = ""
manual: Optional[str] = None
@staticmethod
def encode(spec: Spec) -> 'SpecRecord':
return SpecRecord(
className=spec.className,
module=spec.module,
inputs=spec.inputs,
outputs=spec.outputs,
init=encrypt(spec.init),
manual=spec.manual
)
def decode(self) -> Spec:
return Spec(
className=self.className,
module=self.module,
inputs=self.inputs,
outputs=self.outputs,
init=decrypt(self.init),
manual=self.manual
)
class PluginRecord(BaseModel):
start: bool = False
debug: bool = False
spec: SpecRecord
metadata: MetaData
@staticmethod
def encode(plugin: Plugin) -> 'PluginRecord':
return PluginRecord(
start=plugin.start,
debug=plugin.debug,
spec=SpecRecord.encode(plugin.spec),
metadata=plugin.metadata
)
def decode(self) -> Plugin:
data = {
"start": self.start,
"debug": self.debug,
"spec": self.spec.decode(),
"metadata": self.metadata
}
return Plugin.construct(_fields_set=self.__fields_set__, **data)
class NodeRecord(BaseModel):
id: str
type: str
position: Position
data: PluginRecord
@staticmethod
def encode(node: Node):
return NodeRecord(
id=node.id,
type=node.type,
position=node.position,
data=PluginRecord.encode(node.data)
)
def decode(self) -> Node:
data = {
"id": self.id,
"type": self.type,
"data": self.data.decode(),
"position": self.position
}
return Node.construct(_fields_set=self.__fields_set__, **data)
class FlowGraphDataRecord(BaseModel):
nodes: List[NodeRecord]
edges: List[Edge]
@staticmethod
def encode(flowGraph: FlowGraphData) -> 'FlowGraphDataRecord':
if flowGraph:
return FlowGraphDataRecord(
edges=flowGraph.edges,
nodes=[NodeRecord.encode(node) for node in flowGraph.nodes]
)
return FlowGraphDataRecord(
edges=[],
nodes=[]
)
def decode(self) -> FlowGraphData:
data = {
"edges": self.edges,
"nodes": [node.decode() for node in self.nodes],
}
return FlowGraphData.construct(_fields_set=self.__fields_set__, **data)
class FlowRecord(NamedEntity):
description: Optional[str] = None
flowGraph: Optional[FlowGraphDataRecord] = None
enabled: Optional[bool] = True
projects: Optional[List[str]] = ["General"]
draft: Optional[str] = ''
lock: bool = False
# Persistence
def storage(self) -> StorageCrud:
return StorageCrud("flow", FlowRecord, entity=self)
@staticmethod
def encode(flow: Flow) -> 'FlowRecord':
return FlowRecord(
id=flow.id,
description=flow.description,
name=flow.name,
enabled=flow.enabled,
flowGraph=FlowGraphDataRecord.encode(flow.flowGraph),
projects=flow.projects,
draft=flow.draft,
lock=flow.lock
)
def decode(self) -> Flow:
data = {
"id": self.id,
"name": self.name,
"description": self.description,
"enabled": self.enabled,
"projects": self.projects,
"draft": self.draft,
"lock": self.lock,
"flowGraph": self.flowGraph.decode() if self.flowGraph else None,
}
return Flow.construct(_fields_set=self.__fields_set__, **data)
def decode_draft(self) -> 'Flow':
flow = decrypt(self.draft)
return Flow(**flow)
def encode_draft(self, draft: 'Flow'):
self.draft = encrypt(draft.dict())
| 28.418719 | 99 | 0.599064 | 597 | 5,769 | 5.691792 | 0.164154 | 0.026486 | 0.026486 | 0.032372 | 0.187758 | 0.187758 | 0.168334 | 0.167157 | 0.110653 | 0.110653 | 0 | 0.000244 | 0.290518 | 5,769 | 202 | 100 | 28.559406 | 0.829954 | 0.006934 | 0 | 0.164634 | 0 | 0 | 0.040175 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103659 | false | 0 | 0.073171 | 0.04878 | 0.469512 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
025eb9ffceede9a60da558c7537cd2fe3a84be9b | 1,354 | py | Python | paths.py | Mctigger/kaggle_amazon_planet_9th_place | ca0b235bcd194f29b9d9207e3349a22634dc76da | [
"MIT"
] | 22 | 2017-07-26T02:00:57.000Z | 2019-03-19T09:41:57.000Z | paths.py | Mctigger/kaggle_amazon_planet_9th_place | ca0b235bcd194f29b9d9207e3349a22634dc76da | [
"MIT"
] | 1 | 2020-12-12T17:26:34.000Z | 2020-12-12T17:26:34.000Z | paths.py | Mctigger/kaggle_amazon_planet_9th_place | ca0b235bcd194f29b9d9207e3349a22634dc76da | [
"MIT"
] | 8 | 2017-08-10T11:46:01.000Z | 2019-08-13T01:15:09.000Z | import os
logs = './logs/'
models = './models/'
submissions = './submissions/'
data = './data/'
validations = './validations/'
predictions = './predictions/'
thresholds = './thresholds/'
ensemble_weights = './ensemble_weights/'
xgb_configurations = './xgb_configurations/'
train_jpg = '../Planet/train-jpg/'
train_tif = '../Planet/train-tif-v2/'
test_jpg = '../Planet/test-jpg/'
train_csv = '../Planet/train_v2.csv'
dirs = [
logs,
models,
submissions,
data,
validations,
predictions,
thresholds,
ensemble_weights,
xgb_configurations
]
data = [train_jpg, train_tif, test_jpg]
files = [train_csv]
for supplementary_dir in dirs:
if os.path.isdir(supplementary_dir):
continue
if not os.path.isfile(supplementary_dir[:-1]):
os.makedirs(supplementary_dir)
print('Created directory', supplementary_dir)
for data_dir in data:
if os.path.isdir(data_dir):
continue
else:
        print('Directory {} does not exist. Please either put the training/test data in the appropriate directories or '
              'change the path.'.format(data_dir))
for file in files:
if os.path.isfile(file):
continue
else:
        print('File {} does not exist. Please either put the file in the appropriate directories or '
              'change the path.'.format(file))
| 23.344828 | 120 | 0.660266 | 164 | 1,354 | 5.310976 | 0.317073 | 0.091848 | 0.027555 | 0.073479 | 0.181401 | 0.181401 | 0.181401 | 0.110218 | 0.110218 | 0 | 0 | 0.002812 | 0.211965 | 1,354 | 57 | 121 | 23.754386 | 0.813496 | 0 | 0 | 0.111111 | 0 | 0 | 0.325702 | 0.048744 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.022222 | 0 | 0.022222 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
026083d003cf9ee4b28e803aa785869dea6d6dd8 | 2,310 | py | Python | demos/graphql/graph/app.py | hzlmn/aiohttp-demos | 4cf9cc93914a597f64e60beb2fd23d5f2eb73998 | [
"Apache-2.0"
] | 1 | 2021-03-29T13:20:41.000Z | 2021-03-29T13:20:41.000Z | demos/graphql/graph/app.py | hzlmn/aiohttp-demos | 4cf9cc93914a597f64e60beb2fd23d5f2eb73998 | [
"Apache-2.0"
] | null | null | null | demos/graphql/graph/app.py | hzlmn/aiohttp-demos | 4cf9cc93914a597f64e60beb2fd23d5f2eb73998 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from functools import partial
import aiopg.sa
from aiohttp import web
import aioredis
import aiohttp_jinja2
import jinja2
from graph.routes import init_routes
from graph.utils import init_config
from graph.api.dataloaders import UserDataLoader
path = Path(__file__).parent
def init_jinja2(app: web.Application) -> None:
aiohttp_jinja2.setup(
app,
loader=jinja2.FileSystemLoader(str(path / 'templates'))
)
async def init_database(app: web.Application) -> None:
'''
    This signal handler creates the connection to the database on startup.
'''
config = app['config']['postgres']
engine = await aiopg.sa.create_engine(**config)
app['db'] = engine
async def init_redis(app: web.Application) -> None:
'''
    This signal handler creates the connections to redis on startup.
'''
config = app['config']['redis']
sub = await aioredis.create_redis(
f'redis://{config["host"]}:{config["port"]}'
)
pub = await aioredis.create_redis(
f'redis://{config["host"]}:{config["port"]}'
)
create_redis = partial(
aioredis.create_redis,
f'redis://{config["host"]}:{config["port"]}'
)
app['redis_sub'] = sub
app['redis_pub'] = pub
app['create_redis'] = create_redis
async def close_database(app: web.Application) -> None:
'''
    This signal handler closes the database connection before shutdown.
'''
app['db'].close()
await app['db'].wait_closed()
async def close_redis(app: web.Application) -> None:
'''
    This signal handler closes the redis connections before shutdown.
'''
app['redis_sub'].close()
app['redis_pub'].close()
async def init_graph_loaders(app: web.Application) -> None:
'''
    This function initializes the data loaders for `graphene`. It should be called
    after the database has been initialized.
'''
engine = app['db']
class Loaders:
users = UserDataLoader(engine, max_batch_size=100)
app['loaders'] = Loaders()
def init_app() -> web.Application:
app = web.Application()
init_jinja2(app)
init_config(app)
init_routes(app)
app.on_startup.extend([init_redis, init_database, init_graph_loaders])
app.on_cleanup.extend([close_redis, close_database])
return app
| 23.571429 | 79 | 0.668831 | 291 | 2,310 | 5.168385 | 0.274914 | 0.031915 | 0.090426 | 0.083777 | 0.285239 | 0.285239 | 0.285239 | 0.285239 | 0.285239 | 0.255319 | 0 | 0.004913 | 0.206926 | 2,310 | 97 | 80 | 23.814433 | 0.816048 | 0 | 0 | 0.055556 | 0 | 0 | 0.118407 | 0.0662 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.185185 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02624e05b234e7fe7fb990f48fa27eeabfa59f64 | 6,648 | py | Python | src/Willhaben_Grabber.py | atilla-zemo/Willhaben-Grabber | 7090528dcf0c7b02028907c899cdae0555269dfe | [
"MIT"
] | 9 | 2020-11-04T15:27:23.000Z | 2022-01-30T16:27:31.000Z | src/Willhaben_Grabber.py | atilla-zemo/Willhaben-Grabber | 7090528dcf0c7b02028907c899cdae0555269dfe | [
"MIT"
] | 4 | 2021-10-05T10:06:16.000Z | 2022-03-18T14:23:24.000Z | src/Willhaben_Grabber.py | atilla-zemo/Willhaben-Grabber | 7090528dcf0c7b02028907c899cdae0555269dfe | [
"MIT"
] | 4 | 2021-03-24T11:58:54.000Z | 2022-02-15T14:28:38.000Z | import os
import sys
import random
import time
from colorama import init, Fore
import colorama
from get_willhaben_item import get_willhaben_item
from build_url import build_willhaben_url
class Willhaben():
def __init__(self):
self.url_base = "https://www.willhaben.at/iad/kaufen-und-verkaufen/marktplatz"
self.menu = {"1": "/kaufen-und-verkaufen",
"links": {"1": "/antiquitaeten-kunst-6941", "2": "/kameras-tv-multimedia-6808", "3": "/baby-kind-3928",
"4": "/kfz-zubehoer-motorradteile-6142", "5": "/beauty-gesundheit-wellness-3076",
"6": "/mode-accessoires-3275", "7": "/boote-yachten-jetskis-5007823", "8": "/smartphones-telefonie-2691",
"9": "/buecher-filme-musik-387", "10": "/spielen-spielzeug-5136", "11": "/computer-software-5824",
"12": "/sport-sportgeraete-4390", "13": "/dienstleistungen-537", "14": "/tiere-tierbedarf-4915",
"15": "/freizeit-instrumente-kulinarik-6462", "16": "/uhren-schmuck-2409",
"17": "/games-konsolen-2785", "18": "/wohnen-haushalt-gastronomie-5387",
"19": "/haus-garten-werkstatt-3541", "20": "/zu-verschenken/"}}
self.links_with_product = []
self.marktplatz()
def logo(self):
init()
text = """
▄▄▄▄· ▪ ▄▄ • ▄▄▄▄· .▄▄ · .▄▄ · ▄▄▌ ▐ ▄▌▪ ▄▄▌ ▄▄▌ ▄ .▄ ▄▄▄· ▄▄▄▄· ▄▄▄ . ▐ ▄ ▄▄▄▄▄ ▄▄▌
▐█ ▀█▪██ ▐█ ▀ ▪▐█ ▀█▪ ▄█▀▄ ▐█ ▀. ▐█ ▀. ██· █▌▐███ ██• ██• ██▪▐█▐█ ▀█ ▐█ ▀█▪▀▄.▀·•█▌▐█ •██ ▄█▀▄ ▄█▀▄ ██•
▐█▀▀█▄▐█·▄█ ▀█▄▐█▀▀█▄▐█▌.▐▌▄▀▀▀█▄▄▀▀▀█▄ ██▪▐█▐▐▌▐█·██▪ ██▪ ██▀▐█▄█▀▀█ ▐█▀▀█▄▐▀▀▪▄▐█▐▐▌ ▐█.▪▐█▌.▐▌▐█▌.▐▌██▪
██▄▪▐█▐█▌▐█▄▪▐███▄▪▐█▐█▌.▐▌▐█▄▪▐█▐█▄▪▐█ ▐█▌██▐█▌▐█▌▐█▌▐▌▐█▌▐▌██▌▐▀▐█ ▪▐▌██▄▪▐█▐█▄▄▌██▐█▌ ▐█▌·▐█▌.▐▌▐█▌.▐▌▐█▌▐▌
·▀▀▀▀ ▀▀▀·▀▀▀▀ ·▀▀▀▀ ▀█▄▀▪ ▀▀▀▀ ▀▀▀▀ ▀▀▀▀ ▀▪▀▀▀.▀▀▀ .▀▀▀ ▀▀▀ · ▀ ▀ ·▀▀▀▀ ▀▀▀ ▀▀ █▪ ▀▀▀ ▀█▄▀▪ ▀█▄▀▪.▀▀▀
"""
bad_colors = ['LIGHTGREEN_EX', 'GREEN', 'RED', 'BLUE', 'YELLOW']
codes = vars(colorama.Fore)
colors = [codes[color] for color in codes if color in bad_colors]
colored_chars = [random.choice(colors) + char for char in text]
print(''.join(colored_chars) + Fore.CYAN)
def str_input(self, text):
while True:
rt = input(text)
if rt != "":
break
else:
print(Fore.RED + "--------------------\nInvalid selection!\n--------------------" + Fore.CYAN)
return rt
def int_input(self, text):
while True:
try:
rt = int(input(text))
break
except:
print(Fore.RED + "--------------------\nInvalid selection!\n--------------------" + Fore.CYAN)
return rt
def str_in_dict(self, stringo, dicto):
while True:
stringo_input = input(stringo).lower()
if stringo_input in dicto:
break
else:
print(Fore.RED + "--------------------\nInvalid selection!\n--------------------" + Fore.CYAN)
return stringo_input
def int_in_dict(self, stringo, dicto):
while True:
print(Fore.CYAN + stringo)
try:
into_input = int(input(""))
if into_input in dicto:
break
else:
print(Fore.RED + "--------------------\nInvalid selection!\n--------------------" + Fore.CYAN)
except:
print(Fore.RED + "--------------------\nInvalid selection!\n--------------------" + Fore.CYAN)
return into_input
def clear_console(self):
os.system('cls' if os.name == 'nt' else 'clear')
def marktplatz(self):
self.clear_console()
self.logo()
while True:
mp_first_under = input("Do you want to choose a subcategory? Y - Yes or N - No\n").lower()
if mp_first_under == "y":
while True:
mp_user_choice_uc = self.int_input("Choose a subcategory:\n"
"0. Back\n"
"1. Antiquitäten / Kunst\n"
"2. Kameras / TV / Multimedia\n"
"3. Baby / Kind\n"
"4. KFZ-Zubehör / Motorradteile\n"
"5. Beauty / Gesundheit / Wellness\n"
"6. Mode / Accessoires\n"
"7. Boote / Yachten / Jetskis\n"
"8. Smartphones / Telefonie\n"
"9. Bücher / Filme / Musik\n"
"10. Spielen / Spielzeug\n"
"11. Computer / Software\n"
"12. Sport / Sportgeräte\n"
"13. Dienstleistungen\n"
"14. Tiere / Tierbedarf\n"
"15. Freizeit / Instrumente / Kulinarik\n"
"16. Uhren / Schmuck\n"
"17. Games / Konsolen\n"
"18. Wohnen / Haushalt / Gastronomie\n"
"19. Haus / Garten / Werkstatt\n"
"20. To give away Free\n")
if mp_user_choice_uc in range(21):
if not mp_user_choice_uc:
break
else:
url = build_willhaben_url(self, mp_user_choice_uc)
self.zeit_start = time.time()
self.clear_console()
print(Fore.GREEN + "Searching for products..." + Fore.CYAN)
get_willhaben_item(self, url)
self.marktplatz()
else:
print(Fore.RED + "--------------------\nInvalid selection!\n--------------------" + Fore.CYAN)
elif mp_first_under == "n":
url = build_willhaben_url(self, "")
self.zeit_start = time.time()
get_willhaben_item(self, url)
break
else:
print(Fore.RED + "--------------------\nInvalid selection!\n--------------------" + Fore.CYAN)
Willhaben()
| 47.827338 | 136 | 0.393652 | 642 | 6,648 | 4.624611 | 0.384735 | 0.026945 | 0.028292 | 0.047154 | 0.218929 | 0.147524 | 0.147524 | 0.126642 | 0.126642 | 0.112496 | 0 | 0.036011 | 0.402678 | 6,648 | 138 | 137 | 48.173913 | 0.607152 | 0 | 0 | 0.317073 | 0 | 0.01626 | 0.348676 | 0.149067 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065041 | false | 0 | 0.065041 | 0 | 0.170732 | 0.081301 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0263e3c24682d6de8740497039a0da4536183e5c | 4,572 | py | Python | mergesort.py | Levi-Huynh/JS-INTERVIEW | 768e5577fd8f3f26c244154be9d9fd5a348f6171 | [
"MIT"
] | null | null | null | mergesort.py | Levi-Huynh/JS-INTERVIEW | 768e5577fd8f3f26c244154be9d9fd5a348f6171 | [
"MIT"
] | null | null | null | mergesort.py | Levi-Huynh/JS-INTERVIEW | 768e5577fd8f3f26c244154be9d9fd5a348f6171 | [
"MIT"
] | null | null | null | # TO-DO: complete the helper function below to merge 2 sorted arrays
"""
How It Works
This is a “divide and conquer” algorithm. First, the original collection must be divided in half
until we have broken the entire thing down to single collections with a length of 1.
What is special about lists or arrays with a single element? They are already sorted!
It is impossible for a single element to not be sorted.
Then, we are able to “merge” these sorted pieces back together.
Runtime
The “divide” part of this algorithm requires us to cut a collection of elements in half until its length is 1.
If our collection contains n elements, we will have to perform more halving operations as n grows larger.
However, the rate of growth not quite linear. This part of the algorithm has a runtime of O(log(n)).
The “conquer” (merge) part of this algorithm requires no more than a single pass through each sorted sub-collection, giving it a runtime of O(n).
Since we divide, then conquer we can think about the total runtime of Merge Sort as O(log(n)) + O(n) or O(n log(n) )
https://codereview.stackexchange.com/questions/154135/recursive-merge-sort-in-python
"""
# TO-DO: implement the Merge Sort function below USING NONRECURSION
def merge(arrA, arrB):
elements = len(arrA) + len(arrB)
# merged_arr = [0] * elements
leftindex, rightindex = 0, 0 # starting point for iteration!
result = []
# while zero is less than len of arrA & arrB
if not len(arrA) or not len(arrB):
return arrA or arrB
# if len of resultArr is < len A + len B then:
while (len(result) < len(arrA) + len(arrB)):
if arrA[leftindex] < arrB[rightindex]: # check to see if arrA[0] < arrB[0]
# add to result array if arrA < arrB
# (Append which ever array is smaller)
result.append(arrA[leftindex])
# if ArrA[left] is < ArrB[right] way to account for all the elements for each appended & find the remaining
leftindex += 1
# ^ also a way to iterate using leftIndex if ArrA[L] < ArrB[R]
print("leftindex:", leftindex, "result left<right", result)
else:
# (Append which ever array is smaller)
result.append(arrB[rightindex])
rightindex += 1 # loop/iterate thru rightindex
print("rightindex:", rightindex, "result left>right:", result)
if leftindex == len(arrA) or rightindex == len(arrB):
result.extend(arrA[leftindex:] or arrB[rightindex:])
break
return result
def merge_sort(arr):
if len(arr) < 2:
return arr
middle = int(len(arr)/2)
left = merge_sort(arr[:middle])
right = merge_sort(arr[middle:])
return merge(left, right)
# result += arrA[leftindex:] # append to res whatever is left from ArrA
# result += arrB[rightindex:] # append to res whatever is left from arrB
# return result # ordered array with everything thats left
# https://www.geeksforgeeks.org/iterative-merge-sort/
print(merge_sort([5, 10, 2, 4, 1, 3, 5, 21, 2, 4]))
def merge_sort1(arr):
# TO-DO
if len(arr) > 1:
mid = int(len(arr)/2) # find the mid of array
L = arr[:mid] # split array to left
R = arr[mid:] # split array to right
merge_sort(L) # sort first half
merge_sort(R) # sort 2nd half
i = j = k = 0
while i < len(L) and j < len(R):
if L[i] < R[j]:
arr[k] = L[i]
i += 1 # iterates through i
else:
arr[k] = R[j]
                j += 1 # iterates through j
k += 1 # iterates through k
# check if any element was left
while i < len(L):
arr[k] = L[i]
i += 1 # moves through towards right
k += 1 # moves through towards right and makes each k = i
while j < len(R):
arr[k] = R[j]
j += 1 # moves through towards right of remaining j
k += 1 # moves through towards right and makes remaining j = k
print("merge:", arr)
return arr
# merge_sort([1, 5, 8, 4, 2, 9, 6, 0, 3, 7])
# TO-DO: implement the Merge Sort function below USING RECURSION
# STRETCH: implement an in-place merge sort algorithm
def merge_in_place(arr, start, mid, end):
# TO-DO
return arr
def merge_sort_in_place(arr, l, r):
# TO-DO
return arr
# STRETCH: implement the Timsort function below
# hint: check out https://github.com/python/cpython/blob/master/Objects/listsort.txt
def timsort(arr):
return arr
| 36 | 145 | 0.629046 | 697 | 4,572 | 4.106169 | 0.295552 | 0.044025 | 0.018169 | 0.027952 | 0.162124 | 0.113906 | 0.102725 | 0.08246 | 0.030049 | 0 | 0 | 0.015705 | 0.275809 | 4,572 | 126 | 146 | 36.285714 | 0.848686 | 0.569991 | 0 | 0.305085 | 0 | 0 | 0.032342 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0 | 0.050847 | 0.237288 | 0.067797 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0264a6b7b00bf39bd8c56c7451f4a38a8edf8f89 | 1,051 | py | Python | tryingKeras.py | mrek235/robotLearning | 4da7282f169f5ed2735c6a70084ff1e4decd2cd2 | [
"BSD-3-Clause"
] | null | null | null | tryingKeras.py | mrek235/robotLearning | 4da7282f169f5ed2735c6a70084ff1e4decd2cd2 | [
"BSD-3-Clause"
] | null | null | null | tryingKeras.py | mrek235/robotLearning | 4da7282f169f5ed2735c6a70084ff1e4decd2cd2 | [
"BSD-3-Clause"
] | null | null | null | from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import glob
model = Sequential()
model.add(Dense(units=64,activation = 'relu',input_dim = 4096))
model.add(Dense(units=10,activation = 'softmax'))
#model.compile(loss='categorical_crossentropy',
#              optimizer='sgd',
#              metrics=['accuracy'])
file_list = glob.glob("*.npz")
#print(file_list)
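# NOTE (assumption): the .npz filenames are expected to encode positions as
# 'P<float>P<float>...P<float>.npz'; each filename is parsed into a list of floats below.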
def file_list_to_positions_list(file_list):
positions_list = []
for file_name in file_list:
file_names_without_npz = file_name.split('.npz')
file_names_without_npz.remove('')
for name in file_names_without_npz:
file_name_without_npz_and_p = name.split('P')
file_name_without_npz_and_p.remove('')
floated_sublist = []
for position in file_name_without_npz_and_p:
floated_sublist.append(float(position))
positions_list.append(floated_sublist)
print(positions_list)
return positions_list
file_list_to_positions_list(file_list) | 28.405405 | 63 | 0.688868 | 138 | 1,051 | 4.898551 | 0.376812 | 0.08284 | 0.075444 | 0.093195 | 0.257396 | 0.257396 | 0.091716 | 0 | 0 | 0 | 0 | 0.009685 | 0.214082 | 1,051 | 37 | 64 | 28.405405 | 0.808717 | 0.121789 | 0 | 0 | 0 | 0 | 0.022826 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.173913 | 0 | 0.26087 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0265a5c8cb4df7210db26f3c8b22cb86d120f80c | 946 | py | Python | Super_data/code.py | harshuvj/ga-learner-dst-repo | 6731fcacca4ae3965adbfdf5960468921ef6c20a | [
"MIT"
] | null | null | null | Super_data/code.py | harshuvj/ga-learner-dst-repo | 6731fcacca4ae3965adbfdf5960468921ef6c20a | [
"MIT"
] | null | null | null | Super_data/code.py | harshuvj/ga-learner-dst-repo | 6731fcacca4ae3965adbfdf5960468921ef6c20a | [
"MIT"
] | null | null | null | # --------------
#Header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Reading of the file
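# NOTE: 'path' is presumably provided by the grading/notebook environment; it is not defined in this script.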
data=pd.read_csv(path)
# Code starts here
data['Gender'].replace('-','Agender',inplace=True)
data['Gender'].value_counts().plot(kind='bar')
plt.show()
alignment = pd.Series(data['Alignment'].value_counts())
plt.pie(alignment.values,explode = (0,0,0.5,),labels=alignment.index,autopct = '%1.1f%%')
plt.show()
new = data[['Intelligence','Strength','Combat']].copy()
corr = new.corr()
print(corr)
corr_IC = corr.iloc[0,2]
print('corr_IC:' , corr_IC)
corr_SC = corr.iloc[1,2]
print('corr_SC:',corr_SC)
if corr_IC > corr_SC:
print("Person's intelegence has more impact on his combat skills")
else:
print("Person's strength has more impact on his combat skills")
super_best_names = [i for i in data[data['Total']> data['Total'].quantile(0.99)]['Name']]
super_best_names
| 22.52381 | 90 | 0.667019 | 146 | 946 | 4.219178 | 0.527397 | 0.038961 | 0.064935 | 0.038961 | 0.097403 | 0.097403 | 0.097403 | 0 | 0 | 0 | 0 | 0.01623 | 0.153277 | 946 | 41 | 91 | 23.073171 | 0.752809 | 0.065539 | 0 | 0.086957 | 0 | 0 | 0.245238 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0.217391 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
026999aab1f848c8968e69c8166d47a547339413 | 453 | py | Python | socket_test/httpserver.py | nciefeiniu/python-test | d81fcfff8cdec724c3010d6b7a77aabad7f90595 | [
"Apache-2.0"
] | null | null | null | socket_test/httpserver.py | nciefeiniu/python-test | d81fcfff8cdec724c3010d6b7a77aabad7f90595 | [
"Apache-2.0"
] | null | null | null | socket_test/httpserver.py | nciefeiniu/python-test | d81fcfff8cdec724c3010d6b7a77aabad7f90595 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import socket
def httpserver():
with socket.socket() as server:
server.bind(('127.0.0.1', 80))
server.listen()
conn,addr = server.accept()
        # minimal HTTP response: status line, blank line, then the HTML body
        msg = (b'HTTP/1.1 200 OK\r\n\r\n'
               b'<html><head></head><body>Hello world!!</body></html>')
while True:
request = conn.recv(1024)
print(request)
conn.sendall(msg)
if __name__ == "__main__":
httpserver() | 19.695652 | 73 | 0.540839 | 54 | 453 | 4.388889 | 0.740741 | 0.092827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042945 | 0.280353 | 453 | 23 | 74 | 19.695652 | 0.684049 | 0.092715 | 0 | 0 | 0 | 0 | 0.168293 | 0.12439 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.153846 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
026d83efd82ed17c9772e4cf76f3b3efc498de1f | 897 | py | Python | menu.py | aphkyle/dpet | 444054d6bd67a5b2dcce94e7ed5561aaa0c20b50 | [
"MIT"
] | null | null | null | menu.py | aphkyle/dpet | 444054d6bd67a5b2dcce94e7ed5561aaa0c20b50 | [
"MIT"
] | 1 | 2022-01-26T09:58:46.000Z | 2022-01-26T09:58:46.000Z | menu.py | aphkyle/dpet | 444054d6bd67a5b2dcce94e7ed5561aaa0c20b50 | [
"MIT"
] | null | null | null | import platform
import subprocess
import tkinter as tk
from tkinter import filedialog
from config import sprite_sets
class SpriteMenu(tk.Menu):
def __init__(self, event, *args, **kwargs):
super().__init__(*args, **kwargs, tearoff=False)
self.fire_event = event
self.add_command(label="Select Sprite set", command=self.select_sprite_set)
self.add_separator()
self.add_command(label="Open recycle bin", command=self.open_recycle_bin)
self.add_separator()
self.add_command(label="Quit", command=quit)
def select_sprite_set(self):
self.fire_event(
filedialog.askdirectory(initialdir=sprite_sets, title="Select sprite set")
)
def open_recycle_bin(self):
if platform.system() == "Windows":
subprocess.run(["start", "shell:RecycleBinFolder"], shell=True)
            # shell=True is required because "start" is a cmd.exe built-in, not an executable
| 32.035714 | 86 | 0.675585 | 109 | 897 | 5.330275 | 0.422018 | 0.060241 | 0.10327 | 0.098107 | 0.120482 | 0.120482 | 0.120482 | 0 | 0 | 0 | 0 | 0 | 0.216276 | 897 | 27 | 87 | 33.222222 | 0.826458 | 0.014493 | 0 | 0.095238 | 0 | 0 | 0.099773 | 0.024943 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.238095 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02767f7d9d794537517e4037baaed00430914d0b | 603 | py | Python | PySpace/mysql/mysql_querydata.py | dralee/LearningRepository | 4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4 | [
"Apache-2.0"
] | null | null | null | PySpace/mysql/mysql_querydata.py | dralee/LearningRepository | 4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4 | [
"Apache-2.0"
] | null | null | null | PySpace/mysql/mysql_querydata.py | dralee/LearningRepository | 4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# File name: mysql_querydata.py
import pymysql
# open a database connection
db = pymysql.connect('localhost','root','1234','fdtest')
# use the cursor() method to create a cursor object
cursor = db.cursor()
# the SQL query statement
sql = "SELECT * FROM EMPLOYEE \
WHERE INCOME> '%d'" % (1000)
try:
    # execute the SQL statement
cursor.execute(sql)
    # fetch all rows of the result set
results = cursor.fetchall()
for row in results:
fname = row[0]
lname = row[1]
age = row[2]
sex = row[3]
income = row[4]
        # print the result
print("fname=%s,lname=%s,age=%d,sex=%s,income=%d" % \
(fname,lname,age,sex,income))
except:
print("Error: unable to fetch data")
# close the database connection
db.close()
| 17.228571 | 56 | 0.651741 | 86 | 603 | 4.55814 | 0.662791 | 0.035714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028 | 0.170813 | 603 | 34 | 57 | 17.735294 | 0.756 | 0.190713 | 0 | 0 | 0 | 0 | 0.194154 | 0.085595 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.052632 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02787064015649ca1cce6c62ee821b72f58da407 | 20,419 | py | Python | Development of Near Real-time Wireless Image Sequence Streaming Cloud Using Apache Kafka for Road Traffic Monitoring Application/monitoring_app_external_broker.py | IoTcloudServe/smart-mobility-chula | ad6ed00aace2901811f003c14c199e9b0dedbc93 | [
"Apache-2.0"
] | 1 | 2020-08-18T07:21:18.000Z | 2020-08-18T07:21:18.000Z | Development of Near Real-time Wireless Image Sequence Streaming Cloud Using Apache Kafka for Road Traffic Monitoring Application/monitoring_app_external_broker.py | IoTcloudServe/smart-mobility-chula | ad6ed00aace2901811f003c14c199e9b0dedbc93 | [
"Apache-2.0"
] | null | null | null | Development of Near Real-time Wireless Image Sequence Streaming Cloud Using Apache Kafka for Road Traffic Monitoring Application/monitoring_app_external_broker.py | IoTcloudServe/smart-mobility-chula | ad6ed00aace2901811f003c14c199e9b0dedbc93 | [
"Apache-2.0"
] | 5 | 2019-06-08T10:21:13.000Z | 2020-08-14T09:02:31.000Z | import time
import wx
import cStringIO
from kafka import KafkaConsumer
import threading
import Queue
from datetime import datetime
#import simplejson
import pickle
local_consumer1 = KafkaConsumer('PhayaThai-1', bootstrap_servers = ['192.168.1.7:9092'], group_id = 'view1', consumer_timeout_ms = 300)
local_consumer2 = KafkaConsumer('PhayaThai-2', bootstrap_servers = ['192.168.1.7:9092'], group_id = 'view2', consumer_timeout_ms = 300)
local_consumer3 = KafkaConsumer('PhayaThai-3', bootstrap_servers = ['192.168.1.7:9092'], group_id = 'view3', consumer_timeout_ms = 300)
local_consumer4 = KafkaConsumer('PhayaThai-4', bootstrap_servers = ['192.168.1.7:9092'], group_id = 'view4', consumer_timeout_ms = 300)
local_consumer5 = KafkaConsumer('PhayaThai-5', bootstrap_servers = ['192.168.1.7:9092'], group_id = 'view5', consumer_timeout_ms = 300)
local_consumer6 = KafkaConsumer('PhayaThai-6', bootstrap_servers = ['192.168.1.7:9092'], group_id = 'view6', consumer_timeout_ms = 300)
local_consumer1.poll()
local_consumer2.poll()
local_consumer3.poll()
local_consumer4.poll()
local_consumer5.poll()
local_consumer6.poll()
local_consumer1.seek_to_end()
local_consumer2.seek_to_end()
local_consumer3.seek_to_end()
local_consumer4.seek_to_end()
local_consumer5.seek_to_end()
local_consumer6.seek_to_end()
my_queue1 = Queue.Queue()
my_queue2 = Queue.Queue()
my_queue3 = Queue.Queue()
my_queue4 = Queue.Queue()
my_queue5 = Queue.Queue()
my_queue6 = Queue.Queue()
start = time.time()
period_of_time = 120
latency_list_of_pi1 = []
latency_list_of_pi2 = []
latency_list_of_pi3 = []
latency_list_of_pi4 = []
latency_list_of_pi5 = []
latency_list_of_pi6 = []
unix_timestamp_of_pi1 = []
unix_timestamp_of_pi2 = []
unix_timestamp_of_pi3 = []
unix_timestamp_of_pi4 = []
unix_timestamp_of_pi5 = []
unix_timestamp_of_pi6 = []
image_list_pi1 = []
image_list_pi2 = []
image_list_pi3 = []
image_list_pi4 = []
image_list_pi5 = []
image_list_pi6 = []
class MyPanel(wx.Panel):
""""""
#----------------------------------------------------------------------
def __init__(self, parent):
wx.Panel.__init__(self, parent)
background_image = 'new_one_1920_1080.png'
bmp_background = wx.Image(background_image, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.bitmap1 = wx.StaticBitmap(self, -1, bmp_background, (0, 0))
parent.SetTitle('consumer application')
self.font = wx.Font(25, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
self.flashingText1 = wx.StaticText(self, label = 'Phaya Thai - 1', pos = (530, 610))
self.flashingText2 = wx.StaticText(self, label = 'Phaya Thai - 2', pos = (950, 610))
self.flashingText3 = wx.StaticText(self, label = 'Phaya Thai - 3', pos = (1360, 610))
self.flashingText4 = wx.StaticText(self, label = 'Phaya Thai - 4', pos = (530, 360))
self.flashingText5 = wx.StaticText(self, label = 'Phaya Thai - 5', pos = (950, 360))
self.flashingText6 = wx.StaticText(self, label = 'Phaya Thai - 6', pos = (1360, 360))
self.flashingText1.SetForegroundColour('red')
self.flashingText2.SetForegroundColour('red')
self.flashingText3.SetForegroundColour('red')
self.flashingText4.SetForegroundColour('red')
self.flashingText5.SetForegroundColour('red')
self.flashingText6.SetForegroundColour('red')
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.update, self.timer)
self.timer.Start(50)
# self.timer.Start(200)
def save_list_pi1():
global latency_list_of_pi1
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
# threading.Timer(300.0, save_list_pi1).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi1 latency list' + current_time + '.txt', 'w')
# simplejson.dump(latency_list_of_pi1, f)
# f.close()
threading.Timer(300.0, save_list_pi1).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi1 latency list' + current_time + '.txt', 'w') as fp:
pickle.dump(latency_list_of_pi1, fp)
# fig, ax = plt.subplots()
#
# ax.plot(latency_list_of_pi1)
#
# ax.set(title="Latency per image vs messages (PhayaThai-1) at Local broker 2")
#
# ax.set(xlabel="Number of messages from PhayaThai-1", ylabel="Latency in ms")
#
# plt.show()
latency_list_of_pi1 *= 0
def save_list_pi2():
global latency_list_of_pi2
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_list_pi2).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi2 latency list' + current_time + '.txt', 'w')
# simplejson.dump(latency_list_of_pi2, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi2 latency list' + current_time + '.txt', 'w') as fp:
pickle.dump(latency_list_of_pi2, fp)
latency_list_of_pi2 *= 0
def save_list_pi3():
global latency_list_of_pi3
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_list_pi3).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi3 latency list' + current_time + '.txt', 'w')
# simplejson.dump(latency_list_of_pi3, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi3 latency list' + current_time + '.txt', 'w') as fp:
pickle.dump(latency_list_of_pi3, fp)
latency_list_of_pi3 *= 0
def save_list_pi4():
global latency_list_of_pi4
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_list_pi4).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi4 latency list' + current_time + '.txt', 'w')
# simplejson.dump(latency_list_of_pi4, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi4 latency list' + current_time + '.txt', 'w') as fp:
pickle.dump(latency_list_of_pi4, fp)
latency_list_of_pi4 *= 0
def save_list_pi5():
global latency_list_of_pi5
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_list_pi5).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi5 latency list' + current_time + '.txt', 'w')
# simplejson.dump(latency_list_of_pi5, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi5 latency list' + current_time + '.txt', 'w') as fp:
pickle.dump(latency_list_of_pi5, fp)
latency_list_of_pi5 *= 0
def save_list_pi6():
global latency_list_of_pi6
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_list_pi6).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi6 latency list' + current_time + '.txt', 'w')
# simplejson.dump(latency_list_of_pi6, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi6 latency list' + current_time + '.txt', 'w') as fp:
pickle.dump(latency_list_of_pi6, fp)
latency_list_of_pi6 *= 0
def save_loss_list_pi1():
global image_list_pi1
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
# threading.Timer(300.0, save_loss_list_pi1).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi1 image list ' + current_time + '.txt', 'w')
# simplejson.dump(image_list_pi1, f)
# f.close()
threading.Timer(300.0, save_loss_list_pi1).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi1 image list ' + current_time + '.txt', 'w') as fp:
pickle.dump(image_list_pi1, fp)
image_list_pi1 *= 0
def save_loss_list_pi2():
global image_list_pi2
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_loss_list_pi2).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi2 image list ' + current_time + '.txt', 'w')
# simplejson.dump(image_list_pi2, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi2 image list ' + current_time + '.txt', 'w') as fp:
pickle.dump(image_list_pi2, fp)
image_list_pi2 *= 0
def save_loss_list_pi3():
global image_list_pi3
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_loss_list_pi3).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi3 image list ' + current_time + '.txt', 'w')
# simplejson.dump(image_list_pi3, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi3 image list ' + current_time + '.txt', 'w') as fp:
pickle.dump(image_list_pi3, fp)
image_list_pi3 *= 0
def save_loss_list_pi4():
global image_list_pi4
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_loss_list_pi4).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi4 image list ' + current_time + '.txt', 'w')
# simplejson.dump(image_list_pi4, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi4 image list ' + current_time + '.txt', 'w') as fp:
pickle.dump(image_list_pi4, fp)
image_list_pi4 *= 0
def save_loss_list_pi5():
global image_list_pi5
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_loss_list_pi5).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi5 image list ' + current_time + '.txt', 'w')
# simplejson.dump(image_list_pi5, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi5 image list ' + current_time + '.txt', 'w') as fp:
pickle.dump(image_list_pi5, fp)
image_list_pi5 *= 0
def save_loss_list_pi6():
global image_list_pi6
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_loss_list_pi6).start()
# f = open('/home/gateway2/Downloads/lab-based_testing_result/testing_result/pi6 image list ' + current_time + '.txt', 'w')
# simplejson.dump(image_list_pi6, f)
# f.close()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi6 image list ' + current_time + '.txt', 'w') as fp:
pickle.dump(image_list_pi6, fp)
image_list_pi6 *= 0
def save_send_time_list_pi1():
global unix_timestamp_of_pi1
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_send_time_list_pi1).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi1 send time list ' + current_time + '.txt', 'w') as fp:
pickle.dump(unix_timestamp_of_pi1, fp)
unix_timestamp_of_pi1 *= 0
def save_send_time_list_pi2():
global unix_timestamp_of_pi2
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_send_time_list_pi2).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi6 send time list ' + current_time + '.txt', 'w') as fp:
pickle.dump(unix_timestamp_of_pi2, fp)
unix_timestamp_of_pi2 *= 0
def save_send_time_list_pi3():
global unix_timestamp_of_pi3
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_send_time_list_pi3).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi3 send time list ' + current_time + '.txt', 'w') as fp:
pickle.dump(unix_timestamp_of_pi3, fp)
unix_timestamp_of_pi3 *= 0
def save_send_time_list_pi4():
global unix_timestamp_of_pi4
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_send_time_list_pi4).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi4 send time list ' + current_time + '.txt', 'w') as fp:
pickle.dump(unix_timestamp_of_pi4, fp)
unix_timestamp_of_pi4 *= 0
def save_send_time_list_pi5():
global unix_timestamp_of_pi5
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_send_time_list_pi5).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi5 send time list ' + current_time + '.txt', 'w') as fp:
pickle.dump(unix_timestamp_of_pi5, fp)
unix_timestamp_of_pi5 *= 0
def save_send_time_list_pi6():
global unix_timestamp_of_pi6
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_" + "%H:%M:%S.%f")
threading.Timer(300.0, save_send_time_list_pi6).start()
with open('/home/controller/Downloads/lab-based_testing_result/testing_result/pi6 send time list ' + current_time + '.txt', 'w') as fp:
pickle.dump(unix_timestamp_of_pi6, fp)
unix_timestamp_of_pi6 *= 0
save_list_pi1()
save_list_pi2()
save_list_pi3()
save_list_pi4()
save_list_pi5()
save_list_pi6()
save_loss_list_pi1()
save_loss_list_pi2()
save_loss_list_pi3()
save_loss_list_pi4()
save_loss_list_pi5()
save_loss_list_pi6()
save_send_time_list_pi1()
save_send_time_list_pi2()
save_send_time_list_pi3()
save_send_time_list_pi4()
save_send_time_list_pi5()
save_send_time_list_pi6()
def update(self, event):
""""""
global local_consumer1
global local_consumer2
global local_consumer3
global local_consumer4
global local_consumer5
global local_consumer6
global my_queue1
global my_queue2
global my_queue3
global my_queue4
global my_queue5
global my_queue6
global latency_list_of_pi1
global latency_list_of_pi2
global latency_list_of_pi3
global latency_list_of_pi4
global latency_list_of_pi5
global latency_list_of_pi6
global unix_timestamp_of_pi1
global unix_timestamp_of_pi2
global unix_timestamp_of_pi3
global unix_timestamp_of_pi4
global unix_timestamp_of_pi5
global unix_timestamp_of_pi6
global image_list_pi1
global image_list_pi2
global image_list_pi3
global image_list_pi4
global image_list_pi5
global image_list_pi6
def kafka_image(consumer, out_queue, latency_list, timestamp, camera_name, image_list):
msg = next(consumer)
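            # message layout (as produced by the camera-side producer): the ConsumerRecord's
            # value field (index 6 of the kafka-python record tuple) holds
            # "<frame number> chula <unix timestamp in ms> chula <jpeg bytes>"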
message = msg[6].split(' chula ')
now = int(round(time.time() * 1000))
sending_time = message[1]
time_diff = abs(now - int(float(sending_time)))
stream = cStringIO.StringIO(message[2])
out_queue.put(stream)
            print('The latency of ' + camera_name + ' is ' + str(time_diff) + 'ms')
latency_list.append(str(time_diff))
timestamp.append(str(sending_time))
frame = message[0]
image_list.append(frame)
def show_image(default_consumer, my_queue, camera_name, latency_list, timestamp, image_list):
try:
kafka_image(default_consumer, my_queue, latency_list, timestamp, camera_name, image_list)
print('reading message from default '+ camera_name)
except:
# print('message is not found and showing previous image ' + camera_name)
pass
t1 = threading.Thread(target=show_image, args=(local_consumer1, my_queue1, 'PhayaThai-1',latency_list_of_pi1, unix_timestamp_of_pi1, image_list_pi1, ))
t2 = threading.Thread(target=show_image, args=(local_consumer2, my_queue2, 'PhayaThai-2',latency_list_of_pi2, unix_timestamp_of_pi2, image_list_pi2, ))
# t3 = threading.Thread(target=show_image, args=(local_consumer3, my_queue3, 'PhayaThai-3',latency_list_of_pi3, unix_timestamp_of_pi3, image_list_pi3, ))
t4 = threading.Thread(target=show_image, args=(local_consumer4, my_queue4, 'PhayaThai-4',latency_list_of_pi4, unix_timestamp_of_pi4, image_list_pi4, ))
# t5 = threading.Thread(target=show_image, args=(local_consumer5, my_queue5, 'PhayaThai-5',latency_list_of_pi5, unix_timestamp_of_pi5, image_list_pi5, ))
# t6 = threading.Thread(target=show_image, args=(local_consumer6, my_queue6, 'PhayaThai-6',latency_list_of_pi6, unix_timestamp_of_pi6, image_list_pi6, ))
t1.start()
t2.start()
# t3.start()
t4.start()
# t5.start()
# t6.start()
dc = wx.PaintDC(self)
try:
self.bmp1 = wx.BitmapFromImage(wx.ImageFromStream(my_queue1.get_nowait()))
dc.DrawBitmap(self.bmp1, 450, 630)
except:
pass
try:
self.bmp2 = wx.BitmapFromImage(wx.ImageFromStream(my_queue2.get_nowait()))
dc.DrawBitmap(self.bmp2, 860, 630)
except:
pass
# try:
# self.bmp3 = wx.BitmapFromImage(wx.ImageFromStream(my_queue3.get_nowait()))
# dc.DrawBitmap(self.bmp3, 1270, 630)
# except:
# pass
try:
self.bmp4 = wx.BitmapFromImage(wx.ImageFromStream(my_queue4.get_nowait()))
dc.DrawBitmap(self.bmp4, 450, 380)
except:
pass
# try:
# self.bmp5 = wx.BitmapFromImage(wx.ImageFromStream(my_queue5.get_nowait()))
# dc.DrawBitmap(self.bmp5, 860, 380)
# except:
# pass
# try:
# self.bmp6 = wx.BitmapFromImage(wx.ImageFromStream(my_queue6.get_nowait()))
# dc.DrawBitmap(self.bmp6, 1270, 380)
# except:
# pass
#######################################################################################
class MyFrame(wx.Frame):
""""""
# ---------------------------------------------------------------------------------
def __init__(self):
"""Constructor"""
wx.Frame.__init__(self, None, title="An image on a panel", size=(1920, 1080))
panel = MyPanel(self)
self.Show()
# ----------------------------------------------------------------------
if __name__ == "__main__":
app = wx.App(False)
frame = MyFrame()
app.MainLoop()
| 41.586558 | 160 | 0.613791 | 2,620 | 20,419 | 4.474046 | 0.091603 | 0.066542 | 0.047688 | 0.061423 | 0.686231 | 0.525678 | 0.496758 | 0.471507 | 0.463999 | 0.440283 | 0 | 0.041675 | 0.250257 | 20,419 | 490 | 161 | 41.671429 | 0.724019 | 0.184191 | 0 | 0.27044 | 0 | 0 | 0.146109 | 0.077759 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072327 | false | 0.012579 | 0.025157 | 0 | 0.103774 | 0.006289 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0279b009897ee59174d857e086b99e545d108c7e | 3,383 | py | Python | src/naive_base.py | knmac/LCDC_release | f977ca1cda972983cac7e33b324f07f2e1463a19 | [
"MIT"
] | 24 | 2019-09-18T09:22:08.000Z | 2022-03-08T06:47:33.000Z | src/naive_base.py | knmac/LCDC_release | f977ca1cda972983cac7e33b324f07f2e1463a19 | [
"MIT"
] | 6 | 2019-09-18T09:21:02.000Z | 2022-02-09T23:31:48.000Z | src/naive_base.py | knmac/LCDC_release | f977ca1cda972983cac7e33b324f07f2e1463a19 | [
"MIT"
] | 4 | 2020-08-06T02:05:36.000Z | 2021-12-12T07:19:17.000Z | """Naive linear fusion for baseline with appearance stream ResNet and motion
stream VGG16
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import sys
import argparse
import pickle
import numpy as np
from data_utils import metrics_maker
_SEARCH_WEIGHT = False # True if search the best combination weight
def parse_args():
"""Parse input arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--motion_pth', type=str,
help='path to motion stream results dict')
parser.add_argument('-a', '--appear_pth', type=str,
help='path to appearance stream results dict')
    parser.add_argument('-d', '--downsample_rate', type=int, default=1,
                        help='temporal downsampling rate applied to the framewise scores')
args = parser.parse_args()
assert args.appear_pth is not None and os.path.isfile(args.appear_pth)
assert args.motion_pth is not None and os.path.isfile(args.motion_pth)
return args
def linear_combine(appear_w, auto_make=False):
"""Linearly combine the scores
Args:
appear_w: weight for appearance stream. Weight for motion stream is
computed as (1 - appear_w)
Return:
acc: framewise accuracy
"""
appear_results = pickle.load(open(args.appear_pth, 'rb'))
motion_results = pickle.load(open(args.motion_pth, 'rb'))
n_vids = len(appear_results['y_true_in'])
# check groundtruth
for i in range(n_vids):
assert appear_results['y_true_in'][i] == motion_results['y_true_in'][i]
# combine score
score_appear = appear_results['y_score_in']
score_motion = motion_results['y_score_in']
gt = motion_results['y_true_in']
if args.downsample_rate != 1:
score_appear = [x[::args.downsample_rate] for x in score_appear]
score_motion = [x[::args.downsample_rate] for x in score_motion]
gt = [x[::args.downsample_rate] for x in gt]
score_combine, pred_combine = [], []
for i in range(n_vids):
foo = np.array(score_appear[i])
bar = np.array(score_motion[i])
tmp = appear_w*foo + (1-appear_w)*bar
score_combine.append(tmp)
pred_combine.append(tmp.argmax(axis=1))
if auto_make:
print('Appearance stream:')
metrics_maker.auto_make(score_appear, gt)
print('\n')
print('Motion stream:')
metrics_maker.auto_make(score_motion, gt)
print('\n')
print('Two streams:')
metrics_maker.auto_make(score_combine, gt)
acc = metrics_maker.accuracy(pred_combine, gt)
return acc
def main():
"""Main function"""
if _SEARCH_WEIGHT:
maxacc = 0.0
bestw = 0
for appear_w in np.arange(0.5, 1.0, 0.005):
output = linear_combine(appear_w)
if output > maxacc:
maxacc = output
bestw = appear_w
else:
bestw = 0.5
print('Appearance stream only: {:.02f}'.format(linear_combine(1.0)))
print('Motion stream only: {:.02f}'.format(linear_combine(0.0)))
print('Two-stream: {:.02f} (appear_w={:.03f})'.format(
linear_combine(bestw), bestw))
linear_combine(bestw, auto_make=True)
pass
if __name__ == '__main__':
args = parse_args()
sys.exit(main())
| 30.754545 | 79 | 0.643216 | 459 | 3,383 | 4.498911 | 0.263617 | 0.030508 | 0.023245 | 0.027119 | 0.298789 | 0.22276 | 0.107022 | 0.05908 | 0.030024 | 0 | 0 | 0.01251 | 0.243866 | 3,383 | 109 | 80 | 31.036697 | 0.794762 | 0.112918 | 0 | 0.082192 | 0 | 0 | 0.13042 | 0 | 0 | 0 | 0 | 0 | 0.041096 | 1 | 0.041096 | false | 0.013699 | 0.123288 | 0 | 0.191781 | 0.123288 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
027d53ae46bd5528944a77827c40580ea2f9b212 | 9,132 | py | Python | pyuvdata/tests/test_mwa_corr_fits.py | ntk688/pyuvdata | 96be086324ba8f35815dd590429c6415411c15ea | [
"BSD-2-Clause"
] | null | null | null | pyuvdata/tests/test_mwa_corr_fits.py | ntk688/pyuvdata | 96be086324ba8f35815dd590429c6415411c15ea | [
"BSD-2-Clause"
] | null | null | null | pyuvdata/tests/test_mwa_corr_fits.py | ntk688/pyuvdata | 96be086324ba8f35815dd590429c6415411c15ea | [
"BSD-2-Clause"
] | null | null | null | # -*- mode: python; coding: utf-8 -*
# Copyright (c) 2019 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for MWACorrFITS object."""
import pytest
import os
import numpy as np
from pyuvdata import UVData
from pyuvdata.data import DATA_PATH
import pyuvdata.tests as uvtest
from astropy.io import fits
# set up MWA correlator file list
testdir = os.path.join(DATA_PATH, 'mwa_corr_fits_testfiles/')
testfiles = ['1131733552.metafits', '1131733552_20151116182537_mini_gpubox01_00.fits',
'1131733552_20151116182637_mini_gpubox06_01.fits', '1131733552_mini_01.mwaf',
'1131733552_mini_06.mwaf', '1131733552_mod.metafits',
'1131733552_mini_cotter.uvfits']
filelist = [testdir + i for i in testfiles]
def test_ReadMWAWriteUVFits():
"""
MWA correlator fits to uvfits loopback test.
Read in MWA correlator files, write out as uvfits, read back in and check
for object equality.
"""
mwa_uv = UVData()
uvfits_uv = UVData()
messages = ['telescope_location is not set',
'some coarse channel files were not submitted']
uvtest.checkWarnings(mwa_uv.read_mwa_corr_fits, func_args=[filelist[0:2]],
func_kwargs={'correct_cable_len': True, 'phase_data': True},
nwarnings=2, message=messages)
testfile = os.path.join(DATA_PATH, 'test/outtest_MWAcorr.uvfits')
mwa_uv.write_uvfits(testfile, spoof_nonessential=True)
uvfits_uv.read_uvfits(testfile)
assert mwa_uv == uvfits_uv
def test_ReadMWA_ReadCotter():
"""
Pyuvdata and cotter equality test.
Read in MWA correlator files and the corresponding cotter file and check
for data array equality.
"""
mwa_uv = UVData()
cotter_uv = UVData()
# cotter data has cable correction and is unphased
mwa_uv.read_mwa_corr_fits(filelist[0:2], correct_cable_len=True)
cotter_uv.read(filelist[6])
# cotter doesn't record the auto xy polarizations
# due to a possible bug in cotter, the auto yx polarizations are conjugated
# fix these before testing data_array
autos = np.isclose(mwa_uv.ant_1_array - mwa_uv.ant_2_array, 0.0)
cotter_uv.data_array[autos, :, :, 2] = cotter_uv.data_array[autos, :, :, 3]
cotter_uv.data_array[autos, :, :, 3] = np.conj(cotter_uv.data_array[autos, :, :, 3])
assert np.allclose(mwa_uv.data_array[:, :, :, :],
cotter_uv.data_array[:, :, :, :], atol=1e-4, rtol=0)
def test_ReadMWAWriteUVFits_meta_mod():
"""
MWA correlator fits to uvfits loopback test with a modified metafits file.
Read in MWA correlator files, write out as uvfits, read back in and check
for object equality.
"""
# The metafits file has been modified to contain some coarse channels < 129,
# and to have an uncorrected cable length.
mwa_uv = UVData()
uvfits_uv = UVData()
messages = ['telescope_location is not set',
'some coarse channel files were not submitted']
files = [filelist[1], filelist[5]]
uvtest.checkWarnings(mwa_uv.read_mwa_corr_fits, func_args=[files],
func_kwargs={'correct_cable_len': True, 'phase_data': True},
nwarnings=2, message=messages)
testfile = os.path.join(DATA_PATH, 'test/outtest_MWAcorr.uvfits')
mwa_uv.write_uvfits(testfile, spoof_nonessential=True)
uvfits_uv.read_uvfits(testfile)
assert mwa_uv == uvfits_uv
def test_ReadMWA_multi():
"""Test reading in two sets of files."""
set1 = filelist[0:2]
set2 = [filelist[0], filelist[2]]
mwa_uv = UVData()
messages = ['telescope_location is not set',
'some coarse channel files were not submitted',
'telescope_location is not set',
'some coarse channel files were not submitted',
'Combined frequencies are not contiguous. This will make it impossible to write this data out to some file types.']
uvtest.checkWarnings(mwa_uv.read_mwa_corr_fits, func_args=[[[set1], [set2]]],
nwarnings=5, message=messages)
def test_ReadMWA_multi_concat():
"""Test reading in two sets of files with fast concatenation."""
# modify file so that time arrays are matching
mod_mini_6 = os.path.join(DATA_PATH, 'test/mini_gpubox06_01.fits')
with fits.open(filelist[2]) as mini6:
mini6[1].header['time'] = 1447698337
mini6.writeto(mod_mini_6)
set1 = filelist[0:2]
set2 = [filelist[0], mod_mini_6]
mwa_uv = UVData()
messages = ['telescope_location is not set',
'some coarse channel files were not submitted',
'telescope_location is not set',
'some coarse channel files were not submitted']
uvtest.checkWarnings(mwa_uv.read_mwa_corr_fits, func_args=[[[set1], [set2]]],
func_kwargs={"axis": "freq"}, nwarnings=4, message=messages)
def test_ReadMWA_flags():
"""Test handling of flag files."""
mwa_uv = UVData()
subfiles = [filelist[0], filelist[1], filelist[3], filelist[4]]
messages = ['mwaf files submitted with use_cotter_flags=False',
'telescope_location is not set',
'some coarse channel files were not submitted']
uvtest.checkWarnings(mwa_uv.read_mwa_corr_fits, func_args=[subfiles],
nwarnings=3, message=messages)
del(mwa_uv)
mwa_uv = UVData()
with pytest.raises(NotImplementedError) as cm:
mwa_uv.read_mwa_corr_fits(subfiles, use_cotter_flags=True)
assert str(cm.value).startswith('reading in cotter flag files')
del(mwa_uv)
mwa_uv = UVData()
with pytest.raises(ValueError) as cm:
mwa_uv.read_mwa_corr_fits(subfiles[0:2], use_cotter_flags=True)
assert str(cm.value).startswith('no flag files submitted')
del(mwa_uv)
def test_multiple_coarse():
"""
Test two coarse channel files.
Read in MWA correlator files with two different orderings of the files
and check for object equality.
"""
order1 = [filelist[0:3]]
order2 = [filelist[0], filelist[2], filelist[1]]
mwa_uv1 = UVData()
mwa_uv2 = UVData()
messages = ['telescope_location is not set',
'coarse channels are not contiguous for this observation',
'some coarse channel files were not submitted']
uvtest.checkWarnings(mwa_uv1.read_mwa_corr_fits, func_args=[order1],
nwarnings=3, message=messages)
uvtest.checkWarnings(mwa_uv2.read_mwa_corr_fits, func_args=[order2],
nwarnings=3, message=messages)
assert mwa_uv1 == mwa_uv2
def test_fine_channels():
"""
Break read_mwa_corr_fits by submitting files with different fine channels.
Test that error is raised if files with different numbers of fine channels
are submitted.
"""
mwa_uv = UVData()
bad_fine = os.path.join(DATA_PATH, 'test/bad_gpubox06_01.fits')
with fits.open(filelist[2]) as mini6:
mini6[1].data = np.concatenate((mini6[1].data, mini6[1].data))
mini6.writeto(bad_fine)
with pytest.raises(ValueError) as cm:
mwa_uv.read_mwa_corr_fits([bad_fine, filelist[1]])
assert str(cm.value).startswith('files submitted have different fine')
del(mwa_uv)
@pytest.mark.parametrize("files,err_msg",
[([filelist[0]], "no data files submitted"),
([filelist[1]], "no metafits file submitted"),
([filelist[0], filelist[1], filelist[5]],
"multiple metafits files in filelist")])
def test_break_ReadMWAcorrFITS(files, err_msg):
"""Break read_mwa_corr_fits by submitting files incorrectly."""
mwa_uv = UVData()
with pytest.raises(ValueError) as cm:
mwa_uv.read_mwa_corr_fits(files)
assert str(cm.value).startswith(err_msg)
del(mwa_uv)
def test_file_extension():
"""
Break read_mwa_corr_fits by submitting file with the wrong extension.
Test that error is raised if a file with an extension that is not fits,
metafits, or mwaf is submitted.
"""
mwa_uv = UVData()
bad_ext = os.path.join(DATA_PATH, 'test/1131733552.meta')
with fits.open(filelist[0]) as meta:
meta.writeto(bad_ext)
with pytest.raises(ValueError) as cm:
mwa_uv.read_mwa_corr_fits(bad_ext)
assert str(cm.value).startswith('only fits, metafits, and mwaf files supported')
del(mwa_uv)
def test_diff_obs():
"""
Break read_mwa_corr_fits by submitting files from different observations.
Test that error is raised if files from different observations are
submitted in the same file list.
"""
mwa_uv = UVData()
bad_obs = os.path.join(DATA_PATH, 'test/bad2_gpubox06_01.fits')
with fits.open(filelist[2]) as mini6:
mini6[0].header['OBSID'] = '1131733555'
mini6.writeto(bad_obs)
with pytest.raises(ValueError) as cm:
mwa_uv.read_mwa_corr_fits([bad_obs, filelist[0], filelist[1]])
assert str(cm.value).startswith('files from different observations')
del(mwa_uv)
| 39.532468 | 131 | 0.669733 | 1,244 | 9,132 | 4.727492 | 0.192122 | 0.032307 | 0.035538 | 0.045911 | 0.537154 | 0.479 | 0.432409 | 0.379867 | 0.34739 | 0.318313 | 0 | 0.036044 | 0.228318 | 9,132 | 230 | 132 | 39.704348 | 0.798496 | 0.200613 | 0 | 0.441379 | 0 | 0.006897 | 0.21498 | 0.05195 | 0 | 0 | 0 | 0 | 0.068966 | 1 | 0.075862 | false | 0 | 0.048276 | 0 | 0.124138 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
027f6a32c22345063a52534c524af8338f8841f7 | 2,600 | py | Python | utils.py | reetawwsum/Character-Model | 5517157b8153930bfa3c7600ac99737b44584b4b | [
"MIT"
] | 1 | 2020-09-29T08:38:00.000Z | 2020-09-29T08:38:00.000Z | Character-Model/utils.py | kinshuk4/kaggle-solutions | 58000e48b4196ee0a07233eb3038d31732ca4040 | [
"MIT"
] | null | null | null | Character-Model/utils.py | kinshuk4/kaggle-solutions | 58000e48b4196ee0a07233eb3038d31732ca4040 | [
"MIT"
] | null | null | null | from __future__ import print_function
import os
import string
import zipfile
import numpy as np
import tensorflow as tf
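# Character vocabulary: id 0 represents the space character, ids 1..26 map to 'a'..'z'.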
def char2id(char):
first_letter = ord(string.ascii_lowercase[0])
if char in string.ascii_lowercase:
return ord(char) - first_letter + 1
elif char == ' ':
return 0
else:
print('Unexpected character: %s' % char)
return 0
def id2char(charid):
first_letter = ord(string.ascii_lowercase[0])
if charid > 0:
return chr(charid + first_letter - 1)
else:
return ' '
def logprob(prediction, label):
return np.sum(np.multiply(label, -np.log(prediction)))
class Dataset:
'''Load dataset'''
def __init__(self, config, dataset_type):
self.config = config
self.dataset_type = dataset_type
self.file_name = os.path.join(config.dataset_dir, config.dataset)
self.validation_size = config.validation_size
self.load_dataset()
def load_dataset(self):
self.load()
train_text, validation_text = self.split()
if self.dataset_type == 'train_dataset':
self.data = train_text
else:
self.data = validation_text
def load(self):
'''Reading dataset as a string'''
with zipfile.ZipFile(self.file_name) as f:
text = tf.compat.as_str(f.read(f.namelist()[0]))
self.text = text
def split(self):
validation_text = self.text[:self.validation_size]
train_text = self.text[self.validation_size:]
return train_text, validation_text
class BatchGenerator():
'''Generate batches'''
def __init__(self, config):
self.config = config
self.batch_size = config.batch_size
self.num_unrollings = config.num_unrollings
self.input_size = len(string.ascii_lowercase) + 1
self.batch_dataset_type = config.batch_dataset_type
self.load_dataset()
self.size = len(self.data)
assert self.size % self.batch_size == 0, 'Train size should be divisible by batch size'
segment = self.size / self.batch_size
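        # each of the batch_size cursors reads from its own segment of the text, so
        # successive batches stay contiguous within every row of the batch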
self.cursor = [offset * segment for offset in xrange(self.batch_size)]
def load_dataset(self):
dataset = Dataset(self.config, self.batch_dataset_type)
self.data = dataset.data
def sequence(self, position):
'''Generate a sequence from a cursor position'''
sequence = np.zeros(shape=(self.num_unrollings + 1, self.input_size), dtype=np.float)
for i in xrange(self.num_unrollings + 1):
sequence[i, char2id(self.data[self.cursor[position]])] = 1.0
self.cursor[position] = (self.cursor[position] + 1) % self.size
return sequence
def next(self):
'''Generate next batch from the data'''
batch = []
for position in xrange(self.batch_size):
batch.append(self.sequence(position))
return np.array(batch)
| 25.490196 | 89 | 0.725385 | 378 | 2,600 | 4.820106 | 0.251323 | 0.042261 | 0.035675 | 0.021954 | 0.119649 | 0.073546 | 0.040615 | 0.040615 | 0 | 0 | 0 | 0.008204 | 0.156154 | 2,600 | 102 | 90 | 25.490196 | 0.822242 | 0.051538 | 0 | 0.183099 | 0 | 0 | 0.034002 | 0 | 0 | 0 | 0 | 0 | 0.014085 | 1 | 0.15493 | false | 0 | 0.084507 | 0.014085 | 0.394366 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0280079b5ed9511998e32de875337a0d299f346a | 15,631 | py | Python | rhasspy/handler/kodi.py | monalbert/smarthome | b8ec2fe2ac238071796ca577ac742cdc5bae46d1 | [
"Apache-2.0"
] | null | null | null | rhasspy/handler/kodi.py | monalbert/smarthome | b8ec2fe2ac238071796ca577ac742cdc5bae46d1 | [
"Apache-2.0"
] | null | null | null | rhasspy/handler/kodi.py | monalbert/smarthome | b8ec2fe2ac238071796ca577ac742cdc5bae46d1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
'''
Copyright 2021 - Albert Montijn (montijnalbert@gmail.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
---------------------------------------------------------------------------
Programming is the result of learning from others and making errors.
A good programmer often follows the tips and tricks of better programmers.
The solution of a problem seldom leads to new or original code.
So any resemblance to already existing code is purely coincidental
'''
import datetime
import requests
import logging
import re
log = logging.getLogger(__name__)
class Kodi:
def __init__(self, url, path=""):
self.url = url+"/jsonrpc"
self.path = path
def do_post(self, data):
log.debug(f"Post data(url={self.url}:<{data}>")
try:
res = requests.post(self.url, data=data,
headers={"Content-Type": "application/json"})
            if res.status_code != 200:
                log.info("do_post(Url:[%s]\nResult:%s, text:[%s]"
                         % (self.url, res.status_code, res.text))
        except requests.exceptions.ConnectionError:
            log.warning(f"ConnectionError for url [{self.url}]")
return None
log.debug("Post Result:"+res.text)
return(res.json())
def get_whats_playing(self):
log.debug("get_whats_playing")
data = '{"jsonrpc":"2.0","method":"Player.GetItem","params":'\
+ '{"properties":["album", "artist", "genre", "title"]'\
+ ', "playerid": 0},"id":"itemData"}'
return self.do_post(data)
def stop_play(self):
data = '{"jsonrpc": "2.0", "method": "Player.Stop",'\
+ ' "params": { "playerid": 1 }, "id": 1}'
self.do_post(data)
def start_play(self):
data = '{"jsonrpc":"2.0", "id":1,"method":"Player.Open",'\
+ '"params":{"item":{"playlistid":0}}}'
self.do_post(data)
def pause_resume(self):
data = '{"jsonrpc": "2.0", "method": "Player.PlayPause",'\
+ ' "params": { "playerid": 0 }, "id": 1}'
self.do_post(data)
def next_track(self):
data = '{"jsonrpc": "2.0", "method": "Player.GoTo",'\
+ ' "params": { "playerid": 0 , "to":"next"}, "id": 1}'
self.do_post(data)
def previous_track(self):
data = '{"jsonrpc": "2.0", "method": "Player.GoTo",'\
+ ' "params": { "playerid": 0 , "to":"previous"}, "id": 1}'
self.do_post(data)
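        # the request is (presumably deliberately) sent twice: in Kodi, "previous" first
        # jumps back to the start of the current track, so a second call is needed to
        # actually reach the previous track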
data = '{"jsonrpc": "2.0", "method": "Player.GoTo",'\
+ ' "params": { "playerid": 0 , "to":"previous"}, "id": 1}'
self.do_post(data)
def volume(self, volume):
data = '{"jsonrpc":"2.0", "method":"Application.SetVolume",'\
+ '"id":1,"params":{"volume":'+str(volume)+'}}'
self.do_post(data)
def clear_playlist(self):
data = '{"jsonrpc":"2.0", "id":1,"method":"Playlist.Clear",'\
+ '"params":{"playlistid":0}}'
self.do_post(data)
def add_album_to_playlist(self, albumid):
data = '{"jsonrpc":"2.0", "id":1,"method":"Playlist.Add","params":{'\
+ '"playlistid":0, "item":{"albumid":'+str(albumid)+'}}}'
self.do_post(data)
def add_song_to_playlist(self, songid):
data = '{"jsonrpc":"2.0", "id":1,"method":"Playlist.Add","params":{'\
+ '"playlistid":0, "item":{"songid":'+str(songid)+'}}}'
self.do_post(data)
def get_albums(self,artist="", album="", genre=""):
log.debug("get_albums")
data = '{"jsonrpc":"2.0","method":"AudioLibrary.GetAlbums"'\
+ ',"params":{"properties":["artist","genre"]'
if artist != "" or album != "" or genre != "":
data = data + ',"filter":{"and":[{"field":"artist", "operator":'\
+ '"contains", "value":"'+artist+'"}'\
+ ',{"field":"album", "operator":'\
+ '"contains", "value":"'+album+'"}'\
+ ',{"field": "genre", "operator":'\
+ '"contains","value": "'+genre+'"}]}'
data = data + ',"sort":{"order":"ascending","method":"album"}}'\
+ ',"id":"libAlbums"}'
res = self.do_post(data)
if "result" in res and "albums" in res["result"]:
albums = res["result"]["albums"]
else:
albums = []
return albums
def get_songs(self, artist="", composer="", title="", selection="", genre=""):
log.debug(f"get_songs artist={artist}, "\
+ f"composer={composer}, title={title}")
data = '{"jsonrpc": "2.0", "method": "AudioLibrary.GetSongs",'\
+ '"params": { "limits": { "start" : 0, "end": 50000 },'\
+ '"properties": ["displayartist", "displaycomposer"],'\
+ '"filter":{"and":[ '
comma = ""
if artist != "":
data = data + comma + '{"field": "artist", "operator": "contains",'\
+ '"value": "'+artist+'"}'
comma = ","
if composer != "":
data = data + comma + '{"field": "artist", "operator": "contains",'\
+ '"value": "'+composer+'"}'
comma = ","
if title != "":
data = data + comma + '{"field": "title", "operator": "contains",'\
+ '"value": "'+title+'"}'
comma = ","
if selection != "":
for select in selection.split(","):
data = data + comma + '{"field": "title", "operator": "contains",'\
+ '"value": "'+select+'"}'
comma = ","
if genre != "":
data = data + ',{"field": "genre", "operator": "contains",'\
+ '"value": "'+genre+'"}'
data = data + ']}'
# data = data + ',"sort": { "order": "ascending", "method": "title", "ignorearticle": true }'
data = data + '},"id": "libSongs"}'
res = self.do_post(data)
log.debug(f"get_songs:Found:{res['result']['limits']['end']}")
if "result" in res and "songs" in res["result"]:
songs = res["result"]["songs"]
else:
songs = []
return songs
def play_albums(self, albums):
log.debug(f"play_albums:albums:{albums}")
self.stop_play()
self.clear_playlist()
for album in albums:
log.debug(f"album={album}")
self.add_album_to_playlist(album["albumid"])
self.start_play()
def play_songs(self, songs):
log.debug(f"play_songs:gotSongs:{songs}")
self.stop_play()
self.clear_playlist()
for song in songs:
log.debug(f"On playlist: {song['songid']},label={song['label']}" \
+ f"van {song['displaycomposer']}")
self.add_song_to_playlist(song["songid"])
self.start_play()
#
# ========================================================================
# Methods to generate slots-files for Rhasspy
# ========================================================================
# Clean Albumtitle to match with filter
def clean_albumtitle_filter(self,albumtitle):
cleaned = albumtitle.lower()
# skip leading numbers followed by - with spaces
cleaned = re.sub('^[0-9 ]*-* *','',cleaned)
# remove [ until the end : [] give problems in kaldi (rhasspy)
cleaned = re.sub('\[.*','',cleaned)
# remove ( until the end : () give problems in kaldi (rhasspy)
cleaned = re.sub('\(.*','',cleaned)
# remove leading and trailing spaces
cleaned = cleaned.strip()
return cleaned
# Clean albumtitle to match with speech
def clean_albumtitle_speech(self,albumtitle):
cleaned = self.clean_albumtitle_filter(albumtitle)
# No.4 1
cleaned = re.sub('[^a-z0-9]',' ',cleaned)
cleaned = re.sub(' *',' ',cleaned)
cleaned = cleaned.strip()
return cleaned
# Clean Tracktitle to match with filter
def clean_tracktitle_filter(self,tracktitle):
cleaned = tracktitle.lower()
# skip leading numbers followed by - with spaces
cleaned = re.sub('^[0-9 ]*-* *','',cleaned)
# remove leading composername followed by :
cleaned = re.sub('^[a-z]*:','',cleaned)
# keep only part before - or :
cleaned = re.sub('[-:].*','',cleaned)
# remove [ until the end
cleaned = re.sub('\[.*','',cleaned)
# remove ( until the end
cleaned = re.sub('\(.*','',cleaned) # () give problems in kaldi (rhasspy)
# remove Op. 23
cleaned = re.sub('^[a-z]+[. ]*[0-9]+ *[0-9]*','',cleaned)
cleaned = re.sub(' in (bes|cis|des|fis|ges|as|es|[a-g])* *(sharp|flat|moll|dur)* *(majeur|mineur|major|minor|maj|min|klein|groot)* *$',' ',cleaned)
cleaned = re.sub('(bes|cis|des|fis|ges|as|es|[a-g]) (sharp|flat|moll|dur|majeur|mineur|major|minor|maj|min|klein|groot)$',' ',cleaned)
cleaned = re.sub('([0-9]) *[0-9a-z]\.* .*$','\\1',cleaned)
cleaned = re.sub('([0-9]) *[0-9.a-z]$','\\1',cleaned)
cleaned = re.sub('[0-9]\..*','',cleaned) # . after number gives compile error
cleaned = re.sub('.*contrapunctus.*','contrapunctus',cleaned)
cleaned = re.sub('canto ostinato.*','canto ostinato',cleaned)
cleaned = re.sub('goldberg variations.*','goldberg variations',cleaned)
# remove leading and trailing spaces
cleaned = cleaned.strip()
return cleaned
# Clean tracktitle to match with speech
def clean_tracktitle_speech(self,tracktitle):
cleaned = self.clean_tracktitle_filter(tracktitle)
# No.4 1
cleaned = re.sub('( no.\d) [1-9x].*','\\1',cleaned)
cleaned = re.sub('op[. ]+\d*[-/0-9]*', '', cleaned)
cleaned = re.sub('violin concerto.*','vioolconcert',cleaned)
cleaned = re.sub('\.\.\.',',',cleaned)
cleaned = re.sub('\[[^\]]*\]',' ',cleaned)
cleaned = re.sub('["\'!]',' ',cleaned)
cleaned = re.sub('[({].*[)}]',' ',cleaned)
cleaned = re.sub(',',' ',cleaned)
cleaned = re.sub('no\.','nummer ',cleaned)
cleaned = re.sub('nr\.','nummer ',cleaned)
cleaned = re.sub('&',' en ',cleaned)
cleaned = re.sub(' *',' ',cleaned)
cleaned = cleaned.strip()
return cleaned
def add_to_dict(self,slot_entries,new_speech,new_filter):
if new_speech in slot_entries:
old_filter = slot_entries[new_speech]
if new_filter in old_filter:
my_filter = new_filter
elif old_filter in new_filter:
my_filter = old_filter
            else:
                # keep the longest common prefix so the stored filter
                # still matches both titles
                while not new_filter.startswith(old_filter):
                    old_filter = old_filter[:-1]
                my_filter = old_filter
else:
my_filter = new_filter
slot_entries[new_speech] = my_filter
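    # Rhasspy slot files: one entry per line; "(spoken form):(substituted value)"
    # is written when the spoken text differs from the value passed on.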
def save_slots(self,slots_dict,filename):
fslots = open(self.path+filename, "w+")
for speech,title in sorted(slots_dict.items()):
if title.startswith(speech):
slotstring = speech
else:
slotstring = (f"({speech}):({title})")
fslots.write(slotstring+'\n')
fslots.close()
def create_slots_albums(self,albums):
albumslots = {}
for album in albums:
speech_title = self.clean_albumtitle_speech(album["label"])
filter_title = self.clean_albumtitle_filter(album["label"])
if filter_title == "" or speech_title == "":
continue
self.add_to_dict(albumslots,speech_title,filter_title)
self.save_slots(albumslots,"albums")
def create_slots_tracks(self,tracks):
trackslots = {}
for track in tracks:
speech_title = self.clean_tracktitle_speech(track["label"])
filter_title = self.clean_tracktitle_filter(track["label"])
if filter_title == "" or speech_title == "":
continue
self.add_to_dict(trackslots,speech_title,filter_title)
self.save_slots(trackslots,"tracks")
def create_slots_composers(self,tracks):
composerset = set()
for track in tracks:
composer = re.sub('[;,].*','',track["displaycomposer"])
composerset.add(composer.lower())
fcomposers = open(self.path+"composers", "w+")
for composer in sorted(composerset):
fcomposers.write(composer+'\n')
fcomposers.close()
def create_slots_artists(self,albums):
artistset = set()
for album in albums:
for artist in album["artist"]:
artistset.add(artist.lower())
fartists = open(self.path+"artists", "w+")
for artist in sorted(artistset):
fartists.write(artist+'\n')
fartists.close()
def create_slots_genres(self,albums):
genreset = set()
for album in albums:
for genre in album["genre"]:
genreset.add(genre.lower())
fgenres = open(self.path+"genres", "w+")
for genre in sorted(genreset):
fgenres.write(genre+'\n')
fgenres.close()
def create_slots_files(self):
tracks = self.get_songs(genre="Klassiek")
if len(tracks) > 0:
self.create_slots_tracks(tracks)
self.create_slots_composers(tracks)
albums = self.get_albums()
if len(albums) > 0:
self.create_slots_artists(albums)
self.create_slots_albums(albums)
self.create_slots_genres(albums)
if __name__ == '__main__':
'''
getallalbums =
{"jsonrpc":"2.0","method":"AudioLibrary.GetAlbums","params":{"limits": { "start" : 0, "end": 5000 },"properties":["artist","genre"],
"sort":{"order":"ascending","method":"album"}},"id":"libAlbums"}
getalltracks =
{"jsonrpc": "2.0", "method": "AudioLibrary.GetSongs","params": { "limits": { "start" : 0, "end": 50000 },"properties": ["displayartist", "displaycomposer"],
"filter":{"field": "genre", "operator": "contains","value": "klassiek"}},
"id": "libSongs"}
# data = data + ',"sort": { "order": "ascending", "method": "title", "ignorearticle": true }'
'''
logging.basicConfig(filename='kodi.log',
level=logging.DEBUG,
format='%(asctime)s %(levelname)-4.4s %(module)-14.14s - %(message)s',
datefmt='%Y%m%d %H:%M:%S')
    kodi_url = "http://192.168.0.5:8080"  # base URL; Kodi.__init__ appends "/jsonrpc"
kodi = Kodi(kodi_url)
# #kodi.add_album_to_playlist("217")
# kodi.pause_resume()
import sys
if len(sys.argv) > 3:
matchtitle = sys.argv[3]
else: matchtitle = ""
if len(sys.argv) > 2:
artist = sys.argv[2]
else: artist = ""
if len(sys.argv) > 1:
composer = sys.argv[1]
else:
kodi.create_slots_files()
# End Of File
| 39.0775 | 160 | 0.53202 | 1,714 | 15,631 | 4.751459 | 0.2007 | 0.019646 | 0.045678 | 0.044327 | 0.40975 | 0.352284 | 0.314096 | 0.25528 | 0.209602 | 0.209602 | 0 | 0.012524 | 0.279765 | 15,631 | 399 | 161 | 39.175439 | 0.710872 | 0.128271 | 0 | 0.243816 | 0 | 0.007067 | 0.246928 | 0.074735 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09894 | false | 0 | 0.017668 | 0 | 0.14841 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |