| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
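Each preview record below spans all of the columns above: the raw file text sits in `content`, and the remaining fields hold the per-file metadata and quality-signal values. As a minimal sketch of how rows with this schema could be filtered on the quality signals (assuming the split has been exported to a local Parquet file; the file name, the use of pandas, and the threshold values are illustrative assumptions, not part of the dataset description):

```python
import pandas as pd

# Assumption: the preview rows were exported to a local Parquet file.
# "code_rows.parquet" is a placeholder path, not part of the original document.
df = pd.read_parquet("code_rows.parquet")

# Keep Python files that look like ordinary source code: mostly alphanumeric
# characters, few duplicated lines, and no single dominant repeated 2-gram.
# The cutoffs are illustrative, not recommended defaults.
mask = (
    (df["lang"] == "Python")
    & (df["alphanum_fraction"] > 0.5)
    & (df["qsc_code_frac_lines_dupe_lines_quality_signal"] < 0.2)
    & (df["qsc_code_frac_chars_top_2grams_quality_signal"] < 0.1)
)
print(f"kept {mask.sum()} of {len(df)} rows")
```

The raw preview records follow.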
e67e2b8d5cc36e4de07019122375c2f2fc7e621b
| 765
|
py
|
Python
|
ucs-python/create_ucs_sp_template.py
|
movinalot/ucs
|
dc0d37784592d6d78f46efee40c86b6f7ac928b4
|
[
"MIT"
] | null | null | null |
ucs-python/create_ucs_sp_template.py
|
movinalot/ucs
|
dc0d37784592d6d78f46efee40c86b6f7ac928b4
|
[
"MIT"
] | null | null | null |
ucs-python/create_ucs_sp_template.py
|
movinalot/ucs
|
dc0d37784592d6d78f46efee40c86b6f7ac928b4
|
[
"MIT"
] | 2
|
2020-06-17T15:49:37.000Z
|
2021-01-28T07:21:21.000Z
|
"""
create_ucs_sp_template.py
Purpose:
UCS Manager Create a UCS Service Profile Template
Author:
John McDonough (jomcdono@cisco.com) github: (@movinalot)
Cisco Systems, Inc.
"""
from ucsmsdk.ucshandle import UcsHandle
from ucsmsdk.mometa.ls.LsServer import LsServer
from ucsmsdk.mometa.org.OrgOrg import OrgOrg
HANDLE = UcsHandle(
"sandbox-ucsm1.cisco.com",
"admin",
"password"
)
HANDLE.login()
ORG_ORG = OrgOrg(
parent_mo_or_dn='org-root',
name="devnet",
)
HANDLE.add_mo(ORG_ORG, modify_present=True)
HANDLE.commit()
SP_TEMPLATE = LsServer(
parent_mo_or_dn='org-root/org-devnet',
name="devcore_template",
type="updating-template"
)
HANDLE.add_mo(SP_TEMPLATE, modify_present=True)
HANDLE.commit()
HANDLE.logout()
| 19.125
| 60
| 0.732026
| 104
| 765
| 5.211538
| 0.490385
| 0.055351
| 0.062731
| 0.04428
| 0.177122
| 0.070111
| 0
| 0
| 0
| 0
| 0
| 0.001534
| 0.147712
| 765
| 39
| 61
| 19.615385
| 0.829755
| 0.237909
| 0
| 0.086957
| 0
| 0
| 0.177391
| 0.04
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.043478
| 0.130435
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e67f72e9b27124ae9fe286846ee45d52e71dc993
| 4,105
|
py
|
Python
|
epab/core/config.py
|
132nd-etcher/epab
|
5226d3a36580f8cc50cf5dcac426adb1350a2c9b
|
[
"MIT"
] | 2
|
2018-12-13T06:49:10.000Z
|
2018-12-13T07:37:49.000Z
|
epab/core/config.py
|
etcher-be/epab
|
5226d3a36580f8cc50cf5dcac426adb1350a2c9b
|
[
"MIT"
] | 109
|
2018-08-22T04:25:56.000Z
|
2019-10-17T05:10:21.000Z
|
epab/core/config.py
|
etcher-be/epab
|
5226d3a36580f8cc50cf5dcac426adb1350a2c9b
|
[
"MIT"
] | 1
|
2018-02-25T05:53:18.000Z
|
2018-02-25T05:53:18.000Z
|
# coding=utf-8
"""
Handles EPAB's config file
"""
import logging
import pathlib
import elib_config
CHANGELOG_DISABLE = elib_config.ConfigValueBool(
'changelog', 'disable', description='Disable changelog building', default=False
)
CHANGELOG_FILE_PATH = elib_config.ConfigValuePath(
'changelog', 'file_path', description='Path to changelog file', default='CHANGELOG.md'
)
CHANGELOG_FILE_PATH.must_be_file()
TEST_RUNNER_OPTIONS = elib_config.ConfigValueString(
'test', 'runner_options', description='Additional options for test run', default=''
)
TEST_DURATION_COUNT = elib_config.ConfigValueInteger(
'test', 'duration_count', description='Amount of "slow" tests to show', default=10
)
TEST_DURATION_COUNT.set_limits(min_=0, max_=50)
TEST_TARGET = elib_config.ConfigValueString(
'test', 'target', description='Target of pytest', default='test'
)
TEST_COVERAGE_FAIL_UNDER = elib_config.ConfigValueInteger(
'test', 'coverage_fail_under', description='Minimal coverage to pass tests', default=20
)
TEST_COVERAGE_FAIL_UNDER.set_limits(min_=0, max_=100)
TEST_PYTEST_TIMEOUT = elib_config.ConfigValueInteger(
'test', 'timeout', description='Timeout in seconds for pytest runner', default=300
)
TEST_PYTEST_TIMEOUT.set_limits(min_=0, max_=3600)
LINT_LINE_LENGTH = elib_config.ConfigValueInteger(
'lint', 'line_length', description='Linter max line width', default=120
)
LINT_LINE_LENGTH.set_limits(min_=0, max_=500)
PACKAGE_NAME = elib_config.ConfigValueString(
'package_name', description='Package name'
)
FREEZE_ENTRY_POINT = elib_config.ConfigValueString(
'freeze', 'entry_point', description='Main entry point for pyinstaller', default=''
)
FREEZE_DATA_FILES = elib_config.ConfigValueList(
'freeze', 'data_files', description='PyInstaller data-files list', element_type=str, default=[]
)
DOC_REPO = elib_config.ConfigValueString(
'doc', 'repo', description='Documentation repository on Github', default=''
)
DOC_FOLDER = elib_config.ConfigValuePath(
'doc', 'folder', description='Local documentation directory', default='./doc'
)
DOC_FOLDER.must_be_dir()
QUIET = elib_config.ConfigValueBool(
'quiet', description='Less console output', default=False
)
VERBOSE = elib_config.ConfigValueBool(
'verbose', description='More console output', default=False
)
TEST_AV_RUNNER_OPTIONS = elib_config.ConfigValueString(
'appveyor', 'test_runner_options', description='Additional command line options for tests run on AV',
default='--long'
)
ARTIFACTS = elib_config.ConfigValueList(
'appveyor', 'artifacts', description='List of artifacts for Appveyor', element_type=str, default=[]
)
FLAKE8_EXCLUDE = elib_config.ConfigValueString(
'lint', 'flake8_exclude', description='List of comma separated files for flake8 to exclude', default=''
)
MYPY_ARGS = elib_config.ConfigValueString(
'lint', 'mypy_args', description='Additional MyPy arguments', default=''
)
QT_RES_SRC = elib_config.ConfigValueString(
'qt', 'res_src', description='Qt resource file (.qrc) location', default=''
)
QT_RES_TGT = elib_config.ConfigValueString(
'qt', 'res_tgt', description='Compiled Qt resource file (.py) target location', default=''
)
UPLOAD_TO_TWINE = elib_config.ConfigValueBool(
'twine', 'upload', description='Upload package to Twine after build',
default=True,
)
MAKE_GRAPH = elib_config.ConfigValueBool(
'graph', 'make',
description='Generate graphs using PyReverse',
default=True,
)
def setup_config(epab_version: str):
"""
Set up elib_config package
:param epab_version: installed version of EPAB as as string
"""
logger = logging.getLogger('EPAB')
logger.debug('setting up config')
elib_config.ELIBConfig.setup(
app_name='EPAB',
app_version=epab_version,
config_file_path='pyproject.toml',
config_sep_str='__',
root_path=['tool', 'epab']
)
elib_config.write_example_config('pyproject.toml.example')
if not pathlib.Path('pyproject.toml').exists():
raise FileNotFoundError('pyproject.toml')
elib_config.validate_config()
| 34.495798
| 107
| 0.747138
| 496
| 4,105
| 5.935484
| 0.316532
| 0.095109
| 0.091712
| 0.017663
| 0.096467
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008422
| 0.132278
| 4,105
| 118
| 108
| 34.788136
| 0.81808
| 0.031181
| 0
| 0.020833
| 0
| 0
| 0.285316
| 0.00557
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010417
| false
| 0.010417
| 0.03125
| 0
| 0.041667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e67fead92c8110015c821a38623a6b98e6c63185
| 5,793
|
py
|
Python
|
create_flask_app.py
|
Creativity-Hub/create_flask_app
|
4c4e2c7360c7773f6f5e3d2fd30e310777650f57
|
[
"MIT"
] | 2
|
2020-08-05T04:33:20.000Z
|
2020-08-06T23:03:40.000Z
|
create_flask_app.py
|
Creativity-Hub/create_flask_app
|
4c4e2c7360c7773f6f5e3d2fd30e310777650f57
|
[
"MIT"
] | null | null | null |
create_flask_app.py
|
Creativity-Hub/create_flask_app
|
4c4e2c7360c7773f6f5e3d2fd30e310777650f57
|
[
"MIT"
] | null | null | null |
import os
import argparse
def check_for_pkg(pkg):
try:
exec("import " + pkg)
except:
os.system("pip3 install --user " + pkg)
def create_flask_app(app='flask_app', threading=False, wsgiserver=False, unwanted_warnings=False, logging=False, further_logging=False, site_endpoints=None, endpoints=None, request_endpoints=None):
check_for_pkg('flask')
lines = ["from flask import Flask, send_from_directory","import codecs", "import os"]
params = {
'app': app,
'threading': threading,
'wsgiserver': wsgiserver,
'unwanted_warnings': unwanted_warnings,
'logging': logging,
'further_logging': further_logging,
'site_endpoints': site_endpoints,
'endpoints': endpoints,
'request_endpoints': request_endpoints
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
for param in params.keys():
if 'endpoints' in param:
parser.add_argument('-'+param[0].lower(), '--'+param.lower(), nargs='+', help='', required=False)
else:
parser.add_argument('-'+param[0].lower(), '--'+param.lower(), help='', required=False)
args = vars(parser.parse_args())
for param in args.keys():
if 'request' in param and len(args[param]) % 3 != 0:
print('Request method endpoint format invalid, enter "Method" "Endpoint" "Parameter"')
if param == 'app':
if args[param] != None:
params[param] = args[param]
else:
params[param] = args[param]
index = "<!DOCTYPE html>\n<html>\n<head>\n\t<title>endpoint</title>\n\t<link href='static/style.css' rel='stylesheet'>\n</head>\n<body>\n\n<script src='static/script.js'></script>\n</body>\n</html>"
project = params['app']
if not os.path.exists(project):
os.mkdir(project)
if not os.path.exists(project+'/web'):
os.mkdir(project+'/web')
if not os.path.exists(project+'/static'):
os.mkdir(project+'/static')
os.system('touch '+project+'/static/style.css')
os.system('touch '+project+'/static/script.js')
indexFile = open(project+"/web/index.html","w+")
indexFile.write(index.replace('endpoint', project))
indexFile.close()
f = open(project+'/'+project+".py","w+")
headers = {
'threading': ["", "#Threading", "from threading import Thread"],
'wsgiserver': ["", "#WSGIServer", "from gevent.pywsgi import WSGIServer"],
'unwanted_warnings': ["", "#Disable Warnings", "import warnings", "warnings.filterwarnings('ignore')"],
'logging': ["", "#Logging", "import logging", "", "#Logging configuration set to debug on debug.log file", "logging.basicConfig(filename='debug.log',level=logging.DEBUG)", "logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')"],
'further_logging': ["", "#Disable unneeded dependencies logging", "werkzeugLog = logging.getLogger('werkzeug')", "werkzeugLog.disabled = True", "requestsLog = logging.getLogger('urllib3.connectionpool')", "requestsLog.disabled = True"],
}
for param in headers.keys():
if params[param]:
for line in headers[param]:
lines.append(line)
lines.append("\ndef run():")
if params['wsgiserver']:
check_for_pkg('gevent')
lines.append("\t#WSGIServer")
lines.append("\tWSGIServer(('', 8081), app).serve_forever()")
else:
lines.append("\tapp.run(host='0.0.0.0',port=8081)")
if params['threading']:
for line in ["", "#Thread", "def keep_alive():", "\tt = Thread(target=run)", "\tt.start()"]:
lines.append(line)
for line in ["", "app = Flask(__name__)", "", "@app.route('/')", "def main():", "\t#index.html", "\treturn codecs.open('web/index.html', 'r', 'utf-8').read()", "", "@app.route('/favicon.ico')", "def favicon():", "\treturn send_from_directory(os.path.join(app.root_path, 'static'),'favicon.ico', mimetype='image/vnd.microsoft.icon')"]:
lines.append(line)
site_endpoints = params['site_endpoints']
if site_endpoints is not None:
for ep in site_endpoints:
print('Endpoint: ' + ep)
tp = ["\n@app.route('/endpoint')", "def endpoint():", "\t#endpoint.html", "\treturn codecs.open('web/endpoint.html', 'r', 'utf-8').read()"]
for line in tp:
lines.append(line.replace('endpoint', ep))
epFile = open(project+"/web/endpoint.html".replace('endpoint', ep),"w+")
epFile.write(index.replace('endpoint', ep).replace('style.css', ep+'.css').replace('script.js', ep+'.js'))
epFile.close()
os.system('touch '+project+'/static/'+ep+'.css')
os.system('touch '+project+'/static/'+ep+'.js')
endpoints = params['endpoints']
if endpoints is not None:
for ep in endpoints:
print('Endpoint: ' + ep)
tp = ["\n@app.route('/endpoint')", "def endpoint():", "\t#endpoint.html", "\treturn endpoint_route"]
for line in tp:
lines.append(line.replace('endpoint', ep))
request_endpoints = params['request_endpoints']
print(request_endpoints)
request_method = request_endpoints[0]
if request_endpoints is not None:
request_endpoints = [request_endpoints[i * 3:(i + 1) * 3] for i in range((len(request_endpoints) + 3 - 1) // 3)]
for request_method, ep, request_param in request_endpoints:
print('Endpoint: ' + ep, '\nMethod: ' + request_method, '\nParameter: ' + request_param)
tp = ["\n@app.route('/"+ep+"/<"+request_param+">', methods=['"+request_method+"'])", "def "+ep+"("+request_param+"):", "\t#"+request_method+" method endpoint", "\treturn do_something("+request_param+")"]
for line in tp:
lines.append(line)
lines.append("\nif __name__ == '__main__':")
if params['wsgiserver']:
lines.append("\t#Run server forever")
lines.append("\tkeep_alive()")
else:
lines.append("\t#Run server")
lines.append("\trun()")
for line in lines:
f.write(line+'\n')
f.close()
print('Created ' + project + ' app successfully.')
for param in params.keys():
if params[param] and param != 'app':
print(param, params[param])
os.system('open '+ project)
if __name__ == '__main__':
create_flask_app()
| 39.141892
| 335
| 0.666494
| 763
| 5,793
| 4.943644
| 0.235911
| 0.043743
| 0.016702
| 0.021209
| 0.200954
| 0.156416
| 0.102333
| 0.082185
| 0.062036
| 0.062036
| 0
| 0.005339
| 0.12705
| 5,793
| 147
| 336
| 39.408163
| 0.740558
| 0
| 0
| 0.191667
| 0
| 0.041667
| 0.399793
| 0.113758
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.066667
| 0
| 0.083333
| 0.058333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e680d5976ff70e83c58f67740990b745a8b0973b
| 1,835
|
py
|
Python
|
examples/flaskr/flaskr/__init__.py
|
Flared/flask-sqlalchemy
|
e73abd51d957a4436bca6b5eadbf5d63771cf5ef
|
[
"BSD-3-Clause"
] | 2
|
2020-04-09T15:28:49.000Z
|
2020-04-18T02:55:16.000Z
|
examples/flaskr/flaskr/__init__.py
|
Flared/flask-sqlalchemy
|
e73abd51d957a4436bca6b5eadbf5d63771cf5ef
|
[
"BSD-3-Clause"
] | null | null | null |
examples/flaskr/flaskr/__init__.py
|
Flared/flask-sqlalchemy
|
e73abd51d957a4436bca6b5eadbf5d63771cf5ef
|
[
"BSD-3-Clause"
] | 1
|
2020-06-19T11:49:30.000Z
|
2020-06-19T11:49:30.000Z
|
import os
import click
from flask import Flask
from flask.cli import with_appcontext
from flask_sqlalchemy import SQLAlchemy
__version__ = (1, 0, 0, "dev")
db = SQLAlchemy()
def create_app(test_config=None):
    """Create and configure an instance of the Flask application."""
    app = Flask(__name__, instance_relative_config=True)
    # some deploy systems set the database url in the environ
    db_url = os.environ.get("DATABASE_URL")
    if db_url is None:
        # default to a sqlite database in the instance folder
        db_url = "sqlite:///" + os.path.join(app.instance_path, "flaskr.sqlite")
        # ensure the instance folder exists
        os.makedirs(app.instance_path, exist_ok=True)
    app.config.from_mapping(
        # default secret that should be overridden in environ or config
        SECRET_KEY=os.environ.get("SECRET_KEY", "dev"),
        SQLALCHEMY_DATABASE_URI=db_url,
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
    )
    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile("config.py", silent=True)
    else:
        # load the test config if passed in
        app.config.update(test_config)
    # initialize Flask-SQLAlchemy and the init-db command
    db.init_app(app)
    app.cli.add_command(init_db_command)
    # apply the blueprints to the app
    from flaskr import auth, blog
    app.register_blueprint(auth.bp)
    app.register_blueprint(blog.bp)
    # make "index" point at "/", which is handled by "blog.index"
    app.add_url_rule("/", endpoint="index")
    return app
def init_db():
    db.drop_all()
    db.create_all()
@click.command("init-db")
@with_appcontext
def init_db_command():
    """Clear existing data and create new tables."""
    init_db()
    click.echo("Initialized the database.")
| 27.38806
| 80
| 0.689918
| 261
| 1,835
| 4.670498
| 0.409962
| 0.029532
| 0.031993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00208
| 0.214169
| 1,835
| 66
| 81
| 27.80303
| 0.843273
| 0.294823
| 0
| 0
| 0
| 0
| 0.076863
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.162162
| 0
| 0.27027
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6821c09b4a2b0ae38dad98719d218377bec1dfe
| 1,516
|
py
|
Python
|
conversions/decimal_to_binary.py
|
smukk9/Python
|
5f4da5d616926dbe77ece828986b8d19c7d65cb5
|
[
"MIT"
] | 6
|
2020-06-23T11:56:55.000Z
|
2021-10-03T17:21:34.000Z
|
conversions/decimal_to_binary.py
|
smukk9/Python
|
5f4da5d616926dbe77ece828986b8d19c7d65cb5
|
[
"MIT"
] | 3
|
2020-06-08T07:03:15.000Z
|
2020-06-08T08:41:22.000Z
|
conversions/decimal_to_binary.py
|
smukk9/Python
|
5f4da5d616926dbe77ece828986b8d19c7d65cb5
|
[
"MIT"
] | 2
|
2020-06-26T09:16:11.000Z
|
2020-07-01T08:55:48.000Z
|
"""Convert a Decimal Number to a Binary Number."""
def decimal_to_binary(num: int) -> str:
"""
Convert a Integer Decimal Number to a Binary Number as str.
>>> decimal_to_binary(0)
'0b0'
>>> decimal_to_binary(2)
'0b10'
>>> decimal_to_binary(7)
'0b111'
>>> decimal_to_binary(35)
'0b100011'
>>> # negatives work too
>>> decimal_to_binary(-2)
'-0b10'
>>> # other floats will error
>>> decimal_to_binary(16.16) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: 'float' object cannot be interpreted as an integer
>>> # strings will error as well
>>> decimal_to_binary('0xfffff') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: 'str' object cannot be interpreted as an integer
"""
if type(num) == float:
raise TypeError("'float' object cannot be interpreted as an integer")
if type(num) == str:
raise TypeError("'str' object cannot be interpreted as an integer")
if num == 0:
return "0b0"
negative = False
if num < 0:
negative = True
num = -num
binary = []
while num > 0:
binary.insert(0, num % 2)
num >>= 1
if negative:
return "-0b" + "".join(str(e) for e in binary)
return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25.266667
| 77
| 0.558047
| 184
| 1,516
| 4.467391
| 0.347826
| 0.087591
| 0.145985
| 0.121655
| 0.549878
| 0.501217
| 0.43309
| 0.43309
| 0.3309
| 0.184915
| 0
| 0.038911
| 0.3219
| 1,516
| 59
| 78
| 25.694915
| 0.7607
| 0.437335
| 0
| 0
| 0
| 0
| 0.172727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e684146ff5ca787d26fd1c2feebd83d974744890
| 1,725
|
py
|
Python
|
algorithms_keeper/parser/rules/use_fstring.py
|
Fongeme/algorithms-keeper
|
ea80d9342b4d2efd246a6bc409889ed780accf08
|
[
"MIT"
] | 50
|
2021-02-27T04:13:11.000Z
|
2022-03-29T04:34:01.000Z
|
algorithms_keeper/parser/rules/use_fstring.py
|
dedsec-9/algorithms-keeper
|
0d98e4e24e239524c48d9eab19c493ac288ecf83
|
[
"MIT"
] | 52
|
2021-08-09T22:40:20.000Z
|
2022-03-07T16:56:36.000Z
|
algorithms_keeper/parser/rules/use_fstring.py
|
dedsec-9/algorithms-keeper
|
0d98e4e24e239524c48d9eab19c493ac288ecf83
|
[
"MIT"
] | 22
|
2021-04-28T06:56:27.000Z
|
2022-03-13T07:27:45.000Z
|
import libcst as cst
import libcst.matchers as m
from fixit import CstLintRule
from fixit import InvalidTestCase as Invalid
from fixit import ValidTestCase as Valid
class UseFstringRule(CstLintRule):
    MESSAGE: str = (
        "As mentioned in the [Contributing Guidelines]"
        + "(https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md), "
        + "please do not use printf style formatting or `str.format()`. "
        + "Use [f-string](https://realpython.com/python-f-strings/) instead to be "
        + "more readable and efficient."
    )
    VALID = [
        Valid("assigned='string'; f'testing {assigned}'"),
        Valid("'simple string'"),
        Valid("'concatenated' + 'string'"),
        Valid("b'bytes %s' % 'string'.encode('utf-8')"),
    ]
    INVALID = [
        Invalid("'hello, {name}'.format(name='you')"),
        Invalid("'hello, %s' % 'you'"),
        Invalid("r'raw string value=%s' % val"),
    ]
    def visit_Call(self, node: cst.Call) -> None:
        if m.matches(
            node,
            m.Call(
                func=m.Attribute(value=m.SimpleString(), attr=m.Name(value="format"))
            ),
        ):
            self.report(node)
    def visit_BinaryOperation(self, node: cst.BinaryOperation) -> None:
        if (
            m.matches(
                node, m.BinaryOperation(left=m.SimpleString(), operator=m.Modulo())
            )
            # SimpleString can be bytes and fstring don't support bytes.
            # https://www.python.org/dev/peps/pep-0498/#no-binary-f-strings
            and isinstance(
                cst.ensure_type(node.left, cst.SimpleString).evaluated_value, str
            )
        ):
            self.report(node)
| 33.173077
| 85
| 0.576232
| 195
| 1,725
| 5.076923
| 0.517949
| 0.027273
| 0.045455
| 0.028283
| 0.038384
| 0.038384
| 0
| 0
| 0
| 0
| 0
| 0.004045
| 0.283478
| 1,725
| 51
| 86
| 33.823529
| 0.796926
| 0.068986
| 0
| 0.095238
| 0
| 0.02381
| 0.30025
| 0.031211
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.119048
| 0
| 0.261905
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e685406479e82ae52847e5dad03d1463ba77358b
| 5,000
|
py
|
Python
|
SiMon/visualization.py
|
Jennyx18/SiMon
|
522432ff708954ac37050609cfd6f42dd96467e4
|
[
"BSD-2-Clause"
] | 9
|
2017-03-04T08:00:58.000Z
|
2021-04-03T18:18:40.000Z
|
SiMon/visualization.py
|
Jennyx18/SiMon
|
522432ff708954ac37050609cfd6f42dd96467e4
|
[
"BSD-2-Clause"
] | 52
|
2016-09-23T14:06:06.000Z
|
2021-08-05T12:21:29.000Z
|
SiMon/visualization.py
|
Jennyx18/SiMon
|
522432ff708954ac37050609cfd6f42dd96467e4
|
[
"BSD-2-Clause"
] | 4
|
2016-09-15T02:09:42.000Z
|
2021-06-15T11:42:58.000Z
|
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import math
from datetime import datetime
from matplotlib.colors import ListedColormap, BoundaryNorm
from matplotlib.collections import LineCollection
from matplotlib import cm
from SiMon.simulation import Simulation
from SiMon.callback import Callback
from matplotlib.ticker import MaxNLocator
import time
class VisualizationCallback(Callback):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
def run(self):
self.plot_progress()
def plot_progress(self):
"""
Creates a graph showing the progress of the simulations
:param num_sim: number of simulations
:return:
"""
if 'container' in self.kwargs:
sim_inst_dict = self.kwargs['container'].sim_inst_dict
else:
return
num_sim = len(sim_inst_dict)
status = np.array([])
progresses = np.array([])
sim_idx = np.array([])
for i, sim_name in enumerate(sim_inst_dict):
sim = sim_inst_dict[sim_name]
sim_id = sim.id
if sim_id == 0:
continue # skip the root simulation instance, which is only a place holder
# only plot level=1 simulations
if sim.level > 1:
continue
s = sim.sim_get_status()
if sim.t_max > 0:
p = sim.t / sim.t_max
else:
p = 0.0
status = np.append(s, status)
progresses = np.append(p, progresses)
sim_idx = np.append(sim_id, sim_idx)
# Checks if num_sim has a square
if int(math.sqrt(num_sim) + 0.5) ** 2 == num_sim:
number = int(math.sqrt(num_sim))
y_num = num_sim // number
# If not square, find divisible number to get rectangle
else:
number = int(math.sqrt(num_sim))
while num_sim % number != 0:
number = number - 1
y_num = num_sim // number # Y-axis limit
# If prime number
if number == 1:
number = int(math.sqrt(num_sim)) + 1 # Make sure graph fits all num_sim
y_num = number
# 'Removes' extra white line if graph is too big
if (y_num * number) > num_sim and ((y_num - 1) * number) >= num_sim:
y_num = y_num - 1
x_sim = sim_idx % number
y_sim = sim_idx // number
plt.figure(1, figsize=(12, 12))
ax = plt.gca() # get the axis
ax.set_ylim(ax.get_ylim()[::-1]) # invert the axis
ax.xaxis.tick_top() # and move the X-Axis
ax.yaxis.set_ticks(np.arange(-0.5, y_num)) # set y-ticks
ax.yaxis.set_major_locator(MaxNLocator(integer=True)) # set to integers
ax.yaxis.tick_left() # remove right y-Ticks
symbols = ['o', 's', '>', '^', '*', 'x']
labels = ['NEW', 'STOP', 'RUN', 'STALL', 'DONE', 'ERROR']
for i, symbol in enumerate(symbols):
if (status == i).sum() == 0:
continue
else:
plt.scatter(
x_sim[status == i],
y_sim[status == i],
marker=symbol,
s=500,
c=progresses[status == i],
cmap=cm.RdYlBu,
vmin = 0., vmax = 1.,
label=labels[i])
for i in range(sim_idx.shape[0]):
plt.annotate(
text=str(sim_inst_dict[i].id),
xy=(x_sim[i], y_sim[i]),
color='black',
weight='bold',
size=15
)
plt.legend(
bbox_to_anchor=(0., -.15, 1., .102),
loc='lower center',
ncol=4,
mode="expand",
borderaxespad=0.,
borderpad=2,
labelspacing=3
)
plt.colorbar()
# # Save file with a new name
# if os.path.exists('progress.pdf'):
# plt.savefig('progress_{}.pdf'.format(int(time.time())))
# else:
# print('saving figure')
if 'plot_dir' in self.kwargs:
plot_dir = self.kwargs['plot_dir']
else:
plot_dir = os.getcwd()
if not os.path.isdir(plot_dir):
os.mkdir(plot_dir)
fn = datetime.now().strftime("%d_%m_%Y-%H_%M_%S")
if 'format' in self.kwargs:
fmt = self.kwargs['format']
else:
fmt = 'png'
fullpath = os.path.join(plot_dir, '%s.%s' % (fn, fmt))
print('Progress plot saved on %s' % fullpath)
plt.savefig(fullpath)
plt.close(1)
| 32.894737
| 102
| 0.4948
| 593
| 5,000
| 4.026981
| 0.350759
| 0.035176
| 0.027638
| 0.023451
| 0.049414
| 0.028894
| 0
| 0
| 0
| 0
| 0
| 0.014594
| 0.397
| 5,000
| 152
| 103
| 32.894737
| 0.777446
| 0.1288
| 0
| 0.115044
| 0
| 0
| 0.036296
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026549
| false
| 0
| 0.115044
| 0
| 0.159292
| 0.00885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6855e47f2ad7aa6ba42d8fa11c100eb19915033
| 3,700
|
py
|
Python
|
bin/psm/oil_jet.py
|
ChrisBarker-NOAA/tamoc
|
c797cbb6fee28d788b76d21cc5b0cc0df5444ba8
|
[
"MIT"
] | 18
|
2016-02-24T01:48:41.000Z
|
2021-11-05T03:18:24.000Z
|
bin/psm/oil_jet.py
|
ChrisBarker-NOAA/tamoc
|
c797cbb6fee28d788b76d21cc5b0cc0df5444ba8
|
[
"MIT"
] | 16
|
2016-08-09T07:06:35.000Z
|
2021-12-23T19:38:37.000Z
|
bin/psm/oil_jet.py
|
ChrisBarker-NOAA/tamoc
|
c797cbb6fee28d788b76d21cc5b0cc0df5444ba8
|
[
"MIT"
] | 9
|
2017-03-01T01:22:27.000Z
|
2021-09-17T12:13:40.000Z
|
"""
Particle Size Models: Pure Oil Jet
===================================
Use the ``TAMOC`` `particle_size_models` module to simulate a laboratory
scale pure oil jet into water. This script demonstrates the typical steps
involved in using the `particle_size_models.PureJet` object, which requires
specification of all of the fluid properties of the jet.
"""
# S. Socolofsky, March 2020, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function)
from tamoc import seawater, particle_size_models
import numpy as np
import warnings
warnings.filterwarnings("ignore")
if __name__ == '__main__':
print('\n---------------------------------------------------------------')
print('Demonstration using the PureJet class in the')
print('particle_size_models module of TAMOC for the ')
print('experiments in the paper by Brandvik et al. (2013).')
print('\nComparisons are for the data reported in Table 3')
print('of the paper')
print('---------------------------------------------------------------')
# Simulate an experiment from Brandvik et al. (2013). Their data uses
# Oseberg oil, with the following reported properties
rho_oil = 839.3
mu_oil = 5.e-3
sigma = 15.5e-3
# We will simulate data from Table 3 in the Brandvik et al. (2013) paper.
# These experiments have a nozzle diameter of 1.5 mm
d0 = 0.0015
# They also used seawater (assumed salinity of 34.5 psu) and released the
# oil from a depth of about 6 m at a temperature of 13 deg C
T = 273.15 + 13.
S = 34.5
rho = seawater.density(T, S, 101325.)
P = 101325. + rho * 9.81 * 6.
rho = seawater.density(T, S, P)
mu = seawater.mu(T, S, P)
# With this information, we can initialize a
# `particle_size_models.PureJet` object
jet = particle_size_models.PureJet(rho_oil, mu_oil, sigma, rho, mu,
fp_type = 1)
# Brandvik et al. (2013) report the exit velocity at the nozzle. We
# need to convert this to a mass flow rate. The mass flow rate should
# always be reported within a numpy array, which allows for different
# mass fluxes for different pseudocomponents of the oil.
u_oil = 11.3
A_oil = np.pi * (d0 / 2.)**2
q_oil = u_oil * A_oil
md_oil = np.array([rho_oil * q_oil])
# To simulate the no-dispersant case, all of the oil properties in the
# jet object are currently correct. Hence, we may use:
jet.simulate(d0, md_oil)
# We compare the result to the measured data as follows:
print('\nThe median droplet size for the no-disperant experiment is:')
print(' Measured: %3.3d um' % 237)
print(' Modeled : %3.3d um\n' % (jet.get_d50() * 1.e6))
# When dispersant is added in sufficient quantities, the interfacial
# tension reduces and the droplet size gets smaller. At a dispersant
# to oil ratio of 50, sigma is:
sigma = 0.05e-3
# We can run this case by updating the properties of the jet object and
# re-running the simualtion
jet.update_properties(rho_oil, mu_oil, sigma, rho, mu, fp_type = 1)
jet.simulate(d0, md_oil)
# We compare the result to the measured data as follows:
print('\nThe median droplet size for an experiments with a')
print('dispersant to oil ratio of 50 is:')
print(' Measured: %3.3d um' % 170)
print(' Modeled : %3.3d um\n' % (jet.get_d50() * 1.e6))
# We can also plot the size distribution
print('\nThe corresponding size distribution is plotted in Figure 1')
jet.get_distributions(15)
jet.plot_psd(1)
| 38.947368
| 78
| 0.635946
| 548
| 3,700
| 4.20438
| 0.374088
| 0.036458
| 0.054688
| 0.027778
| 0.211806
| 0.167535
| 0.12934
| 0.12934
| 0.12934
| 0.12934
| 0
| 0.041164
| 0.238378
| 3,700
| 95
| 79
| 38.947368
| 0.776437
| 0.451622
| 0
| 0.093023
| 0
| 0
| 0.322661
| 0.064032
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.093023
| 0
| 0.093023
| 0.372093
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6862496cf199e7f27dd40deb80fa8e54704b966
| 1,121
|
py
|
Python
|
tron/Nubs/hal.py
|
sdss/tron
|
886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322
|
[
"BSD-3-Clause"
] | null | null | null |
tron/Nubs/hal.py
|
sdss/tron
|
886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322
|
[
"BSD-3-Clause"
] | null | null | null |
tron/Nubs/hal.py
|
sdss/tron
|
886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322
|
[
"BSD-3-Clause"
] | null | null | null |
import os.path
import tron.Misc
from tron import g, hub
from tron.Hub.Command.Encoders.ASCIICmdEncoder import ASCIICmdEncoder
from tron.Hub.Nub.SocketActorNub import SocketActorNub
from tron.Hub.Reply.Decoders.ASCIIReplyDecoder import ASCIIReplyDecoder
name = 'hal'
def start(poller):
    cfg = tron.Misc.cfg.get(g.location, 'actors', doFlush=True)[name]
    stop()
    initCmds = ('ping', 'status', 'version')
    safeCmdsList = ['ping', 'version', 'status']
    safeCmds = r'^\s*({0})\s*$'.format('|'.join(safeCmdsList))
    d = ASCIIReplyDecoder(cidFirst=True, debug=1)
    e = ASCIICmdEncoder(sendCommander=True, useCID=False, debug=1)
    nub = SocketActorNub(
        poller,
        cfg['host'],
        cfg['port'],
        name=name,
        encoder=e,
        decoder=d,
        grabCID=True,  # the actor spontaneously generates a line we can eat.
        initCmds=initCmds,
        safeCmds=safeCmds,
        needsAuth=True,
        logDir=os.path.join(g.logDir, name),
        debug=3)
    hub.addActor(nub)
def stop():
    n = hub.findActor(name)
    if n:
        hub.dropActor(n)
        del n
| 24.369565
| 77
| 0.637823
| 136
| 1,121
| 5.257353
| 0.514706
| 0.044755
| 0.046154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00464
| 0.231044
| 1,121
| 45
| 78
| 24.911111
| 0.824826
| 0.046387
| 0
| 0
| 0
| 0
| 0.060918
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.176471
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e68781e0de8404ad5b22f8d2f250a25084af55ff
| 1,092
|
py
|
Python
|
extensions/domain.py
|
anubhavsinha98/oppia
|
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
|
[
"Apache-2.0"
] | 1
|
2019-08-31T17:06:41.000Z
|
2019-08-31T17:06:41.000Z
|
extensions/domain.py
|
anubhavsinha98/oppia
|
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
|
[
"Apache-2.0"
] | null | null | null |
extensions/domain.py
|
anubhavsinha98/oppia
|
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects used within multiple extensions."""
from __future__ import absolute_import  # pylint: disable=import-only-modules
import python_utils
class CustomizationArgSpec(python_utils.OBJECT):
    """Value object for a customization arg specification."""
    def __init__(self, name, description, schema, default_value):
        self.name = name
        self.description = description
        self.schema = schema
        self.default_value = default_value
| 35.225806
| 77
| 0.746337
| 150
| 1,092
| 5.34
| 0.66
| 0.074906
| 0.032459
| 0.03995
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010033
| 0.178571
| 1,092
| 30
| 78
| 36.4
| 0.882943
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6889a8d19aba99a640a29f5b573f28a57dbd412
| 1,727
|
py
|
Python
|
rbc/externals/stdio.py
|
guilhermeleobas/rbc
|
4b568b91c6ce3ef7727fee001169302c3803c4fd
|
[
"BSD-3-Clause"
] | null | null | null |
rbc/externals/stdio.py
|
guilhermeleobas/rbc
|
4b568b91c6ce3ef7727fee001169302c3803c4fd
|
[
"BSD-3-Clause"
] | null | null | null |
rbc/externals/stdio.py
|
guilhermeleobas/rbc
|
4b568b91c6ce3ef7727fee001169302c3803c4fd
|
[
"BSD-3-Clause"
] | null | null | null |
"""https://en.cppreference.com/w/c/io
"""
from rbc import irutils
from llvmlite import ir
from rbc.targetinfo import TargetInfo
from numba.core import cgutils, extending
from numba.core import types as nb_types
from rbc.errors import NumbaTypeError # some errors are available for Numba >= 0.55
int32_t = ir.IntType(32)
def cg_fflush(builder):
int8_t = ir.IntType(8)
fflush_fnty = ir.FunctionType(int32_t, [int8_t.as_pointer()])
fflush_fn = irutils.get_or_insert_function(builder.module, fflush_fnty, name="fflush")
builder.call(fflush_fn, [int8_t.as_pointer()(None)])
@extending.intrinsic
def fflush(typingctx):
"""``fflush`` that can be called from Numba jit-decorated functions.
.. note::
``fflush`` is available only for CPU target.
"""
sig = nb_types.void(nb_types.void)
def codegen(context, builder, signature, args):
target_info = TargetInfo()
if target_info.is_cpu:
cg_fflush(builder)
return sig, codegen
@extending.intrinsic
def printf(typingctx, format_type, *args):
"""``printf`` that can be called from Numba jit-decorated functions.
.. note::
``printf`` is available only for CPU target.
"""
if isinstance(format_type, nb_types.StringLiteral):
sig = nb_types.void(format_type, nb_types.BaseTuple.from_types(args))
def codegen(context, builder, signature, args):
target_info = TargetInfo()
if target_info.is_cpu:
cgutils.printf(builder, format_type.literal_value, *args[1:])
cg_fflush(builder)
return sig, codegen
else:
raise NumbaTypeError(f"expected StringLiteral but got {type(format_type).__name__}")
| 28.783333
| 92
| 0.682687
| 229
| 1,727
| 4.973799
| 0.388646
| 0.036874
| 0.039508
| 0.033363
| 0.317823
| 0.317823
| 0.215979
| 0.215979
| 0.215979
| 0.215979
| 0
| 0.010249
| 0.209033
| 1,727
| 59
| 93
| 29.271186
| 0.823572
| 0.191662
| 0
| 0.375
| 0
| 0
| 0.048041
| 0.020695
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15625
| false
| 0
| 0.1875
| 0
| 0.40625
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e689526fba8d369acce37c9eab4574f56f8a1f4b
| 991
|
py
|
Python
|
setup.py
|
clach04/discoverhue
|
8f35cbc8ff9b5aab80b8be0443427058c1da51ed
|
[
"MIT"
] | 10
|
2017-09-26T22:34:38.000Z
|
2021-11-19T22:37:59.000Z
|
setup.py
|
clach04/discoverhue
|
8f35cbc8ff9b5aab80b8be0443427058c1da51ed
|
[
"MIT"
] | 7
|
2018-02-04T19:38:03.000Z
|
2021-10-30T13:20:33.000Z
|
setup.py
|
clach04/discoverhue
|
8f35cbc8ff9b5aab80b8be0443427058c1da51ed
|
[
"MIT"
] | 4
|
2019-06-28T15:26:45.000Z
|
2022-01-20T02:26:05.000Z
|
from setuptools import setup
try:
    import pypandoc
    long_description = pypandoc.convert_file('README.md', 'rst', extra_args=())
except ImportError:
    import codecs
    long_description = codecs.open('README.md', encoding='utf-8').read()
    long_description = '\n'.join(long_description.splitlines())
setup(
    name='discoverhue',
    description='Auto discovery of Hue bridges',
    long_description=long_description,
    version='1.0.2',
    url='https://github.com/Overboard/discoverhue',
    author='Overboard',
    author_email='amwroute-git@yahoo.com',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='philips hue',
    packages=['discoverhue'],
    install_requires=['httpfind'],
)
| 26.078947
| 79
| 0.649849
| 106
| 991
| 5.981132
| 0.669811
| 0.141956
| 0.118297
| 0.123028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012674
| 0.203835
| 991
| 37
| 80
| 26.783784
| 0.790875
| 0
| 0
| 0
| 0
| 0
| 0.388496
| 0.0222
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.137931
| 0
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e68a7efe5fb704c535ff7a5982b5a18ddc07817d
| 6,024
|
py
|
Python
|
utils/logmmse.py
|
dbonattoj/Real-Time-Voice-Cloning
|
7ce361b0e900cb0fad4289884f526578ba276481
|
[
"MIT"
] | 3
|
2020-07-10T02:23:00.000Z
|
2021-08-17T12:35:09.000Z
|
utils/logmmse.py
|
amoliu/Real-Time-Voice-Cloning
|
7808d6f80aa9bbaffe367fde07b1c6f96cd3697e
|
[
"MIT"
] | 1
|
2020-09-30T09:29:57.000Z
|
2020-10-31T15:38:50.000Z
|
utils/logmmse.py
|
amoliu/Real-Time-Voice-Cloning
|
7808d6f80aa9bbaffe367fde07b1c6f96cd3697e
|
[
"MIT"
] | 5
|
2020-04-23T10:52:30.000Z
|
2021-08-17T12:35:19.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2015 braindead
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# This code was extracted from the logmmse package (https://pypi.org/project/logmmse/) and I
# simply modified the interface to meet my needs.
import numpy as np
import math
from scipy.special import expn
from collections import namedtuple
NoiseProfile = namedtuple("NoiseProfile", "sampling_rate window_size len1 len2 win n_fft noise_mu2")
def profile_noise(noise, sampling_rate, window_size=0):
"""
Creates a profile of the noise in a given waveform.
:param noise: a waveform containing noise ONLY, as a numpy array of floats or ints.
:param sampling_rate: the sampling rate of the audio
:param window_size: the size of the window the logmmse algorithm operates on. A default value
will be picked if left as 0.
:return: a NoiseProfile object
"""
noise, dtype = to_float(noise)
noise += np.finfo(np.float64).eps
if window_size == 0:
window_size = int(math.floor(0.02 * sampling_rate))
if window_size % 2 == 1:
window_size = window_size + 1
perc = 50
len1 = int(math.floor(window_size * perc / 100))
len2 = int(window_size - len1)
win = np.hanning(window_size)
win = win * len2 / np.sum(win)
n_fft = 2 * window_size
noise_mean = np.zeros(n_fft)
n_frames = len(noise) // window_size
for j in range(0, window_size * n_frames, window_size):
noise_mean += np.absolute(np.fft.fft(win * noise[j:j + window_size], n_fft, axis=0))
noise_mu2 = (noise_mean / n_frames) ** 2
return NoiseProfile(sampling_rate, window_size, len1, len2, win, n_fft, noise_mu2)
def denoise(wav, noise_profile: NoiseProfile, eta=0.15):
"""
Cleans the noise from a speech waveform given a noise profile. The waveform must have the
same sampling rate as the one used to create the noise profile.
:param wav: a speech waveform as a numpy array of floats or ints.
:param noise_profile: a NoiseProfile object that was created from a similar (or a segment of
the same) waveform.
:param eta: voice threshold for noise update. While the voice activation detection value is
below this threshold, the noise profile will be continuously updated throughout the audio.
Set to 0 to disable updating the noise profile.
:return: the clean wav as a numpy array of floats or ints of the same length.
"""
wav, dtype = to_float(wav)
wav += np.finfo(np.float64).eps
p = noise_profile
nframes = int(math.floor(len(wav) / p.len2) - math.floor(p.window_size / p.len2))
x_final = np.zeros(nframes * p.len2)
aa = 0.98
mu = 0.98
ksi_min = 10 ** (-25 / 10)
x_old = np.zeros(p.len1)
xk_prev = np.zeros(p.len1)
noise_mu2 = p.noise_mu2
for k in range(0, nframes * p.len2, p.len2):
insign = p.win * wav[k:k + p.window_size]
spec = np.fft.fft(insign, p.n_fft, axis=0)
sig = np.absolute(spec)
sig2 = sig ** 2
gammak = np.minimum(sig2 / noise_mu2, 40)
if xk_prev.all() == 0:
ksi = aa + (1 - aa) * np.maximum(gammak - 1, 0)
else:
ksi = aa * xk_prev / noise_mu2 + (1 - aa) * np.maximum(gammak - 1, 0)
ksi = np.maximum(ksi_min, ksi)
log_sigma_k = gammak * ksi/(1 + ksi) - np.log(1 + ksi)
vad_decision = np.sum(log_sigma_k) / p.window_size
if vad_decision < eta:
noise_mu2 = mu * noise_mu2 + (1 - mu) * sig2
a = ksi / (1 + ksi)
vk = a * gammak
ei_vk = 0.5 * expn(1, np.maximum(vk, 1e-8))
hw = a * np.exp(ei_vk)
sig = sig * hw
xk_prev = sig ** 2
xi_w = np.fft.ifft(hw * spec, p.n_fft, axis=0)
xi_w = np.real(xi_w)
x_final[k:k + p.len2] = x_old + xi_w[0:p.len1]
x_old = xi_w[p.len1:p.window_size]
output = from_float(x_final, dtype)
output = np.pad(output, (0, len(wav) - len(output)), mode="constant")
return output
def to_float(_input):
if _input.dtype == np.float64:
return _input, _input.dtype
elif _input.dtype == np.float32:
return _input.astype(np.float64), _input.dtype
elif _input.dtype == np.uint8:
return (_input - 128) / 128., _input.dtype
elif _input.dtype == np.int16:
return _input / 32768., _input.dtype
elif _input.dtype == np.int32:
return _input / 2147483648., _input.dtype
raise ValueError('Unsupported wave file format')
def from_float(_input, dtype):
if dtype == np.float64:
return _input, np.float64
elif dtype == np.float32:
return _input.astype(np.float32)
elif dtype == np.uint8:
return ((_input * 128) + 128).astype(np.uint8)
elif dtype == np.int16:
return (_input * 32768).astype(np.int16)
elif dtype == np.int32:
print(_input)
return (_input * 2147483648).astype(np.int32)
raise ValueError('Unsupported wave file format')
| 36.957055
| 100
| 0.659529
| 926
| 6,024
| 4.177106
| 0.292657
| 0.054292
| 0.015512
| 0.019648
| 0.192347
| 0.153568
| 0.096949
| 0.05455
| 0.04757
| 0.031024
| 0
| 0.03681
| 0.242364
| 6,024
| 162
| 101
| 37.185185
| 0.810692
| 0.3667
| 0
| 0.022472
| 0
| 0
| 0.035405
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044944
| false
| 0
| 0.044944
| 0
| 0.224719
| 0.011236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e68aea4ed97106ccbd90e2eca6ee1a3772751cb0
| 3,780
|
py
|
Python
|
lib/core/session.py
|
6un9-h0-Dan/CIRTKit
|
58b8793ada69320ffdbdd4ecdc04a3bb2fa83c37
|
[
"MIT"
] | 97
|
2017-12-18T15:19:28.000Z
|
2022-03-25T07:10:00.000Z
|
lib/core/session.py
|
robertdigital/CIRTKit
|
58b8793ada69320ffdbdd4ecdc04a3bb2fa83c37
|
[
"MIT"
] | 1
|
2019-01-29T16:29:27.000Z
|
2019-01-29T16:29:27.000Z
|
lib/core/session.py
|
robertdigital/CIRTKit
|
58b8793ada69320ffdbdd4ecdc04a3bb2fa83c37
|
[
"MIT"
] | 21
|
2018-04-04T18:12:13.000Z
|
2021-06-12T09:40:58.000Z
|
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import time
import datetime
from lib.common.out import *
from lib.common.objects import File
from lib.core.database import Database
from lib.core.investigation import __project__
class Session(object):
def __init__(self):
self.id = None
# This will be assigned with the File object of the file currently
# being analyzed.
self.file = None
# Timestamp of the creation of the session.
self.created_at = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
# MISP event associated to the object
self.misp_event = None
class Sessions(object):
def __init__(self):
self.current = None
self.sessions = []
# Store the results of the last "find" command.
self.find = None
def close(self):
self.current = None
def is_set(self):
# Check if the session has been opened or not.
if self.current:
return True
else:
return False
def switch(self, session):
self.current = session
print_info("Switched to session #{0} on {1}".format(self.current.id, self.current.file.path))
def new(self, path=None, misp_event=None):
if path is None and misp_event is None:
print_error("You have to open a session on a path or on a misp event.")
return
if __project__.name:
pass
else:
print_error("You must open an investigation to store files")
return
session = Session()
total = len(self.sessions)
session.id = total + 1
if path is not None:
if self.is_set() and self.current.misp_event:
session.misp_event = self.current.misp_event
# Open a section on the given file.
session.file = File(path)
# Try to lookup the file in the database. If it is already present
# we get file name and
row = Database().find(key='sha256', value=session.file.sha256)
if row:
session.file.name = row[0].name
session.file.tags = ', '.join(tag.to_dict()['tag'] for tag in row[0].tag)
print_info("Session opened on {0}".format(path))
if misp_event is not None:
if self.is_set() and self.current.file:
session.file = self.current.file
refresh = False
if self.current is not None and self.current.misp_event is not None \
and self.current.misp_event.event_id == misp_event.event_id:
refresh = True
session.misp_event = misp_event
if refresh:
print_info("Session on MISP event {0} refreshed.".format(misp_event.event_id))
else:
print_info("Session opened on MISP event {0}.".format(misp_event.event_id))
if session.file is not None:
# Loop through all existing sessions and check whether there's another
# session open on the same file and delete it. This is to avoid
# duplicates in sessions.
# NOTE: in the future we might want to remove this if sessions have
# unique attributes (for example, an history just for each of them).
for entry in self.sessions:
if entry.file is not None and entry.file.sha256 == session.file.sha256:
self.sessions.remove(entry)
# Add new session to the list.
self.sessions.append(session)
# Mark the new session as the current one.
self.current = session
__sessions__ = Sessions()
| 36
| 101
| 0.603439
| 509
| 3,780
| 4.385069
| 0.29666
| 0.072581
| 0.024194
| 0.035842
| 0.12724
| 0.05914
| 0.05914
| 0.05914
| 0.030466
| 0.030466
| 0
| 0.007722
| 0.314815
| 3,780
| 104
| 102
| 36.346154
| 0.854054
| 0.22328
| 0
| 0.164179
| 0
| 0
| 0.085704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089552
| false
| 0.014925
| 0.089552
| 0
| 0.268657
| 0.089552
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e68c5bbc6721a5ef393bdd04f567f863f9c93e3b
| 3,810
|
py
|
Python
|
tests/ut/datavisual/common/test_error_handler.py
|
zengchen1024/mindinsight
|
228a448b46707e889efc1fb23502158e27ab56ca
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/datavisual/common/test_error_handler.py
|
zengchen1024/mindinsight
|
228a448b46707e889efc1fb23502158e27ab56ca
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/datavisual/common/test_error_handler.py
|
zengchen1024/mindinsight
|
228a448b46707e889efc1fb23502158e27ab56ca
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Function:
Test error handler.
Usage:
pytest tests/ut/datavisual
"""
from unittest.mock import patch
from werkzeug.exceptions import MethodNotAllowed, NotFound
from ...backend.datavisual.conftest import TRAIN_ROUTES
from ..mock import MockLogger
from ....utils.tools import get_url
from mindinsight.datavisual.processors import scalars_processor
from mindinsight.datavisual.processors.scalars_processor import ScalarsProcessor
class TestErrorHandler:
"""Test train visual api."""
@patch.object(ScalarsProcessor, 'get_metadata_list')
def test_handle_http_exception_error_not_found(self, mock_scalar_processor, client):
"""Test handle http exception error not found."""
scalars_processor.logger = MockLogger
text = 'Test Message'
# NotFound
def get_metadata_list(train_ids, tag):
raise NotFound("%s" % text)
mock_scalar_processor.side_effect = get_metadata_list
test_train_ids = "aa"
test_tag = "bb"
params = dict(train_ids=test_train_ids, tag=test_tag)
url = get_url(TRAIN_ROUTES['scalar_metadata'], params)
response = client.get(url)
assert response.status_code == 404
response = response.get_json()
assert response['error_code'] == '50545001'
assert response['error_msg'] == '404 Not Found.'
@patch.object(ScalarsProcessor, 'get_metadata_list')
def test_handle_http_exception_error_method_not_allowed(self, mock_scalar_processor, client):
"""Test handling http exception error method not allowed."""
scalars_processor.logger = MockLogger
text = 'Test Message'
# MethodNotAllowed
def get_metadata_list(train_ids, tag):
raise MethodNotAllowed("%s" % text)
mock_scalar_processor.side_effect = get_metadata_list
test_train_ids = "aa"
test_tag = "bb"
params = dict(train_ids=test_train_ids, tag=test_tag)
url = get_url(TRAIN_ROUTES['scalar_metadata'], params)
response = client.get(url)
assert response.status_code == 405
response = response.get_json()
assert response['error_code'] == '50545002'
assert response['error_msg'] == '405 Method Not Allowed.'
@patch.object(ScalarsProcessor, 'get_metadata_list')
def test_handle_http_exception_error_method_other_errors(self, mock_scalar_processor, client):
"""Test handling http exception error method other errors."""
scalars_processor.logger = MockLogger
text = 'Test Message'
# Other errors
def get_metadata_list(train_ids, tag):
raise KeyError("%s" % text)
mock_scalar_processor.side_effect = get_metadata_list
test_train_ids = "aa"
test_tag = "bb"
params = dict(train_ids=test_train_ids, tag=test_tag)
url = get_url(TRAIN_ROUTES['scalar_metadata'], params)
response = client.get(url)
assert response.status_code == 500
response = response.get_json()
assert response['error_code'] == '50540000'
assert response['error_msg'] == 'System error.'
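These tests expect HTTP exceptions raised inside a processor to be translated into a JSON body carrying 'error_code' and 'error_msg'. Below is a minimal, hedged sketch of a Flask error handler that would produce that shape; the handler name, code mapping and registration are illustrative assumptions, not MindInsight's actual implementation.
# Hedged sketch only: shows the response shape asserted above, not the real backend code.
from flask import Flask, jsonify
from werkzeug.exceptions import HTTPException

app = Flask(__name__)

@app.errorhandler(Exception)
def handle_any_error(error):
    if isinstance(error, HTTPException):
        # assumed mapping for illustration: 404 -> '50545001', 405 -> '50545002'
        code_map = {404: '50545001', 405: '50545002'}
        body = {'error_code': code_map.get(error.code, '50540000'),
                'error_msg': '%d %s.' % (error.code, error.name)}
        return jsonify(body), error.code
    # any non-HTTP exception becomes a generic system error
    return jsonify({'error_code': '50540000', 'error_msg': 'System error.'}), 500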
| 36.990291
| 98
| 0.683727
| 464
| 3,810
| 5.387931
| 0.30819
| 0.0384
| 0.054
| 0.0368
| 0.578
| 0.578
| 0.5496
| 0.4756
| 0.3796
| 0.3796
| 0
| 0.015589
| 0.208661
| 3,810
| 102
| 99
| 37.352941
| 0.813599
| 0.24357
| 0
| 0.589286
| 0
| 0
| 0.099013
| 0
| 0
| 0
| 0
| 0
| 0.160714
| 1
| 0.107143
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e68dece75266882db686c493e81051a931627936
| 5,118
|
py
|
Python
|
src/.ipynb_checkpoints/headpose_model-checkpoint.py
|
geochri/Intel_Edge_AI-Computer_Pointer_controller
|
068947fa0cbe0c5d1b74e2c0eb69a85bbc439131
|
[
"MIT"
] | null | null | null |
src/.ipynb_checkpoints/headpose_model-checkpoint.py
|
geochri/Intel_Edge_AI-Computer_Pointer_controller
|
068947fa0cbe0c5d1b74e2c0eb69a85bbc439131
|
[
"MIT"
] | 3
|
2021-03-19T14:38:26.000Z
|
2022-03-12T00:43:27.000Z
|
src/.ipynb_checkpoints/headpose_model-checkpoint.py
|
geochri/Intel_Edge_AI-Computer_Pointer_controller
|
068947fa0cbe0c5d1b74e2c0eb69a85bbc439131
|
[
"MIT"
] | null | null | null |
'''
This is a sample class for a model. You may choose to use it as-is or make any changes to it.
This has been provided just to give you an idea of how to structure your model class.
'''
from openvino.inference_engine import IENetwork, IECore
import numpy as np
import os
import cv2
import sys
class Model_HeadPose:
'''
Class for the Head Pose Estimation Model.
'''
def __init__(self, model_name, device='CPU', extensions=None):
self.model_weights = model_name+'.bin'
self.model_structure = model_name+'.xml'
self.device = device
self.extensions = extensions
# self.check_model()
# try:
# self.input_name = next(iter(self.model.inputs))
# self.input_shape = self.model.inputs[self.input_name].shape
# self.output_name = next(iter(self.model.outputs))
# self.output_shape = self.model.outputs[self.output_name].shape
# print('Initialise.. completed.')
# except Exception as e:
# raise ValueError('Something is wrong with input and output values..')
def load_model(self):
'''
This method is for loading the model to the device specified by the user.
If your model requires any Plugins, this is where you can load them.
'''
try:
print('Model is loading...')
self.core = IECore()
self.net = self.core.read_network(model=self.model_structure,weights=self.model_weights)
supported = self.core.query_network(self.net, self.device)
not_supported = [layer for layer in self.net.layers.keys() if layer not in supported]
if len(not_supported) != 0 and self.device == 'CPU':
                print('Unsupported layers:', not_supported)
                if self.extensions is not None:
                    print('***Quick fix.\n ~CPU Extension added')
                    self.core.add_extension(self.extensions, self.device)
                    supported = self.core.query_network(self.net, self.device)
                    not_supported = [layer for layer in self.net.layers.keys() if layer not in supported]
                    if len(not_supported) != 0:
                        print('***Quick fix failed. Still unsupported:', not_supported)
                else:
                    print('Check the extension path.')
self.net_exec = self.core.load_network(network=self.net, device_name=self.device)
except Exception as e:
            raise RuntimeError('Something is wrong.. ~debug load model~') from e
try:
self.input_name = next(iter(self.net.inputs))
self.input_shape = self.net.inputs[self.input_name].shape
self.output_name = next(iter(self.net.outputs))
self.output_shape = self.net.outputs[self.output_name].shape
print('Initialise.. completed.')
except Exception as e:
raise ValueError('Something is wrong with input and output values..')
def predict(self, image):
'''
This method is meant for running predictions on the input image.
'''
self.image = image
print('HeadPose predict..')
pre_image = self.preprocess_input(self.image)
input_name = self.input_name
input_dict = {input_name: pre_image}
# infer = self.net_exec.start_async(request_id=0, inputs=input_dict)
# status = infer.wait()
results = self.net_exec.infer(input_dict)
outputs = self.preprocess_output(results)
# if status == 0:
# results = infer.outputs[self.output_name]
# print(results)
# print(self.input_name)
# outputs = self.preprocess_output(results)
return outputs
def check_model(self):
'''
Check - initialise the model
'''
try:
self.model = IENetwork(self.model_structure, self.model_weights)
except Exception as e:
raise ValueError("Could not Initialise the network. Have you enterred the correct model path?")
def preprocess_input(self, image):
'''
        An input image in [BxCxHxW] format (here B = 1).
B - batch size
C - number of channels
H - image height
W - image width
'''
image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
image = image.transpose((2, 0, 1))
image = image.reshape(1, *image.shape)
return image
def preprocess_output(self, outputs):
'''
Output layer names in Inference Engine format:
name: "angle_y_fc", shape: [1, 1] - Estimated yaw (in degrees).
name: "angle_p_fc", shape: [1, 1] - Estimated pitch (in degrees).
name: "angle_r_fc", shape: [1, 1] - Estimated roll (in degrees).
'''
object_list = []
print('PreOutput-headpose..')
# print(outputs)
object_list.append(outputs['angle_y_fc'].tolist()[0][0])
object_list.append(outputs['angle_p_fc'].tolist()[0][0])
object_list.append(outputs['angle_r_fc'].tolist()[0][0])
return object_list
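A hedged usage sketch for the class above; the model stem and image path are placeholders and assume the OpenVINO IR files exist on disk next to that stem.
# Hedged usage sketch for Model_HeadPose (placeholder paths, illustrative only).
import cv2

model_stem = 'models/head-pose-estimation-adas-0001'  # expects <stem>.xml and <stem>.bin
hp = Model_HeadPose(model_stem, device='CPU')
hp.load_model()

frame = cv2.imread('face_crop.jpg')       # a cropped face image (placeholder path)
yaw, pitch, roll = hp.predict(frame)      # preprocess_output returns [yaw, pitch, roll] in degrees
print('yaw=%.2f pitch=%.2f roll=%.2f' % (yaw, pitch, roll))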
| 41.609756
| 107
| 0.59789
| 636
| 5,118
| 4.685535
| 0.27044
| 0.030537
| 0.026175
| 0.021477
| 0.365772
| 0.263423
| 0.252349
| 0.252349
| 0.230201
| 0.204698
| 0
| 0.006941
| 0.296209
| 5,118
| 123
| 108
| 41.609756
| 0.820378
| 0.314771
| 0
| 0.151515
| 0
| 0
| 0.115198
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.075758
| 0
| 0.227273
| 0.121212
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e692205969e07efd17736b63f7c1d2bf34e22ac0
| 833
|
py
|
Python
|
Contests/Snackdown19_Qualifier/CHEFPRMS.py
|
PK-100/Competitive_Programming
|
d0863feaaa99462b2999e85dcf115f7a6c08bb8d
|
[
"MIT"
] | 70
|
2018-06-25T21:20:15.000Z
|
2022-03-24T03:55:17.000Z
|
Contests/Snackdown19_Qualifier/CHEFPRMS.py
|
An3sha/Competitive_Programming
|
ee7eadf51939a360d0b004d787ebabda583e92f0
|
[
"MIT"
] | 4
|
2018-09-04T13:12:20.000Z
|
2021-06-20T08:29:12.000Z
|
Contests/Snackdown19_Qualifier/CHEFPRMS.py
|
An3sha/Competitive_Programming
|
ee7eadf51939a360d0b004d787ebabda583e92f0
|
[
"MIT"
] | 24
|
2018-12-26T05:15:32.000Z
|
2022-01-23T23:04:54.000Z
|
import math
def square(n):
    # Returns False when n is a perfect square, True otherwise
    # (used to reject squares of a single prime, e.g. 4, 9, 25).
    tmp = round(math.sqrt(n))
    if tmp * tmp == n:
        return False
    else:
        return True
def semprime(n):
    # True when n is the product of exactly two distinct primes.
    ch = 0
if square(n)==False:
return False
for i in range(2, int(math.sqrt(n)) + 1):
while n%i==0:
n//=i
ch+=1
if ch >= 2:
break
if(n > 1):
ch += 1
return ch == 2
def check(n):
if semprime(n) == True:
return True
else:
return False
for _ in range(int(input())):
n=int(input())
flag=0
for i in range(2,n//2+1):
if check(i)==True and check(n-i)==True:
#print(i,n-i,square(i),square(n-i),"Yes")
print("YES")
flag=1
break
if flag==0:
#print(i,n-i,square(i),square(n-i),"No")
print("NO")
| 21.921053
| 53
| 0.457383
| 129
| 833
| 2.945736
| 0.248062
| 0.036842
| 0.047368
| 0.057895
| 0.184211
| 0.121053
| 0.121053
| 0.121053
| 0.121053
| 0
| 0
| 0.029126
| 0.381753
| 833
| 37
| 54
| 22.513514
| 0.708738
| 0.094838
| 0
| 0.314286
| 0
| 0
| 0.006649
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.028571
| 0
| 0.285714
| 0.057143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e692fc94ab5c1ffa86ca1f2d1e72224d55aaebca
| 8,474
|
py
|
Python
|
make_base_container.py
|
thiagodasilva/runway
|
a5455e885302df534fcfff0470881fbd2ad8eed5
|
[
"Apache-2.0"
] | null | null | null |
make_base_container.py
|
thiagodasilva/runway
|
a5455e885302df534fcfff0470881fbd2ad8eed5
|
[
"Apache-2.0"
] | null | null | null |
make_base_container.py
|
thiagodasilva/runway
|
a5455e885302df534fcfff0470881fbd2ad8eed5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import os
import random
import requests
import sys
import tempfile
import uuid
from libs import colorprint
from libs.cli import run_command
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
# assume well-known lvm volume group on host
# ...later we'll figure out how to make this dynamic
VG_NAME = "swift-runway-vg01"
SWIFTSTACK_IMAGES_PREFIX = "ss-"
SWIFTSTACK_IMAGES_BASE_URL = \
"https://tellus.swiftstack.com/v1/AUTH_runway/lxd-images"
IMAGE_MANIFEST_OBJECT_NAME = "manifest.json"
UNIFIED_TARBALL_TYPE = "unified"
SPLIT_TARBALL_TYPE = "split"
TARBALL_TYPES = [UNIFIED_TARBALL_TYPE, SPLIT_TARBALL_TYPE]
def exit_with_error(error_text):
colorprint.error(error_text)
sys.exit(1)
def get_default_image(distro):
if distro.lower() == "rhel":
return "images:centos/7/amd64"
else:
return "ubuntu:16.04"
def is_swiftstack_hosted_image(base_image):
return base_image.lower().startswith(SWIFTSTACK_IMAGES_PREFIX)
def get_image_manifest(swift_container_name):
manifest_obj_url = "{}/{}/{}".format(SWIFTSTACK_IMAGES_BASE_URL,
swift_container_name,
IMAGE_MANIFEST_OBJECT_NAME)
try:
r = requests.get(manifest_obj_url)
r.raise_for_status()
return r.json()
except Exception as e:
raise Exception("Could not download container image manifest from '{}'."
"\n{}".format(manifest_obj_url, e))
def is_image_already_imported(fingerprint):
try:
run_command("lxc image info {} >/dev/null 2>&1".format(fingerprint),
shell=True)
except Exception:
return False
return True
def delete_image_with_alias(alias):
try:
run_command("lxc image delete {}".format(alias))
except Exception:
pass
def download_unified_image_file(manifest):
tarball_url = "{}/{}".format(SWIFTSTACK_IMAGES_BASE_URL,
manifest["tarball-object"])
try:
r = requests.get(tarball_url, stream=True)
r.raise_for_status()
with tempfile.NamedTemporaryFile(delete=False) as f:
file_path = f.name
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
except Exception as e:
print("Could not download file from '{}': {}".format(tarball_url, e))
return file_path
def import_unified_image(manifest, alias):
tarball_path = download_unified_image_file(manifest)
# There might be an older image with the same alias
delete_image_with_alias(alias)
run_command("lxc image import {} --alias {}".format(tarball_path, alias))
os.unlink(tarball_path)
def download_split_image_files(manifest):
metadata_tarball_url = "{}/{}".format(SWIFTSTACK_IMAGES_BASE_URL,
manifest["metadata-object"])
rootfs_tarball_url = "{}/{}".format(SWIFTSTACK_IMAGES_BASE_URL,
manifest["rootfs-object"])
file_paths = []
for url in [metadata_tarball_url, rootfs_tarball_url]:
try:
r = requests.get(url, stream=True)
r.raise_for_status()
with tempfile.NamedTemporaryFile(delete=False) as f:
file_paths.append(f.name)
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
except Exception as e:
print("Could not download file from '{}': {}".format(url, e))
return tuple(file_paths)
def import_split_image(manifest, alias):
metadata_tarball_path, rootfs_tarball_path = \
download_split_image_files(manifest)
# There might be an older image with the same alias
delete_image_with_alias(alias)
run_command("lxc image import {} {} --alias {}".format(
metadata_tarball_path, rootfs_tarball_path, alias))
os.unlink(metadata_tarball_path)
os.unlink(rootfs_tarball_path)
def import_image(manifest, alias):
'''
There are 2 possible image formats: unified and split. We support both.
For unified format, the manifest will look like this:
{
"tarball_type": "unified",
"fingerprint": "629d2c18b7bb0b52b80dfe62ae309937123d05b563ef057233e7802c9e18c018",
"tarball-object": "centos7.5/629d2c18b7bb0b52b80dfe62ae309937123d05b563ef057233e7802c9e18c018.tar.gz"
}
For split format, the manifest will look like this:
{
"tarball_type": "split",
"fingerprint": "22abbefe0c68943f264a7139c7a699a0b2adfbcf46fc661d2e89b1232301a5de",
"metadata-object": "centos7.5/meta-22abbefe0c68943f264a7139c7a699a0b2adfbcf46fc661d2e89b1232301a5de.tar.xz",
"rootfs-object": "centos7.5/22abbefe0c68943f264a7139c7a699a0b2adfbcf46fc661d2e89b1232301a5de.squashfs"
}
'''
if manifest["tarball_type"] not in TARBALL_TYPES:
raise Exception("Invalid tarball type: {}".format(
manifest["tarball_type"]))
elif manifest["tarball_type"] == UNIFIED_TARBALL_TYPE:
import_unified_image(manifest, alias)
elif manifest["tarball_type"] == SPLIT_TARBALL_TYPE:
import_split_image(manifest, alias)
else:
raise Exception("Tarball type '{}' is valid, but a method to import "
"it has not been implemented yet.")
def import_image_if_needed(base_image):
if not is_swiftstack_hosted_image(base_image):
raise Exception("{} is not an image hosted by "
"SwiftStack".format(base_image))
swift_container_name = base_image[len(SWIFTSTACK_IMAGES_PREFIX):]
manifest = get_image_manifest(swift_container_name)
if not is_image_already_imported(manifest["fingerprint"]):
print("Importing image '{}'...".format(base_image))
import_image(manifest, base_image)
else:
print("Image '{}' is already imported".format(base_image))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('distro', type=str, help='Container distro')
parser.add_argument('cname', metavar='containername', help='Container '
'name')
parser.add_argument('volsize', help='Volume size')
parser.add_argument('volcount', type=int, help='Volume count')
parser.add_argument('baseimage', nargs='?',
help='Base image. Defaults: \'images:centos/7/amd64\' '
'for RHEL distro, \'ubuntu:16.04\' otherwise')
args = parser.parse_args()
distro = args.distro
container_name = args.cname
base_image = args.baseimage
volume_size = args.volsize
volume_count = args.volcount
if is_swiftstack_hosted_image(distro):
import_image_if_needed(distro)
default_image = distro
else:
default_image = get_default_image(distro)
if base_image is None:
base_image = default_image
try:
# make a container profile that maps 8 block devices to the guest
rand_file_name = str(uuid.UUID(int=random.getrandbits(128)))
run_command("./make_lxc_profile.py {} {} {} {} > "
"/tmp/{}".format(container_name, VG_NAME, volume_size,
volume_count, rand_file_name),
cwd=SCRIPT_DIR, shell=True)
run_command("lxc profile create {}-profile".format(container_name))
run_command("cat /tmp/{} | lxc profile edit {}-profile".format(
rand_file_name, container_name), cwd=SCRIPT_DIR, shell=True)
# launch the new container
print("Trying to launch container from base image "
"{}".format(base_image))
run_command("lxc launch {} {} -p {}-profile || "
"lxc launch {} {} -p {}-profile".format(base_image,
container_name,
container_name,
default_image,
container_name,
container_name),
shell=True)
except Exception as e:
exit_with_error(str(e))
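import_image() dispatches on the manifest's "tarball_type" and supports the two layouts shown in its docstring. Below is a hedged sketch of driving it directly with a unified-format manifest dict; the fingerprint, object name and alias are placeholders, not real objects in the SwiftStack container.
# Hedged example: importing from a unified-format manifest (placeholder values).
sample_manifest = {
    "tarball_type": "unified",
    "fingerprint": "629d2c18b7bb...c9e18c018",             # placeholder fingerprint
    "tarball-object": "centos7.5/629d2c18b7bb....tar.gz"   # placeholder object name
}
if not is_image_already_imported(sample_manifest["fingerprint"]):
    import_image(sample_manifest, alias="ss-centos7.5")    # alias assumed for illustration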
| 36.683983
| 116
| 0.628039
| 950
| 8,474
| 5.350526
| 0.235789
| 0.02833
| 0.015345
| 0.022624
| 0.325595
| 0.228015
| 0.171749
| 0.171749
| 0.144009
| 0.126697
| 0
| 0.038862
| 0.274251
| 8,474
| 230
| 117
| 36.843478
| 0.787642
| 0.132405
| 0
| 0.209877
| 0
| 0
| 0.149945
| 0.005772
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0.006173
| 0.148148
| 0.006173
| 0.271605
| 0.061728
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e693812c79b01a653cf7ed97ebf4b0c9deae4584
| 1,687
|
py
|
Python
|
exercicios_antigos/ex_01.py
|
jfklima/prog_pratica
|
72c795e3372e46f04ce0c92c05187aec651777cf
|
[
"MIT"
] | null | null | null |
exercicios_antigos/ex_01.py
|
jfklima/prog_pratica
|
72c795e3372e46f04ce0c92c05187aec651777cf
|
[
"MIT"
] | null | null | null |
exercicios_antigos/ex_01.py
|
jfklima/prog_pratica
|
72c795e3372e46f04ce0c92c05187aec651777cf
|
[
"MIT"
] | null | null | null |
"""Criar uma função que retorne min e max de uma sequência numérica
aleatória.
Só pode usar if, comparações, recursão e funções que sejam de sua
autoria.
Se quiser usar laços também pode.
Deve informar via docstring qual é a complexidade de tempo e espaço da
sua solução
"""
from math import inf
def minimo_e_maximo(sequencia_numerica):
    ''' Returns the minimum and maximum of a random numeric sequence.
    Complexity:
        time: O(n)
        space: O(1)
    '''
maximo = -inf # 1
minimo = +inf # 1
for elem in sequencia_numerica: # 1
if elem > maximo: # 2
maximo = elem # 1
if elem < minimo: # 2
minimo = elem # 2
return minimo, maximo # 1
def recursivo_minmax(sequencia_numerica):
def r_minimo(sequencia):
primeiro = sequencia[0]
if len(sequencia) == 1:
return primeiro
else:
menor = r_minimo(sequencia[1:])
return menor if menor < primeiro else primeiro
def r_maximo(sequencia):
primeiro = sequencia[0]
if len(sequencia) == 1:
return primeiro
else:
maior = r_maximo(sequencia[1:])
return maior if maior > primeiro else primeiro
return r_minimo(sequencia_numerica), r_maximo(sequencia_numerica)
def recursivo_minmax_1x(sequencia_numerica):
    primeiro = sequencia_numerica[0]
    if len(sequencia_numerica) == 1:
        return primeiro, primeiro
    else:
        # single pass: combine the first element with the min/max of the rest
        menor, maior = recursivo_minmax_1x(sequencia_numerica[1:])
        return (primeiro if primeiro < menor else menor,
                primeiro if primeiro > maior else maior)
# print(minimo_e_maximo([1, 2, 3, 4]))
# print(minimo_e_maximo([1, 3, 10, 12, 44, 2, 24, 25]))
# print(minimo_e_maximo([88, 66, 10, 2, 8]))
print(recursivo_minmax([1, 2, 3, 4]))
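A short worked example of the two helpers above (expected output shown in comments):
# Worked example for minimo_e_maximo and recursivo_minmax.
print(minimo_e_maximo([3, 1, 4, 1, 5, 9, 2, 6]))   # -> (1, 9)
print(recursivo_minmax([3, 1, 4, 1, 5, 9, 2, 6]))  # -> (1, 9)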
| 23.760563
| 72
| 0.627742
| 227
| 1,687
| 4.550661
| 0.343612
| 0.131655
| 0.050339
| 0.043562
| 0.212972
| 0.116167
| 0.116167
| 0.116167
| 0.116167
| 0.116167
| 0
| 0.038843
| 0.28275
| 1,687
| 70
| 73
| 24.1
| 0.814876
| 0.318317
| 0
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151515
| false
| 0
| 0.030303
| 0
| 0.424242
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e693c2c06b451b4433b40c8836d35627ae32d7b5
| 806
|
py
|
Python
|
docs/demos/theme_explorer/util.py
|
harisbal/dash-bootstrap-components
|
d7c91c08e0821ccfd81330db912cde71ec57c171
|
[
"Apache-2.0"
] | 1
|
2021-05-08T08:21:41.000Z
|
2021-05-08T08:21:41.000Z
|
docs/demos/theme_explorer/util.py
|
harisbal/dash-bootstrap-components
|
d7c91c08e0821ccfd81330db912cde71ec57c171
|
[
"Apache-2.0"
] | null | null | null |
docs/demos/theme_explorer/util.py
|
harisbal/dash-bootstrap-components
|
d7c91c08e0821ccfd81330db912cde71ec57c171
|
[
"Apache-2.0"
] | null | null | null |
import dash_bootstrap_components as dbc
import dash_html_components as html
DBC_DOCS = (
"https://dash-bootstrap-components.opensource.faculty.ai/docs/components/"
)
def make_subheading(label, link):
slug = label.replace(" ", "")
heading = html.H2(
html.Span(
[
label,
html.A(
html.I(className="fas fa-book fa-xs ml-2"),
href=f"{DBC_DOCS}{link}",
target="_blank",
id=f"tooltip_target_{slug}",
),
],
),
)
return html.Div(
[
heading,
dbc.Tooltip(
f"See {label} documentation", target=f"tooltip_target_{slug}"
),
],
className="mt-3",
)
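make_subheading() returns a Div containing an H2 whose book icon links to the dash-bootstrap-components docs, plus a tooltip bound to that icon. A hedged usage sketch inside a minimal Dash layout follows; the app object and surrounding layout are assumptions for illustration, not taken from the docs application.
# Hedged usage sketch for make_subheading (illustrative app and layout).
import dash
import dash_bootstrap_components as dbc
import dash_html_components as html

app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
app.layout = html.Div(
    [
        make_subheading("Alert", "alert/"),  # heading that links to .../docs/components/alert/
        dbc.Alert("This is an alert", color="primary"),
    ],
    className="p-5",
)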
| 23.028571
| 78
| 0.473945
| 79
| 806
| 4.683544
| 0.544304
| 0.054054
| 0.124324
| 0.097297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006237
| 0.403226
| 806
| 34
| 79
| 23.705882
| 0.762994
| 0
| 0
| 0.172414
| 0
| 0
| 0.233251
| 0.052109
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.068966
| 0
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e693c649026985a8de2994906ab2b8b27870d123
| 2,858
|
py
|
Python
|
pytorch_toolbox/visualization/visdom_logger.py
|
MathGaron/pytorch_toolbox
|
2afd13e50ba71dfce66467a4b070d9b922668502
|
[
"MIT"
] | 10
|
2018-02-26T04:51:11.000Z
|
2021-10-01T02:30:37.000Z
|
pytorch_toolbox/visualization/visdom_logger.py
|
MathGaron/pytorch_toolbox
|
2afd13e50ba71dfce66467a4b070d9b922668502
|
[
"MIT"
] | 9
|
2017-11-16T16:11:16.000Z
|
2020-02-13T13:10:55.000Z
|
pytorch_toolbox/visualization/visdom_logger.py
|
MathGaron/pytorch_toolbox
|
2afd13e50ba71dfce66467a4b070d9b922668502
|
[
"MIT"
] | 7
|
2018-02-12T19:06:14.000Z
|
2021-03-25T19:13:51.000Z
|
'''
The visualization class provides easy access to some of the visdom functionalities.
It accepts as input a number that will be plotted over time or an image of type np.ndarray.
'''
from visdom import Visdom
import numpy as np
import numbers
class VisdomLogger:
items_iterator = {}
items_to_visualize = {}
windows = {}
vis = Visdom()
def check_availability(vis):
# check if the Visdom server is running. only once.
is_done = vis.text('visdom check')
if is_done is False:
raise RuntimeError('Visdom server is not running. Run the server first: python -m visdom.server')
else:
print('Visdom available at: %s:%s' % (vis.server, vis.port))
vis.close() # close visdom check
check_availability(vis)
@classmethod
def visualize(cls, item, name, **args):
"""
Visualize an item in a new window (if the parameter "name" is not on the list of previously given names) or
updates an existing window identified by "name"
:param item: Item to be visualized (a number or a numpy image).
:param name: String to identify the item.
:param args: dict containing options for visdom
"""
if name not in cls.items_to_visualize:
cls.new_item(item, name, **args)
else:
cls.update_item(item, name, **args)
cls.items_to_visualize[name] = item
@classmethod
def new_item(cls, item, name, **args):
if isinstance(item, numbers.Number):
cls.items_iterator[name] = 0
win = cls.vis.line(
X=np.array([cls.items_iterator[name]]),
Y=np.array([item]),
opts=dict(title=name)
)
cls.windows[name] = win
elif isinstance(item, np.ndarray):
win = cls.vis.image(
item,
opts=args,
)
cls.windows[name] = win
else:
print("type {} not supported for visualization".format(type(item)))
@classmethod
def update_item(cls, item, name, **args):
if isinstance(item, numbers.Number):
cls.vis.line(
                # to plot the number we need to give its position on the x axis, hence we keep track of how many
                # times we have updated this item (stored in items_iterator)
X=np.array([cls.items_iterator[name]]),
Y=np.array([item]),
win=cls.windows[name],
update='append'
)
cls.items_iterator[name] += 1
elif isinstance(item, np.ndarray):
cls.vis.image(
item,
opts=args,
win=cls.windows[name]
)
else:
print("type {} not supported for visualization".format(type(item)))
| 35.283951
| 120
| 0.569979
| 357
| 2,858
| 4.507003
| 0.336134
| 0.048477
| 0.03729
| 0.04972
| 0.24363
| 0.210068
| 0.181479
| 0.181479
| 0.181479
| 0.181479
| 0
| 0.001053
| 0.335199
| 2,858
| 80
| 121
| 35.725
| 0.845789
| 0.246326
| 0
| 0.389831
| 0
| 0
| 0.094349
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0
| 0.050847
| 0
| 0.20339
| 0.050847
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e698cce58860b9d7c8249a1734c7596543b84bc7
| 1,843
|
py
|
Python
|
defects4cpp/errors/argparser.py
|
HansolChoe/defects4cpp
|
cb9e3db239c50e6ec38127cec117865f0ee7a5cf
|
[
"MIT"
] | 10
|
2021-06-23T01:53:19.000Z
|
2022-03-31T03:14:01.000Z
|
defects4cpp/errors/argparser.py
|
HansolChoe/defects4cpp
|
cb9e3db239c50e6ec38127cec117865f0ee7a5cf
|
[
"MIT"
] | 34
|
2021-05-27T01:09:04.000Z
|
2022-03-28T07:53:35.000Z
|
defects4cpp/errors/argparser.py
|
HansolChoe/defects4cpp
|
cb9e3db239c50e6ec38127cec117865f0ee7a5cf
|
[
"MIT"
] | 6
|
2021-09-03T07:16:56.000Z
|
2022-03-29T07:30:35.000Z
|
from pathlib import Path
from typing import Dict
from errors.common.exception import DppError
class DppArgparseError(DppError):
pass
class DppArgparseTaxonomyNotFoundError(DppArgparseError):
def __init__(self, taxonomy_name: str):
super().__init__(f"taxonomy '{taxonomy_name}' does not exist")
self.taxonomy_name: str = taxonomy_name
class DppArgparseNotProjectDirectory(DppArgparseError):
def __init__(self, path: Path):
super().__init__(f"directory '{str(path)}' is not a defect taxonomy project")
self.path: Path = path
class DppArgparseDefectIndexError(DppArgparseError):
def __init__(self, index: int):
super().__init__(f"invalid index '{index}' of defects")
self.index: int = index
class DppArgparseFileNotFoundError(DppArgparseError, FileNotFoundError):
def __init__(self, path: str):
super().__init__()
self.path: str = path
class DppArgparseInvalidEnvironment(DppArgparseError):
def __init__(self, value: str):
super().__init__(
f"invalid environment variable format '{value}' (should be KEY=VALUE)"
)
self.value: str = value
class DppArgparseInvalidConfigError(DppArgparseError):
def __init__(self):
super().__init__()
class DppArgparseConfigCorruptedError(DppArgparseError):
def __init__(self, data: Dict):
super().__init__(f"config is corrupted: {data}")
self.data = data
class DppArgparseInvalidCaseExpressionError(DppArgparseError):
def __init__(self, index: int, name: str, cases: int, expr: str):
super().__init__(
f"Defect#{index} of {name} has {cases} test cases, but expression was: {expr}"
)
self.index: int = index
self.name: str = name
self.cases: int = cases
self.expr: str = expr
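Each error type stores the offending value on the instance so callers can inspect it after catching. A hedged sketch of raising and handling one of them; the lookup helper and the taxonomy name are illustrative only, not part of defects4cpp.
# Hedged example of raising and handling DppArgparseTaxonomyNotFoundError.
def lookup_taxonomy(name: str, known: dict):
    # hypothetical helper used only for this illustration
    if name not in known:
        raise DppArgparseTaxonomyNotFoundError(name)
    return known[name]

try:
    lookup_taxonomy("yara", {})
except DppArgparseTaxonomyNotFoundError as e:
    print(e, "-", e.taxonomy_name)   # -> taxonomy 'yara' does not exist - yara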
| 29.253968
| 90
| 0.683668
| 196
| 1,843
| 6.081633
| 0.306122
| 0.060403
| 0.073826
| 0.158557
| 0.058725
| 0.058725
| 0
| 0
| 0
| 0
| 0
| 0
| 0.213239
| 1,843
| 62
| 91
| 29.725806
| 0.822069
| 0
| 0
| 0.139535
| 0
| 0.023256
| 0.162778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.186047
| false
| 0.023256
| 0.069767
| 0
| 0.465116
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e69993a645167fee1fbafcf116e0729c914350fa
| 15,381
|
py
|
Python
|
fold_cur_trans.py
|
lucasforever24/arcface_noonan
|
9d805a0d4d478e347a9084ad6ce24fe4c8dc5e65
|
[
"MIT"
] | null | null | null |
fold_cur_trans.py
|
lucasforever24/arcface_noonan
|
9d805a0d4d478e347a9084ad6ce24fe4c8dc5e65
|
[
"MIT"
] | null | null | null |
fold_cur_trans.py
|
lucasforever24/arcface_noonan
|
9d805a0d4d478e347a9084ad6ce24fe4c8dc5e65
|
[
"MIT"
] | null | null | null |
import cv2
from PIL import Image
import argparse
from pathlib import Path
from multiprocessing import Process, Pipe,Value,Array
import torch
from config import get_config
from mtcnn import MTCNN
from Learner_trans_tf import face_learner
from utils import load_facebank, draw_box_name, prepare_facebank, save_label_score, label_binarize
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score
from sklearn.model_selection import KFold
import os
import glob
import shutil
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import datetime
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='for face verification')
parser.add_argument("-ds", "--dataset_dir", help="where to get data", default="noonan", type=str)
parser.add_argument('-sd','--stored_result_dir',help='where to store data as np arrays',
default="results/trans/", type=str)
parser.add_argument("-k", "--kfold", help="returns the number of splitting iterations in the cross-validator.",
default=10, type=int)
parser.add_argument("-e", "--epochs", help="training epochs", default=20, type=int)
parser.add_argument("-n", "--names_considered", help="names for different types considered, separated by commas",
default="normal,noonan,others", type=str)
parser.add_argument("-g", "--gpu_id", help="gpu id to use", default="", type=str)
parser.add_argument("-s", "--use_shuffled_kfold", help="whether to use shuffled kfold.", action="store_true")
parser.add_argument("-rs", "--random_seed", help="random seed used for k-fold split.", default=6, type=int)
parser.add_argument("-tta", "--tta", help="whether test time augmentation",action="store_true")
parser.add_argument("-a", "--additional_data_dir", help="where to get the additional data",
default="", type=str)
parser.add_argument("-ta", "--additional_test_or_train", help="use additional data in only train, or test, or both",
default="", type=str)
parser.add_argument("-as", "--stylegan_data_dir", help="where to get the additional data",
default="", type=str)
parser.add_argument("-ts", "--stylegan_test_or_train", help="use stylegan data in only train, or test, or both",
default="", type=str)
parser.add_argument("-tf", "--transfer", help="how many layer(s) used for transfer learning, "
"but 0 means retraining the whole network.", default=0, type=int)
parser.add_argument("-ac", "--arch", help="types of model used for encoder", default="mobile", type=str)
args = parser.parse_args()
for arg in vars(args):
print(arg+':', getattr(args, arg))
emore_dir = 'faces_emore'
conf = get_config(True, args)
conf.emore_folder = conf.data_path/emore_dir
mtcnn = MTCNN()
print('mtcnn loaded')
names_considered = args.names_considered.strip().split(',')
exp_name = args.dataset_dir[:4]
if args.additional_data_dir:
if 'LAG' in args.additional_data_dir:
exp_name += '_lag'
elif 'literature' in args.additional_data_dir:
exp_name += '_ltr'
if args.kfold != 10:
exp_name += ('_k' + str(args.kfold))
if args.epochs != 20:
exp_name += ('_e' + str(args.epochs))
if args.transfer != 0 and args.transfer != 1:
exp_name += ('_td' + str(args.transfer))
if args.use_shuffled_kfold:
exp_name += ('_s' + str(args.random_seed))
print(exp_name)
# prepare folders
raw_dir = 'raw_112'
verify_type = 'trans'
if args.use_shuffled_kfold:
verify_type += '_shuffled'
# train_dir = conf.facebank_path/args.dataset_dir/verify_type/'train'
train_dir = conf.emore_folder/'imgs'
test_dir = conf.emore_folder/'test'
conf.facebank_path = train_dir
if os.path.exists(train_dir):
shutil.rmtree(train_dir)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.mkdir(train_dir)
os.mkdir(test_dir)
for name in names_considered:
os.makedirs(str(train_dir) + '/' + name, exist_ok=True)
os.makedirs(str(test_dir) + '/' + name, exist_ok=True)
if args.stylegan_data_dir:
#e.g. smile_refine_mtcnn_112_divi
full_stylegan_dir = str(conf.data_path/'facebank'/'stylegan'/args.stylegan_data_dir)
stylegan_folders = os.listdir(full_stylegan_dir)
if args.additional_data_dir:
full_additional_dir = str(conf.data_path/'facebank'/args.additional_data_dir)
# init kfold
if args.use_shuffled_kfold:
kf = KFold(n_splits=args.kfold, shuffle=True, random_state=args.random_seed)
else:
kf = KFold(n_splits=args.kfold, shuffle=False, random_state=None)
# collect and split raw data
data_dict = {}
idx_gen = {}
for name in names_considered:
tmp_list = glob.glob(str(conf.data_path/'facebank'/args.dataset_dir/raw_dir) +
'/' + name + '*')
if 'innm' in args.stylegan_data_dir:
tmp_list = tmp_list + glob.glob(str(full_stylegan_dir) + '/' + name + '*')
stylegan_folders = []
print(str(conf.data_path/'facebank'/args.dataset_dir/raw_dir))
data_dict[name] = np.array(tmp_list)
idx_gen[name] = kf.split(data_dict[name])
if 'literature' in args.additional_data_dir:
data_dict['ltr'] = np.array(glob.glob(str(full_additional_dir) + '/*'))
idx_gen['ltr'] = kf.split(data_dict['ltr'])
score_names = []
scores = []
wrong_names = []
args.stored_result_path = args.stored_result_dir + os.sep + str(datetime.datetime.now())[:19]
if not os.path.exists(args.stored_result_path):
os.mkdir(args.stored_result_path)
# for fold_idx, (train_index, test_index) in enumerate(kf.split(data_dict[names_considered[0]])):
for fold_idx in range(args.kfold):
train_set = {}
test_set = {}
for name in names_considered:
(train_index, test_index) = next(idx_gen[name])
train_set[name], test_set[name] = data_dict[name][train_index], data_dict[name][test_index]
if 'ltr' in data_dict.keys():
(train_index, test_index) = next(idx_gen['ltr'])
train_set['ltr'], test_set['ltr'] = data_dict['ltr'][train_index], data_dict['ltr'][test_index]
if 'train' in args.additional_test_or_train:
train_set['noonan'] = np.concatenate((train_set['noonan'], train_set['ltr']))
if 'test' in args.additional_test_or_train:
test_set['noonan'] = np.concatenate((test_set['noonan'], test_set['ltr']))
# remove previous data
prev = glob.glob(str(train_dir) + '/*/*')
for p in prev:
os.remove(p)
prev = glob.glob(str(test_dir) + '/*/*')
for p in prev:
os.remove(p)
# save trains to conf.facebank_path/args.dataset_dir/'train' and
# tests to conf.data_path/'facebank'/args.dataset_dir/'test'
# count unbalanced data
train_count = {}
test_count = {}
for name in names_considered:
train_count[name] = 0
for i in range(len(train_set[name])):
img_folder = str(train_set[name][i])
for img in os.listdir(img_folder):
shutil.copy(img_folder + os.sep + str(img),
os.path.join(str(train_dir), name, str(img)))
train_count[name] += 1
# addition data from stylegan
if 'interp' not in data_dict.keys():
folder = os.path.basename(train_set[name][i])
if args.stylegan_data_dir and ('train' in args.stylegan_test_or_train) and (folder in stylegan_folders):
for img in os.listdir(full_stylegan_dir + os.sep + folder):
shutil.copy(os.path.join(full_stylegan_dir, folder, str(img)),
os.path.join(str(train_dir), name, str(img)))
# ('/'.join(train_set[name][i].strip().split('/')[:-2]) +
# '/' + verify_type + '/train/' + name + os.sep + img))
train_count[name] += 1
# test
            test_count[name] = 0
            for i in range(len(test_set[name])):
img_folder = str(test_set[name][i])
for img in os.listdir(img_folder):
shutil.copy(img_folder + os.sep + str(img),
os.path.join(str(test_dir), name, str(img)))
test_count[name] += 1
# addition data from stylegan
if 'interp' not in data_dict.keys():
folder = os.path.basename(test_set[name][i])
if args.stylegan_data_dir and ('test' in args.stylegan_test_or_train) and (folder in stylegan_folders):
# and
# (folder not in ['noonan7','noonan19','noonan23','normal9','normal20','normal23'])):
for img in os.listdir(full_stylegan_dir + os.sep + folder):
shutil.copy(os.path.join(full_stylegan_dir, folder, str(img)),
os.path.join(str(test_dir), name, str(img)))
test_count[name] += 1
print(train_count, test_count)
# deal with unbalanced data
"""
if train_count['normal'] // train_count['noonan'] > 1:
aug_num = train_count['normal'] // train_count['noonan'] - 1
for img in os.listdir(os.path.join(str(train_dir), 'noonan')):
for aug_idx in range(aug_num):
aug_img = img[:img.rfind('.')] + '_' + str(aug_idx) + img[img.rfind('.'):]
shutil.copy(os.path.join(str(train_dir), 'noonan', img),
os.path.join(str(train_dir), 'noonan', aug_img))
"""
if 'fake' in args.additional_data_dir:
fake_dict = {'noonan':'normal', 'normal':'noonan'}
full_additional_dir = conf.data_path/'facebank'/'noonan+normal'/args.additional_data_dir
add_data = glob.glob(str(full_additional_dir) + os.sep + '*.png')
print('additional:', args.additional_data_dir, len(add_data))
for name in names_considered:
for img_f in add_data:
if name in img_f.strip().split(os.sep)[-1]:
# print('source:', img_f)
# print('copy to:', img_f.replace(str(full_additional_dir),
# str(train_dir) + os.sep + fake_dict[name]))
# print('copy to:', img_f.replace(args.additional_data_dir,
# verify_type + '/train/' + name))
shutil.copy(img_f, os.path.join(str(train_dir), fake_dict[name], os.path.basename(img_f)))
print(fold_idx)
print('datasets ready')
conf_train = get_config(True, args)
conf_train.emore_folder = conf.data_path/emore_dir
conf_train.stored_result_dir = args.stored_result_path
learner = face_learner(conf=conf_train, transfer=args.transfer, ext=exp_name+'_'+str(fold_idx))
# conf, inference=False, transfer=0
if args.transfer != 0:
learner.load_state(conf.save_path, False, True)
print('learner loaded')
learner.train(conf_train, args.epochs)
print('learner retrained.')
learner.save_state()
print('Model is saved')
# prepare_facebank
targets, names, names_idx = prepare_facebank(conf, learner.model, mtcnn, tta = args.tta)
print('names_classes:', names)
noonan_idx = names_idx['noonan']
print('facebank updated')
for path in test_dir.iterdir():
if path.is_file():
continue
# print(path)
for fil in path.iterdir():
# print(fil)
orig_name = ''.join([i for i in fil.name.strip().split('.')[0].split('_')[0] if not i.isdigit()])
for name in names_idx.keys():
if name in orig_name:
score_names.append(names_idx[name])
"""
if orig_name not in names_considered:
print("Un-considered name:", fil.name)
continue
"""
frame = cv2.imread(str(fil))
image = Image.fromarray(frame)
faces = [image,]
distance = learner.binfer(conf, faces, targets, args.tta)
label = score_names[-1]
score = np.exp(distance.dot(-1))
pred = np.argmax(score, 1)
if pred != label:
wrong_names.append(orig_name)
scores.append(score)
score_names = np.array(score_names)
wrong_names = np.array(wrong_names)
score_np = np.squeeze(np.array(scores))
n_classes = score_np.shape[1]
score_names = label_binarize(score_names, classes=range(n_classes))
score_sum = np.zeros([score_np.shape[0], 1])
for i in range(n_classes):
score_sum += score_np[:, i, None] # keep the dimension
relative_scores = (score_np / score_sum)
total_scores = relative_scores.ravel()
total_names = score_names.ravel()
name_path = os.path.join(args.stored_result_path, 'wrong_names.npy')
save_label_score(name_path, wrong_names)
label_path = os.path.join(args.stored_result_path, 'labels_trans.npy')
save_label_score(label_path, score_names)
score_path = os.path.join(args.stored_result_path, 'scores_trans.npy')
save_label_score(score_path, relative_scores)
print('saved!')
# Compute ROC curve and ROC area for noonan
fpr, tpr, _ = roc_curve(total_names, total_scores) #scores_np[:, noonan_idx]
roc_auc = auc(fpr, tpr)
# For PR curve
precision, recall, _ = precision_recall_curve(total_names, total_scores)
average_precision = average_precision_score(total_names, total_scores)
# plots
plt.figure()
colors = list(mcolors.TABLEAU_COLORS)
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.4f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC_{}'.format(exp_name))
plt.legend(loc="lower right")
plt.savefig(args.stored_result_path + os.sep + '/fp_tp_{}.png'.format(exp_name))
plt.close()
# plt.show()
plt.figure()
plt.step(recall, precision, where='post')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Average precision score ({}): AP={:0.4f}'.format(exp_name, average_precision))
plt.savefig(args.stored_result_path + os.sep + '/pr_{}.png'.format(exp_name))
plt.close()
| 44.453757
| 124
| 0.594565
| 1,997
| 15,381
| 4.354532
| 0.161242
| 0.013109
| 0.029324
| 0.024149
| 0.367985
| 0.273459
| 0.207452
| 0.152714
| 0.127875
| 0.120515
| 0
| 0.007363
| 0.275925
| 15,381
| 345
| 125
| 44.582609
| 0.773458
| 0.076653
| 0
| 0.173913
| 0
| 0
| 0.115918
| 0.005269
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.075099
| 0
| 0.075099
| 0.055336
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e699c205aa18e90414c7e2eebb09f229e7cbf13e
| 2,603
|
py
|
Python
|
examples/tryclass.py
|
manahter/dirio
|
c33fcd6c114ffb275d7147156c7041389fab6cfc
|
[
"MIT"
] | null | null | null |
examples/tryclass.py
|
manahter/dirio
|
c33fcd6c114ffb275d7147156c7041389fab6cfc
|
[
"MIT"
] | null | null | null |
examples/tryclass.py
|
manahter/dirio
|
c33fcd6c114ffb275d7147156c7041389fab6cfc
|
[
"MIT"
] | null | null | null |
import time
class TryClass:
value = 1
valu = 2
val = 3
va = 4
v = 5
def __init__(self, value=4):
print("Created TryClass :", self)
self.value = value
def metod1(self, value, val2=""):
self.value += value
print(f"\t>>> metod 1, add: {value}, now value : {self.value}, val2: {val2}")
time.sleep(2)
return self.value
@classmethod
def metod2(cls, value, val2=""):
cls.value = 2
print(f"\t>>> metod 2, add: {value}, now value : {cls.value}, val2: {val2}")
return cls.value
@staticmethod
def metod3(value, val2=""):
TryClass.value += value
print(f"\t>>> metod 3, add: {value}, now value : {TryClass.value}, val2: {val2}")
return TryClass.value
def event_call(other_arg, kwarg="-", result=None):
"""Call this metod, on returned result"""
print(f"Bind Result, {result}\n"*10)
print("other_arg", other_arg)
print("kwarg", kwarg)
if __name__ == "__main__":
try:
from dirio import Dirio
except:
from ..dirio import Dirio
dr_cls = Dirio(target=TryClass, args=(888,), kwargs={}, worker=False)
print("Starting values :", dr_cls.value, dr_cls)
print("\n"*2)
print("Wait 1 sec for your reply. metod 1 :", dr_cls.metod1(5, val2="1", dr_wait=1))
print("Wait until the reply comes. metod 1 :", dr_cls.metod1(5, val2="1", dr_wait=-1))
code0 = dr_cls.metod1(5, val2="1", dr_code=True)
print("Metod 1, call, via bind to func", dr_cls.dr_bind(code0, event_call, args=("OtHeR aRg", ), kwargs={"kwarg": "KwArG"}))
while True:
#
dr_cls.dr_binds_check()
print("Run the method and give us the response reading code : dr_code=True")
code1 = dr_cls.metod1(5, val2="1", dr_code=True)
print("Is there data in the reading code? : dr_code=43534")
while not dr_cls.metod1(dr_code=code1):
print("We are waiting for the data with this code :", code1)
time.sleep(.5)
print("Returned metod 1 data :", dr_cls.metod1(dr_code=code1))
print("Methods called this way give the last return value : nothing or dr_code=False")
code2 = dr_cls.metod2(10, val2="2", dr_code=True)
print("Search by code only :", dr_cls.dr_code(code2, wait=1))
print("Trying metod 2, called and returned :", dr_cls.metod2(10, val2="2", dr_code=False))
print("Trying metod 3, called and returned :", dr_cls.metod3(15, val2="3"))
print("\n"*2)
time.sleep(3)
dr_cls.dr_terminate()
| 30.988095
| 128
| 0.594314
| 382
| 2,603
| 3.921466
| 0.287958
| 0.053405
| 0.044059
| 0.032043
| 0.206275
| 0.18024
| 0.150868
| 0.11482
| 0.082777
| 0.082777
| 0
| 0.045502
| 0.257011
| 2,603
| 83
| 129
| 31.361446
| 0.729059
| 0.013446
| 0
| 0.034483
| 0
| 0.051724
| 0.310426
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086207
| false
| 0
| 0.051724
| 0
| 0.293103
| 0.362069
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e69c64799a3175f6ca7da109f5305d614b082638
| 487
|
py
|
Python
|
arrays/jump2/Solution.py
|
shahbagdadi/py-algo-n-ds
|
ff689534b771ddb4869b001b20a0e21b4896bb0a
|
[
"MIT"
] | null | null | null |
arrays/jump2/Solution.py
|
shahbagdadi/py-algo-n-ds
|
ff689534b771ddb4869b001b20a0e21b4896bb0a
|
[
"MIT"
] | null | null | null |
arrays/jump2/Solution.py
|
shahbagdadi/py-algo-n-ds
|
ff689534b771ddb4869b001b20a0e21b4896bb0a
|
[
"MIT"
] | null | null | null |
from typing import List
import sys
class Solution:
def jump(self, nums: List[int]) -> int:
if len(nums) <=1: return 0
l , r , jumps = 0, nums[0] , 1
while r < len(nums)-1 :
jumps += 1
# you can land anywhere between l & r+1 in a jump and then use Num[i] to jump from there
nxt = max( i + nums[i] for i in range(l, r+1))
l , r = r, nxt
return jumps
s = Solution()
ans = s.jump([2, 3, 1, 1, 4])  # expected 2; the original input [3, 2, 1, 0, 4] cannot reach the last index and loops forever
print(ans)
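A short trace of the greedy window for the corrected input (comments only, matching the logic above):
# Trace for nums = [2, 3, 1, 1, 4]:
#   start: l=0, r=nums[0]=2, jumps=1          (first jump can land on index 1 or 2)
#   r=2 < 4: jumps=2, nxt=max(0+2, 1+3, 2+1)=4, so l, r = 2, 4
#   r=4 already covers the last index -> return 2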
| 27.055556
| 100
| 0.523614
| 85
| 487
| 3
| 0.529412
| 0.031373
| 0.062745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044164
| 0.349076
| 487
| 18
| 101
| 27.055556
| 0.760252
| 0.176591
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.357143
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e69c81543af0469c06adb5c970083f2d456e2ede
| 1,881
|
py
|
Python
|
share/tests.py
|
shared-tw/shared-tw
|
90dcf92744b4e0ec9e9aa085026b5543c9c3922c
|
[
"MIT"
] | 2
|
2021-12-09T10:39:37.000Z
|
2022-02-22T09:01:26.000Z
|
share/tests.py
|
shared-tw/backend
|
90dcf92744b4e0ec9e9aa085026b5543c9c3922c
|
[
"MIT"
] | 3
|
2021-07-03T12:56:38.000Z
|
2021-07-04T05:53:43.000Z
|
share/tests.py
|
shared-tw/shared-tw
|
90dcf92744b4e0ec9e9aa085026b5543c9c3922c
|
[
"MIT"
] | null | null | null |
import unittest
from . import states
class DonationStateTestCase(unittest.TestCase):
def test_approve_pending_state(self):
approve_pending_statue = states.PendingApprovalState()
approved_event = states.DonationApprovedEvent()
self.assertIsInstance(
approve_pending_statue.apply(approved_event),
states.PendingDispatchState,
)
cancelled_event = states.DonationCancelledEvent()
self.assertIsInstance(
approve_pending_statue.apply(cancelled_event), states.CancelledState
)
dispatch_event = states.DonationDispatchedEvent()
self.assertIsInstance(
approve_pending_statue.apply(dispatch_event), states.InvalidState
)
def test_dispatch_pending_state(self):
dispatch_pending_state = states.PendingDispatchState()
donation_dispatched_event = states.DonationDispatchedEvent()
self.assertIsInstance(
dispatch_pending_state.apply(donation_dispatched_event),
states.DoneState,
)
cancelled_event = states.DonationCancelledEvent()
self.assertIsInstance(
dispatch_pending_state.apply(cancelled_event), states.CancelledState
)
approved_event = states.DonationApprovedEvent()
self.assertIsInstance(
dispatch_pending_state.apply(approved_event), states.InvalidState
)
def test_collect_pending_state(self):
collect_pending_state = states.PendingDeliveryState()
collected_event = states.DonationDeliveredEvent()
self.assertIsInstance(
collect_pending_state.apply(collected_event), states.DoneState
)
cancelled_event = states.DonationCancelledEvent()
self.assertIsInstance(
collect_pending_state.apply(cancelled_event), states.InvalidState
)
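The tests above imply a small state machine in which each state's apply(event) returns the next state and unexpected events yield InvalidState. The following is a hedged, minimal sketch of what such a states module could look like; it is an assumption for illustration, not the shared-tw implementation.
# Hedged sketch of the state/event shape exercised by the tests above.
class DonationApprovedEvent: pass
class DonationCancelledEvent: pass
class DonationDispatchedEvent: pass

class InvalidState: pass
class CancelledState: pass
class DoneState: pass

class PendingDispatchState:
    def apply(self, event):
        if isinstance(event, DonationDispatchedEvent):
            return DoneState()
        if isinstance(event, DonationCancelledEvent):
            return CancelledState()
        return InvalidState()

class PendingApprovalState:
    def apply(self, event):
        if isinstance(event, DonationApprovedEvent):
            return PendingDispatchState()
        if isinstance(event, DonationCancelledEvent):
            return CancelledState()
        return InvalidState()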
| 33
| 80
| 0.700159
| 156
| 1,881
| 8.128205
| 0.217949
| 0.138801
| 0.094637
| 0.080442
| 0.646688
| 0.507886
| 0.129338
| 0.129338
| 0.129338
| 0
| 0
| 0
| 0.233918
| 1,881
| 56
| 81
| 33.589286
| 0.879944
| 0
| 0
| 0.302326
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 1
| 0.069767
| false
| 0
| 0.046512
| 0
| 0.139535
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e69ec2353a5fed95b6dce8a05f828517c6009931
| 2,137
|
py
|
Python
|
app/extensions.py
|
grow/airpress
|
b46e951b27b8216f51f0fade3695049455866825
|
[
"MIT"
] | 1
|
2017-07-07T20:15:14.000Z
|
2017-07-07T20:15:14.000Z
|
app/extensions.py
|
grow/airpress
|
b46e951b27b8216f51f0fade3695049455866825
|
[
"MIT"
] | 4
|
2020-03-24T15:24:51.000Z
|
2021-06-01T21:42:43.000Z
|
app/extensions.py
|
grow/airpress
|
b46e951b27b8216f51f0fade3695049455866825
|
[
"MIT"
] | 1
|
2016-12-15T00:03:13.000Z
|
2016-12-15T00:03:13.000Z
|
from jinja2 import nodes
from jinja2.ext import Extension
class FragmentCacheExtension(Extension):
# a set of names that trigger the extension.
tags = set(['cache'])
def __init__(self, environment):
super(FragmentCacheExtension, self).__init__(environment)
# add the defaults to the environment
environment.extend(
fragment_cache_prefix='fragment',
fragment_cache=None
)
def parse(self, parser):
# the first token is the token that started the tag. In our case
# we only listen to ``'cache'`` so this will be a name token with
# `cache` as value. We get the line number so that we can give
# that line number to the nodes we create by hand.
lineno = next(parser.stream).lineno
# now we parse a single expression that is used as cache key.
args = [parser.parse_expression()]
# if there is a comma, the user provided a timeout. If not use
# None as second parameter.
if parser.stream.skip_if('comma'):
args.append(parser.parse_expression())
else:
args.append(nodes.Const(None))
# now we parse the body of the cache block up to `endcache` and
# drop the needle (which would always be `endcache` in that case)
body = parser.parse_statements(['name:endcache'], drop_needle=True)
# now return a `CallBlock` node that calls our _cache_support
# helper method on this extension.
return nodes.CallBlock(self.call_method('_cache_support', args),
[], [], body).set_lineno(lineno)
def _cache_support(self, name, timeout, caller):
"""Helper callback."""
key = self.environment.fragment_cache_prefix + name
# try to load the block from the cache
# if there is no fragment in the cache, render it and store
# it in the cache.
rv = self.environment.fragment_cache.get(key)
if rv is not None:
return rv
rv = caller()
self.environment.fragment_cache.add(key, rv, timeout)
return rv
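To use the extension, the Jinja2 environment needs a fragment_cache object exposing get(key) and add(key, value, timeout). A hedged usage sketch with a toy dict-backed cache follows; the cache class is purely illustrative (in practice a memcached-style client would be plugged in).
# Hedged usage sketch: wiring FragmentCacheExtension into an environment.
from jinja2 import Environment

class DictCache:
    """Toy cache with the get/add interface the extension expects (timeout ignored)."""
    def __init__(self):
        self._data = {}

    def get(self, key):
        return self._data.get(key)

    def add(self, key, value, timeout=None):
        self._data.setdefault(key, value)

env = Environment(extensions=[FragmentCacheExtension])
env.fragment_cache = DictCache()

template = env.from_string(
    "{% cache 'sidebar', 300 %}expensive sidebar for {{ user }}{% endcache %}"
)
print(template.render(user="alice"))   # first render fills the cache; later renders reuse it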
| 37.491228
| 75
| 0.630323
| 282
| 2,137
| 4.677305
| 0.393617
| 0.04928
| 0.052312
| 0.063685
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001318
| 0.289658
| 2,137
| 56
| 76
| 38.160714
| 0.867589
| 0.380908
| 0
| 0.071429
| 0
| 0
| 0.034562
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.071429
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6a26bf564f5d9a437cee65264d1566e43a4893e
| 10,198
|
py
|
Python
|
flatlander/runner/experiment_runner.py
|
wullli/flatlander
|
2c7fbd3d025f2a05c40895ec735a92d7a6bfb1ad
|
[
"MIT"
] | 3
|
2020-12-30T04:18:42.000Z
|
2022-03-17T13:15:30.000Z
|
flatlander/runner/experiment_runner.py
|
wullli/flatlander
|
2c7fbd3d025f2a05c40895ec735a92d7a6bfb1ad
|
[
"MIT"
] | null | null | null |
flatlander/runner/experiment_runner.py
|
wullli/flatlander
|
2c7fbd3d025f2a05c40895ec735a92d7a6bfb1ad
|
[
"MIT"
] | null | null | null |
import os
from argparse import ArgumentParser
from pathlib import Path
import gym
import ray
import ray.tune.result as ray_results
import yaml
from gym.spaces import Tuple
from ray.cluster_utils import Cluster
from ray.rllib.utils import try_import_tf, try_import_torch
from ray.tune import run_experiments, register_env
from ray.tune.logger import TBXLogger
from ray.tune.resources import resources_to_json
from ray.tune.tune import _make_scheduler
from ray.tune.utils import merge_dicts
from flatlander.envs import get_eval_config
from flatlander.envs.flatland_sparse import FlatlandSparse
from flatlander.envs.observations import make_obs
from flatlander.envs.utils.global_gym_env import GlobalFlatlandGymEnv
from flatlander.envs.utils.gym_env_fill_missing import FillingFlatlandGymEnv
from flatlander.logging.custom_metrics import on_episode_end
from flatlander.logging.wandb_logger import WandbLogger
from flatlander.utils.loader import load_envs, load_models
ray_results.DEFAULT_RESULTS_DIR = os.path.join(os.getcwd(), "..", "..", "..", "flatland-challenge-data/results")
class ExperimentRunner:
group_algorithms = ["QMIX", "QMIXApex"]
def __init__(self):
self.tf = try_import_tf()
self.torch, _ = try_import_torch()
load_envs(os.path.dirname(__file__))
load_models(os.path.dirname(__file__))
@staticmethod
def get_experiments(run_args, arg_parser: ArgumentParser = None):
if run_args.config_file:
with open(run_args.config_file) as f:
experiments = yaml.safe_load(f)
else:
experiments = {
run_args.experiment_name: { # i.e. log to ~/ray_results/default
"run": run_args.run,
"checkpoint_freq": run_args.checkpoint_freq,
"keep_checkpoints_num": run_args.keep_checkpoints_num,
"checkpoint_score_attr": run_args.checkpoint_score_attr,
"local_dir": run_args.local_dir,
"resources_per_trial": (
run_args.resources_per_trial and
resources_to_json(run_args.resources_per_trial)),
"stop": run_args.stop,
"config": dict(run_args.config, env=run_args.env),
"restore": run_args.restore,
"num_samples": run_args.num_samples,
"upload_dir": run_args.upload_dir,
}
}
if arg_parser is not None:
for exp in experiments.values():
if not exp.get("run"):
arg_parser.error("the following arguments are required: --run")
if not exp.get("envs") and not exp.get("config", {}).get("envs"):
arg_parser.error("the following arguments are required: --envs")
return experiments
@staticmethod
def setup_grouping(config: dict):
grouping = {
"group_1": list(range(config["env_config"]["max_n_agents"])),
}
obs_space = Tuple([make_obs(config["env_config"]["observation"],
config["env_config"]["observation_config"]).observation_space()
for _ in range(config["env_config"]["max_n_agents"])])
act_space = Tuple([GlobalFlatlandGymEnv.action_space for _ in range(config["env_config"]["max_n_agents"])])
register_env(
"flatland_sparse_grouped",
lambda config: FlatlandSparse(config).with_agent_groups(
grouping, obs_space=obs_space, act_space=act_space))
def setup_policy_map(self, config: dict):
obs_space = make_obs(config["env_config"]["observation"],
config["env_config"]["observation_config"]).observation_space()
config["multiagent"] = {
"policies": {"pol_" + str(i): (None, obs_space, FillingFlatlandGymEnv.action_space, {"agent_id": i})
for i in range(config["env_config"]["observation_config"]["max_n_agents"])},
"policy_mapping_fn": lambda agent_id: "pol_" + str(agent_id)}
def setup_hierarchical_policies(self, config: dict):
obs_space: gym.spaces.Tuple = make_obs(config["env_config"]["observation"],
config["env_config"]["observation_config"]).observation_space()
config["multiagent"] = {
"policies": {"meta": (None, obs_space.spaces[0], gym.spaces.Box(high=1, low=0, shape=(1,)), {}),
"agent": (None, obs_space.spaces[1], FillingFlatlandGymEnv.action_space, {})
},
"policy_mapping_fn": lambda agent_id: "meta" if 'meta' in str(agent_id) else "agent"
}
def apply_args(self, run_args, experiments: dict):
verbose = 1
webui_host = '127.0.0.1'
for exp in experiments.values():
if run_args.eager:
exp["config"]["eager"] = True
if run_args.torch:
exp["config"]["use_pytorch"] = True
if run_args.v:
exp["config"]["log_level"] = "INFO"
verbose = 2
if run_args.vv:
exp["config"]["log_level"] = "DEBUG"
verbose = 3
if run_args.trace:
if not exp["config"].get("eager"):
raise ValueError("Must enable --eager to enable tracing.")
exp["config"]["eager_tracing"] = True
if run_args.bind_all:
webui_host = "0.0.0.0"
if run_args.log_flatland_stats:
exp['config']['callbacks'] = {
'on_episode_end': on_episode_end,
}
return experiments, verbose
@staticmethod
def evaluate(exp):
eval_configs = get_eval_config(exp['config'].get('env_config',
{}).get('eval_generator', "default"))
eval_seed = eval_configs.get('evaluation_config', {}).get('env_config', {}).get('seed')
# add evaluation config to the current config
exp['config'] = merge_dicts(exp['config'], eval_configs)
if exp['config'].get('evaluation_config'):
exp['config']['evaluation_config']['env_config'] = exp['config'].get('env_config')
eval_env_config = exp['config']['evaluation_config'].get('env_config')
if eval_seed and eval_env_config:
# We override the envs seed from the evaluation config
eval_env_config['seed'] = eval_seed
# Remove any wandb related configs
if eval_env_config:
if eval_env_config.get('wandb'):
del eval_env_config['wandb']
# Remove any wandb related configs
if exp['config']['evaluation_config'].get('wandb'):
del exp['config']['evaluation_config']['wandb']
def run(self, experiments: dict, args=None):
verbose = 1
webui_host = "localhost"
for exp in experiments.values():
if exp.get("config", {}).get("input"):
if not isinstance(exp.get("config", {}).get("input"), dict):
if not os.path.exists(exp["config"]["input"]):
rllib_dir = Path(__file__).parent
input_file = rllib_dir.absolute().joinpath(exp["config"]["input"])
exp["config"]["input"] = str(input_file)
if exp["run"] in self.group_algorithms:
self.setup_grouping(exp.get("config"))
if exp["run"] == "contrib/MADDPG" or exp["config"].get("individual_policies", False):
self.setup_policy_map(exp.get("config"))
if exp["config"].get("individual_policies", False):
del exp["config"]["individual_policies"]
if exp["run"] == "contrib/MADDPG":
exp.get("config")["env_config"]["learning_starts"] = 100
exp.get("config")["env_config"]["actions_are_logits"] = True
if exp["env"] == "flatland_sparse_hierarchical":
self.setup_hierarchical_policies(exp.get("config"))
if args is not None:
experiments, verbose = self.apply_args(run_args=args, experiments=experiments)
if args.eval:
self.evaluate(exp)
if args.config_file:
# TODO should be in exp['config'] directly
exp['config']['env_config']['yaml_config'] = args.config_file
exp['loggers'] = [WandbLogger, TBXLogger]
if args.ray_num_nodes:
cluster = Cluster()
for _ in range(args.ray_num_nodes):
cluster.add_node(
num_cpus=args.ray_num_cpus or 1,
num_gpus=args.ray_num_gpus or 1,
object_store_memory=args.ray_object_store_memory,
memory=args.ray_memory,
redis_max_memory=args.ray_redis_max_memory)
ray.init(address=cluster.address)
else:
import multiprocessing
n_cpu = multiprocessing.cpu_count()
import tensorflow as tf
n_gpu = len(tf.config.experimental.list_physical_devices('GPU'))
print("NUM_CPUS AVAILABLE: ", n_cpu)
print("NUM_GPUS AVAILABLE: ", n_gpu)
print("NUM_CPUS ARGS: ", args.ray_num_cpus)
print("NUM_GPUS ARGS: ", args.ray_num_gpus)
ray.init(
local_mode=True if args.local else False,
address=args.ray_address,
object_store_memory=args.ray_object_store_memory,
num_cpus=args.ray_num_cpus if args.ray_num_cpus is not None else n_cpu,
num_gpus=args.ray_num_gpus if args.ray_num_gpus is not None else n_gpu)
run_experiments(
experiments,
scheduler=_make_scheduler(args),
queue_trials=args.queue_trials,
resume=args.resume,
verbose=verbose,
concurrent=True)
ray.shutdown()
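The evaluate() step above relies on a deep, recursive merge of the evaluation config into the experiment config. The helper below is a minimal sketch of the merge semantics that code is assumed to depend on (not Ray's actual merge_dicts implementation):

def deep_merge(base: dict, override: dict) -> dict:
    # Recursively merge `override` into a copy of `base`: nested dicts are
    # merged key by key, every other value is simply overwritten.
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = value
    return merged

config = {"env_config": {"seed": 1}, "lr": 1e-4}
eval_overrides = {"evaluation_config": {"env_config": {"seed": 42}}}
print(deep_merge(config, eval_overrides))
# {'env_config': {'seed': 1}, 'lr': 0.0001, 'evaluation_config': {'env_config': {'seed': 42}}}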
| 44.72807
| 115
| 0.585507
| 1,175
| 10,198
| 4.812766
| 0.205957
| 0.032184
| 0.037135
| 0.032184
| 0.260654
| 0.174182
| 0.102564
| 0.097259
| 0.06649
| 0.06649
| 0
| 0.003515
| 0.30251
| 10,198
| 227
| 116
| 44.92511
| 0.791509
| 0.02324
| 0
| 0.088542
| 0
| 0
| 0.150794
| 0.010348
| 0
| 0
| 0
| 0.004405
| 0
| 1
| 0.041667
| false
| 0
| 0.140625
| 0
| 0.203125
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6a4e0e5dfdac6166da22e4d8c2409f996b05e0d
| 7,273
|
py
|
Python
|
syslib/utils_keywords.py
|
rahulmah/sample-cloud-native-toolchain-tutorial-20170720084529291
|
08540c0f083a25b5b4e7a4c839080fe54383038c
|
[
"Apache-2.0"
] | 1
|
2019-01-19T09:32:18.000Z
|
2019-01-19T09:32:18.000Z
|
syslib/utils_keywords.py
|
rahulmah/sample-cloud-native-toolchain-tutorial-20170720084529291
|
08540c0f083a25b5b4e7a4c839080fe54383038c
|
[
"Apache-2.0"
] | null | null | null |
syslib/utils_keywords.py
|
rahulmah/sample-cloud-native-toolchain-tutorial-20170720084529291
|
08540c0f083a25b5b4e7a4c839080fe54383038c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
r"""
This module contains keyword functions to supplement Robot Framework's built-in
keywords, for use in tests where the generic keywords are not sufficient.
"""
import time
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries import DateTime
import re
###############################################################################
def run_until_keyword_fails(retry, retry_interval, name, *args):
r"""
Execute a robot keyword repeatedly until it either fails or the timeout
value is exceeded.
Note: Opposite of robot keyword "Wait Until Keyword Succeeds".
Description of argument(s):
retry Max timeout time in hour(s).
retry_interval Time interval in minute(s) for looping.
name Robot keyword to execute.
args Robot keyword arguments.
"""
# Convert the retry time to seconds
retry_seconds = DateTime.convert_time(retry)
timeout = time.time() + int(retry_seconds)
# Convert the interval time to seconds
interval_seconds = DateTime.convert_time(retry_interval)
interval = int(interval_seconds)
BuiltIn().log(timeout)
BuiltIn().log(interval)
while True:
status = BuiltIn().run_keyword_and_return_status(name, *args)
# Return False if the keyword fails (the expected outcome).
if status is False:
BuiltIn().log("Failed as expected")
return False
# Return True once the retry timeout is exceeded.
elif time.time() > timeout > 0:
BuiltIn().log("Max retry timeout")
return True
time.sleep(interval)
BuiltIn().log(time.time())
return True
###############################################################################
###############################################################################
def htx_error_log_to_list(htx_error_log_output):
r"""
Parse an htx error log output string and return a list of entries, each a
list of strings in the form "<field name>:<field value>".
The output of this function may be passed to the build_error_dict function.
Description of argument(s):
htx_error_log_output Error entry string containing the stdout
generated by "htxcmdline -geterrlog".
Example of htx_error_log_output contents:
######################## Result Starts Here ###############################
Currently running ECG/MDT : /usr/lpp/htx/mdt/mdt.whit
===========================
---------------------------------------------------------------------
Device id:/dev/nvidia0
Timestamp:Mar 29 19:41:54 2017
err=00000027
sev=1
Exerciser Name:hxenvidia
Serial No:Not Available
Part No:Not Available
Location:Not Available
FRU Number:Not Available
Device:Not Available
Error Text:cudaEventSynchronize for stopEvent returned err = 0039 from file
, line 430.
---------------------------------------------------------------------
---------------------------------------------------------------------
Device id:/dev/nvidia0
Timestamp:Mar 29 19:41:54 2017
err=00000027
sev=1
Exerciser Name:hxenvidia
Serial No:Not Available
Part No:Not Available
Location:Not Available
FRU Number:Not Available
Device:Not Available
Error Text:Hardware Exerciser stopped on error
---------------------------------------------------------------------
######################### Result Ends Here ################################
Example output:
Returns the list of error strings for each entry
['Device id:/dev/nvidia0',
'Timestamp:Mar 29 19:41:54 2017',
'err=00000027',
'sev=1',
'Exerciser Name:hxenvidia',
'Serial No:Not Available',
'Part No:Not Available',
'Location:Not Available',
'FRU Number:Not Available',
'Device:Not Available',
'Error Text:cudaEventSynchronize for stopEvent returned err = 0039
from file , line 430.']
"""
# List which will hold each entry's list of field strings.
error_list = []
temp_error_list = []
parse_walk = False
for line in htx_error_log_output.splitlines():
# Skip lines starting with "#"
if line.startswith("#"):
continue
# Mark line starting with "-" and set parse flag.
if line.startswith("-") and parse_walk is False:
parse_walk = True
continue
# Mark line starting with "-" and reset parse flag.
# Set temp error list to EMPTY.
elif line.startswith("-"):
error_list.append(temp_error_list)
parse_walk = False
temp_error_list = []
# Add entry to list if line is not empty
elif parse_walk:
temp_error_list.append(str(line))
return error_list
###############################################################################
###############################################################################
def build_error_dict(htx_error_log_output):
r"""
Build the parsed error entries into a dictionary of dictionaries, keyed by entry index.
Description of argument(s):
htx_error_log_output Error log output string, as produced by "htxcmdline -geterrlog".
Example output dictionary:
{
0:
{
'sev': '1',
'err': '00000027',
'Timestamp': 'Mar 29 19:41:54 2017',
'Part No': 'Not Available',
'Serial No': 'Not Available',
'Device': 'Not Available',
'FRU Number': 'Not Available',
'Location': 'Not Available',
'Device id': '/dev/nvidia0',
'Error Text': 'cudaEventSynchronize for stopEvent returned err = 0039
from file , line 430.',
'Exerciser Name': 'hxenvidia'
},
1:
{
'sev': '1',
'err': '00000027',
'Timestamp': 'Mar 29 19:41:54 2017',
'Part No': 'Not Available',
'Serial No': 'Not Available',
'Device': 'Not Available',
'FRU Number': 'Not Available',
'Location': 'Not Available',
'Device id': '/dev/nvidia0',
'Error Text': 'Hardware Exerciser stopped on error',
'Exerciser Name': 'hxenvidia'
}
},
"""
# List which will hold each entry's list of field strings.
error_list = []
error_list = htx_error_log_to_list(htx_error_log_output)
# Dictionary which holds the error dictionary entries.
error_dict = {}
temp_error_dict = {}
error_index = 0
# Loop through the error list.
for entry_list in error_list:
# Loop through the first error list entry.
for entry in entry_list:
# Split string into list for key value update.
# Example: 'Device id:/dev/nvidia0'
# Example: 'err=00000027'
parm_split = re.split("[:=]", entry)
# Populate temp dictionary with key value pair data.
temp_error_dict[str(parm_split[0])] = parm_split[1]
# Update the master dictionary per entry index.
error_dict[error_index] = temp_error_dict
# Reset temp dict to EMPTY and increment index count.
temp_error_dict = {}
error_index += 1
return error_dict
###############################################################################
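A minimal sketch of how the two parsers above fit together; the import path and the sample log text are assumptions (the module also needs robotframework installed for its BuiltIn import):

from utils_keywords import htx_error_log_to_list, build_error_dict  # hypothetical import path

sample_log = '''
---------------------------------------------------------------------
Device id:/dev/nvidia0
err=00000027
sev=1
---------------------------------------------------------------------
'''
print(htx_error_log_to_list(sample_log))
# [['Device id:/dev/nvidia0', 'err=00000027', 'sev=1']]
print(build_error_dict(sample_log))
# {0: {'Device id': '/dev/nvidia0', 'err': '00000027', 'sev': '1'}}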
| 32.61435
| 79
| 0.54063
| 793
| 7,273
| 4.851198
| 0.248424
| 0.077983
| 0.036392
| 0.030933
| 0.397713
| 0.3647
| 0.334546
| 0.31843
| 0.31843
| 0.300754
| 0
| 0.027405
| 0.262478
| 7,273
| 222
| 80
| 32.761261
| 0.689784
| 0.586828
| 0
| 0.210526
| 0
| 0
| 0.020125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.070175
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6a5f147ff440a3daeccaecdee477658d01cb25a
| 4,044
|
py
|
Python
|
DBParser/DBMove.py
|
lelle1234/Db2Utils
|
55570a1afbe6d4abe61c31952bc178c2443f4e5b
|
[
"Apache-2.0"
] | 4
|
2020-02-27T13:56:37.000Z
|
2022-02-07T23:07:24.000Z
|
DBParser/DBMove.py
|
lelle1234/Db2Utils
|
55570a1afbe6d4abe61c31952bc178c2443f4e5b
|
[
"Apache-2.0"
] | null | null | null |
DBParser/DBMove.py
|
lelle1234/Db2Utils
|
55570a1afbe6d4abe61c31952bc178c2443f4e5b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import ibm_db
import getopt
import sys
import os
from toposort import toposort_flatten
db = None
host = "localhost"
port = "50000"
user = None
pwd = None
outfile = None
targetdb = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:d:P:u:p:o:t:")
except getopt.GetoptError:
sys.exit(-1)
for o, a in opts:
if o == "-d":
db = a
if o == "-h":
host = a
if o == "-P":
port = a
if o == "-u":
user = a
if o == "-p":
pwd = a
if o == "-t":
targetdb = a
if db is None or user is None or pwd is None or targetdb is None:
print("Usage: DBMove.py [-h <host> -P <port>] -d <db> -u <user> -p <pwd> -t <target>")
sys.exit(1)
db = db.upper()
targetdb = targetdb.upper()
cfg = (db, host, port, user, pwd)
conn = ibm_db.connect("DATABASE=%s; HOSTNAME=%s; PORT=%s; PROTOCOL=TCPIP; UID=%s; PWD=%s" % cfg, "", "")
get_db_type = "values nya.get_db_type()"
find_edges = """
SELECT rtrim(t.tabschema) || '.' || rtrim(t.tabname)
, coalesce(rtrim(r.reftabschema) || '.' || rtrim(r.reftabname), 'dummy')
FROM syscat.tables t
LEFT JOIN syscat.references r
ON (t.tabschema, t.tabname) = (r.tabschema, r.tabname)
WHERE t.tabschema not like 'SYS%'
AND t.type = 'T'
AND rtrim(t.tabschema) not like 'NYA_%'
AND t.tabschema <> 'TMP'
ORDER BY 1
"""
identity_skip = """
select rtrim(tabschema) || '.' || rtrim(tabname) from syscat.columns
where identity = 'Y' and generated = 'D'
"""
stmt = ibm_db.prepare(conn, get_db_type)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
db_type = tpl[0]
edges = dict()
stmt = ibm_db.prepare(conn, find_edges)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
while tpl:
n1, n2 = tpl
try:
edges[n1].add(n2)
except KeyError:
edges[n1] = set()
edges[n1].add(n2)
tpl = ibm_db.fetch_tuple(stmt)
sorted_nodes = list(toposort_flatten(edges))
# print(sorted_nodes)
identity_skip_arr = []
edges = dict()
stmt = ibm_db.prepare(conn, identity_skip)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
while tpl:
identity_skip_arr.append(tpl[0])
tpl = ibm_db.fetch_tuple(stmt)
# print(identity_skip)
os.makedirs(db, exist_ok=True)
export_file = open("%s/export.sql" % db, "w")
load_file = open("%s/load.sql" % db, "w")
export_file.write("connect to %s;\n" % db)
load_file.write("connect to %s;\n" % targetdb)
if db_type == "N":
load_file.write("""set integrity for nya.person off;\n""")
load_file.write("""alter table nya.person
alter column EMAIL_UC drop generated
alter column NORMALIZED_FIRSTNAME drop generated
alter column NORMALIZED_LASTNAME drop generated;\n""")
load_file.write("""set integrity for nya.person immediate checked;\n""")
for t in sorted_nodes:
if t == "dummy":
continue
export_file.write("export to %s.ixf of ixf lobs to . modified by codepage=819 messages export_%s.msg select * from %s;\n" % (t,t,t))
identityskip = "identityoverride"
if t in identity_skip_arr:
identityskip = " "
load_file.write("load from %s.ixf of ixf lobs from . modified by generatedoverride %s messages load_%s.msg replace into %s;\n" % (t, identityskip, t, t))
if db_type == "N":
load_file.write("""set integrity for nya.person off;\n""")
load_file.write("""alter table nya.person
alter column EMAIL_UC set generated always as ( upper(email))
alter column NORMALIZED_FIRSTNAME set generated always as ( NYA.REMOVE_DIACRITICS( FIRSTNAME ) )
alter column NORMALIZED_LASTNAME set generated always as ( NYA.REMOVE_DIACRITICS( LASTNAME ) );\n""")
load_file.write("""set integrity for nya.person immediate checked force generated;\n""")
load_file.write("""echo set integrity for all tables;\n""")
export_file.write("connect reset;\n")
load_file.write("connect reset;\n")
export_file.close()
load_file.close()
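The export/load ordering above comes from toposort_flatten: each table is keyed by the set of tables it references, so parent tables always sort before the children that reference them. A small sketch with made-up table names:

from toposort import toposort_flatten

# table -> set of tables it references ('dummy' stands in for "no parent", as above)
edges = {
    "APP.ORDER_LINE": {"APP.ORDERS"},
    "APP.ORDERS": {"APP.CUSTOMER"},
    "APP.CUSTOMER": {"dummy"},
}
print(toposort_flatten(edges))
# ['dummy', 'APP.CUSTOMER', 'APP.ORDERS', 'APP.ORDER_LINE']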
| 29.304348
| 157
| 0.633778
| 605
| 4,044
| 4.117355
| 0.261157
| 0.026094
| 0.052188
| 0.044962
| 0.340426
| 0.264151
| 0.23043
| 0.175833
| 0.175833
| 0.175833
| 0
| 0.006975
| 0.220079
| 4,044
| 137
| 158
| 29.518248
| 0.782815
| 0.014342
| 0
| 0.218182
| 0
| 0.045455
| 0.464456
| 0.018588
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045455
| 0
| 0.045455
| 0.009091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6a6b8f37ebe80036ee8d9a83872d377cb863d68
| 732
|
py
|
Python
|
utils/glove.py
|
MirunaPislar/Word2vec
|
e9dd01488f081a7b8d7c00a0b21efe0d401d4927
|
[
"MIT"
] | 13
|
2018-05-19T22:29:27.000Z
|
2022-03-25T13:28:17.000Z
|
utils/glove.py
|
MirunaPislar/Word2vec
|
e9dd01488f081a7b8d7c00a0b21efe0d401d4927
|
[
"MIT"
] | 1
|
2019-01-14T09:55:50.000Z
|
2019-01-25T22:17:03.000Z
|
utils/glove.py
|
MirunaPislar/Word2vec
|
e9dd01488f081a7b8d7c00a0b21efe0d401d4927
|
[
"MIT"
] | 6
|
2018-05-19T22:29:29.000Z
|
2022-03-11T12:00:37.000Z
|
import numpy as np
DEFAULT_FILE_PATH = "utils/datasets/glove.6B.50d.txt"
def loadWordVectors(tokens, filepath=DEFAULT_FILE_PATH, dimensions=50):
"""Read pretrained GloVe vectors"""
wordVectors = np.zeros((len(tokens), dimensions))
with open(filepath) as ifs:
for line in ifs:
line = line.strip()
if not line:
continue
row = line.split()
token = row[0]
if token not in tokens:
continue
data = [float(x) for x in row[1:]]
if len(data) != dimensions:
raise RuntimeError("wrong number of dimensions")
wordVectors[tokens[token]] = np.asarray(data)
return wordVectors
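A short usage sketch, assuming loadWordVectors above is in scope and the default GloVe file exists at utils/datasets/glove.6B.50d.txt; tokens maps each word to its row index in the returned matrix:

tokens = {"the": 0, "cat": 1, "sat": 2}
vectors = loadWordVectors(tokens)      # reads the 50-dimensional GloVe vectors
print(vectors.shape)                   # (3, 50)
print(vectors[tokens["cat"]][:5])      # first five dimensions of the "cat" vector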
| 33.272727
| 71
| 0.577869
| 86
| 732
| 4.872093
| 0.581395
| 0.052506
| 0.071599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014141
| 0.32377
| 732
| 21
| 72
| 34.857143
| 0.832323
| 0.039617
| 0
| 0.111111
| 0
| 0
| 0.081779
| 0.044476
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6ab4939fc5a6bc71ee2ae80221a8f7dd6549b7a
| 2,753
|
py
|
Python
|
gremlin-python/src/main/jython/setup.py
|
EvKissle/tinkerpop
|
84195e38fc22a1a089c345fade9c75711e6cfdfe
|
[
"Apache-2.0"
] | null | null | null |
gremlin-python/src/main/jython/setup.py
|
EvKissle/tinkerpop
|
84195e38fc22a1a089c345fade9c75711e6cfdfe
|
[
"Apache-2.0"
] | null | null | null |
gremlin-python/src/main/jython/setup.py
|
EvKissle/tinkerpop
|
84195e38fc22a1a089c345fade9c75711e6cfdfe
|
[
"Apache-2.0"
] | null | null | null |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import codecs
import os
import sys
import time
from setuptools import setup
# Folder containing the setup.py
root = os.path.dirname(os.path.abspath(__file__))
# Path to __version__ module
version_file = os.path.join(root, 'gremlin_python', '__version__.py')
# Check if this is a source distribution.
# If not create the __version__ module containing the version
if not os.path.exists(os.path.join(root, 'PKG-INFO')):
timestamp = int(os.getenv('TIMESTAMP', time.time() * 1000)) / 1000
fd = codecs.open(version_file, 'w', 'utf-8')
fd.write("'''")
fd.write(__doc__)
fd.write("'''\n")
fd.write('version = %r\n' % os.getenv('VERSION', '?').replace('-SNAPSHOT', '.dev-%d' % timestamp))
fd.write('timestamp = %d\n' % timestamp)
fd.close()
# Load version
from gremlin_python import __version__
version = __version__.version
install_requires = [
'aenum==1.4.5',
'tornado==4.4.1',
'six==1.10.0'
]
if sys.version_info < (3,2):
install_requires += ['futures==3.0.5']
setup(
name='gremlinpython',
version=version,
packages=['gremlin_python', 'gremlin_python.driver',
'gremlin_python.driver.tornado', 'gremlin_python.process',
'gremlin_python.structure', 'gremlin_python.structure.io'],
license='Apache 2',
url='http://tinkerpop.apache.org',
description='Gremlin-Python for Apache TinkerPop',
long_description=codecs.open("README", "r", "UTF-8").read(),
test_suite="tests",
data_files=[("", ["LICENSE", "NOTICE"])],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
'mock'
],
install_requires=install_requires,
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
]
)
| 32.388235
| 104
| 0.682528
| 363
| 2,753
| 5.044077
| 0.443526
| 0.0639
| 0.040961
| 0.017477
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016093
| 0.187432
| 2,753
| 84
| 105
| 32.77381
| 0.802414
| 0.335634
| 0
| 0.037037
| 0
| 0
| 0.362135
| 0.067694
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6ab63dd0a627fd5e3fd6b78f7716ef38a63c388
| 1,112
|
py
|
Python
|
src/_bar.py
|
yoshihikosuzuki/plotly_light
|
cef2465486e9147e27feae1193a1b4487e4fc543
|
[
"MIT"
] | null | null | null |
src/_bar.py
|
yoshihikosuzuki/plotly_light
|
cef2465486e9147e27feae1193a1b4487e4fc543
|
[
"MIT"
] | null | null | null |
src/_bar.py
|
yoshihikosuzuki/plotly_light
|
cef2465486e9147e27feae1193a1b4487e4fc543
|
[
"MIT"
] | null | null | null |
from typing import Optional, Sequence
import plotly.graph_objects as go
def bar(x: Sequence,
y: Sequence,
text: Optional[Sequence] = None,
width: Optional[int] = None,
col: Optional[str] = None,
opacity: float = 1,
name: Optional[str] = None,
show_legend: bool = False,
show_init: bool = True) -> go.Bar:
"""Create a simple Trace object of a histogram.
positional arguments:
@ x : Coordinates of data on x-axis.
@ y : Coordinates of data on y-axis.
optional arguments:
@ col : Color of bars.
@ opacity : Opacity of bars.
@ name : Display name of the trace in legend.
@ show_legend : Show this trace in legend.
@ show_init : Show this trace initially.
"""
return go.Bar(x=x,
y=y,
text=text,
width=width,
marker_color=col,
opacity=opacity,
name=name,
showlegend=show_legend,
visible=None if show_init else "legendonly")
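A short usage sketch; the import path for src/_bar.py is an assumption. The helper returns a plain go.Bar trace, so it can be passed straight to a Figure:

import plotly.graph_objects as go
from _bar import bar   # hypothetical import path

trace = bar(x=["a", "b", "c"], y=[3, 1, 2], col="steelblue", name="counts", show_legend=True)
fig = go.Figure(trace)
fig.show()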
| 31.771429
| 62
| 0.539568
| 129
| 1,112
| 4.589147
| 0.44186
| 0.050676
| 0.050676
| 0.064189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001441
| 0.375899
| 1,112
| 34
| 63
| 32.705882
| 0.851585
| 0.340827
| 0
| 0
| 0
| 0
| 0.014577
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.1
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6acb4fde9c00fed8d158a1a19ae4c34b7d7d64e
| 4,029
|
py
|
Python
|
pennylane/templates/subroutines/arbitrary_unitary.py
|
doomhammerhell/pennylane
|
f147f22d8d99ba5891edd45a6a1f7dd679c8a23c
|
[
"Apache-2.0"
] | 3
|
2021-02-22T18:30:55.000Z
|
2021-02-23T10:54:58.000Z
|
pennylane/templates/subroutines/arbitrary_unitary.py
|
doomhammerhell/pennylane
|
f147f22d8d99ba5891edd45a6a1f7dd679c8a23c
|
[
"Apache-2.0"
] | null | null | null |
pennylane/templates/subroutines/arbitrary_unitary.py
|
doomhammerhell/pennylane
|
f147f22d8d99ba5891edd45a6a1f7dd679c8a23c
|
[
"Apache-2.0"
] | 1
|
2021-03-27T09:03:15.000Z
|
2021-03-27T09:03:15.000Z
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Contains the ArbitraryUnitary template.
"""
import pennylane as qml
from pennylane.operation import Operation, AnyWires
from pennylane.ops import PauliRot
_PAULIS = ["I", "X", "Y", "Z"]
def _tuple_to_word(index_tuple):
"""Convert an integer tuple to the corresponding Pauli word.
The Pauli operators are converted as ``0 -> I``, ``1 -> X``,
``2 -> Y``, ``3 -> Z``.
Args:
index_tuple (Tuple[int]): An integer tuple describing the Pauli word
Returns:
str: The corresponding Pauli word
"""
return "".join([_PAULIS[i] for i in index_tuple])
def _n_k_gray_code(n, k, start=0):
"""Iterates over a full n-ary Gray code with k digits.
Args:
n (int): Base of the Gray code. Needs to be greater than one.
k (int): Number of digits of the Gray code. Needs to be greater than zero.
start (int, optional): Optional start of the Gray code. The generated code
will be shorter as the code does not wrap. Defaults to 0.
"""
for i in range(start, n ** k):
codeword = [0] * k
base_representation = []
val = i
for j in range(k):
base_representation.append(val % n)
val //= n
shift = 0
for j in reversed(range(k)):
codeword[j] = (base_representation[j] + shift) % n
shift += n - codeword[j]
yield codeword
def _all_pauli_words_but_identity(num_wires):
# Start at 1 to ignore identity
yield from (_tuple_to_word(idx_tuple) for idx_tuple in _n_k_gray_code(4, num_wires, start=1))
class ArbitraryUnitary(Operation):
"""Implements an arbitrary unitary on the specified wires.
An arbitrary unitary on :math:`n` wires is parametrized by :math:`4^n - 1`
independent real parameters. This templates uses Pauli word rotations to
parametrize the unitary.
**Example**
ArbitraryUnitary can be used as a building block, e.g. to parametrize arbitrary
two-qubit operations in a circuit:
.. code-block:: python
@qml.template
def arbitrary_nearest_neighbour_interaction(weights, wires):
qml.broadcast(unitary=ArbitraryUnitary, pattern="double", wires=wires, params=weights)
Args:
weights (tensor_like): The angles of the Pauli word rotations, needs to have length :math:`4^n - 1`
where :math:`n` is the number of wires the template acts upon.
wires (Iterable): wires that the template acts on
"""
num_params = 1
num_wires = AnyWires
par_domain = "A"
def __init__(self, weights, wires, do_queue=True, id=None):
shape = qml.math.shape(weights)
if shape != (4 ** len(wires) - 1,):
raise ValueError(
f"Weights tensor must be of shape {(4 ** len(wires) - 1,)}; got {shape}."
)
super().__init__(weights, wires=wires, do_queue=do_queue, id=id)
def expand(self):
weights = self.parameters[0]
with qml.tape.QuantumTape() as tape:
for i, pauli_word in enumerate(_all_pauli_words_but_identity(len(self.wires))):
PauliRot(weights[i], pauli_word, wires=self.wires)
return tape
@staticmethod
def shape(n_wires):
"""Compute the expected shape of the weights tensor.
Args:
n_wires (int): number of wires that template acts on
"""
return (4 ** n_wires - 1,)
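A standalone sketch of the parameter count described above: an n-wire arbitrary unitary needs one angle per non-identity Pauli word, i.e. 4**n - 1 of them. The itertools ordering below is only for illustration (the template itself walks the words in an n-ary Gray-code order), and the last line assumes the ArbitraryUnitary class defined above is in scope:

from itertools import product

n_wires = 2
pauli_words = ["".join(w) for w in product("IXYZ", repeat=n_wires) if set(w) != {"I"}]
print(len(pauli_words))                 # 15 == 4**n_wires - 1
print(ArbitraryUnitary.shape(n_wires))  # (15,), the expected shape of the weights tensor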
| 31.476563
| 107
| 0.647803
| 569
| 4,029
| 4.483304
| 0.377856
| 0.024696
| 0.010584
| 0.015288
| 0.056448
| 0.025872
| 0.025872
| 0.025872
| 0.025872
| 0
| 0
| 0.011682
| 0.256391
| 4,029
| 127
| 108
| 31.724409
| 0.839786
| 0.540581
| 0
| 0
| 0
| 0
| 0.04483
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6aec9eead70cf9709e4908f8e9466e087fc8de3
| 5,271
|
py
|
Python
|
vae_celeba.py
|
aidiary/generative-models-pytorch
|
c9ae23a4ecbe4bf8f82dbaf9e4e3e1e61530e6b0
|
[
"MIT"
] | null | null | null |
vae_celeba.py
|
aidiary/generative-models-pytorch
|
c9ae23a4ecbe4bf8f82dbaf9e4e3e1e61530e6b0
|
[
"MIT"
] | null | null | null |
vae_celeba.py
|
aidiary/generative-models-pytorch
|
c9ae23a4ecbe4bf8f82dbaf9e4e3e1e61530e6b0
|
[
"MIT"
] | null | null | null |
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from pytorch_lightning.loggers import TensorBoardLogger
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import CelebA
class Encoder(nn.Module):
def __init__(self):
super().__init__()
self.conv_layers = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(32),
nn.LeakyReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
)
self.mu_layer = nn.Linear(4096, 200)
self.logvar_layer = nn.Linear(4096, 200)
def forward(self, imgs):
out = self.conv_layers(imgs)
out = nn.Flatten()(out)
mu = self.mu_layer(out)
logvar = self.logvar_layer(out)
return mu, logvar
class Decoder(nn.Module):
def __init__(self):
super().__init__()
self.decoder_input = nn.Linear(200, 4096)
self.deconv_layers = nn.Sequential(
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(),
nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(32),
nn.LeakyReLU(),
nn.ConvTranspose2d(32, 3, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(3),
nn.Sigmoid(),
)
def forward(self, z):
out = self.decoder_input(z)
out = out.view(-1, 64, 8, 8)
recon_img = self.deconv_layers(out)
return recon_img
class VanillaVAE(pl.LightningModule):
def __init__(self):
super().__init__()
self.encoder = Encoder()
self.decoder = Decoder()
def forward(self, img):
mu, logvar = self.encoder(img)
return mu
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters(), lr=0.005)
return optimizer
def training_step(self, train_batch, batch_idx):
img, labels = train_batch
mu, logvar = self.encoder(img)
z = self.reparameterize(mu, logvar)
recon_img = self.decoder(z)
recon_loss_factor = 10000
recon_loss = F.mse_loss(recon_img, img)
kld_loss = torch.mean(-0.5 * torch.sum(1 + logvar - mu**2 - logvar.exp(), dim=1))
loss = recon_loss_factor * recon_loss + kld_loss
self.log('train/loss', loss)
self.log('train/recon_loss', recon_loss)
self.log('train/kl_loss', kld_loss)
return loss
def validation_step(self, val_batch, batch_idx):
img, labels = val_batch
mu, logvar = self.encoder(img)
z = self.reparameterize(mu, logvar)
recon_img = self.decoder(z)
recon_loss_factor = 10000
recon_loss = F.mse_loss(recon_img, img)
kld_loss = torch.mean(-0.5 * torch.sum(1 + logvar - mu**2 - logvar.exp(), dim=1))
loss = recon_loss_factor * recon_loss + kld_loss
self.log('val/loss', loss)
self.log('val/recon_loss', recon_loss)
self.log('val/kl_loss', kld_loss)
return loss
def reconstruct(self, img):
mu, _ = self.encoder(img)
recon_img = self.decoder(mu)
return recon_img
def sample(self, num_samples=64):
z = torch.randn(num_samples, 200)
samples = self.decoder(z)
return samples
if __name__ == '__main__':
# data
transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.CenterCrop(148),
transforms.Resize(128),
transforms.ToTensor()
])
train_dataset = CelebA(root='data', split='train', transform=transform, download=False)
val_dataset = CelebA(root='data', split='test', transform=transform, download=False)
train_loader = DataLoader(train_dataset,
batch_size=32,
num_workers=8,
shuffle=True,
drop_last=True)
val_loader = DataLoader(val_dataset,
batch_size=32,
num_workers=8,
shuffle=False,
drop_last=True)
# model
model = VanillaVAE()
# training
tb_logger = TensorBoardLogger('lightning_logs', name='vanilla_vae_celeba', default_hp_metric=False)
trainer = pl.Trainer(gpus=[0], max_epochs=200, logger=tb_logger)
trainer.fit(model, train_loader, val_loader)
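A small shape-check sketch that exercises the encoder, the reparameterization trick, and the decoder on a random batch (no CelebA download needed); the module import path is an assumption:

import torch
from vae_celeba import VanillaVAE   # hypothetical import of the module above

model = VanillaVAE()
imgs = torch.randn(4, 3, 128, 128)       # fake batch standing in for CelebA crops
mu, logvar = model.encoder(imgs)         # both of shape (4, 200)
z = model.reparameterize(mu, logvar)     # z = mu + eps * exp(0.5 * logvar)
recon = model.decoder(z)
print(mu.shape, z.shape, recon.shape)    # (4, 200), (4, 200), (4, 3, 128, 128)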
| 31.189349
| 103
| 0.592677
| 652
| 5,271
| 4.599693
| 0.214724
| 0.032011
| 0.029343
| 0.045348
| 0.466822
| 0.414138
| 0.389463
| 0.372124
| 0.315438
| 0.302434
| 0
| 0.043502
| 0.293493
| 5,271
| 168
| 104
| 31.375
| 0.761815
| 0.003605
| 0
| 0.379845
| 0
| 0
| 0.024581
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0
| 0.069767
| 0
| 0.255814
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6afcad02c1d49dbed0f7930d88f9219376906a4
| 2,686
|
py
|
Python
|
data/process_data.py
|
julat/DisasterResponse
|
140489e521a96dc2ff9c9a95f0ce4e99403f03af
|
[
"MIT"
] | null | null | null |
data/process_data.py
|
julat/DisasterResponse
|
140489e521a96dc2ff9c9a95f0ce4e99403f03af
|
[
"MIT"
] | null | null | null |
data/process_data.py
|
julat/DisasterResponse
|
140489e521a96dc2ff9c9a95f0ce4e99403f03af
|
[
"MIT"
] | null | null | null |
# Import libraries
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
Load the data from the disaster response csvs
Parameters:
messages_filepath (str): Path to messages csv
categories_filepath (str): Path to categories csv
Returns:
Dataframe: Merged data
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = pd.merge(messages,categories,on='id')
return df
def clean_data(df):
"""
Cleans the categories
Parameters:
df (DataFrame): Messy DataFrame
Returns:
Dataframe: Cleaned dataframe
"""
categories = df['categories'].str.split( pat=';', expand=True)
row = categories.iloc[[1]]
category_colnames = row.apply(lambda x : x.values[0].split("-")[0])
categories.columns = category_colnames
for column in categories:
categories[column] = categories[column].astype(str).str[-1:]
categories[column] = categories[column].astype(int)
categories[column] = categories[column].map(lambda x: 1 if x > 1 else x)
df.drop(['categories'], axis=1, inplace=True)
df = pd.concat([df, categories], axis=1)
df.drop_duplicates(inplace=True)
return df
def save_data(df, database_filename):
"""
Saves the DataFrame
Parameters:
df (DataFrame): Cleaned DataFrame
database_filename (DataFrame): Path to the SQLite Database
"""
engine = create_engine('sqlite:///' + database_filename + '.db')
df.to_sql(database_filename, engine, index=False, if_exists='replace')
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
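A toy illustration of the category-splitting step inside clean_data, with made-up values and a two-row frame so the same iloc[[1]] row selection applies:

import pandas as pd

toy = pd.DataFrame({"categories": ["related-1;request-0", "related-0;request-1"]})
categories = toy["categories"].str.split(pat=";", expand=True)
row = categories.iloc[[1]]
category_colnames = row.apply(lambda x: x.values[0].split("-")[0])
print(list(category_colnames))   # ['related', 'request']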
| 28.574468
| 80
| 0.652271
| 320
| 2,686
| 5.334375
| 0.33125
| 0.056239
| 0.076157
| 0.079672
| 0.093732
| 0.049209
| 0
| 0
| 0
| 0
| 0
| 0.004883
| 0.237528
| 2,686
| 94
| 81
| 28.574468
| 0.828613
| 0.208861
| 0
| 0.045455
| 0
| 0
| 0.228518
| 0.020437
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.068182
| 0
| 0.204545
| 0.113636
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b027e44688ca01138133b153494c3bc7370758
| 3,658
|
py
|
Python
|
contrail-controller/files/plugins/check_contrail_status_controller.py
|
atsgen/tf-charms
|
81110aef700b2f227654d52709614ddb3d62ba17
|
[
"Apache-2.0"
] | null | null | null |
contrail-controller/files/plugins/check_contrail_status_controller.py
|
atsgen/tf-charms
|
81110aef700b2f227654d52709614ddb3d62ba17
|
[
"Apache-2.0"
] | null | null | null |
contrail-controller/files/plugins/check_contrail_status_controller.py
|
atsgen/tf-charms
|
81110aef700b2f227654d52709614ddb3d62ba17
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import subprocess
import sys
import json
SERVICES = {
'control': [
'control',
'nodemgr',
'named',
'dns',
],
'config-database': [
'nodemgr',
'zookeeper',
'rabbitmq',
'cassandra',
],
'webui': [
'web',
'job',
],
'config': [
'svc-monitor',
'nodemgr',
'device-manager',
'api',
'schema',
],
}
WARNING = 1
CRITICAL = 2
def get_contrail_status_txt(services):
try:
output = subprocess.check_output("export CONTRAIL_STATUS_CONTAINER_NAME=contrail-status-controller-nrpe ; sudo -E contrail-status", shell=True).decode('UTF-8')
except subprocess.CalledProcessError as err:
message = ('CRITICAL: Could not get contrail-status.'
' return code: {} cmd: {} output: {}'.
format(err.returncode, err.cmd, err.output))
print(message)
sys.exit(CRITICAL)
statuses = dict()
group = None
for line in output.splitlines()[1:]:
words = line.split()
if len(words) == 4 and words[0] == '==' and words[3] == '==':
group = words[2]
continue
if len(words) == 0:
group = None
continue
if group and len(words) >= 2 and group in services:
srv = words[0].split(':')[0]
statuses.setdefault(group, list()).append(
{srv: ' '.join(words[1:])})
return statuses
def get_contrail_status_json(services):
try:
output = json.loads(subprocess.check_output("export CONTRAIL_STATUS_CONTAINER_NAME=contrail-status-controller-nrpe ; sudo -E contrail-status --format json", shell=True).decode('UTF-8'))
except subprocess.CalledProcessError as err:
message = ('CRITICAL: Could not get contrail-status.'
' return code: {} cmd: {} output: {}'.
format(err.returncode, err.cmd, err.output))
print(message)
sys.exit(CRITICAL)
statuses = output["pods"]
return statuses
def check_contrail_status(services, version=None):
if version > 1912:
statuses = get_contrail_status_json(services)
else:
statuses = get_contrail_status_txt(services)
for group in services:
if group not in statuses:
message = ('WARNING: POD {} is absent in the contrail-status'
.format(group))
print(message)
sys.exit(WARNING)
for srv in services[group]:
if not any(srv in key for key in statuses[group]):
message = ('WARNING: {} is absent in the contrail-status'
.format(srv))
print(message)
sys.exit(WARNING)
status = next(stat[srv] for stat in statuses[group] if srv in stat)
if status not in ['active', 'backup']:
message = ('CRITICAL: {} is not ready. Reason: {}'
.format(srv, status))
print(message)
sys.exit(CRITICAL)
print('Contrail status OK')
sys.exit()
if __name__ == '__main__':
cver = sys.argv[1]
if '.' in str(cver):
if cver == '5.0':
version = 500
elif cver == '5.1':
version = 510
else:
print("CRITICAL: invalid version: {}".format(cver))
sys.exit(CRITICAL)
elif not cver.isdigit():
print("CRITICAL: invalid version: {}".format(cver))
sys.exit(CRITICAL)
else:
version = int(cver)
check_contrail_status(SERVICES, version=version)
| 29.739837
| 193
| 0.54538
| 394
| 3,658
| 4.982234
| 0.294416
| 0.121243
| 0.051961
| 0.048395
| 0.511971
| 0.379012
| 0.379012
| 0.34539
| 0.34539
| 0.29241
| 0
| 0.01221
| 0.328321
| 3,658
| 122
| 194
| 29.983607
| 0.786732
| 0.005741
| 0
| 0.377358
| 0
| 0
| 0.203795
| 0.034103
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028302
| false
| 0
| 0.028302
| 0
| 0.075472
| 0.075472
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b042b87a1d5f3672a72f7fa6b5679e20f39682
| 2,693
|
py
|
Python
|
leaderboard-server/leaderboard-server.py
|
harnitsignalfx/skogaming
|
c860219c89149d686106dfb7a93d27df39830842
|
[
"MIT"
] | 1
|
2021-03-01T20:56:24.000Z
|
2021-03-01T20:56:24.000Z
|
leaderboard-server/leaderboard-server.py
|
harnitsignalfx/skogaming
|
c860219c89149d686106dfb7a93d27df39830842
|
[
"MIT"
] | null | null | null |
leaderboard-server/leaderboard-server.py
|
harnitsignalfx/skogaming
|
c860219c89149d686106dfb7a93d27df39830842
|
[
"MIT"
] | 1
|
2021-02-20T17:36:47.000Z
|
2021-02-20T17:36:47.000Z
|
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
import simplejson as json
from leaderboard.leaderboard import Leaderboard
import uwsgidecorators
import signalfx
app = Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
cors = CORS(app)
highscore_lb_starship = Leaderboard('highscores-starship',host='redis-instance')
sfx = signalfx.SignalFx(ingest_endpoint='http://otelcol:9943').ingest('token-at-collector')
def parseData(row):
metricDump1 = {}
counterArray = []
metricDump1["dimensions"] = {}
metricDump1["dimensions"]["ip"] = row["ip"] # dimension
metricDump1["metric"] = "starship.shots"
metricDump1["value"] = row["shots"]
counterArray.append(metricDump1)
print('Sending data:',counterArray)
sfx.send(counters=counterArray)
@app.route('/health')
def health():
return '{"status":"OK"}', 200
@app.route('/leaders/<game>')
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
def returnLeaders(game):
if game == "starship":
return json.dumps(highscore_lb_starship.all_leaders()), 200
return '{}', 200
@app.route('/submitScores', methods=['POST'])
@cross_origin(origin='localhost',headers=['Content-Type','application/json'])
def submitScores():
content = request.get_json(force=True)
print('Content:',content)
if "game" in content:
if content["game"]=="starship":
highscore_lb_starship.rank_member(content["aduser"], content["score"])
return '{"status":"OK"}', 200
@app.route("/get_my_ip", methods=["GET"])
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
def get_my_ip():
if 'X-Real-Ip' in request.headers:
return jsonify({'ip':request.headers['X-Real-Ip']}), 200
else:
return jsonify({'ip':'-'}), 200
#return json.dumps({k:v for k, v in request.headers.items()}), 200
@app.route('/submitShots', methods=['POST'])
@cross_origin(origin='localhost',headers=['Content-Type','application/json'])
def submitShots():
content = request.get_json(force=True)
print('Content:',content)
shotSubmission = {}
totalShots = 0
if "game" in content:
if content["game"]=="starship":
if "shots" in content:
totalShots = content["shots"]
shotSubmission["shots"] = totalShots
if 'X-Real-Ip' in request.headers:
shotSubmission["ip"] = request.headers['X-Real-Ip']
else:
shotSubmission["ip"] = "-"
parseData(shotSubmission)
return '{"status":"OK"}', 200
if __name__ == '__main__':
app.run(host='0.0.0.0', port=6001)
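A hedged client-side sketch against the routes above, assuming the server is running locally on port 6001 (as in app.run) and that the requests package is available; the player name and scores are made up:

import requests

base = "http://localhost:6001"
requests.post(f"{base}/submitScores", json={"game": "starship", "aduser": "player1", "score": 4200})
requests.post(f"{base}/submitShots", json={"game": "starship", "shots": 17})
print(requests.get(f"{base}/leaders/starship").json())   # ranked members and scores
print(requests.get(f"{base}/get_my_ip").json())          # {'ip': ...}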
| 26.663366
| 95
| 0.649833
| 309
| 2,693
| 5.553398
| 0.307443
| 0.032051
| 0.052448
| 0.060606
| 0.339161
| 0.339161
| 0.283217
| 0.254079
| 0.212121
| 0.085082
| 0
| 0.019475
| 0.180097
| 2,693
| 100
| 96
| 26.93
| 0.757699
| 0.027479
| 0
| 0.287879
| 0
| 0
| 0.21445
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0.015152
| 0.287879
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b183e72d2aff2b604bbf82d32e69244b409f59
| 1,591
|
py
|
Python
|
meshio/_cli/_info.py
|
jorgensd/meshio
|
0600ac9e9e8d1e1a27d5f3f2f4235414f4482cac
|
[
"MIT"
] | 1
|
2020-09-01T11:26:15.000Z
|
2020-09-01T11:26:15.000Z
|
meshio/_cli/_info.py
|
jorgensd/meshio
|
0600ac9e9e8d1e1a27d5f3f2f4235414f4482cac
|
[
"MIT"
] | null | null | null |
meshio/_cli/_info.py
|
jorgensd/meshio
|
0600ac9e9e8d1e1a27d5f3f2f4235414f4482cac
|
[
"MIT"
] | null | null | null |
import argparse
import numpy as np
from .._helpers import read, reader_map
from ._helpers import _get_version_text
def info(argv=None):
# Parse command line arguments.
parser = _get_info_parser()
args = parser.parse_args(argv)
# read mesh data
mesh = read(args.infile, file_format=args.input_format)
print(mesh)
# check if the cell arrays are consistent with the points
is_consistent = True
for cells in mesh.cells:
if np.any(cells.data > mesh.points.shape[0]):
print("\nATTENTION: Inconsistent mesh. Cells refer to nonexistent points.")
is_consistent = False
break
# check if there are redundant points
if is_consistent:
point_is_used = np.zeros(mesh.points.shape[0], dtype=bool)
for cells in mesh.cells:
point_is_used[cells.data] = True
if np.any(~point_is_used):
print("ATTENTION: Some points are not part of any cell.")
def _get_info_parser():
parser = argparse.ArgumentParser(
description=("Print mesh info."), formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument("infile", type=str, help="mesh file to be read from")
parser.add_argument(
"--input-format",
"-i",
type=str,
choices=sorted(list(reader_map.keys())),
help="input file format",
default=None,
)
parser.add_argument(
"--version",
"-v",
action="version",
version=_get_version_text(),
help="display version information",
)
return parser
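A short usage sketch: info() is normally exposed as a meshio CLI subcommand, but it can also be called directly with an argv list, assuming a mesh file such as mesh.vtk exists:

from meshio._cli._info import info   # module path as in this file

info(["mesh.vtk"])                           # print the mesh and run the consistency checks
info(["mesh.vtk", "--input-format", "vtk"])  # or force a specific reader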
| 26.966102
| 87
| 0.637335
| 201
| 1,591
| 4.885572
| 0.437811
| 0.03666
| 0.033605
| 0.028513
| 0.038697
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001704
| 0.262099
| 1,591
| 58
| 88
| 27.431034
| 0.834753
| 0.085481
| 0
| 0.095238
| 0
| 0
| 0.164828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.166667
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b2c4874559385c0807dca69b9f07a62e9a1d08
| 1,324
|
py
|
Python
|
ccslink/Zip.py
|
Data-Linkage/ccslink
|
ee1105888d43c6a2b307deb96ddede34d03a965f
|
[
"MIT"
] | null | null | null |
ccslink/Zip.py
|
Data-Linkage/ccslink
|
ee1105888d43c6a2b307deb96ddede34d03a965f
|
[
"MIT"
] | null | null | null |
ccslink/Zip.py
|
Data-Linkage/ccslink
|
ee1105888d43c6a2b307deb96ddede34d03a965f
|
[
"MIT"
] | null | null | null |
import os, shutil
from CCSLink import Spark_Session as SS
def add_zipped_dependency(zip_from, zip_target):
"""
This method creates a zip of the code to be sent to the executors.
It essentially zips the Python packages installed by PIP and
submits them via addPyFile in the current PySpark context.
E.g. if we want to submit the "metaphone" package so that we
can `import metaphone` and use its methods inside a UDF,
we run this method with:
- zip_from = /home/cdsw/.local/lib/python3.6/site-packages/
- zip_target = metaphone
"""
# change this to a path in your project
zipped_fpath = f'/home/cdsw/zipped_packages/{zip_target}'
if os.path.exists(zipped_fpath + '.zip'):
os.remove(zipped_fpath + '.zip')
shutil.make_archive(
# path to the resulting zipped file (without the suffix)
base_name=zipped_fpath, # resulting filename
# specifies the format --> implies .zip suffix
format='zip',
# the root dir from where we want to zip
root_dir=zip_from,
# the dir (relative to root dir) which we want to zip
# (all files in the final zip will have this prefix)
base_dir=zip_target,
)
# add the files to the executors
SS.SPARK().sparkContext.addPyFile(f'{zipped_fpath}.zip')
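A usage sketch mirroring the docstring's own example; the import path is an assumption, and the site-packages path only makes sense on a CDSW-style workbench:

from CCSLink.Zip import add_zipped_dependency   # hypothetical import path

add_zipped_dependency(
    zip_from='/home/cdsw/.local/lib/python3.6/site-packages/',
    zip_target='metaphone',
)
# The zip is now shipped to the executors, so UDFs can safely `import metaphone`.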
| 33.1
| 70
| 0.676737
| 199
| 1,324
| 4.40201
| 0.507538
| 0.062785
| 0.027397
| 0.025114
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002004
| 0.246224
| 1,324
| 39
| 71
| 33.948718
| 0.875752
| 0.563444
| 0
| 0
| 0
| 0
| 0.129032
| 0.074004
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b2fbff1fb4792ec87b5e0830c85e32ea769936
| 2,484
|
py
|
Python
|
moltemplate/nbody_Angles.py
|
Mopolino8/moltemplate
|
363df364fcb012e8e4beb7bc616a77d696b8b707
|
[
"BSD-3-Clause"
] | null | null | null |
moltemplate/nbody_Angles.py
|
Mopolino8/moltemplate
|
363df364fcb012e8e4beb7bc616a77d696b8b707
|
[
"BSD-3-Clause"
] | null | null | null |
moltemplate/nbody_Angles.py
|
Mopolino8/moltemplate
|
363df364fcb012e8e4beb7bc616a77d696b8b707
|
[
"BSD-3-Clause"
] | 1
|
2019-11-24T17:32:28.000Z
|
2019-11-24T17:32:28.000Z
|
try:
from .nbody_graph_search import Ugraph
except (SystemError, ValueError):
# not installed as a package
from nbody_graph_search import Ugraph
# This file defines how 3-body angle interactions are generated by moltemplate
# by default. It can be overridden by supplying your own custom file.
# To find 3-body "angle" interactions, we would use this subgraph:
#
#
# *---*---* => 1st bond connects atoms 0 and 1
# 0 1 2 2nd bond connects atoms 1 and 2
#
bond_pattern = Ugraph([(0, 1), (1, 2)])
# (Ugraph atom indices begin at 0, not 1)
# The next function eliminates the redundancy between 0-1-2 and 2-1-0:
def canonical_order(match):
"""
Before defining a new interaction, we must check to see if an
interaction between these same 3 atoms has already been created
(perhaps listed in a different, but equivalent order).
If we don't check for this, we will create many unnecessary redundant
interactions (which can slow down the simulation).
To avoid this, I define a "canonical_order" function which sorts the atoms
and bonds in a way which is consistent with the symmetry of the interaction
being generated... Later the re-ordered list of atom and bond ids will be
tested against the list of atom/bond ids in the matches-found-so-far,
before it is added to the list of interactions found so far. Note that
the energy of an angle interaction is a function of the angle between
three consecutively bonded atoms (referred to here as: 0,1,2).
This angle does not change when swapping the atoms at either end (0 and 2).
So it does not make sense to define a separate 3-body angle
interaction between atoms 0,1,2 AS WELL AS an interaction between 2,1,0.
So we sort the atoms and bonds so that the first atom has a always has
a lower atomID than the third atom. (Later we will check to see if we
have already defined an interaction between these 3 atoms. If not then
we create a new one.)
"""
# match[0][0:2] contains the ID numbers for the 3 atoms in the match
atom0 = match[0][0]
atom1 = match[0][1]
atom2 = match[0][2]
# match[1][0:1] contains the ID numbers for the 2 bonds
bond0 = match[1][0]
bond1 = match[1][1]
if atom0 < atom2:
# return ((atom0, atom1, atom2), (bond0, bond1)) same thing as:
return match
else:
return ((atom2, atom1, atom0), (bond1, bond0))
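A tiny worked example of canonical_order, assuming the function above is in scope; the atom and bond IDs are made up:

match = ((7, 3, 2), (11, 5))     # atoms 7-3-2 joined by bonds 11 and 5
print(canonical_order(match))    # ((2, 3, 7), (5, 11)): reversed so the lower atom ID comes first
print(canonical_order(((2, 3, 7), (5, 11))))   # already canonical, returned unchanged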
| 42.827586
| 79
| 0.68599
| 409
| 2,484
| 4.149144
| 0.400978
| 0.00825
| 0.007071
| 0.023571
| 0.068356
| 0.068356
| 0
| 0
| 0
| 0
| 0
| 0.0384
| 0.245169
| 2,484
| 57
| 80
| 43.578947
| 0.866667
| 0.771739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b3c1a04d6b23957a4328b1a4d335f1079479f3
| 8,099
|
py
|
Python
|
extras/usd/examples/usdMakeFileVariantModelAsset/usdMakeFileVariantModelAsset.py
|
DougRogers-DigitalFish/USD
|
d8a405a1344480f859f025c4f97085143efacb53
|
[
"BSD-2-Clause"
] | 3,680
|
2016-07-26T18:28:11.000Z
|
2022-03-31T09:55:05.000Z
|
extras/usd/examples/usdMakeFileVariantModelAsset/usdMakeFileVariantModelAsset.py
|
DougRogers-DigitalFish/USD
|
d8a405a1344480f859f025c4f97085143efacb53
|
[
"BSD-2-Clause"
] | 1,759
|
2016-07-26T19:19:59.000Z
|
2022-03-31T21:24:00.000Z
|
extras/usd/examples/usdMakeFileVariantModelAsset/usdMakeFileVariantModelAsset.py
|
DougRogers-DigitalFish/USD
|
d8a405a1344480f859f025c4f97085143efacb53
|
[
"BSD-2-Clause"
] | 904
|
2016-07-26T18:33:40.000Z
|
2022-03-31T09:55:16.000Z
|
#!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
'''
Creates a top-level, referenceable asset USD file from one or more
'variant' files, each of which can contain arbitrary scene description.
When supplying multiple files, one must also provide the name for a
variantSet that will be constructed to switch between the files.
The asset file will place the variant files behind a "payload", which will
enable consumers to defer loading and processing of the data when composed
onto a UsdStage.
The names of the created variations will be taken directly from the basename
of their corresponding input file.
'''
from __future__ import print_function
from pxr import Tf, Kind, Sdf, Usd
# ToDo:
# - handle multiple variantSets
# - layer multiple kinds of files (e.g. shading.usd over geom.usd)
# - allow output filename to be independently specifiable? (Breaks with Pixar
# convention)
# - allow variant names to be specified independently of variant file names
# - Compute and present (per-variant) UsdGeomModelAPI.extentsHint
# - Compute and author UsdModelAPI::SetPayloadAssetDependencies()
def CreateModelStage(assetName,
assetIdentifier=None,
kind=Kind.Tokens.component,
filesToReference=None,
variantSetName=None,
defaultVariantSelection=None):
# Preconditions....
if not Tf.IsValidIdentifier(assetName):
print("assetName '%s' must be a valid identifier. Aborting." %
assetName)
return None
if variantSetName and not Tf.IsValidIdentifier(variantSetName):
print("variantSetName '%s' must be a valid identifier. Aborting." %
variantSetName)
return None
if filesToReference and len(filesToReference) > 1 and not variantSetName:
# For now, we only allow multiple files to reference if we're switching
# them with a variantSet. We can relax this restriction when we can
# make internal payload arcs (bug #119960)
print("Cannot create multiple-file-reference without a variantSet. Aborting")
return None
if not Kind.Registry.IsA(kind, Kind.Tokens.model):
print("kind '%s' is not a valid model kind, which must be one of:" %
kind)
print(Kind.Registry.GetAllKinds())
return None
# Create the root file for the stage, and make it ASCII text.
# We need some nicer sugar for this.
fileName = assetName + ".usd"
rootLayer = Sdf.Layer.CreateNew(fileName, args = {'format':'usda'})
stage = Usd.Stage.Open(rootLayer)
# Name the root prim after the asset. Don't give it a type, since we
# want that to come from referenced files. Make it be the "default prim"
# so that we can reference the resulting file without specifying a
# prim path
rootPath = Sdf.Path.absoluteRootPath
modelRootPrim = stage.DefinePrim(rootPath.AppendChild(assetName))
stage.SetDefaultPrim(modelRootPrim)
modelAPI = Usd.ModelAPI(modelRootPrim)
modelAPI.SetKind(kind)
# See http://openusd.org/docs/api/class_usd_model_a_p_i.html#details
# for more on assetInfo
modelAPI.SetAssetName(assetName)
modelAPI.SetAssetIdentifier(assetIdentifier or fileName)
# Add a class named after the asset, and make the asset inherit from it.
# This is not necessary for a valid asset, and the class-naming is a Pixar
# convention. But always having a class associated with each asset is
# extremely useful for non-destructively editing many referenced or
# instanced assets of the same type.
classPrim = stage.CreateClassPrim(rootPath.AppendChild("_class_"+assetName))
modelRootPrim.GetInherits().AddInherit(classPrim.GetPath())
if not filesToReference:
# weird edge case... we're done
return stage
elif len(filesToReference) == 1 and not variantSetName:
# The other, more plausible edge case: we're just wrapping
# some other file (e.g. alembic) in order to give it a payload
# and other proper USD trappings - no variants
modelRootPrim.GetPayloads().AddPayload(Sdf.Payload(filesToReference[0]))
return stage
# OK, we're making a variantSet, and we are going to vary the payload
# in each variant
varSet = modelRootPrim.GetVariantSet(variantSetName)
for variantFile in filesToReference:
import os
variantName = os.path.splitext(os.path.basename(variantFile))[0]
# If we didn't specify a default selection, choose the first one
if not defaultVariantSelection:
defaultVariantSelection = variantName
varSet.AddVariant(variantName)
varSet.SetVariantSelection(variantName)
# The context object makes all edits "go inside" the variant we
# just created.
with varSet.GetVariantEditContext():
modelRootPrim.GetPayloads().AddPayload(Sdf.Payload(variantFile))
# Now put the variantSet into the state we want it to be in by default
varSet.SetVariantSelection(defaultVariantSelection)
return stage
if __name__ == "__main__":
import argparse, os, sys
descr = __doc__.strip()
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
description=descr)
parser.add_argument('assetName')
parser.add_argument('variantFiles', nargs='+')
parser.add_argument(
'-k', '--kind', default='component', action='store', metavar='kind',
help="Model kind, one of: component, group, or assembly")
parser.add_argument(
'-v', '--variantSet', default='', action='store', metavar='variantSet',
help="Variantset to create to modulate variantFiles. Can be elided "
"if only one file is supplied")
parser.add_argument(
'-i', '--identifier', default='', action='store', metavar='identifier',
help="The identifier you would expect your Ar asset-resolver plugin "
"to resolve to the (installed) assetName.usd file this script creates. "
" If unspecified, defaults to assetName.usd")
parser.add_argument(
'-d', '--defaultVariantSelection', default='', action='store',
metavar='defaultVariantSelection',
help="This variant will be selected by default when the asset is "
"added to a composition. If unspecified, will be the variant for "
"'variantFile1'")
args = parser.parse_args()
if not args.assetName or args.assetName == '':
parser.error("No assetName specified")
stage = CreateModelStage(args.assetName,
assetIdentifier=args.identifier,
kind=args.kind,
filesToReference=args.variantFiles,
variantSetName=args.variantSet,
defaultVariantSelection=args.defaultVariantSelection)
if stage:
stage.GetRootLayer().Save()
exit(0)
else:
exit(1)
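Besides the command-line entry point, CreateModelStage can be called directly. A hedged sketch (the file names are hypothetical and the pxr modules must be importable):

from pxr import Kind

stage = CreateModelStage(
    "Chair",
    filesToReference=["Chair_modelA.usd", "Chair_modelB.usd"],
    variantSetName="modelingVariant",
    kind=Kind.Tokens.component,
)
if stage:
    stage.GetRootLayer().Save()   # writes Chair.usd with a payload behind each variant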
| 44.256831
| 85
| 0.684159
| 991
| 8,099
| 5.56004
| 0.380424
| 0.011434
| 0.020327
| 0.011434
| 0.050091
| 0.025771
| 0.011252
| 0
| 0
| 0
| 0
| 0.004056
| 0.238918
| 8,099
| 182
| 86
| 44.5
| 0.889844
| 0.416224
| 0
| 0.119565
| 0
| 0
| 0.192489
| 0.015236
| 0
| 0
| 0
| 0.005495
| 0
| 1
| 0.01087
| false
| 0
| 0.043478
| 0
| 0.130435
| 0.065217
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b3c20df06992b958887a2ed1583c032b8b6295
| 7,079
|
py
|
Python
|
src/main.py
|
fbdp1202/pyukf_kinect_body_tracking
|
c44477149cfc22abfe9121c2604dc284c93fbd42
|
[
"MIT"
] | 7
|
2020-04-23T06:03:10.000Z
|
2022-01-16T21:16:23.000Z
|
src/main.py
|
fbdp1202/pyukf_kinect_body_tracking
|
c44477149cfc22abfe9121c2604dc284c93fbd42
|
[
"MIT"
] | null | null | null |
src/main.py
|
fbdp1202/pyukf_kinect_body_tracking
|
c44477149cfc22abfe9121c2604dc284c93fbd42
|
[
"MIT"
] | 3
|
2020-07-12T15:07:52.000Z
|
2021-12-05T09:27:18.000Z
|
import sys
import os
sys.path.append('./code/')
from skeleton import Skeleton
from read_data import *
from calibration import Calibration
from ukf_filter import ukf_Filter_Controler
from canvas import Canvas
from regression import *
import time
from functools import wraps
import os
def check_time(function):
@wraps(function)
def measure(*args, **kwargs):
start_time = time.time()
result = function(*args, **kwargs)
end_time = time.time()
print(f"@check_time: {function.__name__} took {end_time - start_time}")
return result
return measure
def get_dir_name(dir):
dir_list = []
for name in os.listdir(dir):
path = dir + '/' + name
if not os.path.isfile(path):
dir_list.append(name)
return dir_list
def scan_dir(dir):
dir_list = []
for name in os.listdir(dir):
path = dir + '/' + name
if os.path.isfile(path):
dir_list.append(path)
return dir_list
@check_time
def merge_skeleton_data(folder_name):
save_file_name = folder_name + '.txt'
dir_list = scan_dir(folder_name)
wf = open(save_file_name, 'w')
for filename in dir_list:
    # each per-frame file is expected to contain a single line of joint data
    f = open(filename, 'r')
    wf.write(f.readline())
    f.close()
wf.close()
return save_file_name
@check_time
def init_simul(filename, test_num, cbr_num=50, div_step=1):
data = read_data_skeleton(filename)
# test_num, data = interval_compasation(data, test_num, div_step)
test_num = min(test_num, len(data))
skeletons = []
for i in range(test_num):
skeletons.append(Skeleton(data[i]))
cbr_num = min(test_num, cbr_num)
cal_skeletons = []
for i in range(cbr_num):
cal_skeletons.append(skeletons[i*div_step])
calibration = Calibration(cal_skeletons)
lower_init_mean, upper_init_mean = calibration.get_init_mean(0, filename)
return skeletons, lower_init_mean, upper_init_mean, test_num
@check_time
def make_filter(lower_init_mean, lower_init_cov, upper_init_mean, upper_init_cov, model):
flt = None
if model == 'ukf':
flt = ukf_Filter_Controler(lower_init_mean, lower_init_cov, upper_init_mean, upper_init_cov)
else:
print(model, "is not exist model name")
return flt
@check_time
def run_ukf(ukf, skeletons, test_num):
original_data = []
estimate_data = []
estimate_state = []
test_num = min(len(skeletons), test_num)
print("total test is {}".format(test_num))
print("test_num:", end=' ')
for i in range(test_num):
curr_input = skeletons[i].get_measurement()
original_data.append(curr_input)
state, data = ukf.update(curr_input)
estimate_data.append(data)
estimate_state.append(state)
if i % 10 == 0:
print(i, end=' ')
print('')
return original_data, estimate_data, estimate_state
def make_folder(folder_name):
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
return folder_name
def get_save_skeleton_data_folder_name(person_name, pos_mode, model):
folder_name = make_folder('result')
folder_name = make_folder(folder_name + '/' + person_name)
folder_name = make_folder(folder_name + '/' + pos_mode)
folder_name = make_folder(folder_name + '/' + model)
return folder_name + '/'
def save_sk_data_to_csv(folder_name, filename, data):
filename = folder_name + filename
f = open(filename, "w", encoding="UTF-8")
for i in range(len(data)):
for j in range(len(data[i])):
for k in range(3):
f.write(str(data[i][j][k]))
if j == (len(data[i])-1) and k == 2:
f.write('\n')
else:
f.write(',')
def save_sk_state_to_csv(folder_name, filename, data):
filename = folder_name + filename
f = open(filename, 'w', encoding="UTF-8")
for i in range(len(data)):
for j in range(len(data[i])):
f.write(str(data[i][j]))
if j == (len(data[i])-1):
f.write('\n')
else:
f.write(',')
@check_time
def save_skeleton_data_to_csv(person_name, pos_mode, original_data, estimate_data, estimate_state, model):
csv_folder_name = get_save_skeleton_data_folder_name(person_name, pos_mode, model)
save_sk_data_to_csv(csv_folder_name, 'original_data.csv', original_data)
save_sk_data_to_csv(csv_folder_name, 'estimate_data.csv', estimate_data)
save_sk_state_to_csv(csv_folder_name, 'estimate_state.csv', estimate_state)
def read_csv(filename):
data = []
with open(filename, 'r') as reader:
for line in reader:
fields = line.split(',')
fields[len(fields)-1] = fields[len(fields)-1].replace('\n', '')
for i in range(len(fields)):
data.append(float(fields[i]))
data = np.array(data).reshape((int)(len(data)/32/3), 32, 3)
skeletons = []
for d in data:
skeletons.append(Skeleton(d))
return skeletons
@check_time
def read_skeleton_data_from_csv(person_name, pos_mode, model):
csv_folder_name = get_save_skeleton_data_folder_name(person_name, pos_mode, model)
original_data = read_csv(csv_folder_name + 'original_data.csv')
estimate_data = read_csv(csv_folder_name + 'estimate_data.csv')
return original_data, estimate_data
def get_save_image_file_name(person_name, pos_mode, model, plot_mode):
folder_name = make_folder('result')
folder_name = make_folder(folder_name + '/' + person_name)
folder_name = make_folder(folder_name + '/' + pos_mode)
folder_name = make_folder(folder_name + '/' + model)
folder_name = make_folder(folder_name + '/' + plot_mode)
return folder_name + '/'
@check_time
def skeleton_draw(person_name, pos_mode, model, original_data, estimate_data, sleep_t=100):
canvas = Canvas()
img_name_point = get_save_image_file_name(person_name, pos_mode, model, 'point')
img_name_length = get_save_image_file_name(person_name, pos_mode, model, 'length')
img_name_3D = get_save_image_file_name(person_name, pos_mode, model, 'plot_3D')
# canvas.skeleton_3D_plot(original_data, estimate_data)
canvas.skeleton_3D_animation_save(original_data, estimate_data, sleep_t, img_name_3D)
canvas.skeleton_point_plot(original_data, estimate_data, img_name_point)
canvas.skeleton_length_plot(original_data, estimate_data, img_name_length)
def set_lower_init_cov(value_cov=1e-6, velo_cov_0=1e-4, velo_cov_1=1e-2, len_cov=1e-10, obs_cov_factor=1e-4, trans_factor=100):
return [value_cov, velo_cov_0,value_cov, velo_cov_0,value_cov, velo_cov_1,value_cov, velo_cov_1,value_cov, velo_cov_0, len_cov,obs_cov_factor, trans_factor]
def set_upper_init_cov(value_cov=1e-6, velo_cov=1e-4, len_cov=1e-10, obs_cov_factor=1e-4, trans_factor=100):
return [value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,value_cov,velo_cov,len_cov,obs_cov_factor,trans_factor]
@check_time
def simulation_ukf(filename, test_num, cbr_num, model):
skeletons, lower_init_mean, upper_init_mean, test_num = init_simul(filename, test_num, cbr_num)
lower_init_cov = set_lower_init_cov()
upper_init_cov = set_upper_init_cov()
flt = make_filter(lower_init_mean, lower_init_cov, upper_init_mean, upper_init_cov, model)
original_data, estimate_data, estimate_state = run_ukf(flt, skeletons, test_num)
return original_data, estimate_data, estimate_state
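# A minimal driver sketch showing how the helpers above compose into the full pipeline
# (merge raw frames -> run the UKF -> save CSVs -> render plots). The folder layout and
# parameter values are assumptions for illustration only, not taken from the repository:
#
#   person_name, pos_mode = 'person01', 'standing'
#   filename = merge_skeleton_data('data/' + person_name + '/' + pos_mode)
#   original, estimate, state = simulation_ukf(filename, test_num=300, cbr_num=50, model='ukf')
#   save_skeleton_data_to_csv(person_name, pos_mode, original, estimate, state, 'ukf')
#   skeleton_draw(person_name, pos_mode, 'ukf', original, estimate)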
| 34.198068
| 368
| 0.759288
| 1,166
| 7,079
| 4.249571
| 0.127787
| 0.078708
| 0.05328
| 0.066599
| 0.574975
| 0.526942
| 0.456105
| 0.354188
| 0.332795
| 0.302926
| 0
| 0.009957
| 0.120356
| 7,079
| 206
| 369
| 34.364078
| 0.785772
| 0.016669
| 0
| 0.288235
| 0
| 0
| 0.039667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.064706
| 0.011765
| 0.276471
| 0.035294
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b3d6bc9a4bc463c1dd688594551748653895d4
| 2,683
|
py
|
Python
|
cfgov/scripts/initial_data.py
|
Mario-Kart-Felix/cfgov-refresh
|
7978fedeb7aaf4d96a87720e6545567085e056a9
|
[
"CC0-1.0"
] | 1
|
2019-12-29T17:50:07.000Z
|
2019-12-29T17:50:07.000Z
|
cfgov/scripts/initial_data.py
|
ascott1/cfgov-refresh
|
9c916aaed3a48110a199eb4675474290a51f815d
|
[
"CC0-1.0"
] | 1
|
2021-04-22T01:09:52.000Z
|
2021-04-22T01:09:52.000Z
|
cfgov/scripts/initial_data.py
|
ascott1/cfgov-refresh
|
9c916aaed3a48110a199eb4675474290a51f815d
|
[
"CC0-1.0"
] | 1
|
2021-02-02T08:59:38.000Z
|
2021-02-02T08:59:38.000Z
|
from __future__ import print_function
import json
import os
from django.conf import settings
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from wagtail.wagtailcore.models import Page, Site
from v1.models import HomePage, BrowseFilterablePage
def run():
print('Running script \'scripts.initial_data\' ...')
admin_user = None
site_root = None
events = None
admin_user = User.objects.filter(username='admin')
if not admin_user:
admin_user = User(username='admin',
password=make_password(os.environ.get('WAGTAIL_ADMIN_PW')),
is_superuser=True, is_active=True, is_staff=True)
admin_user.save()
else:
admin_user = admin_user[0]
# Creates a new site root `CFGov`
site_root = HomePage.objects.filter(title='CFGOV')
if not site_root:
root = Page.objects.first()
site_root = HomePage(title='CFGOV', slug='home-page', depth=2, owner=admin_user)
site_root.live = True
root.add_child(instance=site_root)
latest = site_root.save_revision(user=admin_user, submitted_for_moderation=False)
latest.save()
else:
site_root = site_root[0]
# Setting new site root
if not Site.objects.filter(hostname='content.localhost').exists():
site = Site.objects.first()
site.port = 8000
site.root_page_id = site_root.id
site.save()
content_site = Site(hostname='content.localhost', port=8000, root_page_id=site_root.id)
content_site.save()
# Clean Up
old_site_root = Page.objects.filter(id=2).first()  # .first() returns None instead of raising when the page is absent
if old_site_root:
old_site_root.delete()
# Events Browse Page required for event `import-data` command
if not BrowseFilterablePage.objects.filter(title='Events').exists():
events = BrowseFilterablePage(title='Events', slug='events', owner=admin_user)
site_root.add_child(instance=events)
revision = events.save_revision(
user=admin_user,
submitted_for_moderation=False,
)
revision.publish()
# Archived Events Browse Filterable Page
if not BrowseFilterablePage.objects.filter(title='Archive').exists():
archived_events = BrowseFilterablePage(title='Archive', slug='archive', owner=admin_user)
if not events:
events = BrowseFilterablePage.objects.get(title='Events')
events.add_child(instance=archived_events)
revision = archived_events.save_revision(
user=admin_user,
submitted_for_moderation=False,
)
revision.publish()
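# Usage note (an assumption inferred from the run() entry point, not stated in the file):
# scripts of this form are typically executed with django-extensions' runscript command,
# run from the directory containing manage.py, with the admin password supplied through
# the environment:
#
#   WAGTAIL_ADMIN_PW=changeme python manage.py runscript initial_data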
| 34.844156
| 97
| 0.666045
| 327
| 2,683
| 5.266055
| 0.281346
| 0.083624
| 0.037747
| 0.036585
| 0.213705
| 0.188153
| 0.114983
| 0.114983
| 0.114983
| 0.084785
| 0
| 0.006826
| 0.235557
| 2,683
| 76
| 98
| 35.302632
| 0.832765
| 0.060007
| 0
| 0.135593
| 0
| 0
| 0.057234
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016949
| false
| 0.033898
| 0.135593
| 0
| 0.152542
| 0.033898
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b40095f02ec8f60d6c2306673d054478953aba
| 1,456
|
py
|
Python
|
Scripts/compareOutputs.py
|
harmim/vut-avs-project1
|
d36e6b5cdebce748d2bdf2afc43950968ecf0a91
|
[
"MIT"
] | null | null | null |
Scripts/compareOutputs.py
|
harmim/vut-avs-project1
|
d36e6b5cdebce748d2bdf2afc43950968ecf0a91
|
[
"MIT"
] | null | null | null |
Scripts/compareOutputs.py
|
harmim/vut-avs-project1
|
d36e6b5cdebce748d2bdf2afc43950968ecf0a91
|
[
"MIT"
] | null | null | null |
# Simple python3 script to compare output with a reference output.
# Usage: python3 compareOutputs.py testOutput.h5 testRefOutput.h5
import sys
import h5py
import numpy as np
if len(sys.argv) != 3:
print("Expected two arguments. Output and reference output file.")
sys.exit(1)
filename = sys.argv[1]
ref_filename = sys.argv[2]
f = h5py.File(filename, 'r')
ref_f = h5py.File(ref_filename, 'r')
out = np.array(f['output_data'])
out_ref = np.array(ref_f['output_data'])
if out.shape != out_ref.shape:
print("The files do not contain the same number of outputs.")
print("The output size: {0}.".format(out.shape[0]))
print("The reference size: {0}.".format(out_ref.shape[0]))
sys.exit(1)
ref_value = np.copy(out_ref)
ref_value[ref_value == 0.0] = 1.0
error = (out_ref - out) / ref_value
maximal_error = np.amax(error)
print("Maximal error between the output and the reference is {0}.".format(maximal_error))
if maximal_error < 10**(-6):
print("OK:Output seems to match the reference.")
sys.exit(0)
print("Failure:Output does not match the reference.")
maximal_error = np.amax(error, axis=1)
print(maximal_error.shape)
for i in range(0, 5):
print("Image", i)
print("Expected:", end="")
for j in range(0, 10):
print(out_ref[i, j], end = " ")
print("\nGot:", end="")
for j in range(0, 10):
print(out[i, j], end=" ")
print("\nMaximal error:", maximal_error[i], "\n")
sys.exit(1)
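# Small self-test sketch (not part of the original repository): write two tiny HDF5 files
# containing the 'output_data' dataset this script expects, then run the comparison,
# which should report a match.
#
#   import h5py, numpy as np
#   data = np.random.rand(5, 10).astype(np.float32)
#   with h5py.File("testOutput.h5", "w") as f:
#       f.create_dataset("output_data", data=data)
#   with h5py.File("testRefOutput.h5", "w") as f:
#       f.create_dataset("output_data", data=data)
#   # python3 compareOutputs.py testOutput.h5 testRefOutput.h5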
| 26.472727
| 89
| 0.666896
| 239
| 1,456
| 3.970711
| 0.334728
| 0.044257
| 0.02529
| 0.029505
| 0.101159
| 0.052687
| 0.052687
| 0.052687
| 0.052687
| 0
| 0
| 0.029094
| 0.173764
| 1,456
| 54
| 90
| 26.962963
| 0.759767
| 0.087912
| 0
| 0.131579
| 0
| 0
| 0.270943
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.078947
| 0
| 0.078947
| 0.368421
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b741334252c43868c1ae3bb0661b811481f368
| 1,048
|
py
|
Python
|
src/zvt/recorders/em/meta/em_stockhk_meta_recorder.py
|
vishalbelsare/zvt
|
d55051147274c0a4157f08ec60908c781a323c8f
|
[
"MIT"
] | 2,032
|
2019-04-16T14:10:32.000Z
|
2022-03-31T12:40:13.000Z
|
src/zvt/recorders/em/meta/em_stockhk_meta_recorder.py
|
vishalbelsare/zvt
|
d55051147274c0a4157f08ec60908c781a323c8f
|
[
"MIT"
] | 162
|
2019-05-07T09:57:46.000Z
|
2022-03-25T16:23:08.000Z
|
src/zvt/recorders/em/meta/em_stockhk_meta_recorder.py
|
vishalbelsare/zvt
|
d55051147274c0a4157f08ec60908c781a323c8f
|
[
"MIT"
] | 755
|
2019-04-30T10:25:16.000Z
|
2022-03-29T17:50:49.000Z
|
# -*- coding: utf-8 -*-
from zvt.contract.api import df_to_db
from zvt.contract.recorder import Recorder
from zvt.domain.meta.stockhk_meta import Stockhk
from zvt.recorders.em import em_api
class EMStockhkRecorder(Recorder):
provider = "em"
data_schema = Stockhk
def run(self):
df_south = em_api.get_tradable_list(entity_type="stockhk", hk_south=True)
df_south = df_south.set_index("code", drop=False)
df_south["south"] = True
df = em_api.get_tradable_list(entity_type="stockhk")
df = df.set_index("code", drop=False)
df_other = df.loc[~df.index.isin(df_south.index)].copy()
df_other["south"] = False
df_to_db(df=df_south, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
df_to_db(df=df_other, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
if __name__ == "__main__":
recorder = EMStockhkRecorder()
recorder.run()
# the __all__ is generated
__all__ = ["EMStockhkRecorder"]
| 33.806452
| 115
| 0.711832
| 151
| 1,048
| 4.589404
| 0.337748
| 0.060606
| 0.025974
| 0.046176
| 0.40404
| 0.37518
| 0.308802
| 0.308802
| 0.20202
| 0.20202
| 0
| 0.001149
| 0.169847
| 1,048
| 30
| 116
| 34.933333
| 0.795402
| 0.043893
| 0
| 0
| 0
| 0
| 0.059059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b7cb0bb44951e0d2ab9c8433c064285f85c4f7
| 6,362
|
py
|
Python
|
src/main.py
|
yanwunhao/auto-mshts
|
7a4b690bbb6ae55e2f6fad77d176c2c0822db7a0
|
[
"MIT"
] | null | null | null |
src/main.py
|
yanwunhao/auto-mshts
|
7a4b690bbb6ae55e2f6fad77d176c2c0822db7a0
|
[
"MIT"
] | null | null | null |
src/main.py
|
yanwunhao/auto-mshts
|
7a4b690bbb6ae55e2f6fad77d176c2c0822db7a0
|
[
"MIT"
] | null | null | null |
from util.io import read_setting_json, read_0h_data, read_24h_data, draw_single_curve
from util.convert import split_array_into_samples, calculate_avg_of_sample, convert_to_percentage
from util.calculus import calculate_summary_of_sample, fit_sigmoid_curve
import matplotlib.pyplot as plt
import numpy as np
import csv
setting = read_setting_json()
setting = setting["rule"]
# load the experiment parameters (stored in ./data/setting.json)
initial_filename = setting["0h_datafile"]
final_filename = setting["24h_datafile"]
# sample width and height are the size of each sample area
sample_width = setting["sample_width"]
sample_height = setting["sample_height"]
dilution_protocol = setting["dilution_protocol"]
# width of each dilution
basic_width = setting["basic_width"]
# number of each control group
control_number_list = setting["control_number"]
# output directory
output_directory = setting["output_directory"]
# import initial concentration and calculate x_data
initial_concentration = setting["initial_concentration"]
repeat_times = int(sample_width / basic_width)
x_data = []
current_concentration = initial_concentration
for i in range(repeat_times):
x_data.append(current_concentration)
current_concentration /= dilution_protocol
# load raw data
initial_sd_data = read_0h_data()
final_sd_data = read_24h_data()
# reshape data into the size of board
rebuild_0h_data = initial_sd_data.reshape((32, -1))
rebuild_24h_data = final_sd_data.reshape((32, -1))
# reshape data into a 2-dimensional array contains each group data
sample_divided_list_0h = split_array_into_samples(rebuild_0h_data, sample_width, sample_height)
sample_divided_list_24h = split_array_into_samples(rebuild_24h_data, sample_width, sample_height)
# handle data of control groups
control_0h_summary = 0
for number in control_number_list:
number = number - 1
sample = sample_divided_list_0h[number]
control_0h_summary = control_0h_summary + calculate_summary_of_sample(sample)
control_0h_average = control_0h_summary / (sample_width * sample_height * len(control_number_list))
control_24h_summary = 0
for number in control_number_list:
number = number - 1
sample = sample_divided_list_24h[number]
control_24h_summary = control_24h_summary + calculate_summary_of_sample(sample)
control_24h_average = control_24h_summary / (sample_width * sample_height * len(control_number_list))
# calculate standard deviation of each grid
sd_matrix = []
for line in rebuild_24h_data:
new_line = []
for element in line:
sd_data = (float(element) - control_0h_average.item()) \
/ (control_24h_average.item() - control_0h_average.item())
new_line.append(sd_data)
sd_matrix.append(new_line)
sd_matrix = np.array(sd_matrix)
# split array into different samples
sd_groups = split_array_into_samples(sd_matrix, sample_width, sample_height)
sd_groups = np.array(sd_groups, dtype=float)
RESULT_LIST = []
for sample in sd_groups:
result = calculate_avg_of_sample(sample, sample_width, basic_width)
RESULT_LIST.append(result)
RESULT_LIST = np.array(RESULT_LIST)
FULL_RESULT_LIST = []
for group in sd_groups:
x_index = 0
y_index = 0
sample_buffer = []
data_buffer = []
while y_index < sample_height:
while x_index < basic_width:
x = x_index
while x < sample_width:
data_buffer.append(group[y_index][x])
x += basic_width
sample_buffer.append(data_buffer)
data_buffer = []
x_index += 1
y_index += 1
x_index = 0
FULL_RESULT_LIST.append(sample_buffer)
FULL_RESULT_LIST = np.array(FULL_RESULT_LIST, dtype=float)
optional_color = ['red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple']
EC50_LIST = []
EC50_AVG_LIST = []
sample_num = 0
for SAMPLE in FULL_RESULT_LIST:
sample_num += 1
fig, ax = plt.subplots()
index = 0
ax.set_title('Sample '+str(sample_num))
x_buffer = []
x_sampling_buffer = []
y_sampling_buffer = []
for repeat in SAMPLE:
x, y, x_sampling, y_sampling = fit_sigmoid_curve(x_data, repeat)
x_buffer.append(x)
x_sampling_buffer.append(x_sampling)
y_sampling_buffer.append(y_sampling)
draw_single_curve(ax, x, y, x_sampling, y_sampling, optional_color[index])
index += 1
EC50_LIST.append(x_buffer)
# draw the average result
avg = np.mean(x_buffer)
EC50_AVG_LIST.append(avg)
# draw the average curve
x_sampling_buffer = np.array(x_sampling_buffer).T
y_sampling_buffer = np.array(y_sampling_buffer).T
x_sampling_avg = []
y_sampling_avg = []
for line in x_sampling_buffer:
x_sampling_avg.append(np.mean(line))
for line in y_sampling_buffer:
y_sampling_avg.append(np.mean(line))
ax.plot(avg, 0.5, 'o', color='black')
ax.plot(x_sampling_avg, y_sampling_avg, color='black')
plt.savefig("./output/" + output_directory + "/figs" + "/Sample " + str(sample_num))
plt.cla()
plt.close(fig)
# output grouped result
output_f_grouped = open("./output/" + output_directory + "/result_grouped.csv", "w")
csv_writer_grouped = csv.writer(output_f_grouped)
csv_writer_grouped.writerow(["initial concentration: " + str(initial_concentration), "dilution protocol: " + str(dilution_protocol)])
csv_writer_grouped.writerow("")
sample_num = 0
for SAMPLE in FULL_RESULT_LIST:
SAMPLE = SAMPLE.T
sample_num += 1
csv_writer_grouped.writerow(["Sample " + str(sample_num)])
for repeat in SAMPLE:
csv_writer_grouped.writerow(repeat)
csv_writer_grouped.writerow("")
ec50_result_list = []
for ec50_index in EC50_LIST[sample_num-1]:
ec50_result_list.append(10**ec50_index)
csv_writer_grouped.writerow(ec50_result_list)
average_ec50 = np.power(10, EC50_AVG_LIST[sample_num-1])
csv_writer_grouped.writerow([])
csv_writer_grouped.writerow(["Average EC50", "Std"])
csv_writer_grouped.writerow([average_ec50, np.std(ec50_result_list)])
csv_writer_grouped.writerow("")
output_f_grouped.close()
output_f_full = open("./output/" + output_directory + "/result_full.csv", "w")
csv_writer_full = csv.writer(output_f_full)
for line in sd_matrix:
csv_writer_full.writerow(line)
output_f_full.close()
print("Finished")
| 31.651741
| 133
| 0.735618
| 909
| 6,362
| 4.786579
| 0.161716
| 0.031027
| 0.04045
| 0.05516
| 0.235808
| 0.176971
| 0.128246
| 0.074925
| 0.074925
| 0.051942
| 0
| 0.019285
| 0.168658
| 6,362
| 200
| 134
| 31.81
| 0.803365
| 0.08708
| 0
| 0.136691
| 0
| 0
| 0.05733
| 0.003626
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.043165
| 0
| 0.043165
| 0.007194
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b8a82e6b0282dee965fc93d3c31abaae481d21
| 6,492
|
py
|
Python
|
twisted/names/root.py
|
twonds/twisted
|
d6e270a465d371c3bed01bf369af497b77eb9f1e
|
[
"Unlicense",
"MIT"
] | 1
|
2021-01-27T19:11:21.000Z
|
2021-01-27T19:11:21.000Z
|
twisted/names/root.py
|
twonds/twisted
|
d6e270a465d371c3bed01bf369af497b77eb9f1e
|
[
"Unlicense",
"MIT"
] | null | null | null |
twisted/names/root.py
|
twonds/twisted
|
d6e270a465d371c3bed01bf369af497b77eb9f1e
|
[
"Unlicense",
"MIT"
] | 3
|
2017-01-04T01:24:15.000Z
|
2020-06-18T16:14:56.000Z
|
# -*- test-case-name: twisted.names.test.test_rootresolve -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Resolver implementation for querying successive authoritative servers to
lookup a record, starting from the root nameservers.
@author: Jp Calderone
todo::
robustify it
break discoverAuthority into several smaller functions
documentation
"""
from twisted.internet import defer
from twisted.names import dns
from twisted.names import common
def retry(t, p, *args):
assert t, "Timeout is required"
t = list(t)
def errback(failure):
failure.trap(defer.TimeoutError)
if not t:
return failure
return p.query(timeout=t.pop(0), *args
).addErrback(errback
)
return p.query(timeout=t.pop(0), *args
).addErrback(errback
)
class _DummyController:
def messageReceived(self, *args):
pass
class Resolver(common.ResolverBase):
def __init__(self, hints):
common.ResolverBase.__init__(self)
self.hints = hints
def _lookup(self, name, cls, type, timeout):
d = discoverAuthority(name, self.hints
).addCallback(self.discoveredAuthority, name, cls, type, timeout
)
return d
def discoveredAuthority(self, auth, name, cls, type, timeout):
from twisted.names import client
q = dns.Query(name, type, cls)
r = client.Resolver(servers=[(auth, dns.PORT)])
d = r.queryUDP([q], timeout)
d.addCallback(r.filterAnswers)
return d
def lookupNameservers(host, atServer, p=None):
# print 'Nameserver lookup for', host, 'at', atServer, 'with', p
if p is None:
p = dns.DNSDatagramProtocol(_DummyController())
p.noisy = False
return retry(
(1, 3, 11, 45), # Timeouts
p, # Protocol instance
(atServer, dns.PORT), # Server to query
[dns.Query(host, dns.NS, dns.IN)] # Question to ask
)
def lookupAddress(host, atServer, p=None):
# print 'Address lookup for', host, 'at', atServer, 'with', p
if p is None:
p = dns.DNSDatagramProtocol(_DummyController())
p.noisy = False
return retry(
(1, 3, 11, 45), # Timeouts
p, # Protocol instance
(atServer, dns.PORT), # Server to query
[dns.Query(host, dns.A, dns.IN)] # Question to ask
)
def extractAuthority(msg, cache):
records = msg.answers + msg.authority + msg.additional
nameservers = [r for r in records if r.type == dns.NS]
# print 'Records for', soFar, ':', records
# print 'NS for', soFar, ':', nameservers
if not nameservers:
return None, nameservers
if not records:
raise IOError("No records")
for r in records:
if r.type == dns.A:
cache[str(r.name)] = r.payload.dottedQuad()
for r in records:
if r.type == dns.NS:
if str(r.payload.name) in cache:
return cache[str(r.payload.name)], nameservers
for addr in records:
if addr.type == dns.A and addr.name == r.name:
return addr.payload.dottedQuad(), nameservers
return None, nameservers
def discoverAuthority(host, roots, cache=None, p=None):
if cache is None:
cache = {}
rootAuths = list(roots)
parts = host.rstrip('.').split('.')
parts.reverse()
authority = rootAuths.pop()
soFar = ''
for part in parts:
soFar = part + '.' + soFar
# print '///////', soFar, authority, p
msg = defer.waitForDeferred(lookupNameservers(soFar, authority, p))
yield msg
msg = msg.getResult()
newAuth, nameservers = extractAuthority(msg, cache)
if newAuth is not None:
# print "newAuth is not None"
authority = newAuth
else:
if nameservers:
r = str(nameservers[0].payload.name)
# print 'Recursively discovering authority for', r
authority = defer.waitForDeferred(discoverAuthority(r, roots, cache, p))
yield authority
authority = authority.getResult()
# print 'Discovered to be', authority, 'for', r
## else:
## # print 'Doing address lookup for', soFar, 'at', authority
## msg = defer.waitForDeferred(lookupAddress(soFar, authority, p))
## yield msg
## msg = msg.getResult()
## records = msg.answers + msg.authority + msg.additional
## addresses = [r for r in records if r.type == dns.A]
## if addresses:
## authority = addresses[0].payload.dottedQuad()
## else:
## raise IOError("Resolution error")
# print "Yielding authority", authority
yield authority
discoverAuthority = defer.deferredGenerator(discoverAuthority)
def makePlaceholder(deferred, name):
def placeholder(*args, **kw):
deferred.addCallback(lambda r: getattr(r, name)(*args, **kw))
return deferred
return placeholder
class DeferredResolver:
def __init__(self, resolverDeferred):
self.waiting = []
resolverDeferred.addCallback(self.gotRealResolver)
def gotRealResolver(self, resolver):
w = self.waiting
self.__dict__ = resolver.__dict__
self.__class__ = resolver.__class__
for d in w:
d.callback(resolver)
def __getattr__(self, name):
if name.startswith('lookup') or name in ('getHostByName', 'query'):
self.waiting.append(defer.Deferred())
return makePlaceholder(self.waiting[-1], name)
raise AttributeError(name)
def bootstrap(resolver):
"""Lookup the root nameserver addresses using the given resolver
Return a Resolver which will eventually become a C{root.Resolver}
instance that has references to all the root servers that we were able
to look up.
"""
domains = [chr(ord('a') + i) for i in range(13)]
# f = lambda r: (log.msg('Root server address: ' + str(r)), r)[1]
f = lambda r: r
L = [resolver.getHostByName('%s.root-servers.net' % d).addCallback(f) for d in domains]
d = defer.DeferredList(L)
d.addCallback(lambda r: Resolver([e[1] for e in r if e[0]]))
return DeferredResolver(d)
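# Minimal usage sketch (Python 3 syntax, an illustration only; assumes a running reactor
# and twisted.names.client.createResolver() for the hints resolver):
#
#   from twisted.internet import reactor
#   from twisted.names import client, root
#
#   resolver = root.bootstrap(client.createResolver())
#   d = resolver.getHostByName('example.com')
#   d.addCallback(lambda ip: print('resolved to', ip))
#   d.addBoth(lambda _: reactor.stop())
#   reactor.run()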
| 33.989529
| 91
| 0.59658
| 742
| 6,492
| 5.169811
| 0.269542
| 0.006257
| 0.014338
| 0.013556
| 0.202815
| 0.191345
| 0.180396
| 0.158498
| 0.138686
| 0.112617
| 0
| 0.006545
| 0.2939
| 6,492
| 190
| 92
| 34.168421
| 0.830279
| 0.26525
| 0
| 0.209677
| 0
| 0
| 0.016181
| 0
| 0
| 0
| 0
| 0.005263
| 0.008065
| 1
| 0.129032
| false
| 0.008065
| 0.032258
| 0
| 0.306452
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b8dc6f73954e378a1c4ed802de05ace9457d1e
| 2,056
|
py
|
Python
|
tools/apply_colormap_dir.py
|
edwardyehuang/iDS
|
36bde3a9e887eb7e1a8d88956cf041909ee84da4
|
[
"MIT"
] | null | null | null |
tools/apply_colormap_dir.py
|
edwardyehuang/iDS
|
36bde3a9e887eb7e1a8d88956cf041909ee84da4
|
[
"MIT"
] | null | null | null |
tools/apply_colormap_dir.py
|
edwardyehuang/iDS
|
36bde3a9e887eb7e1a8d88956cf041909ee84da4
|
[
"MIT"
] | null | null | null |
# ================================================================
# MIT License
# Copyright (c) 2021 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================
import os, sys
rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.insert(1, rootpath)
import tensorflow as tf
import numpy as np
from PIL import Image
from absl import app
from absl import flags
from common_flags import FLAGS
from ids.voc2012 import get_colormap as get_voc2012_colormap
from ids.cityscapes_fine import get_colormap as get_cityscapes_colormap
flags.DEFINE_string("input_dir", None, "input dir path")
flags.DEFINE_string("output_dir", None, "output dir path")
flags.DEFINE_string("colormap", "voc2012", "colormap name")
flags.DEFINE_integer("ignore_label", 255, "ignore label")
def apply_colormap_to_dir(input_dir, output_dir=None, colormap=None):
colormap = colormap.astype(np.uint8)
counter = 0
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for filename in tf.io.gfile.listdir(input_dir):
input_path = os.path.join(input_dir, filename)
output_path = os.path.join(output_dir, filename)
img = Image.open(input_path)
if img.mode != "L" and img.mode != "P":
continue
img = img.convert("P")
img.putpalette(colormap)
img.save(output_path, format="PNG")
counter += 1
tf.print("Processed {}".format(counter))
def main(argv):
colormap_name = FLAGS.colormap
colormap_name = colormap_name.lower()
if colormap_name == "voc2012":
colormap = get_voc2012_colormap()
elif colormap_name == "cityscapes":
colormap = get_cityscapes_colormap()
else:
raise ValueError(f"Not support colormap = {colormap_name}")
if FLAGS.ignore_label == 0:
colormap = colormap[1:]
apply_colormap_to_dir(FLAGS.input_dir, FLAGS.output_dir, colormap=colormap)
if __name__ == "__main__":
app.run(main)
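# Example invocation (paths are placeholders, not from the repository):
#
#   python tools/apply_colormap_dir.py \
#       --input_dir /path/to/grayscale_label_pngs \
#       --output_dir /path/to/colorized_pngs \
#       --colormap cityscapes --ignore_label 255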
| 25.073171
| 89
| 0.651751
| 264
| 2,056
| 4.859848
| 0.344697
| 0.049104
| 0.023383
| 0.029618
| 0.071707
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01962
| 0.181907
| 2,056
| 81
| 90
| 25.382716
| 0.743163
| 0.101654
| 0
| 0
| 0
| 0
| 0.098263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.2
| 0
| 0.244444
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6b94f55392b1866e86cdeb5f1344d92e8c4dea3
| 6,007
|
py
|
Python
|
EDScoutCore/JournalInterface.py
|
bal6765/ed-scout
|
0c2ee6141a5cd86a660c2319d7c4be61614b13fb
|
[
"MIT"
] | null | null | null |
EDScoutCore/JournalInterface.py
|
bal6765/ed-scout
|
0c2ee6141a5cd86a660c2319d7c4be61614b13fb
|
[
"MIT"
] | null | null | null |
EDScoutCore/JournalInterface.py
|
bal6765/ed-scout
|
0c2ee6141a5cd86a660c2319d7c4be61614b13fb
|
[
"MIT"
] | null | null | null |
from inspect import signature
import json
import time
import os
import glob
import logging
from pathlib import Path
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from watchdog.events import PatternMatchingEventHandler
from EDScoutCore.FileSystemUpdatePrompter import FileSystemUpdatePrompter
default_journal_path = os.path.join(str(Path.home()), "Saved Games\\Frontier Developments\\Elite Dangerous")
journal_file_pattern = "journal.*.log"
logger = logging.getLogger('JournalInterface')
class JournalChangeIdentifier:
def __init__(self, journal_path=default_journal_path):
self.journals = {}
self.journal_path = journal_path
logger.debug(f"watching for journal changes in {self.journal_path}")
self._init_journal_lists()
self._new_journal_entry_callback = None
self.latest_journal = self.identify_latest_journal()
# Prompter is required to force the file system to do updates on some systems so we get regular updates from the
# journal watcher.
self.prompter = FileSystemUpdatePrompter(self.latest_journal)
def identify_latest_journal(self):
if len(self.journals.keys()) == 0:
return None
keys = sorted(self.journals.keys())
return keys[-1]
def process_journal_change(self, changed_file):
if changed_file != self.latest_journal:
self.latest_journal = changed_file
self.prompter.set_watch_file(self.latest_journal)
new_size = os.stat(changed_file).st_size
new_data = None
# If the game was loaded after the scout it will start a new journal which we need to treat as unscanned.
if changed_file not in self.journals:
self.journals[changed_file] = 0
logger.debug(f'{changed_file} - Size change: {self.journals[changed_file]} to {new_size}')
if new_size > 0: # Don't try and read it if this is the first notification (we seem to get two; one from the file being cleared).
# Check how much it has grown and read the excess
size_diff = new_size - self.journals[changed_file]
if size_diff > 0:
with open(changed_file, 'rb') as f:
f.seek(-size_diff, os.SEEK_END) # Note minus sign
new_data = f.read()
entries = []
if new_data:
new_journal_lines = JournalChangeIdentifier.binary_file_data_to_lines(new_data)
try:
for line in new_journal_lines:
logger.debug(f'New journal entry detected: {line}')
entry = json.loads(line)
entry['type'] = "JournalEntry" # Add an identifier that's common to everything we shove down the outgoing pipe so the receiver can distiguish.
entries.append(entry)
logger.debug(f'Found {len(entries)} new entries')
for entry in entries:
yield entry
self.journals[changed_file] = new_size
except json.decoder.JSONDecodeError as e:
logger.exception(e)
@staticmethod
def binary_file_data_to_lines(binary_data):
as_ascii = binary_data.decode('UTF-8')
all_lines = as_ascii.split("\r\n")
all_lines.pop() # Drop the last empty line
return all_lines
def _init_journal_lists(self):
journal_files = glob.glob(os.path.join(self.journal_path, journal_file_pattern))
for journal_file in journal_files:
self.journals[journal_file] = os.stat(journal_file).st_size
class JournalWatcher:
def __init__(self, path=default_journal_path, force_polling=False):
self.path = path
self.force_polling = force_polling
self._configure_watchers()
def set_callback(self, on_journal_change):
self.event_handler.set_callback(on_journal_change)
def stop(self):
self.observer.stop()
self.observer.join()
class _EntriesChangeHandler(PatternMatchingEventHandler):
def __init__(self):
super(JournalWatcher._EntriesChangeHandler, self).__init__(
patterns=['*Journal*.log'],
ignore_patterns=[],
ignore_directories=True)
self.on_journal_change = None
def set_callback(self, on_new_journal_entry):
self.on_journal_change = on_new_journal_entry
def on_modified(self, event):
changed_file = str(event.src_path)
logger.debug("Journal change: " + changed_file)
self.on_journal_change(changed_file)
def on_created(self, event):
file = str(event.src_path)
logger.debug("Journal created: " + file)
def on_deleted(self, event):
file = str(event.src_path)
logger.debug("Journal deleted: " + file)
def on_moved(self, event):
file = str(event.src_path)
logger.debug("Journal moved: " + file)
def _configure_watchers(self):
self.event_handler = JournalWatcher._EntriesChangeHandler()
if self.force_polling:
self.observer = PollingObserver(0.25) # Poll every quarter of a second
else:
self.observer = Observer()
self.observer.schedule(self.event_handler, self.path, recursive=False)
self.observer.start()
if __name__ == '__main__':
def ReportJournalChange(journal_change):
    print('New route detected: ' + str(journal_change))
journalWatcher = JournalWatcher()
journalWatcher.set_callback(ReportJournalChange)
print('running')
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print('done')
journalWatcher.stop()
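# Note: where native file-system events prove unreliable (for instance on some network
# shares -- an assumption, not something documented here), the watcher can be forced
# onto the polling observer instead:
#
#   journalWatcher = JournalWatcher(force_polling=True)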
| 34.522989
| 164
| 0.632096
| 699
| 6,007
| 5.201717
| 0.307582
| 0.042354
| 0.020627
| 0.025303
| 0.070682
| 0.04813
| 0.04813
| 0.04813
| 0.037954
| 0.037954
| 0
| 0.00234
| 0.288497
| 6,007
| 173
| 165
| 34.722543
| 0.848386
| 0.095389
| 0
| 0.041322
| 0
| 0
| 0.078652
| 0.005523
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132231
| false
| 0.008264
| 0.090909
| 0
| 0.272727
| 0.024793
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6ba0dc97e3a9015e73a33e1fbadd9852c0606ea
| 1,355
|
py
|
Python
|
labs-python/lab9/add_files.py
|
xR86/ml-stuff
|
2a1b79408897171b78032ff2531ab6f8b18be6c4
|
[
"MIT"
] | 3
|
2018-12-11T03:03:15.000Z
|
2020-02-11T19:38:07.000Z
|
labs-python/lab9/add_files.py
|
xR86/ml-stuff
|
2a1b79408897171b78032ff2531ab6f8b18be6c4
|
[
"MIT"
] | 6
|
2017-05-31T20:58:32.000Z
|
2021-02-16T23:13:15.000Z
|
labs-python/lab9/add_files.py
|
xR86/ml-stuff
|
2a1b79408897171b78032ff2531ab6f8b18be6c4
|
[
"MIT"
] | null | null | null |
import sqlite3
conn = sqlite3.connect('example.db')
c = conn.cursor()
import os
import hashlib
import time
def get_file_md5(filePath):
    h = hashlib.md5()
    with open(filePath, "rb") as f:
        h.update(f.read())
    return h.hexdigest()

def get_file_sha256(filePath):
    h = hashlib.sha256()
    with open(filePath, "rb") as f:
        h.update(f.read())
    return h.hexdigest()
def get_dir_data(dir_path):
dir_path = os.path.realpath(dir_path)
#print next(os.walk(dir_path))[2]
#print os.path.basename(dir_path)
id_location = 0
id_file = 0
for dir_file in next(os.walk(dir_path))[2]:
file_name = dir_file
file_md5 = get_file_md5(dir_file)
file_sha256 = get_file_sha256(dir_file)
file_size = os.path.getsize(dir_file)
file_time = time.gmtime(os.path.getctime(dir_file))
file_formatted_time = time.strftime("%Y-%m-%d %I:%M:%S %p", file_time)
file_path = os.path.realpath(dir_file)
location_values = (id_location, file_path)
c.execute("INSERT INTO location VALUES (?, ?)", location_values)
files_values = (id_location, id_file)
c.execute("INSERT INTO files VALUES (?, ?)", files_values)
file_info_values = (id_file, file_name, file_size, file_formatted_time, file_md5)
c.execute("INSERT INTO file_info VALUES (?, ?, ?, ?, ?)", file_info_values)
id_location += 1
id_file += 1
get_dir_data('./')
# Save (commit) the changes
conn.commit()
conn.close()
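# The INSERT statements above assume three tables already exist in example.db.
# A schema consistent with the value tuples used here (column names are illustrative
# guesses; only the column counts are implied by the code):
#
#   c.execute("CREATE TABLE IF NOT EXISTS location (id_location INTEGER, path TEXT)")
#   c.execute("CREATE TABLE IF NOT EXISTS files (id_location INTEGER, id_file INTEGER)")
#   c.execute("CREATE TABLE IF NOT EXISTS file_info (id_file INTEGER, name TEXT, "
#             "size INTEGER, created TEXT, md5 TEXT)")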
| 22.966102
| 83
| 0.710701
| 216
| 1,355
| 4.194444
| 0.291667
| 0.054084
| 0.060706
| 0.059603
| 0.238411
| 0.143488
| 0.103753
| 0.103753
| 0.103753
| 0.103753
| 0
| 0.021496
| 0.141697
| 1,355
| 59
| 84
| 22.966102
| 0.757524
| 0.066421
| 0
| 0.108108
| 0
| 0
| 0.114806
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.108108
| 0
| 0.243243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6bacf59de7852cf3a5c740a8171a4aa7144b26c
| 4,083
|
py
|
Python
|
Replication Python and R Codes/Figure_6/cMCA_ESS2018_LABCON_org.py
|
tzuliu/Contrastive-Multiple-Correspondence-Analysis-cMCA
|
a59a5c36dd5d4ac04205627827e792322742462d
|
[
"MIT"
] | 3
|
2020-09-25T07:11:46.000Z
|
2022-02-08T05:07:34.000Z
|
Replication Python and R Codes/Figure_6/cMCA_ESS2018_LABCON_org.py
|
tzuliu/Contrastive-Multiple-Correspondence-Analysis-cMCA
|
a59a5c36dd5d4ac04205627827e792322742462d
|
[
"MIT"
] | null | null | null |
Replication Python and R Codes/Figure_6/cMCA_ESS2018_LABCON_org.py
|
tzuliu/Contrastive-Multiple-Correspondence-Analysis-cMCA
|
a59a5c36dd5d4ac04205627827e792322742462d
|
[
"MIT"
] | 1
|
2021-02-06T16:44:44.000Z
|
2021-02-06T16:44:44.000Z
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import prince
from sklearn import utils
from sklearn.cluster import DBSCAN
import itertools
from cmca import CMCA
from ccmca import CCMCA
from matplotlib import rc
plt.style.use('ggplot')
df = pd.read_csv("./uk2018.csv")
df["prtclcgb"].replace({5: 8, 9: 8, 10:8, 11:8, 12:8, 13:8, 15:8, 19:8}, inplace=True)
df["prtclcgb"].replace({6: 5}, inplace=True)
df["prtclcgb"].replace({7: 6}, inplace=True)
df["prtclcgb"].replace({8: 7}, inplace=True)
alpha = r'$ \alpha $'
tableau10 = {
'teal': '#78B7B2',
'blue': '#507AA6',
'orange': '#F08E39',
'red': '#DF585C',
'green': '#5BA053',
'purple': '#AF7BA1',
'yellow': '#ECC854',
'brown': '#9A7460',
'pink': '#FD9EA9',
'gray': '#BAB0AC',
7: '#9A7460',
1: '#507AA6',
2: '#F08E39',
3: '#DF585C',
4: '#5BA053',
0: '#78B7B2',
6: '#ECC854',
5: '#AF7BA1',
8: '#FD9EA9',
9: '#BAB0AC',
-1: '#BAB0AC',
99: '#BAB0AC',
'LDP': '#507AA6',
'DPJ': '#F08E39'
}
def fillna_based_on_dtype(df):
for key in dict(df.dtypes).keys():
if df.dtypes[key] == object:  # np.object is deprecated; the builtin type works for dtype comparison
df[key] = df[key].fillna('na')
else:
df[key] = df[key].fillna(99)
def df_to_mat(df):
X = df.iloc[:,np.r_[1:(df.shape[1])]]
X_con = X[X["prtclcgb"] == 1]
X_lab = X[X["prtclcgb"] == 2]
X_ldp = X[X["prtclcgb"] == 3]
X_snp = X[X["prtclcgb"] == 4]
X_gre = X[X["prtclcgb"] == 5]
X_uip = X[X["prtclcgb"] == 6]
X_oth = X[X["prtclcgb"] == 7]
print("missing value ratio (CON)", X_con.isna().sum().sum() / (X_con.shape[0] * X_con.shape[1]))
print("missing value ratio (LAB)", X_lab.isna().sum().sum() / (X_lab.shape[0] * X_lab.shape[1]))
print("missing value ratio (LDP)", X_ldp.isna().sum().sum() / (X_ldp.shape[0] * X_ldp.shape[1]))
print("missing value ratio (SNP)", X_snp.isna().sum().sum() / (X_snp.shape[0] * X_snp.shape[1]))
print("missing value ratio (GRE)", X_gre.isna().sum().sum() / (X_gre.shape[0] * X_gre.shape[1]))
print("missing value ratio (UIP)", X_uip.isna().sum().sum() / (X_uip.shape[0] * X_uip.shape[1]))
print("missing value ratio (OTH)", X_oth.isna().sum().sum() / (X_oth.shape[0] * X_oth.shape[1]))
fillna_based_on_dtype(X_con)
fillna_based_on_dtype(X_lab)
fillna_based_on_dtype(X_ldp)
fillna_based_on_dtype(X_snp)
fillna_based_on_dtype(X_gre)
fillna_based_on_dtype(X_uip)
fillna_based_on_dtype(X_oth)
return(X_con, X_lab, X_ldp, X_snp, X_gre, X_uip, X_oth)
X_con, X_lab, X_ldp, X_snp, X_gre, X_uip, X_oth = df_to_mat(df)
X = pd.concat([X_con, X_lab, X_ldp, X_snp, X_gre, X_uip, X_oth])
print(X_con.shape, X_lab.shape, X_ldp.shape, X_snp.shape, X_gre.shape, X_uip.shape, X_oth.shape, X.shape)
## Dictionary for Level and Party
party = {1:"Con", 2:"Lab", 3:"LD", 4:"SNP", 5:"Green", 6:"UKIP", 7:"Other"}
## Fitting cMCA and exporting plots
cmca = CMCA(n_components=2, copy=True, check_input=True)
cmca = cmca.fit(fg=X_lab.iloc[:,0:(X_lab.shape[1]-3)], bg=X_con.iloc[:,0:(X_con.shape[1]-3)], alpha=1.5)
Y_fg = np.array(cmca.transform(X_lab.iloc[:,0:(X.shape[1]-3)]))
Y_bg = np.array(cmca.transform(X_con.iloc[:,0:(X.shape[1]-3)]))
Y_fg_col = np.array(cmca.transform(X_lab.iloc[:,0:(X.shape[1]-3)], axis='col'))
prefix_to_info = cmca.gen_prefix_to_info()
f_6 = plt.figure()
plt.xlim([-2.5, 2.5])
plt.ylim([-2.5, 2.5])
plt.scatter(Y_fg[:, 0], Y_fg[:, 1], c=tableau10[X_lab["prtclcgb"].iloc[0]], label=party[X_lab["prtclcgb"].iloc[0]], alpha=0.3, linewidths=0)
plt.scatter(Y_bg[:, 0], Y_bg[:, 1], c=tableau10[X_con["prtclcgb"].iloc[0]], label=party[X_con["prtclcgb"].iloc[0]], alpha=0.3, linewidths=0)
handles, labels = plt.gca().get_legend_handles_labels()
handles = [handles[1],handles[0]]
labels = ["Con","Lab"]
plt.legend(handles, labels, loc="lower right", shadow=False, scatterpoints=1, fontsize=8)
plt.xlabel('cPC1')
plt.ylabel('cPC2')
plt.title("cMCA (tg: LAB, bg: CON, " + str(alpha) + ": 1.5)")
plt.show()
f_6.savefig("cMCA_ESS2018_labcon_org.pdf", bbox_inches='tight')
| 35.198276
| 140
| 0.624051
| 715
| 4,083
| 3.384615
| 0.234965
| 0.024793
| 0.042975
| 0.059504
| 0.338843
| 0.186364
| 0.097107
| 0.090909
| 0.065289
| 0.065289
| 0
| 0.060983
| 0.152584
| 4,083
| 115
| 141
| 35.504348
| 0.638439
| 0.01445
| 0
| 0
| 0
| 0
| 0.164428
| 0.006716
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020619
| false
| 0
| 0.103093
| 0
| 0.123711
| 0.082474
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6bb99021b44144da731911de204a7afc66e8789
| 1,196
|
py
|
Python
|
Solutions/077.py
|
ruppysuppy/Daily-Coding-Problem-Solutions
|
37d061215a9af2ce39c51f8816c83039914c0d0b
|
[
"MIT"
] | 70
|
2021-03-18T05:22:40.000Z
|
2022-03-30T05:36:50.000Z
|
Solutions/077.py
|
ungaro/Daily-Coding-Problem-Solutions
|
37d061215a9af2ce39c51f8816c83039914c0d0b
|
[
"MIT"
] | null | null | null |
Solutions/077.py
|
ungaro/Daily-Coding-Problem-Solutions
|
37d061215a9af2ce39c51f8816c83039914c0d0b
|
[
"MIT"
] | 30
|
2021-03-18T05:22:43.000Z
|
2022-03-17T10:25:18.000Z
|
"""
Problem:
Given a list of possibly overlapping intervals, return a new list of intervals where
all overlapping intervals have been merged.
The input list is not necessarily ordered in any way.
For example, given [(1, 3), (5, 8), (4, 10), (20, 25)], you should return
[(1, 3), (4, 10), (20, 25)].
"""
from typing import List, Tuple
def merge_intervals(intervals: List[Tuple[int, int]]) -> List[Tuple[int, int]]:
intervals.sort(key=lambda x: x[0])
merged_intervals = []
start = intervals[0][0]
end = intervals[0][1]
# generating the merged intervals
for interval in intervals[1:]:
curr_start, curr_end = interval
if end < curr_start:
merged_intervals.append((start, end))
start = curr_start
end = curr_end
elif end < curr_end and end > curr_start:
end = curr_end
# adding the last interval
merged_intervals.append((start, end))
return merged_intervals
if __name__ == "__main__":
print(merge_intervals([(1, 3), (5, 8), (4, 10), (20, 25)]))
print(merge_intervals([(1, 3), (5, 8), (4, 10), (20, 25), (6, 12)]))
"""
SPECS:
TIME COMPLEXITY: O(n log n)  [dominated by the initial sort]
SPACE COMPLEXITY: O(n)
"""
| 26
| 84
| 0.622074
| 173
| 1,196
| 4.16763
| 0.404624
| 0.104022
| 0.027739
| 0.038835
| 0.231623
| 0.098474
| 0.098474
| 0.098474
| 0.083218
| 0.083218
| 0
| 0.055677
| 0.234114
| 1,196
| 45
| 85
| 26.577778
| 0.731441
| 0.296823
| 0
| 0.210526
| 0
| 0
| 0.010363
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.052632
| 0
| 0.157895
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6bbb3606fdfbd374577782a243b3f2af19f5e8d
| 3,163
|
py
|
Python
|
slackbot_wems/chris/slacklib.py
|
wray/wems
|
69caedfb8906f04175196d610a1ca516db01f72a
|
[
"MIT"
] | 4
|
2016-11-10T21:43:01.000Z
|
2017-02-24T21:36:45.000Z
|
slackbot_wems/chris/slacklib.py
|
wray/wems
|
69caedfb8906f04175196d610a1ca516db01f72a
|
[
"MIT"
] | 1
|
2019-04-26T10:48:34.000Z
|
2019-05-18T15:59:35.000Z
|
slackbot_wems/chris/slacklib.py
|
wray/wems
|
69caedfb8906f04175196d610a1ca516db01f72a
|
[
"MIT"
] | 8
|
2016-11-09T22:25:14.000Z
|
2019-04-26T19:53:37.000Z
|
import time
import emoji
# Put your commands here
COMMAND1 = "testing testing"
COMMAND2 = "roger roger"
BLUEON = str("blue on")
BLUEOFF = str("blue off")
REDON = str("red on")
REDOFF = str("red off")
GREENON = str("green on")
GREENOFF = str("green off")
YELLOWON = str("yellow on")
YELLOWOFF = str("yellow off")
CLOCK = str("update clock")
SCRAMBLE = str('scramble the 7')
HACKER = str('hack the 7')
SINGLEREADING = str('light')
def setup():
    # make the hardware modules visible to handle_command() as well
    global GPIO, lite, segment
import RPi.GPIO as GPIO
import slackbot_wems.chris.light as lite
import slackbot_wems.chris.segment7 as segment
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Pin Setup
GPIO.setup(17, GPIO.OUT) # BLUE LED
GPIO.setup(27, GPIO.OUT) # RED LED
GPIO.setup(5, GPIO.OUT) # GREEN LED
GPIO.setup(22, GPIO.OUT) # YELLOW LED
GPIO.setup(12, GPIO.OUT) # LDR
gpio_ready = False  # set to True by handle_command() once setup() has configured the pins
# Your handling code goes in this function
def handle_command(command):
"""
Determine if the command is valid. If so, take action and return
a response, if necessary.
"""
global gpio_ready
if not gpio_ready:
    setup()
    gpio_ready = True
response = ""
if command.find(COMMAND1) >= 0:
response = str("Surprise!")
elif command.find(COMMAND2) >= 0:
response = (emoji.emojize('Python\n is\n :thumbs_up: :thumbs_up: :thumbs_up:'))
# Blue LED Commands
elif command.find(BLUEON) >= 0:
GPIO.output(17, True)
response = emoji.emojize("" + "Turning :radio_button: ON...")
elif command.find(BLUEOFF) >= 0:
GPIO.output(17, False)
response = emoji.emojize("" + "Turning :radio_button: OFF...")
# Red LED Commands
elif command.find(REDON) >= 0:
GPIO.output(27, True)
response = emoji.emojize("" + "Turning :red_circle: ON...")
elif command.find(REDOFF) >= 0:
GPIO.output(27, False)
response = emoji.emojize("" + "Turning :red_circle: OFF...")
# Green LED Commands
elif command.find(GREENON) >= 0:
GPIO.output(5, True)
response = emoji.emojize("" + "Turning :green_apple: ON...")
elif command.find(GREENOFF) >= 0:
GPIO.output(5, False)
response = emoji.emojize("" + "Turning :green_apple: OFF...")
# Yellow LED Commands
elif command.find(YELLOWON) >= 0:
GPIO.output(22, True)
response = emoji.emojize("" + "Turning :sunny: ON...")
elif command.find(YELLOWOFF) >= 0:
GPIO.output(22, False)
response = emoji.emojize("" + "Turning :sunny: OFF...")
# 7 Segment Commands
elif command.find(CLOCK) >= 0:
print('Updating the clock!')
response = segment.updateClock()
elif command.find(SCRAMBLE) >= 0:
print(emoji.emojize(":egg: There is nothing better than scrambled eggs! :egg:"))
response = segment.scramble()
elif command.find(HACKER) >= 0:
print('Message')
response = segment.hacker()
elif command.find(SINGLEREADING) >= 0:
a = lite.printReading()
a = int(a)
time.sleep(1)
print(a)
response = ('Here is what the LDR Sensor said to me: ' + str(a))
return response
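# Example of exercising the dispatcher above (hypothetical calls; they only work on a
# Raspberry Pi with the LEDs and 7-segment display wired as assumed above):
#
#   print(handle_command("blue on"))      # -> "Turning :radio_button: ON..."
#   print(handle_command("update clock"))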
| 26.140496
| 88
| 0.607651
| 403
| 3,163
| 4.736973
| 0.290323
| 0.080671
| 0.102148
| 0.113148
| 0.223153
| 0.116291
| 0
| 0
| 0
| 0
| 0
| 0.019442
| 0.251976
| 3,163
| 120
| 89
| 26.358333
| 0.787405
| 0.095163
| 0
| 0
| 0
| 0
| 0.184043
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025974
| false
| 0
| 0.064935
| 0
| 0.103896
| 0.064935
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6bde93bee8b10728e74b15763f724d08484c86a
| 4,640
|
py
|
Python
|
homeassistant/components/tasmota/discovery.py
|
yura505/core
|
0fc5f4b0421c6c5204d3ccb562153ac3836441a9
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/tasmota/discovery.py
|
yura505/core
|
0fc5f4b0421c6c5204d3ccb562153ac3836441a9
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/tasmota/discovery.py
|
yura505/core
|
0fc5f4b0421c6c5204d3ccb562153ac3836441a9
|
[
"Apache-2.0"
] | null | null | null |
"""Support for MQTT discovery."""
import asyncio
import logging
from hatasmota.discovery import (
TasmotaDiscovery,
get_device_config as tasmota_get_device_config,
get_entities_for_platform as tasmota_get_entities_for_platform,
get_entity as tasmota_get_entity,
has_entities_with_platform as tasmota_has_entities_with_platform,
unique_id_from_hash,
)
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
SUPPORTED_PLATFORMS = [
"switch",
]
ALREADY_DISCOVERED = "tasmota_discovered_components"
CONFIG_ENTRY_IS_SETUP = "tasmota_config_entry_is_setup"
DATA_CONFIG_ENTRY_LOCK = "tasmota_config_entry_lock"
TASMOTA_DISCOVERY_DEVICE = "tasmota_discovery_device"
TASMOTA_DISCOVERY_ENTITY_NEW = "tasmota_discovery_entity_new_{}"
TASMOTA_DISCOVERY_ENTITY_UPDATED = "tasmota_discovery_entity_updated_{}_{}_{}_{}"
def clear_discovery_hash(hass, discovery_hash):
"""Clear entry in ALREADY_DISCOVERED list."""
del hass.data[ALREADY_DISCOVERED][discovery_hash]
def set_discovery_hash(hass, discovery_hash):
"""Set entry in ALREADY_DISCOVERED list."""
hass.data[ALREADY_DISCOVERED][discovery_hash] = {}
async def async_start(
hass: HomeAssistantType, discovery_topic, config_entry, tasmota_mqtt
) -> bool:
"""Start MQTT Discovery."""
async def _load_platform(platform):
"""Load a Tasmota platform if not already done."""
async with hass.data[DATA_CONFIG_ENTRY_LOCK]:
config_entries_key = f"{platform}.tasmota"
if config_entries_key not in hass.data[CONFIG_ENTRY_IS_SETUP]:
await hass.config_entries.async_forward_entry_setup(
config_entry, platform
)
hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key)
async def _discover_entity(tasmota_entity_config, discovery_hash, platform):
"""Handle adding or updating a discovered entity."""
if not tasmota_entity_config:
# Entity disabled, clean up entity registry
entity_registry = await hass.helpers.entity_registry.async_get_registry()
unique_id = unique_id_from_hash(discovery_hash)
entity_id = entity_registry.async_get_entity_id(platform, DOMAIN, unique_id)
if entity_id:
_LOGGER.debug("Removing entity: %s %s", platform, discovery_hash)
entity_registry.async_remove(entity_id)
return
if discovery_hash in hass.data[ALREADY_DISCOVERED]:
_LOGGER.debug(
"Entity already added, sending update: %s %s",
platform,
discovery_hash,
)
async_dispatcher_send(
hass,
TASMOTA_DISCOVERY_ENTITY_UPDATED.format(*discovery_hash),
tasmota_entity_config,
)
else:
_LOGGER.debug("Adding new entity: %s %s", platform, discovery_hash)
tasmota_entity = tasmota_get_entity(tasmota_entity_config, tasmota_mqtt)
hass.data[ALREADY_DISCOVERED][discovery_hash] = None
async_dispatcher_send(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(platform),
tasmota_entity,
discovery_hash,
)
async def async_device_discovered(payload, mac):
"""Process the received message."""
if ALREADY_DISCOVERED not in hass.data:
hass.data[ALREADY_DISCOVERED] = {}
_LOGGER.debug("Received discovery data for tasmota device: %s", mac)
tasmota_device_config = tasmota_get_device_config(payload)
async_dispatcher_send(
hass, TASMOTA_DISCOVERY_DEVICE, tasmota_device_config, mac
)
if not payload:
return
for platform in SUPPORTED_PLATFORMS:
if not tasmota_has_entities_with_platform(payload, platform):
continue
await _load_platform(platform)
for platform in SUPPORTED_PLATFORMS:
tasmota_entities = tasmota_get_entities_for_platform(payload, platform)
for (tasmota_entity_config, discovery_hash) in tasmota_entities:
await _discover_entity(tasmota_entity_config, discovery_hash, platform)
hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
hass.data[CONFIG_ENTRY_IS_SETUP] = set()
tasmota_discovery = TasmotaDiscovery(discovery_topic, tasmota_mqtt)
await tasmota_discovery.start_discovery(async_device_discovered, None)
| 37.419355
| 88
| 0.694612
| 525
| 4,640
| 5.727619
| 0.182857
| 0.073495
| 0.02993
| 0.02993
| 0.372132
| 0.227802
| 0.08979
| 0.035916
| 0
| 0
| 0
| 0
| 0.236207
| 4,640
| 123
| 89
| 37.723577
| 0.848476
| 0.031897
| 0
| 0.122222
| 0
| 0
| 0.079082
| 0.042208
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.066667
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6be7a1b7add8b9481d98005ea50f939d83dd351
| 15,696
|
py
|
Python
|
tfx/components/infra_validator/executor.py
|
TimoKerr/tfx
|
10d13d57eeac21514fed73118cb43464dada67f1
|
[
"Apache-2.0"
] | 1
|
2021-05-10T10:41:06.000Z
|
2021-05-10T10:41:06.000Z
|
tfx/components/infra_validator/executor.py
|
TimoKerr/tfx
|
10d13d57eeac21514fed73118cb43464dada67f1
|
[
"Apache-2.0"
] | null | null | null |
tfx/components/infra_validator/executor.py
|
TimoKerr/tfx
|
10d13d57eeac21514fed73118cb43464dada67f1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX InfraValidator executor definition."""
import contextlib
import functools
import os
import signal
import threading
import time
from typing import Any, Dict, List, Optional
from absl import logging
from tfx import types
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import request_builder
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator import types as iv_types
from tfx.components.infra_validator.model_server_runners import kubernetes_runner
from tfx.components.infra_validator.model_server_runners import local_docker_runner
from tfx.dsl.components.base import base_executor
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.types.standard_component_specs import BLESSING_KEY
from tfx.types.standard_component_specs import EXAMPLES_KEY
from tfx.types.standard_component_specs import MODEL_KEY
from tfx.types.standard_component_specs import REQUEST_SPEC_KEY
from tfx.types.standard_component_specs import SERVING_SPEC_KEY
from tfx.types.standard_component_specs import VALIDATION_SPEC_KEY
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx.utils.model_paths import tf_serving_flavor
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
from tensorflow_serving.apis import regression_pb2
_DEFAULT_NUM_TRIES = 5
_DEFAULT_POLLING_INTERVAL_SEC = 1
_DEFAULT_MAX_LOADING_TIME_SEC = 300
_DEFAULT_MODEL_NAME = 'infra-validation-model'
# Proto message keys for oneof block.
_TENSORFLOW_SERVING = 'tensorflow_serving'
_LOCAL_DOCKER = 'local_docker'
_KUBERNETES = 'kubernetes'
# Artifact property keys
_BLESSED_KEY = 'blessed'
_MODEL_FLAG_KEY = 'has_model'
# Filename of infra blessing artifact on succeed.
_BLESSED_FILENAME = 'INFRA_BLESSED'
# Filename of infra blessing artifact on fail.
_NOT_BLESSED_FILENAME = 'INFRA_NOT_BLESSED'
def _create_model_server_runner(
model_path: str,
serving_binary: serving_bins.ServingBinary,
serving_spec: infra_validator_pb2.ServingSpec):
"""Create a ModelServerRunner from a model, a ServingBinary and a ServingSpec.
Args:
model_path: An IV-flavored model path. (See model_path_utils.py)
serving_binary: One of ServingBinary instances parsed from the
`serving_spec`.
serving_spec: A ServingSpec instance of this infra validation.
Returns:
A ModelServerRunner.
"""
platform = serving_spec.WhichOneof('serving_platform')
if platform == 'local_docker':
return local_docker_runner.LocalDockerRunner(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec
)
elif platform == 'kubernetes':
return kubernetes_runner.KubernetesRunner(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec
)
else:
raise NotImplementedError('Invalid serving_platform {}'.format(platform))
def _convert_to_prediction_log(request: iv_types.Request):
"""Try convert infra validation request to TF-Serving PredictionLog."""
if isinstance(request, classification_pb2.ClassificationRequest):
return prediction_log_pb2.PredictionLog(
classify_log=prediction_log_pb2.ClassifyLog(request=request))
elif isinstance(request, regression_pb2.RegressionRequest):
return prediction_log_pb2.PredictionLog(
regress_log=prediction_log_pb2.RegressLog(request=request))
elif isinstance(request, predict_pb2.PredictRequest):
return prediction_log_pb2.PredictionLog(
predict_log=prediction_log_pb2.PredictLog(request=request))
else:
raise NotImplementedError(
f'Cannot convert {type(request)} to PredictionLog')
def _mark_blessed(blessing: types.Artifact) -> None:
logging.info('Model passed infra validation.')
io_utils.write_string_file(
os.path.join(blessing.uri, _BLESSED_FILENAME), '')
blessing.set_int_custom_property(_BLESSED_KEY, 1)
def _mark_not_blessed(blessing: types.Artifact) -> None:
logging.info('Model failed infra validation.')
io_utils.write_string_file(
os.path.join(blessing.uri, _NOT_BLESSED_FILENAME), '')
blessing.set_int_custom_property(_BLESSED_KEY, 0)
class Executor(base_executor.BaseExecutor):
"""TFX infra validator executor."""
def __init__(self,
context: Optional[base_executor.BaseExecutor.Context] = None):
super(Executor, self).__init__(context)
self._cleanups = []
def _AddCleanup(self, function, *args, **kwargs):
self._cleanups.append(functools.partial(function, *args, **kwargs))
def _Cleanup(self):
for cleanup in self._cleanups:
try:
cleanup()
except: # pylint: disable=broad-except, bare-except
logging.warning('Error occurred during cleanup.', exc_info=True)
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
"""Contract for running InfraValidator Executor.
Args:
input_dict:
- `model`: Single `Model` artifact that we're validating.
- `examples`: `Examples` artifacts to be used for test requests.
output_dict:
- `blessing`: Single `InfraBlessing` artifact containing the validated
result and optionally the validated model if warmup requests are appended.
The artifact URI includes an empty file named either INFRA_BLESSED
or INFRA_NOT_BLESSED.
exec_properties:
- `serving_spec`: Serialized `ServingSpec` configuration.
- `validation_spec`: Serialized `ValidationSpec` configuration.
- `request_spec`: Serialized `RequestSpec` configuration.
"""
self._log_startup(input_dict, output_dict, exec_properties)
model = artifact_utils.get_single_instance(input_dict[MODEL_KEY])
blessing = artifact_utils.get_single_instance(output_dict[BLESSING_KEY])
if input_dict.get(EXAMPLES_KEY):
examples = artifact_utils.get_single_instance(input_dict[EXAMPLES_KEY])
else:
examples = None
serving_spec = infra_validator_pb2.ServingSpec()
proto_utils.json_to_proto(exec_properties[SERVING_SPEC_KEY], serving_spec)
if not serving_spec.model_name:
serving_spec.model_name = _DEFAULT_MODEL_NAME
validation_spec = infra_validator_pb2.ValidationSpec()
if exec_properties.get(VALIDATION_SPEC_KEY):
proto_utils.json_to_proto(exec_properties[VALIDATION_SPEC_KEY],
validation_spec)
if not validation_spec.num_tries:
validation_spec.num_tries = _DEFAULT_NUM_TRIES
if not validation_spec.max_loading_time_seconds:
validation_spec.max_loading_time_seconds = _DEFAULT_MAX_LOADING_TIME_SEC
if exec_properties.get(REQUEST_SPEC_KEY):
request_spec = infra_validator_pb2.RequestSpec()
proto_utils.json_to_proto(exec_properties[REQUEST_SPEC_KEY],
request_spec)
else:
request_spec = None
with self._InstallGracefulShutdownHandler():
self._Do(
model=model,
examples=examples,
blessing=blessing,
serving_spec=serving_spec,
validation_spec=validation_spec,
request_spec=request_spec,
)
@contextlib.contextmanager
def _InstallGracefulShutdownHandler(self):
# pylint: disable=g-doc-return-or-yield
"""Install graceful shutdown behavior.
Caveat: InfraValidator currently only recognizes SIGTERM signal as a
graceful shutdown. Furthermore, SIGTERM can be handled only if the executor
is running on the MainThread (the thread that runs the python interpreter)
due to the limitation of Python API.
When the executor is running on Kubernetes, SIGTERM is a standard way to
signal the graceful shutdown. Python default behavior for receiving SIGTERM
is to terminate the process without raising any exception. By registering a
handler that raises on signal, we can effectively transform the signal to an
exception, and we can reuse our cleanup code inside "except" or "finally"
block during the grace period.
When the executor is run by the local Beam DirectRunner, the executor thread
is one of the worker threads (not the MainThread), so SIGTERM cannot be
recognized. If either the MainThread or a worker thread receives SIGTERM,
the executor dies immediately without a grace period.
Even if the executor fails to shut down gracefully, external resources
created by the model server runner can still be cleaned up if the platform
supports such a mechanism (e.g. activeDeadlineSeconds in Kubernetes).
"""
def _handler(signum, frame):
del frame # Unused.
raise error_types.GracefulShutdown('Got signal {}.'.format(signum))
try:
old_handler = signal.signal(signal.SIGTERM, _handler)
except ValueError:
# If current thread is not a MainThread, it is not allowed to register
# the signal handler (ValueError raised).
logging.info('Unable to register signal handler for non-MainThread '
'(name=%s). SIGTERM will not be handled.',
threading.current_thread().name)
old_handler = None
try:
yield
finally:
self._Cleanup()
if old_handler:
signal.signal(signal.SIGTERM, old_handler)
def _Do(
self,
model: types.Artifact,
examples: Optional[types.Artifact],
blessing: types.Artifact,
serving_spec: infra_validator_pb2.ServingSpec,
validation_spec: infra_validator_pb2.ValidationSpec,
request_spec: Optional[infra_validator_pb2.RequestSpec],
):
if examples and request_spec:
logging.info('InfraValidator will be run in LOAD_AND_QUERY mode.')
requests = request_builder.build_requests(
model_name=serving_spec.model_name,
model=model,
examples=examples,
request_spec=request_spec)
else:
logging.info('InfraValidator will be run in LOAD_ONLY mode.')
requests = []
model_path = self._PrepareModelPath(model, serving_spec)
# TODO(jjong): Make logic parallel.
all_passed = True
for serving_binary in serving_bins.parse_serving_binaries(serving_spec):
all_passed &= self._ValidateWithRetry(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec,
validation_spec=validation_spec,
requests=requests)
if all_passed:
_mark_blessed(blessing)
if requests and request_spec.make_warmup:
self._CreateWarmupModel(blessing, model_path, warmup_requests=requests)
else:
_mark_not_blessed(blessing)
def _CreateWarmupModel(self, blessing: types.Artifact, model_path: str,
warmup_requests: List[iv_types.Request]):
output_model_path = path_utils.stamped_model_path(blessing.uri)
io_utils.copy_dir(src=model_path, dst=output_model_path)
io_utils.write_tfrecord_file(
path_utils.warmup_file_path(output_model_path),
*[_convert_to_prediction_log(r) for r in warmup_requests])
blessing.set_int_custom_property(_MODEL_FLAG_KEY, 1)
def _PrepareModelPath(self, model: types.Artifact,
serving_spec: infra_validator_pb2.ServingSpec) -> str:
model_path = path_utils.serving_model_path(
model.uri, path_utils.is_old_model_artifact(model))
serving_binary = serving_spec.WhichOneof('serving_binary')
if serving_binary == _TENSORFLOW_SERVING:
# TensorFlow Serving requires the model to be stored in its own directory
# structure flavor. If the current model_path does not conform to that
# flavor, we need to copy the model to a temporary path.
try:
# Check whether current model_path conforms to the tensorflow serving
# model path flavor. (Parsed without exception)
tf_serving_flavor.parse_model_path(
model_path,
expected_model_name=serving_spec.model_name)
except ValueError:
# Copy the model to comply with the tensorflow serving model path
# flavor.
temp_model_path = tf_serving_flavor.make_model_path(
model_base_path=self._get_tmp_dir(),
model_name=serving_spec.model_name,
version=int(time.time()))
io_utils.copy_dir(src=model_path, dst=temp_model_path)
self._AddCleanup(io_utils.delete_dir, self._context.get_tmp_path())
return temp_model_path
return model_path
def _ValidateWithRetry(
self, model_path: str,
serving_binary: serving_bins.ServingBinary,
serving_spec: infra_validator_pb2.ServingSpec,
validation_spec: infra_validator_pb2.ValidationSpec,
requests: List[iv_types.Request]):
for i in range(validation_spec.num_tries):
logging.info('Starting infra validation (attempt %d/%d).', i + 1,
validation_spec.num_tries)
try:
self._ValidateOnce(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec,
validation_spec=validation_spec,
requests=requests)
except error_types.GracefulShutdown:
# GracefulShutdown means infra validation was aborted. Do not retry;
# escalate the error.
raise
except Exception as e: # pylint: disable=broad-except
# Other exceptions indicate validation failure. Log the error and
# retry.
logging.exception('Infra validation (attempt %d/%d) failed.', i + 1,
validation_spec.num_tries)
if isinstance(e, error_types.DeadlineExceeded):
logging.info('Consider increasing the value of '
'ValidationSpec.max_loading_time_seconds.')
else:
# If validation passed without raising, this attempt succeeded.
return True
# Every attempt has failed. Mark the model as not blessed.
return False
def _ValidateOnce(
self, model_path: str,
serving_binary: serving_bins.ServingBinary,
serving_spec: infra_validator_pb2.ServingSpec,
validation_spec: infra_validator_pb2.ValidationSpec,
requests: List[iv_types.Request]):
deadline = time.time() + validation_spec.max_loading_time_seconds
runner = _create_model_server_runner(
model_path=model_path,
serving_binary=serving_binary,
serving_spec=serving_spec)
try:
logging.info('Starting %r.', runner)
runner.Start()
# Check model is successfully loaded.
runner.WaitUntilRunning(deadline)
client = serving_binary.MakeClient(runner.GetEndpoint())
client.WaitUntilModelLoaded(
deadline, polling_interval_sec=_DEFAULT_POLLING_INTERVAL_SEC)
# Check model can be successfully queried.
if requests:
client.SendRequests(requests)
finally:
logging.info('Stopping %r.', runner)
runner.Stop()
| 39.24
| 83
| 0.73248
| 1,952
| 15,696
| 5.627049
| 0.215676
| 0.031136
| 0.025492
| 0.021031
| 0.31819
| 0.270302
| 0.193645
| 0.165696
| 0.122178
| 0.093955
| 0
| 0.003496
| 0.198203
| 15,696
| 399
| 84
| 39.338346
| 0.869289
| 0.257964
| 0
| 0.238636
| 0
| 0
| 0.061587
| 0.005424
| 0
| 0
| 0
| 0.002506
| 0
| 1
| 0.056818
| false
| 0.015152
| 0.121212
| 0
| 0.215909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6bfbff8f4c4eb14d73dd394e1c8390a8c552bf9
| 18,474
|
py
|
Python
|
metr-la/model/Double_C_STTN.py
|
happys2333/DL-2021-fall
|
e110d737d1a70c8238f2de3278e6aebce07c7a66
|
[
"Apache-2.0"
] | 1
|
2022-02-11T12:24:08.000Z
|
2022-02-11T12:24:08.000Z
|
metr-la/model/Double_C_STTN.py
|
happys2333/DL-2021-fall
|
e110d737d1a70c8238f2de3278e6aebce07c7a66
|
[
"Apache-2.0"
] | null | null | null |
metr-la/model/Double_C_STTN.py
|
happys2333/DL-2021-fall
|
e110d737d1a70c8238f2de3278e6aebce07c7a66
|
[
"Apache-2.0"
] | null | null | null |
# from folder workMETRLA
# MODEL CODE
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 10:28:06 2020
@author: wb
"""
import torch
import torch.nn as nn
import math
# from GCN_models import GCN
# from One_hot_encoder import One_hot_encoder
import torch.nn.functional as F
import numpy as np
from scipy.sparse.linalg import eigs
from Param import *
from torchsummary import summary
DEVICE = 'cuda:1'
class One_hot_encoder(nn.Module):
def __init__(self, embed_size, time_num=288):
super(One_hot_encoder, self).__init__()
self.time_num = time_num
self.I = nn.Parameter(torch.eye(time_num, time_num, requires_grad=True))
self.onehot_Linear = nn.Linear(time_num, embed_size) # linear layer to change the one-hot encoding dimension
def forward(self, i, N=25, T=12):
if i % self.time_num + T > self.time_num:
o1 = self.I[i % self.time_num:, :]
o2 = self.I[0: (i + T) % self.time_num, :]
onehot = torch.cat((o1, o2), 0)
else:
onehot = self.I[i % self.time_num: i % self.time_num + T, :]
# onehot = onehot.repeat(N, 1, 1)
onehot = onehot.expand(N, T, self.time_num)
onehot = self.onehot_Linear(onehot)
return onehot
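# (Added note) forward() slices the identity matrix I into a [T, time_num]
# one-hot block for the current time-of-day index i (wrapping around when
# i % self.time_num + T exceeds time_num), expands it to [N, T, time_num],
# and projects it to [N, T, embed_size] through onehot_Linear.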
'''
Basic attention code.
ScaledDotProductAttention is generic.
About d_k:
The data arrives as [B, N, T, C]. Doing attention with C = 1 cannot represent the
high-dimensional features of the data well, so C is projected to an embedding size of 32 or 64.
If d_k = 32, a single head has dimension 32; with multi-head attention, e.g. 8 heads, that gives
32 * 8 = 256. To match the NIPS'17 Transformer paper exactly, use d_k = 64, heads = 8,
total embedding size = 512.
'''
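# Minimal shape sketch (illustrative values, not from the original code): for the
# spatial attention below with batch B=2, heads h=8, T=12, N=25 and d_k=32:
#   Q, K, V: [2, 8, 12, 25, 32]
#   scores  = Q @ K.transpose(-1, -2) / sqrt(32)  -> [2, 8, 12, 25, 25]
#   context = softmax(scores, dim=-1) @ V         -> [2, 8, 12, 25, 32]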
class ScaledDotProductAttention(nn.Module):
def __init__(self):
super(ScaledDotProductAttention, self).__init__()
def forward(self, Q, K, V):
'''
Q: [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), d_k]
K: [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), d_k]
V: [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), d_k]
attn_mask: [batch_size, n_heads, seq_len, seq_len] (may be absent)
'''
B, n_heads, len1, len2, d_k = Q.shape
scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k)
# scores : [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), N(Spatial) or T(Temporal)]
# scores.masked_fill_(attn_mask, -1e9) # Fills elements of self tensor with value where mask is True.
attn = nn.Softmax(dim=-1)(scores)
context = torch.matmul(attn,
V) # [batch_size, n_heads, T(Spatial) or N(Temporal), N(Spatial) or T(Temporal), d_k]]
return context
'''
S stands for spatial; MultiHeadAttention is the multi-head attention mechanism.
'''
class SMultiHeadAttention(nn.Module):
def __init__(self, embed_size, heads):
super(SMultiHeadAttention, self).__init__()
self.embed_size = embed_size
self.heads = heads
self.head_dim = embed_size // heads
assert (
self.head_dim * heads == embed_size
), "Embedding size needs to be divisible by heads"
# Use Linear layers as the projection matrices.
# But with multiple heads, do we need to declare multiple matrices???
self.W_V = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.W_K = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.W_Q = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.fc_out = nn.Linear(heads * self.head_dim, embed_size)
def forward(self, input_Q, input_K, input_V):
'''
input_Q: [batch_size, N, T, C]
input_K: [batch_size, N, T, C]
input_V: [batch_size, N, T, C]
attn_mask: [batch_size, seq_len, seq_len]
'''
B, N, T, C = input_Q.shape
# [B, N, T, C] --> [B, N, T, h * d_k] --> [B, N, T, h, d_k] --> [B, h, T, N, d_k]
Q = self.W_Q(input_Q).view(B, N, T, self.heads, self.head_dim).transpose(1,
3) # Q: [B, N, T, C] --[B, N, T, self.heads, self.head_dim] -> [B,h,T,N,dk]; the N and d_k dims are the ones attended over in ScaledDotProductAttention, i.e. spatial attention
K = self.W_K(input_K).view(B, N, T, self.heads, self.head_dim).transpose(1, 3) # K: [B, h, T, N, d_k]
V = self.W_V(input_V).view(B, N, T, self.heads, self.head_dim).transpose(1, 3) # V: [B, h, T, N, d_k]
# attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1) # attn_mask : [batch_size, n_heads, seq_len, seq_len] seq_len = N
# context: [batch_size, n_heads, len_q, d_v], attn: [batch_size, n_heads, len_q, len_k]
context = ScaledDotProductAttention()(Q, K, V) # [B, h, T, N, d_k]
context = context.permute(0, 3, 2, 1, 4) # [B, N, T, h, d_k]
context = context.reshape(B, N, T, self.heads * self.head_dim) # [B, N, T, C]
# context = context.transpose(1, 2).reshape(batch_size, -1, n_heads * d_v) # context: [batch_size, len_q, n_heads * d_v]
output = self.fc_out(context) # [batch_size, len_q, d_model]
return output
'''
T stands for temporal; MultiHeadAttention is the multi-head attention mechanism.
'''
class TMultiHeadAttention(nn.Module):
def __init__(self, embed_size, heads):
super(TMultiHeadAttention, self).__init__()
self.embed_size = embed_size
self.heads = heads
self.head_dim = embed_size // heads
assert (
self.head_dim * heads == embed_size
), "Embedding size needs to be divisible by heads"
# Use Linear layers as the projection matrices.
# But with multiple heads, do we need to declare multiple matrices???
self.W_V = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.W_K = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.W_Q = nn.Linear(self.embed_size, self.head_dim * self.heads, bias=False)
self.fc_out = nn.Linear(heads * self.head_dim, embed_size)
def forward(self, input_Q, input_K, input_V):
'''
input_Q: [batch_size, N, T, C]
input_K: [batch_size, N, T, C]
input_V: [batch_size, N, T, C]
attn_mask: [batch_size, seq_len, seq_len]
'''
B, N, T, C = input_Q.shape
# [B, N, T, C] --> [B, N, T, h * d_k] --> [B, N, T, h, d_k] --> [B, h, N, T, d_k]
Q = self.W_Q(input_Q).view(B, N, T, self.heads, self.head_dim).permute(0, 3, 1, 2,
4) # Q: [B, h, N, T, d_k]; attending over the T and d_k dims makes this temporal attention
K = self.W_K(input_K).view(B, N, T, self.heads, self.head_dim).permute(0, 3, 1, 2, 4) # K: [B, h, N, T, d_k]
V = self.W_V(input_V).view(B, N, T, self.heads, self.head_dim).permute(0, 3, 1, 2, 4) # V: [B, h, N, T, d_k]
# attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1) # attn_mask : [batch_size, n_heads, seq_len, seq_len]
# context: [batch_size, n_heads, len_q, d_v], attn: [batch_size, n_heads, len_q, len_k]
context = ScaledDotProductAttention()(Q, K, V) # [B, h, N, T, d_k]
context = context.permute(0, 2, 3, 1, 4) # [B, N, T, h, d_k]
context = context.reshape(B, N, T, self.heads * self.head_dim) # [B, N, T, C]
# context = context.transpose(1, 2).reshape(batch_size, -1, n_heads * d_v) # context: [batch_size, len_q, n_heads * d_v]
output = self.fc_out(context) # [batch_size, len_q, d_model]
return output
class STransformer(nn.Module):
def __init__(self, embed_size, heads, adj, cheb_K, dropout, forward_expansion):
super(STransformer, self).__init__()
# Spatial Embedding
self.adj = adj
self.D_S = adj.to(DEVICE)
self.embed_liner = nn.Linear(adj.shape[0], embed_size)
self.attention = SMultiHeadAttention(embed_size, heads)
self.norm1 = nn.LayerNorm(embed_size)
self.norm2 = nn.LayerNorm(embed_size)
self.feed_forward = nn.Sequential(
nn.Linear(embed_size, forward_expansion * embed_size),
nn.ReLU(),
nn.Linear(forward_expansion * embed_size, embed_size),
)
# Call the GCN
self.norm_adj = nn.InstanceNorm2d(1) # normalize the adjacency matrix
self.dropout = nn.Dropout(dropout)
self.fs = nn.Linear(embed_size, embed_size)
self.fg = nn.Linear(embed_size, embed_size)
def forward(self, value, key, query):
# value, key, query: [N, T, C] [B, N, T, C]
# Spatial Embedding part
# N, T, C = query.shape
# D_S = self.embed_liner(self.D_S) # [N, C]
# D_S = D_S.expand(T, N, C) # [T, N, C]: equivalent to replicating T copies along the first dimension
# D_S = D_S.permute(1, 0, 2) #[N, T, C]
B, N, T, C = query.shape
D_S = self.embed_liner(self.D_S) # [N, C] ---position encoding
D_S = D_S.expand(B, T, N, C) # [B, T, N, C]: replicate T copies along dim 1 and B copies along dim 0
D_S = D_S.permute(0, 2, 1, 3) # [B, N, T, C]
# Spatial Transformer part
query = query + D_S
attention = self.attention(query, query, query) # (B, N, T, C)
# Add skip connection, run through normalization and finally dropout
x = self.dropout(self.norm1(attention + query))
forward = self.feed_forward(x)
U_S = self.dropout(self.norm2(forward + x))
# Fuse STransformer and GCN
g = torch.sigmoid(self.fs(U_S)) # (7)
out = g * U_S + (1 - g) # (8)
return out # (B, N, T, C)
class TTransformer(nn.Module):
def __init__(self, embed_size, heads, time_num, dropout, forward_expansion):
super(TTransformer, self).__init__()
# Temporal embedding One hot
self.time_num = time_num
# self.one_hot = One_hot_encoder(embed_size, time_num) # temporal embedding via one-hot encoding, or alternatively
self.temporal_embedding = nn.Embedding(time_num, embed_size) # temporal embedding via nn.Embedding
self.attention = TMultiHeadAttention(embed_size, heads)
self.norm1 = nn.LayerNorm(embed_size)
self.norm2 = nn.LayerNorm(embed_size)
self.feed_forward = nn.Sequential(
nn.Linear(embed_size, forward_expansion * embed_size),
nn.ReLU(),
nn.Linear(forward_expansion * embed_size, embed_size),
)
self.dropout = nn.Dropout(dropout)
def forward(self, value, key, query, t):
B, N, T, C = query.shape
# D_T = self.one_hot(t, N, T) # temporal embedding via one-hot encoding, or alternatively
D_T = self.temporal_embedding(torch.arange(0, T).to(DEVICE)) # temporal embedding via nn.Embedding
D_T = D_T.expand(B, N, T, C)
# add the temporal embedding to the query; the original paper uses concatenation
query = query + D_T
attention = self.attention(query, query, query)
# Add skip connection, run through normalization and finally dropout
x = self.dropout(self.norm1(attention + query))
forward = self.feed_forward(x)
out = self.dropout(self.norm2(forward + x))
return out
### STBlock
class STTransformerBlock(nn.Module):
def __init__(self, embed_size, heads, adj, time_num, cheb_K, dropout, forward_expansion):
super(STTransformerBlock, self).__init__()
self.STransformer = STransformer(embed_size, heads, adj, cheb_K, dropout, forward_expansion)
self.TTransformer = TTransformer(embed_size, heads, time_num, dropout, forward_expansion)
self.norm1 = nn.LayerNorm(embed_size)
self.norm2 = nn.LayerNorm(embed_size)
self.dropout = nn.Dropout(dropout)
def forward(self, value, key, query, t):
# value, key, query: [N, T, C] [B, N, T, C]
# Add skip connection,run through normalization and finally dropout
x1 = self.norm1(self.STransformer(value, key, query) + query) # (B, N, T, C)
x2 = self.dropout(self.norm2(self.TTransformer(x1, x1, x1, t) + x1))
return x2
### Encoder
class Encoder(nn.Module):
# Stack multiple ST-Transformer Blocks
def __init__(
self,
embed_size,
num_layers,
heads,
adj,
time_num,
device,
forward_expansion,
cheb_K,
dropout,
):
super(Encoder, self).__init__()
self.embed_size = embed_size
self.device = device
self.layers = nn.ModuleList(
[
STTransformerBlock(
embed_size,
heads,
adj,
time_num,
cheb_K,
dropout=dropout,
forward_expansion=forward_expansion
)
for _ in range(num_layers)
]
)
self.dropout = nn.Dropout(dropout)
def forward(self, x, t):
# x: [N, T, C] [B, N, T, C]
out = self.dropout(x)
# In the Encoder the query, key, value are all the same.
for layer in self.layers:
out = layer(out, out, out, t)
return out
### Transformer
class Transformer(nn.Module):
def __init__(
self,
adj,
embed_size,
num_layers,
heads,
time_num,
forward_expansion, ##?
cheb_K,
dropout,
device=DEVICE
):
super(Transformer, self).__init__()
self.encoder = Encoder(
embed_size,
num_layers,
heads,
adj,
time_num,
device,
forward_expansion,
cheb_K,
dropout
)
self.device = device
def forward(self, src, t):
## scr: [N, T, C] [B, N, T, C]
enc_src = self.encoder(src, t)
return enc_src # [B, N, T, C]
### ST Transformer: Total Model
class STTransformer(nn.Module):
def __init__(
self,
adj,
in_channels,
embed_size,
time_num,
num_layers,
T_dim,
output_T_dim,
heads,
cheb_K,
forward_expansion,
dropout=0
):
super(STTransformer, self).__init__()
self.forward_expansion = forward_expansion # feed-forward embedding size: 8, 16, 32, ..., 1024
# The first convolution expands the number of channels
self.conv1 = nn.Conv2d(in_channels, embed_size, 1) # expand the channel dim (C = 1) up to embed_size
self.Transformer = Transformer(
adj,
embed_size,
num_layers,
heads,
time_num,
forward_expansion,
cheb_K,
dropout=0
)
# Shrink the time dimension, e.g. T_dim=12 down to output_T_dim=3 (12 steps in, 3 out), or 12 in / 12 out
self.conv2 = nn.Conv2d(T_dim, output_T_dim, 1)
# Shrink the channel dimension back down to 1.
self.conv3 = nn.Conv2d(embed_size, in_channels, 1)
self.relu = nn.ReLU() # works well with normalization; helps prevent exploding/vanishing gradients
def forward(self, x):
# platform: (CHANNEL, TIMESTEP_IN, N_NODE)
# input x shape[ C, N, T]
# C: number of channels. N: number of sensors. T: number of time steps
# x = x.unsqueeze(0)
# x = np.transpose(x,(0,2,1)).to(DEVICE)
input_Transformer = self.conv1(x) # conv expects the second dimension to be C, i.e. the layout must be [B, C, ...]
# input_Transformer = input_Transformer.squeeze(0)
# input_Transformer = input_Transformer.permute(1, 2, 0)
input_Transformer = input_Transformer.permute(0, 2, 3, 1)
# input_Transformer shape[N, T, C] [B, N, T, C]
output_Transformer = self.Transformer(input_Transformer, self.forward_expansion) # [B, N, T, C]
output_Transformer = output_Transformer.permute(0, 2, 1, 3)
# output_Transformer shape[B, T, N, C]
# output_Transformer = output_Transformer.unsqueeze(0)
out = self.relu(self.conv2(output_Transformer)) # out shape (LHS): [1, output_T_dim, N, C]
out = out.permute(0, 3, 2, 1) # out shape (LHS): [B, C, N, output_T_dim]
out = self.conv3(out) # out shape (LHS): [B, 1, N, output_T_dim]
# out = out.squeeze(1)
out = out.permute(0, 1, 3, 2)
# print('out: ',out.shape)
return out # [B, N, output_dim]
# return out shape: [N, output_dim]
def print_params(model_name, model):
param_count = 0
for name, param in model.named_parameters():
if param.requires_grad:
param_count += param.numel()
print(f'{model_name}, {param_count} trainable parameters in total.')
return
import sys
import pandas as pd
def main():
GPU = sys.argv[-1] if len(sys.argv) == 2 else '1'
device = torch.device("cuda:{}".format(GPU)) if torch.cuda.is_available() else torch.device("cpu")
in_channels = 2 # Channels of input
embed_size = 32 # Dimension of hidden embedding features
time_num = 288
num_layers = 2 # Number of ST Block
T_dim = 12 # Input length, should be the same as prepareData.py
output_T_dim = 12 # Output Expected length
heads = 4 # Number of Heads in MultiHeadAttention
cheb_K = 2 # Order for Chebyshev Polynomials (Eq 2)
forward_expansion = 32 # Dimension of Feed Forward Network: embed_size --> embed_size * forward_expansion --> embed_size
dropout = 0
A = pd.read_csv(ADJPATH).values
A = torch.Tensor(A)
### Construct Network
model = STTransformer(
A,
in_channels,
embed_size,
time_num,
num_layers,
T_dim,
output_T_dim,
heads,
cheb_K,
forward_expansion,
dropout).to(DEVICE)
summary(model, (2, N_NODE, TIMESTEP_IN), device=device)
print_params('STTransformer', model)
if __name__ == '__main__':
main()
'''
Homework:
1. Design an "only Spatial Transformer" version and produce PEMSBAY results, 12 steps in, 12 steps out.
2. Design an "only Temporal Transformer" version and produce PEMSBAY results, 12 steps in, 12 steps out.
3. Design a Temporal-Spatial Transformer version and produce PEMSBAY results, 12 steps in, 12 steps out.
4. Once the versions above are done, upgrade all of them so that the C dimension goes from 1 to 2; the extra
channel is a timestamp (see the reference for how to write the timestamp). In other words, the input changes
from B, N, T, C=1 to B, N, T, C=2; then rerun 1, 2 and 3 with the upgraded input, 12 steps in, 12 steps out,
on the PEMSBAY dataset.
'''
| 36.654762
| 228
| 0.566742
| 2,499
| 18,474
| 3.991997
| 0.134854
| 0.056836
| 0.011427
| 0.008821
| 0.542502
| 0.510225
| 0.451183
| 0.440758
| 0.418805
| 0.379912
| 0
| 0.019945
| 0.316066
| 18,474
| 503
| 229
| 36.727634
| 0.769608
| 0.265075
| 0
| 0.469595
| 0
| 0
| 0.015434
| 0
| 0
| 0
| 0
| 0
| 0.006757
| 1
| 0.074324
| false
| 0
| 0.033784
| 0
| 0.179054
| 0.010135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6c52e70a50ff76dae5fa9533aa70b45708e60ab
| 19,221
|
py
|
Python
|
bin/train_vit.py
|
ramizdundar/Chexpert
|
6a5f005f1df421538182ad8497725b78e6de29be
|
[
"Apache-2.0"
] | null | null | null |
bin/train_vit.py
|
ramizdundar/Chexpert
|
6a5f005f1df421538182ad8497725b78e6de29be
|
[
"Apache-2.0"
] | null | null | null |
bin/train_vit.py
|
ramizdundar/Chexpert
|
6a5f005f1df421538182ad8497725b78e6de29be
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os
import argparse
import logging
import json
import time
import subprocess
from shutil import copyfile
import numpy as np
from sklearn import metrics
from easydict import EasyDict as edict
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.nn import DataParallel
from vit_pytorch import ViT
from tensorboardX import SummaryWriter
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
from data.dataset import ImageDataset # noqa
from model.classifier import Classifier # noqa
from utils.misc import lr_schedule # noqa
from model.utils import get_optimizer # noqa
parser = argparse.ArgumentParser(description='Train model')
parser.add_argument('cfg_path', default=None, metavar='CFG_PATH', type=str,
help="Path to the config file in yaml format")
parser.add_argument('save_path', default=None, metavar='SAVE_PATH', type=str,
help="Path to the saved models")
parser.add_argument('--num_workers', default=8, type=int, help="Number of "
"workers for each data loader")
parser.add_argument('--device_ids', default='0,1,2,3', type=str,
help="GPU indices ""comma separated, e.g. '0,1' ")
parser.add_argument('--pre_train', default=None, type=str, help="If set, get "
"parameters from pretrained model")
parser.add_argument('--resume', default=0, type=int, help="If resume from "
"previous run")
parser.add_argument('--logtofile', default=False, type=bool, help="Save log "
"in save_path/log.txt if set True")
parser.add_argument('--verbose', default=False, type=bool, help="Detail info")
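# (Added usage sketch; the config and save paths below are hypothetical.)
# python bin/train_vit.py config/example.json logdir/run1 \
#     --num_workers 8 --device_ids 0,1 --logtofile True --verbose True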
def get_loss(output, target, index, device, cfg):
if cfg.criterion == 'BCE':
for num_class in cfg.num_classes:
assert num_class == 1
target = target[:, index].view(-1)
pos_weight = torch.from_numpy(
np.array(cfg.pos_weight,
dtype=np.float32)).to(device).type_as(target)
if cfg.batch_weight:
if target.sum() == 0:
loss = torch.tensor(0., requires_grad=True).to(device)
else:
weight = (target.size()[0] - target.sum()) / target.sum()
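# (Added note) weight = (#negatives / #positives) in the batch, used as
# pos_weight so that rare positive labels are up-weighted in the BCE loss.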
loss = F.binary_cross_entropy_with_logits(
output[index].view(-1), target, pos_weight=weight)
else:
loss = F.binary_cross_entropy_with_logits(
output[index].view(-1), target, pos_weight=pos_weight[index])
label = torch.sigmoid(output[index].view(-1)).ge(0.5).float()
acc = (target == label).float().sum() / len(label)
else:
raise Exception('Unknown criterion : {}'.format(cfg.criterion))
return (loss, acc)
def train_epoch(summary, summary_dev, cfg, args, model, dataloader,
dataloader_dev, optimizer, summary_writer, best_dict,
dev_header):
torch.set_grad_enabled(True)
model.train()
device_ids = list(map(int, args.device_ids.split(',')))
device = torch.device('cuda:{}'.format(device_ids[0]))
steps = len(dataloader)
dataiter = iter(dataloader)
label_header = dataloader.dataset._label_header
num_tasks = len(cfg.num_classes)
time_now = time.time()
loss_sum = np.zeros(num_tasks)
acc_sum = np.zeros(num_tasks)
for step in range(steps):
image, target = next(dataiter)
image = image.to(device)
target = target.to(device)
# output, logit_map = model(image)
output = model(image)
output = [torch.unsqueeze(i, 1) for i in output.T]
# different number of tasks
loss = 0
for t in range(num_tasks):
loss_t, acc_t = get_loss(output, target, t, device, cfg)
loss += loss_t
loss_sum[t] += loss_t.item()
acc_sum[t] += acc_t.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
summary['step'] += 1
if summary['step'] % cfg.log_every == 0:
time_spent = time.time() - time_now
time_now = time.time()
loss_sum /= cfg.log_every
acc_sum /= cfg.log_every
loss_str = ' '.join(map(lambda x: '{:.5f}'.format(x), loss_sum))
acc_str = ' '.join(map(lambda x: '{:.3f}'.format(x), acc_sum))
logging.info(
'{}, Train, Epoch : {}, Step : {}, Loss : {}, '
'Acc : {}, Run Time : {:.2f} sec'
.format(time.strftime("%Y-%m-%d %H:%M:%S"),
summary['epoch'] + 1, summary['step'], loss_str,
acc_str, time_spent))
for t in range(num_tasks):
summary_writer.add_scalar(
'train/loss_{}'.format(label_header[t]), loss_sum[t],
summary['step'])
summary_writer.add_scalar(
'train/acc_{}'.format(label_header[t]), acc_sum[t],
summary['step'])
loss_sum = np.zeros(num_tasks)
acc_sum = np.zeros(num_tasks)
if summary['step'] % cfg.test_every == 0:
time_now = time.time()
summary_dev, predlist, true_list = test_epoch(
summary_dev, cfg, args, model, dataloader_dev)
time_spent = time.time() - time_now
auclist = []
for i in range(len(cfg.num_classes)):
y_pred = predlist[i]
y_true = true_list[i]
fpr, tpr, thresholds = metrics.roc_curve(
y_true, y_pred, pos_label=1)
auc = metrics.auc(fpr, tpr)
auclist.append(auc)
summary_dev['auc'] = np.array(auclist)
loss_dev_str = ' '.join(map(lambda x: '{:.5f}'.format(x),
summary_dev['loss']))
acc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['acc']))
auc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['auc']))
logging.info(
'{}, Dev, Step : {}, Loss : {}, Acc : {}, Auc : {},'
'Mean auc: {:.3f} ''Run Time : {:.2f} sec' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
summary_dev['auc'].mean(),
time_spent))
for t in range(len(cfg.num_classes)):
summary_writer.add_scalar(
'dev/loss_{}'.format(dev_header[t]),
summary_dev['loss'][t], summary['step'])
summary_writer.add_scalar(
'dev/acc_{}'.format(dev_header[t]), summary_dev['acc'][t],
summary['step'])
summary_writer.add_scalar(
'dev/auc_{}'.format(dev_header[t]), summary_dev['auc'][t],
summary['step'])
save_best = False
mean_acc = summary_dev['acc'][cfg.save_index].mean()
if mean_acc >= best_dict['acc_dev_best']:
best_dict['acc_dev_best'] = mean_acc
if cfg.best_target == 'acc':
save_best = True
mean_auc = summary_dev['auc'][cfg.save_index].mean()
if mean_auc >= best_dict['auc_dev_best']:
best_dict['auc_dev_best'] = mean_auc
if cfg.best_target == 'auc':
save_best = True
mean_loss = summary_dev['loss'][cfg.save_index].mean()
if mean_loss <= best_dict['loss_dev_best']:
best_dict['loss_dev_best'] = mean_loss
if cfg.best_target == 'loss':
save_best = True
if save_best:
torch.save(
{'epoch': summary['epoch'],
'step': summary['step'],
'acc_dev_best': best_dict['acc_dev_best'],
'auc_dev_best': best_dict['auc_dev_best'],
'loss_dev_best': best_dict['loss_dev_best'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path, 'best{}.ckpt'.format(
best_dict['best_idx']))
)
best_dict['best_idx'] += 1
if best_dict['best_idx'] > cfg.save_top_k:
best_dict['best_idx'] = 1
logging.info(
'{}, Best, Step : {}, Loss : {}, Acc : {},Auc :{},'
'Best Auc : {:.3f}' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
best_dict['auc_dev_best']))
model.train()
torch.set_grad_enabled(True)
summary['epoch'] += 1
return summary, best_dict
def test_epoch(summary, cfg, args, model, dataloader):
torch.set_grad_enabled(False)
model.eval()
device_ids = list(map(int, args.device_ids.split(',')))
device = torch.device('cuda:{}'.format(device_ids[0]))
steps = len(dataloader)
dataiter = iter(dataloader)
num_tasks = len(cfg.num_classes)
loss_sum = np.zeros(num_tasks)
acc_sum = np.zeros(num_tasks)
predlist = list(x for x in range(len(cfg.num_classes)))
true_list = list(x for x in range(len(cfg.num_classes)))
for step in range(steps):
image, target = next(dataiter)
image = image.to(device)
target = target.to(device)
output = model(image)
output = [torch.unsqueeze(i, 1) for i in output.T]
# different number of tasks
for t in range(len(cfg.num_classes)):
loss_t, acc_t = get_loss(output, target, t, device, cfg)
# AUC
output_tensor = torch.sigmoid(
output[t].view(-1)).cpu().detach().numpy()
target_tensor = target[:, t].view(-1).cpu().detach().numpy()
if step == 0:
predlist[t] = output_tensor
true_list[t] = target_tensor
else:
predlist[t] = np.append(predlist[t], output_tensor)
true_list[t] = np.append(true_list[t], target_tensor)
loss_sum[t] += loss_t.item()
acc_sum[t] += acc_t.item()
summary['loss'] = loss_sum / steps
summary['acc'] = acc_sum / steps
return summary, predlist, true_list
def run(args):
with open(args.cfg_path) as f:
cfg = edict(json.load(f))
if args.verbose is True:
print(json.dumps(cfg, indent=4))
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
if args.logtofile is True:
logging.basicConfig(filename=args.save_path + '/log.txt',
filemode="w", level=logging.INFO)
else:
logging.basicConfig(level=logging.INFO)
if not args.resume:
with open(os.path.join(args.save_path, 'cfg.json'), 'w') as f:
json.dump(cfg, f, indent=1)
device_ids = list(map(int, args.device_ids.split(',')))
num_devices = torch.cuda.device_count()
if num_devices < len(device_ids):
raise Exception(
'#available gpu : {} < --device_ids : {}'
.format(num_devices, len(device_ids)))
device = torch.device('cuda:{}'.format(device_ids[0]))
# model = Classifier(cfg)
model = ViT(
cfg = cfg,
image_size=cfg.width,
patch_size=32,
num_classes=5,
dim=1024,
depth=6,
heads=8,
mlp_dim=512,
dropout=0.3,
emb_dropout=0.3,
channels=3
)
if args.verbose is True:
from torchsummary import summary
if cfg.fix_ratio:
h, w = cfg.long_side, cfg.long_side
else:
h, w = cfg.height, cfg.width
summary(model.to(device), (3, h, w))
model = DataParallel(model, device_ids=device_ids).to(device).train()
if args.pre_train is not None:
if os.path.exists(args.pre_train):
ckpt = torch.load(args.pre_train, map_location=device)
model.module.load_state_dict(ckpt)
optimizer = get_optimizer(model.parameters(), cfg)
src_folder = os.path.dirname(os.path.abspath(__file__)) + '/../'
dst_folder = os.path.join(args.save_path, 'classification')
rc, size = subprocess.getstatusoutput('du --max-depth=0 %s | cut -f1'
% src_folder)
if rc != 0:
raise Exception('Copy folder error : {}'.format(rc))
rc, err_msg = subprocess.getstatusoutput('cp -R %s %s' % (src_folder,
dst_folder))
if rc != 0:
raise Exception('copy folder error : {}'.format(err_msg))
copyfile(cfg.train_csv, os.path.join(args.save_path, 'train.csv'))
copyfile(cfg.dev_csv, os.path.join(args.save_path, 'dev.csv'))
dataloader_train = DataLoader(
ImageDataset(cfg.train_csv, cfg, mode='train'),
batch_size=cfg.train_batch_size, num_workers=args.num_workers,
drop_last=True, shuffle=True)
dataloader_dev = DataLoader(
ImageDataset(cfg.dev_csv, cfg, mode='dev'),
batch_size=cfg.dev_batch_size, num_workers=args.num_workers,
drop_last=False, shuffle=False)
dev_header = dataloader_dev.dataset._label_header
summary_train = {'epoch': 0, 'step': 0}
summary_dev = {'loss': float('inf'), 'acc': 0.0}
summary_writer = SummaryWriter(args.save_path)
epoch_start = 0
best_dict = {
"acc_dev_best": 0.0,
"auc_dev_best": 0.0,
"loss_dev_best": float('inf'),
"fused_dev_best": 0.0,
"best_idx": 1}
if args.resume:
ckpt_path = os.path.join(args.save_path, 'train.ckpt')
ckpt = torch.load(ckpt_path, map_location=device)
model.module.load_state_dict(ckpt['state_dict'])
summary_train = {'epoch': ckpt['epoch'], 'step': ckpt['step']}
best_dict['acc_dev_best'] = ckpt['acc_dev_best']
best_dict['loss_dev_best'] = ckpt['loss_dev_best']
best_dict['auc_dev_best'] = ckpt['auc_dev_best']
epoch_start = ckpt['epoch']
for epoch in range(epoch_start, cfg.epoch):
lr = lr_schedule(cfg.lr, cfg.lr_factor, summary_train['epoch'],
cfg.lr_epochs)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
summary_train, best_dict = train_epoch(
summary_train, summary_dev, cfg, args, model,
dataloader_train, dataloader_dev, optimizer,
summary_writer, best_dict, dev_header)
time_now = time.time()
summary_dev, predlist, true_list = test_epoch(
summary_dev, cfg, args, model, dataloader_dev)
time_spent = time.time() - time_now
auclist = []
for i in range(len(cfg.num_classes)):
y_pred = predlist[i]
y_true = true_list[i]
fpr, tpr, thresholds = metrics.roc_curve(
y_true, y_pred, pos_label=1)
auc = metrics.auc(fpr, tpr)
auclist.append(auc)
summary_dev['auc'] = np.array(auclist)
loss_dev_str = ' '.join(map(lambda x: '{:.5f}'.format(x),
summary_dev['loss']))
acc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['acc']))
auc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['auc']))
logging.info(
'{}, Dev, Step : {}, Loss : {}, Acc : {}, Auc : {},'
'Mean auc: {:.3f} ''Run Time : {:.2f} sec' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary_train['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
summary_dev['auc'].mean(),
time_spent))
for t in range(len(cfg.num_classes)):
summary_writer.add_scalar(
'dev/loss_{}'.format(dev_header[t]), summary_dev['loss'][t],
summary_train['step'])
summary_writer.add_scalar(
'dev/acc_{}'.format(dev_header[t]), summary_dev['acc'][t],
summary_train['step'])
summary_writer.add_scalar(
'dev/auc_{}'.format(dev_header[t]), summary_dev['auc'][t],
summary_train['step'])
save_best = False
mean_acc = summary_dev['acc'][cfg.save_index].mean()
if mean_acc >= best_dict['acc_dev_best']:
best_dict['acc_dev_best'] = mean_acc
if cfg.best_target == 'acc':
save_best = True
mean_auc = summary_dev['auc'][cfg.save_index].mean()
if mean_auc >= best_dict['auc_dev_best']:
best_dict['auc_dev_best'] = mean_auc
if cfg.best_target == 'auc':
save_best = True
mean_loss = summary_dev['loss'][cfg.save_index].mean()
if mean_loss <= best_dict['loss_dev_best']:
best_dict['loss_dev_best'] = mean_loss
if cfg.best_target == 'loss':
save_best = True
if save_best:
torch.save(
{'epoch': summary_train['epoch'],
'step': summary_train['step'],
'acc_dev_best': best_dict['acc_dev_best'],
'auc_dev_best': best_dict['auc_dev_best'],
'loss_dev_best': best_dict['loss_dev_best'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path,
'best{}.ckpt'.format(best_dict['best_idx']))
)
best_dict['best_idx'] += 1
if best_dict['best_idx'] > cfg.save_top_k:
best_dict['best_idx'] = 1
logging.info(
'{}, Best, Step : {}, Loss : {}, Acc : {},'
'Auc :{},Best Auc : {:.3f}' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary_train['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
best_dict['auc_dev_best']))
torch.save({'epoch': summary_train['epoch'],
'step': summary_train['step'],
'acc_dev_best': best_dict['acc_dev_best'],
'auc_dev_best': best_dict['auc_dev_best'],
'loss_dev_best': best_dict['loss_dev_best'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path, 'train.ckpt'))
summary_writer.close()
def main():
args = parser.parse_args()
if args.verbose is True:
print('Using the specified args:')
print(args)
run(args)
if __name__ == '__main__':
main()
| 38.908907
| 78
| 0.542115
| 2,370
| 19,221
| 4.157806
| 0.121519
| 0.029836
| 0.018977
| 0.025878
| 0.594682
| 0.56779
| 0.532068
| 0.500812
| 0.484981
| 0.452202
| 0
| 0.007144
| 0.32272
| 19,221
| 493
| 79
| 38.98783
| 0.749808
| 0.006867
| 0
| 0.49409
| 0
| 0
| 0.115625
| 0
| 0
| 0
| 0
| 0
| 0.002364
| 1
| 0.01182
| false
| 0
| 0.052009
| 0
| 0.070922
| 0.007092
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6c580f84de62db4b9d20acb6cce98ce88761586
| 262
|
py
|
Python
|
Sets/the capaint s room.py
|
AndreasGeiger/hackerrank-python
|
a436c207e62b32f70a6b4279bb641a3c4d90e112
|
[
"MIT"
] | null | null | null |
Sets/the capaint s room.py
|
AndreasGeiger/hackerrank-python
|
a436c207e62b32f70a6b4279bb641a3c4d90e112
|
[
"MIT"
] | null | null | null |
Sets/the capaint s room.py
|
AndreasGeiger/hackerrank-python
|
a436c207e62b32f70a6b4279bb641a3c4d90e112
|
[
"MIT"
] | null | null | null |
groupSize = input()
groups = list(map(int,input().split(' ')))
tmpArray1 = set()
tmpArray2 = set()
for i in groups:
if i in tmpArray1:
tmpArray2.discard(i)
else:
tmpArray1.add(i)
tmpArray2.add(i)
for i in tmpArray2:
print(i)
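# (Added note) After the loop, tmpArray1 holds every distinct room number,
# while tmpArray2 keeps only the numbers seen exactly once -- i.e. the
# captain's room, since every other room number appears groupSize times.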
| 18.714286
| 42
| 0.603053
| 36
| 262
| 4.388889
| 0.5
| 0.056962
| 0.075949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035897
| 0.255725
| 262
| 13
| 43
| 20.153846
| 0.774359
| 0
| 0
| 0
| 0
| 0
| 0.003817
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6c6e8aaf6429afdb1edbeda8513d241f632fc14
| 6,867
|
py
|
Python
|
src/oictest/setup.py
|
rohe/oictest
|
f6f0800220befd5983b8cb34a5c984f98855d089
|
[
"Apache-2.0"
] | 32
|
2015-01-02T20:15:17.000Z
|
2020-02-15T20:46:25.000Z
|
src/oictest/setup.py
|
rohe/oictest
|
f6f0800220befd5983b8cb34a5c984f98855d089
|
[
"Apache-2.0"
] | 8
|
2015-02-23T19:48:53.000Z
|
2016-01-20T08:24:05.000Z
|
src/oictest/setup.py
|
rohe/oictest
|
f6f0800220befd5983b8cb34a5c984f98855d089
|
[
"Apache-2.0"
] | 17
|
2015-01-02T20:15:22.000Z
|
2022-03-22T22:58:28.000Z
|
import copy
import json
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from oic.utils.keyio import KeyJar
from oic.utils.keyio import KeyBundle
__author__ = 'roland'
import logging
logger = logging.getLogger(__name__)
class OIDCError(Exception):
pass
def flow2sequence(operations, item):
flow = operations.FLOWS[item]
return [operations.PHASES[phase] for phase in flow["sequence"]]
class OIDCTestSetup(object):
def __init__(self, client_cls, config, test_defs):
"""
:param config: Imported configuration module
:return:
"""
self.client_cls = client_cls
self.config = config
self.test_features = []
self.client = self.create_client(**config.CLIENT)
self.test_defs = test_defs
def create_client(self, **kwargs):
"""
Instantiate a _client instance
:param: Keyword arguments
Keys are ["srv_discovery_url", "client_info", "client_registration",
"provider_info". "keys]
:return: _client instance
"""
_key_set = set(kwargs.keys())
args = {}
_client = self.client_cls(client_authn_method=CLIENT_AUTHN_METHOD,
behaviour=kwargs["behaviour"],
verify_ssl=self.config.VERIFY_SSL, **args)
# The behaviour parameter is not significant for the election process
_key_set.discard("behaviour")
try:
setattr(_client, "allow", kwargs["allow"])
except KeyError:
pass
else:
_key_set.discard("allow")
try:
jwks = self.construct_jwks(_client, kwargs["keys"])
except KeyError:
pass
else:
# export JWKS
f = open("export/jwk.json", "w")
f.write(json.dumps(jwks))
f.close()
_client.jwks_uri = self.config.CLIENT["key_export_url"]
self.test_features = _key_set
try:
_client.client_prefs = copy.copy(kwargs["preferences"])
except KeyError:
pass
else:
_key_set.discard("preferences")
if "client_info" in _key_set:
_client.redirect_uris = self.config.CLIENT[
"client_info"]["redirect_uris"]
elif "client_registration" in _key_set:
reg_info = self.config.CLIENT["client_registration"]
_client.redirect_uris = reg_info["redirect_uris"]
_client.client_id = reg_info["client_id"]
_client.client_secret = reg_info["client_secret"]
return _client
@staticmethod
def construct_jwks(_client, key_conf):
"""
Construct the jwks
"""
if _client.keyjar is None:
_client.keyjar = KeyJar()
kbl = []
kid_template = "a%d"
kid = 0
for typ, info in key_conf.items():
kb = KeyBundle(source="file://%s" % info["key"], fileformat="der",
keytype=typ)
for k in kb.keys():
k.serialize()
k.kid = kid_template % kid
kid += 1
_client.kid[k.use][k.kty] = k.kid
_client.keyjar.add_kb("", kb)
kbl.append(kb)
jwks = {"keys": []}
for kb in kbl:
# ignore simple keys
jwks["keys"].extend([k.to_dict()
for k in kb.keys() if k.kty != 'oct'])
return jwks
def make_sequence(self, flow):
"""
Translate a flow name into a sequence of request/responses.
:param flow: Which test flow to use
:return: test sequence and test definitions
"""
sequence = flow2sequence(self.test_defs, flow)
res = {"sequence": sequence,
"tests": {"pre": [], "post": []},
"flow": [flow],
"block": [],
"mode": "",
"expect_exception": False}
_flow = self.test_defs.FLOWS[flow]
for param in ["tests", "block", "mode", "expect_exception"]:
try:
res[param] = _flow[param]
except KeyError:
pass
return res
def add_init(self, test_spec):
"""
Add _client registration and provider info gathering if necessary
:param test_spec:
:return:
"""
_seq = test_spec["sequence"]
_flow = test_spec["flow"]
if "client_info" in self.test_features and \
"registration" not in test_spec["block"]:
_register = True
# May not be the first item in the sequence
for sq in _seq:
try:
if sq[0].request == "RegistrationRequest":
_register = False
except TypeError:
pass
if _register:
_ext = self.test_defs.PHASES["oic-registration"]
_seq.insert(0, _ext)
_flow.insert(0, "oic-registration")
if "srv_discovery_url" in self.test_features:
op_spec = self.test_defs.PHASES["provider-discovery"]
if op_spec not in _seq:
_seq.insert(0, op_spec)
_flow.insert(0, "provider-discovery")
return test_spec
def request_and_return(conv, url, response=None, method="GET", body=None,
body_type="json", state="", http_args=None,
**kwargs):
"""
:param url: The URL to which the request should be sent
:param response: Response type
:param method: Which HTTP method to use
:param body: A message body if any
:param body_type: The format of the body of the return message
:param http_args: Arguments for the HTTP _client
:return: A cls or ErrorResponse instance or the HTTP response
instance if no response body was expected.
"""
if http_args is None:
http_args = {}
_cli = conv._client
try:
_resp = _cli.http_request(url, method, data=body, **http_args)
except Exception:
raise
conv.position = url
conv.last_response = _resp
conv.last_content = _resp.content
if not "keyjar" in kwargs:
kwargs["keyjar"] = conv.keyjar
_response = _cli.parse_request_response(_resp, response, body_type, state,
**kwargs)
conv.protocol_response.append((_response, _resp.content))
return _response
def test_summation(conv, sid):
status = 0
for item in conv.test_output:
if item["status"] > status:
status = item["status"]
if status == 0:
status = 1
info = {
"id": sid,
"status": status,
"tests": conv.test_output
}
return info
| 28.6125
| 80
| 0.553371
| 761
| 6,867
| 4.77661
| 0.254928
| 0.022008
| 0.016506
| 0.018157
| 0.038514
| 0.019257
| 0.019257
| 0
| 0
| 0
| 0
| 0.002674
| 0.346585
| 6,867
| 240
| 81
| 28.6125
| 0.807444
| 0.1481
| 0
| 0.127517
| 0
| 0
| 0.090278
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053691
| false
| 0.040268
| 0.040268
| 0
| 0.154362
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6c8040bae19150daa4afa3909164f31bd76f5c3
| 2,696
|
py
|
Python
|
HLTrigger/Configuration/python/HLT_75e33/modules/hltPFPuppiNoLep_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 1
|
2021-11-30T16:24:46.000Z
|
2021-11-30T16:24:46.000Z
|
HLTrigger/Configuration/python/HLT_75e33/modules/hltPFPuppiNoLep_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 4
|
2021-11-29T13:57:56.000Z
|
2022-03-29T06:28:36.000Z
|
HLTrigger/Configuration/python/HLT_75e33/modules/hltPFPuppiNoLep_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 1
|
2021-11-30T16:16:05.000Z
|
2021-11-30T16:16:05.000Z
|
import FWCore.ParameterSet.Config as cms
hltPFPuppiNoLep = cms.EDProducer("PuppiProducer",
DeltaZCut = cms.double(0.1),
DeltaZCutForChargedFromPUVtxs = cms.double(0.2),
EtaMaxCharged = cms.double(99999.0),
EtaMaxPhotons = cms.double(2.5),
EtaMinUseDeltaZ = cms.double(-1.0),
MinPuppiWeight = cms.double(0.01),
NumOfPUVtxsForCharged = cms.uint32(0),
PUProxyValue = cms.InputTag("hltPixelClustersMultiplicity"),
PtMaxCharged = cms.double(-1.0),
PtMaxNeutrals = cms.double(200.0),
PtMaxNeutralsStartSlope = cms.double(0.0),
PtMaxPhotons = cms.double(20.0),
UseDeltaZCut = cms.bool(True),
UseFromPVLooseTight = cms.bool(False),
algos = cms.VPSet(
cms.PSet(
EtaMaxExtrap = cms.double(2.0),
MedEtaSF = cms.vdouble(1.0, 1.0),
MinNeutralPt = cms.vdouble(0.5105, 0.821),
MinNeutralPtSlope = cms.vdouble(9.51e-06, 1.902e-05),
RMSEtaSF = cms.vdouble(1.0, 1.0),
etaMax = cms.vdouble(2.5, 3.5),
etaMin = cms.vdouble(0.0, 2.5),
ptMin = cms.vdouble(0.0, 0.0),
puppiAlgos = cms.VPSet(cms.PSet(
algoId = cms.int32(5),
applyLowPUCorr = cms.bool(True),
combOpt = cms.int32(0),
cone = cms.double(0.4),
rmsPtMin = cms.double(0.1),
rmsScaleFactor = cms.double(1.0),
useCharged = cms.bool(True)
))
),
cms.PSet(
EtaMaxExtrap = cms.double(2.0),
MedEtaSF = cms.vdouble(0.75),
MinNeutralPt = cms.vdouble(3.656),
MinNeutralPtSlope = cms.vdouble(5.072e-05),
RMSEtaSF = cms.vdouble(1.0),
etaMax = cms.vdouble(10.0),
etaMin = cms.vdouble(3.5),
ptMin = cms.vdouble(0.0),
puppiAlgos = cms.VPSet(cms.PSet(
algoId = cms.int32(5),
applyLowPUCorr = cms.bool(True),
combOpt = cms.int32(0),
cone = cms.double(0.4),
rmsPtMin = cms.double(0.5),
rmsScaleFactor = cms.double(1.0),
useCharged = cms.bool(False)
))
)
),
applyCHS = cms.bool(True),
candName = cms.InputTag("particleFlowTmp"),
clonePackedCands = cms.bool(False),
invertPuppi = cms.bool(False),
puppiDiagnostics = cms.bool(False),
puppiNoLep = cms.bool(True),
useExistingWeights = cms.bool(False),
useExp = cms.bool(False),
usePUProxyValue = cms.bool(True),
vertexName = cms.InputTag("goodOfflinePrimaryVertices"),
vtxNdofCut = cms.int32(4),
vtxZCut = cms.double(24)
)
| 37.971831
| 65
| 0.563427
| 300
| 2,696
| 5.063333
| 0.29
| 0.112574
| 0.052666
| 0.028966
| 0.347597
| 0.326531
| 0.271231
| 0.271231
| 0.215932
| 0.215932
| 0
| 0.066737
| 0.299703
| 2,696
| 70
| 66
| 38.514286
| 0.737818
| 0
| 0
| 0.289855
| 0
| 0
| 0.030415
| 0.02003
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.014493
| 0
| 0.014493
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6c97a9ee684956ae509733d7e8dff568dd9da66
| 623
|
py
|
Python
|
hpotter/src/lazy_init.py
|
LarsenClose/dr.hpotter
|
ef6199ab563a92f3e4916277dbde9217126f36a9
|
[
"MIT"
] | 1
|
2021-08-15T09:24:20.000Z
|
2021-08-15T09:24:20.000Z
|
hpotter/src/lazy_init.py
|
LarsenClose/dr.hpotter
|
ef6199ab563a92f3e4916277dbde9217126f36a9
|
[
"MIT"
] | 18
|
2021-02-01T21:58:20.000Z
|
2021-05-24T17:10:25.000Z
|
hpotter/src/lazy_init.py
|
LarsenClose/dr.hpotter
|
ef6199ab563a92f3e4916277dbde9217126f36a9
|
[
"MIT"
] | 1
|
2021-06-19T12:49:54.000Z
|
2021-06-19T12:49:54.000Z
|
''' Wrap an __init__ function so that I don't have to assign all the
parameters to a self. variable. '''
# https://stackoverflow.com/questions/5048329/python-decorator-for-automatic-binding-init-arguments
import inspect
from functools import wraps
def lazy_init(init):
''' Create an annotation to assign all the parameters to a self.
variable. '''
arg_names = inspect.getfullargspec(init)[0]
# pylint: disable=E1101
@wraps(init)
def new_init(self, *args):
for name, value in zip(arg_names[1:], args):
setattr(self, name, value)
init(self, *args)
return new_init
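# (Added usage sketch; the Point class is hypothetical and not part of the
# original module.)
#
# class Point:
#     @lazy_init
#     def __init__(self, x, y):
#         pass
#
# p = Point(1, 2)  # p.x == 1 and p.y == 2 without explicit assignments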
| 28.318182
| 99
| 0.686998
| 88
| 623
| 4.761364
| 0.613636
| 0.038186
| 0.052506
| 0.066826
| 0.186158
| 0.186158
| 0.186158
| 0.186158
| 0.186158
| 0
| 0
| 0.026263
| 0.205457
| 623
| 21
| 100
| 29.666667
| 0.820202
| 0.46549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6cb19760623f02a584f4187adb3490f5de6005b
| 781
|
py
|
Python
|
main.py
|
technojam/MLian
|
7632c5c7d4c44b1d87de9ab23c1ed7293962ca49
|
[
"MIT"
] | 1
|
2021-12-18T19:54:45.000Z
|
2021-12-18T19:54:45.000Z
|
main.py
|
technojam/MLian
|
7632c5c7d4c44b1d87de9ab23c1ed7293962ca49
|
[
"MIT"
] | 2
|
2021-12-18T19:50:08.000Z
|
2021-12-18T19:52:20.000Z
|
main.py
|
technojam/MLian
|
7632c5c7d4c44b1d87de9ab23c1ed7293962ca49
|
[
"MIT"
] | 1
|
2022-03-01T14:13:27.000Z
|
2022-03-01T14:13:27.000Z
|
# def register_feed():
import os
import cv2

path = '/UserImage'
cam = cv2.VideoCapture(0)
name = input("Name: ")
cv2.namedWindow("test")
img_counter = 0

while True:
    ret, frame = cam.read()
    if not ret:
        print("failed to grab frame")
        break
    else:
        cv2.imshow("test", frame)
        k = cv2.waitKey(1)
        if k % 256 == 27:
            # ESC pressed
            print("Escape hit, closing...")
            break
        elif k % 256 == 32:
            # SPACE pressed
            # img_name = "opencv_frame_{}.png".format(img_counter)
            cv2.imwrite(name + ".jpg", frame)
            # print("{} written!".format(img_name))
            print("Image Captured! Proceed...")
            img_counter += 1

cam.release()
cv2.destroyAllWindows()
| 22.314286
| 66
| 0.541613
| 92
| 781
| 4.51087
| 0.597826
| 0.072289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039474
| 0.318822
| 781
| 35
| 67
| 22.314286
| 0.740602
| 0.175416
| 0
| 0.083333
| 0
| 0
| 0.150235
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6cb563badebdde1d425f141d7f04f5b497ea2ae
| 2,643
|
py
|
Python
|
models/train.py
|
Hiwyl/keras_cnn_finetune
|
f424302a72c8d05056a9af6f9b293003acb8398d
|
[
"MIT"
] | 1
|
2019-09-30T01:07:03.000Z
|
2019-09-30T01:07:03.000Z
|
models/train.py
|
Hiwyl/keras_cnn_finetune
|
f424302a72c8d05056a9af6f9b293003acb8398d
|
[
"MIT"
] | null | null | null |
models/train.py
|
Hiwyl/keras_cnn_finetune
|
f424302a72c8d05056a9af6f9b293003acb8398d
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
'''
@Author : lance
@Email : wangyl306@163.com
'''
import time

from model_cx.inceptionresnet import inceptionresnet
from model_cx.vgg19two import vgg19_all_lr
from model_cx.inceptionv3 import inceptionv3
from model_cx.densenet import densenet
from model_cx.nasnet import nasnet
from model_cx.merge import merge
from model_cx.bcnn import bilinearnet
from model_cx.resnet import ResNet50
from model_cx.mobilenetv2 import mobilenetv2
from model_cx.senet import senet

if __name__ == "__main__":
    classes = 1
    epochs = 100
    steps_per_epoch = 113
    validation_steps = 48
    shape = (224, 224)
    print("开始训练...")  # "Start training..."
    start = time.time()
    #
    # try:
    #     print("densenet")
    #     densenet(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    # try:
    #     print("bcnn")
    #     bilinearnet(classes, epochs, steps_per_epoch, validation_steps, shape)
    #
    # except Exception as e:
    #     print(e)
    # try:
    #     print("resnet")
    #     ResNet50(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    try:
        print("merge")
        merge(classes, epochs, steps_per_epoch, validation_steps, shape)
    except Exception as e:
        print(e)
    # try:
    #     print("ince_res")
    #     inceptionresnet(classes, epochs, steps_per_epoch, validation_steps, (299, 299))
    #     # inceptionresnet(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    # try:
    #     print("mobilenetv2")
    #     mobilenetv2(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    # try:
    #     print("inceptionv3")
    #     inceptionv3(classes, epochs, steps_per_epoch, validation_steps, (299, 299))
    #     # inceptionv3(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    try:
        print("nasnet")
        nasnet(classes, epochs, steps_per_epoch, validation_steps, shape)
    except Exception as e:
        print(e)
    try:
        print("vgg19two")
        vgg19_all_lr(classes, epochs, steps_per_epoch, validation_steps, shape)
    except Exception as e:
        print(e)
    try:
        print("senet")
        # note: the original source calls vgg19_all_lr here rather than senet
        vgg19_all_lr(classes, epochs, steps_per_epoch, validation_steps, (100, 100))
    except Exception as e:
        print(e)
    end = time.time()
    print("ETA:", (end - start) / 3600)
| 31.094118
| 90
| 0.623156
| 309
| 2,643
| 5.122977
| 0.187702
| 0.065698
| 0.106759
| 0.159191
| 0.589387
| 0.589387
| 0.574226
| 0.574226
| 0.541377
| 0.481996
| 0
| 0.035024
| 0.276201
| 2,643
| 85
| 91
| 31.094118
| 0.792473
| 0.404843
| 0
| 0.292683
| 0
| 0
| 0.029717
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.268293
| 0
| 0.268293
| 0.243902
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6cb633a5c540a02c577994bd8b8eebe64755249
| 3,275
|
py
|
Python
|
src/probnum/randprocs/markov/integrator/_preconditioner.py
|
alpiges/probnum
|
2e4153cb0df559984e09ec74487ef6c9d3f6d464
|
[
"MIT"
] | null | null | null |
src/probnum/randprocs/markov/integrator/_preconditioner.py
|
alpiges/probnum
|
2e4153cb0df559984e09ec74487ef6c9d3f6d464
|
[
"MIT"
] | 40
|
2021-04-12T07:56:29.000Z
|
2022-03-28T00:18:18.000Z
|
src/probnum/randprocs/markov/integrator/_preconditioner.py
|
alpiges/probnum
|
2e4153cb0df559984e09ec74487ef6c9d3f6d464
|
[
"MIT"
] | null | null | null |
"""Coordinate changes in state space models."""
import abc
try:
# cached_property is only available in Python >=3.8
from functools import cached_property
except ImportError:
from cached_property import cached_property
import numpy as np
import scipy.special # for vectorised factorial
from probnum import config, linops, randvars
def apply_precon(precon, rv):
# public (because it is needed in some integrator implementations),
# but not exposed to the 'randprocs' namespace
# (i.e. not imported in any __init__.py).
# There is no way of checking whether `rv` has its Cholesky factor computed already or not.
# Therefore, since we need to update the Cholesky factor for square-root filtering,
# we also update the Cholesky factor for non-square-root algorithms here,
# which implies additional cost.
# See Issues #319 and #329.
# When they are resolved, this function here will hopefully be superfluous.
new_mean = precon @ rv.mean
new_cov_cholesky = precon @ rv.cov_cholesky # precon is diagonal, so this is valid
new_cov = new_cov_cholesky @ new_cov_cholesky.T
return randvars.Normal(new_mean, new_cov, cov_cholesky=new_cov_cholesky)
class Preconditioner(abc.ABC):
"""Coordinate change transformations as preconditioners in state space models.
For some models, this makes the filtering and smoothing steps more numerically
stable.
"""
@abc.abstractmethod
def __call__(self, step) -> np.ndarray:
# if more than step is needed, add them into the signature in the future
raise NotImplementedError
@cached_property
def inverse(self) -> "Preconditioner":
raise NotImplementedError
class NordsieckLikeCoordinates(Preconditioner):
"""Nordsieck-like coordinates.
Similar to Nordsieck coordinates (which store the Taylor coefficients instead of the
derivatives), but better for ODE filtering and smoothing. Used in integrator-transitions, e.g. in
:class:`IntegratedWienerTransition`.
"""
def __init__(self, powers, scales, dimension):
# Clean way of assembling these coordinates cheaply,
# because the powers and scales of the inverse
# are better read off than inverted
self.powers = powers
self.scales = scales
self.dimension = dimension
@classmethod
def from_order(cls, order, dimension):
# used to conveniently initialise in the beginning
powers = np.arange(order, -1, -1)
scales = scipy.special.factorial(powers)
return cls(
powers=powers + 0.5,
scales=scales,
dimension=dimension,
)
def __call__(self, step):
scaling_vector = np.abs(step) ** self.powers / self.scales
if config.matrix_free:
return linops.Kronecker(
A=linops.Identity(self.dimension),
B=linops.Scaling(factors=scaling_vector),
)
return np.kron(np.eye(self.dimension), np.diag(scaling_vector))
@cached_property
def inverse(self) -> "NordsieckLikeCoordinates":
return NordsieckLikeCoordinates(
powers=-self.powers,
scales=1.0 / self.scales,
dimension=self.dimension,
)
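A standalone sketch (plain NumPy/SciPy, assuming the non matrix-free branch) of the diagonal scaling that NordsieckLikeCoordinates.from_order(order, dimension)(step) evaluates to; the concrete numbers below are illustrative only:

import numpy as np
import scipy.special

order, dimension, step = 2, 1, 0.1
powers = np.arange(order, -1, -1) + 0.5                      # [2.5, 1.5, 0.5]
scales = scipy.special.factorial(np.arange(order, -1, -1))   # [2., 1., 1.]
scaling_vector = np.abs(step) ** powers / scales
P = np.kron(np.eye(dimension), np.diag(scaling_vector))
print(P)  # diagonal matrix that apply_precon() multiplies onto the mean and Cholesky factor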
| 34.114583
| 101
| 0.685802
| 402
| 3,275
| 5.482587
| 0.460199
| 0.038113
| 0.025408
| 0.016334
| 0.071688
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005636
| 0.241527
| 3,275
| 95
| 102
| 34.473684
| 0.881643
| 0.409466
| 0
| 0.081633
| 0
| 0
| 0.020321
| 0.012834
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0.020408
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6cc468eac9d6881bb54cbc2d585ee21f2641f3f
| 2,345
|
py
|
Python
|
allauth/socialaccount/providers/linkedin/provider.py
|
mina-gaid/scp
|
38e1cd303d4728a987df117f666ce194e241ed1a
|
[
"MIT"
] | 1
|
2018-04-06T21:36:59.000Z
|
2018-04-06T21:36:59.000Z
|
allauth/socialaccount/providers/linkedin/provider.py
|
mina-gaid/scp
|
38e1cd303d4728a987df117f666ce194e241ed1a
|
[
"MIT"
] | 6
|
2020-06-05T18:44:19.000Z
|
2022-01-13T00:48:56.000Z
|
allauth/socialaccount/providers/linkedin/provider.py
|
mina-gaid/scp
|
38e1cd303d4728a987df117f666ce194e241ed1a
|
[
"MIT"
] | 1
|
2022-02-01T17:19:28.000Z
|
2022-02-01T17:19:28.000Z
|
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth.provider import OAuthProvider
from allauth.socialaccount import app_settings


class LinkedInAccount(ProviderAccount):
    def get_profile_url(self):
        return self.account.extra_data.get('public-profile-url')

    def get_avatar_url(self):
        # try to return the higher res picture-urls::(original) first
        try:
            if self.account.extra_data.get('picture-urls', {}).get(
                    'picture-url'):
                return self.account.extra_data.get('picture-urls', {}).get(
                    'picture-url')
        except:
            # if we can't get higher res for any reason, we'll just return the
            # low res
            pass
        return self.account.extra_data.get('picture-url')

    def to_str(self):
        dflt = super(LinkedInAccount, self).to_str()
        name = self.account.extra_data.get('name', dflt)
        first_name = self.account.extra_data.get('first-name', None)
        last_name = self.account.extra_data.get('last-name', None)
        if first_name and last_name:
            name = first_name + ' ' + last_name
        return name


class LinkedInProvider(OAuthProvider):
    id = 'linkedin'
    name = 'LinkedIn'
    account_class = LinkedInAccount

    def get_default_scope(self):
        scope = []
        if app_settings.QUERY_EMAIL:
            scope.append('r_emailaddress')
        return scope

    def get_profile_fields(self):
        default_fields = ['id',
                          'first-name',
                          'last-name',
                          'email-address',
                          'picture-url',
                          'picture-urls::(original)',
                          # picture-urls::(original) is higher res
                          'public-profile-url']
        fields = self.get_settings().get('PROFILE_FIELDS', default_fields)
        return fields

    def extract_uid(self, data):
        return data['id']

    def extract_common_fields(self, data):
        return dict(email=data.get('email-address'),
                    first_name=data.get('first-name'),
                    last_name=data.get('last-name'))


providers.registry.register(LinkedInProvider)
| 34.485294
| 78
| 0.594456
| 262
| 2,345
| 5.179389
| 0.274809
| 0.051584
| 0.082535
| 0.103169
| 0.181282
| 0.181282
| 0.100221
| 0.06927
| 0.06927
| 0.06927
| 0
| 0
| 0.301066
| 2,345
| 67
| 79
| 35
| 0.827944
| 0.072921
| 0
| 0
| 0
| 0
| 0.121715
| 0.011065
| 0
| 0
| 0
| 0
| 0
| 1
| 0.14
| false
| 0.02
| 0.08
| 0.06
| 0.48
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6ccbdf212404d1bb840cdf710923204e7c1baa5
| 4,744
|
py
|
Python
|
game2048/myNew.py
|
CCTQL/2048-api
|
a75316a90e9a7c8c9171e39e1d1fc24cbac3ba1a
|
[
"Apache-2.0"
] | null | null | null |
game2048/myNew.py
|
CCTQL/2048-api
|
a75316a90e9a7c8c9171e39e1d1fc24cbac3ba1a
|
[
"Apache-2.0"
] | null | null | null |
game2048/myNew.py
|
CCTQL/2048-api
|
a75316a90e9a7c8c9171e39e1d1fc24cbac3ba1a
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
import time
import pandas as pd
import numpy as np
import csv
batch_size = 128
NUM_EPOCHS = 30
LR = 0.001
TIME_STEP = 4
class CCRNN(nn.Module):
def __init__(self):
# 继承RNN
super(CCRNN, self).__init__()
self.ccLSTM = nn.LSTM(
input_size=4,
hidden_size=128,
num_layers=4,
bidirectional=True,
batch_first=True
)
self.ccCNN22 = nn.Conv2d(
in_channels=1,
out_channels=1,
kernel_size=2,
stride=2,
padding=0
)
self.ccCNN14 = nn.Conv2d(
in_channels=1,
out_channels=1,
kernel_size=(1, 4),
stride=1,
padding=0
)
self.ccCNN41 = nn.Conv2d(
in_channels=1,
out_channels=1,
kernel_size=(4, 1),
stride=1,
padding=0
)
self.CNN22toFC = nn.Linear(4, 64)
self.CNN41toFC = nn.Linear(4, 32)
self.CNN14toFC = nn.Linear(4, 32)
self.LSTMtoFC = nn.Linear(256, 128)
self.FCtoOut = nn.Linear(256, 4)
def forward(self, x):
LSTM_out, (h_n, c_n) = self.ccLSTM(x, None)
CNN_in = torch.unsqueeze(x[:, 0:4, :], 1)
CNN_out22 = self.ccCNN22(CNN_in)
CNN_out41 = self.ccCNN41(CNN_in)
CNN_out14 = self.ccCNN14(CNN_in)
CNN22_reshape = CNN_out22.view(-1, 4)
CNN14_reshape = CNN_out41.view(-1, 4)
CNN41_reshape = CNN_out14.view(-1, 4)
CNN22toFC = self.CNN22toFC(CNN22_reshape)
CNN14toFC = self.CNN14toFC(CNN14_reshape)
CNN41toFC = self.CNN41toFC(CNN41_reshape)
LSTMtoFC = self.LSTMtoFC(LSTM_out[:, -1, :])
CNNandLSTM = torch.cat((CNN22toFC, CNN41toFC, CNN14toFC, LSTMtoFC), 1)
out = self.FCtoOut(CNNandLSTM)
return out
#------------------读入数据-----------------------------
csv_data = pd.read_csv('./drive/My Drive/DATA.csv')
csv_data = csv_data.values
A = csv_data.shape[0]
board_data = csv_data[:,0:16]
# X = np.log2(X)
X = torch.FloatTensor(board_data)
X = np.int64(board_data)
# 转置后拼接
X = np.reshape(X, (-1,4,4))
XT = X.transpose(0,2,1)
X = np.concatenate((X,XT),axis=1)
print(X.shape)
direction_data = csv_data[:,16]
Y = np.int64(direction_data)
#-------------------------------------------------------
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2,shuffle=False)
X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)
Y_train = torch.LongTensor(Y_train)
Y_test = torch.LongTensor(Y_test)
train_dataset = torch.utils.data.TensorDataset(X_train,Y_train)
# test_dataset = torch.utils.data.TensorDataset(X_test,Y_test)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True
)
# test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
# batch_size=batch_size,
# shuffle=False
# )
batch_size = 128
NUM_EPOCHS = 30
LR = 0.001
TIME_STEP = 4
#------------------读入数据-----------------------------
csv_data = pd.read_csv('./drive/My Drive/DATA.csv')
csv_data = csv_data.values
A = csv_data.shape[0]
board_data = csv_data[:,0:16]
# X = np.log2(X)
X = torch.FloatTensor(board_data)
X = np.int64(board_data)
# 转置后拼接
X = np.reshape(X, (-1,4,4))
XT = X.transpose(0,2,1)
X = np.concatenate((X,XT),axis=1)
print(X.shape)
direction_data = csv_data[:,16]
Y = np.int64(direction_data)
model = CCRNN()
model = model.cuda()
optimizer = optim.Adam(model.parameters(), lr = 0.001)
def train(epoch):
for batch_idx, (data, target) in enumerate(train_loader):
data, target = Variable(data).cuda(), Variable(target).cuda()
data = data/11.0
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if batch_idx % 50 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\t Loss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
torch.save(self.model, 'rnn_model_' + str(epoch) + '.pkl')
if __name__ == '__main__':
for epoch in range(0, NUM_EPOCHS):
train(epoch)
| 27.421965
| 87
| 0.572513
| 627
| 4,744
| 4.135566
| 0.244019
| 0.032395
| 0.025453
| 0.020825
| 0.378326
| 0.352102
| 0.271886
| 0.271886
| 0.271886
| 0.271886
| 0
| 0.055006
| 0.275717
| 4,744
| 173
| 88
| 27.421965
| 0.699651
| 0.086847
| 0
| 0.344
| 0
| 0
| 0.028937
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024
| false
| 0
| 0.088
| 0
| 0.128
| 0.024
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6cda14ca91ba1556d929a926bfc87a16ab1f726
| 371
|
py
|
Python
|
tests/test_arr_add_value.py
|
dboyliao/TaipeiPy-pybind11-buffer-array
|
22e764d9fbf605950c0de10e3a341de36bc9bf89
|
[
"MIT"
] | 1
|
2022-03-17T10:01:45.000Z
|
2022-03-17T10:01:45.000Z
|
tests/test_arr_add_value.py
|
dboyliao/TaipeiPy-pybind11-buffer-array
|
22e764d9fbf605950c0de10e3a341de36bc9bf89
|
[
"MIT"
] | null | null | null |
tests/test_arr_add_value.py
|
dboyliao/TaipeiPy-pybind11-buffer-array
|
22e764d9fbf605950c0de10e3a341de36bc9bf89
|
[
"MIT"
] | null | null | null |
import numpy as np

import mylib


def test_arr_add_value():
    for _ in range(10):
        shape = np.random.randint(1, 10, size=np.random.randint(3, 10)).tolist()
        in_arr = np.random.rand(*shape).astype(np.double)
        ok = np.allclose(mylib.array_add_value(in_arr, np.pi), in_arr + np.pi)
        if not ok:
            raise ValueError("incorrect result")
| 28.538462
| 80
| 0.638814
| 59
| 371
| 3.864407
| 0.576271
| 0.105263
| 0.092105
| 0.078947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027972
| 0.229111
| 371
| 12
| 81
| 30.916667
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0.043127
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6cea1b013c7155bc06629fbf31e017bbe14f52f
| 658
|
py
|
Python
|
tests/test_units/test_mapper_str.py
|
frewsxcv/routes
|
7690fc1016e56739855435fb54c96acccfa29009
|
[
"MIT"
] | 1
|
2015-11-08T12:58:16.000Z
|
2015-11-08T12:58:16.000Z
|
tests/test_units/test_mapper_str.py
|
frewsxcv/routes
|
7690fc1016e56739855435fb54c96acccfa29009
|
[
"MIT"
] | null | null | null |
tests/test_units/test_mapper_str.py
|
frewsxcv/routes
|
7690fc1016e56739855435fb54c96acccfa29009
|
[
"MIT"
] | null | null | null |
import unittest

from routes import Mapper


class TestMapperStr(unittest.TestCase):
    def test_str(self):
        m = Mapper()
        m.connect('/{controller}/{action}')
        m.connect('entries', '/entries', controller='entry', action='index')
        m.connect('entry', '/entries/{id}', controller='entry', action='show')

        expected = """\
Route name Methods Path
/{controller}/{action}
entries /entries
entry /entries/{id}"""
        for expected_line, actual_line in zip(expected.splitlines(), str(m).splitlines()):
            assert expected_line == actual_line.rstrip()
| 34.631579
| 91
| 0.582067
| 67
| 658
| 5.641791
| 0.507463
| 0.063492
| 0.111111
| 0.116402
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.273556
| 658
| 18
| 92
| 36.555556
| 0.790795
| 0
| 0
| 0
| 0
| 0
| 0.315625
| 0.06875
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6cfd0714854720779418d4a80b8997e25e611e3
| 3,227
|
py
|
Python
|
python-function-files-dictionaries/week4-assignment1.py
|
MauMendes/python3-programming-specialization
|
8bd259f0ac559c6004baa0e759b6ec4bc25e1320
|
[
"MIT"
] | null | null | null |
python-function-files-dictionaries/week4-assignment1.py
|
MauMendes/python3-programming-specialization
|
8bd259f0ac559c6004baa0e759b6ec4bc25e1320
|
[
"MIT"
] | null | null | null |
python-function-files-dictionaries/week4-assignment1.py
|
MauMendes/python3-programming-specialization
|
8bd259f0ac559c6004baa0e759b6ec4bc25e1320
|
[
"MIT"
] | null | null | null |
#1) Write a function, sublist, that takes in a list of numbers as the parameter. In the function, use a while loop to return a sublist of the input list.
# The sublist should contain the same values of the original list up until it reaches the number 5 (it should not contain the number 5).
def sublist(input_lst):
    out_lst = list()
    number = 0
    i = 0
    print(input_lst)
    print(len(input_lst))
    length = len(input_lst)
    while i < length:
        number = input_lst[i]
        i += 1
        if number == 5: break
        else: out_lst.append(number)
    print(out_lst)
    return out_lst

#2) Write a function called check_nums that takes a list as its parameter, and contains a while loop that only stops once the element of the
# list is the number 7. What is returned is a list of all of the numbers up until it reaches 7.def check_nums(input_lst):
def check_nums(input_lst):
    out_lst = list()
    number = 0
    i = 0
    print(input_lst)
    print(len(input_lst))
    length = len(input_lst)
    while i < length:
        number = input_lst[i]
        i += 1
        if number == 7: break
        else: out_lst.append(number)
    print(out_lst)
    return out_lst

#3) Write a function, sublist, that takes in a list of strings as the parameter. In the function, use a while loop to return a sublist of the input list.
# The sublist should contain the same values of the original list up until it reaches the string “STOP” (it should not contain the string “STOP”).
def sublist(in_lst):
    out_list = list()
    str = ""
    i = 0
    while str != "STOP":
        str = in_lst[i]
        i += 1
        if str == "STOP": break
        else: out_list.append(str)
    return out_list

#4) Write a function called stop_at_z that iterates through a list of strings. Using a while loop, append each string to a new list until the string that
# appears is “z”. The function should return the new list.
def stop_at_z(in_lst):
    out_list = list()
    str = ""
    i = 0
    while str != "z":
        str = in_lst[i]
        i += 1
        if str == "z": break
        else: out_list.append(str)
    return out_list

#5) Below is a for loop that works. Underneath the for loop, rewrite the problem so that it does the same thing, but using a while loop instead of a for loop.
# Assign the accumulated total in the while loop code to the variable sum2. Once complete, sum2 should equal sum1.
lst = [65, 78, 21, 33]
lenght = len(lst)
i = 0
sum2 = 0
while i < lenght:
    sum2 += lst[i]
    i += 1

#6) Challenge: Write a function called beginning that takes a list as input and contains a while loop that only stops once the element of the list is the string ‘bye’.
# What is returned is a list that contains up to the first 10 strings, regardless of where the loop stops. (i.e., if it stops on the 32nd element, the first 10 are
# returned. If “bye” is the 5th element, the first 4 are returned.) If you want to make this even more of a challenge, do this without slicing
def beginning(in_list):
    length = len(in_list)
    out_lst = list()
    i = 0
    str = ""
    while i < length:
        str = in_list[i]
        i += 1
        if str == "bye" or i > 10:
            break
        out_lst.append(str)
    return out_lst
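A few illustrative calls for the helpers above (example inputs are made up here; note that the second sublist definition, which expects strings and the sentinel "STOP", overrides the earlier numeric version, so only the string-based variant is exercised):

# Example calls (inputs are illustrative only):
print(check_nums([9, 7, 4]))             # -> [9]
print(sublist(["a", "b", "STOP", "c"]))  # -> ['a', 'b']
print(stop_at_z(["a", "b", "z", "c"]))   # -> ['a', 'b']
print(beginning(["hi", "bye", "x"]))     # -> ['hi']
print(sum2)                              # -> 197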
| 37.091954
| 168
| 0.664084
| 559
| 3,227
| 3.758497
| 0.218247
| 0.041885
| 0.028558
| 0.014279
| 0.536887
| 0.482627
| 0.462637
| 0.462637
| 0.447406
| 0.411233
| 0
| 0.020912
| 0.259064
| 3,227
| 86
| 169
| 37.523256
| 0.8578
| 0.556864
| 0
| 0.686567
| 0
| 0
| 0.009174
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074627
| false
| 0
| 0
| 0
| 0.149254
| 0.089552
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6d14bad54d6d5d7401435412b7045fd99c1fc0a
| 25,605
|
py
|
Python
|
saas/backend/apps/group/views.py
|
Canway-shiisa/bk-iam-saas
|
73c3770d9647c9cc8d515427cd1d053d8af9d071
|
[
"MIT"
] | null | null | null |
saas/backend/apps/group/views.py
|
Canway-shiisa/bk-iam-saas
|
73c3770d9647c9cc8d515427cd1d053d8af9d071
|
[
"MIT"
] | null | null | null |
saas/backend/apps/group/views.py
|
Canway-shiisa/bk-iam-saas
|
73c3770d9647c9cc8d515427cd1d053d8af9d071
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from functools import wraps
from typing import List
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext as _
from drf_yasg.utils import swagger_auto_schema
from pydantic.tools import parse_obj_as
from rest_framework import serializers, status, views
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet, mixins
from backend.account.permissions import RolePermission, role_perm_class
from backend.apps.application.serializers import ConditionCompareSLZ, ConditionTagSLZ
from backend.apps.group import tasks # noqa
from backend.apps.group.models import Group
from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicySLZ, PolicySystemSLZ
from backend.apps.template.models import PermTemplatePolicyAuthorized
from backend.audit.audit import audit_context_setter, view_audit_decorator
from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean
from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz
from backend.biz.policy_tag import ConditionTagBean, ConditionTagBiz
from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker
from backend.biz.template import TemplateBiz
from backend.common.error_codes import error_codes
from backend.common.filters import NoCheckModelFilterBackend
from backend.common.serializers import SystemQuerySLZ
from backend.common.time import PERMANENT_SECONDS
from backend.service.constants import PermissionCodeEnum, RoleType, SubjectType
from backend.service.models import Subject
from backend.trans.group import GroupTrans
from .audit import (
GroupCreateAuditProvider,
GroupDeleteAuditProvider,
GroupMemberCreateAuditProvider,
GroupMemberDeleteAuditProvider,
GroupMemberRenewAuditProvider,
GroupPolicyDeleteAuditProvider,
GroupPolicyUpdateAuditProvider,
GroupTemplateCreateAuditProvider,
GroupTransferAuditProvider,
GroupUpdateAuditProvider,
)
from .constants import OperateEnum
from .filters import GroupFilter, GroupTemplateSystemFilter
from .serializers import (
GroupAddMemberSLZ,
GroupAuthoriedConditionSLZ,
GroupAuthorizationSLZ,
GroupCreateSLZ,
GroupDeleteMemberSLZ,
GroupIdSLZ,
GroupMemberUpdateExpiredAtSLZ,
GroupPolicyUpdateSLZ,
GroupSLZ,
GroupTemplateDetailSchemaSLZ,
GroupTemplateDetailSLZ,
GroupTemplateSchemaSLZ,
GroupTemplateSLZ,
GroupTransferSLZ,
GroupUpdateSLZ,
MemberSLZ,
SearchMemberSLZ,
)
permission_logger = logging.getLogger("permission")
def check_readonly_group(operation):
"""用户组可读检测"""
def decorate(func):
@wraps(func)
def wrapper(view, request, *args, **kwargs):
group = view.get_object()
readonly = group.readonly
if readonly:
raise error_codes.FORBIDDEN.format(
message=_("只读用户组({})无法进行({})操作!").format(group.id, operation), replace=True
)
response = func(view, request, *args, **kwargs)
return response
return wrapper
return decorate
class GroupQueryMixin:
def get_queryset(self):
request = self.request
return RoleListQuery(request.role, request.user).query_group()
class GroupPermissionMixin:
def check_object_permissions(self, request, obj):
if not RoleObjectRelationChecker(request.role).check_group(obj):
self.permission_denied(request, message=f"{request.role.type} role can not access group {obj.id}")
class GroupViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, GenericViewSet):
permission_classes = [RolePermission]
action_permission = {
"create": PermissionCodeEnum.MANAGE_GROUP.value,
"update": PermissionCodeEnum.MANAGE_GROUP.value,
"destroy": PermissionCodeEnum.MANAGE_GROUP.value,
}
queryset = Group.objects.all()
serializer_class = GroupSLZ
filterset_class = GroupFilter
lookup_field = "id"
group_biz = GroupBiz()
group_check_biz = GroupCheckBiz()
role_biz = RoleBiz()
group_trans = GroupTrans()
@swagger_auto_schema(
operation_description="创建用户组",
request_body=GroupCreateSLZ(label="用户组"),
responses={status.HTTP_201_CREATED: GroupIdSLZ(label="用户组ID")},
tags=["group"],
)
@view_audit_decorator(GroupCreateAuditProvider)
def create(self, request, *args, **kwargs):
"""
创建用户组
"""
serializer = GroupCreateSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
user_id = request.user.username
data = serializer.validated_data
# 用户组名称在角色内唯一
self.group_check_biz.check_role_group_name_unique(request.role.id, data["name"])
# 用户组数量在角色内是否超限
number_of_new_group = 1 # 接口只支持创建一个用户组,不支持批量,所以新增用户组数量为1
self.group_check_biz.check_role_group_limit(request.role, number_of_new_group)
# 检测成员是否满足管理的授权范围
members = parse_obj_as(List[Subject], data["members"])
self.group_check_biz.check_role_subject_scope(request.role, members)
group = self.group_biz.create_and_add_members(
request.role.id, data["name"], data["description"], user_id, members, data["expired_at"]
)
# 使用长时任务触发多个模板同时授权
if data["templates"]:
templates = self.group_trans.from_group_grant_data(data["templates"])
self.group_biz.grant(request.role, group, templates)
# 写入审计上下文
audit_context_setter(group=group)
return Response({"id": group.id}, status=status.HTTP_201_CREATED)
def get_queryset(self):
request = self.request
role = request.role
username = request.user.username
filter_role_id = request.query_params.get("role_id")
# 如果当前角色是staff 并且 存在筛选的role_id
if role.type == RoleType.STAFF.value and filter_role_id:
# 检查用户是否在角色的授权范围内
filter_role = self.role_biz.get_role_scope_include_user(filter_role_id, username)
if not filter_role:
return Group.objects.none()
# 返回角色的用户组列表
return RoleListQuery(filter_role, request.user).query_group()
return RoleListQuery(role, request.user).query_group()
@swagger_auto_schema(
operation_description="用户组列表",
responses={status.HTTP_200_OK: GroupSLZ(label="用户组", many=True)},
tags=["group"],
)
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
@swagger_auto_schema(
operation_description="用户组详情",
responses={status.HTTP_200_OK: GroupSLZ(label="用户组")},
tags=["group"],
)
def retrieve(self, request, *args, **kwargs):
return super().retrieve(request, *args, **kwargs)
@swagger_auto_schema(
operation_description="修改用户组",
request_body=GroupUpdateSLZ(label="用户组"),
responses={status.HTTP_200_OK: GroupUpdateSLZ(label="用户组")},
tags=["group"],
)
@view_audit_decorator(GroupUpdateAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_UPDATE.label)
def update(self, request, *args, **kwargs):
group = self.get_object()
serializer = GroupUpdateSLZ(group, data=request.data)
serializer.is_valid(raise_exception=True)
user_id = request.user.username
data = serializer.validated_data
# 用户组名称在角色内唯一
self.group_check_biz.check_role_group_name_unique(request.role.id, data["name"], group.id)
group = self.group_biz.update(group, data["name"], data["description"], user_id)
# 写入审计上下文
audit_context_setter(group=group)
return Response(serializer.data)
@swagger_auto_schema(
operation_description="删除用户组",
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupDeleteAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_DELETE.label)
def destroy(self, request, *args, **kwargs):
group = self.get_object()
self.group_biz.delete(group.id)
# 写入审计上下文
audit_context_setter(group=group)
return Response({})
class GroupMemberViewSet(GroupPermissionMixin, GenericViewSet):
permission_classes = [RolePermission]
action_permission = {
"list": PermissionCodeEnum.MANAGE_GROUP.value,
"create": PermissionCodeEnum.MANAGE_GROUP.value,
"destroy": PermissionCodeEnum.MANAGE_GROUP.value,
}
queryset = Group.objects.all()
lookup_field = "id"
biz = GroupBiz()
group_check_biz = GroupCheckBiz()
@swagger_auto_schema(
operation_description="用户组成员列表",
query_serializer=SearchMemberSLZ(label="keyword"),
responses={status.HTTP_200_OK: MemberSLZ(label="成员")},
tags=["group"],
)
def list(self, request, *args, **kwargs):
group = get_object_or_404(self.queryset, pk=kwargs["id"])
# 校验权限
checker = RoleObjectRelationChecker(request.role)
if not checker.check_group(group):
raise error_codes.FORBIDDEN.format(message=_("用户组({})不在当前用户身份可访问的范围内").format(group.id), replace=True)
if request.query_params.get("keyword"):
slz = SearchMemberSLZ(data=request.query_params)
slz.is_valid(raise_exception=True)
keyword = slz.validated_data["keyword"].lower()
group_members = self.biz.search_member_by_keyword(group.id, keyword)
return Response({"results": [one.dict() for one in group_members]})
pagination = LimitOffsetPagination()
limit = pagination.get_limit(request)
offset = pagination.get_offset(request)
count, group_members = self.biz.list_paging_group_member(group.id, limit, offset)
return Response({"count": count, "results": [one.dict() for one in group_members]})
@swagger_auto_schema(
operation_description="用户组添加成员",
request_body=GroupAddMemberSLZ(label="成员"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupMemberCreateAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_MEMBER_CREATE.label)
def create(self, request, *args, **kwargs):
serializer = GroupAddMemberSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
group = self.get_object()
data = serializer.validated_data
members_data = data["members"]
expired_at = data["expired_at"]
# 成员Dict结构转换为Subject结构,并去重
members = list(set(parse_obj_as(List[Subject], members_data)))
# 检测成员是否满足管理的授权范围
self.group_check_biz.check_role_subject_scope(request.role, members)
self.group_check_biz.check_member_count(group.id, len(members))
permission_logger.info("group %s add members %s by user %s", group.id, members, request.user.username)
# 添加成员
self.biz.add_members(group.id, members, expired_at)
# 写入审计上下文
audit_context_setter(group=group, members=[m.dict() for m in members])
return Response({}, status=status.HTTP_201_CREATED)
@swagger_auto_schema(
operation_description="用户组删除成员",
request_body=GroupDeleteMemberSLZ(label="成员"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupMemberDeleteAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_MEMBER_DELETE.label)
def destroy(self, request, *args, **kwargs):
serializer = GroupDeleteMemberSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
group = self.get_object()
data = serializer.validated_data
permission_logger.info(
"group %s delete members %s by user %s", group.id, data["members"], request.user.username
)
self.biz.remove_members(str(group.id), parse_obj_as(List[Subject], data["members"]))
# 写入审计上下文
audit_context_setter(group=group, members=data["members"])
return Response({})
class GroupMemberUpdateExpiredAtViewSet(GroupPermissionMixin, GenericViewSet):
permission_classes = [role_perm_class(PermissionCodeEnum.MANAGE_GROUP.value)]
queryset = Group.objects.all()
lookup_field = "id"
# service
group_biz = GroupBiz()
@swagger_auto_schema(
operation_description="用户组成员续期",
request_body=GroupMemberUpdateExpiredAtSLZ(label="成员"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupMemberRenewAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_MEMBER_RENEW.label)
def create(self, request, *args, **kwargs):
serializer = GroupMemberUpdateExpiredAtSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
group = self.get_object()
data = serializer.validated_data
permission_logger.info(
"group %s update members %s expired_at by user %s", group.id, data["members"], request.user.username
)
for m in data["members"]:
m["policy_expired_at"] = m.pop("expired_at")
self.group_biz.update_members_expired_at(
group.id, parse_obj_as(List[GroupMemberExpiredAtBean], data["members"])
)
# 写入审计上下文
audit_context_setter(group=group, members=data["members"])
return Response({})
class GroupTemplateViewSet(GroupPermissionMixin, GenericViewSet):
permission_classes = [RolePermission]
action_permission = {"create": PermissionCodeEnum.MANAGE_GROUP.value}
pagination_class = None # 去掉swagger中的limit offset参数
queryset = Group.objects.all()
filterset_class = GroupTemplateSystemFilter
filter_backends = [NoCheckModelFilterBackend]
lookup_field = "id"
template_biz = TemplateBiz()
@swagger_auto_schema(
operation_description="用户组拥有的权限模板列表",
responses={status.HTTP_200_OK: GroupTemplateSchemaSLZ(label="权限模板", many=True)},
tags=["group"],
)
def list(self, request, *args, **kwargs):
group = get_object_or_404(self.queryset, pk=kwargs["id"])
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
queryset = PermTemplatePolicyAuthorized.objects.filter_by_subject(subject).defer("_data")
queryset = self.filter_queryset(queryset)
return Response(GroupTemplateSLZ(queryset, many=True).data)
@swagger_auto_schema(
operation_description="用户组权限模板授权信息",
responses={status.HTTP_200_OK: GroupTemplateDetailSchemaSLZ(label="授权信息")},
tags=["group"],
)
def retrieve(self, request, *args, **kwargs):
group = get_object_or_404(self.queryset, pk=kwargs["id"])
template_id = kwargs["template_id"]
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
authorized_template = PermTemplatePolicyAuthorized.objects.get_by_subject_template(subject, int(template_id))
return Response(GroupTemplateDetailSLZ(authorized_template).data)
class GroupPolicyViewSet(GroupPermissionMixin, GenericViewSet):
permission_classes = [RolePermission]
action_permission = {
"create": PermissionCodeEnum.MANAGE_GROUP.value,
"destroy": PermissionCodeEnum.MANAGE_GROUP.value,
"update": PermissionCodeEnum.MANAGE_GROUP.value,
}
pagination_class = None # 去掉swagger中的limit offset参数
queryset = Group.objects.all()
lookup_field = "id"
policy_query_biz = PolicyQueryBiz()
policy_operation_biz = PolicyOperationBiz()
group_biz = GroupBiz()
group_trans = GroupTrans()
@swagger_auto_schema(
operation_description="用户组添加权限",
request_body=GroupAuthorizationSLZ(label="授权信息"),
responses={status.HTTP_201_CREATED: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupTemplateCreateAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_POLICY_CREATE.label)
def create(self, request, *args, **kwargs):
serializer = GroupAuthorizationSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
group = self.get_object()
data = serializer.validated_data
templates = self.group_trans.from_group_grant_data(data["templates"])
self.group_biz.grant(request.role, group, templates)
# 写入审计上下文
audit_context_setter(
group=group,
templates=[{"system_id": t["system_id"], "template_id": t["template_id"]} for t in data["templates"]],
)
return Response({}, status=status.HTTP_201_CREATED)
@swagger_auto_schema(
operation_description="用户组自定义权限列表",
query_serializer=SystemQuerySLZ,
responses={status.HTTP_200_OK: PolicySLZ(label="策略", many=True)},
tags=["group"],
)
def list(self, request, *args, **kwargs):
slz = SystemQuerySLZ(data=request.query_params)
slz.is_valid(raise_exception=True)
system_id = slz.validated_data["system_id"]
group = get_object_or_404(self.queryset, pk=kwargs["id"])
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
policies = self.policy_query_biz.list_by_subject(system_id, subject)
# ResourceNameAutoUpdate
updated_policies = self.policy_operation_biz.update_due_to_renamed_resource(system_id, subject, policies)
return Response([p.dict() for p in updated_policies])
@swagger_auto_schema(
operation_description="用户组删除自定义权限",
request_body=PolicyDeleteSLZ(label="ids"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupPolicyDeleteAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_POLICY_DELETE.label)
def destroy(self, request, *args, **kwargs):
slz = PolicyDeleteSLZ(data=request.data)
slz.is_valid(raise_exception=True)
system_id = slz.validated_data["system_id"]
ids = slz.validated_data["ids"]
group = self.get_object()
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
permission_logger.info(
"subject type=%s, id=%s policy deleted by user %s", subject.type, subject.id, request.user.username
)
policy_list = self.policy_query_biz.query_policy_list_by_policy_ids(system_id, subject, ids)
# 删除权限
self.policy_operation_biz.delete_by_ids(system_id, subject, ids)
# 写入审计上下文
audit_context_setter(group=group, system_id=system_id, policies=policy_list.policies)
return Response()
@swagger_auto_schema(
operation_description="用户组权限修改",
request_body=GroupPolicyUpdateSLZ(label="修改策略"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupPolicyUpdateAuditProvider)
@check_readonly_group(operation=OperateEnum.GROUP_POLICY_UPDATE.label)
def update(self, request, *args, **kwargs):
group = self.get_object()
slz = GroupPolicyUpdateSLZ(data=request.data)
slz.is_valid(raise_exception=True)
data = slz.validated_data
system_id = data["system_id"]
template_id = data["template_id"]
policies = [PolicyBean(expired_at=PERMANENT_SECONDS, **action) for action in data["actions"]]
self.group_biz.update_policies(request.role, group.id, system_id, template_id, policies)
# 写入审计上下文
audit_context_setter(group=group, system_id=system_id, template_id=template_id, policies=policies)
return Response({})
class GroupSystemViewSet(GenericViewSet):
pagination_class = None # 去掉swagger中的limit offset参数
queryset = Group.objects.all()
lookup_field = "id"
biz = GroupBiz()
@swagger_auto_schema(
operation_description="用户组有权限的所有系统列表",
responses={status.HTTP_200_OK: PolicySystemSLZ(label="系统", many=True)},
tags=["group"],
)
def list(self, request, *args, **kwargs):
group = self.get_object()
data = self.biz.list_system_counter(group.id)
return Response([one.dict() for one in data])
class GroupTransferView(views.APIView):
"""
用户组转出
"""
permission_classes = [role_perm_class(PermissionCodeEnum.TRANSFER_GROUP.value)]
role_biz = RoleBiz()
@swagger_auto_schema(
operation_description="用户组批量转出",
request_body=GroupTransferSLZ(label="用户转移"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["group"],
)
@view_audit_decorator(GroupTransferAuditProvider)
def post(self, request, *args, **kwargs):
slz = GroupTransferSLZ(data=request.data, context={"role": request.role})
slz.is_valid(raise_exception=True)
group_ids = slz.validated_data["group_ids"]
role_id = slz.validated_data["role_id"]
self.role_biz.transfer_groups_role(group_ids, role_id)
audit_context_setter(group_ids=group_ids, role_id=role_id)
return Response({})
class GroupTemplateConditionCompareView(GroupPermissionMixin, GenericViewSet):
condition_biz = ConditionTagBiz()
template_biz = TemplateBiz()
queryset = Group.objects.all()
lookup_field = "id"
@swagger_auto_schema(
operation_description="权限模板操作条件对比",
request_body=GroupAuthoriedConditionSLZ(label="操作条件"),
responses={status.HTTP_200_OK: ConditionTagSLZ(label="条件差异", many=True)},
tags=["group"],
)
def create(self, request, *args, **kwargs):
serializer = GroupAuthoriedConditionSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
group = self.get_object()
action_id = data["action_id"]
resource_group_id = data["resource_group_id"]
related_resource_type = data["related_resource_type"]
new_condition = parse_obj_as(List[ConditionTagBean], related_resource_type["condition"])
# 从模板数据中查找匹配的操作, 资源类型的条件
template_id = kwargs["template_id"]
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
authorized_template = PermTemplatePolicyAuthorized.objects.get_by_subject_template(subject, int(template_id))
for action in authorized_template.data["actions"]:
policy = PolicyBean.parse_obj(action)
# 查询对应的操作
if policy.action_id == action_id:
# 操作操作中对应于资源类型的操作
related_resource_type = policy.get_related_resource_type(
resource_group_id, related_resource_type["system_id"], related_resource_type["type"]
)
old_condition = related_resource_type.condition if related_resource_type else []
# 对比用户组已有的条件与用户提交的条件
conditions = self.condition_biz.compare_and_tag(
new_condition, parse_obj_as(List[ConditionTagBean], old_condition), is_template=True
)
return Response([c.dict() for c in conditions])
raise error_codes.VALIDATE_ERROR.format(_("模板: {} 没有操作: {} 的权限").format(template_id, action_id))
class GroupCustomPolicyConditionCompareView(GroupPermissionMixin, GenericViewSet):
policy_biz = PolicyQueryBiz()
condition_biz = ConditionTagBiz()
queryset = Group.objects.all()
lookup_field = "id"
@swagger_auto_schema(
operation_description="条件差异对比",
request_body=ConditionCompareSLZ(label="资源条件"),
responses={status.HTTP_200_OK: ConditionTagSLZ(label="条件差异", many=True)},
tags=["group"],
)
def create(self, request, *args, **kwargs):
serializer = ConditionCompareSLZ(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
group = self.get_object()
subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
# 1. 查询policy的condition
related_resource_type = data["related_resource_type"]
old_condition = self.policy_biz.get_policy_resource_type_conditions(
subject,
data["policy_id"],
data["resource_group_id"],
related_resource_type["system_id"],
related_resource_type["type"],
)
# 2. 对比合并差异
conditions = self.condition_biz.compare_and_tag(
parse_obj_as(List[ConditionTagBean], related_resource_type["condition"]),
parse_obj_as(List[ConditionTagBean], old_condition),
is_template=True,
)
return Response([c.dict() for c in conditions])
| 36.216407
| 117
| 0.696348
| 2,751
| 25,605
| 6.251545
| 0.149764
| 0.010583
| 0.022735
| 0.028724
| 0.505175
| 0.455925
| 0.422317
| 0.370334
| 0.330038
| 0.306605
| 0
| 0.004705
| 0.203163
| 25,605
| 706
| 118
| 36.267705
| 0.83821
| 0.047803
| 0
| 0.388446
| 0
| 0
| 0.049475
| 0.002634
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049801
| false
| 0
| 0.067729
| 0.003984
| 0.294821
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6d16a8a093216b78956e0c3642e48c0a64c8778
| 5,188
|
py
|
Python
|
towers.py
|
fillest/7drl2013
|
96d291dce08a85d3871713c99f3a036de482d6ca
|
[
"MIT"
] | 1
|
2015-05-19T08:12:49.000Z
|
2015-05-19T08:12:49.000Z
|
towers.py
|
fillest/7drl2013
|
96d291dce08a85d3871713c99f3a036de482d6ca
|
[
"MIT"
] | null | null | null |
towers.py
|
fillest/7drl2013
|
96d291dce08a85d3871713c99f3a036de482d6ca
|
[
"MIT"
] | null | null | null |
import util
import libtcodpy as tcod
import enemies
import operator
class Missile (util.Entity):
sym = '*'
color = tcod.white
class BasicMissile (Missile):
color = tcod.yellow
class IceMissile (Missile):
color = tcod.light_blue
class AoeMissile (Missile):
color = tcod.red
class Building (util.Entity):
sym = '@'
max_hp = 1
cost = 0
def __init__ (self, *args):
super(Building, self).__init__(*args)
self.hp = self.max_hp
def hurt (self, hp):
self.hp -= hp
if self.hp < 1:
self.die()
def hit (self, e):
if e in self.state.entities:
e.hurt(self.damage)
def die (self):
if self in self.state.entities:
self.delete()
def put (self):
assert self.state.energy > 0
self.state.entities.append(self)
self.state.energy -= self.cost
return self
def delete (self):
self.state.entities.remove(self)
self.state.energy += self.cost
return self
class Heart (Building):
sym = '&'
color = tcod.darker_red
max_hp = 20
def delete (self):
self.state.is_paused = True
return super(Heart, self).delete()
class Bait (Building):
sym = Heart.sym
color = tcod.pink
max_hp = 10
class Tower (Building):
radius = 15
max_hp = 10
damage = 1
missile = None
def __init__ (self, *args):
super(Tower, self).__init__(*args)
self.cooldown = False
def update (self):
if not self.cooldown:
# dist_min = None
# target = None
# for e in self.state.entities.enemies():
# d = util.dist(self.x, self.y, e.x, e.y)
# if d < (self.radius + 1) and ((dist_min is None) or (d < dist_min)):
# dist_min = d
# target = e
preferred_targets = []
other_targets = []
for e in self.state.entities.enemies():
d = util.dist(self.x, self.y, e.x, e.y)
if d < (self.radius + 1):
if e in self.state.targets_towers:
total_damage = sum([t.damage for t in self.state.targets_towers[e]])
if total_damage < e.hp:
preferred_targets.append((d, e))
else:
other_targets.append((d, e))
else:
preferred_targets.append((d, e))
target = None
if preferred_targets:
_d, target = sorted(preferred_targets, key = operator.itemgetter(0))[0]
elif other_targets:
_d, target = sorted(other_targets, key = operator.itemgetter(0))[0]
if target:
self.state.targets_towers[target].append(self)
self._shoot(target)
def render (self):
super(Tower, self).render()
if self.mouse_over:
# if True:
for x in range(self.x - (self.radius + 1), self.x + (self.radius + 1)):
for y in range(self.y - (self.radius + 1), self.y + (self.radius + 1)):
if util.dist(self.x, self.y, x, y) < (self.radius + 1):
tcod.console_set_char_background(0, x, y, tcod.Color(*[15]*3), flag = tcod.BKGND_SET)
def _shoot (self, e):
self.cooldown = True
def clear_cd ():
self.cooldown = False
self.state.timers.start_run_once(1000, clear_cd)
m = self.missile(self.state, self.x, self.y)
self.state.entities.append(m)
missile_speed = 20
self.state.timers.start(missile_speed, self.update_missile, [m, e])
def update_missile (self, m, e):
tcod.line_init(m.x, m.y, e.x, e.y)
x, y = tcod.line_step()
if x is None:
self.state.entities.remove(m)
self.hit(e)
return util.STOP
else:
m.x = x
m.y = y
class BasicTower (Tower):
color = tcod.dark_green
missile = BasicMissile
cost = 1
class ResearchBuilding (Building):
color = tcod.dark_sepia
cost = 1
def __init__ (self, *args):
super(ResearchBuilding, self).__init__(*args)
self.timer = self.state.timers.start(1000, self._research)
def _research (self):
pass
class AoeExplosion (util.Entity):
sym = '*'
color = tcod.dark_red
def __init__ (self, radius, *args):
super(AoeExplosion, self).__init__(*args)
self.radius = radius
def render (self):
for x in range(self.x - self.radius, self.x + self.radius):
for y in range(self.y - self.radius, self.y + self.radius):
tcod.console_put_char(0, x, y, self.sym, tcod.BKGND_NONE)
tcod.console_set_char_foreground(0, x, y, self.color)
class AoeTower (Tower):
color = tcod.dark_orange
missile = AoeMissile
cost = 2
def hit (self, target):
radius = 2
for x in range(target.x - radius, target.x + radius):
for y in range(target.y - radius, target.y + radius):
for e in self.state.entities.enemies():
if (e.x, e.y) == (x, y):
if e in self.state.entities: #TODO copypaste
e.hurt(self.damage)
e = AoeExplosion(radius, self.state, target.x, target.y)
self.state.entities.append(e)
self.state.timers.start_run_once(70, lambda: self.state.entities.remove(e))
class IceTower (Tower):
damage = 0.2
color = tcod.dark_blue
missile = IceMissile
cost = 1
def hit (self, target):
target.hurt(self.damage)
if not getattr(target, 'is_debuffed', False):
old_speed = target.timer.interval
target.timer.interval *= 3
target.timer.time_buf *= 3
target.is_debuffed = True
def rollback ():
target.timer.interval = old_speed
target.timer.time_buf /= 3
target.is_debuffed = False
self.rollback_timer = self.state.timers.start_run_once(1000, rollback)
elif getattr(self, 'rollback_timer', False):
self.rollback_timer.reset()
| 24.018519
| 92
| 0.664418
| 802
| 5,188
| 4.168329
| 0.168329
| 0.069997
| 0.061023
| 0.021538
| 0.311397
| 0.189949
| 0.141789
| 0.114269
| 0.040084
| 0.040084
| 0
| 0.013468
| 0.198535
| 5,188
| 215
| 93
| 24.130233
| 0.790524
| 0.044526
| 0
| 0.189024
| 0
| 0
| 0.005862
| 0
| 0
| 0
| 0
| 0.004651
| 0.006098
| 1
| 0.121951
| false
| 0.006098
| 0.02439
| 0
| 0.445122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6d351ce6a88251c74a7d12532c34a2b0ba6f8b1
| 795
|
py
|
Python
|
python/mandelbrot.py
|
lukasjoc/random
|
5be080b424f02491fb219634902fc0cc192aff6c
|
[
"0BSD"
] | 1
|
2020-11-09T19:32:43.000Z
|
2020-11-09T19:32:43.000Z
|
python/mandelbrot.py
|
lukasjoc/random
|
5be080b424f02491fb219634902fc0cc192aff6c
|
[
"0BSD"
] | null | null | null |
python/mandelbrot.py
|
lukasjoc/random
|
5be080b424f02491fb219634902fc0cc192aff6c
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/python3
from PIL import Image
from numpy import array  # numpy.complex was removed in newer NumPy; the builtin complex is used below
from tqdm import tqdm
import colorsys

W = 512
#W=142


def mandelbrot(x, y):
    def get_colors(i):
        color = 255 * array(colorsys.hsv_to_rgb(i / 255.0, 1.0, 0.5))
        return tuple(color.astype(int))

    c, cc = 0, complex(x, y)
    for i in range(1, 1000):
        if abs(c) > 2:
            return get_colors(i)
        c = c * c + cc
    return 0, 0, 0


if __name__ == "__main__":
    img = Image.new("RGB", (W, int(W / 2)))
    pixels = img.load()
    for x in tqdm(range(img.size[0])):
        for y in tqdm(range(img.size[1])):
            xx = (x - (0.75 * W)) / (W / 4)
            yy = (y - (W / 4)) / (W / 4)
            pixels[x, y] = mandelbrot(xx, yy)
    img.show()
    img.save("mandelbrot.jpg")
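As a small sanity check on the pixel-to-complex-plane mapping used above, the image spans roughly the rectangle [-3, 1) by [-1, 1) for W = 512; the snippet below is only a worked illustration of that arithmetic:

W = 512
xs = [(x - (0.75 * W)) / (W / 4) for x in (0, W - 1)]    # ≈ [-3.0, 0.992]
ys = [(y - (W / 4)) / (W / 4) for y in (0, W // 2 - 1)]  # ≈ [-1.0, 0.992]
print(xs, ys)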
| 22.714286
| 69
| 0.52956
| 132
| 795
| 3.098485
| 0.431818
| 0.01467
| 0.0489
| 0.06846
| 0.08802
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066908
| 0.304403
| 795
| 34
| 70
| 23.382353
| 0.672694
| 0.027673
| 0
| 0
| 0
| 0
| 0.032425
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.16
| 0
| 0.36
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6d3938d66694895ff110b11b2560698b6722338
| 9,672
|
py
|
Python
|
tests/unit/commands/test_deploy.py
|
tonyreina/mlt
|
ee490ebdeb5aa6924dbfc0a067a0653754c470f4
|
[
"Apache-2.0"
] | 1
|
2021-11-29T10:35:20.000Z
|
2021-11-29T10:35:20.000Z
|
tests/unit/commands/test_deploy.py
|
tonyreina/mlt
|
ee490ebdeb5aa6924dbfc0a067a0653754c470f4
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/commands/test_deploy.py
|
tonyreina/mlt
|
ee490ebdeb5aa6924dbfc0a067a0653754c470f4
|
[
"Apache-2.0"
] | 1
|
2020-02-22T01:04:15.000Z
|
2020-02-22T01:04:15.000Z
|
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
from __future__ import print_function
import uuid
import pytest
from mock import call, MagicMock
from mlt.commands.deploy import DeployCommand
from test_utils.io import catch_stdout
@pytest.fixture
def sleep(patch):
return patch('time.sleep')
@pytest.fixture
def fetch_action_arg(patch):
return patch('files.fetch_action_arg', MagicMock(return_value='output'))
@pytest.fixture
def kube_helpers(patch):
return patch('kubernetes_helpers')
@pytest.fixture
def json_mock(patch):
return patch('json')
@pytest.fixture
def open_mock(patch):
return patch('open')
@pytest.fixture
def popen_mock(patch):
popen_mock = MagicMock()
popen_mock.return_value.poll.return_value = 0
return patch('Popen', popen_mock)
@pytest.fixture
def process_helpers(patch):
return patch('process_helpers')
@pytest.fixture
def progress_bar(patch):
progress_mock = MagicMock()
progress_mock.duration_progress.side_effect = lambda x, y, z: print(
'Pushing ')
return patch('progress_bar', progress_mock)
@pytest.fixture
def template(patch):
return patch('Template')
@pytest.fixture
def verify_build(patch):
return patch('build_helpers.verify_build')
@pytest.fixture
def verify_init(patch):
return patch('config_helpers.load_config')
@pytest.fixture
def walk_mock(patch):
return patch('os.walk', MagicMock(return_value=['foo', 'bar']))
@pytest.fixture
def yaml(patch):
return patch('yaml.load')
def deploy(no_push, skip_crd_check, interactive, extra_config_args, retries=5):
deploy = DeployCommand(
{'deploy': True, '--no-push': no_push,
'--skip-crd-check': skip_crd_check,
'--interactive': interactive, '--retries': retries,
'--logs':False})
deploy.config = {'name': 'app', 'namespace': 'namespace'}
deploy.config.update(extra_config_args)
with catch_stdout() as caught_output:
deploy.action()
output = caught_output.getvalue()
return output
def verify_successful_deploy(output, did_push=True, interactive=False):
"""assert pushing, deploying, then objs created, then pushed"""
pushing = output.find('Pushing ')
push_skip = output.find('Skipping image push')
deploying = output.find('Deploying ')
inspecting = output.find('Inspect created objects by running:\n')
pushed = output.find('Pushed to ')
pod_connect = output.find('Connecting to pod...')
if did_push:
assert all(var >= 0 for var in (
deploying, inspecting, pushing, pushed))
assert deploying < inspecting, pushing < pushed
else:
assert all(var == -1 for var in (pushing, pushed))
assert all(var >= 0 for var in (deploying, inspecting, push_skip))
assert push_skip < deploying, deploying < inspecting
if interactive:
assert pod_connect > inspecting
def test_deploy_gce(walk_mock, progress_bar, popen_mock, open_mock,
template, kube_helpers, process_helpers, verify_build,
verify_init, fetch_action_arg, json_mock):
json_mock.load.return_value = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
output = deploy(
no_push=False, skip_crd_check=True,
interactive=False,
extra_config_args={'gceProject': 'gcr://projectfoo'})
verify_successful_deploy(output)
def test_deploy_docker(walk_mock, progress_bar, popen_mock, open_mock,
template, kube_helpers, process_helpers, verify_build,
verify_init, fetch_action_arg, json_mock):
json_mock.load.return_value = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
output = deploy(
no_push=False, skip_crd_check=True,
interactive=False,
extra_config_args={'registry': 'dockerhub'})
verify_successful_deploy(output)
def test_deploy_without_push(walk_mock, progress_bar, popen_mock, open_mock,
template, kube_helpers, process_helpers,
verify_build, verify_init, fetch_action_arg, json_mock):
json_mock.load.return_value = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
output = deploy(
no_push=True, skip_crd_check=True,
interactive=False,
extra_config_args={'gceProject': 'gcr://projectfoo'})
verify_successful_deploy(output, did_push=False)
def test_deploy_interactive_one_file(walk_mock, progress_bar, popen_mock,
open_mock, template, kube_helpers,
process_helpers, verify_build,
verify_init, fetch_action_arg, sleep,
yaml, json_mock):
walk_mock.return_value = ['foo']
yaml.return_value = {
'template': {'foo': 'bar'}, 'containers': [{'foo': 'bar'}]}
json_mock.loads.return_value = {'status': {'phase': 'Running'}}
output = deploy(
no_push=False, skip_crd_check=True,
interactive=True,
extra_config_args={'registry': 'dockerhub'})
verify_successful_deploy(output, interactive=True)
# verify that kubectl commands are specifying namespace
for call_args in process_helpers.run_popen.call_args_list:
assert isinstance(call_args, type(call))
assert isinstance(call_args[0], tuple)
assert len(call_args[0]) > 0
command = call_args[0][0]
if command[0] == "kubectl":
assert "--namespace" in command
def test_deploy_interactive_two_files(walk_mock, progress_bar, popen_mock,
open_mock, template, kube_helpers,
process_helpers, verify_build,
verify_init, fetch_action_arg, sleep,
yaml, json_mock):
json_mock.loads.return_value = {'status': {'phase': 'Running'}}
yaml.return_value = {
'template': {'foo': 'bar'}, 'containers': [{'foo': 'bar'}]}
output = deploy(
no_push=False, skip_crd_check=True,
interactive=True,
extra_config_args={'registry': 'dockerhub', '<kube_spec>': 'r'})
verify_successful_deploy(output, interactive=True)
def test_deploy_interactive_pod_not_run(walk_mock, progress_bar, popen_mock,
open_mock, template, kube_helpers,
process_helpers, verify_build,
verify_init, fetch_action_arg, sleep,
yaml, json_mock):
json_mock.loads.return_value = {'status': {'phase': 'Error'}}
yaml.return_value = {
'template': {'foo': 'bar'}, 'containers': [{'foo': 'bar'}]}
with pytest.raises(ValueError):
output = deploy(
no_push=False, skip_crd_check=True,
interactive=True,
extra_config_args={'registry': 'dockerhub', '<kube_spec>': 'r'})
def test_deploy_update_app_run_id(open_mock, json_mock):
run_id = str(uuid.uuid4())
json_mock_data = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
json_mock.load.return_value = json_mock_data
DeployCommand._update_app_run_id(run_id)
assert json_mock_data['app_run_id'] == run_id
def test_image_push_error(walk_mock, progress_bar, popen_mock, open_mock,
template, kube_helpers, process_helpers, verify_build,
verify_init, fetch_action_arg, json_mock):
json_mock.load.return_value = {
'last_remote_container': 'gcr.io/app_name:container_id',
'last_push_duration': 0.18889}
# set up the mock to induce an error during the deploy
popen_mock.return_value.poll.return_value = 1
output_str = "normal output..."
error_str = "error message..."
build_output = MagicMock()
build_output.decode.return_value = output_str
error_output = MagicMock()
error_output.decode.return_value = error_str
popen_mock.return_value.communicate.return_value = (build_output,
error_output)
deploy_cmd = DeployCommand({'deploy': True,
'--skip-crd-check': True,
'--no-push': False})
deploy_cmd.config = {'name': 'app', 'namespace': 'namespace'}
deploy_cmd.config.update({'gceProject': 'gcr://projectfoo'})
with catch_stdout() as caught_output:
with pytest.raises(SystemExit):
deploy_cmd.action()
output = caught_output.getvalue()
# assert that we got the normal output, followed by the error message
output_location = output.find(output_str)
error_location = output.find(error_str)
assert all(var >= 0 for var in (output_location, error_location))
assert output_location < error_location
| 34.791367
| 85
| 0.652089
| 1,170
| 9,672
| 5.117949
| 0.189744
| 0.040414
| 0.034736
| 0.022211
| 0.456747
| 0.427856
| 0.389112
| 0.369572
| 0.361389
| 0.313293
| 0
| 0.007494
| 0.241212
| 9,672
| 277
| 86
| 34.916968
| 0.808421
| 0.087055
| 0
| 0.430769
| 0
| 0
| 0.129501
| 0.036238
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.117949
| false
| 0
| 0.030769
| 0.05641
| 0.220513
| 0.010256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6d61cff66c7d3846169dfff6eca952a90b72ddd
| 1,940
|
py
|
Python
|
packages/mccomponents/tests/mccomponentsbpmodule/sample/Broadened_E_Q_Kernel_TestCase.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 5
|
2017-01-16T03:59:47.000Z
|
2020-06-23T02:54:19.000Z
|
packages/mccomponents/tests/mccomponentsbpmodule/sample/Broadened_E_Q_Kernel_TestCase.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 293
|
2015-10-29T17:45:52.000Z
|
2022-01-07T16:31:09.000Z
|
packages/mccomponents/tests/mccomponentsbpmodule/sample/Broadened_E_Q_Kernel_TestCase.py
|
mcvine/mcvine
|
42232534b0c6af729628009bed165cd7d833789d
|
[
"BSD-3-Clause"
] | 1
|
2019-05-25T00:53:31.000Z
|
2019-05-25T00:53:31.000Z
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2010 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
standalone = True
import unittestX as unittest
import journal
debug = journal.debug( "Broadened_E_Q_Kernel_TestCase" )
warning = journal.warning( "Broadened_E_Q_Kernel_TestCase" )
import mcni
from mccomposite import mccompositebp
from mccomponents import mccomponentsbp
class TestCase(unittest.TestCase):
def test(self):
E_Q = "Q*Q/3."
S_Q = "1"
sigma_Q = "Q/2."
Qmin = 0; Qmax = 10
absorption_coefficient = scattering_coefficient = 1.
kernel = mccomponentsbp.create_Broadened_E_Q_Kernel(
E_Q, S_Q, sigma_Q,
Qmin, Qmax,
absorption_coefficient,
scattering_coefficient,
)
ei = 500 # meV
from mcni.utils import conversion
vil = conversion.e2v(ei)
vi = (0,0,vil)
import numpy.linalg as nl
import numpy as np
for i in range(10):
event = mcni.neutron(
r = (0,0,0), v = vi,
prob = 1, time = 0 )
kernel.scatter( event );
vf = np.array(event.state.velocity)
diffv = vi - vf
Q = conversion.v2k(nl.norm(diffv))
ef = conversion.v2e(nl.norm(vf))
E = ei - ef
# print E, Q, event
E1 = eval(E_Q)
continue
return
pass # end of TestCase
def main():
unittest.main()
return
if __name__ == "__main__":
main()
# version
__id__ = "$Id: TestCase.py 696 2010-11-09 06:23:06Z linjiao $"
# End of file
| 23.373494
| 80
| 0.491753
| 207
| 1,940
| 4.439614
| 0.516908
| 0.015234
| 0.035909
| 0.055495
| 0.054407
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037677
| 0.343299
| 1,940
| 82
| 81
| 23.658537
| 0.683673
| 0.209278
| 0
| 0.042553
| 0
| 0
| 0.084433
| 0.038259
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042553
| false
| 0.021277
| 0.170213
| 0
| 0.276596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6d751bc3f23bc91c2716777ca9ac12139d4b799
| 6,325
|
py
|
Python
|
Model_setup/NEISO_data_file/downsampling_generators_v1.py
|
keremakdemir/ISONE_UCED
|
11ce34c5ac5d34dcab771640f41c0d2ce4ab21f9
|
[
"MIT"
] | null | null | null |
Model_setup/NEISO_data_file/downsampling_generators_v1.py
|
keremakdemir/ISONE_UCED
|
11ce34c5ac5d34dcab771640f41c0d2ce4ab21f9
|
[
"MIT"
] | null | null | null |
Model_setup/NEISO_data_file/downsampling_generators_v1.py
|
keremakdemir/ISONE_UCED
|
11ce34c5ac5d34dcab771640f41c0d2ce4ab21f9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 18:45:34 2020
@author: kakdemi
"""
import pandas as pd
#importing generators
all_generators = pd.read_excel('generators2.xlsx', sheet_name='NEISO generators (dispatch)')
#getting all oil generators
all_oil = all_generators[all_generators['typ']=='oil'].copy()
#getting all generators in every zone
CT_oil = all_oil[all_oil['zone']=='CT'].copy()
ME_oil = all_oil[all_oil['zone']=='ME'].copy()
NEMA_oil = all_oil[all_oil['zone']=='NEMA'].copy()
NH_oil = all_oil[all_oil['zone']=='NH'].copy()
RI_oil = all_oil[all_oil['zone']=='RI'].copy()
SEMA_oil = all_oil[all_oil['zone']=='SEMA'].copy()
VT_oil = all_oil[all_oil['zone']=='VT'].copy()
WCMA_oil = all_oil[all_oil['zone']=='WCMA'].copy()
#defining zones
zones = ['CT','ME','NEMA','NH','RI','SEMA','VT','WCMA']
#getting all slack generators
all_slack = all_generators[all_generators['typ']=='slack'].copy()
#getting generators other than slack and oil
all_other = all_generators[(all_generators['typ']!='oil') & (all_generators['typ']!='slack')].copy()
#defining a function to downsample oil generators
def oil_downsampler(zone):
#copying the oil generators in that zone and sorting them by their seg1 heat rate
Selected_line_oil = globals()[zone+'_oil'].copy()
sorted_df = Selected_line_oil.sort_values(by=['seg1'])
sorted_df_reset = sorted_df.reset_index(drop=True)
#creating 3 chunks wrt their heatrates
heat_rate = list(sorted_df_reset.loc[:,'seg1'])
num = int(len(heat_rate)/3)
First_plant = sorted_df_reset.iloc[:num,:].copy()
Second_plant = sorted_df_reset.iloc[num:num*2,:].copy()
Third_plant = sorted_df_reset.iloc[num*2:,:].copy()
#finding the relevant parameters for the downsampled oil plants
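#each aggregated parameter below follows the same capacity-weighted mean: for a parameter x,
#the aggregate is sum_i(netcap_i * x_i) / sum_i(netcap_i) over the plants in a chunk, so
#larger plants weigh proportionally more. For example (chunk stands in for First/Second/Third_plant):
#   agg_seg1 = (chunk['netcap'] * chunk['seg1']).sum() / chunk['netcap'].sum()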
First_cap = First_plant.loc[:,'netcap'].sum()
Second_cap = Second_plant.loc[:,'netcap'].sum()
Third_cap = Third_plant.loc[:,'netcap'].sum()
netcap = [First_cap, Second_cap, Third_cap]
ramp_1 = First_cap
ramp_2 = Second_cap
ramp_3 = Third_cap
ramp = [ramp_1, ramp_2, ramp_3]
First_min_cap = First_cap*0.35
Second_min_cap = Second_cap*0.35
Third_min_cap = Third_cap*0.35
min_cap = [First_min_cap, Second_min_cap, Third_min_cap]
Min_u = [1, 1, 1]
Min_d = [1, 1, 1]
zones = [zone, zone, zone]
types = ['oil', 'oil', 'oil']
seg_1_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg1']
seg_1_1_new = seg_1_1.sum()/First_plant.loc[:,'netcap'].sum()
seg_1_2 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg2']
seg_1_2_new = seg_1_2.sum()/First_plant.loc[:,'netcap'].sum()
seg_1_3 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg3']
seg_1_3_new = seg_1_3.sum()/First_plant.loc[:,'netcap'].sum()
seg_2_1 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg1']
seg_2_1_new = seg_2_1.sum()/Second_plant.loc[:,'netcap'].sum()
seg_2_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg2']
seg_2_2_new = seg_2_2.sum()/Second_plant.loc[:,'netcap'].sum()
seg_2_3 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg3']
seg_2_3_new = seg_2_3.sum()/Second_plant.loc[:,'netcap'].sum()
seg_3_1 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg1']
seg_3_1_new = seg_3_1.sum()/Third_plant.loc[:,'netcap'].sum()
seg_3_2 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg2']
seg_3_2_new = seg_3_2.sum()/Third_plant.loc[:,'netcap'].sum()
seg_3_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg3']
seg_3_3_new = seg_3_3.sum()/Third_plant.loc[:,'netcap'].sum()
seg_1 = [seg_1_1_new, seg_2_1_new, seg_3_1_new]
seg_2 = [seg_1_2_new, seg_2_2_new, seg_3_2_new]
seg_3 = [seg_1_3_new, seg_2_3_new, seg_3_3_new]
var_om_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'var_om']
var_om_1_new = var_om_1.sum()/First_plant.loc[:,'netcap'].sum()
var_om_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'var_om']
var_om_2_new = var_om_2.sum()/Second_plant.loc[:,'netcap'].sum()
var_om_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'var_om']
var_om_3_new = var_om_3.sum()/Third_plant.loc[:,'netcap'].sum()
var_om = [var_om_1_new, var_om_2_new, var_om_3_new]
no_load_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'no_load']
no_load_1_new = no_load_1.sum()/First_plant.loc[:,'netcap'].sum()
no_load_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'no_load']
no_load_2_new = no_load_2.sum()/Second_plant.loc[:,'netcap'].sum()
no_load_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'no_load']
no_load_3_new = no_load_3.sum()/Third_plant.loc[:,'netcap'].sum()
no_load = [no_load_1_new, no_load_2_new, no_load_3_new]
st_cost_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'st_cost']
st_cost_1_new = st_cost_1.sum()/First_plant.loc[:,'netcap'].sum()
st_cost_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'st_cost']
st_cost_2_new = st_cost_2.sum()/Second_plant.loc[:,'netcap'].sum()
st_cost_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'st_cost']
st_cost_3_new = st_cost_3.sum()/Third_plant.loc[:,'netcap'].sum()
st_cost = [st_cost_1_new, st_cost_2_new, st_cost_3_new]
name = [zone+'_agg_oil_1', zone+'_agg_oil_2', zone+'_agg_oil_3']
#creating a dataframe that includes downsampled oil generators
list_labels = list(WCMA_oil.columns)
list_columns = [name, types, zones, netcap, seg_1, seg_2, seg_3, min_cap, ramp, Min_u,
Min_d, var_om, no_load, st_cost]
zipped_list = list(zip(list_labels, list_columns))
gen_df = dict(zipped_list)
df_oils = pd.DataFrame(gen_df)
return df_oils
#downsampling oil generators in every zone by using the defined function
for z in zones:
globals()[z+'_agg_oil_df'] = oil_downsampler(z)
#adding downsampled oil generators to create a complete list of generators
final_generators = pd.concat([all_other, CT_agg_oil_df, ME_agg_oil_df, NEMA_agg_oil_df,
NH_agg_oil_df, RI_agg_oil_df, SEMA_agg_oil_df, VT_agg_oil_df,
WCMA_agg_oil_df, all_slack], ignore_index=True)
#exporting the generators as an Excel file
final_generators.to_excel('generators.xlsx', sheet_name='NEISO generators (dispatch)', index=False)
| 47.201493
| 100
| 0.68253
| 1,067
| 6,325
| 3.660731
| 0.135895
| 0.116743
| 0.139785
| 0.091398
| 0.528418
| 0.454941
| 0.314132
| 0.180748
| 0
| 0
| 0
| 0.03114
| 0.147036
| 6,325
| 133
| 101
| 47.556391
| 0.692864
| 0.113043
| 0
| 0
| 0
| 0
| 0.103991
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010526
| false
| 0
| 0.010526
| 0
| 0.031579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6d7ef175de941485b4682919229774de09d58bb
| 307
|
py
|
Python
|
GUI1.py
|
otmanabdoun/IHM-Python
|
624e961c2f6966b98bf2c1bc4dd276b812954ba1
|
[
"Apache-2.0"
] | 3
|
2021-12-08T10:34:55.000Z
|
2022-01-17T21:02:40.000Z
|
GUI1.py
|
otmanabdoun/IHM-Python
|
624e961c2f6966b98bf2c1bc4dd276b812954ba1
|
[
"Apache-2.0"
] | null | null | null |
GUI1.py
|
otmanabdoun/IHM-Python
|
624e961c2f6966b98bf2c1bc4dd276b812954ba1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 16 19:47:41 2021
@author: User
"""
import tkinter as tk
racine = tk.Tk()
label = tk.Label(racine, text="J'adore Python !")
bouton = tk.Button(racine, text="Quitter", command=racine.destroy)
label.pack()
bouton.pack()
| 23.615385
| 80
| 0.579805
| 42
| 307
| 4.238095
| 0.714286
| 0.078652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057018
| 0.257329
| 307
| 13
| 81
| 23.615385
| 0.723684
| 0.237785
| 0
| 0
| 0
| 0
| 0.125581
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6d83253f8c1c21cef502fbe86bb43dc1f2be4ac
| 2,579
|
py
|
Python
|
app/routes/v1/endpoints/clickup.py
|
ertyurk/bugme
|
5a3ef3e089e0089055074c1c896c3fdc76600e93
|
[
"MIT"
] | null | null | null |
app/routes/v1/endpoints/clickup.py
|
ertyurk/bugme
|
5a3ef3e089e0089055074c1c896c3fdc76600e93
|
[
"MIT"
] | null | null | null |
app/routes/v1/endpoints/clickup.py
|
ertyurk/bugme
|
5a3ef3e089e0089055074c1c896c3fdc76600e93
|
[
"MIT"
] | null | null | null |
from fastapi import APIRouter, status, Body, HTTPException
from fastapi.encoders import jsonable_encoder
from starlette.responses import JSONResponse
from app.models.common import *
from app.models.clickup import *
from app.database.crud.clickup import *
router = APIRouter()
@router.get("/", response_description="Clickup integrations are retrieved.")
async def get_clickup_integrations():
clickups = await retrieve_clickups()
return (
ResponseModel(clickups, "Clickup integrations data retrieved successfully")
if len(clickups) > 0
else ResponseModel(clickups, "Empty list returned")
)
@router.post(
"/", response_description="Clickup integrations data added into the database."
)
async def add_clickup_a_integration(clickup: ClickupModel = Body(...)):
clickup = jsonable_encoder(clickup)
new_clickup = await add_new_clickup(clickup)
return ResponseModel(
new_clickup,
"clickup integration created successfully.",
status.HTTP_201_CREATED,
)
@router.get("/{id}/", response_description="Clickup data retrieved.")
async def find_clickup_integration(id):
clickup = await retrieve_clickup(id)
return (
ResponseModel(clickup, "Clickup integrations data retrieved successfully")
if clickup
else ErrorResponseModel(
"An error occured.", status.HTTP_404_NOT_FOUND, "Integration doesn't exist."
)
)
@router.put(
"/{id}/", response_description="Clickup integrations data updated in the database."
)
async def update_a_clickup_integration(
id: str, clickup: UpdateClickupModel = Body(...)
):
clickup = jsonable_encoder(clickup)
updated_clickup = await update_clickup_data(id, clickup)
return (
ResponseModel({"id": id}, "Clickup integration updated successfully")
if updated_clickup
else ErrorResponseModel(
"An error occurred",
status.HTTP_404_NOT_FOUND,
"There was an error updating the Clickup integration.",
)
)
@router.delete("/{id}/", response_description="Delete the integration")
async def delete_clickup_integration(id: str):
deleted_clickup = await delete_integration(id)
return (
ResponseModel(
"Integration with ID: {} removed".format(id),
"Integration deleted successfully",
)
if deleted_clickup
else ErrorResponseModel(
"An error occured",
status.HTTP_404_NOT_FOUND,
"Integration with id {0} doesn't exist".format(id),
)
)
| 31.839506
| 88
| 0.6867
| 275
| 2,579
| 6.28
| 0.294545
| 0.06601
| 0.06022
| 0.06601
| 0.259988
| 0.140127
| 0.086856
| 0.086856
| 0.086856
| 0.086856
| 0
| 0.006969
| 0.221016
| 2,579
| 80
| 89
| 32.2375
| 0.852663
| 0
| 0
| 0.161765
| 0
| 0
| 0.24273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.088235
| 0
| 0.161765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6d9219a9f3da8435460a41632a908023dbaa338
| 2,668
|
py
|
Python
|
cellfinder_core/main.py
|
npeschke/cellfinder-core
|
7a86a7d2c879c94da529ec6140f7e5c3f02bf288
|
[
"BSD-3-Clause"
] | 5
|
2021-01-22T11:40:01.000Z
|
2021-09-10T07:16:05.000Z
|
cellfinder_core/main.py
|
npeschke/cellfinder-core
|
7a86a7d2c879c94da529ec6140f7e5c3f02bf288
|
[
"BSD-3-Clause"
] | 38
|
2021-01-22T11:50:29.000Z
|
2022-03-11T11:04:06.000Z
|
cellfinder_core/main.py
|
npeschke/cellfinder-core
|
7a86a7d2c879c94da529ec6140f7e5c3f02bf288
|
[
"BSD-3-Clause"
] | 12
|
2021-06-18T09:57:24.000Z
|
2022-03-06T13:03:18.000Z
|
"""
N.B. imports are within functions to prevent tensorflow being imported before
its warnings are silenced
"""
import os
import logging
from imlib.general.logging import suppress_specific_logs
tf_suppress_log_messages = [
"multiprocessing can interact badly with TensorFlow"
]
def main(
signal_array,
background_array,
voxel_sizes,
start_plane=0,
end_plane=-1,
trained_model=None,
model_weights=None,
model="resnet50_tv",
batch_size=32,
n_free_cpus=2,
network_voxel_sizes=[5, 1, 1],
soma_diameter=16,
ball_xy_size=6,
ball_z_size=15,
ball_overlap_fraction=0.6,
log_sigma_size=0.2,
n_sds_above_mean_thresh=10,
soma_spread_factor=1.4,
max_cluster_size=100000,
cube_width=50,
cube_height=50,
cube_depth=20,
network_depth="50",
):
suppress_tf_logging(tf_suppress_log_messages)
from cellfinder_core.detect import detect
from cellfinder_core.classify import classify
from cellfinder_core.tools import prep
from pathlib import Path
home = Path.home()
install_path = home / ".cellfinder"
logging.info("Detecting cell candidates")
points = detect.main(
signal_array,
start_plane,
end_plane,
voxel_sizes,
soma_diameter,
max_cluster_size,
ball_xy_size,
ball_z_size,
ball_overlap_fraction,
soma_spread_factor,
n_free_cpus,
log_sigma_size,
n_sds_above_mean_thresh,
)
model_weights = prep.prep_classification(
trained_model, model_weights, install_path, model, n_free_cpus
)
if len(points) > 0:
logging.info("Running classification")
points = classify.main(
points,
signal_array,
background_array,
n_free_cpus,
voxel_sizes,
network_voxel_sizes,
batch_size,
cube_height,
cube_width,
cube_depth,
trained_model,
model_weights,
network_depth,
)
else:
logging.info("No candidates, skipping classification")
return points
# logging.info("Saving classified cells")
# save_cells(points, classified_points_path)
def suppress_tf_logging(tf_suppress_log_messages):
"""
Prevents many lines of logs such as:
"2019-10-24 16:54:41.363978: I tensorflow/stream_executor/platform/default
/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1"
"""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
for message in tf_suppress_log_messages:
suppress_specific_logs("tensorflow", message)
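# A minimal usage sketch (hypothetical arrays; real inputs would be 3D image stacks):
#
#   import numpy as np
#   from cellfinder_core.main import main
#
#   signal = np.random.random((30, 200, 200))
#   background = np.random.random((30, 200, 200))
#   detected_points = main(signal, background, voxel_sizes=(5, 2, 2))
#
# All remaining arguments keep the defaults declared in the signature of main() above.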
| 25.653846
| 78
| 0.664168
| 335
| 2,668
| 4.958209
| 0.441791
| 0.030102
| 0.031306
| 0.050572
| 0.068633
| 0.045756
| 0.045756
| 0
| 0
| 0
| 0
| 0.031424
| 0.260495
| 2,668
| 103
| 79
| 25.902913
| 0.810441
| 0.137931
| 0
| 0.123457
| 0
| 0
| 0.083774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024691
| false
| 0
| 0.08642
| 0
| 0.123457
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6d9b9257b4bb7dd1463fcb578829bc893311e39
| 1,378
|
py
|
Python
|
server.py
|
rezist-ro/rezistenta.tv
|
0c0dfa4842061baf2b575688588c5d77cfdba427
|
[
"MIT"
] | null | null | null |
server.py
|
rezist-ro/rezistenta.tv
|
0c0dfa4842061baf2b575688588c5d77cfdba427
|
[
"MIT"
] | null | null | null |
server.py
|
rezist-ro/rezistenta.tv
|
0c0dfa4842061baf2b575688588c5d77cfdba427
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import dateutil.parser
import flask
import json
import os
import time
import urllib
import yaml
EPISODES = yaml.load(open("episodes.yaml").read())
app = flask.Flask(__name__,
static_path="/assets",
static_folder="assets")
app.jinja_env.filters["strftime"] = \
lambda str, fmt: dateutil.parser.parse(str).strftime(fmt)
app.jinja_env.filters["quote_plus"] = lambda u: urllib.quote_plus(u)
ASSETS = os.path.join(app.root_path, "assets")
@app.route("/favicon.ico")
def favicon():
return flask.send_from_directory(
ASSETS,
"favicon.ico",
mimetype="image/icon")
@app.route("/")
def home():
return flask.render_template("pages/home.html",
playlist=os.environ["PLAYLIST"],
episodes=EPISODES,
autoplay=not app.debug)
@app.route("/episod/<int:number>")
def episode(number):
if number < 1:
return "not found"
elif number > len(EPISODES):
return "coming soon"
else:
episode = EPISODES[len(EPISODES) - number]
template = "pages/episode/%s.html" % (
"youtube" if "yt" in episode else "facebook"
)
return flask.render_template(template,
number=number,
episode=episode,
episodes=EPISODES)
| 25.054545
| 68
| 0.592163
| 157
| 1,378
| 5.101911
| 0.464968
| 0.029963
| 0.027466
| 0.044944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00202
| 0.281567
| 1,378
| 54
| 69
| 25.518519
| 0.807071
| 0.008708
| 0
| 0
| 0
| 0
| 0.135631
| 0.015396
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.166667
| 0.047619
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6dce6f716b933d2a36c1e77462d5b0eb2326793
| 5,449
|
py
|
Python
|
transformer.py
|
ghafran/KerasPersonLab
|
fcd80b62247aee8bd1d41ff91e31c822950f561e
|
[
"MIT"
] | null | null | null |
transformer.py
|
ghafran/KerasPersonLab
|
fcd80b62247aee8bd1d41ff91e31c822950f561e
|
[
"MIT"
] | null | null | null |
transformer.py
|
ghafran/KerasPersonLab
|
fcd80b62247aee8bd1d41ff91e31c822950f561e
|
[
"MIT"
] | null | null | null |
import numpy as np
from math import cos, sin, pi
import cv2
import random
from config import config, TransformationParams
from data_prep import map_coco_to_personlab
class AugmentSelection:
def __init__(self, flip=False, degree = 0., crop = (0,0), scale = 1.):
self.flip = flip
self.degree = degree #rotate
self.crop = crop #shift actually
self.scale = scale
@staticmethod
def random():
flip = random.uniform(0.,1.) > TransformationParams.flip_prob
degree = random.uniform(-1.,1.) * TransformationParams.max_rotate_degree
scale = (TransformationParams.scale_max - TransformationParams.scale_min)*random.uniform(0.,1.)+TransformationParams.scale_min \
if random.uniform(0.,1.) < TransformationParams.scale_prob else 1.
x_offset = int(random.uniform(-1.,1.) * TransformationParams.center_perterb_max);
y_offset = int(random.uniform(-1.,1.) * TransformationParams.center_perterb_max);
return AugmentSelection(flip, degree, (x_offset,y_offset), scale)
@staticmethod
def unrandom():
flip = False
degree = 0.
scale = 1.
x_offset = 0
y_offset = 0
return AugmentSelection(flip, degree, (x_offset,y_offset), scale)
def affine(self, center=(config.IMAGE_SHAPE[1]//2, config.IMAGE_SHAPE[0]//2) , scale_self=1.):
# the main idea: we will do all image transformations with one affine matrix.
# this saves a lot of cpu and makes the code significantly shorter
# same affine matrix could be used to transform joint coordinates afterwards
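# In homogeneous coordinates a point p = [x, y, 1]^T is mapped to
#   p' = center2center @ flip @ scale @ rotate @ center2zero @ p,
# i.e. the rightmost matrix acts first: move the crop center to the origin, rotate,
# scale, optionally mirror, then translate back to the output image center. Only the
# top two rows are returned below, which is the 2x3 matrix expected by cv2.warpAffine
# and by the keypoint transform in Transformer.transform.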
A = self.scale * cos(self.degree / 180. * pi )
B = self.scale * sin(self.degree / 180. * pi )
# scale_size = TransformationParams.target_dist / scale_self * self.scale
scale_size = TransformationParams.target_dist / self.scale
(width, height) = center
center_x = width + self.crop[0]
center_y = height + self.crop[1]
center2zero = np.array( [[ 1., 0., -center_x],
[ 0., 1., -center_y ],
[ 0., 0., 1. ]] )
rotate = np.array( [[ A, B, 0 ],
[ -B, A, 0 ],
[ 0, 0, 1. ] ])
scale = np.array( [[ scale_size, 0, 0 ],
[ 0, scale_size, 0 ],
[ 0, 0, 1. ] ])
flip = np.array( [[ -1 if self.flip else 1., 0., 0. ],
[ 0., 1., 0. ],
[ 0., 0., 1. ]] )
center2center = np.array( [[ 1., 0., config.IMAGE_SHAPE[1]//2],
[ 0., 1., config.IMAGE_SHAPE[0]//2 ],
[ 0., 0., 1. ]] )
# order of combination is reversed
combined = center2center.dot(flip).dot(scale).dot(rotate).dot(center2zero)
return combined[0:2]
class Transformer:
@staticmethod
def transform(img, masks, keypoints, aug=AugmentSelection.random()):
# warp picture and mask
M = aug.affine(center=(img.shape[1]//2, img.shape[0]//2))
cv_shape = (config.IMAGE_SHAPE[1], config.IMAGE_SHAPE[0])
# TODO: need to understand this, scale_provided[0] is height of main person divided by 368, calculated in generate_hdf5.py
# print(img.shape)
# for i, img in enumerate(input_transform_targets):
img = cv2.warpAffine(img, M, cv_shape, flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=(127,127,127))
# concat = np.stack(output_transform_targets, axis=-1)
# fix from https://github.com/octiapp/KerasPersonLab/issues/2
# masks = cv2.warpAffine(masks, M, cv_shape, flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
out_masks = np.zeros(cv_shape[::-1]+(masks.shape[-1],))
for i in range(masks.shape[-1]):
out_masks[:,:,i] = cv2.warpAffine(masks[:,:,i], M, cv_shape, flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
masks = out_masks
# warp key points
#TODO: a joint could be cropped by augmentation; in that case we should mark it as invisible.
#update: maybe we don't need it actually; the original code removed parts sliced away by more than half, so maybe we should keep it
keypoints = map_coco_to_personlab(keypoints)
original_points = keypoints.copy()
# print keypoints
original_points[:,:,2]=1 # we reuse the 3rd column in a completely different way here; it is a hack
converted_points = np.matmul(M, original_points.transpose([0,2,1])).transpose([0,2,1])
keypoints[:,:,0:2]=converted_points
cropped_kp = keypoints[:,:,0] >= config.IMAGE_SHAPE[1]
cropped_kp = np.logical_or(cropped_kp, keypoints[:,:,1] >= config.IMAGE_SHAPE[0])
cropped_kp = np.logical_or(cropped_kp, keypoints[:,:,0] < 0)
cropped_kp = np.logical_or(cropped_kp, keypoints[:,:,1] < 0)
keypoints[cropped_kp, 2] = 0
# we just flipped the image, i.e. the right leg just became the left leg, and vice versa
if aug.flip:
tmpLeft = keypoints[:, config.LEFT_KP, :]
tmpRight = keypoints[:, config.RIGHT_KP, :]
keypoints[:, config.LEFT_KP, :] = tmpRight
keypoints[:, config.RIGHT_KP, :] = tmpLeft
# print keypoints
return img, masks, keypoints
| 41.915385
| 142
| 0.599193
| 693
| 5,449
| 4.58153
| 0.287157
| 0.008819
| 0.040315
| 0.021417
| 0.322205
| 0.225512
| 0.200315
| 0.200315
| 0.200315
| 0.098898
| 0
| 0.035106
| 0.278583
| 5,449
| 129
| 143
| 42.24031
| 0.772577
| 0.215452
| 0
| 0.126582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007752
| 0
| 1
| 0.063291
| false
| 0
| 0.075949
| 0
| 0.21519
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6dee5544a49eb20feb56cbcfdbdf81cda6aae63
| 10,859
|
py
|
Python
|
NLP/UNIMO/src/finetune/visual_entailment.py
|
zhangyimi/Research
|
866f91d9774a38d205d6e9a3b1ee6293748261b3
|
[
"Apache-2.0"
] | 1,319
|
2020-02-14T10:42:07.000Z
|
2022-03-31T15:42:18.000Z
|
NLP/UNIMO/src/finetune/visual_entailment.py
|
green9989/Research
|
94519a72e7936c77f62a31709634b72c09aabf74
|
[
"Apache-2.0"
] | 192
|
2020-02-14T02:53:34.000Z
|
2022-03-31T02:25:48.000Z
|
NLP/UNIMO/src/finetune/visual_entailment.py
|
green9989/Research
|
94519a72e7936c77f62a31709634b72c09aabf74
|
[
"Apache-2.0"
] | 720
|
2020-02-14T02:12:38.000Z
|
2022-03-31T12:21:15.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for visual_entailment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import time
import numpy as np
import paddle.fluid as fluid
from model.unimo_finetune import UNIMOModel
from eval import glue_eval
from collections import OrderedDict
from utils.utils import print_eval_log
def kl_divergence_with_logits(q_logits, p_logits):
"""
symmetric KL-divergence (See SMART, Sec 3.1)
q_logits: logits
p_logits: delta_logits
"""
q = fluid.layers.softmax(input=q_logits)
p = fluid.layers.softmax(input=p_logits)
kl_qp = fluid.layers.reduce_sum(q * (fluid.layers.log(q) - fluid.layers.log(p)), -1)
kl_pq = fluid.layers.reduce_sum(p * (fluid.layers.log(p) - fluid.layers.log(q)), -1)
vat_loss = fluid.layers.mean(x=kl_qp+kl_pq)
return vat_loss
def create_model(args, config, pyreader_name="train_reader", is_train=True):
"""create_model"""
shapes = [[-1, args.max_seq_len, 1], # src_ids
[-1, args.max_seq_len, 1], # pos_ids
[-1, args.max_seq_len, 1], # sent_ids
[-1, args.max_img_len + args.max_seq_len, args.max_img_len + args.max_seq_len], # input_mask
[-1, args.max_img_len, 1], # v_mask
[-1, args.max_seq_len, 1], # t_mask
[-1, args.max_img_len, config["image_embedding_size"]], # image_embedding
[-1, args.max_img_len, 5], # image_loc
[-1, 1] # labels
]
dtypes = ['int64', 'int64', 'int64', 'float32', 'float32', 'float32', 'float32','float32', 'int64']
lod_levels = [0, 0, 0, 0, 0, 0, 0, 0, 0]
pyreader = fluid.layers.py_reader(
capacity=70,
shapes=shapes,
dtypes=dtypes,
lod_levels=lod_levels,
name=pyreader_name,
use_double_buffer=True)
(src_ids, pos_ids, sent_ids, input_mask, v_mask, t_mask, image_embedding, image_loc, labels) \
= fluid.layers.read_file(pyreader)
emb_ids = {"word_embedding": src_ids, "sent_embedding": sent_ids, "pos_embedding": pos_ids}
image_input = {"image_embedding": image_embedding, "loc_embedding": image_loc}
adv_step, adv_lr, norm_type, adv_max_norm, adv_init_mag = \
args.adv_step, args.adv_lr, args.norm_type, args.adv_max_norm, args.adv_init_mag
assert adv_step > 0 and adv_init_mag > 0
def get_loss_and_logits(text_feats, image_feats):
feats = text_feats + image_feats
cls_params_name = ["cls_out_w_0", "cls_out_b_0"]
feats = fluid.layers.fc(
input=feats,
size=2048,
param_attr=fluid.ParamAttr(
name=cls_params_name[0],
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name=cls_params_name[1], initializer=fluid.initializer.Constant(0.)))
feats = fluid.layers.dropout(
x=feats,
dropout_prob=0.1,
dropout_implementation="upscale_in_train")
cls_params_name = ["cls_out_w_1", "cls_out_b_1"]
logits = fluid.layers.fc(
input=feats,
size=args.num_labels,
param_attr=fluid.ParamAttr(
name=cls_params_name[0],
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name=cls_params_name[1], initializer=fluid.initializer.Constant(0.)))
ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
logits=logits, label=labels, return_softmax=True)
loss = fluid.layers.mean(x=ce_loss) / adv_step
return loss, logits, probs
def init_delta(input, mask, shape, name='text'):
real_seq_len = fluid.layers.shape(input)[1]
fake = fluid.layers.data(name=name+"_fake", shape=shape, dtype='float32')
mask_slice = fluid.layers.slice(mask, axes=[1], starts=[0], ends=fluid.layers.shape(mask)[1])
length = fluid.layers.reduce_sum(mask_slice, dim=1, keep_dim=True) * shape[-1]
# l2 norm
delta = fluid.layers.uniform_random_batch_size_like(mask, shape=fake.shape, min=-1.0, max=1.0)
delta = fluid.layers.slice(delta, axes=[1], starts=[0], ends=real_seq_len)
delta = delta * mask_slice
mag = adv_init_mag / fluid.layers.sqrt(length)
delta = delta * mag
return delta
if is_train:
text_emb_shape = [-1, args.max_seq_len, config['hidden_size']]
text_delta = init_delta(src_ids, t_mask, text_emb_shape, name='text')
image_emb_shape = [-1, args.max_img_len, config['image_embedding_size']]
image_delta = init_delta(image_embedding, v_mask, image_emb_shape, name='img')
else:
text_delta, image_delta = None, None
def pgd_with_l2(loss, delta):
# grad
delta_grad = fluid.backward.gradients(loss, delta)[0]
# l2 norm
delta_norm = fluid.layers.sqrt(fluid.layers.reduce_sum(fluid.layers.pow(fluid.layers.reshape(delta_grad, \
[fluid.layers.shape(delta_grad)[0], -1]), factor=2), dim=1, keep_dim=True))
delta_norm = fluid.layers.clamp(delta_norm, min=float(1e-8))
# pgd
delta = delta + adv_lr * delta_grad / delta_norm
# projection
if adv_max_norm > 0:
exceed_mask = (delta_norm > adv_max_norm).astype('float32')
reweights = (adv_max_norm / delta_norm) * exceed_mask + (1 - exceed_mask)
delta = delta * reweights
delta_grad.stop_gradient=True
return delta
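# pgd_with_l2 above performs one projected gradient ascent step on the perturbation:
#   delta <- delta + adv_lr * g / ||g||_2, with g = d(loss)/d(delta),
# and, when adv_max_norm > 0, any delta whose L2 norm exceeds adv_max_norm is rescaled
# back onto the ball of that radius.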
loss = None
for iter in range(adv_step):
vl_pure = UNIMOModel(
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
vl_text = UNIMOModel(
text_adv_delta=text_delta,
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
vl_image = UNIMOModel(
image_adv_delta=image_delta,
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
h_pure_text, h_pure_image = vl_pure.get_pooled_output()
h_text_text, h_text_image = vl_text.get_pooled_output()
h_image_text, h_image_image = vl_image.get_pooled_output()
loss_pure, logit_pure, probs_pure = get_loss_and_logits(h_pure_text, h_pure_image)
loss_text, logit_text, probs_text = get_loss_and_logits(h_text_text, h_text_image)
loss_image, logit_image, probs_image = get_loss_and_logits(h_image_text, h_image_image)
if is_train:
text_delta = pgd_with_l2(loss_text, text_delta)
image_delta = pgd_with_l2(loss_image, image_delta)
kl_adv_text_loss = kl_divergence_with_logits(logit_pure, logit_text)
kl_adv_image_loss = kl_divergence_with_logits(logit_pure, logit_image)
cur_loss = loss_pure + loss_text + loss_image + kl_adv_text_loss + kl_adv_image_loss
loss = cur_loss if loss is None else loss + cur_loss
num_seqs = fluid.layers.create_tensor(dtype='int64')
accuracy = fluid.layers.accuracy(input=probs_pure, label=labels, total=num_seqs)
graph_vars = {
"loss": loss,
"probs": probs_pure,
"accuracy": accuracy,
"labels": labels,
"num_seqs": num_seqs
}
for k, v in graph_vars.items():
v.persistable = False
return pyreader, graph_vars
def evaluate(args, exe, test_pyreader, graph_vars, eval_phase, dev_count=1, gpu_id=0):
"""evaluate"""
all_mat = []
test_pyreader.start()
time_begin = time.time()
fetch_list = [graph_vars["probs"].name, graph_vars["labels"].name]
while True:
try:
np_probs, np_labels = exe.run(fetch_list=fetch_list)
np_preds = np.argmax(np_probs, axis=1).reshape((-1, 1))
np_labels = np_labels.reshape((-1, 1))
mat = np.concatenate([np_preds, np_labels], axis=1)
all_mat.extend(mat.tolist())
except fluid.core.EOFException:
test_pyreader.reset()
break
all_mat = np.array(all_mat)
time_end = time.time()
save_file = "%s/%s.trainers_%d.part_%d.npy" % (args.eval_dir, eval_phase, dev_count, gpu_id)
np.save(save_file, all_mat)
tmp_file = "%s/%s.trainers_%d.part_%d.finish" % (args.eval_dir, eval_phase, dev_count, gpu_id)
tmp_writer = open(tmp_file, "w")
tmp_writer.close()
if gpu_id == 0:
while True:
ret = os.popen('find %s -maxdepth 1 -name "%s.trainers_%d.part_*.finish"' %
(args.eval_dir, eval_phase, dev_count)).readlines()
if len(ret) != dev_count:
time.sleep(1)
continue
else:
break
all_mats = []
save_files = glob.glob("%s/%s.trainers_%d.part_*.npy" % (args.eval_dir, eval_phase, dev_count))
for cur_save_file in save_files:
mat = np.load(cur_save_file).tolist()
all_mats.extend(mat)
all_mats = np.array(all_mats)
cur_time = str(int(time.time()))
os.system("mkdir %s/%s" % (args.eval_dir, cur_time))
os.system("mv %s/%s.trainers_%d.* %s/%s" % (args.eval_dir, eval_phase, dev_count, args.eval_dir, cur_time))
ret = OrderedDict()
ret['phase'] = eval_phase
ret['loss'] = -1
ret['data_num'] = all_mats.shape[0]
ret['used_time'] = round(time_end - time_begin, 4)
metrics = OrderedDict()
metrics["simple_accuracy"] = glue_eval.simple_accuracy
if args.eval_mertrics in metrics:
ret_metric = metrics[args.eval_mertrics](all_mats[:, 0], all_mats[:, 1])
ret.update(ret_metric)
print_eval_log(ret)
else:
raise ValueError('unsupported metric {}'.format(args.eval_mertrics))
return ret
else:
return None
| 38.644128
| 115
| 0.634773
| 1,511
| 10,859
| 4.25546
| 0.212442
| 0.054743
| 0.012442
| 0.014152
| 0.259565
| 0.211353
| 0.170295
| 0.154121
| 0.120684
| 0.11042
| 0
| 0.016177
| 0.254259
| 10,859
| 280
| 116
| 38.782143
| 0.777846
| 0.077447
| 0
| 0.2
| 0
| 0
| 0.057375
| 0.011957
| 0
| 0
| 0
| 0
| 0.004762
| 1
| 0.028571
| false
| 0
| 0.057143
| 0
| 0.119048
| 0.014286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6df6e5deaed8c701c0957596bd842d1b7c2b65f
| 923
|
py
|
Python
|
leetcode/102-Medium-Binary-Tree-Level-Order-Traversal/answer.py
|
vaishali-bariwal/Practice-Coding-Questions
|
747bfcb1cb2be5340daa745f2b9938f0ee87c9ac
|
[
"Unlicense"
] | 25
|
2018-05-22T15:18:50.000Z
|
2022-01-08T02:41:46.000Z
|
leetcode/102-Medium-Binary-Tree-Level-Order-Traversal/answer.py
|
vaishali-bariwal/Practice-Coding-Questions
|
747bfcb1cb2be5340daa745f2b9938f0ee87c9ac
|
[
"Unlicense"
] | 1
|
2019-05-24T16:55:27.000Z
|
2019-05-24T16:55:27.000Z
|
leetcode/102-Medium-Binary-Tree-Level-Order-Traversal/answer.py
|
vaishali-bariwal/Practice-Coding-Questions
|
747bfcb1cb2be5340daa745f2b9938f0ee87c9ac
|
[
"Unlicense"
] | 18
|
2018-09-20T15:39:26.000Z
|
2022-03-02T21:38:22.000Z
|
#!/usr/bin/python3
#------------------------------------------------------------------------------
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def levelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
stack = [(root, 0)]
result = []
while stack:
(node, level) = stack.pop(0)
if level == len(result):
result.append([])
result[level].append(node.val)
if node.left: stack.append((node.left, level+1))
if node.right: stack.append((node.right, level+1))
return result
#------------------------------------------------------------------------------
#Testing
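# A minimal, self-contained check (a sketch, not the original test harness): build the
# tree [3, 9, 20, null, null, 15, 7] by hand and verify the level-order grouping.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

if __name__ == "__main__":
    root = TreeNode(3)
    root.left, root.right = TreeNode(9), TreeNode(20)
    root.right.left, root.right.right = TreeNode(15), TreeNode(7)
    assert Solution().levelOrder(root) == [[3], [9, 20], [15, 7]]
    print(Solution().levelOrder(root))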
| 26.371429
| 79
| 0.40195
| 84
| 923
| 4.369048
| 0.464286
| 0.081744
| 0.081744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007962
| 0.31961
| 923
| 34
| 80
| 27.147059
| 0.576433
| 0.406284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6e0a15a9ec84da1c3d497af8bd4ec8d117edbbd
| 4,291
|
py
|
Python
|
sparsely_lstmvae_main.py
|
pengkangzaia/usad
|
937a29c24632cfa31e0c626cd5b058b3af74ef94
|
[
"BSD-3-Clause"
] | null | null | null |
sparsely_lstmvae_main.py
|
pengkangzaia/usad
|
937a29c24632cfa31e0c626cd5b058b3af74ef94
|
[
"BSD-3-Clause"
] | null | null | null |
sparsely_lstmvae_main.py
|
pengkangzaia/usad
|
937a29c24632cfa31e0c626cd5b058b3af74ef94
|
[
"BSD-3-Clause"
] | null | null | null |
from model.sparsely_lstm_vae import *
import torch.utils.data as data_utils
from sklearn import preprocessing
from utils.eval_methods import *
device = get_default_device()
# Read data
# normal = pd.read_csv("data/SWaT_Dataset_Normal_v1.csv") # , nrows=1000)
normal = pd.read_csv("data/SWaT/SWaT_Dataset_Normal_v1.csv", nrows=10000) # , nrows=1000)
normal = normal.drop(["Timestamp", "Normal/Attack"], axis=1)
# normal.shape
# Transform all columns into float64
for i in list(normal):
normal[i] = normal[i].apply(lambda x: str(x).replace(",", "."))
normal = normal.astype(float)
# 数据预处理
min_max_scaler = preprocessing.MinMaxScaler()
x = normal.values
x_scaled = min_max_scaler.fit_transform(x)
normal = pd.DataFrame(x_scaled)
# Read data
# attack = pd.read_csv("data/SWaT_Dataset_Attack_v0.csv", sep=";") # , nrows=1000)
attack = pd.read_csv("data/SWaT/SWaT_Dataset_Attack_v0.csv", sep=";", nrows=10000) # , nrows=1000)
labels = [float(label != 'Normal') for label in attack["Normal/Attack"].values]
attack = attack.drop(["Timestamp", "Normal/Attack"], axis=1)
# Transform all columns into float64
for i in list(attack):
attack[i] = attack[i].apply(lambda x: str(x).replace(",", "."))
attack = attack.astype(float)
x = attack.values
x_scaled = min_max_scaler.transform(x)
attack = pd.DataFrame(x_scaled)
############## windows ###################
window_size = 12
# np.arange(window_size)[None, :] -> shape 1*12: (0, 1, 2, ..., 11), one row of 12 offsets
# np.arange(normal.shape[0] - window_size)[:, None] -> shape (1000-12)*1: (0, 1, 2, ...), 988 rows, one start index per row
# np.arange(window_size)[None, :] + np.arange(normal.shape[0] - window_size)[:, None] -> shape (1000-12)*12
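# A small worked example of this broadcasting trick (standalone sizes): with a window of 3
# over 6 samples, np.arange(3)[None, :] + np.arange(6 - 3)[:, None] gives
#   [[0, 1, 2],
#    [1, 2, 3],
#    [2, 3, 4]]
# and indexing the data array with this matrix yields every overlapping length-3 window.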
windows_normal = normal.values[np.arange(window_size)[None, :] + np.arange(attack.shape[0] - window_size)[:, None]]
windows_attack = attack.values[np.arange(window_size)[None, :] + np.arange(attack.shape[0] - window_size)[:, None]]
############## training ###################
# BATCH_SIZE = 7919
BATCH_SIZE = 200
N_EPOCHS = 100
hidden_size = 100
latent_size = 40
# w_size = windows_normal.shape[1] * windows_normal.shape[2] # window_size * feature_size
# z_size = windows_normal.shape[1] * hidden_size # window_size * hidden_size
windows_normal_train = windows_normal[:int(np.floor(.8 * windows_normal.shape[0]))]
windows_normal_val = windows_normal[int(np.floor(.8 * windows_normal.shape[0])):int(np.floor(windows_normal.shape[0]))]
train_loader = torch.utils.data.DataLoader(data_utils.TensorDataset(
torch.from_numpy(windows_normal_train).float().view(([windows_normal_train.shape[0], windows_normal_train.shape[1], windows_normal_train.shape[2]]))
), batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
val_loader = torch.utils.data.DataLoader(data_utils.TensorDataset(
torch.from_numpy(windows_normal_val).float().view(([windows_normal_val.shape[0], windows_normal_train.shape[1], windows_normal_train.shape[2]]))
), batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
test_loader = torch.utils.data.DataLoader(data_utils.TensorDataset(
torch.from_numpy(windows_attack).float().view(([windows_attack.shape[0], windows_attack.shape[1], windows_attack.shape[2]]))
), batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
model = SparselyLstmVae(BATCH_SIZE, window_size, windows_normal.shape[2], hidden_size, latent_size, former_step=3)
model = to_device(model, device)
val_loss, train_loss = training(N_EPOCHS, model, train_loader, val_loader)
plot_simple_history(val_loss)
plot_train_loss(train_loss)
torch.save({'ae': model.state_dict()}, "saved_model/model.pth")
############ testing #################
checkpoint = torch.load("saved_model/model.pth")
model.load_state_dict(checkpoint['ae'])
# every batch produces one result; collect them into a result list
results = testing(model, test_loader)
windows_labels = []
for i in range(len(labels) - window_size):
windows_labels.append(list(np.int_(labels[i:i + window_size])))
# if a window contains any anomalous point, the window is labeled anomalous (1)
y_test = [1.0 if (np.sum(window) > 0) else 0 for window in windows_labels]
# with too few samples the error can be very large
y_pred = np.concatenate(
[torch.stack(results[:-1]).flatten().detach().cpu().numpy(),
results[-1].flatten().detach().cpu().numpy()])
y_pred = (y_pred - y_pred.min()) / (y_pred.max() - y_pred.min())
threshold = ROC(y_test, y_pred)
t, th = bf_search(y_pred, y_test, start=0, end=1, step_num=1000, display_freq=50)
| 41.660194
| 152
| 0.723141
| 657
| 4,291
| 4.496195
| 0.242009
| 0.088016
| 0.037915
| 0.03893
| 0.458362
| 0.435342
| 0.342925
| 0.283683
| 0.283683
| 0.256601
| 0
| 0.035355
| 0.096947
| 4,291
| 102
| 153
| 42.068627
| 0.726968
| 0.189699
| 0
| 0.05
| 0
| 0
| 0.051817
| 0.027695
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6e20c3e769f1a5e89011c872f7f4c1dc10d94e8
| 542
|
py
|
Python
|
src/demo/tasks.py
|
MexsonFernandes/AsynchronousTasks-Django-Celery-RabbitMQ-Redis
|
b64b31cec4ccf8e0dca2cfe9faba40da647b94f7
|
[
"Apache-2.0"
] | 1
|
2019-01-17T09:16:06.000Z
|
2019-01-17T09:16:06.000Z
|
src/demo/tasks.py
|
MexsonFernandes/Asynchronous_Tasks-Django-Celery-RabbitMQ-Redis
|
b64b31cec4ccf8e0dca2cfe9faba40da647b94f7
|
[
"Apache-2.0"
] | 7
|
2019-10-20T18:47:34.000Z
|
2022-02-10T07:42:18.000Z
|
src/demo/tasks.py
|
MexsonFernandes/AsynchronousTasks-Django-Celery-RabbitMQ-Redis
|
b64b31cec4ccf8e0dca2cfe9faba40da647b94f7
|
[
"Apache-2.0"
] | 2
|
2019-10-20T18:47:59.000Z
|
2022-03-02T12:31:54.000Z
|
from __future__ import absolute_import, unicode_literals
from dcs.celeryconf import app
import time
from django.core.mail import EmailMessage
@app.task(bind=True, ignore_result=False, max_retries=3)
def demo_task1(self):
result = {
'val1': 1,
'val2': 2,
'val3': 3,
}
print("hellp")
from_email = 'testmyserverwebsite@gmail.com'
to_list = ['robomex2020@gmail.com',]
sendemail = EmailMessage("Message received!!!", "Hello test", str(from_email), to_list)
sendemail.send()
return result
| 25.809524
| 91
| 0.680812
| 68
| 542
| 5.235294
| 0.720588
| 0.050562
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027586
| 0.197417
| 542
| 20
| 92
| 27.1
| 0.790805
| 0
| 0
| 0
| 0
| 0
| 0.177449
| 0.092421
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.235294
| 0
| 0.352941
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6e829827c4e2ffcbb07be400f025860fb9ae813
| 10,409
|
py
|
Python
|
keras/models.py
|
kalyc/keras-apache-mxnet
|
5497ebd50a45ccc446b8944ebbe11fb7721a5533
|
[
"MIT"
] | 300
|
2018-04-04T05:01:21.000Z
|
2022-02-25T18:56:04.000Z
|
keras/models.py
|
kalyc/keras-apache-mxnet
|
5497ebd50a45ccc446b8944ebbe11fb7721a5533
|
[
"MIT"
] | 163
|
2018-04-03T17:41:22.000Z
|
2021-09-03T16:44:04.000Z
|
keras/models.py
|
kalyc/keras-apache-mxnet
|
5497ebd50a45ccc446b8944ebbe11fb7721a5533
|
[
"MIT"
] | 72
|
2018-04-21T06:42:30.000Z
|
2021-12-26T06:02:42.000Z
|
"""Model-related utilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import backend as K
from .utils.generic_utils import has_arg
from .utils.generic_utils import to_list
from .engine.input_layer import Input
from .engine.input_layer import InputLayer
from .engine.training import Model
from .engine.sequential import Sequential
from .engine.saving import save_model
from .engine.saving import load_model
from .engine.saving import model_from_config
from .engine.saving import model_from_yaml
from .engine.saving import model_from_json
from .engine.saving import save_mxnet_model
try:
import h5py
except ImportError:
h5py = None
def _clone_functional_model(model, input_tensors=None):
"""Clone a functional `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Model`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Model):
raise ValueError('Expected `model` argument '
'to be a `Model` instance, got ', model)
if isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a functional `Model` instance, '
'got a `Sequential` instance instead:', model)
layer_map = {} # Cache for created layers.
tensor_map = {} # Map {reference_tensor: (corresponding_tensor, mask)}
if input_tensors is None:
# Create placeholders to build the model on top of.
input_layers = []
input_tensors = []
for layer in model._input_layers:
input_tensor = Input(batch_shape=layer.batch_input_shape,
dtype=layer.dtype,
sparse=layer.sparse,
name=layer.name)
input_tensors.append(input_tensor)
# Cache newly created input layer.
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[layer] = newly_created_input_layer
for _original, _cloned in zip(model._input_layers, input_layers):
layer_map[_original] = _cloned
else:
# Make sure that all input tensors come from a Keras layer.
# If tensor comes from an input layer: cache the input layer.
input_tensors = to_list(input_tensors)
_input_tensors = []
for i, x in enumerate(input_tensors):
if not K.is_keras_tensor(x):
name = model._input_layers[i].name
input_tensor = Input(tensor=x,
name='input_wrapper_for_' + name)
_input_tensors.append(input_tensor)
# Cache newly created input layer.
original_input_layer = x._keras_history[0]
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[original_input_layer] = newly_created_input_layer
else:
_input_tensors.append(x)
input_tensors = _input_tensors
for x, y in zip(model.inputs, input_tensors):
tensor_map[x] = (y, None) # tensor, mask
# Iterated over every node in the reference model, in depth order.
depth_keys = list(model._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
for depth in depth_keys:
nodes = model._nodes_by_depth[depth]
for node in nodes:
# Recover the corresponding layer.
layer = node.outbound_layer
# Get or create layer.
if layer not in layer_map:
# Clone layer.
new_layer = layer.__class__.from_config(layer.get_config())
layer_map[layer] = new_layer
layer = new_layer
else:
# Reuse previously cloned layer.
layer = layer_map[layer]
# Don't call InputLayer multiple times.
if isinstance(layer, InputLayer):
continue
# Gather inputs to call the new layer.
reference_input_tensors = node.input_tensors
reference_output_tensors = node.output_tensors
# If all previous input tensors are available in tensor_map,
# then call node.inbound_layer on them.
computed_data = [] # List of tuples (input, mask).
for x in reference_input_tensors:
if x in tensor_map:
computed_data.append(tensor_map[x])
if len(computed_data) == len(reference_input_tensors):
# Call layer.
if node.arguments:
kwargs = node.arguments
else:
kwargs = {}
if len(computed_data) == 1:
computed_tensor, computed_mask = computed_data[0]
if has_arg(layer.call, 'mask'):
if 'mask' not in kwargs:
kwargs['mask'] = computed_mask
output_tensors = to_list(
layer(computed_tensor, **kwargs))
output_masks = to_list(
layer.compute_mask(computed_tensor,
computed_mask))
computed_tensors = [computed_tensor]
computed_masks = [computed_mask]
else:
computed_tensors = [x[0] for x in computed_data]
computed_masks = [x[1] for x in computed_data]
if has_arg(layer.call, 'mask'):
if 'mask' not in kwargs:
kwargs['mask'] = computed_masks
output_tensors = to_list(
layer(computed_tensors, **kwargs))
output_masks = to_list(
layer.compute_mask(computed_tensors,
computed_masks))
# Update tensor_map.
for x, y, mask in zip(reference_output_tensors,
output_tensors,
output_masks):
tensor_map[x] = (y, mask)
# Check that we did compute the model outputs,
# then instantiate a new model from inputs and outputs.
output_tensors = []
for x in model.outputs:
assert x in tensor_map, 'Could not compute output ' + str(x)
tensor, _ = tensor_map[x]
output_tensors.append(tensor)
return Model(input_tensors, output_tensors, name=model.name)
def _clone_sequential_model(model, input_tensors=None):
"""Clone a `Sequential` model instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Sequential`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Sequential` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a `Sequential` model instance, '
'but got:', model)
def clone(layer):
return layer.__class__.from_config(layer.get_config())
layers = [clone(layer) for layer in model.layers]
if input_tensors is None:
return Sequential(layers=layers, name=model.name)
else:
if len(to_list(input_tensors)) != 1:
raise ValueError('To clone a `Sequential` model, we expect '
' at most one tensor '
'as part of `input_tensors`.')
x = to_list(input_tensors)[0]
if K.is_keras_tensor(x):
origin_layer = x._keras_history[0]
if isinstance(origin_layer, InputLayer):
return Sequential(layers=[origin_layer] + layers,
name=model.name)
else:
raise ValueError('Cannot clone a `Sequential` model on top '
'of a tensor that comes from a Keras layer '
'other than an `InputLayer`. '
'Use the functional API instead.')
input_tensor = Input(tensor=x,
name='input_wrapper_for_' + str(x.name))
input_layer = input_tensor._keras_history[0]
return Sequential(layers=[input_layer] + layers, name=model.name)
def clone_model(model, input_tensors=None):
"""Clone any `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Model`
(could be a functional model or a Sequential model).
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if isinstance(model, Sequential):
return _clone_sequential_model(model, input_tensors=input_tensors)
else:
return _clone_functional_model(model, input_tensors=input_tensors)
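# A minimal usage sketch (assuming a trivial Sequential model; not part of this module):
#
#   from keras.models import Sequential, clone_model
#   from keras.layers import Dense
#
#   model = Sequential([Dense(2, input_shape=(4,))])
#   fresh = clone_model(model)               # same architecture, newly initialized weights
#   fresh.set_weights(model.get_weights())   # copy the weights over explicitly if needed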
| 41.142292
| 77
| 0.593525
| 1,202
| 10,409
| 4.941764
| 0.155574
| 0.072727
| 0.022896
| 0.022222
| 0.524916
| 0.428283
| 0.369697
| 0.341751
| 0.334848
| 0.304882
| 0
| 0.001893
| 0.340186
| 10,409
| 252
| 78
| 41.305556
| 0.862988
| 0.265539
| 0
| 0.176471
| 0
| 0
| 0.07296
| 0
| 0
| 0
| 0
| 0
| 0.006536
| 1
| 0.026144
| false
| 0
| 0.117647
| 0.006536
| 0.189542
| 0.006536
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6e86dd990b3c5cac611e5ac9c031855b2eafefb
| 2,223
|
py
|
Python
|
mmgp/kernels/wavelet_slice.py
|
axdahl/SC-MMGP
|
c6cd9d9de66bb7074925a4b6485f10a74bdd9f68
|
[
"Apache-2.0"
] | null | null | null |
mmgp/kernels/wavelet_slice.py
|
axdahl/SC-MMGP
|
c6cd9d9de66bb7074925a4b6485f10a74bdd9f68
|
[
"Apache-2.0"
] | null | null | null |
mmgp/kernels/wavelet_slice.py
|
axdahl/SC-MMGP
|
c6cd9d9de66bb7074925a4b6485f10a74bdd9f68
|
[
"Apache-2.0"
] | null | null | null |
'''
Wavelet kernel
slice allows kernel operation on feature subset
active_dims is iterable of feature dimensions to extract
input_dim must equal dimension defined by active_dims
'''
import numpy as np
import tensorflow as tf
from .. import util
from . import kernel
from .kernel_extras import *
class WaveletSlice(kernel.Kernel):
def __init__(self, input_dim, active_dims=None, shift=0, scale = 0.01,
white=0.01, input_scaling=False):
if input_scaling:
self.shift = tf.Variable(shift * tf.ones([input_dim]))
self.scale = tf.Variable(scale * tf.ones([input_dim]))
else:
self.shift = tf.Variable([shift], dtype=tf.float32)
self.scale = tf.Variable([scale], dtype=tf.float32)
self.input_dim = input_dim
self.active_dims = active_dims
self.white = white
def kernel(self, points1, points2=None):
if points2 is None:
points2 = points1
white_noise = (self.white * util.eye(tf.shape(points1)[0]) +
0.1 * self.white * tf.ones( [tf.shape(points1)[0], tf.shape(points1)[0]]))
else:
white_noise = 0.01 * self.white * tf.ones( [tf.shape(points1)[0], tf.shape(points2)[0]] )
points1, points2 = dim_slice(self, points1, points2)
def h(x):
# Zhang wavelet
#return tf.cos(1.75*x)*tf.exp(-0.5*x**2)
# mexican hat wavelet
return (1-x**2)*tf.exp(-0.5*x**2)
kern1, kern2 = h((points1 - self.shift)/tf.exp(self.scale)), h((points2 - self.shift)/tf.exp(self.scale))
kern1, kern2 = tf.reduce_prod(kern1, axis=1), tf.reduce_prod(kern2, axis=1)
kern = tf.einsum('i,j->ij', kern1, kern2)
return kern + white_noise
def diag_kernel(self, points):
def h(x):
# Zhang wavelet
return tf.cos(1.75*x)*tf.exp(-0.5*x**2)
# mexican hat wavelet
#return (1-x**2)*tf.exp(-0.5*x**2)
points = dim_slice_diag(self, points)
kern = tf.reduce_prod(h((points - self.shift)/tf.exp(self.scale)) , axis=1) **2
return kern + self.white
def get_params(self):
return [self.shift, self.scale]
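# Usage sketch (illustrative, not part of the original module). Assumes the
# TensorFlow 1.x style used elsewhere in this package and that dim_slice
# extracts the columns listed in active_dims, so input_dim must equal
# len(active_dims).
#
#   k = WaveletSlice(input_dim=2, active_dims=[0, 1], shift=0.0, scale=0.01)
#   X = tf.random_normal([5, 4])     # only columns 0 and 1 are sliced out
#   K_full = k.kernel(X)             # [5, 5] Gram matrix plus the white-noise term
#   K_diag = k.diag_kernel(X)        # [5] diagonal entries
#
# Note: kernel() currently uses the Mexican-hat form of h while diag_kernel()
# uses the Zhang form, so K_diag only matches the diagonal of K_full if the
# two definitions of h are kept in sync.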
| 34.734375
| 113
| 0.597391
| 322
| 2,223
| 4.034161
| 0.254658
| 0.026944
| 0.04234
| 0.046189
| 0.301001
| 0.227098
| 0.17398
| 0.17398
| 0.17398
| 0.17398
| 0
| 0.043452
| 0.264957
| 2,223
| 63
| 114
| 35.285714
| 0.75153
| 0.141251
| 0
| 0.102564
| 0
| 0
| 0.003692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.128205
| 0.076923
| 0.435897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6e91782ecbf3d082de6c4e80c1d94b9a36175e3
| 8,084
|
py
|
Python
|
transform.py
|
latenite4/python3
|
30e367471ba48e5fc0fb07327b636fcb9959e3e0
|
[
"Apache-2.0"
] | null | null | null |
transform.py
|
latenite4/python3
|
30e367471ba48e5fc0fb07327b636fcb9959e3e0
|
[
"Apache-2.0"
] | null | null | null |
transform.py
|
latenite4/python3
|
30e367471ba48e5fc0fb07327b636fcb9959e3e0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
#program to parse png images and change images
# cmd: python3 transform.py
# you must have local input/ and output/ directories
#
# name: R. Melton
# date: 12/27/20
# cmdline: python transform.py cmd show image='city.png' --ulx=1 --uly=2 --brx=0 --bry=9
# python transform.py show city.png
# python transform.py blur city.png
from image import Image
import numpy as np
import time, os, argparse, string
#from tkinter import *
import imghdr
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
def adjust_brightness(image,factor):
#scale each value by some amount
x_pixels, y_pixels,num_channels = image.array.shape
new_im = Image(x_pixels=x_pixels,y_pixels=y_pixels,num_channels=num_channels)
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
new_im.array[x,y,c] = image.array[x,y,c] * factor #non vectorized version
#vectorized version
    # new_im.array = image.array * factor  # this is faster
return new_im
#adjust the contrast by increasing difference from user
#defined midpoint
def adjust_contrast(image, factor, mid=0.5):
x_pixels, y_pixels,num_channels = image.array.shape
new_im = Image(x_pixels=x_pixels,y_pixels=y_pixels,num_channels=num_channels)
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
new_im.array[x,y,c] = (image.array[x,y,c] -mid)* factor + mid #non vectorized version
#vectorized version
# new_im.array = (image.array - mid) * factor + mid
return new_im
# blur and image
def blur(image, k_size):
#k_size is the number of pixels to use when doing the blur
#k_size=3 would be above and below and left neighbor, right neighbor pixels, and diagonal
#neighbor pixels.
im = Image(filename = image)
x_pixels, y_pixels,num_channels = im.array.shape
new_im = Image(x_pixels=x_pixels,y_pixels=y_pixels,num_channels=num_channels)
neighbor_range = k_size // 2
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
total = 0
for x_i in range(max(0,x-neighbor_range), min(new_im.x_pixels-1, x+neighbor_range)+1):
for y_i in range(max(0,y-neighbor_range), min(new_im.y_pixels-1, y+neighbor_range)+1):
                        total += im.array[x_i, y_i, c]  # read from the loaded Image object, not the filename string
new_im.array[x,y,c] = total / (k_size **2) # average for kernel size in image
return new_im
def apply_kernel(image, kernel):
# the kernel should be a 2D array that represents the kernel we'll use!
    # for the sake of simplicity of this implementation, let's assume that the kernel is SQUARE
# for example the sobel x kernel (detecting horizontal edges) is as follows:
# [1 0 -1]
# [2 0 -2]
# [1 0 -1]
x_pixels, y_pixels, num_channels = image.array.shape # represents x, y pixels of image, # channels (R, G, B)
new_im = Image(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels) # making a new array to copy values to!
neighbor_range = kernel.shape[0] // 2 # this is a variable that tells us how many neighbors we actually look at (ie for a 3x3 kernel, this value should be 1)
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
total = 0
for x_i in range(max(0,x-neighbor_range), min(new_im.x_pixels-1, x+neighbor_range)+1):
for y_i in range(max(0,y-neighbor_range), min(new_im.y_pixels-1, y+neighbor_range)+1):
x_k = x_i + neighbor_range - x
y_k = y_i + neighbor_range - y
kernel_val = kernel[x_k, y_k]
total += image.array[x_i, y_i, c] * kernel_val
new_im.array[x, y, c] = total
return new_im
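# Worked sketch of apply_kernel (illustrative): with a 3x3 kernel,
# neighbor_range = 3 // 2 = 1, so for pixel (x, y) the loops visit
# x_i in {x-1, x, x+1} and y_i in {y-1, y, y+1} (clamped at the image border),
# and the kernel is indexed at (x_i - x + 1, y_i - y + 1).
#
#   sobel = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
#   edges = apply_kernel(Image(filename='city.png'), sobel)
#   edges.write_image('edge.png')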
def combine_images(image1, image2):
    # let's combine two images using the square root of the sum of squares: value = sqrt(value_1**2 + value_2**2)
# size of image1 and image2 MUST be the same
x_pixels, y_pixels, num_channels = image1.array.shape # represents x, y pixels of image, # channels (R, G, B)
new_im = Image(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels) # making a new array to copy values to!
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
new_im.array[x, y, c] = (image1.array[x, y, c]**2 + image2.array[x, y, c]**2)**0.5
return new_im
def show_image(in_image):
path="input/"
img = mpimg.imread(path+in_image)
imgplot = plt.imshow(img)
plt.show()
# check for necessary parts of the runtime environment
def check_env( in_image):
#check to verify that output/input dirs exist:
path = './output/'
is_path = os.path.isdir(path)
if not is_path:
print('local ./output dir must exist, cannot continue...')
print(quit)
quit()
#verify output is writeable
is_w = os.access(path, os.W_OK)
if not is_w:
print('local ./output dir must be writeable, cannot continue...')
print(quit)
quit()
path = './input/'
is_path = os.path.isdir(path)
if not is_path:
print('local ./input dir must exist, cannot continue...')
print(quit)
quit()
#verify input image
if in_image:
thefile = 'input/'+in_image
print('file path: '+thefile)
is_file = os.path.isfile(thefile)
if not is_file:
print(f'local ./input file {in_image} must exist, cannot continue...')
print(quit)
quit()
if imghdr.what(thefile) != 'png':
print('wrong image file type, cannot continue...')
print(quit)
quit()
def cmd():
print("routine cmd")
# setup command line args and parms
# optional args have --
# fixed (required args do not have --)
def arg_init():
parser = argparse.ArgumentParser(description='Process an image.')
parser.add_argument("cmd",help="command to this program",type=str)
parser.add_argument("image",help="input image name for the command",type=str)
parser.add_argument("--ulx",action='store_true',help="upperleft x in image")
parser.add_argument("--uly",action='store_true',help="upperleft y in image")
parser.add_argument("--brx",action='store_true',help="bottomright x in image")
parser.add_argument("--bry",action='store_true',help="bottomright y in image")
group = parser.add_mutually_exclusive_group()
group.add_argument('--v', action='store_true',help="add more text output")
group.add_argument('--q', action='store_true',help="minimal output")
args = parser.parse_args()
print(args.image)
#if args.cmd != "show" and args.cmd != "blur":
return args
#def show_image(filename):
if __name__ == '__main__':
args = arg_init()
check_env(args.image)
lake = Image(filename = 'lake.png')
city = Image(filename='city.png')
start_time = time.time()
# brightened_im = adjust_brightness(lake, 1.7)
# brightened_im.write_image('brightened.png')
# darkened_im = adjust_brightness(lake, 0.3)
# darkened_im.write_image('darkened.png')
# incr_contrast = adjust_contrast(lake, 2,0.5)
# incr_contrast.write_image('incr_contrast.png')
# decr_contrast = adjust_contrast(lake, 0.5,0.5)
# decr_contrast.write_image('decr_contrast.png')
# blur_3 = blur(city,3)
# blur_3.write_image('blur_k3.png')
# blur_15 = blur(city,15)
# blur_15.write_image('blur_k15.png')
# let's apply a sobel kernel on the x and y axis
# sobel_x = apply_kernel(city, np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]))
# sobel_x.write_image('edge_x.png')
# sobel_y = apply_kernel(city, np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]]))
# sobel_y.write_image('edge_y.png')
# # this will show x and y edges
# sobel_xy = combine_images(sobel_x, sobel_y)
# sobel_xy.write_image('edge_xy.png')
if args.cmd == "show" and args.image:
show_image(args.image)
if args.cmd == "blur" and args.image:
        blur_15 = blur(args.image,15)
        blur_15.write_image(args.image+'blur_k15.png')
        show_image(args.image+'blur_k15.png')
if args.v:
print(f'total execution duration: {time.time() - start_time}s')
| 35.30131
| 162
| 0.671326
| 1,317
| 8,084
| 3.951405
| 0.191344
| 0.032283
| 0.037471
| 0.026902
| 0.410454
| 0.342813
| 0.307264
| 0.293428
| 0.285742
| 0.259224
| 0
| 0.017413
| 0.204354
| 8,084
| 229
| 163
| 35.30131
| 0.791667
| 0.336838
| 0
| 0.384
| 0
| 0
| 0.130246
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072
| false
| 0
| 0.048
| 0
| 0.168
| 0.112
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6e98c6da8123831026901d34d51a2a66f9be3c8
| 4,563
|
py
|
Python
|
plugins/wyr.py
|
Jeglet/pcbot
|
89178d4982151adb2fadfacdc3080e46cda9e891
|
[
"MIT"
] | null | null | null |
plugins/wyr.py
|
Jeglet/pcbot
|
89178d4982151adb2fadfacdc3080e46cda9e891
|
[
"MIT"
] | null | null | null |
plugins/wyr.py
|
Jeglet/pcbot
|
89178d4982151adb2fadfacdc3080e46cda9e891
|
[
"MIT"
] | null | null | null |
""" Would you rather? This plugin includes would you rather functionality
"""
import asyncio
import random
import re
import discord
import bot
import plugins
from pcbot import Config
client = plugins.client # type: bot.Client
db = Config("would-you-rather", data=dict(timeout=10, responses=["**{name}** would **{choice}**!"], questions=[]),
pretty=True)
command_pattern = re.compile(r"(.+)(?:\s+or|\s*,)\s+([^?]+)\?*")
sessions = set() # All running would you rather's are in this set
@plugins.argument("{open}option ...{close} or/, {open}other option ...{close}[?]", allow_spaces=True)
async def options(arg):
""" Command argument for receiving two options. """
match = command_pattern.match(arg)
assert match
assert not match.group(1).lower() == match.group(2).lower(), "**The choices cannot be the same.**"
return match.group(1), match.group(2)
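# Illustrative matches for command_pattern (placeholder inputs, not part of the
# plugin): the regex accepts either " or " or a comma as the separator and
# drops a trailing "?".
#
#   "lie or be lied to?"  -> ("lie", "be lied to")
#   "pizza, pasta"        -> ("pizza", "pasta")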
def get_choice(choices: list, choice: str):
""" Get the chosen option. This accept 1 and 2 as numbers. """
if choice == "1":
return 0
if choice == "2":
return 1
choices = list(map(str.lower, choices))
words = list(map(str.split, choices))
# Go through all words in the given message, and find any words unique to a choice
for word in choice.lower().split():
if word in words[0] and word not in words[1]:
return 0
elif word in words[1] and word not in words[0]:
return 1
# Invalid choice
return None
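# Illustrative behaviour of get_choice (not part of the original plugin):
#
#   get_choice(["lie", "be lied to"], "1")        -> 0     (numeric shortcut)
#   get_choice(["lie", "be lied to"], "be lied")  -> 1     ("be"/"lied" only occur in choice 2)
#   get_choice(["lie", "be lied to"], "neither")  -> None  (no word unique to either choice)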
@plugins.command(aliases="wyr rather either")
async def wouldyourather(message: discord.Message, opt: options = None):
""" Ask the bot if he would rather, or have the bot ask you.
**Examples:**
Registering a choice: `!wouldyourather lie or be lied to`
Asking the bot: `!wouldyourather`"""
# If there are no options, the bot will ask the questions (if there are any to choose from)
if opt is None:
assert message.channel.id not in sessions, "**A would you rather session is already in progress.**"
sessions.add(message.channel.id)
assert db.data["questions"], "**There are ZERO questions saved. Ask me one!**"
question = random.choice(db.data["questions"])
choices = question["choices"]
await client.say(message, "Would you rather **{}** or **{}**?".format(*choices))
timeout = db.data["timeout"]
replied = []
# Wait for replies from anyone in the channel
while True:
def check(m):
return m.channel == message.channel and m.author not in replied
try:
reply = await client.wait_for_message(timeout=timeout, check=check)
# Break on timeout
except asyncio.TimeoutError:
break
# Check if the choice is valid
choice = get_choice(choices, reply.content)
if choice is None:
continue
# Register that this author has replied
replied.append(reply.author)
# Update the answers in the DB
# We don't care about multiples, just the amount (yes it will probably be biased)
question["answers"][choice] += 1
name = reply.author.display_name
response = random.choice(db.data["responses"]).format(name=name, NAME=name.upper(),
choice=choices[choice])
await client.say(message, response)
# Say the total tallies
await client.say(message, "A total of {0} would **{2}**, while {1} would **{3}**!".format(
*question["answers"], *choices))
await db.asyncsave()
sessions.remove(message.channel.id)
# Otherwise, the member asked a question to the bot
else:
db.data["questions"].append(dict(
choices=list(opt),
answers=[0, 0]
))
await db.asyncsave()
answer = random.choice(opt)
await client.say(message, "**I would {}**!".format(answer))
@wouldyourather.command(aliases="delete", owner=True)
async def remove(message: discord.Message, opt: options):
""" Remove a wouldyourather question with the given options. """
for q in db.data["questions"]:
if q["choices"][0] == opt[0] and q["choices"][1] == opt[1]:
db.data["questions"].remove(q)
await db.asyncsave()
await client.say(message, "**Entry removed.**")
break
else:
await client.say(message, "**Could not find the question.**")
| 34.308271
| 114
| 0.601359
| 585
| 4,563
| 4.676923
| 0.31453
| 0.015351
| 0.030702
| 0.046053
| 0.035088
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008732
| 0.272189
| 4,563
| 132
| 115
| 34.568182
| 0.815116
| 0.150121
| 0
| 0.1375
| 0
| 0
| 0.154236
| 0.008725
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.025
| false
| 0
| 0.0875
| 0.0125
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6e9911a23d6bd5acc93e8e6fe7c90d813721358
| 5,690
|
py
|
Python
|
suit_tool/argparser.py
|
bergzand/suit-manifest-generator
|
da82651a8b02fd4d7261e826cc70b5c862dd94ea
|
[
"Apache-2.0"
] | 16
|
2018-03-16T23:56:47.000Z
|
2022-01-23T14:14:09.000Z
|
suit_tool/argparser.py
|
bergzand/suit-manifest-generator
|
da82651a8b02fd4d7261e826cc70b5c862dd94ea
|
[
"Apache-2.0"
] | 23
|
2018-06-05T14:30:23.000Z
|
2021-02-15T20:53:09.000Z
|
suit_tool/argparser.py
|
bergzand/suit-manifest-generator
|
da82651a8b02fd4d7261e826cc70b5c862dd94ea
|
[
"Apache-2.0"
] | 10
|
2018-03-16T23:56:52.000Z
|
2020-07-21T16:36:46.000Z
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright 2019-2020 ARM Limited or its affiliates
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import sys, argparse, os
from suit_tool import __version__
from suit_tool import keygen
from suit_tool import get_pubkey
import json
import re
def str_to_component(s):
types = {
'file' : ('file', lambda x : str(x.strip('"'))),
# 'desc' : ('component-description', lambda x : str(x.strip('"'))),
'inst' : ('install-id', lambda x : [ str(y) for y in eval(x) ]),
'uri' : ('uri', lambda x : str(x.strip('"')))
}
d = {types[k][0]:types[k][1](v) for k,v in [ re.split(r'=',e, maxsplit=1) for e in re.split(r''',\s*(?=["']?[a-zA-Z0-9_-]+["']?=)''', s)]}
return d
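# Illustrative parse (placeholder values, not part of the original module):
#
#   str_to_component('file="fw.bin", inst=["00", "01"], uri="http://example.com/fw.bin"')
#   # -> {'file': 'fw.bin', 'install-id': ['00', '01'], 'uri': 'http://example.com/fw.bin'}
#
# The splitting regex only breaks on commas that are followed by a `key=` token,
# so commas inside the inst list are preserved.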
class MainArgumentParser(object):
def __init__(self):
self.parser = self._make_parser()
def _make_parser(self):
parser = argparse.ArgumentParser(description = 'Create or transform a manifest.'
' Use {} [command] -h for help on each command.'.format(sys.argv[0]))
# Add all top-level commands
parser.add_argument('-l', '--log-level', choices=['debug','info','warning','exception'], default='info',
help='Set the verbosity level of console output.')
parser.add_argument('--version', action='version', version=__version__,
help='display the version'
)
subparsers = parser.add_subparsers(dest="action")
subparsers.required = True
create_parser = subparsers.add_parser('create', help='Create a new manifest')
# create_parser.add_argument('-v', '--manifest-version', choices=['1'], default='1')
create_parser.add_argument('-i', '--input-file', metavar='FILE', type=argparse.FileType('r'),
            help='An input file describing the update. The file must be formatted as JSON. The overall structure is described in README.')
create_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), required=True)
create_parser.add_argument('-f', '--format', metavar='FMT', choices=['suit', 'suit-debug', 'json'], default='suit')
create_parser.add_argument('-s', '--severable', action='store_true', help='Convert large elements to severable fields.')
create_parser.add_argument('-c', '--add-component', action='append', type=str_to_component, dest='components', default=[])
sign_parser = subparsers.add_parser('sign', help='Sign a manifest')
sign_parser.add_argument('-m', '--manifest', metavar='FILE', type=argparse.FileType('rb'), required=True)
sign_parser.add_argument('-k', '--private-key', metavar='FILE', type=argparse.FileType('rb'), required=True)
sign_parser.add_argument('-i', '--key-id', metavar='ID', type=str)
sign_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), required=True)
parse_parser = subparsers.add_parser('parse', help='Parse a manifest')
parse_parser.add_argument('-m', '--manifest', metavar='FILE', type=argparse.FileType('rb'), required=True)
parse_parser.add_argument('-j', '--json-output', default=False, action='store_true', dest='json')
get_pubkey_parser = subparsers.add_parser('pubkey', help='Get the public key for a supplied private key.')
get_pubkey_parser.add_argument('-k', '--private-key', metavar='FILE', type=argparse.FileType('rb'), required=True)
get_pubkey_parser.add_argument('-f', '--output-format', choices=get_pubkey.OutputFormaters.keys(), default='pem')
get_pubkey_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), default=sys.stdout)
keygen_parser = subparsers.add_parser('keygen', help='Create a signing key. Not for production use')
keygen_parser.add_argument('-t', '--type', choices=keygen.KeyGenerators.keys(),
default='secp256r1', help='The type of the key to generate')
keygen_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), default=sys.stdout)
keygen_parser.add_argument('-f', '--output-format', choices=keygen.OutputFormaters.keys(), default='pem')
keygen_parser.add_argument('-l', '--levels', help='The number of hss-lms levels', type=int, default=2)
sever_parser = subparsers.add_parser('sever', help='Remove one or more severable elements from the manifest, if present.')
sever_parser.add_argument('-m', '--manifest', metavar='FILE', type=argparse.FileType('rb'), required=True)
sever_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), required=True)
sever_parser.add_argument('-e', '--element', action='append', type=str, dest='elements', default=[])
sever_parser.add_argument('-a', '--all', action='store_true', default=False)
return parser
def parse_args(self, args=None):
self.options = self.parser.parse_args(args)
return self
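# Usage sketch (illustrative; the file names are placeholders and argparse
# opens them, so the input file must exist):
#
#   options = MainArgumentParser().parse_args(
#       ['create', '-i', 'update.json', '-o', 'manifest.suit']).options
#   options.action    # 'create'
#   options.format    # 'suit' (default)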
| 55.784314
| 142
| 0.649561
| 737
| 5,690
| 4.884668
| 0.290366
| 0.065
| 0.118056
| 0.070278
| 0.273889
| 0.253056
| 0.235833
| 0.216944
| 0.216944
| 0.216944
| 0
| 0.005869
| 0.161511
| 5,690
| 101
| 143
| 56.336634
| 0.74869
| 0.168541
| 0
| 0
| 0
| 0.016393
| 0.252071
| 0.007008
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065574
| false
| 0
| 0.098361
| 0
| 0.229508
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6e9b0500db4a76f7cfddf89a8acd023b1673bdb
| 437
|
py
|
Python
|
python/process/process_pool.py
|
y2ghost/study
|
c5278611b0a732fe19e3d805c0c079e530b1d3b2
|
[
"MIT"
] | null | null | null |
python/process/process_pool.py
|
y2ghost/study
|
c5278611b0a732fe19e3d805c0c079e530b1d3b2
|
[
"MIT"
] | null | null | null |
python/process/process_pool.py
|
y2ghost/study
|
c5278611b0a732fe19e3d805c0c079e530b1d3b2
|
[
"MIT"
] | null | null | null |
import random
import time
from multiprocessing import Pool
def worker(name: str) -> None:
print(f'Started worker {name}')
worker_time = random.choice(range(1, 5))
time.sleep(worker_time)
print(f'{name} worker finished in {worker_time} seconds')
if __name__ == '__main__':
process_names = [f'computer_{i}' for i in range(15)]
pool = Pool(processes=5)
pool.map(worker, process_names)
# pool.terminate()
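# Equivalent sketch using the pool as a context manager, which closes the
# worker processes automatically when the block exits (standard
# multiprocessing API):
#
#   with Pool(processes=5) as pool:
#       pool.map(worker, process_names)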
| 24.277778
| 61
| 0.686499
| 62
| 437
| 4.612903
| 0.532258
| 0.104895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014045
| 0.185355
| 437
| 17
| 62
| 25.705882
| 0.789326
| 0.036613
| 0
| 0
| 0
| 0
| 0.210024
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.333333
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6e9e879bcf76ce5cfbee781823873ae94cc9222
| 45,541
|
py
|
Python
|
Project/Support-NotSourced/generic_pydicom_ns.py
|
mazalgarab-git/OSICpypy
|
003fb0b146c9ed711f05475e6cc7563bf549f230
|
[
"CC0-1.0"
] | 1
|
2020-12-18T14:39:24.000Z
|
2020-12-18T14:39:24.000Z
|
Project/Support-NotSourced/generic_pydicom_ns.py
|
mazalgarab-git/OSICpypy
|
003fb0b146c9ed711f05475e6cc7563bf549f230
|
[
"CC0-1.0"
] | null | null | null |
Project/Support-NotSourced/generic_pydicom_ns.py
|
mazalgarab-git/OSICpypy
|
003fb0b146c9ed711f05475e6cc7563bf549f230
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 11:48:59 2020
@author: mazal
"""
"""
=========================================
Support functions of pydicom (Not sourced)
=========================================
Purpose: Create support functions for the pydicom project
"""
"""
Test mode 1 | Basics
testMode = True
reportMode = False
Test mode 2 | Function Report
testMode = False
reportMode = True
Commissioning mode
testMode = False
reportMode = False
"""
testMode = False
reportMode = False
"""
=========================================
Function 1: Aleatory Sampling
=========================================
Purpose: Build an aleatory sample given a train dataset of Kaggle for competition and a sample size
Raw code reference (see Tester.py): Test 5
"""
def trainDatasetSampler(samplingSize,testMode,reportMode):
# Set sampling size (% of the train population)
samplingSize = 5
# Build a Sampling dataset | Phase 1: Determine: (1) the source path of the train data; (2) the location path of the sampling
import os
import pandas as pd
path_source = 'Y:/Kaggle_OSIC/2-Data/train/'
path_source_test = 'Y:/Kaggle_OSIC/2-Data/test/'
path_destination = 'Y:/Kaggle_OSIC/4-Data (Sampling)/train/'
path_destination_test = 'Y:/Kaggle_OSIC/4-Data (Sampling)/test/'
path_destination_outcome = 'Y:/Kaggle_OSIC/4-Data (Sampling)/outcome/'
# Build a Sampling dataset | Phase 2: Build dataset using the following features from train data: (1) ID; (2) # of DICOM files per ID (including percentage).
## Improvement: (3) # of other registers (not related to DICOM files)
os.chdir(path_source)
ID_list = os.listdir(path_source)
ID_list_range = len(ID_list)
DICOMFile_list = []
DICOMFileNumber_list = []
for i in range(0,ID_list_range):
path_ID = path_source + ID_list[i] + '/'
DICOMFile_list_unitary = os.listdir(path_ID)
DICOMFile_list = DICOMFile_list + [DICOMFile_list_unitary]
DICOMFileNumber_list_unitary = len(DICOMFile_list_unitary)
DICOMFileNumber_list = DICOMFileNumber_list + [DICOMFileNumber_list_unitary]
Population_Dictionary = {'ID':ID_list,'NumberDicomFiles':DICOMFileNumber_list,'DicomFIles':DICOMFile_list}
Population_DataFrame = pd.DataFrame(data = Population_Dictionary)
DICOMFilePercentage_list = []
TotalNumberDicomFiles = sum(Population_DataFrame.NumberDicomFiles)
for j in range(0,ID_list_range):
Percentage = Population_DataFrame['NumberDicomFiles'][j] / TotalNumberDicomFiles * 100
Percentage = round(Percentage,6)
DICOMFilePercentage_list = DICOMFilePercentage_list + [Percentage]
Population_Percentage_Dictionary = {'Percentage':DICOMFilePercentage_list}
Population_Percentage_DataFrame = pd.DataFrame(data=Population_Percentage_Dictionary)
Population_DataFrame = pd.concat([Population_DataFrame, Population_Percentage_DataFrame],axis=1, sort=False)
filename_population = 'populationDataset.csv'
path_population = path_destination_outcome
Population_DataFrame.to_csv(path_population+filename_population)
# Build a Sampling dataset | Phase 3: Get an aleatory grouping of IDs (just tags)
import random
Population_DataFrame_IndexToSample=[]
Population_DataFrame_IDToSample=[]
Population_DataFrame_PercentageToSample=[]
samplingSizeGoal = 0
while (samplingSizeGoal <= samplingSize):
randomNumberTermination = len(Population_DataFrame.ID)
randomNumber = random.randrange(0,randomNumberTermination,1)
if (randomNumber not in Population_DataFrame_IndexToSample):
Population_DataFrame_IndexToSample = Population_DataFrame_IndexToSample + [randomNumber]
ID_unitary = Population_DataFrame.ID[randomNumber]
Population_DataFrame_IDToSample = Population_DataFrame_IDToSample + [ID_unitary]
Percentage_unitary = Population_DataFrame.Percentage[randomNumber]
Population_DataFrame_PercentageToSample = Population_DataFrame_PercentageToSample + [Percentage_unitary]
samplingSize_unitary = Population_DataFrame.Percentage[randomNumber]
samplingSizeGoal = samplingSizeGoal + samplingSize_unitary
samplingDataset_Dictionary = {'Index':Population_DataFrame_IndexToSample,'ID':Population_DataFrame_IDToSample,'Percentage':Population_DataFrame_PercentageToSample}
samplingDataset_DataFrame = pd.DataFrame(data=samplingDataset_Dictionary)
filename_sampling = 'samplingDataset.csv'
path_sampling = path_destination_outcome
samplingDataset_DataFrame.to_csv(path_sampling+filename_sampling)
# Build a Sampling dataset | Phase 3: Get train dataset (an aleatory grouping of IDs; tree-copy task)
from distutils.dir_util import create_tree
from distutils.dir_util import remove_tree
from distutils.dir_util import copy_tree
remove_tree(path_destination)
create_tree(path_destination,[])
if testMode == True:
print("=========================================")
print("Building the Sampling Dataset given the Train Dataset of Kaggle for competition")
print("=========================================")
for k in Population_DataFrame_IDToSample:
path_source_unitary = path_source + k + '/'
path_destination_unitary = path_destination + k + '/'
create_tree(path_destination_unitary,[])
copy_tree(path_source_unitary,path_destination_unitary)
if testMode == True: print("ID tree copied: ",k)
# Build a Sampling dataset | Phase 4: Get test dataset (tree-copy task)
## Assumption: The complete test dataset is copied.
from distutils.dir_util import create_tree
from distutils.dir_util import remove_tree
from distutils.dir_util import copy_tree
remove_tree(path_destination_test)
create_tree(path_destination_test,[])
if testMode == True:
print("=========================================")
print("Building the Test Dataset given the Test Dataset of Kaggle for competition")
print("=========================================")
IDList_test = os.listdir(path_source_test)
for l in IDList_test:
        path_source_unitary = path_source_test + l + '/'
        path_destination_unitary = path_destination_test + l + '/'
        create_tree(path_destination_unitary,[])
        copy_tree(path_source_unitary,path_destination_unitary)
        if testMode == True: print("ID tree copied: ",l)
if (testMode == False and reportMode == True):
from datetime import date
reportDate = date.today()
print("=========================================")
print("Function Report | Date:",reportDate.year,'/',reportDate.month,'/',reportDate.day,'/' )
print("=========================================")
print("Function: trainDatasetSampler(samplingSize,testMode)")
print("=========================================")
print("(1) Inputs")
print("=========================================")
print("-Sampling Size :", samplingSize, "%")
print("-Test Mode : False")
print("=========================================")
print("(2) Outputs")
print("=========================================")
print("-Type of sample: Aleatory based on IDs")
print("-Train dataset percentage to sample (base): ", round(abs(samplingSize),6),"%")
print("-Train dataset percentage to sample (adjustment): ", round(abs(samplingSizeGoal-samplingSize),6),"%")
print("-Train dataset percentage to sample (fitted): ", round(samplingSizeGoal,6),"%")
print("-Population of Train dataset (just information) available in file: ", filename_population)
print("-Sample of Train dataset (just information) available in file: ", filename_sampling)
print("=========================================")
print("(2) Outcomes:")
print("=========================================")
print("Being the outcome expressed under the variable result, outcomes are as follows:")
print("result[0] -> Dataframe for Population")
print("result[1] -> Dataframe for Sample")
print("result[2] -> Test Mode")
print("result[3] -> Rerport Mode")
print("=========================================")
return Population_DataFrame, samplingDataset_DataFrame, testMode, reportMode
if testMode == True:
samplingSize = 5
resultFunction1 = trainDatasetSampler(samplingSize,testMode,reportMode)
print("=========================================")
print("Population dataset:")
print("=========================================")
print(resultFunction1[0])
print("=========================================")
print("Population dataset:")
print("=========================================")
print(resultFunction1[1])
print("=========================================")
print("Test result Function 1: Success")
print("=========================================")
"""
=========================================
Function 2: Submission Builder
=========================================
Purpose: Build a submission CSV file
Raw code reference (see Tester.py): Test 8
"""
def SubmissionBuilder(ProductType,filename,testMode):
import os
import pandas as pd
# Set ProductType
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
# Set productType and splitType
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
# Set outcome
path_outcome = path_ProductType + 'outcome/'
# Get raw data as a DataFrame
os.chdir(path_outcome)
rawFile_DataFrame = pd.read_csv('submissionRawFile_2020_09_19.csv')
# Get submission file template as a DataFrame
os.chdir(path_ProductType)
submissionFile_DataFrame = pd.read_csv('sample_submission.csv')
# Get submission data as required in submission file
submissionNumber_range = len(rawFile_DataFrame.index)
IDcases_List = submissionFile_DataFrame.Patient_Week.copy()
IDcases_List = IDcases_List[0:5]
IDcases_List_range = len(IDcases_List)
for i in range (0,IDcases_List_range):
IDcases_List[i] = IDcases_List[i][:-4]
# Get submission data as required in submission file | FVC
FVCDataList = []
for k in range(0,submissionNumber_range):
for j in IDcases_List:
# Get datum in raw data
IDlabel_rawFile = str(j)+str('_FVC')
datum = rawFile_DataFrame[IDlabel_rawFile][k]
datum = round(datum,0)
# Set datum in submission file
FVCDataList = FVCDataList + [datum]
submissionFile_DataFrame['FVC'] = FVCDataList
# Get submission data as required in submission file | Confidence
CONDataList = []
for k in range(0,submissionNumber_range):
for j in IDcases_List:
# Get datum in raw data
IDlabel_rawFile = str(j)+str('_CON')
datum = rawFile_DataFrame[IDlabel_rawFile][k]
datum = round(datum,0)
# Set datum in submission file
CONDataList = CONDataList + [datum]
submissionFile_DataFrame['Confidence'] = CONDataList
# Save file | Get directory
path_destination = path_outcome+'submissions/'
try:
os.chdir(path_destination)
GetCreation = True
except FileNotFoundError:
GetCreation = False
if GetCreation == False:
from distutils.dir_util import mkpath
mkpath(path_destination)
os.chdir(path_destination)
submissionList = os.listdir(path_destination)
number = len(submissionList)
filename = 'submission_'+str(number+1)+'.csv'
submissionFile_DataFrame.to_csv(filename, index=False)
return submissionFile_DataFrame, filename, testMode
if testMode == True:
ProductType = 'population'
filename = 'submissionRawFile_2020_09_19.csv'
resultFunction2 = SubmissionBuilder(ProductType,filename,testMode)
print("=========================================")
print("Product Type:")
print("=========================================")
print(ProductType)
print("=========================================")
print("Submission File saved as:")
print("=========================================")
print(resultFunction2[1])
print("=========================================")
print("Test result Function 2: Success")
print("=========================================")
"""
=========================================
Function 3: Dataset builder (Stacking solution case) to process with ML models
=========================================
Purpose: Build an input dataset to be processed with an stacking solution
Raw code reference (see Tester.py): Test 15
"""
def stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode):
# Set Product Type and its corresponding path
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
# Set working directory
import os
os.chdir(path_ProductType)
# Get train dataset and test dataset
import pandas as pd
filename_trainDataset = 'train.csv'
train_dataset = pd.read_csv(path_ProductType+filename_trainDataset)
filename_testDataset = 'test.csv'
test_dataset = pd.read_csv(path_ProductType+filename_testDataset)
# Get submission dataset (template)
import numpy as np
path_resources = 'Y:/Kaggle_OSIC/3-Data (Prototype)/resources/'
if (PydicomMode == False):
filename_submissionDataset = 'submissionInputDataset.csv'
else:
filename_submissionDataset = 'submissionInputDataset_pydicom.csv'
submission_dataset = pd.read_csv(path_resources+filename_submissionDataset)
submission_dataset = submission_dataset.replace(np.nan,'iNaN')
# Adjust train dataset | Phase 1: Get ID list of the test dataset
IDList = list(test_dataset.Patient)
# Adjust train dataset | Phase 2: Get submission instances from train dataset
instancesPopulation = len(train_dataset.Patient)
indexList = []
for i in IDList:
for j in range(0,instancesPopulation):
if i == train_dataset.Patient[j]:
indexToInclude = train_dataset.index[j]
indexList = indexList + [indexToInclude]
# Adjust train dataset | Phase 3: Create an adjusted train dataset | a. Remove test instances from train dataset and reset index
train_dataset_adjusted = train_dataset.drop(indexList)
train_dataset_adjusted.reset_index
# Adjust train dataset | Phase 3: Create an adjusted train dataset | b. Get Transferring data from train dataset
instanceToTrasferList_index = []
for k in range(0,instancesPopulation):
for l in IDList:
if train_dataset.Patient[k] == l:
instanceToTransfer_Index = train_dataset.index[k]
instanceToTrasferList_index = instanceToTrasferList_index + [instanceToTransfer_Index]
train_dataset_instancesToTransfer = train_dataset.take(instanceToTrasferList_index)
train_dataset_instancesToTransfer.index
train_dataset_instancesToTransfer = train_dataset_instancesToTransfer.reset_index()
train_dataset_instancesToTransfer.drop(columns='index')
# Adjust train dataset | Phase 3: Create an adjusted train dataset | c. Update the submission dataset with the transferring data in b.
submission_dataset_range = len(submission_dataset.Patient)
train_dataset_instancesToTransfer_range = len(train_dataset_instancesToTransfer.Patient)
Patient_List = []
Week_List = []
FVC_List = []
Percent_List = []
Age_List = []
Sex_List = []
SmokingStatus_List = []
for m in range (0,submission_dataset_range):
timesCopy = 0
if(submission_dataset.Patient[m] in IDList):
referenceWeek = submission_dataset.Weeks[m]
for n in range (0,train_dataset_instancesToTransfer_range):
if(train_dataset_instancesToTransfer.Patient[n] == submission_dataset.Patient[m] and train_dataset_instancesToTransfer.Weeks[n] == referenceWeek):
if (timesCopy == 0):
submission_dataset.FVC[m] = train_dataset_instancesToTransfer.FVC[n]
submission_dataset.Percent[m] = train_dataset_instancesToTransfer.Percent[n]
submission_dataset.Age[m] = train_dataset_instancesToTransfer.Age[n]
submission_dataset.Sex[m] = train_dataset_instancesToTransfer.Sex[n]
submission_dataset.SmokingStatus[m] = train_dataset_instancesToTransfer.SmokingStatus[n]
timesCopy = timesCopy + 1
else:
# Additional instances to include
Patient_List = Patient_List + [train_dataset_instancesToTransfer.Patient[n]]
Week_List = Week_List + [train_dataset_instancesToTransfer.Weeks[n]]
FVC_List = FVC_List + [train_dataset_instancesToTransfer.FVC[n]]
Percent_List = Percent_List + [train_dataset_instancesToTransfer.Percent[n]]
Age_List = Age_List + [train_dataset_instancesToTransfer.Age[n]]
Sex_List = Sex_List + [train_dataset_instancesToTransfer.Sex[n]]
SmokingStatus_List = SmokingStatus_List + [train_dataset_instancesToTransfer.SmokingStatus[n]]
# Adjust train dataset | Phase 3: Create an adjusted train dataset | d. Add common values to submission dataset given those from the test dataset (Features: Age, Sex, SmokingStatus)
submission_dataset_range = len(submission_dataset.Patient)
for o in range(0,submission_dataset_range):
if(submission_dataset.Patient[o] in IDList):
for p in range(0,train_dataset_instancesToTransfer_range):
if(submission_dataset.Patient[o] == train_dataset_instancesToTransfer.Patient[p]):
submission_dataset.Age[o] = train_dataset_instancesToTransfer.Age[p]
submission_dataset.Sex[o] = train_dataset_instancesToTransfer.Sex[p]
submission_dataset.SmokingStatus[o] = train_dataset_instancesToTransfer.SmokingStatus[p]
# Scenario to replace NaN values: Average FVC for a given Patient
averageFVC = train_dataset_instancesToTransfer.FVC[train_dataset_instancesToTransfer.Patient == train_dataset_instancesToTransfer.Patient[p]].mean()
submission_dataset.FVC[o] = averageFVC
# Adjust train dataset | Phase 4: Create an adjusted train dataset | e. Concatenate the submission dataset (and additional instance) and the adjusted train dataset
additionalDictionary = {submission_dataset.columns[0]:Patient_List,
submission_dataset.columns[1]:Week_List,
submission_dataset.columns[2]:FVC_List,
submission_dataset.columns[3]:Percent_List,
submission_dataset.columns[4]:Age_List,
submission_dataset.columns[5]:Sex_List,
submission_dataset.columns[6]:SmokingStatus_List}
additional_dataset = pd.DataFrame(data=additionalDictionary)
frames = [train_dataset_adjusted,submission_dataset,additional_dataset]
train_dataset_adjusted = pd.concat(frames)
train_dataset_adjusted = train_dataset_adjusted.reset_index()
train_dataset_adjusted = train_dataset_adjusted.drop(columns='index')
# Adjust train dataset with pydicom train dataset) | Phase 1: Get pydicom train dataset
if(PydicomMode == True):
filename_pydicom = 'train_pydicom.csv'
path_ProductType_pydicom = path_ProductType + 'outcome/'
train_dataset_pydicom = pd.read_csv(path_ProductType_pydicom + filename_pydicom)
# Adjust train dataset with pydicom train dataset) | Phase 2: Include values from train_adjusted_pydicom.py into adjusted train dataset
if(PydicomMode == True):
instancesToInclude_List = list(train_dataset_pydicom.Patient)
InstanceToInclude_Patient = i
newIndex = len(train_dataset_adjusted.Patient)
for i in instancesToInclude_List:
# Get instance to transfer
InstanceToInclude_Patient = i
InstanceToInclude_Week = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].Weeks)[0]
InstanceToInclude_indexType1_Exhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Exhalation)[0]
InstanceToInclude_indexType1_Inhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Inhalation)[0]
InstanceToInclude_ImageType = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].ImageType)[0]
# Put instance into train_dataset_adjusted DataFrame
if (0 in list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Weeks)):
# Get index
indexToComplete = list(train_dataset_adjusted[train_dataset_adjusted.Weeks == 0].Patient[train_dataset_adjusted.Patient == i].index)
# Complete instance
train_dataset_adjusted.indexType1_Exhalation[indexToComplete] = InstanceToInclude_indexType1_Exhalation
train_dataset_adjusted.indexType1_Inhalation[indexToComplete] = InstanceToInclude_indexType1_Inhalation
train_dataset_adjusted.ImageType[indexToComplete] = str(InstanceToInclude_ImageType)
else:
# Add new instance
## Get repeatable instances
repeatableInstance1 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].FVC)[0]
repeatableInstance2 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Percent)[0]
repeatableInstance3 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Age)[0]
repeatableInstance4 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Sex)[0]
repeatableInstance5 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].SmokingStatus)[0]
## Get Dictionary
DictionaryToInclude = {}
DictionaryToInclude['Patient'] = InstanceToInclude_Patient
DictionaryToInclude['Weeks'] = InstanceToInclude_Week
DictionaryToInclude['FVC'] = repeatableInstance1
DictionaryToInclude['Percent'] = repeatableInstance2
DictionaryToInclude['Age'] = repeatableInstance3
DictionaryToInclude['Sex'] = repeatableInstance4
DictionaryToInclude['SmokingStatus'] = repeatableInstance5
DictionaryToInclude['indexType1_Exhalation'] = InstanceToInclude_indexType1_Exhalation
DictionaryToInclude['indexType1_Inhalation'] = InstanceToInclude_indexType1_Inhalation
DictionaryToInclude['ImageType'] = str(InstanceToInclude_ImageType)
## Get DataFrame
DataFrameToInclude = pd.DataFrame(data = DictionaryToInclude, index=[newIndex])
newIndex = newIndex + 1
## Concatenate DataFrame
train_dataset_adjusted = pd.concat([train_dataset_adjusted, DataFrameToInclude])
# nan filling
train_dataset_adjusted = train_dataset_adjusted.replace('iNaN',np.nan)
# Specifying dtype
train_dataset_adjusted.astype({'Patient': 'O'}).dtypes
train_dataset_adjusted.astype({'Weeks': 'float64'}).dtypes
train_dataset_adjusted.astype({'Percent': 'float64'}).dtypes
train_dataset_adjusted.astype({'Age': 'float64'}).dtypes
train_dataset_adjusted.astype({'Sex': 'O'}).dtypes
train_dataset_adjusted.astype({'SmokingStatus': 'O'}).dtypes
train_dataset_adjusted.astype({'FVC': 'float64'}).dtypes
if(PydicomMode == True):
train_dataset_adjusted.astype({'indexType1_Exhalation': 'float64'}).dtypes
train_dataset_adjusted.astype({'indexType1_Inhalation': 'float64'}).dtypes
train_dataset_adjusted.astype({'ImageType': 'O'}).dtypes
# Get CSV file
path_output = path_ProductType +'outcome/'
if(PydicomMode == False):
filename_output = 'train_adjusted.csv'
else:
filename_output = 'train_adjusted_pydicom.csv'
train_dataset_adjusted.to_csv(path_output+filename_output)
# Function Result
resultFunction = train_dataset_adjusted,path_output,filename_output
# Report Mode
if reportMode == True:
print("=========================================")
print("Function Report")
print("=========================================")
print("DataFrame")
print("=========================================")
print(resultFunction[0])
print("=========================================")
print("Product Type: ", ProductType)
print("=========================================")
print("Pydicom Mode: ", PydicomMode)
print("=========================================")
print("Location of Input File:", resultFunction[1])
print("=========================================")
print("Input File saved as:", resultFunction[2])
print("=========================================")
print("Data type of the dataset")
print("=========================================")
print(resultFunction[0].dtypes)
print("=========================================")
print("Test result Function 3: Success")
print("=========================================")
return resultFunction
if testMode == True:
ProductType = 'prototype'
PydicomMode = True
reportMode = False
resultFunction3 = stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode)
print("=========================================")
print("Function Report")
print("=========================================")
print("DataFrame")
print("=========================================")
print(resultFunction3[0])
print("=========================================")
print("=========================================")
print("Product Type: ", ProductType)
print("=========================================")
print("Pydicom Mode: ", PydicomMode)
print("=========================================")
print("Location of Input File:", resultFunction3[1])
print("=========================================")
print("Input File saved as:", resultFunction3[2])
print("=========================================")
print("Data type of the dataset")
print("=========================================")
print(resultFunction3[0].dtypes)
print("=========================================")
print("Test result Function 3: Success")
print("=========================================")
"""
=========================================
Function 4: Submission dataset builder (Stacking solution case) after ML outcome
=========================================
Purpose: Build a submission CSV file (Stacking solution case)
Raw code reference (see Tester.py): Test 17
About the Shape Parameter: It amounts to c = 0.12607421874999922 for every instance in the object of concern. The c value has been
computed with the following data-fitting scope: (1) Data: FVC predictions; (2) Probability density function: the log-Laplace
distribution from scipy (scipy.stats.loglaplace): loglaplace.pdf(x, c, loc=0, scale=1).
"""
def Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_DataFrame,pydicomMode,testMode):
# Set Product Type and its corresponding path
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
# Set working directory
import os
os.chdir(path_ProductType + 'outcome/')
# Get result data and test dataset
import pandas as pd
if(pydicomMode == True):
filename_resultDataset = 'result_pydicom.csv'
else:
filename_resultDataset = 'result.csv'
result_dataset = pd.read_csv(path_ProductType+'outcome/'+filename_resultDataset)
filename_testDataset = 'test.csv'
test_dataset = pd.read_csv(path_ProductType+filename_testDataset)
# Get submission instances | Phase 1: Index
IDList = list(test_dataset.Patient)
IDList_index_dictionary = {}
for i in IDList:
itemToInclude = result_dataset.Patient[result_dataset.Patient==i].index
IDList_index_dictionary[i] = itemToInclude
# Get submission instances | Phase 2: Extract submission instances from result dataset
IDList_index = []
IDList_columns = ['Patient', 'Weeks', 'Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']
for j in IDList: IDList_index = IDList_index + list(IDList_index_dictionary[j])
submission_dataset = result_dataset.loc[IDList_index]
# Get submission instances | Phase 3: Extract duplicated instances
submission_dataset = submission_dataset.drop_duplicates(subset=['Patient','Weeks'])
# Get submission instances | Phase 4: Sort submission instances by Weeks (ascending) and reset index
submission_dataset = submission_dataset.sort_values(by=['Weeks','Patient'])
submission_dataset = submission_dataset.reset_index()
submission_dataset = submission_dataset.drop(columns=['Unnamed: 0','index'])
# Get confidence measure | Phase 1: Get shape Parameter DataFrame by default
## When shapeParameter_DataFrame==[], parameter c = 0.126074 is assigned by default per model and ID
if (shapeParameter_DataFrame == []):
shapeParameter_dictionary = {}
shapeParameter = 0.126074
MLModelList = IDList_columns[2:]
for l in MLModelList:
keyShapeParameter = 'c Parameter_'+l
shapeParameter_dictionary[keyShapeParameter] = [shapeParameter,shapeParameter,shapeParameter,shapeParameter,shapeParameter]
shapeParameter_DataFrame = pd.DataFrame(data = shapeParameter_dictionary, index = IDList)
# Get confidence measure | Phase 2: Get standard-deviation-clipped per instance
## Metric - Part 1: standard_deviation_clipped = max(standard_deviation, 70)
## Build a DataFrame with standard-deviation-clipped values given an ID and a ML Model: standardDeviationClipped_DataFrame
standardDeviationClipped_DataFrame = shapeParameter_DataFrame.copy()
columnLabels = list(standardDeviationClipped_DataFrame.columns)
columnLabels_SDC_dictionary = {}
for i in columnLabels:
columnLabels_item ='SD_Clipped'+i[11:]
columnLabels_SDC_dictionary[i]=columnLabels_item
standardDeviationClipped_DataFrame = standardDeviationClipped_DataFrame.rename(columns=columnLabels_SDC_dictionary)
import numpy as np
standardDeviationClipped_DataFrame = standardDeviationClipped_DataFrame.replace(3,np.nan)
ID_List = list(standardDeviationClipped_DataFrame.index)
SDModel_List = list(standardDeviationClipped_DataFrame.columns)
CParameter_List = list(shapeParameter_DataFrame.columns)
numy = 0
from scipy.stats import loglaplace
for j in ID_List:
for k in SDModel_List:
itemToInclude = CParameter_List[numy]
c = shapeParameter_DataFrame[itemToInclude][j]
sd_LL = loglaplace.std(c, loc=0, scale=100)
standardDeviationClipped_DataFrame[k][j] = max(70,sd_LL) # j: index is ID | k: SD_Clipped_(ML Model)
numy = numy + 1
numy = 0
    # Get confidence measure | Phase 3: Get metric axis per model: |FVC_true - FVC_predicted|
## Metric - Part 1: |FVC_true - FVC_pred|
if(pydicomMode == True):
variableNumber = 10
else:
variableNumber = 7
MLModelList = list(submission_dataset.columns[variableNumber:])
metric_dictionary = {}
for j in MLModelList:
metric_differential = abs(submission_dataset.FVC - submission_dataset[j])
metric_differential = list(metric_differential)
keyToInclude = 'metric_'+j
metric_dictionary[keyToInclude] = metric_differential
metric_DataFrame = pd.DataFrame(data=metric_dictionary)
    # Get confidence measure | Phase 4: Get metric axis per model: min(|FVC_true - FVC_predicted|, 1000)
## metric per instance
## Metric - Part 2: min(|FVC_true - FVC_pred|,1000)
metricLabels = list(metric_DataFrame.columns)
instancesNumber = len(submission_dataset.index)
for i in metricLabels:
j = 0
while (j<instancesNumber):
metric_DataFrame[i][j] = min(metric_DataFrame[i][j],1000)
j = j+1
submission_dataset = submission_dataset.join(metric_DataFrame)
    # Get confidence measure | Phase 5: Get metric axis per model: (-1 * differential * 2^0.5 / SDC) - ln(2^0.5 * SDC)
## metric per instance
## differential = min(|FVC_true - FVC_predicted|, 1000)
## SDC: Standard Deviation Clipped
## Metric - Part 2: min(|FVC_true - FVC_pred|,1000)
IDList = list(test_dataset.Patient)
SDModel_List = list(standardDeviationClipped_DataFrame.columns)
SDModel_index_List = list(standardDeviationClipped_DataFrame.index)
metric_lists = list(metric_DataFrame.columns)
metric_index_lists = list(metric_DataFrame.index)
submission_dataset_index_List = list(submission_dataset.index)
instancesNumber = len(submission_dataset_index_List)
indexPerID_dictionary = {}
### Step 1: Get index per ID to compute
for i in IDList:
listToInclude = list(submission_dataset.Patient[submission_dataset.Patient == i].index)
indexPerID_dictionary[i] = listToInclude
indexPerID_DataFrame = pd.DataFrame(data=indexPerID_dictionary)
### Step 3: Compute metric
import math
from math import log1p
for k in IDList:
for i in metric_lists:
for j in list(indexPerID_DataFrame[k]):
differential = submission_dataset[i][j]
SDC_Label = 'SD_Clipped_' + i[7:]
SDC = standardDeviationClipped_DataFrame[SDC_Label][k]
metric_part1 = -1* 2**0.5 * differential / SDC
metric_part2 = -1 * math.log1p(2**0.5 * SDC)
metric = metric_part1 + metric_part2
submission_dataset[i][j] = metric
# Result function specification
resultFunction = submission_dataset,shapeParameter_DataFrame,standardDeviationClipped_DataFrame
# Get submission files | Phase 1: Get submission file template
filename = 'sample_submission.csv'
submissionFile = pd.read_csv(path_ProductType+filename)
## Get submission files | Phase 2: Create directory
try:
path_output = path_ProductType + 'submission/'
os.chdir(path_output)
except FileNotFoundError:
        from distutils.dir_util import mkpath
        path_output = path_ProductType + 'submission/'
        mkpath(path_output)
## Get submission files | Phase 3: Get correlative
files_list = os.listdir(path_output)
try:
maxNumber = max(files_list)
maxNumber = maxNumber[:-4]
maxNumber = int(maxNumber)
nextNumber = maxNumber+1
except ValueError:
nextNumber = 0
## Get submission files | Phase 4: Get models to include and their corresponding metrics
ModelToInclude = IDList_columns[2:]
## Get submission files | Phase 5: Build Files
for i in ModelToInclude:
filename = 'sample_submission.csv'
submissionFile = pd.read_csv(path_ProductType+filename)
submissionFile_columns = list(submissionFile.columns)
fvc_array = np.array(submission_dataset[i])
confidence_array = np.array(submission_dataset['metric_'+i])
submissionFile['FVC'] = fvc_array
submissionFile['Confidence'] = confidence_array
filename_output = str(nextNumber)+'.csv'
path_output = path_ProductType +'submission/'
submissionFile.to_csv(path_output+filename_output,columns=submissionFile_columns,index=False)
nextNumber = nextNumber + 1
return resultFunction
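# Illustrative check of the confidence metric assembled above (the Laplace Log
# Likelihood form computed by this function), for a single made-up instance:
#
#   sigma_clipped = max(sigma, 70)
#   delta         = min(|FVC_true - FVC_pred|, 1000)
#   metric        = -sqrt(2) * delta / sigma_clipped - ln(sqrt(2) * sigma_clipped)
#
#   import math
#   sigma_clipped = max(65.0, 70)                         # -> 70
#   delta = min(abs(2800 - 2650), 1000)                   # -> 150
#   metric = -math.sqrt(2)*delta/sigma_clipped - math.log(math.sqrt(2)*sigma_clipped)
#   # metric ~= -3.03 - 4.60 ~= -7.63
#
# Note that the loop above calls math.log1p(2**0.5 * SDC), i.e. ln(1 + sqrt(2)*SDC),
# which differs slightly from the plain ln(sqrt(2)*SDC) written in the Phase 5 comment.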
if testMode == True:
# Set Product type
ProductType = 'prototype'
# ShapeParameter_Dataframe
example = False
if (example == True):
import pandas as pd
shapeParameter_IDList = ['ID00419637202311204720264','ID00421637202311550012437','ID00422637202311677017371','ID00423637202312137826377','ID00426637202313170790466']
c_List1 = [3,3,3,3,3]
c_List2 = [3,3,3,3,3]
c_List3 = [3,3,3,3,3]
c_List4 = [3,3,3,3,3]
shapeParameter_dictionary = {'Random Forest':c_List1, 'Lasso':c_List2, 'Gradient Boosting':c_List3, 'Stacking Regressor':c_List4}
shapeParameter_DataFrame = pd.DataFrame(data = shapeParameter_dictionary, index = shapeParameter_IDList)
else:
shapeParameter_DataFrame = []
# Set Pydicom mode
pydicomMode = True
resultFunction4 = Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_DataFrame,pydicomMode,testMode)
print("=========================================")
print("Shape Parameter - Laplace Log Likelihood:")
print("=========================================")
print(resultFunction4[1])
print("Standard Deviation Clipped - Laplace Log Likelihood:")
print("=========================================")
print(resultFunction4[2])
print("=========================================")
print("Test result Function 4: Success")
print("=========================================")
"""
=========================================
Function 5: Get parameters given a mandatory usage of a log-Laplace distribution (i.e. Laplace Log Likelihood)
=========================================
Purpose: Visualize the fitted shape parameter of the log-Laplace distribution per patient and model
Raw code reference (see Tester.py): Test 17
"""
def shapeParameter_visualizer(ProductType,testMode):
import numpy as np
from scipy.stats import loglaplace
import matplotlib.pyplot as plt
fig, ax = plt.subplots(4, 5, sharex=False, sharey=False, figsize=(32, 24))
## Get IDs to test
import os
import pandas as pd
## Set Product Type and its corresponding path
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
## Get probabilities from predicted values grouping by ID and Model
path = path_ProductType + 'outcome/'
filename = 'result.csv'
y_pred = pd.read_csv(path+filename)
## Get IDs to test
path = path_ProductType
filename = 'test.csv'
test_dataset = pd.read_csv(path+filename)
ID_List = list(test_dataset.Patient)
## Get models
model_List = ['Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']
## Grouping task
k = 0
l = 0
for i in ID_List:
k = 0
for j in model_List:
# Data Fit task
#r = y_pred[y_pred.Patient==i][j]/sum(y_pred[y_pred.Patient==i][j])
r = y_pred[y_pred.Patient==i][j]
r = np.array(r)
c1, loc1, scale1 = loglaplace.fit(r,floc=0,fscale=1)
c = c1
# # Calculate a few first moments
# mean, var, skew, kurt = loglaplace.stats(c, moments='mvsk')
# Display the probability density function (pdf):
x = np.linspace(loglaplace.ppf(0.01, c), loglaplace.ppf(0.99, c), num=100)
ax[k,l].plot(x, loglaplace.pdf(x, c),'r-', lw=5, alpha=0.6, label='loglaplace pdf')
# Freeze the distribution and display the frozen pdf:
rv = loglaplace(c)
ax[k,l].plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
# Generate random numbers:
r = loglaplace.rvs(c1, loc=0, scale=1, size=1000)
# And compare the histogram:
#ax[k,l].hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax[k,l].legend(loc='best', frameon=False)
# Set limits
#ax[k,l].set_xlim(0,0.1)
#ax[k,l].set_ylim(0,4)
ax[k,l].set_xlabel('x')
ax[k,l].set_ylabel('f(x,c)')
# Check Accuracy
vals = loglaplace.ppf([0.001, 0.5, 0.999], c)
accuracy = np.allclose([0.001, 0.5, 0.999], loglaplace.cdf(vals, c))
# Returns True if two arrays are element-wise equal within a tolerance.
if(accuracy == True):
accuracy = 'Equal case'
else:
accuracy = 'Unequal case'
# Set title
title = str('Probability density function for loglaplace'+'\n'+i + '\n' + j + ' | Accuracy:'+accuracy)
ax[k,l].set_title(title)
k = k + 1
l = l + 1
plt.tight_layout()
plt.show()
resultFunction = c
return resultFunction
if testMode == True:
# Set Product type
ProductType = 'prototype'
# ShapeParameter_Dataframe
resultFunction5 = shapeParameter_visualizer(ProductType, testMode = True)
print("=========================================")
print("Shape Parameter - Laplace Log Likelihood:")
print("=========================================")
print(resultFunction5)
print("=========================================")
print("Test result Function 4: Success")
print("=========================================")
# """
# =========================================
# Function : Dataset builder 2 (Stacking solution case) to process with ML models
# =========================================
# Purpose: Build an input dataset to be processed with a stacking solution, including the Pydicom image-processing solution
# Raw code reference (see Tester.py): 15
# """
# def stacking_Dataset_Builder_PydicomSolution(productType, testMode):
# # Set Product Type and its corresponding path
# if ProductType == 'population':
# path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
# if ProductType == 'prototype':
# path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
# if ProductType == 'sampling':
# path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
| 41.973272
| 186
| 0.603215
| 4,457
| 45,541
| 5.976442
| 0.118914
| 0.055412
| 0.033037
| 0.013215
| 0.387206
| 0.293389
| 0.231858
| 0.192852
| 0.166573
| 0.127004
| 0
| 0.016036
| 0.251005
| 45,541
| 1,085
| 187
| 41.973272
| 0.764878
| 0.135526
| 0
| 0.322684
| 0
| 0
| 0.163236
| 0.090468
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007987
| false
| 0
| 0.046326
| 0
| 0.0623
| 0.186901
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6e9ffb5e0649025342ebb242012d9b21913b192
| 8,378
|
py
|
Python
|
paperscraper/scrapers/keywords.py
|
ahmed-shariff/scraper
|
52bed967db7e08e438daaa8dfa8d9338567ad7c2
|
[
"MIT"
] | 1
|
2021-11-19T02:56:22.000Z
|
2021-11-19T02:56:22.000Z
|
paperscraper/scrapers/keywords.py
|
ahmed-shariff/scraper
|
52bed967db7e08e438daaa8dfa8d9338567ad7c2
|
[
"MIT"
] | 1
|
2021-11-19T03:42:58.000Z
|
2022-03-29T16:32:16.000Z
|
paperscraper/scrapers/keywords.py
|
ahmed-shariff/scraper
|
52bed967db7e08e438daaa8dfa8d9338567ad7c2
|
[
"MIT"
] | 1
|
2021-11-19T02:56:28.000Z
|
2021-11-19T02:56:28.000Z
|
import re
regex = re.compile(r'[\n\r\t]')
def acm_digital_library(soup):
try:
keywords = set()
keywords_parent_ol = soup.find('ol', class_="rlist organizational-chart")
keywords_divs = keywords_parent_ol.findChildren('div', recursive=True)
for kw_parent in keywords_divs:
kw = kw_parent.text
keywords.add(regex.sub("", kw.split(",")[0]))
return list(keywords)
except Exception as e:
print(e)
return None
def graphics_interface_proceedings(soup):
return None
def ieee_explore(soup):
try:
keywords = set()
ggp_ul = soup.find('ul', class_="doc-keywords-list stats-keywords-list")
gp_li = ggp_ul.findChildren("li", class_="doc-keywords-list-item", recursive=False)
for p_li in gp_li:
if p_li.find('strong').text in ["IEEE Keywords", "INSPEC: Controlled Indexing", "INSPEC: Non-Controlled Indexing", "MeSH Terms"]:
for keywords_l in p_li.find('ul').findChildren("li", recursive=False):
a_tag = keywords_l.find("a", class_="stats-keywords-list-item")
if a_tag is not None:
keywords.add(str(regex.sub("", a_tag.text.split(",")[0])))
else:
keywords.add(str(regex.sub("", str(keywords_l.text).split(",")[0])))
return list(keywords)
except Exception as e:
print(e)
return None
def eurographics_digital_library(soup):
try:
keywords_set = set()
p_tablebody = soup.find('table', class_="detailtable").find("tbody")
p_trs = p_tablebody.findChildren('tr')
for tr in p_trs:
label = tr.find("td", class_="label-cell")
if label.text == "dc.subject":
keywords = tr.find("td", class_="word-break")
# e.g. CASE 1: ['Categories and Subject Descriptors (according to ACM CCS): I.4.1 [Image Processing and Computer Vision]: Enhancement-Filtering I.3.3 [Computer Graphics]: Picture/Image Generation-Bitmap and framebuffer operations']
# e.g. CASE 2 [TODO: Not taken care of yet] Categories and Subject Descriptors (according to ACM CCS): Information Interfaces And Presentation (e.g., HCI) [H.5.2]: User Interfaces-Graphical user interfaces (GUI)
# Step 1: Remove annoying substrings
# Step 2: Choose to take ONLY Categories, not the Subject Descriptors > Write a REGEX to take substrings between [].
# Step 3: Split the string by , or ; or :
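# Illustrative example of Steps 2-3 on CASE 1 above:
#   re.findall(r'\[(.*?)\]', '... I.4.1 [Image Processing and Computer Vision]: ... I.3.3 [Computer Graphics]: ...')
#   -> ['Image Processing and Computer Vision', 'Computer Graphics']
# When no bracketed category is present, the string falls back to a split on ',', ':' or ';'.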
to_replaces = ["CCS Concepts", "Categories and Subject Descriptors", "Categories and subject descriptors", "Categories and Subject Descriptors (according to ACM CCS)", "according to ACM CCS"]
keywords_str = keywords.text
for to_replace in to_replaces:
keywords_str = keywords_str.replace(to_replace, "")
keywords_extracted = re.findall(r'\[(.*?)\]', keywords_str)
if keywords_extracted:
keywords_set.update(keywords_extracted)
else:
keywords_set.update(re.split(',|:|;', keywords_str))
return list(keywords_set)
except Exception as e:
print(e)
return None
def springer_v2(soup):
try:
keywords = set()
keywords_parent_div = soup.find('div', class_="KeywordGroup")
keywords_span = keywords_parent_div.findChildren("span", class_="Keyword")
for k in keywords_span:
keywords.add(k.text)
return list(keywords)
except Exception as e:
print(e)
return None
def dagstuhl(soup):
try:
keywords_label = soup.find('b', text="Keywords:")
keywords_parent_font = keywords_label.parent
keywords_parent_td = keywords_parent_font.parent
keywords_font = keywords_parent_td.find_next('td').find_next('td').find("font")
if keywords_font is not None:
return re.split(',', keywords_font.text)
except Exception as e:
print(e)
return None
def springer_v1(soup):
try:
keywords = set()
keywords_parent_section = soup.find('ul', class_="c-article-subject-list")
keywords_li = keywords_parent_section.findChildren("li", class_="c-article-subject-list__subject")
for k in keywords_li:
kw = k.find("span").text
keywords.add(str(regex.sub("", kw)).strip())
return list(keywords)
except Exception as e:
print(e)
return None
def wiley_online_library(soup):
try:
keywords_parent_section = soup.find('section', class_="keywords")
keywords_ul = keywords_parent_section.find('ul')
keywords_lis = keywords_ul.findChildren("li")
keywords_set = set()
for keywords_li in keywords_lis:
# e.g. Case 1: "[3.1.1] Human-Centered Computing" and so on
# e.g. Case 2: CCS Concepts don't have '[' and ']' but they have strings such as "• Human‐centered computing → Graph drawings"
# Step 1: Remove annoying substrings
# Step 2: Choose to take ONLY Categories, not the Subject Descriptors > Write a REGEX to take substrings between [].
# Step 3: Split the string by , or ; or :
to_replaces = ["CCS Concepts", "Categories and Subject Descriptors", "Categories and subject descriptors", "Categories and Subject Descriptors (according to ACM CCS)", "according to ACM CCS"]
keywords_str = keywords_li.find("a").text
for to_replace in to_replaces:
keywords_str = keywords_str.replace(to_replace, "")
keywords_extracted = re.findall(r'\[(.*?)\]', keywords_str)
if keywords_extracted:
keywords_set.update(keywords_extracted)
else:
# CCS Concepts don't have '[' and ']' but they have strings such as "• Human‐centered computing → Graph drawings"
regex_find = r'•(.*)→(.*)'
regex_replace = r'\1;\2' # set the delimiter to either , : ; (as is used below to split)
keywords_str = re.sub(regex_find, regex_replace, keywords_str)
keywords_set.update(re.split(',|:|;', keywords_str))
return list(keywords_set)
except Exception as e:
print(e)
return None
def cogsci(soup):
return None
def scitepress(soup):
try:
keywords_set = set()
keywords_span = soup.find('span', id="ContentPlaceHolder1_LinkPaperPage_LinkPaperContent_LabelPublicationDetailKeywords")
for kw in keywords_span.text.split(","):
keywords_set.add(kw)
return list(keywords_set)
except Exception as e:
print(e)
return None
def scienceopen(soup):
try:
keywords_set = set()
for span_label in soup.find_all('span', class_="so-metadata-label"):
if "Keywords" in span_label.text:
for keyword_a in span_label.find_next_siblings('a'):
keywords_set.add(keyword_a.text)
return list(keywords_set)
except Exception as e:
pass
return None
def aaai(soup):
return None
def get_keywords(publisher, soup):
keywords_list = None
if publisher == "acm_digital_library":
keywords_list = acm_digital_library(soup)
elif publisher == "graphics_interface_proceedings":
keywords_list = graphics_interface_proceedings(soup)
elif publisher == "ieee_explore":
keywords_list = ieee_explore(soup)
elif publisher == "cogsci":
keywords_list = cogsci(soup)
elif publisher == "springer_v1":
keywords_list = springer_v1(soup)
elif publisher == "springer_v2":
keywords_list = springer_v2(soup)
elif publisher == "scitepress":
keywords_list = scitepress(soup)
elif publisher == "scienceopen":
keywords_list = scienceopen(soup)
elif publisher == "eurographics_digital_library":
keywords_list = eurographics_digital_library(soup)
elif publisher == "wiley_online_library":
keywords_list = wiley_online_library(soup)
elif publisher == "dagstuhl":
keywords_list = dagstuhl(soup)
elif publisher == "aaai":
keywords_list = aaai(soup)
# keywords_list may be None (unsupported publishers or failed scrapes); guard before taking len()
return keywords_list if keywords_list else None
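# --- Hedged usage sketch (not part of the original module) ---
# The HTML fragment below is hypothetical and only illustrates how get_keywords
# dispatches to the publisher-specific parser; requires BeautifulSoup.
if __name__ == "__main__":
    from bs4 import BeautifulSoup
    html = ('<ol class="rlist organizational-chart">'
            '<div>Information visualization,</div>'
            '<div>Graph drawing,</div>'
            '</ol>')
    soup = BeautifulSoup(html, "html.parser")
    print(get_keywords("acm_digital_library", soup))  # e.g. ['Information visualization', 'Graph drawing']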
| 38.608295
| 247
| 0.62509
| 1,032
| 8,378
| 4.905039
| 0.19186
| 0.045041
| 0.030818
| 0.032003
| 0.435994
| 0.378507
| 0.350454
| 0.350454
| 0.323785
| 0.323785
| 0
| 0.005214
| 0.267486
| 8,378
| 216
| 248
| 38.787037
| 0.818315
| 0.140368
| 0
| 0.432927
| 0
| 0
| 0.145639
| 0.033106
| 0
| 0
| 0
| 0.00463
| 0
| 1
| 0.079268
| false
| 0.006098
| 0.006098
| 0.018293
| 0.219512
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6ea376dac46236ea3d4ce92ad3215d1dbffb660
| 6,642
|
py
|
Python
|
topobank/publication/models.py
|
ContactEngineering/TopoBank
|
12710c24cc158801db20f030c3e0638060e24a0e
|
[
"MIT",
"BSD-3-Clause"
] | 3
|
2021-12-03T19:11:07.000Z
|
2021-12-27T17:14:39.000Z
|
topobank/publication/models.py
|
ContactEngineering/TopoBank
|
12710c24cc158801db20f030c3e0638060e24a0e
|
[
"MIT",
"BSD-3-Clause"
] | 268
|
2021-03-19T13:57:00.000Z
|
2022-03-31T20:58:26.000Z
|
topobank/publication/models.py
|
ContactEngineering/TopoBank
|
12710c24cc158801db20f030c3e0638060e24a0e
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.conf import settings
MAX_LEN_AUTHORS_FIELD = 512
CITATION_FORMAT_FLAVORS = ['html', 'ris', 'bibtex', 'biblatex']
DEFAULT_KEYWORDS = ['surface', 'topography']
class UnknownCitationFormat(Exception):
def __init__(self, flavor):
self._flavor = flavor
def __str__(self):
return f"Unknown citation format flavor '{self._flavor}'."
class Publication(models.Model):
LICENSE_CHOICES = [(k, settings.CC_LICENSE_INFOS[k]['option_name'])
for k in ['cc0-1.0', 'ccby-4.0', 'ccbysa-4.0']]
short_url = models.CharField(max_length=10, unique=True, null=True)
surface = models.OneToOneField("manager.Surface", on_delete=models.PROTECT, related_name='publication')
original_surface = models.ForeignKey("manager.Surface", on_delete=models.SET_NULL,
null=True, related_name='derived_publications')
publisher = models.ForeignKey("users.User", on_delete=models.PROTECT)
publisher_orcid_id = models.CharField(max_length=19, default='')  # 16 digits plus 3 dashes = 19 characters
version = models.PositiveIntegerField(default=1)
datetime = models.DateTimeField(auto_now_add=True)
license = models.CharField(max_length=12, choices=LICENSE_CHOICES, blank=False, default='')
authors = models.CharField(max_length=MAX_LEN_AUTHORS_FIELD)
container = models.FileField(max_length=50, default='')
def get_absolute_url(self):
return reverse('publication:go', args=[self.short_url])
def get_full_url(self, request):
return request.build_absolute_uri(self.get_absolute_url())
def get_citation(self, flavor, request):
if flavor not in CITATION_FORMAT_FLAVORS:
raise UnknownCitationFormat(flavor)
method_name = '_get_citation_as_'+flavor
return getattr(self, method_name)(request)
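# Hedged usage sketch (illustrative, not part of the model): given a Publication
# instance `pub` and an HttpRequest `request`, the dispatch above resolves to the
# matching private formatter, e.g.
#   pub.get_citation('bibtex', request)  # -> pub._get_citation_as_bibtex(request)
#   pub.get_citation('foo', request)     # -> raises UnknownCitationFormat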
def _get_citation_as_html(self, request):
s = '{authors}. ({year}). contact.engineering. <em>{surface.name} (Version {version})</em>.'
s += ' <a href="{publication_url}">{publication_url}</a>'
s = s.format(
authors=self.authors,
year=self.datetime.year,
version=self.version,
surface=self.surface,
publication_url=self.get_full_url(request),
)
return mark_safe(s)
def _get_citation_as_ris(self, request):
# see http://refdb.sourceforge.net/manual-0.9.6/sect1-ris-format.html
# or https://en.wikipedia.org/wiki/RIS_(file_format)
# or https://web.archive.org/web/20120526103719/http://refman.com/support/risformat_intro.asp
# https://web.archive.org/web/20120717122530/http://refman.com/support/direct%20export.zip
s = ""
def add(key, value):
nonlocal s
s += f"{key} - {value}\n"
# Electronic citation / Website
add('TY', 'ELEC')
# Title
add('TI', f"{self.surface.name} (Version {self.version})")
# Authors
for author in self.authors.split(','):
add('AU', author.strip())
# Publication Year
add('PY', format(self.datetime, '%Y/%m/%d/'))
# URL
add('UR', self.get_full_url(request))
# Name of Database
add('DB', 'contact.engineering')
# Notes
add('N1', self.surface.description)
# add keywords, defaults ones and tags
for kw in DEFAULT_KEYWORDS:
add('KW', kw)
for t in self.surface.tags.all():
add('KW', t.name)
# End of record, must be empty and last tag
add('ER', '')
return s.strip()
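# Hedged example of the RIS record produced above (surface, authors and URL are hypothetical):
#   TY - ELEC
#   TI - Example Surface (Version 1)
#   AU - Jane Doe
#   AU - John Smith
#   PY - 2021/01/01/
#   UR - https://contact.engineering/go/abc123
#   DB - contact.engineering
#   N1 - <surface description>
#   KW - surface
#   KW - topography
#   ER -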
def _get_citation_as_bibtex(self, request):
title = f"{self.surface.name} (Version {self.version})"
shortname = f"{self.surface.name}_v{self.version}".lower().replace(' ','_')
keywords = ",".join(DEFAULT_KEYWORDS)
if self.surface.tags.count()>0:
keywords += ","+",".join(t.name for t in self.surface.tags.all())
s = """
@misc{{
{shortname},
title = {{{title}}},
author = {{{author}}},
year = {{{year}}},
note = {{{note}}},
keywords = {{{keywords}}},
howpublished = {{{publication_url}}},
}}
""".format(title=title,
author=self.authors.replace(', ', ' and '),
year=self.datetime.year,
note=self.surface.description,
publication_url=self.get_full_url(request),
keywords=keywords,
shortname=shortname,
)
return s.strip()
def _get_citation_as_biblatex(self, request):
shortname = f"{self.surface.name}_v{self.version}".lower().replace(' ','_')
keywords = ",".join(DEFAULT_KEYWORDS)
if self.surface.tags.count()>0:
keywords += ","+",".join(t.name for t in self.surface.tags.all())
s = """
@online{{
{shortname},
title = {{{title}}},
version = {{{version}}},
author = {{{author}}},
year = {{{year}}},
month = {{{month}}},
date = {{{date}}},
note = {{{note}}},
keywords = {{{keywords}}},
url = {{{url}}},
urldate = {{{urldate}}}
}}
""".format(title=self.surface.name,
version=self.version,
author=self.authors.replace(', ', ' and '),
year=self.datetime.year,
month=self.datetime.month,
date=format(self.datetime, "%Y-%m-%d"),
note=self.surface.description,
url=self.get_full_url(request),
urldate=format(timezone.now(), "%Y-%m-%d"),
keywords=keywords,
shortname=shortname,
)
return s.strip()
@property
def storage_prefix(self):
"""Return prefix used for storage.
https://docs.djangoproject.com/en/2.2/ref/models/fields/#django.db.models.FileField.upload_to
Looks like a relative path to a directory.
If storage is on filesystem, the prefix should correspond
to a real directory.
"""
return "publications/{}/".format(self.short_url)
@property
def container_storage_path(self):
"""Return relative path of container in storage."""
return f"{self.storage_prefix}container.zip"
| 36.696133
| 107
| 0.579645
| 745
| 6,642
| 5.024161
| 0.302013
| 0.041143
| 0.013358
| 0.025648
| 0.238312
| 0.206519
| 0.180069
| 0.100454
| 0.100454
| 0.075341
| 0
| 0.012743
| 0.279283
| 6,642
| 180
| 108
| 36.9
| 0.769166
| 0.124812
| 0
| 0.341085
| 0
| 0.007752
| 0.236697
| 0.029717
| 0.015504
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0
| 0.03876
| 0.023256
| 0.310078
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6eb31b711fe08af2de8afcc37c668f59c3bdd16
| 1,579
|
py
|
Python
|
day_22_b.py
|
Gyaha/AOC2020
|
fbabae9acd7d274b84bc0c64f2665dfba9f008ca
|
[
"MIT"
] | null | null | null |
day_22_b.py
|
Gyaha/AOC2020
|
fbabae9acd7d274b84bc0c64f2665dfba9f008ca
|
[
"MIT"
] | null | null | null |
day_22_b.py
|
Gyaha/AOC2020
|
fbabae9acd7d274b84bc0c64f2665dfba9f008ca
|
[
"MIT"
] | null | null | null |
def play_recursively_combat(p1: list, p2: list) -> bool:
rounds = set()
winner = None
while len(p1) > 0 and len(p2) > 0:
r = tuple(p1 + [-1] + p2)
if r in rounds:
return True
else:
rounds.add(r)
c1 = p1.pop(0)
c2 = p2.pop(0)
if c1 <= len(p1) and c2 <= len(p2):
winner = play_recursively_combat(p1[:c1], p2[:c2])
else:
winner = c1 > c2
if winner:
p1.append(c1)
p1.append(c2)
else:
p2.append(c2)
p2.append(c1)
return winner
def play_combat(s: str):
p1, p2 = s.strip().split("\n\n")
p1, p2 = convert_cards(p1), convert_cards(p2)
winner = play_recursively_combat(p1, p2)
w = p1 if winner else p2
s = 0
for i, c in enumerate(reversed(w), 1):
s += c * i
return s
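# Worked example of the scoring rule above (deck from the AoC 2020 day 22 statement):
# winning deck [3, 2, 10, 6, 8, 5, 9, 4, 7, 1] scores
# 3*10 + 2*9 + 10*8 + 6*7 + 8*6 + 5*5 + 9*4 + 4*3 + 7*2 + 1*1 = 306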
def convert_cards(s: str) -> list:
c = []
for p in s.splitlines()[1:]:
c.append(int(p))
return c
def run_tests():
test_input = """Player 1:
9
2
6
3
1
Player 2:
5
8
4
7
10"""
test_output = 291
assert play_combat(test_input) == test_output
test_input = """Player 1:
43
19
Player 2:
2
29
14"""
assert play_combat(test_input)
def run() -> int:
with open("inputs/input_22.txt") as file:
data = file.read()
return play_combat(data)
if __name__ == "__main__":
run_tests()
import time
time_start = time.perf_counter()
print(run())
time_end = time.perf_counter() - time_start
print(f"Time: {time_end:0.4f} sec")
| 17.544444
| 62
| 0.542115
| 239
| 1,579
| 3.430962
| 0.364017
| 0.04878
| 0.076829
| 0.084146
| 0.136585
| 0.07561
| 0
| 0
| 0
| 0
| 0
| 0.072165
| 0.324256
| 1,579
| 89
| 63
| 17.741573
| 0.696345
| 0
| 0
| 0.1
| 0
| 0
| 0.082964
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 1
| 0.071429
| false
| 0
| 0.014286
| 0
| 0.157143
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6efe17c4e6e08ec55040433cf5ea1ff20fecb68
| 528
|
py
|
Python
|
src/ping.py
|
jnsougata/rich-embed
|
95901e590f00c4e4eabeb99c8f06bb5f90718d80
|
[
"MIT"
] | null | null | null |
src/ping.py
|
jnsougata/rich-embed
|
95901e590f00c4e4eabeb99c8f06bb5f90718d80
|
[
"MIT"
] | null | null | null |
src/ping.py
|
jnsougata/rich-embed
|
95901e590f00c4e4eabeb99c8f06bb5f90718d80
|
[
"MIT"
] | null | null | null |
import discord
import app_util
class Ping(app_util.Cog):
def __init__(self, bot: app_util.Bot):
self.bot = bot
@app_util.Cog.command(
command=app_util.SlashCommand(
name='ping', description='shows avg ping of client'
),
guild_id=877399405056102431
)
async def command(self, ctx: app_util.Context):
await ctx.send_response(embed=discord.Embed(title=f'{self.bot.latency * 1000:.2f}ms'))
def setup(bot: app_util.Bot):
bot.add_application_cog(Ping(bot))
| 24
| 94
| 0.662879
| 74
| 528
| 4.527027
| 0.513514
| 0.146269
| 0.089552
| 0.077612
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05569
| 0.217803
| 528
| 21
| 95
| 25.142857
| 0.755448
| 0
| 0
| 0
| 0
| 0
| 0.111742
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.133333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6f05425230fc70414cb78c1b2738e7f0e282ac0
| 2,017
|
py
|
Python
|
2020/24/visualization.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | null | null | null |
2020/24/visualization.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | null | null | null |
2020/24/visualization.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | 1
|
2021-12-04T10:37:09.000Z
|
2021-12-04T10:37:09.000Z
|
#!/usr/bin/env python3
import sys
import re
import numpy as np
from PIL import Image
moves = { 'e': (2, 0), 'se': (1, 2), 'sw': (-1, 2), 'w': (-2, 0), 'nw': (-1, -2), 'ne': (1, -2) }
# Save (x, y): True/False in tiles. True = black, False = white.
tiles = {}
for line in open(sys.argv[1]).read().splitlines():
pos = np.array((0, 0))
for d in re.findall(r'e|se|sw|w|nw|ne', line):
pos += moves[d]
t = tuple(pos)
if t in tiles:
tiles[t] = not tiles[t]
else:
tiles[t] = True
# Part 1
print('black:', sum(val == True for val in tiles.values()))
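# Example of the doubled-coordinate moves above: the path 'nwwswee' ends back on the
# reference tile, since (-1,-2) + (-2,0) + (-1,2) + (2,0) + (2,0) = (0, 0).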
# -- Part 2 --
# take a chance on how wide it needs to be
width = 300
heigth = 300
board = np.zeros(width * heigth, dtype=np.int8)
board = board.reshape(heigth, width)
# Fill in tiles, move to center
for key, value in tiles.items():
x, y = key
x += width // 2
y += heigth // 2
board[y][x] = value
def black_neighbours(y, x, b):
num = 0
for m in moves.values():
num += b[(y + m[1], x + m[0])]
return num
def game():
board_copy = np.copy(board)
w, h = board.shape
# Don't do outer edge (to avoid special cases)
for y in range(2, h - 2):
for x in range(2, w - 2):
tile = board_copy[(y, x)]
n = black_neighbours(y, x, board_copy)
if tile:
# black
if n == 0 or n > 2:
board[(y, x)] = False
else:
# white
if n == 2:
board[(y, x)] = True
def save_image(day):
colours = [(0, 0, 0), (255, 255, 255)]
im = Image.new('RGB', (width, heigth))
for y in range(heigth):
for x in range(width):
c = colours[board[y][x]]
im.putpixel((x, y), c)
im.save('img%03d.png' % (day))
save_image(0)
for day in range(1, 101):
game()
save_image(day)
print('Day %d: %d' % (day, len(np.where(board == True)[0])))
ys, xs = np.where(board)
print(min(ys), max(ys), min(xs), max(xs))
| 24.901235
| 97
| 0.511155
| 331
| 2,017
| 3.090634
| 0.350453
| 0.013685
| 0.02737
| 0.02346
| 0.017595
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041066
| 0.311849
| 2,017
| 80
| 98
| 25.2125
| 0.695965
| 0.115022
| 0
| 0.034483
| 0
| 0
| 0.030968
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051724
| false
| 0
| 0.068966
| 0
| 0.137931
| 0.051724
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6f0fc4f8d5c7522b3b6e45957a0edd9bcec2662
| 16,451
|
py
|
Python
|
experimental/tracing/bin/diff_heap_profiler.py
|
BearerPipelineTest/catapult
|
3800a67cd916200046a50748893bbd0dcf3d7f4a
|
[
"BSD-3-Clause"
] | 1,894
|
2015-04-17T18:29:53.000Z
|
2022-03-28T22:41:06.000Z
|
experimental/tracing/bin/diff_heap_profiler.py
|
BearerPipelineTest/catapult
|
3800a67cd916200046a50748893bbd0dcf3d7f4a
|
[
"BSD-3-Clause"
] | 4,640
|
2015-07-08T16:19:08.000Z
|
2019-12-02T15:01:27.000Z
|
experimental/tracing/bin/diff_heap_profiler.py
|
atuchin-m/catapult
|
108ea3e2ec108e68216b1250a3d79cc642600294
|
[
"BSD-3-Clause"
] | 698
|
2015-06-02T19:18:35.000Z
|
2022-03-29T16:57:15.000Z
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import print_function
import argparse
import gzip
import json
import os
import shutil
import six
from six.moves import zip
_OUTPUT_DIR = 'output'
_OUTPUT_GRAPH_DIR = os.path.join(_OUTPUT_DIR, 'graph')
class Process(object):
def __init__(self):
self.pid = None
self.name = None
self.labels = None
self.types = {}
self.strings = {}
self.stackframes = {}
self.allocators = None
self.version = None
class Entry(object):
def __init__(self):
self.count = None
self.size = None
self.type = None
self.stackframe = None
class GraphDump(object):
def __init__(self):
self.pid = None
self.name = None
self.labels = None
self.heap = None
self.root = ''
self.leaks = ''
self.leak_stackframes = 0
self.leak_objects = 0
def OpenTraceFile(file_path, mode):
if file_path.endswith('.gz'):
return gzip.open(file_path, mode + 'b')
return open(file_path, mode + 't')
def FindMemoryDumps(filename):
processes = {}
with OpenTraceFile(filename, 'r') as f:
data = json.loads(f.read().decode('utf-8'))
for event in data['traceEvents']:
pid = event['pid']
if pid not in processes:
processes[pid] = Process()
processes[pid].pid = pid
process = processes[pid]
# Retrieve process information.
if event['ph'] == 'M':
if event['name'] == 'process_name' and 'name' in event['args']:
process.name = event['args']['name']
if event['name'] == 'process_labels' and 'labels' in event['args']:
process.labels = event['args']['labels']
if event['name'] == 'typeNames':
process.types = {}
for type_id, t in six.iteritems(event['args']['typeNames']):
process.types[int(type_id)] = t
if event['name'] == 'stackFrames':
process.stackframes = {}
for stack_id, s in six.iteritems(event['args']['stackFrames']):
new_stackframe = {}
new_stackframe['name'] = s['name']
if 'parent' in s:
new_stackframe['parent'] = int(s['parent'])
process.stackframes[int(stack_id)] = new_stackframe
# Look for a detailed memory dump event.
if not ((event['name'] == 'periodic_interval' or
event['name'] == 'explicitly_triggered') and
event['args']['dumps']['level_of_detail'] == 'detailed'):
continue
# Check for a memory dump V1.
if u'heaps' in event['args']['dumps']:
# Get the first memory dump.
if not process.allocators:
process.version = 1
process.allocators = event['args']['dumps']['heaps']
# Check for a memory dump V2.
# See format: [chromium] src/base/trace_event/heap_profiler_event_writer.h
if u'heaps_v2' in event['args']['dumps']:
# Memory dump format V2 is dumping information incrementally. Update
# the cumulated indexes.
maps = event['args']['dumps']['heaps_v2']['maps']
for string in maps['strings']:
process.strings[string['id']] = string['string']
for node in maps['nodes']:
node_v1 = {}
node_v1['name'] = process.strings[node['name_sid']]
if 'parent' in node:
node_v1['parent'] = node['parent']
process.stackframes[node['id']] = node_v1
for t in maps['types']:
process.types[t['id']] = process.strings[t['name_sid']]
# Get the first memory dump.
if not process.allocators:
dump = event['args']['dumps']
process.version = 2
process.allocators = dump['heaps_v2']['allocators']
# Remove processes with incomplete memory dump.
# Iterate over a copy so entries can be deleted safely while looping (Python 3).
for pid, process in list(processes.items()):
if not (process.allocators and process.stackframes and process.types):
del processes[pid]
return processes
def ResolveMemoryDumpFields(entries, stackframes, types):
def ResolveStackTrace(stack_id, stackframes):
stackframe = stackframes[stack_id]
tail = ()
if 'parent' in stackframe:
tail = ResolveStackTrace(stackframe['parent'], stackframes)
name = stackframe['name'].replace('\r', '').replace('\n', '')
return (name,) + tail
def ResolveType(type_id, types):
return types[type_id]
for entry in entries:
# Stackframe may be -1 (18446744073709551615L) when no stackframe is
# available.
if entry.stackframe not in stackframes:
entry.stackframe = []
else:
entry.stackframe = ResolveStackTrace(entry.stackframe, stackframes)
entry.type = ResolveType(entry.type, types)
def IncrementHeapEntry(stack, count, size, typename, root):
if not stack:
root['count'] += count
root['size'] += size
if typename not in root['count_by_type']:
root['count_by_type'][typename] = 0
root['count_by_type'][typename] += count
else:
top = stack[-1]
tail = stack[:-1]
if top not in root['children']:
new_node = {}
new_node['count'] = 0
new_node['size'] = 0
new_node['children'] = {}
new_node['count_by_type'] = {}
root['children'][top] = new_node
IncrementHeapEntry(tail, count, size, typename, root['children'][top])
def CanonicalHeapEntries(root):
total_count = 0
total_size = 0
for child in six.itervalues(root['children']):
total_count += child['count']
total_size += child['size']
root['count'] -= total_count
root['size'] -= total_size
for typename in root['count_by_type']:
total_count_for_type = 0
for child in six.itervalues(root['children']):
if typename in child['count_by_type']:
total_count_for_type += child['count_by_type'][typename]
root['count_by_type'][typename] -= total_count_for_type
for child in six.itervalues(root['children']):
CanonicalHeapEntries(child)
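# Hedged worked example (hypothetical values): a node with count=10 whose only child
# reports count=4 ends up with count=6, i.e. counts and sizes become exclusive of
# whatever is already attributed to children.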
def FindLeaks(root, stack, leaks, threshold, size_threshold):
for frame in root['children']:
FindLeaks(root['children'][frame], [frame] + stack, leaks, threshold,
size_threshold)
if root['count'] > threshold and root['size'] > size_threshold:
leaks.append({'count': root['count'],
'size': root['size'],
'count_by_type': root['count_by_type'],
'stackframes': stack})
def DumpTree(root, frame, output, threshold, size_threshold):
output.write('\n{ \"name\": \"%s\",' % frame)
if root['count'] > threshold and root['size'] > size_threshold:
output.write(' \"size\": \"%s\",' % root['size'])
output.write(' \"count\": \"%s\",' % root['count'])
output.write(' \"children\": [')
is_first = True
for child_frame, child in root['children'].items():
if is_first:
is_first = False
else:
output.write(',')
DumpTree(child, child_frame, output, threshold, size_threshold)
output.write(']')
output.write('}')
def GetEntries(heap, process):
"""
Returns all entries in a heap, after filtering out unknown entries, and doing
some post processing to extract the relevant fields.
"""
if not process:
return []
entries = []
if process.version == 1:
for raw_entry in process.allocators[heap]['entries']:
# Cumulative sizes and types are skipped. see:
# https://chromium.googlesource.com/chromium/src/+/a990af190304be5bf38b120799c594df5a293518/base/trace_event/heap_profiler_heap_dump_writer.cc#294
if 'type' not in raw_entry or not raw_entry['bt']:
continue
entry = Entry()
entry.count = int(raw_entry['count'], 16)
entry.size = int(raw_entry['size'], 16)
entry.type = int(raw_entry['type'])
entry.stackframe = int(raw_entry['bt'])
entries.append(entry)
elif process.version == 2:
raw_entries = list(zip(process.allocators[heap]['counts'],
process.allocators[heap]['sizes'],
process.allocators[heap]['types'],
process.allocators[heap]['nodes']))
for (raw_count, raw_size, raw_type, raw_stackframe) in raw_entries:
entry = Entry()
entry.count = raw_count
entry.size = raw_size
entry.type = raw_type
entry.stackframe = raw_stackframe
entries.append(entry)
# Resolve fields by looking into indexes
ResolveMemoryDumpFields(entries, process.stackframes, process.types)
return entries
def FilterProcesses(processes, filter_by_name, filter_by_labels):
remaining_processes = {}
for pid, process in six.iteritems(processes):
if filter_by_name and process.name != filter_by_name:
continue
if (filter_by_labels and
(not process.labels or filter_by_labels not in process.labels)):
continue
remaining_processes[pid] = process
return remaining_processes
def FindRelevantProcesses(start_trace, end_trace,
filter_by_name,
filter_by_labels,
match_by_labels):
# Retrieve the processes and the associated memory dump.
end_processes = FindMemoryDumps(end_trace)
end_processes = FilterProcesses(end_processes, filter_by_name,
filter_by_labels)
start_processes = None
if start_trace:
start_processes = FindMemoryDumps(start_trace)
start_processes = FilterProcesses(start_processes, filter_by_name,
filter_by_labels)
# Build a sequence of pair of processes to be compared.
processes = []
if not start_processes:
# Only keep end-processes.
for _, end_process in six.iteritems(end_processes):
processes.append((None, end_process))
elif match_by_labels:
# Processes are paired based on name/labels.
for _, end_process in six.iteritems(end_processes):
matching_start_process = None
for _, start_process in six.iteritems(start_processes):
if (start_process.name == end_process.name and
(start_process.name in ['Browser', 'GPU'] or
start_process.labels == end_process.labels)):
matching_start_process = start_process
if matching_start_process:
processes.append((matching_start_process, end_process))
else:
# Processes are paired based on their PID.
relevant_pids = set(end_processes.keys()) & set(start_processes.keys())
for pid in relevant_pids:
start_process = start_processes[pid]
end_process = end_processes[pid]
processes.append((start_process, end_process))
return processes
def BuildGraphDumps(processes, threshold, size_threshold):
"""
Build graph for a sequence of pair of processes.
If start_process is None, counts objects in end_trace.
Otherwise, counts objects present in end_trace, but not in start_process.
"""
graph_dumps = []
for (start_process, end_process) in processes:
pid = end_process.pid
name = end_process.name if end_process.name else ''
labels = end_process.labels if end_process.labels else ''
print('Process[%d] %s: %s' % (pid, name, labels))
for heap in end_process.allocators:
start_entries = GetEntries(heap, start_process)
end_entries = GetEntries(heap, end_process)
graph = GraphDump()
graph.pid = pid
graph.name = name
graph.labels = labels
graph.heap = heap
graph_dumps.append(graph)
# Do the math: diffing start and end memory dumps.
root = {}
root['count'] = 0
root['size'] = 0
root['children'] = {}
root['count_by_type'] = {}
for entry in start_entries:
if entry.type:
IncrementHeapEntry(entry.stackframe, - entry.count, - entry.size,
entry.type, root)
for entry in end_entries:
if entry.type:
IncrementHeapEntry(entry.stackframe, entry.count, entry.size,
entry.type, root)
CanonicalHeapEntries(root)
graph.root = root
# Find leaks
leaks = []
FindLeaks(root, [], leaks, threshold, size_threshold)
leaks.sort(reverse=True, key=lambda k: k['size'])
if leaks:
print(' %s: %d potential leaks found.' % (heap, len(leaks)))
graph.leaks = leaks
graph.leak_stackframes = len(leaks)
for leak in leaks:
graph.leak_objects += leak['count']
return graph_dumps
def WritePotentialLeaks(graph_dumps):
for graph in graph_dumps:
if graph.leaks:
filename = 'process_%d_%s-leaks.json' % (graph.pid, graph.heap)
output_filename = os.path.join(_OUTPUT_DIR, filename)
with open(output_filename, 'w') as output:
json.dump(graph.leaks, output)
def WriteGrahDumps(graph_dumps, threshold, size_threshold):
for graph in graph_dumps:
# Dump the remaining allocated objects tree.
filename = 'process_%d_%s-objects.json' % (graph.pid, graph.heap)
output_filename = os.path.join(_OUTPUT_GRAPH_DIR, filename)
if graph.root:
with open(output_filename, 'w') as output:
DumpTree(graph.root, '.', output, threshold, size_threshold)
graph.root = filename
def WriteIndex(graph_dumps):
output_filename = os.path.join(_OUTPUT_GRAPH_DIR, 'index.json')
with open(output_filename, 'w') as output:
json.dump([
{'pid': graph.pid,
'heap': graph.heap,
'name': graph.name,
'labels': graph.labels,
'objects': graph.root,
'potential leaks': graph.leak_stackframes,
'objects leaked': graph.leak_objects,
}
for graph in graph_dumps], output)
def WriteHTML():
# Copy the HTML page.
source = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'diff_heap_profiler.html')
destination = os.path.join(_OUTPUT_GRAPH_DIR, 'index.html')
shutil.copyfile(source, destination)
# Copy the D3 library file.
source = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir,
os.path.pardir,
os.path.pardir,
'tracing',
'third_party',
'd3',
'd3.min.js')
destination = os.path.join(_OUTPUT_GRAPH_DIR, 'd3.min.js')
shutil.copyfile(source, destination)
def Main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--flame-graph',
action='store_true',
help='Output a flame graph based on stackframe allocations')
parser.add_argument(
'--threshold',
type=int,
default=0,
help='Objects threshold for being a potential memory leak')
parser.add_argument(
'--size-threshold',
type=int,
default=0,
help='Size threshold for being a potential memory leak')
parser.add_argument(
'--filter-by-name',
type=str,
help='Only keep processes with name (i.e. Browser, Renderer, ...)')
parser.add_argument(
'--filter-by-labels',
type=str,
help='Only keep processes with matching labels')
parser.add_argument(
'--match-by-labels',
action='store_true',
help='Match processes between runs by labels')
parser.add_argument(
'trace',
nargs='+',
help='Trace files to be processed')
options = parser.parse_args()
if options.threshold == 0 and options.size_threshold == 0:
options.threshold = 1000
if len(options.trace) == 1:
end_trace = options.trace[0]
start_trace = None
else:
start_trace = options.trace[0]
end_trace = options.trace[1]
if not os.path.exists(_OUTPUT_DIR):
os.makedirs(_OUTPUT_DIR)
# Find relevant processes to be processed.
processes = FindRelevantProcesses(start_trace, end_trace,
options.filter_by_name,
options.filter_by_labels,
options.match_by_labels)
graph_dumps = BuildGraphDumps(processes, options.threshold,
options.size_threshold)
WritePotentialLeaks(graph_dumps)
if options.flame_graph:
if not os.path.exists(_OUTPUT_GRAPH_DIR):
os.makedirs(_OUTPUT_GRAPH_DIR)
WriteGrahDumps(graph_dumps, options.threshold, options.size_threshold)
WriteIndex(graph_dumps)
WriteHTML()
if __name__ == '__main__':
Main()
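# Example invocation (illustrative; flags as defined in Main above, trace paths hypothetical):
#   python diff_heap_profiler.py --flame-graph --threshold 1000 start_trace.json.gz end_trace.json.gz
# Potential-leak JSON files are written under 'output/'; with --flame-graph the object
# trees and index.html land under 'output/graph/'.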
| 32.005837
| 152
| 0.636253
| 2,007
| 16,451
| 5.046338
| 0.155954
| 0.010071
| 0.011947
| 0.010367
| 0.229759
| 0.17032
| 0.143661
| 0.093207
| 0.073855
| 0.057662
| 0
| 0.008292
| 0.24497
| 16,451
| 513
| 153
| 32.068226
| 0.807101
| 0.096651
| 0
| 0.167102
| 0
| 0
| 0.108296
| 0.004932
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05483
| false
| 0
| 0.023499
| 0.002611
| 0.112272
| 0.007833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6f21f20dc1c7283a540aac397169a7429e851b1
| 3,743
|
py
|
Python
|
mne_bids/commands/mne_bids_raw_to_bids.py
|
kingjr/mne-bids
|
3a4543076912cebbc89a5f0b9433cda1b9e288b8
|
[
"BSD-3-Clause"
] | null | null | null |
mne_bids/commands/mne_bids_raw_to_bids.py
|
kingjr/mne-bids
|
3a4543076912cebbc89a5f0b9433cda1b9e288b8
|
[
"BSD-3-Clause"
] | null | null | null |
mne_bids/commands/mne_bids_raw_to_bids.py
|
kingjr/mne-bids
|
3a4543076912cebbc89a5f0b9433cda1b9e288b8
|
[
"BSD-3-Clause"
] | null | null | null |
"""Write raw files to BIDS format.
example usage: $ mne_bids raw_to_bids --subject_id sub01 --task rest
--raw data.edf --bids_root new_path
"""
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
import mne_bids
from mne_bids import write_raw_bids, BIDSPath
from mne_bids.read import _read_raw
def run():
"""Run the raw_to_bids command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__, usage="usage: %prog options args",
prog_prefix='mne_bids',
version=mne_bids.__version__)
parser.add_option('--subject_id', dest='subject_id',
help=('subject name in BIDS compatible format '
'(01, 02, etc.)'))
parser.add_option('--task', dest='task',
help='name of the task the data is based on')
parser.add_option('--raw', dest='raw_fname',
help='path to the raw MEG file')
parser.add_option('--bids_root', dest='bids_root',
help='The path of the BIDS compatible folder.')
parser.add_option('--session_id', dest='session_id',
help='session name in BIDS compatible format')
parser.add_option('--run', dest='run',
help='run number for this dataset')
parser.add_option('--acq', dest='acq',
help='acquisition parameter for this dataset')
parser.add_option('--events_data', dest='events_data',
help='events file (events.tsv)')
parser.add_option('--event_id', dest='event_id',
help='event id dict', metavar='eid')
parser.add_option('--hpi', dest='hpi',
help='path to the MEG marker points')
parser.add_option('--electrode', dest='electrode',
help='path to head-native digitizer points')
parser.add_option('--hsp', dest='hsp',
help='path to headshape points')
parser.add_option('--config', dest='config',
help='path to the configuration file')
parser.add_option('--overwrite', dest='overwrite',
help="whether to overwrite existing data (BOOLEAN)")
parser.add_option('--line_freq', dest='line_freq',
help="The frequency of the line noise in Hz "
"(e.g. 50 or 60). If unknown, pass None")
opt, args = parser.parse_args()
if len(args) > 0:
parser.print_help()
parser.error('Do not specify arguments without flags. Found: "{}".\n'
.format(args))
if not all([opt.subject_id, opt.task, opt.raw_fname, opt.bids_root]):
parser.print_help()
parser.error('Arguments missing. You need to specify at least the '
'following: --subject_id, --task, --raw, --bids_root.')
bids_path = BIDSPath(
subject=opt.subject_id, session=opt.session_id, run=opt.run,
acquisition=opt.acq, task=opt.task, root=opt.bids_root)
allow_maxshield = False
if opt.raw_fname.endswith('.fif'):
allow_maxshield = True
raw = _read_raw(opt.raw_fname, hpi=opt.hpi, electrode=opt.electrode,
hsp=opt.hsp, config=opt.config,
allow_maxshield=allow_maxshield)
if opt.line_freq is not None:
line_freq = None if opt.line_freq == "None" else opt.line_freq
raw.info['line_freq'] = line_freq
write_raw_bids(raw, bids_path, event_id=opt.event_id,
events_data=opt.events_data, overwrite=opt.overwrite,
verbose=True)
if __name__ == '__main__':
run()
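# Illustrative invocation using the optional flags defined above (paths hypothetical):
#   mne_bids raw_to_bids --subject_id 01 --task rest --raw data.edf --bids_root ./bids \
#       --session_id 01 --run 01 --line_freq 50 --overwrite True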
| 41.588889
| 77
| 0.594176
| 477
| 3,743
| 4.45912
| 0.297694
| 0.06347
| 0.105783
| 0.018336
| 0.076164
| 0.027268
| 0
| 0
| 0
| 0
| 0
| 0.004458
| 0.280791
| 3,743
| 89
| 78
| 42.05618
| 0.785661
| 0.078547
| 0
| 0.030303
| 0
| 0
| 0.287212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015152
| false
| 0.015152
| 0.060606
| 0
| 0.075758
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6f290178fbe89e1c3a852359d5e4b95ce0dd4ec
| 1,460
|
py
|
Python
|
lab1oop.py
|
NastiaK/NewRepository
|
d1907fc2e159dc1831071d7c79e20bbfb47fb822
|
[
"MIT"
] | null | null | null |
lab1oop.py
|
NastiaK/NewRepository
|
d1907fc2e159dc1831071d7c79e20bbfb47fb822
|
[
"MIT"
] | null | null | null |
lab1oop.py
|
NastiaK/NewRepository
|
d1907fc2e159dc1831071d7c79e20bbfb47fb822
|
[
"MIT"
] | null | null | null |
class Calculations:
def __init__(self, first, second):
self.first = first
self.second = second
def add(self):
print(self.first + self.second)
def subtract(self):
print(self.first - self.second)
def multiply(self):
print(self.first * self.second)
def divide(self):
if self.second == 0:
print("Can't divide by zero")
else:
print(self.first / self.second)
def main():
print("Calculator has started")
while True:
a = float(input("Enter first number "))
b = float(input("Enter second number "))
chooseop = 1
calc=Calculations(a, b)
while (chooseop == 1) | (chooseop == 2) | (chooseop == 3) | (chooseop == 4):
chooseop = int(input("Enter 1 for addition, 2 for subtraction, 3 for multiplication and 4 for division "))
print(chooseop)
if chooseop == 1:
calc.add()
break
elif chooseop == 2:
calc.subtract()
break
elif chooseop == 3:
calc.multiply()
break
elif chooseop == 4:
calc.divide()
break
elif (chooseop != 1) & (chooseop != 2) & (chooseop != 3) & (chooseop != 4):
print("Invalid operation number")
if __name__ == "__main__":
main()
| 29.795918
| 119
| 0.489041
| 150
| 1,460
| 4.68
| 0.326667
| 0.076923
| 0.106838
| 0.102564
| 0.273504
| 0.273504
| 0.235043
| 0.102564
| 0
| 0
| 0
| 0.020501
| 0.39863
| 1,460
| 48
| 120
| 30.416667
| 0.779043
| 0
| 0
| 0.097561
| 0
| 0
| 0.137394
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146341
| false
| 0
| 0
| 0
| 0.170732
| 0.195122
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|